diff --git a/docs/en/developer/developer-guide/java-format-code.md b/docs/en/developer/developer-guide/java-format-code.md index d87a97f5ca..bad37cc7f1 100644 --- a/docs/en/developer/developer-guide/java-format-code.md +++ b/docs/en/developer/developer-guide/java-format-code.md @@ -42,6 +42,13 @@ standard java package * Do not use `import *` * Do not use `import static` +## Check when compiling + +Now, when compiling with `maven`, `CheckStyle` checks are run by default. This will slightly slow down compilation. If you want to skip the checkstyle check, use the following command to compile: +``` +mvn clean install -DskipTests -Dcheckstyle.skip +``` + ## Checkstyle Plugin Now we have `formatter-check` in `CI` to check the code format. diff --git a/docs/zh-CN/developer/developer-guide/java-format-code.md b/docs/zh-CN/developer/developer-guide/java-format-code.md index 6896107476..6fad1867ec 100644 --- a/docs/zh-CN/developer/developer-guide/java-format-code.md +++ b/docs/zh-CN/developer/developer-guide/java-format-code.md @@ -42,6 +42,13 @@ standard java package * 禁止使用 `import *` * 禁止使用 `import static` +## 编译时检查 + +现在，在使用`maven`进行编译时，会默认进行`CheckStyle`检查。此检查会略微降低编译速度。如果想跳过此检查，请使用如下命令进行编译 +``` +mvn clean install -DskipTests -Dcheckstyle.skip +``` + ## Checkstyle 插件 现在的 `CI` 之中会有 `formatter-check` 进行代码格式化检测。 diff --git a/fe/check/checkstyle/checkstyle.xml b/fe/check/checkstyle/checkstyle.xml index 2b005d16c2..b37927ff1d 100644 --- a/fe/check/checkstyle/checkstyle.xml +++ b/fe/check/checkstyle/checkstyle.xml @@ -24,7 +24,7 @@ under the License. - + @@ -43,7 +43,6 @@ under the License. - @@ -52,24 +51,20 @@ under the License. - - - - @@ -83,33 +78,26 @@ under the License. - - - - - + - - - - - @@ -134,7 +120,6 @@ under the License. value="CLASS_DEF, METHOD_DEF, CTOR_DEF, LITERAL_FOR, LITERAL_WHILE, STATIC_INIT, INSTANCE_INIT, ANNOTATION_DEF, ENUM_DEF, INTERFACE_DEF, RECORD_DEF, COMPACT_CTOR_DEF"/> - @@ -148,61 +133,36 @@ under the License. - - - + - - - - - - - - - - - - - + + + + - - - - - - + + + - - - - - - + + - - - - - - - - - - + + + @@ -220,25 +180,20 @@ under the License. - - - + - + + value="CLASS_DEF, INTERFACE_DEF, ENUM_DEF, RECORD_DEF, ANNOTATION_DEF"/> - @@ -252,11 +207,9 @@ under the License. - - @@ -265,14 +218,9 @@ under the License. - - - - - - - + + @@ -285,90 +233,75 @@ under the License. value="CLASS_DEF, INTERFACE_DEF, ENUM_DEF, ANNOTATION_DEF, ANNOTATION_FIELD_DEF, PARAMETER_DEF, VARIABLE_DEF, METHOD_DEF, PATTERN_VARIABLE_DEF, RECORD_DEF, RECORD_COMPONENT_DEF"/> - - - - - - - - - - - - - - - @@ -392,35 +325,28 @@ under the License. value="GenericWhitespace ''{0}'' should followed by whitespace."/> - - - - - - - + - - @@ -462,7 +387,6 @@ under the License. - @@ -479,7 +403,6 @@ under the License. 
LITERAL_TRY, LITERAL_WHILE, LOR, LT, MINUS, MINUS_ASSIGN, MOD, MOD_ASSIGN, NOT_EQUAL, PLUS, PLUS_ASSIGN, QUESTION, RCURLY, SL, SLIST, SL_ASSIGN, SR, SR_ASSIGN, STAR, STAR_ASSIGN, LITERAL_ASSERT, TYPE_EXTENSION_AND"/> - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/io/DataOutputBuffer.java b/fe/fe-common/src/main/java/org/apache/doris/common/io/DataOutputBuffer.java index f0337217d8..6aea88c41c 100644 --- a/fe/fe-common/src/main/java/org/apache/doris/common/io/DataOutputBuffer.java +++ b/fe/fe-common/src/main/java/org/apache/doris/common/io/DataOutputBuffer.java @@ -70,7 +70,7 @@ public class DataOutputBuffer extends DataOutputStream { public void write(DataInput in, int len) throws IOException { int newcount = count + len; if (newcount > buf.length) { - byte newbuf[] = new byte[Math.max(buf.length << 1, newcount)]; + byte[] newbuf = new byte[Math.max(buf.length << 1, newcount)]; System.arraycopy(buf, 0, newbuf, 0, count); buf = newbuf; } diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/io/IOUtils.java b/fe/fe-common/src/main/java/org/apache/doris/common/io/IOUtils.java index ffd2330bd3..60596f44d1 100644 --- a/fe/fe-common/src/main/java/org/apache/doris/common/io/IOUtils.java +++ b/fe/fe-common/src/main/java/org/apache/doris/common/io/IOUtils.java @@ -34,7 +34,7 @@ import java.net.Socket; public class IOUtils { public static long copyBytes(InputStream in, OutputStream out, int buffSize, long len) throws IOException { - byte buf[] = new byte[buffSize]; + byte[] buf = new byte[buffSize]; int totalRead = 0; int toRead = 0; int bytesRead = 0; @@ -76,7 +76,7 @@ public class IOUtils { int buffSize, int speed, boolean close) throws IOException { PrintStream ps = out instanceof PrintStream ? (PrintStream) out : null; - byte buf[] = new byte[buffSize]; + byte[] buf = new byte[buffSize]; long bytesReadTotal = 0; long startTime = 0; long sleepTime = 0; @@ -133,7 +133,7 @@ public class IOUtils { int buffSize, boolean close) throws IOException { PrintStream ps = out instanceof PrintStream ? 
(PrintStream) out : null; - byte buf[] = new byte[buffSize]; + byte[] buf = new byte[buffSize]; long totalBytes = 0; try { int bytesRead = in.read(buf); @@ -169,7 +169,7 @@ public class IOUtils { * if it could not read requested number of bytes for any reason * (including EOF) */ - public static void readFully(InputStream in, byte buf[], int off, int len) + public static void readFully(InputStream in, byte[] buf, int off, int len) throws IOException { int toRead = len; int tmpOff = off; @@ -263,6 +263,7 @@ public class IOUtils { Text.writeString(output, value); } } + public static String readOptionStringOrNull(DataInput input) throws IOException { if (input.readBoolean()) { return Text.readString(input); diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/io/OutputBuffer.java b/fe/fe-common/src/main/java/org/apache/doris/common/io/OutputBuffer.java index 8f95d7401b..f68a2f179e 100644 --- a/fe/fe-common/src/main/java/org/apache/doris/common/io/OutputBuffer.java +++ b/fe/fe-common/src/main/java/org/apache/doris/common/io/OutputBuffer.java @@ -66,7 +66,7 @@ public class OutputBuffer extends FilterOutputStream { public void write(InputStream in, int len) throws IOException { int newcount = count + len; if (newcount > buf.length) { - byte newbuf[] = new byte[Math.max(buf.length << 1, newcount)]; + byte[] newbuf = new byte[Math.max(buf.length << 1, newcount)]; System.arraycopy(buf, 0, newbuf, 0, count); buf = newbuf; } diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/io/Text.java b/fe/fe-common/src/main/java/org/apache/doris/common/io/Text.java index 7331195d32..1710cb34e2 100644 --- a/fe/fe-common/src/main/java/org/apache/doris/common/io/Text.java +++ b/fe/fe-common/src/main/java/org/apache/doris/common/io/Text.java @@ -606,7 +606,7 @@ public class Text implements Writable { return ch; } - static final int offsetsFromUTF8[] = { 0x00000000, 0x00003080, 0x000E2080, + static final int[] offsetsFromUTF8 = { 0x00000000, 0x00003080, 0x000E2080, 0x03C82080, 0xFA082080, 0x82082080 }; /** diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/property/PropertySchema.java b/fe/fe-common/src/main/java/org/apache/doris/common/property/PropertySchema.java index b68d2072bf..e58c62f80e 100644 --- a/fe/fe-common/src/main/java/org/apache/doris/common/property/PropertySchema.java +++ b/fe/fe-common/src/main/java/org/apache/doris/common/property/PropertySchema.java @@ -332,7 +332,7 @@ public abstract class PropertySchema { } } - private static abstract class ComparableProperty extends PropertySchema { + private abstract static class ComparableProperty extends PropertySchema { protected ComparableProperty(String name) { super(name); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/PaloFe.java b/fe/fe-core/src/main/java/org/apache/doris/PaloFe.java index 0d1ec31434..8004ee15ab 100755 --- a/fe/fe-core/src/main/java/org/apache/doris/PaloFe.java +++ b/fe/fe-core/src/main/java/org/apache/doris/PaloFe.java @@ -132,7 +132,8 @@ public class PaloFe { // 1. HttpServer for HTTP Server // 2. FeServer for Thrift Server // 3. 
QeService for MySQL Server - QeService qeService = new QeService(Config.query_port, Config.mysql_service_nio_enabled, ExecuteEnv.getInstance().getScheduler()); + QeService qeService = new QeService(Config.query_port, Config.mysql_service_nio_enabled, + ExecuteEnv.getInstance().getScheduler()); FeServer feServer = new FeServer(Config.rpc_port); feServer.start(); @@ -324,7 +325,8 @@ public class PaloFe { } else if (cmdLineOpts.runImageTool()) { File imageFile = new File(cmdLineOpts.getImagePath()); if (!imageFile.exists()) { - System.out.println("image does not exist: " + imageFile.getAbsolutePath() + " . Please put an absolute path instead"); + System.out.println("image does not exist: " + imageFile.getAbsolutePath() + + " . Please put an absolute path instead"); System.exit(-1); } else { System.out.println("Start to load image: "); diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java b/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java index 2154ab311c..a11537eb33 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java @@ -171,7 +171,8 @@ public class Alter { } Catalog.getCurrentCatalog().dropPartition(db, olapTable, ((DropPartitionClause) alterClause)); } else if (alterClause instanceof ReplacePartitionClause) { - Catalog.getCurrentCatalog().replaceTempPartition(db, olapTable, (ReplacePartitionClause) alterClause); + Catalog.getCurrentCatalog().replaceTempPartition( + db, olapTable, (ReplacePartitionClause) alterClause); } else if (alterClause instanceof ModifyPartitionClause) { ModifyPartitionClause clause = ((ModifyPartitionClause) alterClause); // expand the partition names if it is 'Modify Partition(*)' @@ -206,7 +207,8 @@ public class Alter { } else if (currentAlterOps.contains(AlterOpType.MODIFY_DISTRIBUTION)) { Preconditions.checkState(alterClauses.size() == 1); AlterClause alterClause = alterClauses.get(0); - Catalog.getCurrentCatalog().modifyDefaultDistributionBucketNum(db, olapTable, (ModifyDistributionClause) alterClause); + Catalog.getCurrentCatalog().modifyDefaultDistributionBucketNum( + db, olapTable, (ModifyDistributionClause) alterClause); } else if (currentAlterOps.contains(AlterOpType.MODIFY_COLUMN_COMMENT)) { processModifyColumnComment(db, olapTable, alterClauses); } else if (currentAlterOps.contains(AlterOpType.MODIFY_TABLE_COMMENT)) { @@ -227,7 +229,8 @@ public class Alter { ModifyTableCommentClause clause = (ModifyTableCommentClause) alterClause; tbl.setComment(clause.getComment()); // log - ModifyCommentOperationLog op = ModifyCommentOperationLog.forTable(db.getId(), tbl.getId(), clause.getComment()); + ModifyCommentOperationLog op = ModifyCommentOperationLog + .forTable(db.getId(), tbl.getId(), clause.getComment()); Catalog.getCurrentCatalog().getEditLog().logModifyComment(op); } finally { tbl.writeUnlock(); @@ -338,7 +341,8 @@ public class Alter { } } - private void processModifyEngineInternal(Database db, Table externalTable, Map prop, boolean isReplay) { + private void processModifyEngineInternal(Database db, Table externalTable, + Map prop, boolean isReplay) { MysqlTable mysqlTable = (MysqlTable) externalTable; Map newProp = Maps.newHashMap(prop); newProp.put(OdbcTable.ODBC_HOST, mysqlTable.getHost()); @@ -393,7 +397,8 @@ public class Alter { processAlterExternalTable(stmt, table, db); return; default: - throw new DdlException("Do not support alter " + table.getType().toString() + " table[" + tableName + "]"); + throw new DdlException("Do not support 
alter " + + table.getType().toString() + " table[" + tableName + "]"); } // the following ops should done outside table lock. because it contain synchronized create operation @@ -402,7 +407,8 @@ public class Alter { AlterClause alterClause = alterClauses.get(0); if (alterClause instanceof AddPartitionClause) { if (!((AddPartitionClause) alterClause).isTempPartition()) { - DynamicPartitionUtil.checkAlterAllowed((OlapTable) db.getTableOrMetaException(tableName, TableType.OLAP)); + DynamicPartitionUtil.checkAlterAllowed( + (OlapTable) db.getTableOrMetaException(tableName, TableType.OLAP)); } Catalog.getCurrentCatalog().addPartition(db, tableName, (AddPartitionClause) alterClause); } else if (alterClause instanceof ModifyPartitionClause) { @@ -432,7 +438,8 @@ public class Alter { } // entry of processing replace table - private void processReplaceTable(Database db, OlapTable origTable, List alterClauses) throws UserException { + private void processReplaceTable(Database db, OlapTable origTable, List alterClauses) + throws UserException { ReplaceTableClause clause = (ReplaceTableClause) alterClauses.get(0); String newTblName = clause.getTblName(); boolean swapTable = clause.isSwapTable(); @@ -452,7 +459,8 @@ public class Alter { } replaceTableInternal(db, origTable, olapNewTbl, swapTable, false); // write edit log - ReplaceTableOperationLog log = new ReplaceTableOperationLog(db.getId(), origTable.getId(), olapNewTbl.getId(), swapTable); + ReplaceTableOperationLog log = new ReplaceTableOperationLog(db.getId(), + origTable.getId(), olapNewTbl.getId(), swapTable); Catalog.getCurrentCatalog().getEditLog().logReplaceTable(log); LOG.info("finish replacing table {} with table {}, is swap: {}", oldTblName, newTblName, swapTable); } finally { @@ -533,7 +541,8 @@ public class Alter { modifyViewDef(db, view, stmt.getInlineViewDef(), ctx.getSessionVariable().getSqlMode(), stmt.getColumns()); } - private void modifyViewDef(Database db, View view, String inlineViewDef, long sqlMode, List newFullSchema) throws DdlException { + private void modifyViewDef(Database db, View view, String inlineViewDef, long sqlMode, + List newFullSchema) throws DdlException { db.writeLockOrDdlException(); try { view.writeLockOrDdlException(); @@ -549,7 +558,8 @@ public class Alter { db.dropTable(viewName); db.createTable(view); - AlterViewInfo alterViewInfo = new AlterViewInfo(db.getId(), view.getId(), inlineViewDef, newFullSchema, sqlMode); + AlterViewInfo alterViewInfo = new AlterViewInfo(db.getId(), view.getId(), + inlineViewDef, newFullSchema, sqlMode); Catalog.getCurrentCatalog().getEditLog().logModifyViewDef(alterViewInfo); LOG.info("modify view[{}] definition to {}", viewName, inlineViewDef); } finally { @@ -680,7 +690,8 @@ public class Alter { DateLiteral dateLiteral = new DateLiteral(dataProperty.getCooldownTimeMs(), TimeUtils.getTimeZone(), Type.DATETIME); newProperties.put(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TIME, dateLiteral.getStringValue()); - newProperties.put(PropertyAnalyzer.PROPERTIES_REMOTE_STORAGE_RESOURCE, dataProperty.getRemoteStorageResourceName()); + newProperties.put(PropertyAnalyzer.PROPERTIES_REMOTE_STORAGE_RESOURCE, + dataProperty.getRemoteStorageResourceName()); DateLiteral dateLiteral1 = new DateLiteral(dataProperty.getRemoteCooldownTimeMs(), TimeUtils.getTimeZone(), Type.DATETIME); newProperties.put(PropertyAnalyzer.PROPERTIES_REMOTE_STORAGE_COOLDOWN_TIME, dateLiteral1.getStringValue()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/AlterHandler.java 
b/fe/fe-core/src/main/java/org/apache/doris/alter/AlterHandler.java index 5205d418b3..347aa36747 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/AlterHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/AlterHandler.java @@ -116,7 +116,8 @@ public abstract class AlterHandler extends MasterDaemon { AlterJobV2 alterJobV2 = iterator.next().getValue(); if (alterJobV2.isExpire()) { iterator.remove(); - RemoveAlterJobV2OperationLog log = new RemoveAlterJobV2OperationLog(alterJobV2.getJobId(), alterJobV2.getType()); + RemoveAlterJobV2OperationLog log = new RemoveAlterJobV2OperationLog( + alterJobV2.getJobId(), alterJobV2.getType()); Catalog.getCurrentCatalog().getEditLog().logRemoveExpiredAlterJobV2(log); LOG.info("remove expired {} job {}. finish at {}", alterJobV2.getType(), alterJobV2.getJobId(), TimeUtils.longToTimeString(alterJobV2.getFinishedTimeMs())); @@ -169,7 +170,7 @@ public abstract class AlterHandler extends MasterDaemon { * entry function. handle alter ops for external table */ public void processExternalTable(List alterClauses, Database db, Table externalTable) - throws UserException {}; + throws UserException {} /* * cancel alter ops @@ -183,11 +184,13 @@ public abstract class AlterHandler extends MasterDaemon { * We assume that the specified version is X. * Case 1: * After alter table process starts, there is no new load job being submitted. So the new replica - * should be with version (0-1). So we just modify the replica's version to partition's visible version, which is X. + * should be with version (0-1). So we just modify the replica's version to + * partition's visible version, which is X. * Case 2: * After alter table process starts, there are some load job being processed. * Case 2.1: - * None of them succeed on this replica. so the version is still 1. We should modify the replica's version to X. + * None of them succeed on this replica. so the version is still 1. + * We should modify the replica's version to X. * Case 2.2 * There are new load jobs after alter task, and at least one of them is succeed on this replica. * So the replica's version should be larger than X. So we don't need to modify the replica version diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/AlterJobV2.java b/fe/fe-core/src/main/java/org/apache/doris/alter/AlterJobV2.java index a69ad6266a..89dce3e169 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/AlterJobV2.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/AlterJobV2.java @@ -43,9 +43,12 @@ import java.util.List; public abstract class AlterJobV2 implements Writable { private static final Logger LOG = LogManager.getLogger(AlterJobV2.class); + public enum JobState { PENDING, // Job is created + // CHECKSTYLE OFF WAITING_TXN, // New replicas are created and Shadow catalog object is visible for incoming txns, waiting for previous txns to be finished + // CHECKSTYLE ON RUNNING, // alter tasks are sent to BE, and waiting for them finished. 
FINISHED, // job is done CANCELLED; // job is cancelled(failed or be cancelled by user) @@ -175,7 +178,7 @@ public abstract class AlterJobV2 implements Writable { } } - public synchronized final boolean cancel(String errMsg) { + public final synchronized boolean cancel(String errMsg) { return cancelImpl(errMsg); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/AlterOperations.java b/fe/fe-core/src/main/java/org/apache/doris/alter/AlterOperations.java index 5d5c346cf3..c60594326b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/AlterOperations.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/AlterOperations.java @@ -59,8 +59,10 @@ public class AlterOperations { } public boolean hasPartitionOp() { - return currentOps.contains(AlterOpType.ADD_PARTITION) || currentOps.contains(AlterOpType.DROP_PARTITION) - || currentOps.contains(AlterOpType.REPLACE_PARTITION) || currentOps.contains(AlterOpType.MODIFY_PARTITION); + return currentOps.contains(AlterOpType.ADD_PARTITION) + || currentOps.contains(AlterOpType.DROP_PARTITION) + || currentOps.contains(AlterOpType.REPLACE_PARTITION) + || currentOps.contains(AlterOpType.MODIFY_PARTITION); } // MODIFY_TABLE_PROPERTY is also processed by SchemaChangeHandler @@ -103,6 +105,7 @@ public class AlterOperations { currentOps.add(opType); } + public boolean hasEnableFeatureOP() { return currentOps.contains(AlterOpType.ENABLE_FEATURE); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java b/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java index 66465e0aac..64319c4cfb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java @@ -92,7 +92,8 @@ public class MaterializedViewHandler extends AlterHandler { } // for batch submit rollup job, tableId -> jobId - // keep table's not final state job size. The job size determine's table's state, = 0 means table is normal, otherwise is rollup + // keep table's not final state job size. 
The job size determine's table's state, = 0 means table is normal, + // otherwise is rollup private Map> tableNotFinalStateJobMap = new ConcurrentHashMap<>(); // keep table's running job,used for concurrency limit // table id -> set of running job ids @@ -197,8 +198,9 @@ public class MaterializedViewHandler extends AlterHandler { List mvColumns = checkAndPrepareMaterializedView(addMVClause, olapTable); // Step2: create mv job - RollupJobV2 rollupJobV2 = createMaterializedViewJob(mvIndexName, baseIndexName, mvColumns, addMVClause - .getProperties(), olapTable, db, baseIndexId, addMVClause.getMVKeysType(), addMVClause.getOrigStmt()); + RollupJobV2 rollupJobV2 = createMaterializedViewJob(mvIndexName, baseIndexName, mvColumns, + addMVClause.getProperties(), olapTable, db, baseIndexId, + addMVClause.getMVKeysType(), addMVClause.getOrigStmt()); addAlterJobV2(rollupJobV2); @@ -223,7 +225,8 @@ public class MaterializedViewHandler extends AlterHandler { * @throws DdlException * @throws AnalysisException */ - public void processBatchAddRollup(List alterClauses, Database db, OlapTable olapTable) throws DdlException, AnalysisException { + public void processBatchAddRollup(List alterClauses, Database db, OlapTable olapTable) + throws DdlException, AnalysisException { Map rollupNameJobMap = new LinkedHashMap<>(); // save job id for log Set logJobIdSet = new HashSet<>(); @@ -265,11 +268,12 @@ public class MaterializedViewHandler extends AlterHandler { long baseIndexId = checkAndGetBaseIndex(baseIndexName, olapTable); // step 2.2 check rollup schema - List rollupSchema = checkAndPrepareMaterializedView(addRollupClause, olapTable, baseIndexId, changeStorageFormat); + List rollupSchema = checkAndPrepareMaterializedView( + addRollupClause, olapTable, baseIndexId, changeStorageFormat); // step 3 create rollup job - RollupJobV2 alterJobV2 = createMaterializedViewJob(rollupIndexName, baseIndexName, rollupSchema, addRollupClause.getProperties(), - olapTable, db, baseIndexId, olapTable.getKeysType(), null); + RollupJobV2 alterJobV2 = createMaterializedViewJob(rollupIndexName, baseIndexName, rollupSchema, + addRollupClause.getProperties(), olapTable, db, baseIndexId, olapTable.getKeysType(), null); rollupNameJobMap.put(addRollupClause.getRollupName(), alterJobV2); logJobIdSet.add(alterJobV2.getJobId()); @@ -319,10 +323,9 @@ public class MaterializedViewHandler extends AlterHandler { * @throws AnalysisException */ private RollupJobV2 createMaterializedViewJob(String mvName, String baseIndexName, - List mvColumns, Map properties, - OlapTable olapTable, Database db, long baseIndexId, KeysType mvKeysType, - OriginStatement origStmt) - throws DdlException, AnalysisException { + List mvColumns, Map properties, + OlapTable olapTable, Database db, long baseIndexId, KeysType mvKeysType, + OriginStatement origStmt) throws DdlException, AnalysisException { if (mvKeysType == null) { // assign rollup index's key type, same as base index's mvKeysType = olapTable.getKeysType(); @@ -384,11 +387,13 @@ public class MaterializedViewHandler extends AlterHandler { if (baseReplica.getState() == Replica.ReplicaState.CLONE || baseReplica.getState() == Replica.ReplicaState.DECOMMISSION || baseReplica.getLastFailedVersion() > 0) { - LOG.info("base replica {} of tablet {} state is {}, and last failed version is {}, skip creating rollup replica", - baseReplica.getId(), baseTabletId, baseReplica.getState(), baseReplica.getLastFailedVersion()); + LOG.info("base replica {} of tablet {} state is {}, and last failed version is {}," + + " skip 
creating rollup replica", baseReplica.getId(), baseTabletId, + baseReplica.getState(), baseReplica.getLastFailedVersion()); continue; } - Preconditions.checkState(baseReplica.getState() == Replica.ReplicaState.NORMAL, baseReplica.getState()); + Preconditions.checkState(baseReplica.getState() == Replica.ReplicaState.NORMAL, + baseReplica.getState()); // replica's init state is ALTER, so that tablet report process will ignore its report Replica mvReplica = new Replica(mvReplicaId, backendId, Replica.ReplicaState.ALTER, Partition.PARTITION_INIT_VERSION, mvSchemaHash); @@ -441,7 +446,8 @@ public class MaterializedViewHandler extends AlterHandler { int numOfKeys = 0; if (olapTable.getKeysType().isAggregationFamily()) { if (addMVClause.getMVKeysType() != KeysType.AGG_KEYS) { - throw new DdlException("The materialized view of aggregation or unique table must has grouping columns"); + throw new DdlException("The materialized view of aggregation" + + " or unique table must has grouping columns"); } for (MVColumnItem mvColumnItem : mvColumnItemList) { String mvColumnName = mvColumnItem.getName(); @@ -735,7 +741,8 @@ public class MaterializedViewHandler extends AlterHandler { long dbId = db.getId(); long tableId = olapTable.getId(); editLog.logBatchDropRollup(new BatchDropInfo(dbId, tableId, indexIdSet)); - LOG.info("finished drop rollup index[{}] in table[{}]", String.join("", rollupNameSet), olapTable.getName()); + LOG.info("finished drop rollup index[{}] in table[{}]", + String.join("", rollupNameSet), olapTable.getName()); } finally { olapTable.writeUnlock(); } @@ -998,7 +1005,8 @@ public class MaterializedViewHandler extends AlterHandler { continue; } if (ctx != null) { - if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ctx, db.getFullName(), alterJob.getTableName(), PrivPredicate.ALTER)) { + if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ctx, db.getFullName(), + alterJob.getTableName(), PrivPredicate.ALTER)) { continue; } } @@ -1041,7 +1049,8 @@ public class MaterializedViewHandler extends AlterHandler { } olapTable.writeLock(); try { - if (olapTable.getState() != OlapTableState.ROLLUP && olapTable.getState() != OlapTableState.WAITING_STABLE) { + if (olapTable.getState() != OlapTableState.ROLLUP + && olapTable.getState() != OlapTableState.WAITING_STABLE) { throw new DdlException("Table[" + tableName + "] is not under ROLLUP. " + "Use 'ALTER TABLE DROP ROLLUP' if you want to."); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java b/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java index a05863d9ea..ba707233a5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java @@ -157,7 +157,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable { } public void addTabletIdMap(long partitionId, long rollupTabletId, long baseTabletId) { - Map tabletIdMap = partitionIdToBaseRollupTabletIdMap.computeIfAbsent(partitionId, k -> Maps.newHashMap()); + Map tabletIdMap = partitionIdToBaseRollupTabletIdMap + .computeIfAbsent(partitionId, k -> Maps.newHashMap()); tabletIdMap.put(rollupTabletId, baseTabletId); } @@ -181,7 +182,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable { Preconditions.checkState(jobState == JobState.PENDING, jobState); LOG.info("begin to send create rollup replica tasks. 
job: {}", jobId); - Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, s -> new AlterCancelException("Database " + s + " does not exist")); + Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, + s -> new AlterCancelException("Database " + s + " does not exist")); if (!checkTableStable(db)) { return; } @@ -291,7 +293,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable { tbl.writeUnlock(); } - this.watershedTxnId = Catalog.getCurrentGlobalTransactionMgr().getTransactionIDGenerator().getNextTransactionId(); + this.watershedTxnId = Catalog.getCurrentGlobalTransactionMgr() + .getTransactionIDGenerator().getNextTransactionId(); this.jobState = JobState.WAITING_TXN; // write edit log @@ -333,7 +336,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable { } LOG.info("previous transactions are all finished, begin to send rollup tasks. job: {}", jobId); - Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, s -> new AlterCancelException("Databasee " + s + " does not exist")); + Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, + s -> new AlterCancelException("Databasee " + s + " does not exist")); OlapTable tbl; try { @@ -412,7 +416,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable { // must check if db or table still exist first. // or if table is dropped, the tasks will never be finished, // and the job will be in RUNNING state forever. - Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, s -> new AlterCancelException("Databasee " + s + " does not exist")); + Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, + s -> new AlterCancelException("Databasee " + s + " does not exist")); OlapTable tbl; try { @@ -447,7 +452,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable { } long visiableVersion = partition.getVisibleVersion(); - short expectReplicationNum = tbl.getPartitionInfo().getReplicaAllocation(partitionId).getTotalReplicaNum(); + short expectReplicationNum = tbl.getPartitionInfo().getReplicaAllocation( + partitionId).getTotalReplicaNum(); MaterializedIndex rollupIndex = entry.getValue(); @@ -464,7 +470,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable { if (healthyReplicaNum < expectReplicationNum / 2 + 1) { LOG.warn("rollup tablet {} has few healthy replicas: {}, rollup job: {}", rollupTablet.getId(), replicas, jobId); - throw new AlterCancelException("rollup tablet " + rollupTablet.getId() + " has few healthy replicas"); + throw new AlterCancelException("rollup tablet " + rollupTablet.getId() + + " has few healthy replicas"); } } // end for tablets } // end for partitions @@ -544,7 +551,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable { // Check whether transactions of the given database which txnId is less than 'watershedTxnId' are finished. 
protected boolean isPreviousLoadFinished() throws AnalysisException { - return Catalog.getCurrentGlobalTransactionMgr().isPreviousTransactionsFinished(watershedTxnId, dbId, Lists.newArrayList(tableId)); + return Catalog.getCurrentGlobalTransactionMgr().isPreviousTransactionsFinished( + watershedTxnId, dbId, Lists.newArrayList(tableId)); } /** @@ -745,7 +753,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable { return; } // parse the define stmt to schema - SqlParser parser = new SqlParser(new SqlScanner(new StringReader(origStmt.originStmt), SqlModeHelper.MODE_DEFAULT)); + SqlParser parser = new SqlParser(new SqlScanner( + new StringReader(origStmt.originStmt), SqlModeHelper.MODE_DEFAULT)); ConnectContext connectContext = new ConnectContext(); Database db; try { @@ -755,7 +764,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable { } String clusterName = db.getClusterName(); // It's almost impossible that db's cluster name is null, just in case - // because before user want to create database, he must first enter a cluster which means that cluster is set to current ConnectContext + // because before user want to create database, he must first enter a cluster + // which means that cluster is set to current ConnectContext // then when createDBStmt is executed, cluster name is set to Database if (clusterName == null || clusterName.length() == 0) { clusterName = SystemInfoService.DEFAULT_CLUSTER; diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java index 1ffa5b8607..6329834d59 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java @@ -111,7 +111,8 @@ public class SchemaChangeHandler extends AlterHandler { public static final int CYCLE_COUNT_TO_CHECK_EXPIRE_SCHEMA_CHANGE_JOB = 20; - public final ThreadPoolExecutor schemaChangeThreadPool = ThreadPoolManager.newDaemonCacheThreadPool(MAX_ACTIVE_SCHEMA_CHANGE_JOB_V2_SIZE, "schema-change-pool", true); + public final ThreadPoolExecutor schemaChangeThreadPool = ThreadPoolManager.newDaemonCacheThreadPool( + MAX_ACTIVE_SCHEMA_CHANGE_JOB_V2_SIZE, "schema-change-pool", true); public final Map activeSchemaChangeJobsV2 = Maps.newConcurrentMap(); @@ -144,7 +145,8 @@ public class SchemaChangeHandler extends AlterHandler { indexSchemaMap, newColNameSet); } - private void processAddColumn(AddColumnClause alterClause, Table externalTable, List newSchema) throws DdlException { + private void processAddColumn(AddColumnClause alterClause, + Table externalTable, List newSchema) throws DdlException { Column column = alterClause.getColumn(); ColumnPosition columnPos = alterClause.getColPos(); Set newColNameSet = Sets.newHashSet(column.getName()); @@ -152,7 +154,8 @@ public class SchemaChangeHandler extends AlterHandler { addColumnInternal(column, columnPos, newSchema, newColNameSet); } - private void processAddColumns(AddColumnsClause alterClause, Table externalTable, List newSchema) throws DdlException { + private void processAddColumns(AddColumnsClause alterClause, + Table externalTable, List newSchema) throws DdlException { List columns = alterClause.getColumns(); Set newColNameSet = Sets.newHashSet(); for (Column column : alterClause.getColumns()) { @@ -190,7 +193,8 @@ public class SchemaChangeHandler extends AlterHandler { } } - private void processDropColumn(DropColumnClause alterClause, Table 
externalTable, List newSchema) throws DdlException { + private void processDropColumn(DropColumnClause alterClause, + Table externalTable, List newSchema) throws DdlException { String dropColName = alterClause.getColName(); // find column in base index and remove it @@ -216,7 +220,7 @@ public class SchemaChangeHandler extends AlterHandler { } private void processDropColumn(DropColumnClause alterClause, OlapTable olapTable, - Map> indexSchemaMap, List indexes) throws DdlException { + Map> indexSchemaMap, List indexes) throws DdlException { String dropColName = alterClause.getColName(); String targetIndexName = alterClause.getRollupName(); checkIndexExists(olapTable, targetIndexName); @@ -261,7 +265,8 @@ public class SchemaChangeHandler extends AlterHandler { } } if (isKey && hasReplaceColumn) { - throw new DdlException("Can not drop key column when table has value column with REPLACE aggregation method"); + throw new DdlException( + "Can not drop key column when table has value column with REPLACE aggregation method"); } } else { // drop column in rollup and base index @@ -279,7 +284,8 @@ public class SchemaChangeHandler extends AlterHandler { } } if (isKey && hasReplaceColumn) { - throw new DdlException("Can not drop key column when rollup has value column with REPLACE aggregation metho"); + throw new DdlException( + "Can not drop key column when rollup has value column with REPLACE aggregation method"); } } } @@ -352,7 +358,8 @@ public class SchemaChangeHandler extends AlterHandler { } // User can modify column type and column position - private void processModifyColumn(ModifyColumnClause alterClause, Table externalTable, List newSchema) throws DdlException { + private void processModifyColumn(ModifyColumnClause alterClause, + Table externalTable, List newSchema) throws DdlException { Column modColumn = alterClause.getColumn(); ColumnPosition columnPos = alterClause.getColPos(); @@ -430,14 +437,16 @@ public class SchemaChangeHandler extends AlterHandler { } } else if (KeysType.UNIQUE_KEYS == olapTable.getKeysType()) { if (null != modColumn.getAggregationType()) { - throw new DdlException("Can not assign aggregation method on column in Unique data model table: " + modColumn.getName()); + throw new DdlException("Can not assign aggregation method" + + " on column in Unique data model table: " + modColumn.getName()); } if (!modColumn.isKey()) { modColumn.setAggregationType(AggregateType.REPLACE, true); } } else { if (null != modColumn.getAggregationType()) { - throw new DdlException("Can not assign aggregation method on column in Duplicate data model table: " + modColumn.getName()); + throw new DdlException("Can not assign aggregation method" + + " on column in Duplicate data model table: " + modColumn.getName()); } if (!modColumn.isKey()) { modColumn.setAggregationType(AggregateType.NONE, true); @@ -604,7 +613,8 @@ public class SchemaChangeHandler extends AlterHandler { } } - private void processReorderColumn(ReorderColumnsClause alterClause, Table externalTable, List newSchema) throws DdlException { + private void processReorderColumn(ReorderColumnsClause alterClause, + Table externalTable, List newSchema) throws DdlException { List orderedColNames = alterClause.getColumnsByPos(); newSchema.clear(); @@ -763,18 +773,21 @@ public class SchemaChangeHandler extends AlterHandler { newColumn.setIsKey(true); } else if (newColumn.getAggregationType() == AggregateType.SUM && newColumn.getDefaultValue() != null && !newColumn.getDefaultValue().equals("0")) { - throw new DdlException("The default value 
of '" + newColName + "' with SUM aggregation function must be zero"); + throw new DdlException("The default value of '" + + newColName + "' with SUM aggregation function must be zero"); } } else if (KeysType.UNIQUE_KEYS == olapTable.getKeysType()) { if (newColumn.getAggregationType() != null) { - throw new DdlException("Can not assign aggregation method on column in Unique data model table: " + newColName); + throw new DdlException("Can not assign aggregation method" + + " on column in Unique data model table: " + newColName); } if (!newColumn.isKey()) { newColumn.setAggregationType(AggregateType.REPLACE, true); } } else { if (newColumn.getAggregationType() != null) { - throw new DdlException("Can not assign aggregation method on column in Duplicate data model table: " + newColName); + throw new DdlException("Can not assign aggregation method" + + " on column in Duplicate data model table: " + newColName); } if (!newColumn.isKey()) { if (targetIndexId != -1L @@ -790,7 +803,8 @@ public class SchemaChangeHandler extends AlterHandler { throw new DdlException("HLL type column can only be in Aggregation data model table: " + newColName); } - if (newColumn.getAggregationType() == AggregateType.BITMAP_UNION && KeysType.AGG_KEYS != olapTable.getKeysType()) { + if (newColumn.getAggregationType() == AggregateType.BITMAP_UNION + && KeysType.AGG_KEYS != olapTable.getKeysType()) { throw new DdlException("BITMAP_UNION must be used in AGG_KEYS"); } @@ -1095,7 +1109,8 @@ public class SchemaChangeHandler extends AlterHandler { // create job Catalog catalog = Catalog.getCurrentCatalog(); long jobId = catalog.getNextId(); - SchemaChangeJobV2 schemaChangeJob = new SchemaChangeJobV2(jobId, dbId, olapTable.getId(), olapTable.getName(), timeoutSecond * 1000); + SchemaChangeJobV2 schemaChangeJob = new SchemaChangeJobV2(jobId, dbId, + olapTable.getId(), olapTable.getName(), timeoutSecond * 1000); schemaChangeJob.setBloomFilterInfo(hasBfChange, bfColumns, bfFpp); schemaChangeJob.setAlterIndexInfo(hasIndexChange, indexes); @@ -1288,7 +1303,8 @@ public class SchemaChangeHandler extends AlterHandler { /* * Create schema change job - * 1. For each index which has been changed, create a SHADOW index, and save the mapping of origin index to SHADOW index. + * 1. For each index which has been changed, create a SHADOW index, + * and save the mapping of origin index to SHADOW index. * 2. Create all tablets and replicas of all SHADOW index, add them to tablet inverted index. * 3. 
Change table's state as SCHEMA_CHANGE */ @@ -1316,7 +1332,8 @@ public class SchemaChangeHandler extends AlterHandler { // index state is SHADOW MaterializedIndex shadowIndex = new MaterializedIndex(shadowIndexId, IndexState.SHADOW); MaterializedIndex originIndex = partition.getIndex(originIndexId); - TabletMeta shadowTabletMeta = new TabletMeta(dbId, tableId, partitionId, shadowIndexId, newSchemaHash, medium); + TabletMeta shadowTabletMeta = new TabletMeta(dbId, tableId, partitionId, + shadowIndexId, newSchemaHash, medium); ReplicaAllocation replicaAlloc = olapTable.getPartitionInfo().getReplicaAllocation(partitionId); Short totalReplicaNum = replicaAlloc.getTotalReplicaNum(); for (Tablet originTablet : originIndex.getTablets()) { @@ -1338,11 +1355,14 @@ public class SchemaChangeHandler extends AlterHandler { if (originReplica.getState() == Replica.ReplicaState.CLONE || originReplica.getState() == Replica.ReplicaState.DECOMMISSION || originReplica.getLastFailedVersion() > 0) { - LOG.info("origin replica {} of tablet {} state is {}, and last failed version is {}, skip creating shadow replica", - originReplica.getId(), originReplica, originReplica.getState(), originReplica.getLastFailedVersion()); + LOG.info("origin replica {} of tablet {} state is {}," + + " and last failed version is {}, skip creating shadow replica", + originReplica.getId(), originReplica, originReplica.getState(), + originReplica.getLastFailedVersion()); continue; } - Preconditions.checkState(originReplica.getState() == ReplicaState.NORMAL, originReplica.getState()); + Preconditions.checkState(originReplica.getState() == ReplicaState.NORMAL, + originReplica.getState()); // replica's init state is ALTER, so that tablet report process will ignore its report Replica shadowReplica = new Replica(shadowReplicaId, backendId, ReplicaState.ALTER, Partition.PARTITION_INIT_VERSION, newSchemaHash); @@ -1353,8 +1373,8 @@ public class SchemaChangeHandler extends AlterHandler { if (healthyReplicaNum < totalReplicaNum / 2 + 1) { /* * TODO(cmy): This is a bad design. - * Because in the schema change job, we will only send tasks to the shadow replicas that have been created, - * without checking whether the quorum of replica number are satisfied. + * Because in the schema change job, we will only send tasks to the shadow replicas + * that have been created, without checking whether the quorum of replica number are satisfied. * This will cause the job to fail until we find that the quorum of replica number * is not satisfied until the entire job is done. 
* So here we check the replica number strictly and do not allow to submit the job @@ -1370,7 +1390,8 @@ public class SchemaChangeHandler extends AlterHandler { schemaChangeJob.addPartitionShadowIndex(partitionId, shadowIndexId, shadowIndex); } // end for partition - schemaChangeJob.addIndexSchema(shadowIndexId, originIndexId, newIndexName, newSchemaVersion, newSchemaHash, newShortKeyColumnCount, entry.getValue()); + schemaChangeJob.addIndexSchema(shadowIndexId, originIndexId, newIndexName, + newSchemaVersion, newSchemaHash, newShortKeyColumnCount, entry.getValue()); } // end for index // set table state @@ -1428,14 +1449,16 @@ public class SchemaChangeHandler extends AlterHandler { return schemaChangeJobInfos; } - private void getAlterJobV2Infos(Database db, List alterJobsV2, List> schemaChangeJobInfos) { + private void getAlterJobV2Infos(Database db, List alterJobsV2, + List> schemaChangeJobInfos) { ConnectContext ctx = ConnectContext.get(); for (AlterJobV2 alterJob : alterJobsV2) { if (alterJob.getDbId() != db.getId()) { continue; } if (ctx != null) { - if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ctx, db.getFullName(), alterJob.getTableName(), PrivPredicate.ALTER)) { + if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv( + ctx, db.getFullName(), alterJob.getTableName(), PrivPredicate.ALTER)) { continue; } } @@ -1487,9 +1510,11 @@ public class SchemaChangeHandler extends AlterHandler { } else if (DynamicPartitionUtil.checkDynamicPartitionPropertiesExist(properties)) { if (!olapTable.dynamicPartitionExists()) { try { - DynamicPartitionUtil.checkInputDynamicPartitionProperties(properties, olapTable.getPartitionInfo()); + DynamicPartitionUtil.checkInputDynamicPartitionProperties( + properties, olapTable.getPartitionInfo()); } catch (DdlException e) { - // This table is not a dynamic partition table and didn't supply all dynamic partition properties + // This table is not a dynamic partition table + // and didn't supply all dynamic partition properties throw new DdlException("Table " + db.getFullName() + "." + olapTable.getName() + " is not a dynamic partition table." + " Use command `HELP ALTER TABLE` " @@ -1498,8 +1523,10 @@ public class SchemaChangeHandler extends AlterHandler { } Catalog.getCurrentCatalog().modifyTableDynamicPartition(db, olapTable, properties); return; - } else if (properties.containsKey("default." + PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION)) { - Preconditions.checkNotNull(properties.get("default." + PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION)); + } else if (properties.containsKey( + "default." + PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION)) { + Preconditions.checkNotNull(properties.get("default." 
+ + PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION)); Catalog.getCurrentCatalog().modifyTableDefaultReplicaAllocation(db, olapTable, properties); return; } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION)) { @@ -1615,7 +1642,8 @@ public class SchemaChangeHandler extends AlterHandler { /** * Update all partitions' in-memory property of table */ - public void updateTableInMemoryMeta(Database db, String tableName, Map properties) throws UserException { + public void updateTableInMemoryMeta(Database db, String tableName, Map properties) + throws UserException { List partitions = Lists.newArrayList(); OlapTable olapTable = (OlapTable) db.getTableOrMetaException(tableName, Table.TableType.OLAP); olapTable.readLock(); @@ -1646,9 +1674,8 @@ public class SchemaChangeHandler extends AlterHandler { * Update some specified partitions' in-memory property of table */ public void updatePartitionsInMemoryMeta(Database db, - String tableName, - List partitionNames, - Map properties) throws DdlException, MetaNotFoundException { + String tableName, List partitionNames, Map properties) + throws DdlException, MetaNotFoundException { OlapTable olapTable = (OlapTable) db.getTableOrMetaException(tableName, Table.TableType.OLAP); boolean isInMemory = Boolean.parseBoolean(properties.get(PropertyAnalyzer.PROPERTIES_INMEMORY)); if (isInMemory == olapTable.isInMemory()) { @@ -1735,7 +1762,8 @@ public class SchemaChangeHandler extends AlterHandler { } else { List>>> unfinishedMarks = countDownLatch.getLeftMarks(); // only show at most 3 results - List>>> subList = unfinishedMarks.subList(0, Math.min(unfinishedMarks.size(), 3)); + List>>> subList + = unfinishedMarks.subList(0, Math.min(unfinishedMarks.size(), 3)); if (!subList.isEmpty()) { errMsg += " Unfinished mark: " + Joiner.on(", ").join(subList); } @@ -1770,9 +1798,11 @@ public class SchemaChangeHandler extends AlterHandler { // find from new alter jobs first List schemaChangeJobV2List = getUnfinishedAlterJobV2ByTableId(olapTable.getId()); // current schemaChangeJob job doesn't support batch operation,so just need to get one job - schemaChangeJobV2 = schemaChangeJobV2List.size() == 0 ? null : Iterables.getOnlyElement(schemaChangeJobV2List); + schemaChangeJobV2 = schemaChangeJobV2List.size() == 0 + ? null : Iterables.getOnlyElement(schemaChangeJobV2List); if (schemaChangeJobV2 == null) { - throw new DdlException("Table[" + tableName + "] is under schema change state but could not find related job"); + throw new DdlException("Table[" + tableName + "] is under schema change state" + + " but could not find related job"); } } finally { olapTable.writeUnlock(); @@ -1804,7 +1834,8 @@ public class SchemaChangeHandler extends AlterHandler { for (Index existedIdx : existedIndexes) { if (existedIdx.getIndexName().equalsIgnoreCase(indexDef.getIndexName())) { if (indexDef.isSetIfNotExists()) { - LOG.info("create index[{}] which already exists on table[{}]", indexDef.getIndexName(), olapTable.getName()); + LOG.info("create index[{}] which already exists on table[{}]", + indexDef.getIndexName(), olapTable.getName()); return true; } throw new DdlException("index `" + indexDef.getIndexName() + "` already exist."); @@ -1834,7 +1865,8 @@ public class SchemaChangeHandler extends AlterHandler { * Returns true if the index does not exist, there is no need to create the job to drop the index. * Otherwise return false, there is need to create a job to drop the index. 
*/ - private boolean processDropIndex(DropIndexClause alterClause, OlapTable olapTable, List indexes) throws DdlException { + private boolean processDropIndex(DropIndexClause alterClause, OlapTable olapTable, + List indexes) throws DdlException { String indexName = alterClause.getIndexName(); List existedIndexes = olapTable.getIndexes(); Index found = null; diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java index a95bdede1d..cf1dc977a9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java @@ -198,7 +198,8 @@ public class SchemaChangeJobV2 extends AlterJobV2 { protected void runPendingJob() throws AlterCancelException { Preconditions.checkState(jobState == JobState.PENDING, jobState); LOG.info("begin to send create replica tasks. job: {}", jobId); - Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, s -> new AlterCancelException("Database " + s + " does not exist")); + Database db = Catalog.getCurrentCatalog() + .getDbOrException(dbId, s -> new AlterCancelException("Database " + s + " does not exist")); if (!checkTableStable(db)) { return; @@ -261,7 +262,8 @@ public class SchemaChangeJobV2 extends AlterJobV2 { tbl.isInMemory(), tbl.getPartitionInfo().getTabletType(partitionId), tbl.getCompressionType()); - createReplicaTask.setBaseTablet(partitionIndexTabletMap.get(partitionId, shadowIdxId).get(shadowTabletId), originSchemaHash); + createReplicaTask.setBaseTablet(partitionIndexTabletMap.get(partitionId, shadowIdxId) + .get(shadowTabletId), originSchemaHash); if (this.storageFormat != null) { createReplicaTask.setStorageFormat(this.storageFormat); } @@ -317,12 +319,14 @@ public class SchemaChangeJobV2 extends AlterJobV2 { tbl.writeUnlock(); } - this.watershedTxnId = Catalog.getCurrentGlobalTransactionMgr().getTransactionIDGenerator().getNextTransactionId(); + this.watershedTxnId = Catalog.getCurrentGlobalTransactionMgr() + .getTransactionIDGenerator().getNextTransactionId(); this.jobState = JobState.WAITING_TXN; // write edit log Catalog.getCurrentCatalog().getEditLog().logAlterJob(this); - LOG.info("transfer schema change job {} state to {}, watershed txn id: {}", jobId, this.jobState, watershedTxnId); + LOG.info("transfer schema change job {} state to {}, watershed txn id: {}", + jobId, this.jobState, watershedTxnId); } private void addShadowIndexToCatalog(OlapTable tbl) { @@ -369,7 +373,8 @@ public class SchemaChangeJobV2 extends AlterJobV2 { } LOG.info("previous transactions are all finished, begin to send schema change tasks. job: {}", jobId); - Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, s -> new AlterCancelException("Database " + s + " does not exist")); + Database db = Catalog.getCurrentCatalog() + .getDbOrException(dbId, s -> new AlterCancelException("Database " + s + " does not exist")); OlapTable tbl; try { @@ -473,7 +478,8 @@ public class SchemaChangeJobV2 extends AlterJobV2 { // must check if db or table still exist first. // or if table is dropped, the tasks will never be finished, // and the job will be in RUNNING state forever. 
- Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, s -> new AlterCancelException("Database " + s + " does not exist")); + Database db = Catalog.getCurrentCatalog() + .getDbOrException(dbId, s -> new AlterCancelException("Database " + s + " does not exist")); OlapTable tbl; try { @@ -487,7 +493,8 @@ public class SchemaChangeJobV2 extends AlterJobV2 { List tasks = schemaChangeBatchTask.getUnfinishedTasks(2000); for (AgentTask task : tasks) { if (task.getFailedTimes() >= 3) { - throw new AlterCancelException("schema change task failed after try three times: " + task.getErrorMsg()); + throw new AlterCancelException("schema change task failed after try three times: " + + task.getErrorMsg()); } } return; @@ -507,7 +514,8 @@ public class SchemaChangeJobV2 extends AlterJobV2 { Preconditions.checkNotNull(partition, partitionId); long visiableVersion = partition.getVisibleVersion(); - short expectReplicationNum = tbl.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum(); + short expectReplicationNum = tbl.getPartitionInfo() + .getReplicaAllocation(partition.getId()).getTotalReplicaNum(); Map shadowIndexMap = partitionIndexMap.row(partitionId); for (Map.Entry entry : shadowIndexMap.entrySet()) { @@ -680,7 +688,8 @@ public class SchemaChangeJobV2 extends AlterJobV2 { // Check whether transactions of the given database which txnId is less than 'watershedTxnId' are finished. protected boolean isPreviousLoadFinished() throws AnalysisException { - return Catalog.getCurrentGlobalTransactionMgr().isPreviousTransactionsFinished(watershedTxnId, dbId, Lists.newArrayList(tableId)); + return Catalog.getCurrentGlobalTransactionMgr().isPreviousTransactionsFinished( + watershedTxnId, dbId, Lists.newArrayList(tableId)); } /** diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java b/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java index 44cd210c78..b0aa25a6fe 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java @@ -117,7 +117,8 @@ public class SystemHandler extends AlterHandler { AddBackendClause addBackendClause = (AddBackendClause) alterClause; final String destClusterName = addBackendClause.getDestCluster(); - if ((!Strings.isNullOrEmpty(destClusterName) || addBackendClause.isFree()) && Config.disable_cluster_feature) { + if ((!Strings.isNullOrEmpty(destClusterName) || addBackendClause.isFree()) + && Config.disable_cluster_feature) { ErrorReport.reportAnalysisException(ErrorCode.ERR_INVALID_OPERATION, "ADD BACKEND TO CLUSTER"); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AbstractBackupStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AbstractBackupStmt.java index 2bf8a48fd5..8a602921a4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AbstractBackupStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AbstractBackupStmt.java @@ -36,8 +36,8 @@ import java.util.Map; public class AbstractBackupStmt extends DdlStmt { private static final Logger LOG = LogManager.getLogger(AbstractBackupStmt.class); - private final static String PROP_TIMEOUT = "timeout"; - private final static long MIN_TIMEOUT_MS = 600 * 1000L; // 10 min + private static final String PROP_TIMEOUT = "timeout"; + private static final long MIN_TIMEOUT_MS = 600 * 1000L; // 10 min protected LabelName labelName; protected String repoName; @@ -46,8 +46,9 @@ public class AbstractBackupStmt extends DdlStmt { protected 
     long timeoutMs;
-    public AbstractBackupStmt(LabelName labelName, String repoName, AbstractBackupTableRefClause abstractBackupTableRefClause,
-            Map properties) {
+    public AbstractBackupStmt(LabelName labelName, String repoName,
+            AbstractBackupTableRefClause abstractBackupTableRefClause,
+            Map properties) {
         this.labelName = labelName;
         this.repoName = repoName;
         this.abstractBackupTableRefClause = abstractBackupTableRefClause;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCancelRebalanceDiskStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCancelRebalanceDiskStmt.java
index f9006f4c33..818032ccfd 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCancelRebalanceDiskStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCancelRebalanceDiskStmt.java
@@ -39,7 +39,7 @@ public class AdminCancelRebalanceDiskStmt extends DdlStmt {
         ImmutableMap backendsInfo = Catalog.getCurrentSystemInfo().getIdToBackend();
         Map backendsID = new HashMap();
         for (Backend backend : backendsInfo.values()) {
-            backendsID.put(String.valueOf(backend.getHost()) + ":" + String.valueOf(backend.getHeartbeatPort()), backend.getId());
+            backendsID.put(backend.getHost() + ":" + backend.getHeartbeatPort(), backend.getId());
         }
         if (backends == null) {
             for (Backend backend : backendsInfo.values()) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCleanTrashStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCleanTrashStmt.java
index 6a1e716102..b0a0b8caa4 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCleanTrashStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCleanTrashStmt.java
@@ -39,7 +39,7 @@ public class AdminCleanTrashStmt extends DdlStmt {
         ImmutableMap backendsInfo = Catalog.getCurrentSystemInfo().getIdToBackend();
         Map backendsID = new HashMap();
         for (Backend backend : backendsInfo.values()) {
-            backendsID.put(String.valueOf(backend.getHost()) + ":" + String.valueOf(backend.getHeartbeatPort()), backend.getId());
+            backendsID.put(backend.getHost() + ":" + backend.getHeartbeatPort(), backend.getId());
         }
         if (backends == null) {
             for (Backend backend : backendsInfo.values()) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCompactTableStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCompactTableStmt.java
index 1cd448a0fd..d65ad0acff 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCompactTableStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCompactTableStmt.java
@@ -83,7 +83,8 @@ public class AdminCompactTableStmt extends DdlStmt {
         // analyze where clause if not null
         if (where == null) {
-            throw new AnalysisException("Compaction type must be specified in Where clause like: type = 'BASE/CUMULATIVE'");
+            throw new AnalysisException("Compaction type must be specified in"
+                    + " Where clause like: type = 'BASE/CUMULATIVE'");
         }
         if (!analyzeWhere()) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminRebalanceDiskStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminRebalanceDiskStmt.java
index cbda427a66..f99c0126bb 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminRebalanceDiskStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminRebalanceDiskStmt.java
@@ -40,12 +40,10 @@ public class AdminRebalanceDiskStmt extends DdlStmt {
         ImmutableMap backendsInfo = Catalog.getCurrentSystemInfo().getIdToBackend();
         Map backendsID = new HashMap();
         for (Backend backend : backendsInfo.values()) {
-            backendsID.put(String.valueOf(backend.getHost()) + ":" + String.valueOf(backend.getHeartbeatPort()), backend.getId());
+            backendsID.put(backend.getHost() + ":" + backend.getHeartbeatPort(), backend.getId());
         }
         if (backends == null) {
-            for (Backend backend : backendsInfo.values()) {
-                this.backends.add(backend);
-            }
+            this.backends.addAll(backendsInfo.values());
         } else {
             for (String backend : backends) {
                 if (backendsID.get(backend) != null) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfo.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfo.java
index 298837d11e..28084d44ba 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfo.java
@@ -70,7 +70,7 @@ import java.util.List;
 * TODO: Add query tests for aggregation with intermediate tuples with num_nodes=1.
 */
 public final class AggregateInfo extends AggregateInfoBase {
-    private final static Logger LOG = LogManager.getLogger(AggregateInfo.class);
+    private static final Logger LOG = LogManager.getLogger(AggregateInfo.class);
     public enum AggPhase {
         FIRST,
@@ -81,7 +81,7 @@ public final class AggregateInfo extends AggregateInfoBase {
         public boolean isMerge() {
             return this == FIRST_MERGE || this == SECOND_MERGE;
         }
-    };
+    }
     // created by createMergeAggInfo()
     private AggregateInfo mergeAggInfo;
@@ -173,7 +173,7 @@ public final class AggregateInfo extends AggregateInfoBase {
     * If an aggTupleDesc is created, also registers eq predicates between the
     * grouping exprs and their respective slots with 'analyzer'.
     */
-    static public AggregateInfo create(
+    public static AggregateInfo create(
             ArrayList groupingExprs, ArrayList aggExprs,
             TupleDescriptor tupleDesc, Analyzer analyzer) throws AnalysisException {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfoBase.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfoBase.java
index b44dc45589..f0298afff6 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfoBase.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfoBase.java
@@ -38,7 +38,7 @@ import java.util.List;
 * tuple descriptors as well as their smaps for evaluating aggregate functions.
 */
 public abstract class AggregateInfoBase {
-    private final static Logger LOG =
+    private static final Logger LOG =
             LoggerFactory.getLogger(AggregateInfoBase.class);
     // For aggregations: All unique grouping expressions from a select block.
@@ -248,7 +248,7 @@ public abstract class AggregateInfoBase { if (intermediateType != null) { return true; } - if (noGrouping && ((AggregateFunction) aggExpr.fn).getNullableMode().equals(Function.NullableMode.DEPEND_ON_ARGUMENT)) { + if (noGrouping && aggExpr.fn.getNullableMode().equals(Function.NullableMode.DEPEND_ON_ARGUMENT)) { return true; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateParamsList.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateParamsList.java index 6afb62abff..12ff07d99a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateParamsList.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateParamsList.java @@ -43,7 +43,7 @@ class AggregateParamsList { isDistinct = false; } - static public AggregateParamsList createStarParam() { + public static AggregateParamsList createStarParam() { return new AggregateParamsList(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColumnStatsStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColumnStatsStmt.java index b5627c2565..8a21d18fcd 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColumnStatsStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColumnStatsStmt.java @@ -67,8 +67,8 @@ public class AlterColumnStatsStmt extends DdlStmt { throw new AnalysisException(optional.get() + " is invalid statistic"); } // check auth - if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), tableName.getDb(), tableName.getTbl(), - PrivPredicate.ALTER)) { + if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv( + ConnectContext.get(), tableName.getDb(), tableName.getTbl(), PrivPredicate.ALTER)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR, "ALTER COLUMN STATS", ConnectContext.get().getQualifiedUser(), ConnectContext.get().getRemoteIP(), diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseQuotaStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseQuotaStmt.java index daacdadb91..d9ce04fdab 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseQuotaStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseQuotaStmt.java @@ -63,7 +63,8 @@ public class AlterDatabaseQuotaStmt extends DdlStmt { super.analyze(analyzer); if (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.ADMIN)) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName); + ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, + analyzer.getQualifiedUser(), dbName); } if (Strings.isNullOrEmpty(dbName)) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseRename.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseRename.java index b7606a3e1a..ff9ed52e8f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseRename.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseRename.java @@ -57,10 +57,9 @@ public class AlterDatabaseRename extends DdlStmt { } if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), dbName, - PrivPredicate.of(PrivBitSet.of(PaloPrivilege.ADMIN_PRIV, - PaloPrivilege.ALTER_PRIV), - Operator.OR))) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName); + 
PrivPredicate.of(PrivBitSet.of(PaloPrivilege.ADMIN_PRIV, PaloPrivilege.ALTER_PRIV), Operator.OR))) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, + analyzer.getQualifiedUser(), dbName); } if (Strings.isNullOrEmpty(newDbName)) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterRoutineLoadStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterRoutineLoadStmt.java index 0bd3f6f438..4656d81f2d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterRoutineLoadStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterRoutineLoadStmt.java @@ -174,32 +174,35 @@ public class AlterRoutineLoadStmt extends DdlStmt { } if (jobProperties.containsKey(CreateRoutineLoadStmt.JSONPATHS)) { - analyzedJobProperties.put(CreateRoutineLoadStmt.JSONPATHS, jobProperties.get(CreateRoutineLoadStmt.JSONPATHS)); + analyzedJobProperties.put(CreateRoutineLoadStmt.JSONPATHS, + jobProperties.get(CreateRoutineLoadStmt.JSONPATHS)); } if (jobProperties.containsKey(CreateRoutineLoadStmt.JSONROOT)) { - analyzedJobProperties.put(CreateRoutineLoadStmt.JSONROOT, jobProperties.get(CreateRoutineLoadStmt.JSONROOT)); + analyzedJobProperties.put(CreateRoutineLoadStmt.JSONROOT, + jobProperties.get(CreateRoutineLoadStmt.JSONROOT)); } if (jobProperties.containsKey(CreateRoutineLoadStmt.STRIP_OUTER_ARRAY)) { - boolean stripOuterArray = Boolean.valueOf(jobProperties.get(CreateRoutineLoadStmt.STRIP_OUTER_ARRAY)); + boolean stripOuterArray = Boolean.parseBoolean(jobProperties.get(CreateRoutineLoadStmt.STRIP_OUTER_ARRAY)); analyzedJobProperties.put(CreateRoutineLoadStmt.STRIP_OUTER_ARRAY, String.valueOf(stripOuterArray)); } if (jobProperties.containsKey(CreateRoutineLoadStmt.NUM_AS_STRING)) { - boolean numAsString = Boolean.valueOf(jobProperties.get(CreateRoutineLoadStmt.NUM_AS_STRING)); + boolean numAsString = Boolean.parseBoolean(jobProperties.get(CreateRoutineLoadStmt.NUM_AS_STRING)); analyzedJobProperties.put(CreateRoutineLoadStmt.NUM_AS_STRING, String.valueOf(numAsString)); } if (jobProperties.containsKey(CreateRoutineLoadStmt.FUZZY_PARSE)) { - boolean fuzzyParse = Boolean.valueOf(jobProperties.get(CreateRoutineLoadStmt.FUZZY_PARSE)); + boolean fuzzyParse = Boolean.parseBoolean(jobProperties.get(CreateRoutineLoadStmt.FUZZY_PARSE)); analyzedJobProperties.put(CreateRoutineLoadStmt.FUZZY_PARSE, String.valueOf(fuzzyParse)); } } private void checkDataSourceProperties() throws UserException { if (!FeConstants.runningUnitTest) { - RoutineLoadJob job = Catalog.getCurrentCatalog().getRoutineLoadManager().checkPrivAndGetJob(getDbName(), getLabel()); + RoutineLoadJob job = Catalog.getCurrentCatalog().getRoutineLoadManager() + .checkPrivAndGetJob(getDbName(), getLabel()); dataSourceProperties.setTimezone(job.getTimezone()); } else { dataSourceProperties.setTimezone(TimeUtils.DEFAULT_TIME_ZONE); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterSqlBlockRuleStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterSqlBlockRuleStmt.java index cc2a13c228..a51e20d98a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterSqlBlockRuleStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterSqlBlockRuleStmt.java @@ -73,16 +73,21 @@ public class AlterSqlBlockRuleStmt extends DdlStmt { private void setProperties(Map properties) throws AnalysisException { this.sql = properties.getOrDefault(CreateSqlBlockRuleStmt.SQL_PROPERTY, CreateSqlBlockRuleStmt.STRING_NOT_SET); - this.sqlHash = 
properties.getOrDefault(CreateSqlBlockRuleStmt.SQL_HASH_PROPERTY, CreateSqlBlockRuleStmt.STRING_NOT_SET); + this.sqlHash = properties.getOrDefault(CreateSqlBlockRuleStmt.SQL_HASH_PROPERTY, + CreateSqlBlockRuleStmt.STRING_NOT_SET); String partitionNumString = properties.get(CreateSqlBlockRuleStmt.SCANNED_PARTITION_NUM); String tabletNumString = properties.get(CreateSqlBlockRuleStmt.SCANNED_TABLET_NUM); String cardinalityString = properties.get(CreateSqlBlockRuleStmt.SCANNED_CARDINALITY); SqlBlockUtil.checkSqlAndSqlHashSetBoth(sql, sqlHash); - SqlBlockUtil.checkSqlAndLimitationsSetBoth(sql, sqlHash, partitionNumString, tabletNumString, cardinalityString); - this.partitionNum = Util.getLongPropertyOrDefault(partitionNumString, LONG_NOT_SET, null, CreateSqlBlockRuleStmt.SCANNED_PARTITION_NUM + " should be a long"); - this.tabletNum = Util.getLongPropertyOrDefault(tabletNumString, LONG_NOT_SET, null, CreateSqlBlockRuleStmt.SCANNED_TABLET_NUM + " should be a long"); - this.cardinality = Util.getLongPropertyOrDefault(cardinalityString, LONG_NOT_SET, null, CreateSqlBlockRuleStmt.SCANNED_CARDINALITY + " should be a long"); + SqlBlockUtil.checkSqlAndLimitationsSetBoth(sql, sqlHash, + partitionNumString, tabletNumString, cardinalityString); + this.partitionNum = Util.getLongPropertyOrDefault(partitionNumString, LONG_NOT_SET, null, + CreateSqlBlockRuleStmt.SCANNED_PARTITION_NUM + " should be a long"); + this.tabletNum = Util.getLongPropertyOrDefault(tabletNumString, LONG_NOT_SET, null, + CreateSqlBlockRuleStmt.SCANNED_TABLET_NUM + " should be a long"); + this.cardinality = Util.getLongPropertyOrDefault(cardinalityString, LONG_NOT_SET, null, + CreateSqlBlockRuleStmt.SCANNED_CARDINALITY + " should be a long"); // allow null, represents no modification String globalStr = properties.get(CreateSqlBlockRuleStmt.GLOBAL_PROPERTY); this.global = StringUtils.isNotEmpty(globalStr) ? 
Boolean.parseBoolean(globalStr) : null; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStatsStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStatsStmt.java index 42661b7bdd..b25f7c2897 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStatsStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStatsStmt.java @@ -61,8 +61,8 @@ public class AlterTableStatsStmt extends DdlStmt { throw new AnalysisException(optional.get() + " is invalid statistic"); } // check auth - if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), tableName.getDb(), tableName.getTbl(), - PrivPredicate.ALTER)) { + if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv( + ConnectContext.get(), tableName.getDb(), tableName.getTbl(), PrivPredicate.ALTER)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR, "ALTER TABLE STATS", ConnectContext.get().getQualifiedUser(), ConnectContext.get().getRemoteIP(), diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStmt.java index 1085197b12..1734dcc0c7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStmt.java @@ -87,10 +87,12 @@ public class AlterTableStmt extends DdlStmt { if (alterFeature == null || alterFeature == EnableFeatureClause.Features.UNKNOWN) { throw new AnalysisException("unknown feature for alter clause"); } - if (table.getKeysType() != KeysType.UNIQUE_KEYS && alterFeature == EnableFeatureClause.Features.BATCH_DELETE) { + if (table.getKeysType() != KeysType.UNIQUE_KEYS + && alterFeature == EnableFeatureClause.Features.BATCH_DELETE) { throw new AnalysisException("Batch delete only supported in unique tables."); } - if (table.getKeysType() != KeysType.UNIQUE_KEYS && alterFeature == EnableFeatureClause.Features.SEQUENCE_LOAD) { + if (table.getKeysType() != KeysType.UNIQUE_KEYS + && alterFeature == EnableFeatureClause.Features.SEQUENCE_LOAD) { throw new AnalysisException("Sequence load only supported in unique tables."); } // analyse sequence column diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterViewStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterViewStmt.java index 124b7cc850..93a55b9908 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterViewStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterViewStmt.java @@ -51,11 +51,12 @@ public class AlterViewStmt extends BaseViewStmt { Table table = analyzer.getTableOrAnalysisException(tableName); if (!(table instanceof View)) { - throw new AnalysisException(String.format("ALTER VIEW not allowed on a table:%s.%s", getDbName(), getTable())); + throw new AnalysisException(String.format("ALTER VIEW not allowed on a table:%s.%s", + getDbName(), getTable())); } - if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), tableName.getDb(), tableName.getTbl(), - PrivPredicate.ALTER)) { + if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv( + ConnectContext.get(), tableName.getDb(), tableName.getTbl(), PrivPredicate.ALTER)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR, "ALTER VIEW", ConnectContext.get().getQualifiedUser(), ConnectContext.get().getRemoteIP(), diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticExpr.java 
b/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticExpr.java index b0147dff55..a241da5db3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticExpr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticExpr.java @@ -62,7 +62,7 @@ import java.util.Objects; * and need to be substituted as such; example: COUNT(COUNT(..)) OVER (..) */ public class AnalyticExpr extends Expr { - private final static Logger LOG = LoggerFactory.getLogger(AnalyticExpr.class); + private static final Logger LOG = LoggerFactory.getLogger(AnalyticExpr.class); private static String NTILE = "NTILE"; private FunctionCallExpr fnCall; @@ -134,12 +134,15 @@ public class AnalyticExpr extends Expr { public FunctionCallExpr getFnCall() { return fnCall; } + public List getPartitionExprs() { return partitionExprs; } + public List getOrderByElements() { return orderByElements; } + public AnalyticWindow getWindow() { return window; } @@ -210,11 +213,10 @@ public class AnalyticExpr extends Expr { || fn.functionName().equalsIgnoreCase(MAX) || fn.functionName().equalsIgnoreCase(COUNT)) { return true; } - return false; } - static private boolean isOffsetFn(Function fn) { + private static boolean isOffsetFn(Function fn) { if (!isAnalyticFn(fn)) { return false; } @@ -222,7 +224,7 @@ public class AnalyticExpr extends Expr { return fn.functionName().equalsIgnoreCase(LEAD) || fn.functionName().equalsIgnoreCase(LAG); } - static private boolean isMinMax(Function fn) { + private static boolean isMinMax(Function fn) { if (!isAnalyticFn(fn)) { return false; } @@ -230,7 +232,7 @@ public class AnalyticExpr extends Expr { return fn.functionName().equalsIgnoreCase(MIN) || fn.functionName().equalsIgnoreCase(MAX); } - static private boolean isRankingFn(Function fn) { + private static boolean isRankingFn(Function fn) { if (!isAnalyticFn(fn)) { return false; } @@ -241,7 +243,7 @@ public class AnalyticExpr extends Expr { || fn.functionName().equalsIgnoreCase(NTILE); } - static private boolean isHllAggFn(Function fn) { + private static boolean isHllAggFn(Function fn) { if (!isAnalyticFn(fn)) { return false; } @@ -376,6 +378,7 @@ public class AnalyticExpr extends Expr { + orderByElements.get(0).getExpr().toSql()); } } + /** * check the value out of range in lag/lead() function */ diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticInfo.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticInfo.java index 8b88f4c4fc..aa993e34bf 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticInfo.java @@ -36,7 +36,7 @@ import java.util.List; * the corresponding analytic result tuple and its substitution map. */ public final class AnalyticInfo extends AggregateInfoBase { - private final static Logger LOG = LoggerFactory.getLogger(AnalyticInfo.class); + private static final Logger LOG = LoggerFactory.getLogger(AnalyticInfo.class); // All unique analytic exprs of a select block. Used to populate // super.aggregateExprs_ based on AnalyticExpr.getFnCall() for each analytic expr @@ -87,7 +87,7 @@ public final class AnalyticInfo extends AggregateInfoBase { * Creates complete AnalyticInfo for analyticExprs, including tuple descriptors and * smaps. 
*/ - static public AnalyticInfo create(ArrayList analyticExprs, Analyzer analyzer) { + public static AnalyticInfo create(ArrayList analyticExprs, Analyzer analyzer) { Preconditions.checkState(analyticExprs != null && !analyticExprs.isEmpty()); Expr.removeDuplicates(analyticExprs); AnalyticInfo result = new AnalyticInfo(analyticExprs); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticWindow.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticWindow.java index b6bc06d3bb..4266d02888 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticWindow.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticWindow.java @@ -55,6 +55,7 @@ public class AnalyticWindow { public String toString() { return description; } + public TAnalyticWindowType toThrift() { return this == ROWS ? TAnalyticWindowType.ROWS : TAnalyticWindowType.RANGE; } @@ -77,6 +78,7 @@ public class AnalyticWindow { public String toString() { return description; } + public TAnalyticWindowBoundaryType toThrift() { Preconditions.checkState(!isAbsolutePos()); @@ -140,6 +142,7 @@ public class AnalyticWindow { public BoundaryType getType() { return type; } + public Expr getExpr() { return expr; } @@ -243,12 +246,15 @@ public class AnalyticWindow { public Type getType() { return type; } + public Boundary getLeftBoundary() { return leftBoundary; } + public Boundary getRightBoundary() { return rightBoundary; } + public Boundary setRightBoundary(Boundary b) { return rightBoundary = b; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/Analyzer.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/Analyzer.java index 2a598b77c1..997891253f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/Analyzer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/Analyzer.java @@ -98,7 +98,7 @@ import java.util.stream.Collectors; * simple. */ public class Analyzer { - private final static Logger LOG = LogManager.getLogger(Analyzer.class); + private static final Logger LOG = LogManager.getLogger(Analyzer.class); // used for contains inlineview analytic function's tuple changed private ExprSubstitutionMap changeResSmap = new ExprSubstitutionMap(); @@ -969,7 +969,8 @@ public class Analyzer { * At this time, vectorization cannot support this situation, * so it is necessary to fall back to non-vectorization for processing. * For example: - * Query: select * from t1 left join (select k1, count(k2) as count_k2 from t2 group by k1) tmp on t1.k1=tmp.k1 + * Query: select * from t1 left join + * (select k1, count(k2) as count_k2 from t2 group by k1) tmp on t1.k1=tmp.k1 * Origin: tmp.k1 not null, tmp.count_k2 not null * Result: throw VecNotImplException */ @@ -1528,6 +1529,7 @@ public class Analyzer { public Set getGlobalInDeDuplication() { return Sets.newHashSet(globalState.globalInDeDuplication); } + /** * Makes the given semi-joined tuple visible such that its slots can be referenced. * If tid is null, makes the currently visible semi-joined tuple invisible again. 
@@ -2000,7 +2002,8 @@ public class Analyzer { if (globalState.context == null) { return false; } - return !globalState.context.getSessionVariable().isEnableJoinReorderBasedCost() && !globalState.context.getSessionVariable().isDisableJoinReorder(); + return !globalState.context.getSessionVariable().isEnableJoinReorderBasedCost() + && !globalState.context.getSessionVariable().isDisableJoinReorder(); } public boolean enableInferPredicate() { @@ -2028,7 +2031,8 @@ public class Analyzer { if (globalState.context == null) { return false; } - return globalState.context.getSessionVariable().isEnableJoinReorderBasedCost() && !globalState.context.getSessionVariable().isDisableJoinReorder(); + return globalState.context.getSessionVariable().isEnableJoinReorderBasedCost() + && !globalState.context.getSessionVariable().isDisableJoinReorder(); } public boolean safeIsEnableFoldConstantByBe() { @@ -2176,6 +2180,7 @@ public class Analyzer { public List getUnassignedConjuncts(PlanNode node) { return getUnassignedConjuncts(node.getTblRefIds()); } + /** * Returns true if e must be evaluated by a join node. Note that it may still be * safe to evaluate e elsewhere as well, but in any case the join must evaluate e. @@ -2196,6 +2201,7 @@ public class Analyzer { return false; } + /** * Mark all slots that are referenced in exprs as materialized. */ diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ArithmeticExpr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ArithmeticExpr.java index bd1ac2d0b7..8629d771ba 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ArithmeticExpr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ArithmeticExpr.java @@ -77,15 +77,19 @@ public class ArithmeticExpr extends Expr { public String toString() { return description; } + public String getName() { return name; } + public OperatorPosition getPos() { return pos; } + public TExprOpcode getOpcode() { return opcode; } + public boolean isUnary() { return pos == OperatorPosition.UNARY_PREFIX || pos == OperatorPosition.UNARY_POSTFIX; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/BackupStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/BackupStmt.java index 43dee100be..a1eef45ec2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/BackupStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/BackupStmt.java @@ -28,8 +28,8 @@ import com.google.common.collect.Maps; import java.util.Map; public class BackupStmt extends AbstractBackupStmt { - private final static String PROP_TYPE = "type"; - public final static String PROP_CONTENT = "content"; + private static final String PROP_TYPE = "type"; + public static final String PROP_CONTENT = "content"; public enum BackupType { INCREMENTAL, FULL diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/BinaryPredicate.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/BinaryPredicate.java index 12d6aa7b24..7da7356b36 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/BinaryPredicate.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/BinaryPredicate.java @@ -49,7 +49,7 @@ import java.util.Objects; * Most predicates with two operands.. */ public class BinaryPredicate extends Predicate implements Writable { - private final static Logger LOG = LogManager.getLogger(BinaryPredicate.class); + private static final Logger LOG = LogManager.getLogger(BinaryPredicate.class); // true if this BinaryPredicate is inferred from slot equivalences, false otherwise. 
private boolean isInferred = false; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/BrokerDesc.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/BrokerDesc.java index eb79f65267..f1155086fe 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/BrokerDesc.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/BrokerDesc.java @@ -41,11 +41,11 @@ import java.util.Map; // "password" = "password0" // ) public class BrokerDesc extends StorageDesc implements Writable { - private final static Logger LOG = LogManager.getLogger(BrokerDesc.class); + private static final Logger LOG = LogManager.getLogger(BrokerDesc.class); // just for multi load - public final static String MULTI_LOAD_BROKER = "__DORIS_MULTI_LOAD_BROKER__"; - public final static String MULTI_LOAD_BROKER_BACKEND_KEY = "__DORIS_MULTI_LOAD_BROKER_BACKEND__"; + public static final String MULTI_LOAD_BROKER = "__DORIS_MULTI_LOAD_BROKER__"; + public static final String MULTI_LOAD_BROKER_BACKEND_KEY = "__DORIS_MULTI_LOAD_BROKER_BACKEND__"; // Only used for recovery private BrokerDesc() { @@ -108,6 +108,7 @@ public class BrokerDesc extends StorageDesc implements Writable { } return TFileType.FILE_BROKER; } + public StorageBackend.StorageType storageType() { return storageType; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/BuiltinAggregateFunction.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/BuiltinAggregateFunction.java index 00b99e87de..d14f3400d0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/BuiltinAggregateFunction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/BuiltinAggregateFunction.java @@ -40,6 +40,7 @@ public class BuiltinAggregateFunction extends Function { public boolean isAnalyticFn() { return isAnalyticFn; } + // TODO: this is not used yet until the planner understand this. private org.apache.doris.catalog.Type intermediateType; private boolean reqIntermediateTuple = false; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CaseExpr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CaseExpr.java index 787d658552..40f0016453 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CaseExpr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CaseExpr.java @@ -300,7 +300,8 @@ public class CaseExpr extends Expr { // this method just compare literal value and not completely consistent with be,for two cases // 1 not deal float - // 2 just compare literal value with same type. for a example sql 'select case when 123 then '1' else '2' end as col' + // 2 just compare literal value with same type. 
+ // for a example sql 'select case when 123 then '1' else '2' end as col' // for be will return '1', because be only regard 0 as false // but for current LiteralExpr.compareLiteral, `123`' won't be regard as true // the case which two values has different type left to be @@ -349,7 +350,8 @@ public class CaseExpr extends Expr { // early return when the `when expr` can't be converted to constants Expr startExpr = expr.getChild(startIndex); if ((!startExpr.isLiteral() || startExpr instanceof DecimalLiteral || startExpr instanceof FloatLiteral) - || (!(startExpr instanceof NullLiteral) && !startExpr.getClass().toString().equals(caseExpr.getClass().toString()))) { + || (!(startExpr instanceof NullLiteral) + && !startExpr.getClass().toString().equals(caseExpr.getClass().toString()))) { return expr; } @@ -363,7 +365,9 @@ public class CaseExpr extends Expr { // 1 not literal // 2 float // 3 `case expr` and `when expr` don't have same type - if ((!currentWhenExpr.isLiteral() || currentWhenExpr instanceof DecimalLiteral || currentWhenExpr instanceof FloatLiteral) + if ((!currentWhenExpr.isLiteral() + || currentWhenExpr instanceof DecimalLiteral + || currentWhenExpr instanceof FloatLiteral) || !currentWhenExpr.getClass().toString().equals(caseExpr.getClass().toString())) { // remove the expr which has been evaluated List exprLeft = new ArrayList<>(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CastExpr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CastExpr.java index f5dd01c112..5b4ed33e1c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CastExpr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CastExpr.java @@ -160,7 +160,8 @@ public class CastExpr extends Expr { if (toType.isNull() || disableRegisterCastingFunction(fromType, toType)) { continue; } - String beClass = toType.isDecimalV2() || fromType.isDecimalV2() ? "DecimalV2Operators" : "CastFunctions"; + String beClass = toType.isDecimalV2() + || fromType.isDecimalV2() ? "DecimalV2Operators" : "CastFunctions"; if (fromType.isTime()) { beClass = "TimeOperators"; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ChannelDescription.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ChannelDescription.java index 13ff1438e6..86f7482f02 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ChannelDescription.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ChannelDescription.java @@ -62,7 +62,8 @@ public class ChannelDescription implements Writable { @SerializedName(value = "channelId") private long channelId; - public ChannelDescription(String srcDatabase, String srcTableName, String targetTable, PartitionNames partitionNames, List colNames) { + public ChannelDescription(String srcDatabase, String srcTableName, String targetTable, + PartitionNames partitionNames, List colNames) { this.srcDatabase = srcDatabase; this.srcTableName = srcTableName; this.targetTable = targetTable; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ColumnDef.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ColumnDef.java index cffa8df492..e98f4c2dfc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ColumnDef.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ColumnDef.java @@ -50,8 +50,8 @@ public class ColumnDef { * k1 INT NULL DEFAULT NULL * * ColumnnDef will be transformed to Column in Analysis phase, and in Column, default value is a String. 
- * No matter does the user set the default value as NULL explicitly, or not set default value, - * the default value in Column will be "null", so that Doris can not distinguish between "not set" and "set as null". + * No matter does the user set the default value as NULL explicitly, or not set default value, the default value + * in Column will be "null", so that Doris can not distinguish between "not set" and "set as null". * * But this is OK because Column has another attribute "isAllowNull". * If the column is not allowed to be null, and user does not set the default value, @@ -113,6 +113,7 @@ public class ColumnDef { this.comment = ""; this.defaultValue = DefaultValue.NOT_SET; } + public ColumnDef(String name, TypeDef typeDef, boolean isKey, AggregateType aggregateType, boolean isAllowNull, DefaultValue defaultValue, String comment) { this(name, typeDef, isKey, aggregateType, isAllowNull, defaultValue, comment, true); @@ -146,7 +147,8 @@ public class ColumnDef { } public static ColumnDef newSequenceColumnDef(Type type, AggregateType aggregateType) { - return new ColumnDef(Column.SEQUENCE_COL, new TypeDef(type), false, aggregateType, true, DefaultValue.NULL_DEFAULT_VALUE, + return new ColumnDef(Column.SEQUENCE_COL, new TypeDef(type), false, + aggregateType, true, DefaultValue.NULL_DEFAULT_VALUE, "sequence column hidden column", false); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CompoundPredicate.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CompoundPredicate.java index d2175117a0..2672d5564c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CompoundPredicate.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CompoundPredicate.java @@ -40,7 +40,7 @@ import java.util.Objects; * &&, ||, ! predicates. */ public class CompoundPredicate extends Predicate { - private final static Logger LOG = LogManager.getLogger(CompoundPredicate.class); + private static final Logger LOG = LogManager.getLogger(CompoundPredicate.class); private final Operator op; public static void initBuiltins(FunctionSet functionSet) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateDataSyncJobStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateDataSyncJobStmt.java index cbf0bdbf13..67159630c3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateDataSyncJobStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateDataSyncJobStmt.java @@ -96,10 +96,12 @@ public class CreateDataSyncJobStmt extends DdlStmt { Database db = Catalog.getCurrentCatalog().getDbOrAnalysisException(dbName); OlapTable olapTable = db.getOlapTableOrAnalysisException(tableName); if (olapTable.getKeysType() != KeysType.UNIQUE_KEYS) { - throw new AnalysisException("Table: " + tableName + " is not a unique table, key type: " + olapTable.getKeysType()); + throw new AnalysisException("Table: " + tableName + + " is not a unique table, key type: " + olapTable.getKeysType()); } if (!olapTable.hasDeleteSign()) { - throw new AnalysisException("Table: " + tableName + " don't support batch delete. Please upgrade it to support, see `help alter table`."); + throw new AnalysisException("Table: " + tableName + + " don't support batch delete. 
Please upgrade it to support, see `help alter table`."); } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateDbStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateDbStmt.java index 3ee94b70ae..90b4354844 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateDbStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateDbStmt.java @@ -65,7 +65,8 @@ public class CreateDbStmt extends DdlStmt { dbName = ClusterNamespace.getFullName(getClusterName(), dbName); if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), dbName, PrivPredicate.CREATE)) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName); + ErrorReport.reportAnalysisException( + ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateFunctionStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateFunctionStmt.java index 5cbbc5102a..9115a25391 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateFunctionStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateFunctionStmt.java @@ -64,9 +64,9 @@ import java.net.URL; import java.net.URLClassLoader; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; -import java.util.HashMap; import java.time.LocalDate; import java.time.LocalDateTime; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -246,10 +246,12 @@ public class CreateFunctionStmt extends DdlStmt { } private void analyzeUda() throws AnalysisException { - AggregateFunction.AggregateFunctionBuilder builder = AggregateFunction.AggregateFunctionBuilder.createUdfBuilder(); + AggregateFunction.AggregateFunctionBuilder builder + = AggregateFunction.AggregateFunctionBuilder.createUdfBuilder(); - builder.name(functionName).argsType(argsDef.getArgTypes()).retType(returnType.getType()). 
- hasVarArgs(argsDef.isVariadic()).intermediateType(intermediateType.getType()).location(URI.create(userFile)); + builder.name(functionName).argsType(argsDef.getArgTypes()).retType(returnType.getType()) + .hasVarArgs(argsDef.isVariadic()).intermediateType(intermediateType.getType()) + .location(URI.create(userFile)); String initFnSymbol = properties.get(INIT_KEY); if (initFnSymbol == null && !(binaryType == TFunctionBinaryType.JAVA_UDF)) { throw new AnalysisException("No 'init_fn' in properties"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateMaterializedViewStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateMaterializedViewStmt.java index b8125417ee..61e33892a4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateMaterializedViewStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateMaterializedViewStmt.java @@ -87,7 +87,8 @@ public class CreateMaterializedViewStmt extends DdlStmt { private String baseIndexName; private String dbName; private KeysType mvKeysType = KeysType.DUP_KEYS; - //if process is replaying log, isReplay is true, otherwise is false, avoid replay process error report, only in Rollup or MaterializedIndexMeta is true + //if process is replaying log, isReplay is true, otherwise is false, avoid replay process error report, + // only in Rollup or MaterializedIndexMeta is true private boolean isReplay = false; public CreateMaterializedViewStmt(String mvName, SelectStmt selectStmt, Map properties) { @@ -317,7 +318,8 @@ public class CreateMaterializedViewStmt extends DdlStmt { for (; theBeginIndexOfValue < mvColumnItemList.size(); theBeginIndexOfValue++) { MVColumnItem column = mvColumnItemList.get(theBeginIndexOfValue); keySizeByte += column.getType().getIndexSize(); - if (theBeginIndexOfValue + 1 > FeConstants.shortkey_max_column_count || keySizeByte > FeConstants.shortkey_maxsize_bytes) { + if (theBeginIndexOfValue + 1 > FeConstants.shortkey_max_column_count + || keySizeByte > FeConstants.shortkey_maxsize_bytes) { if (theBeginIndexOfValue == 0 && column.getType().getPrimitiveType().isCharFamily()) { column.setIsKey(true); theBeginIndexOfValue++; @@ -413,8 +415,7 @@ public class CreateMaterializedViewStmt extends DdlStmt { default: throw new AnalysisException("Unsupported function:" + functionName); } - MVColumnItem mvColumnItem = new MVColumnItem(mvColumnName, type, mvAggregateType, false, defineExpr, baseColumnName); - return mvColumnItem; + return new MVColumnItem(mvColumnName, type, mvAggregateType, false, defineExpr, baseColumnName); } public Map parseDefineExprWithoutAnalyze() throws AnalysisException { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateRoutineLoadStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateRoutineLoadStmt.java index eff6a8bb7c..a55d61d945 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateRoutineLoadStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateRoutineLoadStmt.java @@ -417,7 +417,8 @@ public class CreateRoutineLoadStmt extends DdlStmt { throw new AnalysisException(optional.get() + " is invalid property"); } - desiredConcurrentNum = ((Long) Util.getLongPropertyOrDefault(jobProperties.get(DESIRED_CONCURRENT_NUMBER_PROPERTY), + desiredConcurrentNum = ((Long) Util.getLongPropertyOrDefault( + jobProperties.get(DESIRED_CONCURRENT_NUMBER_PROPERTY), Config.max_routine_load_task_concurrent_num, DESIRED_CONCURRENT_NUMBER_PRED, DESIRED_CONCURRENT_NUMBER_PROPERTY + " should > 0")).intValue(); diff 
--git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateSqlBlockRuleStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateSqlBlockRuleStmt.java index a458a468ea..4d2018cf3d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateSqlBlockRuleStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateSqlBlockRuleStmt.java @@ -123,12 +123,17 @@ public class CreateSqlBlockRuleStmt extends DdlStmt { SqlBlockUtil.checkSqlAndSqlHashSetBoth(sql, sqlHash); SqlBlockUtil.checkPropertiesValidate(sql, sqlHash, partitionNumString, tabletNumString, cardinalityString); - this.partitionNum = Util.getLongPropertyOrDefault(partitionNumString, 0L, null, SCANNED_PARTITION_NUM + " should be a long"); - this.tabletNum = Util.getLongPropertyOrDefault(tabletNumString, 0L, null, SCANNED_TABLET_NUM + " should be a long"); - this.cardinality = Util.getLongPropertyOrDefault(cardinalityString, 0L, null, SCANNED_CARDINALITY + " should be a long"); + this.partitionNum = Util.getLongPropertyOrDefault(partitionNumString, 0L, null, + SCANNED_PARTITION_NUM + " should be a long"); + this.tabletNum = Util.getLongPropertyOrDefault(tabletNumString, 0L, null, + SCANNED_TABLET_NUM + " should be a long"); + this.cardinality = Util.getLongPropertyOrDefault(cardinalityString, 0L, null, + SCANNED_CARDINALITY + " should be a long"); - this.global = Util.getBooleanPropertyOrDefault(properties.get(GLOBAL_PROPERTY), false, GLOBAL_PROPERTY + " should be a boolean"); - this.enable = Util.getBooleanPropertyOrDefault(properties.get(ENABLE_PROPERTY), true, ENABLE_PROPERTY + " should be a boolean"); + this.global = Util.getBooleanPropertyOrDefault(properties.get(GLOBAL_PROPERTY), + false, GLOBAL_PROPERTY + " should be a boolean"); + this.enable = Util.getBooleanPropertyOrDefault(properties.get(ENABLE_PROPERTY), + true, ENABLE_PROPERTY + " should be a boolean"); } public static void checkCommonProperties(Map properties) throws UserException { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableLikeStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableLikeStmt.java index b65aaa4d28..1b1bf6dac6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableLikeStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableLikeStmt.java @@ -47,7 +47,8 @@ public class CreateTableLikeStmt extends DdlStmt { private final ArrayList rollupNames; private final boolean withAllRollup; - public CreateTableLikeStmt(boolean ifNotExists, TableName tableName, TableName existedTableName, ArrayList rollupNames, boolean withAllRollup) throws DdlException { + public CreateTableLikeStmt(boolean ifNotExists, TableName tableName, TableName existedTableName, + ArrayList rollupNames, boolean withAllRollup) throws DdlException { this.ifNotExists = ifNotExists; this.tableName = tableName; this.existedTableName = existedTableName; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java index 44de0d27f9..56f20d2113 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java @@ -381,7 +381,8 @@ public class CreateTableStmt extends DdlStmt { if (columnDef.getType().isArrayType()) { if (columnDef.getAggregateType() != null && columnDef.getAggregateType() != AggregateType.NONE) { - throw new AnalysisException("Array column can't support aggregation " + 
columnDef.getAggregateType()); + throw new AnalysisException("Array column can't support aggregation " + + columnDef.getAggregateType()); } if (columnDef.isKey()) { throw new AnalysisException("Array can only be used in the non-key column of" @@ -409,7 +410,8 @@ public class CreateTableStmt extends DdlStmt { if (partitionDesc instanceof ListPartitionDesc || partitionDesc instanceof RangePartitionDesc) { partitionDesc.analyze(columnDefs, properties); } else { - throw new AnalysisException("Currently only support range and list partition with engine type olap"); + throw new AnalysisException("Currently only support range" + + " and list partition with engine type olap"); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateUserStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateUserStmt.java index 25bdf87b93..180d3f532f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateUserStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateUserStmt.java @@ -134,7 +134,8 @@ public class CreateUserStmt extends DdlStmt { } // check if current user has GRANT priv on GLOBAL or DATABASE level. - if (!Catalog.getCurrentCatalog().getAuth().checkHasPriv(ConnectContext.get(), PrivPredicate.GRANT, PrivLevel.GLOBAL, PrivLevel.DATABASE)) { + if (!Catalog.getCurrentCatalog().getAuth().checkHasPriv(ConnectContext.get(), + PrivPredicate.GRANT, PrivLevel.GLOBAL, PrivLevel.DATABASE)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT"); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DataDescription.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DataDescription.java index bda93a8651..86d7722305 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DataDescription.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DataDescription.java @@ -130,13 +130,14 @@ public class DataDescription { * For hadoop load, this param is also used to persistence. 
* The function in this param is copied from 'parsedColumnExprList' */ - private final Map>> columnToHadoopFunction = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER); + private final Map>> columnToHadoopFunction + = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER); private boolean isHadoopLoad = false; private LoadTask.MergeType mergeType = LoadTask.MergeType.APPEND; private final Expr deleteCondition; - private Map properties; + private final Map properties; public DataDescription(String tableName, PartitionNames partitionNames, @@ -572,7 +573,8 @@ public class DataDescription { * columnToHadoopFunction = {"col3": "strftime("%Y-%m-%d %H:%M:%S", tmp_col3)"} */ private void analyzeColumns() throws AnalysisException { - if ((fileFieldNames == null || fileFieldNames.isEmpty()) && (columnsFromPath != null && !columnsFromPath.isEmpty())) { + if ((fileFieldNames == null || fileFieldNames.isEmpty()) + && (columnsFromPath != null && !columnsFromPath.isEmpty())) { throw new AnalysisException("Can not specify columns_from_path without column_list"); } @@ -719,7 +721,8 @@ public class DataDescription { } // check olapTable schema and sequenceCol if (olapTable.hasSequenceCol() && !hasSequenceCol()) { - throw new AnalysisException("Table " + olapTable.getName() + " has sequence column, need to specify the sequence column"); + throw new AnalysisException("Table " + olapTable.getName() + + " has sequence column, need to specify the sequence column"); } if (hasSequenceCol() && !olapTable.hasSequenceCol()) { throw new AnalysisException("There is no sequence column in the table " + olapTable.getName()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DateLiteral.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DateLiteral.java index a59952f55e..24f552e100 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DateLiteral.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DateLiteral.java @@ -84,10 +84,10 @@ public class DateLiteral extends LiteralExpr { private static Map MONTH_NAME_DICT = Maps.newHashMap(); private static Map MONTH_ABBR_NAME_DICT = Maps.newHashMap(); private static Map WEEK_DAY_NAME_DICT = Maps.newHashMap(); - private final static int[] DAYS_IN_MONTH = new int[] {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; - private final static int ALLOW_SPACE_MASK = 4 | 64; - private final static int MAX_DATE_PARTS = 8; - private final static int YY_PART_YEAR = 70; + private static final int[] DAYS_IN_MONTH = new int[] {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; + private static final int ALLOW_SPACE_MASK = 4 | 64; + private static final int MAX_DATE_PARTS = 8; + private static final int YY_PART_YEAR = 70; static { try { @@ -148,6 +148,7 @@ public class DateLiteral extends LiteralExpr { //Regex used to determine if the TIME field exists int date_format private static final Pattern HAS_TIME_PART = Pattern.compile("^.*[HhIiklrSsTp]+.*$"); + //Date Literal persist type in meta private enum DateLiteralType { DATETIME(0), @@ -597,7 +598,9 @@ public class DateLiteral extends LiteralExpr { case 'v': // %v Week (01..53), where Monday is the first day of the week; used with %x builder.appendWeekOfWeekyear(2); break; - case 'x': // %x Year for the week, where Monday is the first day of the week, numeric, four digits; used with %v + case 'x': + // %x Year for the week, where Monday is the first day of the week, + // numeric, four digits; used with %v builder.appendWeekyear(4, 4); break; case 'W': // %W Weekday name (Sunday..Saturday) @@ -614,9 +617,12 @@ 
public class DateLiteral extends LiteralExpr { case 'U': // %U Week (00..53), where Sunday is the first day of the week case 'u': // %u Week (00..53), where Monday is the first day of the week case 'V': // %V Week (01..53), where Sunday is the first day of the week; used with %X - case 'X': // %X Year for the week where Sunday is the first day of the week, numeric, four digits; used with %V + case 'X': + // %X Year for the week where Sunday is the first day of the week, + // numeric, four digits; used with %V case 'D': // %D Day of the month with English suffix (0th, 1st, 2nd, 3rd, …) - throw new AnalysisException(String.format("%%%s not supported in date format string", character)); + throw new AnalysisException( + String.format("%%%s not supported in date format string", character)); case '%': // %% A literal "%" character builder.appendLiteral('%'); break; @@ -957,7 +963,8 @@ public class DateLiteral extends LiteralExpr { } } else if (format.charAt(fp) != ' ') { if (format.charAt(fp) != value.charAt(vp)) { - throw new InvalidFormatException("Invalid char: " + value.charAt(vp) + ", expected: " + format.charAt(fp)); + throw new InvalidFormatException("Invalid char: " + value.charAt(vp) + + ", expected: " + format.charAt(fp)); } fp++; vp++; @@ -1053,6 +1060,7 @@ public class DateLiteral extends LiteralExpr { || hour > MAX_DATETIME.hour || minute > MAX_DATETIME.minute || second > MAX_DATETIME.second || microsecond > MAX_MICROSECOND; } + private boolean checkDate() { if (month != 0 && day > DAYS_IN_MONTH[((int) month)]) { if (month == 2 && day == 29 && Year.isLeap(year)) { @@ -1195,7 +1203,8 @@ public class DateLiteral extends LiteralExpr { int start = pre; int tempVal = 0; boolean scanToDelim = (!isIntervalFormat) && (fieldIdx != 6); - while (pre < dateStr.length() && Character.isDigit(dateStr.charAt(pre)) && (scanToDelim || fieldLen-- != 0)) { + while (pre < dateStr.length() && Character.isDigit(dateStr.charAt(pre)) + && (scanToDelim || fieldLen-- != 0)) { tempVal = tempVal * 10 + (dateStr.charAt(pre++) - '0'); } dateVal[fieldIdx] = tempVal; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DecimalLiteral.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DecimalLiteral.java index 87c22b5242..e938a46361 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DecimalLiteral.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DecimalLiteral.java @@ -107,7 +107,8 @@ public class DecimalLiteral extends LiteralExpr { @Override public ByteBuffer getHashValue(PrimitiveType type) { ByteBuffer buffer; - // no need to consider the overflow when cast decimal to other type, because this func only be used when querying, not storing. + // no need to consider the overflow when cast decimal to other type, + // because this func only be used when querying, not storing. // e.g. For column A with type INT, the data stored certainly no overflow. 
switch (type) { case TINYINT: diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DeleteStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DeleteStmt.java index 9cfd26eece..465813177b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DeleteStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DeleteStmt.java @@ -132,7 +132,8 @@ public class DeleteStmt extends DdlStmt { int inElementNum = inPredicate.getInElementNum(); int maxAllowedInElementNumOfDelete = Config.max_allowed_in_element_num_of_delete; if (inElementNum > maxAllowedInElementNumOfDelete) { - throw new AnalysisException("Element num of in predicate should not be more than " + maxAllowedInElementNumOfDelete); + throw new AnalysisException("Element num of in predicate should not be more than " + + maxAllowedInElementNumOfDelete); } for (int i = 1; i <= inPredicate.getInElementNum(); i++) { Expr expr = inPredicate.getChild(i); @@ -142,7 +143,8 @@ public class DeleteStmt extends DdlStmt { } deleteConditions.add(inPredicate); } else { - throw new AnalysisException("Where clause only supports compound predicate, binary predicate, is_null predicate or in predicate"); + throw new AnalysisException("Where clause only supports compound predicate," + + " binary predicate, is_null predicate or in predicate"); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DescribeStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DescribeStmt.java index a2c5139adb..257ea98c83 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DescribeStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DescribeStmt.java @@ -169,7 +169,8 @@ public class DescribeStmt extends ShowStmt { column.getOriginType().toString(), column.isAllowNull() ? "Yes" : "No", ((Boolean) column.isKey()).toString(), - column.getDefaultValue() == null ? FeConstants.null_string : column.getDefaultValue(), + column.getDefaultValue() == null + ? FeConstants.null_string : column.getDefaultValue(), extraStr, ((Boolean) column.isVisible()).toString() ); @@ -221,6 +222,7 @@ public class DescribeStmt extends ShowStmt { public String getTableName() { return dbTableName.getTbl(); } + public String getDb() { return dbTableName.getDb(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DescriptorTable.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DescriptorTable.java index f733e3a9a6..00139913d9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DescriptorTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DescriptorTable.java @@ -41,7 +41,7 @@ import java.util.List; * them unique ids.. */ public class DescriptorTable { - private final static Logger LOG = LogManager.getLogger(DescriptorTable.class); + private static final Logger LOG = LogManager.getLogger(DescriptorTable.class); private final HashMap tupleDescs = new HashMap(); // List of referenced tables with no associated TupleDescriptor to ship to the BE. 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DropDbStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DropDbStmt.java index 2c01a967c4..43cc9f5b47 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DropDbStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DropDbStmt.java @@ -61,7 +61,8 @@ public class DropDbStmt extends DdlStmt { dbName = ClusterNamespace.getFullName(getClusterName(), dbName); // Don't allowed to drop 'information_schema' if (dbName.equalsIgnoreCase(ClusterNamespace.getFullName(getClusterName(), InfoSchemaDb.DATABASE_NAME))) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName); + ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, + analyzer.getQualifiedUser(), dbName); } if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), dbName, PrivPredicate.DROP)) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExportStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExportStmt.java index 6df62c42f4..b8f2c2c9be 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExportStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExportStmt.java @@ -54,7 +54,7 @@ import java.util.UUID; // [PROPERTIES("key"="value")] // BY BROKER 'broker_name' [( $broker_attrs)] public class ExportStmt extends StatementBase { - private final static Logger LOG = LogManager.getLogger(ExportStmt.class); + private static final Logger LOG = LogManager.getLogger(ExportStmt.class); public static final String TABLET_NUMBER_PER_TASK_PROP = "tablet_num_per_task"; public static final String LABEL = "label"; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/Expr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/Expr.java index 5387539254..5c5a6d8c55 100755 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/Expr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/Expr.java @@ -60,7 +60,7 @@ import java.util.Set; /** * Root of the expr node hierarchy. */ -abstract public class Expr extends TreeNode implements ParseNode, Cloneable, Writable { +public abstract class Expr extends TreeNode implements ParseNode, Cloneable, Writable { private static final Logger LOG = LogManager.getLogger(Expr.class); // Name of the function that needs to be implemented by every Expr that @@ -70,10 +70,10 @@ abstract public class Expr extends TreeNode implements ParseNode, Cloneabl // to be used where we can't come up with a better estimate public static final double DEFAULT_SELECTIVITY = 0.1; - public final static float FUNCTION_CALL_COST = 10; + public static final float FUNCTION_CALL_COST = 10; // returns true if an Expr is a non-analytic aggregate. - private final static com.google.common.base.Predicate IS_AGGREGATE_PREDICATE = + private static final com.google.common.base.Predicate IS_AGGREGATE_PREDICATE = new com.google.common.base.Predicate() { public boolean apply(Expr arg) { return arg instanceof FunctionCallExpr @@ -82,7 +82,7 @@ abstract public class Expr extends TreeNode implements ParseNode, Cloneabl }; // Returns true if an Expr is a NOT CompoundPredicate. 
- public final static com.google.common.base.Predicate IS_NOT_PREDICATE = + public static final com.google.common.base.Predicate IS_NOT_PREDICATE = new com.google.common.base.Predicate() { @Override public boolean apply(Expr arg) { @@ -92,7 +92,7 @@ abstract public class Expr extends TreeNode implements ParseNode, Cloneabl }; // Returns true if an Expr is an OR CompoundPredicate. - public final static com.google.common.base.Predicate IS_OR_PREDICATE = + public static final com.google.common.base.Predicate IS_OR_PREDICATE = new com.google.common.base.Predicate() { @Override public boolean apply(Expr arg) { @@ -102,7 +102,7 @@ abstract public class Expr extends TreeNode implements ParseNode, Cloneabl }; // Returns true if an Expr is a scalar subquery - public final static com.google.common.base.Predicate IS_SCALAR_SUBQUERY = + public static final com.google.common.base.Predicate IS_SCALAR_SUBQUERY = new com.google.common.base.Predicate() { @Override public boolean apply(Expr arg) { @@ -112,7 +112,7 @@ abstract public class Expr extends TreeNode implements ParseNode, Cloneabl // Returns true if an Expr is an aggregate function that returns non-null on // an empty set (e.g. count). - public final static com.google.common.base.Predicate NON_NULL_EMPTY_AGG = + public static final com.google.common.base.Predicate NON_NULL_EMPTY_AGG = new com.google.common.base.Predicate() { @Override public boolean apply(Expr arg) { @@ -121,7 +121,7 @@ abstract public class Expr extends TreeNode implements ParseNode, Cloneabl }; // Returns true if an Expr is a builtin aggregate function. - public final static com.google.common.base.Predicate CORRELATED_SUBQUERY_SUPPORT_AGG_FN = + public static final com.google.common.base.Predicate CORRELATED_SUBQUERY_SUPPORT_AGG_FN = new com.google.common.base.Predicate() { @Override public boolean apply(Expr arg) { @@ -139,7 +139,7 @@ abstract public class Expr extends TreeNode implements ParseNode, Cloneabl }; - public final static com.google.common.base.Predicate IS_TRUE_LITERAL = + public static final com.google.common.base.Predicate IS_TRUE_LITERAL = new com.google.common.base.Predicate() { @Override public boolean apply(Expr arg) { @@ -147,7 +147,7 @@ abstract public class Expr extends TreeNode implements ParseNode, Cloneabl } }; - public final static com.google.common.base.Predicate IS_FALSE_LITERAL = + public static final com.google.common.base.Predicate IS_FALSE_LITERAL = new com.google.common.base.Predicate() { @Override public boolean apply(Expr arg) { @@ -155,7 +155,7 @@ abstract public class Expr extends TreeNode implements ParseNode, Cloneabl } }; - public final static com.google.common.base.Predicate IS_EQ_BINARY_PREDICATE = + public static final com.google.common.base.Predicate IS_EQ_BINARY_PREDICATE = new com.google.common.base.Predicate() { @Override public boolean apply(Expr arg) { @@ -163,7 +163,7 @@ abstract public class Expr extends TreeNode implements ParseNode, Cloneabl } }; - public final static com.google.common.base.Predicate IS_BINARY_PREDICATE = + public static final com.google.common.base.Predicate IS_BINARY_PREDICATE = new com.google.common.base.Predicate() { @Override public boolean apply(Expr arg) { @@ -423,7 +423,7 @@ abstract public class Expr extends TreeNode implements ParseNode, Cloneabl /** * Does subclass-specific analysis. Subclasses should override analyzeImpl(). 
*/ - abstract protected void analyzeImpl(Analyzer analyzer) throws AnalysisException; + protected abstract void analyzeImpl(Analyzer analyzer) throws AnalysisException; /** * Set the expr to be analyzed and computes isConstant_. @@ -665,6 +665,7 @@ abstract public class Expr extends TreeNode implements ParseNode, Cloneabl } } } + /** * Returns true if the list contains an aggregate expr. */ @@ -828,6 +829,7 @@ abstract public class Expr extends TreeNode implements ParseNode, Cloneabl child.markAgg(); } } + /** * Returns the product of the given exprs' number of distinct values or -1 if any of * the exprs have an invalid number of distinct values. @@ -1354,7 +1356,8 @@ abstract public class Expr extends TreeNode implements ParseNode, Cloneabl && (this.type.isStringType() || this.type.isHllType())) { return this; } - // Preconditions.checkState(PrimitiveType.isImplicitCast(type, targetType), "cast %s to %s", this.type, targetType); + // Preconditions.checkState(PrimitiveType.isImplicitCast(type, targetType), + // "cast %s to %s", this.type, targetType); // TODO(zc): use implicit cast if (!Type.canCastTo(this.type, targetType)) { throw new AnalysisException("type not match, originType=" + this.type diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprId.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprId.java index 79b2fc721e..2045b71367 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprId.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprId.java @@ -27,7 +27,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; public class ExprId extends Id { - private final static Logger LOG = LogManager.getLogger(ExprId.class); + private static final Logger LOG = LogManager.getLogger(ExprId.class); // Construction only allowed via an IdGenerator. public ExprId(int id) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprSubstitutionMap.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprSubstitutionMap.java index 46b9caa0fa..966cfa7e0a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprSubstitutionMap.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprSubstitutionMap.java @@ -37,7 +37,7 @@ import java.util.List; * See Expr.substitute() and related functions for details on the actual substitution. */ public final class ExprSubstitutionMap { - private final static Logger LOG = LoggerFactory.getLogger(ExprSubstitutionMap.class); + private static final Logger LOG = LoggerFactory.getLogger(ExprSubstitutionMap.class); private boolean checkAnalyzed = true; private List lhs; // left-hand side diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExpressionFunctions.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExpressionFunctions.java index 220fe55299..035cd8a67b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExpressionFunctions.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExpressionFunctions.java @@ -75,7 +75,8 @@ public enum ExpressionFunctions { // 2. Not in NonNullResultWithNullParamFunctions // 3. 
Has null parameter if ((fn.getNullableMode() == Function.NullableMode.DEPEND_ON_ARGUMENT - || Catalog.getCurrentCatalog().isNullResultWithOneNullParamFunction(fn.getFunctionName().getFunction())) + || Catalog.getCurrentCatalog().isNullResultWithOneNullParamFunction( + fn.getFunctionName().getFunction())) && !fn.isUdf()) { for (Expr e : constExpr.getChildren()) { if (e instanceof NullLiteral) { @@ -205,7 +206,8 @@ public enum ExpressionFunctions { if (argType.isArray()) { Preconditions.checkArgument(method.getParameterTypes().length == typeIndex + 1); final List variableLengthExprs = Lists.newArrayList(); - for (int variableLengthArgIndex = typeIndex; variableLengthArgIndex < args.size(); variableLengthArgIndex++) { + for (int variableLengthArgIndex = typeIndex; + variableLengthArgIndex < args.size(); variableLengthArgIndex++) { variableLengthExprs.add(args.get(variableLengthArgIndex)); } LiteralExpr[] variableLengthArgs = createVariableLengthArgs(variableLengthExprs, typeIndex); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionCallExpr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionCallExpr.java index a9ca2504a1..26f55a321f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionCallExpr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionCallExpr.java @@ -466,7 +466,8 @@ public class FunctionCallExpr extends Expr { if (fnName.getFunction().equalsIgnoreCase("json_object")) { if ((children.size() & 1) == 1 && (originChildSize == children.size())) { - throw new AnalysisException("json_object can't be odd parameters, need even parameters: " + this.toSql()); + throw new AnalysisException("json_object can't be odd parameters, need even parameters: " + + this.toSql()); } String res = parseJsonDataType(true); if (children.size() == originChildSize) { @@ -505,7 +506,8 @@ public class FunctionCallExpr extends Expr { if (children.size() > 2) { if (!getChild(1).isConstant() || !getChild(2).isConstant()) { throw new AnalysisException( - "The default parameter (parameter 2 or parameter 3) of LEAD/LAG must be a constant: " + this.toSql()); + "The default parameter (parameter 2 or parameter 3) of LEAD/LAG must be a constant: " + + this.toSql()); } uncheckedCastChild(Type.BIGINT, 1); if (!getChild(2).type.matchesType(getChild(0).type) && !getChild(2).type.matchesType(Type.NULL)) { @@ -590,7 +592,8 @@ public class FunctionCallExpr extends Expr { } Type inputType = getChild(0).getType(); if (!inputType.isBitmapType()) { - throw new AnalysisException(fnName + " function's argument should be of BITMAP type, but was " + inputType); + throw new AnalysisException(fnName + + " function's argument should be of BITMAP type, but was " + inputType); } return; } @@ -601,7 +604,8 @@ public class FunctionCallExpr extends Expr { } Type inputType = getChild(0).getType(); if (!inputType.isQuantileStateType()) { - throw new AnalysisException(fnName + " function's argument should be of QUANTILE_STATE type, but was" + inputType); + throw new AnalysisException(fnName + + " function's argument should be of QUANTILE_STATE type, but was" + inputType); } } @@ -885,7 +889,8 @@ public class FunctionCallExpr extends Expr { } for (int i = 3; i < children.size(); i++) { if (children.get(i).type != Type.BOOLEAN) { - throw new AnalysisException("The 4th and subsequent params of " + fnName + " function must be boolean"); + throw new AnalysisException("The 4th and subsequent params of " + + fnName + " function must be boolean"); } childTypes[i] = 
children.get(i).type; } @@ -919,7 +924,8 @@ public class FunctionCallExpr extends Expr { if (fn == null) { if (!analyzer.isUDFAllowed()) { throw new AnalysisException( - "Does not support non-builtin functions, or function does not exist: " + this.toSqlImpl()); + "Does not support non-builtin functions, or function does not exist: " + + this.toSqlImpl()); } String dbName = fnName.analyzeDb(analyzer); @@ -1081,7 +1087,8 @@ public class FunctionCallExpr extends Expr { List inputParamsExprs = retExpr.fnParams.exprs(); List parameters = ((AliasFunction) retExpr.fn).getParameters(); Preconditions.checkArgument(inputParamsExprs.size() == parameters.size(), - "Alias function [" + retExpr.fn.getFunctionName().getFunction() + "] args number is not equal to it's definition"); + "Alias function [" + retExpr.fn.getFunctionName().getFunction() + + "] args number is not equal to it's definition"); List oriParamsExprs = oriExpr.fnParams.exprs(); // replace origin function params exprs' with input params expr depending on parameter name @@ -1108,7 +1115,8 @@ public class FunctionCallExpr extends Expr { * @return * @throws AnalysisException */ - private Expr replaceParams(List parameters, List inputParamsExprs, Expr oriExpr) throws AnalysisException { + private Expr replaceParams(List parameters, List inputParamsExprs, Expr oriExpr) + throws AnalysisException { for (int i = 0; i < oriExpr.getChildren().size(); i++) { Expr retExpr = replaceParams(parameters, inputParamsExprs, oriExpr.getChild(i)); oriExpr.setChild(i, retExpr); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionParams.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionParams.java index 8234e25201..32cfba0351 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionParams.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionParams.java @@ -58,7 +58,7 @@ public class FunctionParams implements Writable { isDistinct = false; } - static public FunctionParams createStarParam() { + public static FunctionParams createStarParam() { return new FunctionParams(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/GrantStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/GrantStmt.java index 18621f19b2..4e849e7c61 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/GrantStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/GrantStmt.java @@ -59,7 +59,8 @@ public class GrantStmt extends DdlStmt { this.privileges = privs.toPrivilegeList(); } - public GrantStmt(UserIdentity userIdent, String role, ResourcePattern resourcePattern, List privileges) { + public GrantStmt(UserIdentity userIdent, String role, + ResourcePattern resourcePattern, List privileges) { this.userIdent = userIdent; this.role = role; this.tblPattern = null; @@ -168,12 +169,14 @@ public class GrantStmt extends DdlStmt { ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT"); } } else if (tblPattern.getPrivLevel() == PrivLevel.DATABASE) { - if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), tblPattern.getQualifiedDb(), PrivPredicate.GRANT)) { + if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), + tblPattern.getQualifiedDb(), PrivPredicate.GRANT)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT"); } } else { // table level - if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), tblPattern.getQualifiedDb(), tblPattern.getTbl(), 
PrivPredicate.GRANT)) { + if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), + tblPattern.getQualifiedDb(), tblPattern.getTbl(), PrivPredicate.GRANT)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT"); } } @@ -204,7 +207,8 @@ public class GrantStmt extends DdlStmt { ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT"); } } else { - if (!Catalog.getCurrentCatalog().getAuth().checkResourcePriv(ConnectContext.get(), resourcePattern.getResourceName(), PrivPredicate.GRANT)) { + if (!Catalog.getCurrentCatalog().getAuth().checkResourcePriv(ConnectContext.get(), + resourcePattern.getResourceName(), PrivPredicate.GRANT)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT"); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/GroupByClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/GroupByClause.java index d74218c69e..f8d490977e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/GroupByClause.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/GroupByClause.java @@ -45,10 +45,10 @@ import java.util.stream.Collectors; * In this class we produce the rule of generating rows base on the group by clause. */ public class GroupByClause implements ParseNode { - private final static Logger LOG = LogManager.getLogger(GroupByClause.class); + private static final Logger LOG = LogManager.getLogger(GroupByClause.class); // max num of distinct sets in grouping sets clause - private final static int MAX_GROUPING_SETS_NUM = 64; + private static final int MAX_GROUPING_SETS_NUM = 64; // max num of distinct expressions private boolean analyzed = false; private boolean exprGenerated = false; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/InlineViewRef.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/InlineViewRef.java index 666b143305..f0f9c421b8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/InlineViewRef.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/InlineViewRef.java @@ -288,7 +288,8 @@ public class InlineViewRef extends TableRef { false, null, selectItemExpr.isNullable(), null, "")); } - InlineView inlineView = (view != null) ? new InlineView(view, columnList) : new InlineView(getExplicitAlias(), columnList); + InlineView inlineView = (view != null) ? new InlineView(view, columnList) + : new InlineView(getExplicitAlias(), columnList); // Create the non-materialized tuple and set the fake table in it. 
TupleDescriptor result = analyzer.getDescTbl().createTupleDescriptor(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/InsertStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/InsertStmt.java index 579d9d1512..62a8a9887e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/InsertStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/InsertStmt.java @@ -143,7 +143,8 @@ public class InsertStmt extends DdlStmt { isUserSpecifiedLabel = true; } - this.isValuesOrConstantSelect = (queryStmt instanceof SelectStmt && ((SelectStmt) queryStmt).getTableRefs().isEmpty()); + this.isValuesOrConstantSelect = (queryStmt instanceof SelectStmt + && ((SelectStmt) queryStmt).getTableRefs().isEmpty()); } // Ctor for CreateTableAsSelectStmt @@ -187,7 +188,8 @@ public class InsertStmt extends DdlStmt { return tblName.getTbl(); } - public void getTables(Analyzer analyzer, Map tableMap, Set parentViewNameSet) throws AnalysisException { + public void getTables(Analyzer analyzer, Map tableMap, Set parentViewNameSet) + throws AnalysisException { // get dbs of statement queryStmt.getTables(analyzer, tableMap, parentViewNameSet); tblName.analyze(analyzer); @@ -269,8 +271,8 @@ public class InsertStmt extends DdlStmt { if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), tblName.getDb(), tblName.getTbl(), PrivPredicate.LOAD)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR, "LOAD", - ConnectContext.get().getQualifiedUser(), - ConnectContext.get().getRemoteIP(), tblName.getDb() + ": " + tblName.getTbl()); + ConnectContext.get().getQualifiedUser(), + ConnectContext.get().getRemoteIP(), tblName.getDb() + ": " + tblName.getTbl()); } // check partition @@ -457,7 +459,8 @@ public class InsertStmt extends DdlStmt { if (column.isNameWithPrefix(CreateMaterializedViewStmt.MATERIALIZED_VIEW_NAME_PREFIX)) { SlotRef refColumn = column.getRefColumn(); if (refColumn == null) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_BAD_FIELD_ERROR, column.getName(), targetTable.getName()); + ErrorReport.reportAnalysisException(ErrorCode.ERR_BAD_FIELD_ERROR, + column.getName(), targetTable.getName()); } String origName = refColumn.getColumnName(); for (int originColumnIdx = 0; originColumnIdx < targetColumns.size(); originColumnIdx++) { @@ -526,7 +529,8 @@ public class InsertStmt extends DdlStmt { ExprSubstitutionMap smap = new ExprSubstitutionMap(); smap.getLhs().add(entry.second.getRefColumn()); smap.getRhs().add(queryStmt.getResultExprs().get(entry.first)); - Expr e = Expr.substituteList(Lists.newArrayList(entry.second.getDefineExpr()), smap, analyzer, false).get(0); + Expr e = Expr.substituteList(Lists.newArrayList(entry.second.getDefineExpr()), + smap, analyzer, false).get(0); queryStmt.getResultExprs().add(e); } } @@ -551,7 +555,8 @@ public class InsertStmt extends DdlStmt { ExprSubstitutionMap smap = new ExprSubstitutionMap(); smap.getLhs().add(entry.second.getRefColumn()); smap.getRhs().add(queryStmt.getResultExprs().get(entry.first)); - Expr e = Expr.substituteList(Lists.newArrayList(entry.second.getDefineExpr()), smap, analyzer, false).get(0); + Expr e = Expr.substituteList(Lists.newArrayList(entry.second.getDefineExpr()), + smap, analyzer, false).get(0); queryStmt.getBaseTblResultExprs().add(e); } } @@ -605,7 +610,8 @@ public class InsertStmt extends DdlStmt { ExprSubstitutionMap smap = new ExprSubstitutionMap(); smap.getLhs().add(entry.second.getRefColumn()); smap.getRhs().add(extentedRow.get(entry.first)); - 
extentedRow.add(Expr.substituteList(Lists.newArrayList(entry.second.getDefineExpr()), smap, analyzer, false).get(0)); + extentedRow.add(Expr.substituteList(Lists.newArrayList(entry.second.getDefineExpr()), + smap, analyzer, false).get(0)); } } } @@ -620,7 +626,8 @@ public class InsertStmt extends DdlStmt { if (expr instanceof DefaultValueExpr) { if (targetColumns.get(i).getDefaultValue() == null) { - throw new AnalysisException("Column has no default value, column=" + targetColumns.get(i).getName()); + throw new AnalysisException("Column has no default value, column=" + + targetColumns.get(i).getName()); } expr = new StringLiteral(targetColumns.get(i).getDefaultValue()); } @@ -727,7 +734,8 @@ public class InsertStmt extends DdlStmt { if (!isExplain() && targetTable instanceof OlapTable) { ((OlapTableSink) dataSink).complete(); // add table indexes to transaction state - TransactionState txnState = Catalog.getCurrentGlobalTransactionMgr().getTransactionState(db.getId(), transactionId); + TransactionState txnState = Catalog.getCurrentGlobalTransactionMgr() + .getTransactionState(db.getId(), transactionId); if (txnState == null) { throw new DdlException("txn does not exist: " + transactionId); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/IsNullPredicate.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/IsNullPredicate.java index a196c89f55..8c17ff9af7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/IsNullPredicate.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/IsNullPredicate.java @@ -143,6 +143,7 @@ public class IsNullPredicate extends Predicate { public boolean isNullable() { return false; } + /** * fix issue 6390 */ diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/LargeIntLiteral.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/LargeIntLiteral.java index 0e3fd0c7a8..33f13c74e1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/LargeIntLiteral.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/LargeIntLiteral.java @@ -39,7 +39,7 @@ import java.util.Objects; // large int for the num that native types can not public class LargeIntLiteral extends LiteralExpr { - private final static Logger LOG = LogManager.getLogger(LargeIntLiteral.class); + private static final Logger LOG = LogManager.getLogger(LargeIntLiteral.class); // -2^127 public static final BigInteger LARGE_INT_MIN = new BigInteger("-170141183460469231731687303715884105728"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/LoadStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/LoadStmt.java index 1c12ad69e5..469f4d4523 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/LoadStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/LoadStmt.java @@ -124,7 +124,7 @@ public class LoadStmt extends DdlStmt { private EtlJobType etlJobType = EtlJobType.UNKNOWN; - public final static ImmutableMap PROPERTIES_MAP = new ImmutableMap.Builder() + public static final ImmutableMap PROPERTIES_MAP = new ImmutableMap.Builder() .put(TIMEOUT_PROPERTY, new Function() { @Override public @Nullable Long apply(@Nullable String s) { @@ -337,7 +337,8 @@ public class LoadStmt extends DdlStmt { } Database db = Catalog.getCurrentCatalog().getDbOrAnalysisException(label.getDbName()); OlapTable table = db.getOlapTableOrAnalysisException(dataDescription.getTableName()); - if (dataDescription.getMergeType() != LoadTask.MergeType.APPEND && table.getKeysType() != KeysType.UNIQUE_KEYS) { + if 
(dataDescription.getMergeType() != LoadTask.MergeType.APPEND + && table.getKeysType() != KeysType.UNIQUE_KEYS) { throw new AnalysisException("load by MERGE or DELETE is only supported in unique tables."); } if (dataDescription.getMergeType() != LoadTask.MergeType.APPEND && !table.hasDeleteSign()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/LockTable.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/LockTable.java index 07b51caf1b..4fbaa33651 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/LockTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/LockTable.java @@ -34,6 +34,7 @@ public class LockTable { return desc; } } + private TableName tableName; private String alias; private LockType lockType; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/MVColumnBitmapUnionPattern.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/MVColumnBitmapUnionPattern.java index 45bf9948f8..8dabace3a1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/MVColumnBitmapUnionPattern.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/MVColumnBitmapUnionPattern.java @@ -61,7 +61,8 @@ public class MVColumnBitmapUnionPattern implements MVColumnPattern { @Override public String toString() { - return FunctionSet.BITMAP_UNION + "(" + FunctionSet.TO_BITMAP + "(column)), type of column could not be integer. " + return FunctionSet.BITMAP_UNION + "(" + FunctionSet.TO_BITMAP + + "(column)), type of column could not be integer. " + "Or " + FunctionSet.BITMAP_UNION + "(bitmap_column) in agg table"; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyTablePropertiesClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyTablePropertiesClause.java index 37b50fa73e..d91b770ec9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyTablePropertiesClause.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyTablePropertiesClause.java @@ -82,7 +82,8 @@ public class ModifyTablePropertiesClause extends AlterTableClause { } else if (properties.containsKey("default." + PropertyAnalyzer.PROPERTIES_REPLICATION_NUM) || properties.containsKey("default." + PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION)) { ReplicaAllocation replicaAlloc = PropertyAnalyzer.analyzeReplicaAllocation(properties, "default"); - properties.put("default." + PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION, replicaAlloc.toCreateStmt()); + properties.put("default." + PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION, + replicaAlloc.toCreateStmt()); } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_INMEMORY)) { this.needTableStable = false; this.opType = AlterOpType.MODIFY_TABLE_PROPERTY_SYNC; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/OpcodeRegistry.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/OpcodeRegistry.java deleted file mode 100644 index 8b3f9f8460..0000000000 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/OpcodeRegistry.java +++ /dev/null @@ -1,314 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.analysis; - -/** - * The OpcodeRegistry provides a mapping between function signatures and opcodes. The - * supported functions are code-gen'ed and added to the registry with an assigned opcode. - * The opcode is shared with the backend. The frontend can use the registry to look up - * a function's opcode. - *

- * The OpcodeRegistry also contains a mapping between function names (as strings) to - * operators. - *

- * The OpcodeRegistry is a singleton. - *

- * TODO: The opcode registry should be versioned in the FE/BE. - */ -public class OpcodeRegistry { -// -// private final static Logger LOG = LogManager.getLogger(OpcodeRegistry.class); -// private static OpcodeRegistry instance = new OpcodeRegistry(); -// /** -// * This is a mapping of Operator,#args to signatures with a fixed number of arguments. -// * The signature is defined by the operator enum and the arguments -// * and is a one to one mapping to opcodes. -// * The map is structured this way to more efficiently look for signature matches. -// * Signatures that have the same number of arguments have a potential to be matches -// * by allowing types to be implicitly cast. -// * Functions with a variable number of arguments are put into the varArgOperations map. -// */ -// private final Map, List> operations; -// /** -// * This is a mapping of Operator,varArgType to signatures of vararg functions only. -// * varArgType must be a maximum-resolution type. -// * We use a separate map to be able to support multiple vararg signatures for the same -// * FunctionOperator. -// * Contains a special entry mapping from Operator,NULL_TYPE to signatures for each -// * Operator to correctly match varag functions when all args are NULL. -// * Limitations: Since we do not consider the number of arguments, each FunctionOperator -// * is limited to having one vararg signature per maximum-resolution PrimitiveType. -// * For example, one can have two signatures func(float, int ...) and func(string ...), -// * but not func(float, int ...) and func (int ...). -// */ -// private final Map, List> -// varArgOperations; -// /** -// * This contains a mapping of function names to a FunctionOperator enum. This is used -// * by FunctionCallExpr to go from the parser input to function opcodes. -// * This is a many to one mapping (i.e. substr and substring both map to the same -// * operation). -// * The mappings are filled in in FunctionRegistry.java which is auto-generated. -// */ -// private final HashMap functionNameMap; -// -// private final HashMap> funcByOp; -// -// // Singleton interface, don't call the constructor -// private OpcodeRegistry() { -// operations = Maps.newHashMap(); -// varArgOperations = Maps.newHashMap(); -// functionNameMap = Maps.newHashMap(); -// funcByOp = Maps.newHashMap(); -// -// // Add all the function signatures to the registry and the function name(string) -// // to FunctionOperator mapping -// FunctionRegistry.InitFunctions(this); -// } -// -// // Singleton interface -// public static OpcodeRegistry instance() { -// return instance; -// } -// -// /** -// * Static utility functions -// */ -// public static boolean isBitwiseOperation(FunctionOperator operator) { -// return operator == FunctionOperator.BITAND || operator == FunctionOperator.BITNOT || -// operator == FunctionOperator.BITOR || operator == FunctionOperator.BITXOR; -// } -// -// /** -// * Returns the set of function names. -// * -// * @return -// */ -// public Set getFunctionNames() { -// return functionNameMap.keySet(); -// } -// -// /** -// * Returns the function operator enum. The lookup is case insensitive. -// * (i.e. "Substring" --> TExprOperator.STRING_SUBSTR). -// * Returns INVALID_OP is that function name is unknown. 
-// */ -// public FunctionOperator getFunctionOperator(String fnName) { -// String lookup = fnName.toLowerCase(); -// if (functionNameMap.containsKey(lookup)) { -// return functionNameMap.get(lookup); -// } -// return FunctionOperator.INVALID_OPERATOR; -// } -// -// /** -// * Query for a function in the registry, specifying the operation, 'op', the arguments. -// * If there is no matching signature, null will be returned. -// * If there is a match, the matching signature will be returned. -// * If 'allowImplicitCasts' is true the matching signature does not have to match the -// * input identically, implicit type promotion is allowed. -// */ -// public BuiltinFunction getFunctionInfo( -// FunctionOperator op, boolean allowImplicitCasts, -// boolean vectorFunction, PrimitiveType... argTypes) { -// Pair lookup = Pair.create(op, argTypes.length); -// List> varArgMatchTypes = null; -// if (argTypes.length > 0) { -// Set maxResolutionTypes = getMaxResolutionTypes(argTypes); -// Preconditions.checkNotNull(maxResolutionTypes); -// varArgMatchTypes = Lists.newArrayList(); -// for (PrimitiveType maxResolutionType : maxResolutionTypes) { -// varArgMatchTypes.add(Pair.create(op, maxResolutionType)); -// } -// } -// List functions = null; -// if (operations.containsKey(lookup)) { -// functions = operations.get(lookup); -// } else if (!varArgMatchTypes.isEmpty()) { -// functions = Lists.newArrayList(); -// List matchedFunctions = null; -// for (Pair varArgsMatchType : varArgMatchTypes) { -// matchedFunctions = varArgOperations.get(varArgsMatchType); -// if (matchedFunctions != null) { -// functions.addAll(matchedFunctions); -// } -// } -// } -// -// if (functions == null || functions.isEmpty()) { -// // may be we can find from funByOp -// if (funcByOp.containsKey(op)) { -// functions = funcByOp.get(op); -// } else { -// return null; -// } -// } -// Type[] args = new Type[argTypes.length]; -// int i = 0; -// for (PrimitiveType type : argTypes) { -// args[i] = Type.fromPrimitiveType(type); -// i ++; -// } -// BuiltinFunction search = new BuiltinFunction(op, args); -// -// BuiltinFunction compatibleMatch = null; -// List compatibleMatchFunctions = Lists.newArrayList(); -// // We firstly choose functions using IS_SUBTYPE(only check cast-method is implemented), -// // if more than one functions are found, give priority to the assign-copatible one. -// for (BuiltinFunction function : functions) { -// if (function.compare(search, Function.CompareMode.IS_INDISTINGUISHABLE)) { -// if (vectorFunction == function.vectorFunction) { -// return function; -// } -// } else if (allowImplicitCasts -// && function.compare(search, Function.CompareMode.IS_SUPERTYPE_OF)) { -// if (vectorFunction == function.vectorFunction) { -// compatibleMatchFunctions.add(function); -// } -// } -// } -// -// // If there are many compatible functions, we priority to choose the non-loss-precision one. -// for (BuiltinFunction function : compatibleMatchFunctions) { -// if (function.compare(search, Function.CompareMode.IS_SUPERTYPE_OF)) { -// compatibleMatch = function; -// } else { -// LOG.info(" false {} {}", function.getReturnType(), function.getArgs()); -// } -// } -// if (compatibleMatch == null && compatibleMatchFunctions.size() > 0) { -// compatibleMatch = compatibleMatchFunctions.get(0); -// } -// -// return compatibleMatch; -// } -// -// /** -// * Returns the max resolution type for each argType that is not a NULL_TYPE. If all -// * argument types are NULL_TYPE then a set will be returned containing NULL_TYPE. 
-// */ -// private Set getMaxResolutionTypes(PrimitiveType[] argTypes) { -// Set maxResolutionTypes = Sets.newHashSet(); -// for (int i = 0; i < argTypes.length; ++i) { -// if (!argTypes[i].isNull()) { -// maxResolutionTypes.add(argTypes[i].getMaxResolutionType()); -// } -// } -// if (maxResolutionTypes.isEmpty()) { -// maxResolutionTypes.add(PrimitiveType.NULL_TYPE); -// } -// return maxResolutionTypes; -// } -// -// /** -// * Add a function with the specified opcode/signature to the registry. -// */ -// -// public boolean add(boolean udfInterface, boolean vectorFunction, FunctionOperator op, -// TExprOpcode opcode, boolean varArgs, PrimitiveType retType, PrimitiveType... args) { -// List functions; -// Pair lookup = Pair.create(op, args.length); -// // Take the last argument's type as the vararg type. -// Pair varArgsLookup = null; -// // Special signature for vararg functions to handle matching when all args are NULL. -// Pair varArgsNullLookup = null; -// Preconditions.checkArgument((varArgs) ? args.length > 0 : true); -// if (varArgs && args.length > 0) { -// varArgsLookup = Pair.create(op, args[args.length - 1].getMaxResolutionType()); -// varArgsNullLookup = Pair.create(op, PrimitiveType.NULL_TYPE); -// } -// if (operations.containsKey(lookup)) { -// functions = operations.get(lookup); -// } else if (varArgsLookup != null && varArgOperations.containsKey(varArgsLookup)) { -// functions = varArgOperations.get(varArgsLookup); -// } else { -// functions = new ArrayList(); -// if (varArgs) { -// varArgOperations.put(varArgsLookup, functions); -// varArgOperations.put(varArgsNullLookup, functions); -// } else { -// operations.put(lookup, functions); -// } -// } -// -// Type[] argsType = new Type[args.length]; -// int i = 0; -// for (PrimitiveType type : args) { -// argsType[i] = Type.fromPrimitiveType(type); -// i ++; -// } -// -// BuiltinFunction function = -// new BuiltinFunction(udfInterface, vectorFunction, opcode, op, varArgs, Type.fromPrimitiveType(retType), argsType); -// if (functions.contains(function)) { -// LOG.error("OpcodeRegistry: Function already exists: " + opcode); -// return false; -// } -// functions.add(function); -// -// // add to op map -// if (funcByOp.containsKey(op)) { -// functions = funcByOp.get(op); -// } else { -// functions = Lists.newArrayList(); -// funcByOp.put(op, functions); -// } -// functions.add(function); -// return true; -// } -// -// public boolean addFunctionMapping(String functionName, FunctionOperator op) { -// if (functionNameMap.containsKey(functionName)) { -// LOG.error("OpcodeRegistry: Function mapping already exists: " + functionName); -// return false; -// } -// functionNameMap.put(functionName, op); -// return true; -// } -// -// /** -// * Contains all the information about a builtin function. -// * TODO: merge with Function and Udf -// */ -// public static class BuiltinFunction extends Function { -// // If true, this builtin is implemented against the Udf interface. 
-// public final boolean udfInterface; -// public final boolean vectorFunction; -// public TExprOpcode opcode; -// public FunctionOperator operator; -// -// // Constructor for searching, specifying the op and arguments -// public BuiltinFunction(FunctionOperator operator, Type[] args) { -// super(new FunctionName(operator.toString()), args, Type.INVALID, false); -// this.operator = operator; -// this.udfInterface = false; -// this.vectorFunction = false; -// this.setBinaryType(TFunctionBinaryType.BUILTIN); -// } -// -// private BuiltinFunction(boolean udfInterface, boolean vectorFunction, TExprOpcode opcode, -// FunctionOperator operator, boolean varArgs, Type ret, Type[] args) { -// super(new FunctionName(opcode.toString()), args, ret, varArgs); -// this.operator = operator; -// this.opcode = opcode; -// this.udfInterface = udfInterface; -// this.vectorFunction = vectorFunction; -// this.setBinaryType(TFunctionBinaryType.BUILTIN); -// } -// } -} diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/OrderByElement.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/OrderByElement.java index c9d96ed043..39b8bd328d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/OrderByElement.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/OrderByElement.java @@ -60,11 +60,13 @@ public class OrderByElement { public Boolean getNullsFirstParam() { return nullsFirstParam; } + public OrderByElement clone() { OrderByElement clone = new OrderByElement( expr.clone(), isAsc, nullsFirstParam); return clone; } + /** * Returns a new list of OrderByElements with the same (cloned) expressions but the * ordering direction reversed (asc becomes desc, nulls first becomes nulls last, etc.) @@ -82,6 +84,7 @@ public class OrderByElement { return result; } + /** * Extracts the order-by exprs from the list of order-by elements and returns them. */ @@ -111,6 +114,7 @@ public class OrderByElement { return result; } + public String toSql() { StringBuilder strBuilder = new StringBuilder(); strBuilder.append(expr.toSql()); @@ -167,6 +171,7 @@ public class OrderByElement { OrderByElement o = (OrderByElement) obj; return expr.equals(o.expr) && isAsc == o.isAsc && nullsFirstParam == o.nullsFirstParam; } + /** * Compute nullsFirst. 
* diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/OutFileClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/OutFileClause.java index 8443949d35..7f8b1436a0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/OutFileClause.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/OutFileClause.java @@ -226,8 +226,8 @@ public class OutFileClause { switch (resultType.getPrimitiveType()) { case BOOLEAN: if (!type.equals("boolean")) { - throw new AnalysisException("project field type is BOOLEAN, should use boolean, but the type of column " - + i + " is " + type); + throw new AnalysisException("project field type is BOOLEAN, should use boolean," + + " but the type of column " + i + " is " + type); } break; case TINYINT: @@ -248,14 +248,14 @@ public class OutFileClause { break; case FLOAT: if (!type.equals("float")) { - throw new AnalysisException("project field type is FLOAT, should use float, but the definition type of column " - + i + " is " + type); + throw new AnalysisException("project field type is FLOAT, should use float," + + " but the definition type of column " + i + " is " + type); } break; case DOUBLE: if (!type.equals("double")) { - throw new AnalysisException("project field type is DOUBLE, should use double, but the definition type of column " - + i + " is " + type); + throw new AnalysisException("project field type is DOUBLE, should use double," + + " but the definition type of column " + i + " is " + type); } break; case CHAR: @@ -263,23 +263,26 @@ public class OutFileClause { case STRING: case DECIMALV2: if (!type.equals("byte_array")) { - throw new AnalysisException("project field type is CHAR/VARCHAR/STRING/DECIMAL, should use byte_array, " - + "but the definition type of column " + i + " is " + type); + throw new AnalysisException("project field type is CHAR/VARCHAR/STRING/DECIMAL," + + " should use byte_array, but the definition type of column " + i + " is " + type); } break; case HLL: case BITMAP: - if (ConnectContext.get() != null && ConnectContext.get().getSessionVariable().isReturnObjectDataAsBinary()) { + if (ConnectContext.get() != null && ConnectContext.get() + .getSessionVariable().isReturnObjectDataAsBinary()) { if (!type.equals("byte_array")) { throw new AnalysisException("project field type is HLL/BITMAP, should use byte_array, " + "but the definition type of column " + i + " is " + type); } } else { - throw new AnalysisException("Parquet format does not support column type: " + resultType.getPrimitiveType()); + throw new AnalysisException("Parquet format does not support column type: " + + resultType.getPrimitiveType()); } break; default: - throw new AnalysisException("Parquet format does not support column type: " + resultType.getPrimitiveType()); + throw new AnalysisException("Parquet format does not support column type: " + + resultType.getPrimitiveType()); } } } @@ -318,12 +321,14 @@ public class OutFileClause { break; case HLL: case BITMAP: - if (ConnectContext.get() != null && ConnectContext.get().getSessionVariable().isReturnObjectDataAsBinary()) { + if (ConnectContext.get() != null && ConnectContext.get() + .getSessionVariable().isReturnObjectDataAsBinary()) { column.add("byte_array"); } break; default: - throw new AnalysisException("currently parquet do not support column type: " + expr.getType().getPrimitiveType()); + throw new AnalysisException("currently parquet do not support column type: " + + expr.getType().getPrimitiveType()); } column.add("col" + i); this.schema.add(column); @@ -338,7 +343,8 @@ public 
class OutFileClause { if (filePath.startsWith(LOCAL_FILE_PREFIX)) { if (!Config.enable_outfile_to_local) { throw new AnalysisException("Exporting results to local disk is not allowed." - + " To enable this feature, you need to add `enable_outfile_to_local=true` in fe.conf and restart FE"); + + " To enable this feature, you need to add `enable_outfile_to_local=true`" + + " in fe.conf and restart FE"); } isLocalOutput = true; filePath = filePath.substring(LOCAL_FILE_PREFIX.length() - 1); // leave last '/' @@ -433,11 +439,12 @@ public class OutFileClause { brokerProps.put(entry.getKey(), entry.getValue()); processedPropKeys.add(entry.getKey()); } else if (entry.getKey().contains(BrokerUtil.HADOOP_FS_NAME) - && storageType == StorageBackend.StorageType.HDFS) { + && storageType == StorageBackend.StorageType.HDFS) { brokerProps.put(entry.getKey(), entry.getValue()); processedPropKeys.add(entry.getKey()); - } else if ((entry.getKey().startsWith(HADOOP_FS_PROP_PREFIX) || entry.getKey().startsWith(HADOOP_PROP_PREFIX)) - && storageType == StorageBackend.StorageType.HDFS) { + } else if ((entry.getKey().startsWith(HADOOP_FS_PROP_PREFIX) + || entry.getKey().startsWith(HADOOP_PROP_PREFIX)) + && storageType == StorageBackend.StorageType.HDFS) { brokerProps.put(entry.getKey(), entry.getValue()); processedPropKeys.add(entry.getKey()); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/Predicate.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/Predicate.java index c34e9f7728..1c2f87b4f5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/Predicate.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/Predicate.java @@ -119,14 +119,15 @@ public abstract class Predicate extends Expr { Preconditions.checkState(right != null); // ATTN(cmy): Usually, the BinaryPredicate in the query will be rewritten through ExprRewriteRule, - // and all SingleColumnPredicate will be rewritten as "column on the left and the constant on the right". + // and all SingleColumnPredicate will be rewritten as "column on the left and the constant on the right" // So usually the right child is constant. // // But if there is a subquery in where clause, the planner will equal the subquery to join. // During the equal, some auxiliary BinaryPredicate will be automatically generated, // and these BinaryPredicates will not go through ExprRewriteRule. // As a result, these BinaryPredicates may be as "column on the right and the constant on the left". - // Example can be found in QueryPlanTest.java -> testJoinPredicateTransitivityWithSubqueryInWhereClause(). + // Example can be found in QueryPlanTest.java + // -> testJoinPredicateTransitivityWithSubqueryInWhereClause(). // // Because our current planner implementation is very error-prone, so when this happens, // we simply assume that these kind of BinaryPredicates cannot be pushed down, diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/QueryStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/QueryStmt.java index 5ca6044214..9e79548e6d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/QueryStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/QueryStmt.java @@ -51,7 +51,7 @@ import java.util.stream.Collectors; * analysis of the ORDER BY and LIMIT clauses. 
*/ public abstract class QueryStmt extends StatementBase { - private final static Logger LOG = LogManager.getLogger(QueryStmt.class); + private static final Logger LOG = LogManager.getLogger(QueryStmt.class); ///////////////////////////////////////// // BEGIN: Members that need to be reset() @@ -455,7 +455,8 @@ public abstract class QueryStmt extends StatementBase { return resultExprs.get((int) pos - 1).clone(); } - public void getWithClauseTables(Analyzer analyzer, Map tableMap, Set parentViewNameSet) throws AnalysisException { + public void getWithClauseTables(Analyzer analyzer, Map tableMap, + Set parentViewNameSet) throws AnalysisException { if (withClause != null) { withClause.getTables(analyzer, tableMap, parentViewNameSet); } @@ -532,8 +533,10 @@ public abstract class QueryStmt extends StatementBase { // "select a.siteid, b.citycode, a.siteid from (select siteid, citycode from tmp) a " + // "left join (select siteid, citycode from tmp) b on a.siteid = b.siteid;"; // tmp in child stmt "(select siteid, citycode from tmp)" do not contain with_Clause - // so need to check is view name by parentViewNameSet. issue link: https://github.com/apache/incubator-doris/issues/4598 - public abstract void getTables(Analyzer analyzer, Map tables, Set parentViewNameSet) throws AnalysisException; + // so need to check is view name by parentViewNameSet. + // issue link: https://github.com/apache/incubator-doris/issues/4598 + public abstract void getTables(Analyzer analyzer, Map tables, Set parentViewNameSet) + throws AnalysisException; // get TableRefs in this query, including physical TableRefs of this statement and // nested statements of inline views and with_Clause. diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverDbStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverDbStmt.java index 5be7c6bf24..05ccbf1fba 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverDbStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverDbStmt.java @@ -51,11 +51,10 @@ public class RecoverDbStmt extends DdlStmt { dbName = ClusterNamespace.getFullName(getClusterName(), dbName); if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), dbName, - PrivPredicate.of(PrivBitSet.of(PaloPrivilege.ALTER_PRIV, - PaloPrivilege.CREATE_PRIV, - PaloPrivilege.ADMIN_PRIV), - Operator.OR))) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName); + PrivPredicate.of(PrivBitSet.of( + PaloPrivilege.ALTER_PRIV, PaloPrivilege.CREATE_PRIV, PaloPrivilege.ADMIN_PRIV), Operator.OR))) { + ErrorReport.reportAnalysisException( + ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverPartitionStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverPartitionStmt.java index eb1ef63eb2..312cd8129c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverPartitionStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverPartitionStmt.java @@ -55,11 +55,8 @@ public class RecoverPartitionStmt extends DdlStmt { public void analyze(Analyzer analyzer) throws AnalysisException, UserException { dbTblName.analyze(analyzer); if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), dbTblName.getDb(), - dbTblName.getTbl(), - PrivPredicate.of(PrivBitSet.of(PaloPrivilege.ALTER_PRIV, - PaloPrivilege.CREATE_PRIV, - PaloPrivilege.ADMIN_PRIV), - Operator.OR))) { + 
dbTblName.getTbl(), PrivPredicate.of(PrivBitSet.of( + PaloPrivilege.ALTER_PRIV, PaloPrivilege.CREATE_PRIV, PaloPrivilege.ADMIN_PRIV), Operator.OR))) { ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR, "RECOVERY", ConnectContext.get().getQualifiedUser(), ConnectContext.get().getRemoteIP(), diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverTableStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverTableStmt.java index 5928b5fa44..dbe24f9e19 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverTableStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverTableStmt.java @@ -49,12 +49,10 @@ public class RecoverTableStmt extends DdlStmt { public void analyze(Analyzer analyzer) throws AnalysisException, UserException { dbTblName.analyze(analyzer); - if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), dbTblName.getDb(), - dbTblName.getTbl(), - PrivPredicate.of(PrivBitSet.of(PaloPrivilege.ALTER_PRIV, - PaloPrivilege.CREATE_PRIV, - PaloPrivilege.ADMIN_PRIV), - Operator.OR))) { + if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv( + ConnectContext.get(), dbTblName.getDb(), dbTblName.getTbl(), PrivPredicate.of( + PrivBitSet.of(PaloPrivilege.ALTER_PRIV, PaloPrivilege.CREATE_PRIV, PaloPrivilege.ADMIN_PRIV), + Operator.OR))) { ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR, "RECOVERY", ConnectContext.get().getQualifiedUser(), ConnectContext.get().getRemoteIP(), diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RefreshDbStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RefreshDbStmt.java index 66a4cc9ee8..ff70985fe7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/RefreshDbStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/RefreshDbStmt.java @@ -57,7 +57,8 @@ public class RefreshDbStmt extends DdlStmt { // Don't allow dropping 'information_schema' database if (dbName.equalsIgnoreCase(ClusterNamespace.getFullName(getClusterName(), InfoSchemaDb.DATABASE_NAME))) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName); + ErrorReport.reportAnalysisException( + ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName); } // check access if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), dbName, PrivPredicate.DROP)) { @@ -65,7 +66,8 @@ public class RefreshDbStmt extends DdlStmt { ConnectContext.get().getQualifiedUser(), dbName); } if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), dbName, PrivPredicate.CREATE)) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName); + ErrorReport.reportAnalysisException( + ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ReplacePartitionClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ReplacePartitionClause.java index 666f1c0b18..6d7f88c089 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ReplacePartitionClause.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ReplacePartitionClause.java @@ -44,8 +44,10 @@ public class ReplacePartitionClause extends AlterTableClause { // Otherwise, the replaced partition's name will be the temp partitions name. // This parameter is valid only when the number of partitions is the same as the number of temp partitions. 
// For example: - // 1. REPLACE PARTITION (p1, p2, p3) WITH TEMPORARY PARTITION(tp1, tp2) PROPERTIES("use_temp_partition_name" = "false"); - // "use_temp_partition_name" will take no effect after replacing, and the partition names will be "tp1" and "tp2". + // 1. REPLACE PARTITION (p1, p2, p3) WITH TEMPORARY PARTITION(tp1, tp2) + // PROPERTIES("use_temp_partition_name" = "false"); + // "use_temp_partition_name" will take no effect after replacing, + // and the partition names will be "tp1" and "tp2". // // 2. REPLACE PARTITION (p1, p2) WITH TEMPORARY PARTITION(tp1, tp2) PROPERTIES("use_temp_partition_name" = "false"); // alter replacing, the partition names will be "p1" and "p2". @@ -90,7 +92,8 @@ public class ReplacePartitionClause extends AlterTableClause { throw new AnalysisException("Only support replace partitions with temp partitions"); } - this.isStrictRange = PropertyAnalyzer.analyzeBooleanProp(properties, PropertyAnalyzer.PROPERTIES_STRICT_RANGE, true); + this.isStrictRange = PropertyAnalyzer.analyzeBooleanProp( + properties, PropertyAnalyzer.PROPERTIES_STRICT_RANGE, true); this.useTempPartitionName = PropertyAnalyzer.analyzeBooleanProp(properties, PropertyAnalyzer.PROPERTIES_USE_TEMP_PARTITION_NAME, false); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ResourcePattern.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ResourcePattern.java index 059f4bfa6a..19771f68b5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ResourcePattern.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ResourcePattern.java @@ -39,6 +39,7 @@ public class ResourcePattern implements Writable { private String resourceName; public static ResourcePattern ALL; + static { ALL = new ResourcePattern("*"); try { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RestoreStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RestoreStmt.java index 6a0bfe6f53..cb9b4e3bb4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/RestoreStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/RestoreStmt.java @@ -32,10 +32,10 @@ import java.util.Map; import java.util.Set; public class RestoreStmt extends AbstractBackupStmt { - private final static String PROP_ALLOW_LOAD = "allow_load"; - private final static String PROP_REPLICATION_NUM = "replication_num"; - private final static String PROP_BACKUP_TIMESTAMP = "backup_timestamp"; - private final static String PROP_META_VERSION = "meta_version"; + private static final String PROP_ALLOW_LOAD = "allow_load"; + private static final String PROP_REPLICATION_NUM = "replication_num"; + private static final String PROP_BACKUP_TIMESTAMP = "backup_timestamp"; + private static final String PROP_META_VERSION = "meta_version"; private boolean allowLoad = false; private ReplicaAllocation replicaAlloc = ReplicaAllocation.DEFAULT_ALLOCATION; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RevokeStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RevokeStmt.java index 05e229d1a6..c84f490ddf 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/RevokeStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/RevokeStmt.java @@ -54,7 +54,8 @@ public class RevokeStmt extends DdlStmt { this.privileges = privs.toPrivilegeList(); } - public RevokeStmt(UserIdentity userIdent, String role, ResourcePattern resourcePattern, List privileges) { + public RevokeStmt(UserIdentity userIdent, String role, + ResourcePattern resourcePattern, List privileges) { 
this.userIdent = userIdent; this.role = role; this.tblPattern = null; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RoutineLoadDataSourceProperties.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RoutineLoadDataSourceProperties.java index d7dbfab53e..26d335a18e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/RoutineLoadDataSourceProperties.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/RoutineLoadDataSourceProperties.java @@ -49,7 +49,8 @@ public class RoutineLoadDataSourceProperties { .add(CreateRoutineLoadStmt.KAFKA_DEFAULT_OFFSETS) .build(); - private static final ImmutableSet CONFIGURABLE_DATA_SOURCE_PROPERTIES_SET = new ImmutableSet.Builder() + private static final ImmutableSet CONFIGURABLE_DATA_SOURCE_PROPERTIES_SET + = new ImmutableSet.Builder() .add(CreateRoutineLoadStmt.KAFKA_BROKER_LIST_PROPERTY) .add(CreateRoutineLoadStmt.KAFKA_TOPIC_PROPERTY) .add(CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY) @@ -160,7 +161,8 @@ public class RoutineLoadDataSourceProperties { * 4. other properties start with "property." */ private void checkKafkaProperties() throws UserException { - ImmutableSet propertySet = isAlter ? CONFIGURABLE_DATA_SOURCE_PROPERTIES_SET : DATA_SOURCE_PROPERTIES_SET; + ImmutableSet propertySet = isAlter + ? CONFIGURABLE_DATA_SOURCE_PROPERTIES_SET : DATA_SOURCE_PROPERTIES_SET; Optional optional = properties.keySet().stream() .filter(entity -> !propertySet.contains(entity)) .filter(entity -> !entity.startsWith("property.")) @@ -170,7 +172,8 @@ public class RoutineLoadDataSourceProperties { } // check broker list - kafkaBrokerList = Strings.nullToEmpty(properties.get(CreateRoutineLoadStmt.KAFKA_BROKER_LIST_PROPERTY)).replaceAll(" ", ""); + kafkaBrokerList = Strings.nullToEmpty(properties.get(CreateRoutineLoadStmt.KAFKA_BROKER_LIST_PROPERTY)) + .replaceAll(" ", ""); if (!isAlter && Strings.isNullOrEmpty(kafkaBrokerList)) { throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_BROKER_LIST_PROPERTY + " is a required property"); } @@ -185,7 +188,8 @@ public class RoutineLoadDataSourceProperties { } // check topic - kafkaTopic = Strings.nullToEmpty(properties.get(CreateRoutineLoadStmt.KAFKA_TOPIC_PROPERTY)).replaceAll(" ", ""); + kafkaTopic = Strings.nullToEmpty(properties.get(CreateRoutineLoadStmt.KAFKA_TOPIC_PROPERTY)) + .replaceAll(" ", ""); if (!isAlter && Strings.isNullOrEmpty(kafkaTopic)) { throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_TOPIC_PROPERTY + " is a required property"); } @@ -223,20 +227,23 @@ public class RoutineLoadDataSourceProperties { throw new AnalysisException("Only one of " + CreateRoutineLoadStmt.KAFKA_OFFSETS_PROPERTY + " and " + CreateRoutineLoadStmt.KAFKA_DEFAULT_OFFSETS + " can be set."); } - if (isAlter && kafkaPartitionsString != null && kafkaOffsetsString == null && kafkaDefaultOffsetString == null) { + if (isAlter && kafkaPartitionsString != null + && kafkaOffsetsString == null && kafkaDefaultOffsetString == null) { // if this is an alter operation, the partition and (default)offset must be set together. throw new AnalysisException("Must set offset or default offset with partition property"); } if (kafkaOffsetsString != null) { - this.isOffsetsForTimes = analyzeKafkaOffsetProperty(kafkaOffsetsString, this.kafkaPartitionOffsets, this.timezone); + this.isOffsetsForTimes = analyzeKafkaOffsetProperty(kafkaOffsetsString, + this.kafkaPartitionOffsets, this.timezone); } else { // offset is not set, check default offset. 
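As an aside to the `checkKafkaProperties` hunk above: the rule it enforces is that every data source property key must either be one of the known keys or start with `property.`. A minimal standalone sketch of that rule is below; the literal key strings are assumptions standing in for the `CreateRoutineLoadStmt` constants, and plain `java.util` sets replace the Guava builders used in the patch.
```java
import java.util.Map;
import java.util.Optional;
import java.util.Set;

// Illustrative only: "known keys, or keys prefixed with 'property.'" check,
// not the Doris implementation.
public class DataSourcePropertyCheckSketch {
    // Assumed stand-ins for the CreateRoutineLoadStmt.*_PROPERTY constants.
    private static final Set<String> KNOWN_KEYS =
            Set.of("kafka_broker_list", "kafka_topic", "kafka_partitions", "kafka_offsets");

    static void check(Map<String, String> properties) {
        Optional<String> unknown = properties.keySet().stream()
                .filter(key -> !KNOWN_KEYS.contains(key))
                .filter(key -> !key.startsWith("property."))
                .findFirst();
        if (unknown.isPresent()) {
            throw new IllegalArgumentException("Invalid kafka custom property: " + unknown.get());
        }
    }
}
```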
this.isOffsetsForTimes = analyzeKafkaDefaultOffsetProperty(this.customKafkaProperties, this.timezone); if (!this.kafkaPartitionOffsets.isEmpty()) { // Case C kafkaDefaultOffsetString = customKafkaProperties.get(CreateRoutineLoadStmt.KAFKA_DEFAULT_OFFSETS); - setDefaultOffsetForPartition(this.kafkaPartitionOffsets, kafkaDefaultOffsetString, this.isOffsetsForTimes); + setDefaultOffsetForPartition(this.kafkaPartitionOffsets, + kafkaDefaultOffsetString, this.isOffsetsForTimes); } } } @@ -259,10 +266,12 @@ public class RoutineLoadDataSourceProperties { } // If the default offset is not set, set the default offset to OFFSET_END. - // If the offset is in datetime format, convert it to a timestamp, and also save the origin datatime formatted offset + // If the offset is in datetime format, convert it to a timestamp, + // and also save the origin datatime formatted offset // in "customKafkaProperties" // return true if the offset is in datetime format. - private static boolean analyzeKafkaDefaultOffsetProperty(Map customKafkaProperties, String timeZoneStr) + private static boolean analyzeKafkaDefaultOffsetProperty( + Map customKafkaProperties, String timeZoneStr) throws AnalysisException { customKafkaProperties.putIfAbsent(CreateRoutineLoadStmt.KAFKA_DEFAULT_OFFSETS, KafkaProgress.OFFSET_END); String defaultOffsetStr = customKafkaProperties.get(CreateRoutineLoadStmt.KAFKA_DEFAULT_OFFSETS); @@ -275,8 +284,10 @@ public class RoutineLoadDataSourceProperties { customKafkaProperties.put(CreateRoutineLoadStmt.KAFKA_ORIGIN_DEFAULT_OFFSETS, defaultOffsetStr); return true; } else { - if (!defaultOffsetStr.equalsIgnoreCase(KafkaProgress.OFFSET_BEGINNING) && !defaultOffsetStr.equalsIgnoreCase(KafkaProgress.OFFSET_END)) { - throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_DEFAULT_OFFSETS + " can only be set to OFFSET_BEGINNING, OFFSET_END or date time"); + if (!defaultOffsetStr.equalsIgnoreCase(KafkaProgress.OFFSET_BEGINNING) + && !defaultOffsetStr.equalsIgnoreCase(KafkaProgress.OFFSET_END)) { + throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_DEFAULT_OFFSETS + + " can only be set to OFFSET_BEGINNING, OFFSET_END or date time"); } return false; } @@ -285,16 +296,17 @@ public class RoutineLoadDataSourceProperties { // init "kafkaPartitionOffsets" with partition property. // The offset will be set to OFFSET_END for now, and will be changed in later analysis process. 
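The comments above describe how a default Kafka offset string is resolved: a datetime value is converted to a timestamp in the job's timezone, and anything else must be `OFFSET_BEGINNING` or `OFFSET_END`. A minimal standalone sketch of that decision follows, using `java.time` for the conversion; the helper name, the assumed datetime pattern, and the sentinel return values are illustrative only, not the `KafkaProgress` constants.
```java
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;

// Illustrative sketch of the default-offset rule described above.
public class DefaultOffsetSketch {
    private static final DateTimeFormatter FMT = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");

    // Returns a millisecond timestamp for datetime offsets, or sentinel values
    // (-1 = end, -2 = beginning) for the two keyword offsets; rejects anything else.
    static long resolveDefaultOffset(String value, String timeZone) {
        try {
            LocalDateTime dt = LocalDateTime.parse(value, FMT);
            return dt.atZone(ZoneId.of(timeZone)).toInstant().toEpochMilli();
        } catch (DateTimeParseException ignored) {
            // not a datetime; fall through to the keyword check
        }
        if (value.equalsIgnoreCase("OFFSET_END")) {
            return -1L;
        }
        if (value.equalsIgnoreCase("OFFSET_BEGINNING")) {
            return -2L;
        }
        throw new IllegalArgumentException(
                "default offset can only be OFFSET_BEGINNING, OFFSET_END or a date time");
    }
}
```
For example, `resolveDefaultOffset("2022-01-01 00:00:00", "Asia/Shanghai")` yields a timestamp, while an arbitrary string is rejected.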
private static void analyzeKafkaPartitionProperty(String kafkaPartitionsString, - List> kafkaPartitionOffsets) throws AnalysisException { + List> kafkaPartitionOffsets) throws AnalysisException { kafkaPartitionsString = kafkaPartitionsString.replaceAll(" ", ""); if (kafkaPartitionsString.isEmpty()) { - throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY + " could not be a empty string"); + throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY + + " could not be a empty string"); } String[] kafkaPartitionsStringList = kafkaPartitionsString.split(","); for (String s : kafkaPartitionsStringList) { try { - kafkaPartitionOffsets.add(Pair.create(getIntegerValueFromString(s, CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY), - KafkaProgress.OFFSET_END_VAL)); + kafkaPartitionOffsets.add(Pair.create(getIntegerValueFromString( + s, CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY), KafkaProgress.OFFSET_END_VAL)); } catch (AnalysisException e) { throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY + " must be a number string with comma-separated"); @@ -304,8 +316,8 @@ public class RoutineLoadDataSourceProperties { // Fill the partition's offset with given kafkaOffsetsString, // Return true if offset is specified by timestamp. - private static boolean analyzeKafkaOffsetProperty(String kafkaOffsetsString, List> kafkaPartitionOffsets, - String timeZoneStr) + private static boolean analyzeKafkaOffsetProperty(String kafkaOffsetsString, + List> kafkaPartitionOffsets, String timeZoneStr) throws UserException { if (Strings.isNullOrEmpty(kafkaOffsetsString)) { throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_OFFSETS_PROPERTY + " could not be a empty string"); @@ -354,7 +366,8 @@ public class RoutineLoadDataSourceProperties { } else if (NumberUtils.isDigits(kafkaOffsetsStr)) { kafkaPartitionOffsets.get(i).second = Long.valueOf(NumberUtils.toLong(kafkaOffsetsStr)); } else { - throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_OFFSETS_PROPERTY + " must be an integer or a date time"); + throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_OFFSETS_PROPERTY + + " must be an integer or a date time"); } } } @@ -368,7 +381,7 @@ public class RoutineLoadDataSourceProperties { if (dataSourceProperty.getKey().startsWith("property.")) { String propertyKey = dataSourceProperty.getKey(); String propertyValue = dataSourceProperty.getValue(); - String propertyValueArr[] = propertyKey.split("\\."); + String[] propertyValueArr = propertyKey.split("\\."); if (propertyValueArr.length < 2) { throw new AnalysisException("kafka property value could not be a empty string"); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SchemaTableType.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SchemaTableType.java index 2eb892ed1c..127285bcb5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SchemaTableType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SchemaTableType.java @@ -72,8 +72,9 @@ public enum SchemaTableType { fullSelectLists = new SelectList(); fullSelectLists.addItem(SelectListItem.createStarItem(null)); } - private final String description; - private final String tableName; + + private final String description; + private final String tableName; private final TSchemaTableType tableType; SchemaTableType(String description, String tableName, TSchemaTableType tableType) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectListItem.java 
b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectListItem.java index db2e688b6a..643da0095a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectListItem.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectListItem.java @@ -62,7 +62,7 @@ public class SelectListItem { } // select list item corresponding to "[[db.]tbl.]*" - static public SelectListItem createStarItem(TableName tblName) { + public static SelectListItem createStarItem(TableName tblName) { return new SelectListItem(tblName); } @@ -115,6 +115,7 @@ public class SelectListItem { return "*"; } } + /** * Return a column label for the select list item. */ diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java index e9ae2aa66e..ca450021b3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java @@ -71,7 +71,7 @@ import java.util.stream.Collectors; * clauses. */ public class SelectStmt extends QueryStmt { - private final static Logger LOG = LogManager.getLogger(SelectStmt.class); + private static final Logger LOG = LogManager.getLogger(SelectStmt.class); private UUID id = UUID.randomUUID(); // /////////////////////////////////////// @@ -291,7 +291,8 @@ public class SelectStmt extends QueryStmt { } @Override - public void getTables(Analyzer analyzer, Map tableMap, Set parentViewNameSet) throws AnalysisException { + public void getTables(Analyzer analyzer, Map tableMap, + Set parentViewNameSet) throws AnalysisException { getWithClauseTables(analyzer, tableMap, parentViewNameSet); for (TableRef tblRef : fromClause) { if (tblRef instanceof InlineViewRef) { @@ -962,7 +963,8 @@ public class SelectStmt extends QueryStmt { if (groupByClause == null && !selectList.isDistinct() && !TreeNode.contains(resultExprs, Expr.isAggregatePredicate()) - && (havingClauseAfterAnaylzed == null || !havingClauseAfterAnaylzed.contains(Expr.isAggregatePredicate())) + && (havingClauseAfterAnaylzed == null || !havingClauseAfterAnaylzed.contains( + Expr.isAggregatePredicate())) && (sortInfo == null || !TreeNode.contains(sortInfo.getOrderingExprs(), Expr.isAggregatePredicate()))) { // We're not computing aggregates but we still need to register the HAVING @@ -993,7 +995,8 @@ public class SelectStmt extends QueryStmt { if (selectList.isDistinct() && (groupByClause != null || TreeNode.contains(resultExprs, Expr.isAggregatePredicate()) - || (havingClauseAfterAnaylzed != null && havingClauseAfterAnaylzed.contains(Expr.isAggregatePredicate())))) { + || (havingClauseAfterAnaylzed != null && havingClauseAfterAnaylzed.contains( + Expr.isAggregatePredicate())))) { throw new AnalysisException("cannot combine SELECT DISTINCT with aggregate functions or GROUP BY"); } @@ -1063,7 +1066,8 @@ public class SelectStmt extends QueryStmt { ? aggInfo.getSecondPhaseDistinctAggInfo() : aggInfo; groupingByTupleIds.add(finalAggInfo.getOutputTupleId()); - ExprSubstitutionMap combinedSmap = ExprSubstitutionMap.compose(countAllMap, finalAggInfo.getOutputSmap(), analyzer); + ExprSubstitutionMap combinedSmap = ExprSubstitutionMap.compose( + countAllMap, finalAggInfo.getOutputSmap(), analyzer); // change select list, having and ordering exprs to point to agg output. We need // to reanalyze the exprs at this point. 
if (LOG.isDebugEnabled()) { @@ -1084,7 +1088,8 @@ public class SelectStmt extends QueryStmt { } /* - * All of columns of result and having clause are replaced by new slot ref which is bound by top tuple of agg info. + * All of columns of result and having clause are replaced by new slot ref + * which is bound by top tuple of agg info. * For example: * ResultExprs: SlotRef(k1), FunctionCall(sum(SlotRef(k2))) * Having predicate: FunctionCall(sum(SlotRef(k2))) > subquery @@ -1448,9 +1453,9 @@ public class SelectStmt extends QueryStmt { * Aliases information of groupBy and orderBy clauses is recorded in `QueryStmt.aliasSMap`. * The select clause has its own alias info in `SelectListItem.alias`. * - * Aliases expr in the `group by` and `order by` clauses are not analyzed, i.e. `Expr.isAnalyzed=false` - * Subsequent constant folding will analyze the unanalyzed Expr before collecting the constant - * expressions, preventing the `INVALID_TYPE` expr from being sent to BE. + * Aliases expr in the `group by` and `order by` clauses are not analyzed, + * i.e. `Expr.isAnalyzed=false`. Subsequent constant folding will analyze the unanalyzed Expr before + * collecting the constant expressions, preventing the `INVALID_TYPE` expr from being sent to BE. * * But when analyzing the alias, the meta information corresponding to the slot cannot be found * in the catalog, an error will be reported. diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SetOperationStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SetOperationStmt.java index 2062bf26df..21968fd19c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SetOperationStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SetOperationStmt.java @@ -48,7 +48,7 @@ import java.util.Set; * and we need to mark the slots of resolved exprs as materialized. */ public class SetOperationStmt extends QueryStmt { - private final static Logger LOG = LogManager.getLogger(SetOperationStmt.class); + private static final Logger LOG = LogManager.getLogger(SetOperationStmt.class); public enum Operation { UNION, @@ -213,7 +213,8 @@ public class SetOperationStmt extends QueryStmt { } @Override - public void getTables(Analyzer analyzer, Map tableMap, Set parentViewNameSet) throws AnalysisException { + public void getTables(Analyzer analyzer, Map tableMap, Set parentViewNameSet) + throws AnalysisException { getWithClauseTables(analyzer, tableMap, parentViewNameSet); for (SetOperand op : operands) { op.getQueryStmt().getTables(analyzer, tableMap, parentViewNameSet); @@ -886,6 +887,7 @@ public class SetOperationStmt extends QueryStmt { public Operation getOperation() { return operation; } + // Used for propagating DISTINCT. 
public void setQualifier(Qualifier qualifier) { this.qualifier = qualifier; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SetVar.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SetVar.java index 3b1eeec8e9..b753a3ffc1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SetVar.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SetVar.java @@ -132,7 +132,8 @@ public class SetVar { if (getVariable().equalsIgnoreCase(SessionVariable.PREFER_JOIN_METHOD)) { String value = getValue().getStringValue(); if (!value.equalsIgnoreCase("broadcast") && !value.equalsIgnoreCase("shuffle")) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_WRONG_VALUE_FOR_VAR, SessionVariable.PREFER_JOIN_METHOD, value); + ErrorReport.reportAnalysisException(ErrorCode.ERR_WRONG_VALUE_FOR_VAR, + SessionVariable.PREFER_JOIN_METHOD, value); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java index 8bfc5ed59d..1b4dc7f4ac 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java @@ -46,7 +46,8 @@ import java.util.List; /* * ShowAlterStmt: used to show process state of alter statement. * Syntax: - * SHOW ALTER TABLE [COLUMN | ROLLUP] [FROM dbName] [WHERE TableName="xxx"] [ORDER BY CreateTime DESC] [LIMIT [offset,]rows] + * SHOW ALTER TABLE [COLUMN | ROLLUP] [FROM dbName] [WHERE TableName="xxx"] + * [ORDER BY CreateTime DESC] [LIMIT [offset,]rows] */ public class ShowAlterStmt extends ShowStmt { private static final Logger LOG = LogManager.getLogger(ShowAlterStmt.class); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackupStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackupStmt.java index eec29684d1..6e58296c64 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackupStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackupStmt.java @@ -177,9 +177,11 @@ public class ShowBackupStmt extends ShowStmt { return label -> true; } if (isAccurateMatch) { - return CaseSensibility.LABEL.getCaseSensibility() ? label -> label.equals(labelValue) : label -> label.equalsIgnoreCase(labelValue); + return CaseSensibility.LABEL.getCaseSensibility() + ? 
label -> label.equals(labelValue) : label -> label.equalsIgnoreCase(labelValue); } else { - PatternMatcher patternMatcher = PatternMatcher.createMysqlPattern(labelValue, CaseSensibility.LABEL.getCaseSensibility()); + PatternMatcher patternMatcher = PatternMatcher.createMysqlPattern( + labelValue, CaseSensibility.LABEL.getCaseSensibility()); return patternMatcher::match; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowClusterStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowClusterStmt.java index 0339c4f5ac..57b8766b4a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowClusterStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowClusterStmt.java @@ -59,9 +59,7 @@ public class ShowClusterStmt extends ShowStmt { @Override public void analyze(Analyzer analyzer) throws AnalysisException { if (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), - PrivPredicate.of(PrivBitSet.of(PaloPrivilege.ADMIN_PRIV, - PaloPrivilege.NODE_PRIV), - Operator.OR))) { + PrivPredicate.of(PrivBitSet.of(PaloPrivilege.ADMIN_PRIV, PaloPrivilege.NODE_PRIV), Operator.OR))) { ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "ADMIN"); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowExportStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowExportStmt.java index 481550ab29..d826da8340 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowExportStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowExportStmt.java @@ -164,7 +164,8 @@ public class ShowExportStmt extends ShowStmt { valid = true; } - } else if (whereExpr instanceof LikePredicate && ((LikePredicate) whereExpr).getOp() == LikePredicate.Operator.LIKE) { + } else if (whereExpr instanceof LikePredicate + && ((LikePredicate) whereExpr).getOp() == LikePredicate.Operator.LIKE) { if ("label".equals(leftKey) && whereExpr.getChild(1) instanceof StringLiteral) { label = whereExpr.getChild(1).getStringValue(); isLabelUseLike = true; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowGrantsStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowGrantsStmt.java index 197f61df59..fa765da9e4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowGrantsStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowGrantsStmt.java @@ -46,6 +46,7 @@ import com.google.common.base.Preconditions; public class ShowGrantsStmt extends ShowStmt { private static final ShowResultSetMetaData META_DATA; + static { ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); for (String col : AuthProcDir.TITLE_NAMES) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowIndexStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowIndexStmt.java index 58c35a9284..82616d3262 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowIndexStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowIndexStmt.java @@ -69,8 +69,8 @@ public class ShowIndexStmt extends ShowStmt { } tableName.analyze(analyzer); - if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), tableName.getDb(), tableName.getTbl(), - PrivPredicate.SHOW)) { + if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv( + ConnectContext.get(), tableName.getDb(), tableName.getTbl(), PrivPredicate.SHOW)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), 
tableName.getDb() + ": " + tableName.toString()); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowLoadProfileStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowLoadProfileStmt.java index 50f8fcc28b..a30ef18b9f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowLoadProfileStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowLoadProfileStmt.java @@ -29,7 +29,7 @@ import com.google.common.base.Strings; // show load profile "/"; # list all saving load job ids // show load profile "/10014" # show task ids of specified job // show load profile "/10014/e0f7390f5363419e-b416a2a79996083e/" # show instance list of the task -// show load profile "/10014/e0f7390f5363419e-b416a2a79996083e/e0f7390f5363419e-b416a2a799960906" # show instance tree graph +// show load profile "/10014/e0f7390f5363419e-b416a2a79996083e/e0f7390f5363419e-b416a2a7999" # show instance's graph public class ShowLoadProfileStmt extends ShowStmt { private static final ShowResultSetMetaData META_DATA_TASK_IDS = ShowResultSetMetaData.builder() diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowPolicyStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowPolicyStmt.java index c7b84bcef7..81b9f294dc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowPolicyStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowPolicyStmt.java @@ -18,8 +18,6 @@ package org.apache.doris.analysis; import org.apache.doris.catalog.Catalog; -import org.apache.doris.catalog.Column; -import org.apache.doris.catalog.ScalarType; import org.apache.doris.common.ErrorCode; import org.apache.doris.common.ErrorReport; import org.apache.doris.common.UserException; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowQueryProfileStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowQueryProfileStmt.java index 7c0994c434..15be652aed 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowQueryProfileStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowQueryProfileStmt.java @@ -29,7 +29,7 @@ import com.google.common.base.Strings; // show query profile "/"; # list all saving query ids // show query profile "/e0f7390f5363419e-b416a2a79996083e" # show graph of fragments of the query // show query profile "/e0f7390f5363419e-b416a2a79996083e/0" # show instance list of the specified fragment -// show query profile "/e0f7390f5363419e-b416a2a79996083e/0/e0f7390f5363419e-b416a2a799960906" # show graph of the instance +// show query profile "/e0f7390f5363419e-b416a2a79996083e/0/e0f7390f5363419e-b416a2a799960906" # show instance's graph public class ShowQueryProfileStmt extends ShowStmt { // This should be same as ProfileManager.PROFILE_HEADERS public static final ShowResultSetMetaData META_DATA_QUERY_IDS = diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRestoreStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRestoreStmt.java index 0c42178415..4aa2007bd5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRestoreStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRestoreStmt.java @@ -176,9 +176,11 @@ public class ShowRestoreStmt extends ShowStmt { return label -> true; } if (isAccurateMatch) { - return CaseSensibility.LABEL.getCaseSensibility() ? label -> label.equals(labelValue) : label -> label.equalsIgnoreCase(labelValue); + return CaseSensibility.LABEL.getCaseSensibility() + ? 
label -> label.equals(labelValue) : label -> label.equalsIgnoreCase(labelValue); } else { - PatternMatcher patternMatcher = PatternMatcher.createMysqlPattern(labelValue, CaseSensibility.LABEL.getCaseSensibility()); + PatternMatcher patternMatcher = PatternMatcher.createMysqlPattern( + labelValue, CaseSensibility.LABEL.getCaseSensibility()); return patternMatcher::match; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRolesStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRolesStmt.java index ccf84e2c95..47b9bacda2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRolesStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRolesStmt.java @@ -29,6 +29,7 @@ import org.apache.doris.qe.ShowResultSetMetaData; public class ShowRolesStmt extends ShowStmt { private static final ShowResultSetMetaData META_DATA; + static { ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRoutineLoadTaskStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRoutineLoadTaskStmt.java index 553151165b..84047f0b3b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRoutineLoadTaskStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRoutineLoadTaskStmt.java @@ -128,7 +128,8 @@ public class ShowRoutineLoadTaskStmt extends ShowStmt { } // CHECKSTYLE IGNORE THIS LINE if (!valid) { - throw new AnalysisException("show routine load job only support one equal expr which is sames like JobName=\"ILoveDoris\""); + throw new AnalysisException("show routine load job only support one equal expr " + + "which is sames like JobName=\"ILoveDoris\""); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowStreamLoadStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowStreamLoadStmt.java index 1ecfa5e060..9ae208e936 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowStreamLoadStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowStreamLoadStmt.java @@ -68,7 +68,8 @@ public class ShowStreamLoadStmt extends ShowStmt { .add("StartTime").add("FinishTime") .build(); - public ShowStreamLoadStmt(String db, Expr labelExpr, List orderByElements, LimitElement limitElement) { + public ShowStreamLoadStmt(String db, Expr labelExpr, + List orderByElements, LimitElement limitElement) { this.dbName = db; this.whereClause = labelExpr; this.orderByElements = orderByElements; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowViewStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowViewStmt.java index 2842c4f48d..9c7ea82381 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowViewStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowViewStmt.java @@ -89,7 +89,8 @@ public class ShowViewStmt extends ShowStmt { tbl.analyze(analyzer); String dbName = tbl.getDb(); - if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), dbName, getTbl(), PrivPredicate.SHOW)) { + if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv( + ConnectContext.get(), dbName, getTbl(), PrivPredicate.SHOW)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR, "SHOW VIEW", ConnectContext.get().getQualifiedUser(), ConnectContext.get().getRemoteIP(), diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SortInfo.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SortInfo.java index 
fe287e3900..3e0703414b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SortInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SortInfo.java @@ -40,7 +40,7 @@ import java.util.Set; * particular input row (materialize all row slots) */ public class SortInfo { - private final static Logger LOG = LogManager.getLogger(SortInfo.class); + private static final Logger LOG = LogManager.getLogger(SortInfo.class); // All ordering exprs with cost greater than this will be materialized. Since we don't // currently have any information about actual function costs, this value is intended to // ensure that all expensive functions will be materialized while still leaving simple diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/StmtRewriter.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/StmtRewriter.java index 4f0ea9c059..66c864fb57 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/StmtRewriter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/StmtRewriter.java @@ -182,7 +182,8 @@ public class StmtRewriter { * For example: * Query: select cs_item_sk, sum(cs_sales_price) from catalog_sales a group by cs_item_sk having ...; * Inline view: - * from (select cs_item_sk $ColumnA, sum(cs_sales_price) $ColumnB from catalog_sales a group by cs_item_sk) $TableA + * from (select cs_item_sk $ColumnA, sum(cs_sales_price) $ColumnB + * from catalog_sales a group by cs_item_sk) $TableA * * Add missing aggregation columns in select list * For example: @@ -595,7 +596,7 @@ public class StmtRewriter { lhsExprs, rhsExprs, updateGroupBy); } - /** + /* * Situation: The expr is a uncorrelated subquery for outer stmt. * Rewrite: Add a limit 1 for subquery. * origin stmt: select * from t1 where exists (select * from table2); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/StorageBackend.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/StorageBackend.java index 12e570296c..b073ff7067 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/StorageBackend.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/StorageBackend.java @@ -38,7 +38,8 @@ public class StorageBackend extends StorageDesc implements ParseNode { private StorageType storageType; private Map properties; - public StorageBackend(String storageName, String location, StorageType storageType, Map properties) { + public StorageBackend(String storageName, String location, + StorageType storageType, Map properties) { this.name = storageName; this.location = location; this.storageType = storageType; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/StringLiteral.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/StringLiteral.java index 16eaddf455..a215cc3132 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/StringLiteral.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/StringLiteral.java @@ -206,7 +206,8 @@ public class StringLiteral extends LiteralExpr { case LARGEINT: if (VariableVarConverters.hasConverter(beConverted)) { try { - return new LargeIntLiteral(String.valueOf(VariableVarConverters.encode(beConverted, value))); + return new LargeIntLiteral(String.valueOf( + VariableVarConverters.encode(beConverted, value))); } catch (DdlException e) { throw new AnalysisException(e.getMessage()); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/Subquery.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/Subquery.java index d3056bbe2f..10b51b80db 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/analysis/Subquery.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/Subquery.java @@ -41,7 +41,7 @@ import java.util.List; * its own Analyzer context. */ public class Subquery extends Expr { - private final static Logger LOG = LoggerFactory.getLogger(Subquery.class); + private static final Logger LOG = LoggerFactory.getLogger(Subquery.class); // The QueryStmt of the subquery. protected QueryStmt stmt; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/TablePattern.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/TablePattern.java index f03a71fb3d..6f6a3d393a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/TablePattern.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/TablePattern.java @@ -41,6 +41,7 @@ public class TablePattern implements Writable { boolean isAnalyzed = false; public static TablePattern ALL; + static { ALL = new TablePattern("*", "*"); try { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/TransactionBeginStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/TransactionBeginStmt.java index 287e45952c..58184b9bb2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/TransactionBeginStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/TransactionBeginStmt.java @@ -24,12 +24,15 @@ import org.apache.doris.transaction.TransactionEntry; public class TransactionBeginStmt extends TransactionStmt { private String label = null; + public TransactionBeginStmt() { this.label = ""; } + public TransactionBeginStmt(final String label) { this.label = label; } + @Override public void analyze(Analyzer analyzer) throws AnalysisException, UserException { if (label == null || label.isEmpty()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/TypeDef.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/TypeDef.java index e8ed068a43..bdbb4c88fc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/TypeDef.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/TypeDef.java @@ -28,7 +28,6 @@ import org.apache.doris.catalog.StructField; import org.apache.doris.catalog.StructType; import org.apache.doris.catalog.Type; import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.Config; import com.google.common.base.Preconditions; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/UseStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/UseStmt.java index 8727418625..d0da7479aa 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/UseStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/UseStmt.java @@ -63,7 +63,8 @@ public class UseStmt extends StatementBase { database = ClusterNamespace.getFullName(getClusterName(), database); if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), database, PrivPredicate.SHOW)) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), database); + ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, + analyzer.getQualifiedUser(), database); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/UserIdentity.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/UserIdentity.java index b87878fa4e..ec24d06a41 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/UserIdentity.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/UserIdentity.java @@ -98,7 +98,8 @@ public class UserIdentity 
implements Writable { } public static UserIdentity fromThrift(TUserIdentity tUserIdent) { - UserIdentity userIdentity = new UserIdentity(tUserIdent.getUsername(), tUserIdent.getHost(), tUserIdent.is_domain); + UserIdentity userIdentity = new UserIdentity(tUserIdent.getUsername(), + tUserIdent.getHost(), tUserIdent.is_domain); userIdentity.setIsAnalyzed(); return userIdentity; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ValueList.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ValueList.java index a9f3951bff..49e7e92e35 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ValueList.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ValueList.java @@ -32,6 +32,7 @@ public class ValueList { rows = Lists.newArrayList(); rows.add(row); } + public ValueList(List> rows) { this.rows = rows; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/WithClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/WithClause.java index 44ab177c25..98979fc028 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/WithClause.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/WithClause.java @@ -113,7 +113,8 @@ public class WithClause implements ParseNode { } } - public void getTables(Analyzer analyzer, Map tableMap, Set parentViewNameSet) throws AnalysisException { + public void getTables(Analyzer analyzer, Map tableMap, + Set parentViewNameSet) throws AnalysisException { for (View view : views) { QueryStmt stmt = view.getQueryStmt(); parentViewNameSet.add(view.getName()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java index 2f0b07e461..c529259fbc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java @@ -192,7 +192,8 @@ public class BackupHandler extends MasterDaemon implements Writable { public void createRepository(CreateRepositoryStmt stmt) throws DdlException { if (!catalog.getBrokerMgr().containsBroker(stmt.getBrokerName()) && stmt.getStorageType() == StorageBackend.StorageType.BROKER) { - ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR, "broker does not exist: " + stmt.getBrokerName()); + ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR, + "broker does not exist: " + stmt.getBrokerName()); } BlobStorage storage = BlobStorage.create(stmt.getBrokerName(), stmt.getStorageType(), stmt.getProperties()); @@ -328,13 +329,15 @@ public class BackupHandler extends MasterDaemon implements Writable { tbl.readLock(); try { if (olapTbl.existTempPartitions()) { - ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR, "Do not support backup table with temp partitions"); + ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR, + "Do not support backup table with temp partitions"); } PartitionNames partitionNames = tblRef.getPartitionNames(); if (partitionNames != null) { if (partitionNames.isTemp()) { - ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR, "Do not support backup temp partitions"); + ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR, + "Do not support backup temp partitions"); } for (String partName : partitionNames.getPartitionNames()) { @@ -671,7 +674,8 @@ public class BackupHandler extends MasterDaemon implements Writable { public void write(DataOutput out) throws IOException { repoMgr.write(out); - List jobs = 
dbIdToBackupOrRestoreJobs.values().stream().flatMap(Deque::stream).collect(Collectors.toList()); + List jobs = dbIdToBackupOrRestoreJobs.values() + .stream().flatMap(Deque::stream).collect(Collectors.toList()); out.writeInt(jobs.size()); for (AbstractJob job : jobs) { job.write(out); diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java index 96d89a2707..da07f64342 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java @@ -162,7 +162,8 @@ public class BackupJob extends AbstractJob { // snapshot task could not finish if status_code is OLAP_ERR_VERSION_ALREADY_MERGED, // so cancel this job if (request.getTaskStatus().getStatusCode() == TStatusCode.OLAP_ERR_VERSION_ALREADY_MERGED) { - status = new Status(ErrCode.OLAP_VERSION_ALREADY_MERGED, "make snapshot failed, version already merged"); + status = new Status(ErrCode.OLAP_VERSION_ALREADY_MERGED, + "make snapshot failed, version already merged"); cancelInternal(); } return false; @@ -427,7 +428,8 @@ public class BackupJob extends AbstractJob { } } - private void prepareSnapshotTaskForOlapTable(OlapTable olapTable, TableRef backupTableRef, AgentBatchTask batchTask) { + private void prepareSnapshotTaskForOlapTable(OlapTable olapTable, + TableRef backupTableRef, AgentBatchTask batchTask) { olapTable.readLock(); try { // check backup table again @@ -654,8 +656,8 @@ public class BackupJob extends AbstractJob { File jobDir = new File(localJobDirPath.toString()); if (jobDir.exists()) { // if dir exists, delete it first - Files.walk(localJobDirPath, - FileVisitOption.FOLLOW_LINKS).sorted(Comparator.reverseOrder()).map(Path::toFile).forEach(File::delete); + Files.walk(localJobDirPath, FileVisitOption.FOLLOW_LINKS).sorted(Comparator.reverseOrder()) + .map(Path::toFile).forEach(File::delete); } if (!jobDir.mkdir()) { status = new Status(ErrCode.COMMON_ERROR, "Failed to create tmp dir: " + localJobDirPath); @@ -673,7 +675,8 @@ public class BackupJob extends AbstractJob { localMetaInfoFilePath = metaInfoFile.getAbsolutePath(); // 3. save job info file - jobInfo = BackupJobInfo.fromCatalog(createTime, label, dbName, dbId, getContent(), backupMeta, snapshotInfos); + jobInfo = BackupJobInfo.fromCatalog(createTime, label, dbName, dbId, + getContent(), backupMeta, snapshotInfos); LOG.debug("job info: {}. 
{}", jobInfo, this); File jobInfoFile = new File(jobDir, Repository.PREFIX_JOB_INFO + createTimeStr); if (!jobInfoFile.createNewFile()) { @@ -805,8 +808,8 @@ public class BackupJob extends AbstractJob { try { File jobDir = new File(localJobDirPath.toString()); if (jobDir.exists()) { - Files.walk(localJobDirPath, - FileVisitOption.FOLLOW_LINKS).sorted(Comparator.reverseOrder()).map(Path::toFile).forEach(File::delete); + Files.walk(localJobDirPath, FileVisitOption.FOLLOW_LINKS).sorted(Comparator.reverseOrder()) + .map(Path::toFile).forEach(File::delete); } } catch (Exception e) { LOG.warn("failed to clean the backup job dir: " + localJobDirPath.toString()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BrokerStorage.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BrokerStorage.java index 9295095f40..f703634413 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/BrokerStorage.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BrokerStorage.java @@ -126,8 +126,8 @@ public class BrokerStorage extends BlobStorage { File localFile = new File(localFilePath); if (localFile.exists()) { try { - Files.walk(Paths.get(localFilePath), - FileVisitOption.FOLLOW_LINKS).sorted(Comparator.reverseOrder()).map(Path::toFile).forEach(File::delete); + Files.walk(Paths.get(localFilePath), FileVisitOption.FOLLOW_LINKS) + .sorted(Comparator.reverseOrder()).map(Path::toFile).forEach(File::delete); } catch (IOException e) { return new Status(Status.ErrCode.COMMON_ERROR, "failed to delete exist local file: " + localFilePath); } @@ -294,7 +294,8 @@ public class BrokerStorage extends BlobStorage { } } finally { Status closeStatus = closeWriter(client, address, fd); - if (closeStatus.getErrCode() == Status.ErrCode.BAD_CONNECTION || status.getErrCode() == Status.ErrCode.BAD_CONNECTION) { + if (closeStatus.getErrCode() == Status.ErrCode.BAD_CONNECTION + || status.getErrCode() == Status.ErrCode.BAD_CONNECTION) { ClientPool.brokerPool.invalidateObject(address, client); } else { ClientPool.brokerPool.returnObject(address, client); @@ -340,7 +341,8 @@ public class BrokerStorage extends BlobStorage { int tryTimes = 0; while (tryTimes < 3) { try { - TBrokerPWriteRequest req = new TBrokerPWriteRequest(TBrokerVersion.VERSION_ONE, fd, writeOffset, bb); + TBrokerPWriteRequest req + = new TBrokerPWriteRequest(TBrokerVersion.VERSION_ONE, fd, writeOffset, bb); TBrokerOperationStatus opst = client.pwrite(req); if (opst.getStatusCode() != TBrokerOperationStatusCode.OK) { // pwrite return failure. 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/Repository.java b/fe/fe-core/src/main/java/org/apache/doris/backup/Repository.java index 5a99017b2c..4b5b13ad70 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/Repository.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/Repository.java @@ -366,7 +366,8 @@ public class Repository implements Writable { // create remote tablet snapshot path // eg: - // /location/__palo_repository_repo_name/__ss_my_ss1/__ss_content/__db_10001/__tbl_10020/__part_10031/__idx_10032/__10023/__3481721 + // /location/__palo_repository_repo_name/__ss_my_ss1/__ss_content/ + // __db_10001/__tbl_10020/__part_10031/__idx_10032/__10023/__3481721 public String assembleRemoteSnapshotPath(String label, SnapshotInfo info) { String path = Joiner.on(PATH_DELIMITER).join(location, joinPrefix(PREFIX_REPO, name), @@ -453,7 +454,8 @@ public class Repository implements Writable { if (storage instanceof BrokerStorage) { // this may be a retry, so we should first delete remote file String tmpRemotePath = assembleFileNameWithSuffix(remoteFilePath, SUFFIX_TMP_FILE); - LOG.debug("get md5sum of file: {}. tmp remote path: {}. final remote path: {}", localFilePath, tmpRemotePath, finalRemotePath); + LOG.debug("get md5sum of file: {}. tmp remote path: {}. final remote path: {}", + localFilePath, tmpRemotePath, finalRemotePath); st = storage.delete(tmpRemotePath); if (!st.ok()) { return st; diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java index 25267dbad8..78148acc1f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java @@ -511,8 +511,7 @@ public class RestoreJob extends AbstractJob { } } for (BackupJobInfo.BackupOdbcResourceInfo backupOdbcResourceInfo : jobInfo.newBackupObjects.odbcResources) { - Resource resource = Catalog.getCurrentCatalog().getResourceMgr(). 
- getResource(backupOdbcResourceInfo.name); + Resource resource = Catalog.getCurrentCatalog().getResourceMgr().getResource(backupOdbcResourceInfo.name); if (resource == null) { continue; } @@ -538,7 +537,8 @@ public class RestoreJob extends AbstractJob { // table already exist, check schema if (localTbl.getType() != TableType.OLAP) { status = new Status(ErrCode.COMMON_ERROR, - "The type of local table should be same as type of remote table: " + remoteTbl.getName()); + "The type of local table should be same as type of remote table: " + + remoteTbl.getName()); return; } OlapTable localOlapTbl = (OlapTable) localTbl; @@ -554,8 +554,10 @@ public class RestoreJob extends AbstractJob { } LOG.debug("get intersect part names: {}, job: {}", intersectPartNames, this); if (!localOlapTbl.getSignature(BackupHandler.SIGNATURE_VERSION, intersectPartNames) - .equals(remoteOlapTbl.getSignature(BackupHandler.SIGNATURE_VERSION, intersectPartNames))) { - status = new Status(ErrCode.COMMON_ERROR, "Table " + jobInfo.getAliasByOriginNameIfSet(tableName) + .equals(remoteOlapTbl.getSignature( + BackupHandler.SIGNATURE_VERSION, intersectPartNames))) { + status = new Status(ErrCode.COMMON_ERROR, "Table " + + jobInfo.getAliasByOriginNameIfSet(tableName) + " already exist but with different schema"); return; } @@ -571,10 +573,12 @@ public class RestoreJob extends AbstractJob { if (localPartInfo.getType() == PartitionType.RANGE || localPartInfo.getType() == PartitionType.LIST) { PartitionItem localItem = localPartInfo.getItem(localPartition.getId()); - PartitionItem remoteItem = remoteOlapTbl.getPartitionInfo().getItem(backupPartInfo.id); + PartitionItem remoteItem = remoteOlapTbl + .getPartitionInfo().getItem(backupPartInfo.id); if (localItem.equals(remoteItem)) { // Same partition, same range - if (genFileMappingWhenBackupReplicasEqual(localPartInfo, localPartition, localTbl, backupPartInfo, partitionName, tblInfo)) { + if (genFileMappingWhenBackupReplicasEqual(localPartInfo, localPartition, + localTbl, backupPartInfo, partitionName, tblInfo)) { return; } } else { @@ -586,7 +590,8 @@ public class RestoreJob extends AbstractJob { } } else { // If this is a single partitioned table. 
- if (genFileMappingWhenBackupReplicasEqual(localPartInfo, localPartition, localTbl, backupPartInfo, partitionName, tblInfo)) { + if (genFileMappingWhenBackupReplicasEqual(localPartInfo, localPartition, localTbl, + backupPartInfo, partitionName, tblInfo)) { return; } } @@ -596,7 +601,8 @@ public class RestoreJob extends AbstractJob { PartitionInfo localPartitionInfo = localOlapTbl.getPartitionInfo(); if (localPartitionInfo.getType() == PartitionType.RANGE || localPartitionInfo.getType() == PartitionType.LIST) { - PartitionItem remoteItem = remoteOlapTbl.getPartitionInfo().getItem(backupPartInfo.id); + PartitionItem remoteItem = remoteOlapTbl.getPartitionInfo() + .getItem(backupPartInfo.id); if (localPartitionInfo.getAnyIntersectItem(remoteItem, false) != null) { status = new Status(ErrCode.COMMON_ERROR, "Partition " + partitionName + " in table " + localTbl.getName() @@ -614,7 +620,8 @@ public class RestoreJob extends AbstractJob { restoredPartitions.add(Pair.create(localOlapTbl.getName(), restorePart)); } } else { - // It is impossible that a single partitioned table exist without any existing partition + // It is impossible that a single partitioned table exist + // without any existing partition status = new Status(ErrCode.COMMON_ERROR, "No partition exist in single partitioned table " + localOlapTbl.getName()); return; @@ -876,7 +883,8 @@ public class RestoreJob extends AbstractJob { } // check disk capacity - org.apache.doris.common.Status st = Catalog.getCurrentSystemInfo().checkExceedDiskCapacityLimit(bePathsMap, true); + org.apache.doris.common.Status st = Catalog.getCurrentSystemInfo() + .checkExceedDiskCapacityLimit(bePathsMap, true); if (!st.ok()) { status = new Status(ErrCode.COMMON_ERROR, st.getErrorMsg()); return; @@ -923,8 +931,8 @@ public class RestoreJob extends AbstractJob { } } - private boolean genFileMappingWhenBackupReplicasEqual(PartitionInfo localPartInfo, Partition localPartition, Table localTbl, - BackupPartitionInfo backupPartInfo, String partitionName, BackupOlapTableInfo tblInfo) { + private boolean genFileMappingWhenBackupReplicasEqual(PartitionInfo localPartInfo, Partition localPartition, + Table localTbl, BackupPartitionInfo backupPartInfo, String partitionName, BackupOlapTableInfo tblInfo) { short restoreReplicaNum = replicaAlloc.getTotalReplicaNum(); short localReplicaNum = localPartInfo.getReplicaAllocation(localPartition.getId()).getTotalReplicaNum(); if (localReplicaNum != restoreReplicaNum) { @@ -1116,7 +1124,8 @@ public class RestoreJob extends AbstractJob { OlapTable remoteTbl = (OlapTable) backupMeta.getTable(entry.first); PartitionInfo localPartitionInfo = localTbl.getPartitionInfo(); PartitionInfo remotePartitionInfo = remoteTbl.getPartitionInfo(); - BackupPartitionInfo backupPartitionInfo = jobInfo.getOlapTableInfo(entry.first).getPartInfo(restorePart.getName()); + BackupPartitionInfo backupPartitionInfo = jobInfo.getOlapTableInfo(entry.first) + .getPartInfo(restorePart.getName()); long remotePartId = backupPartitionInfo.id; DataProperty remoteDataProperty = remotePartitionInfo.getDataProperty(remotePartId); localPartitionInfo.addPartition(restorePart.getId(), false, remotePartitionInfo.getItem(remotePartId), @@ -1271,9 +1280,9 @@ public class RestoreJob extends AbstractJob { MaterializedIndex idx = part.getIndex(info.getIndexId()); if (idx == null) { - status = new Status(ErrCode.NOT_FOUND, - "index " + info.getIndexId() + " does not exist in partion " + part.getName() - + "of restored table " + tbl.getName()); + status = new 
Status(ErrCode.NOT_FOUND, "index " + info.getIndexId() + + " does not exist in partion " + part.getName() + + "of restored table " + tbl.getName()); return; } @@ -1745,7 +1754,8 @@ public class RestoreJob extends AbstractJob { for (Map.Entry entry : restoredVersionInfo.row(tblId).entrySet()) { out.writeLong(entry.getKey()); out.writeLong(entry.getValue()); - // It is version hash in the past, but it useless but should compatible with old version so that write 0 here + // It is version hash in the past, + // but it useless but should compatible with old version so that write 0 here out.writeLong(0L); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/S3Storage.java b/fe/fe-core/src/main/java/org/apache/doris/backup/S3Storage.java index a67b0992e5..ae89175ebd 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/S3Storage.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/S3Storage.java @@ -104,7 +104,8 @@ public class S3Storage extends BlobStorage { // If not, it will not be converted ( https://github.com/aws/aws-sdk-java-v2/pull/763), // but the endpoints of many cloud service providers for object storage do not start with s3, // so they cannot be converted to virtual hosted-sytle. - // Some of them, such as aliyun's oss, only support virtual hosted-sytle, and some of them(ceph) may only support + // Some of them, such as aliyun's oss, only support virtual hosted-sytle, + // and some of them(ceph) may only support // path-style, so we need to do some additional conversion. // // use_path_style | !use_path_style @@ -202,7 +203,8 @@ public class S3Storage extends BlobStorage { } try { S3URI uri = S3URI.create(remoteFilePath, forceHostedStyle); - GetObjectResponse response = getClient(uri.getVirtualBucket()).getObject(GetObjectRequest.builder().bucket(uri.getBucket()).key(uri.getKey()).build(), localFile.toPath()); + GetObjectResponse response = getClient(uri.getVirtualBucket()).getObject( + GetObjectRequest.builder().bucket(uri.getBucket()).key(uri.getKey()).build(), localFile.toPath()); if (localFile.length() == fileSize) { LOG.info( "finished to download from {} to {} with size: {}. cost {} ms", @@ -348,7 +350,9 @@ public class S3Storage extends BlobStorage { return Status.OK; } for (FileStatus fileStatus : files) { - RemoteFile remoteFile = new RemoteFile(fileNameOnly ? fileStatus.getPath().getName() : fileStatus.getPath().toString(), !fileStatus.isDirectory(), fileStatus.isDirectory() ? -1 : fileStatus.getLen()); + RemoteFile remoteFile = new RemoteFile( + fileNameOnly ? fileStatus.getPath().getName() : fileStatus.getPath().toString(), + !fileStatus.isDirectory(), fileStatus.isDirectory() ? 
-1 : fileStatus.getLen()); result.add(remoteFile); } } catch (FileNotFoundException e) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRuleMgr.java b/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRuleMgr.java index 3bf66989fd..db99f4de43 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRuleMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRuleMgr.java @@ -307,4 +307,4 @@ public class SqlBlockRuleMgr implements Writable { String json = Text.readString(in); return GsonUtils.GSON.fromJson(json, SqlBlockRuleMgr.class); } -} \ No newline at end of file +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateFunction.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateFunction.java index efa1c0c2f1..d0b5e28733 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateFunction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateFunction.java @@ -101,18 +101,19 @@ public class AggregateFunction extends Function { } public AggregateFunction(FunctionName fnName, List argTypes, - Type retType, Type intermediateType, - URI location, String updateFnSymbol, String initFnSymbol, - String serializeFnSymbol, String mergeFnSymbol, String getValueFnSymbol, - String removeFnSymbol, String finalizeFnSymbol) { + Type retType, Type intermediateType, + URI location, String updateFnSymbol, String initFnSymbol, + String serializeFnSymbol, String mergeFnSymbol, String getValueFnSymbol, + String removeFnSymbol, String finalizeFnSymbol) { this(fnName, argTypes, retType, intermediateType, location, updateFnSymbol, initFnSymbol, serializeFnSymbol, mergeFnSymbol, getValueFnSymbol, removeFnSymbol, finalizeFnSymbol, false); } public AggregateFunction(FunctionName fnName, List argTypes, - Type retType, Type intermediateType, boolean hasVarArgs) { + Type retType, Type intermediateType, boolean hasVarArgs) { super(fnName, argTypes, retType, hasVarArgs); - this.intermediateType = (intermediateType != null && intermediateType.equals(retType)) ? null : intermediateType; + this.intermediateType = (intermediateType != null && intermediateType.equals(retType)) + ? 
null : intermediateType; ignoresDistinct = false; isAnalyticFn = false; isAggregateFn = true; @@ -120,19 +121,21 @@ public class AggregateFunction extends Function { } public static AggregateFunction createBuiltin(String name, - List argTypes, Type retType, Type intermediateType, - boolean ignoresDistinct, - boolean isAnalyticFn, - boolean returnsNonNullOnEmpty) { - return createBuiltin(name, argTypes, retType, intermediateType, false, ignoresDistinct, isAnalyticFn, returnsNonNullOnEmpty); + List argTypes, Type retType, Type intermediateType, + boolean ignoresDistinct, + boolean isAnalyticFn, + boolean returnsNonNullOnEmpty) { + return createBuiltin(name, argTypes, retType, intermediateType, false, + ignoresDistinct, isAnalyticFn, returnsNonNullOnEmpty); } public static AggregateFunction createBuiltin(String name, - List argTypes, Type retType, Type intermediateType, - boolean hasVarArgs, boolean ignoresDistinct, - boolean isAnalyticFn, - boolean returnsNonNullOnEmpty) { - AggregateFunction fn = new AggregateFunction(new FunctionName(name), argTypes, retType, intermediateType, hasVarArgs); + List argTypes, Type retType, Type intermediateType, + boolean hasVarArgs, boolean ignoresDistinct, + boolean isAnalyticFn, + boolean returnsNonNullOnEmpty) { + AggregateFunction fn = new AggregateFunction(new FunctionName(name), + argTypes, retType, intermediateType, hasVarArgs); fn.setBinaryType(TFunctionBinaryType.BUILTIN); fn.ignoresDistinct = ignoresDistinct; fn.isAnalyticFn = isAnalyticFn; @@ -146,7 +149,8 @@ public class AggregateFunction extends Function { URI location, String updateFnSymbol, String initFnSymbol, String serializeFnSymbol, String mergeFnSymbol, String getValueFnSymbol, String removeFnSymbol, String finalizeFnSymbol, boolean vectorized) { - this(fnName, argTypes, retType, intermediateType, false, location, updateFnSymbol, initFnSymbol, serializeFnSymbol, + this(fnName, argTypes, retType, intermediateType, false, location, + updateFnSymbol, initFnSymbol, serializeFnSymbol, mergeFnSymbol, getValueFnSymbol, removeFnSymbol, finalizeFnSymbol, vectorized); } @@ -157,8 +161,10 @@ public class AggregateFunction extends Function { String removeFnSymbol, String finalizeFnSymbol, boolean vectorized) { // only `count` is always not nullable, other aggregate function is always nullable super(fnName, argTypes, retType, hasVarArgs, vectorized, - AggregateFunction.NOT_NULLABLE_AGGREGATE_FUNCTION_NAME_SET.contains(fnName.getFunction()) ? NullableMode.ALWAYS_NOT_NULLABLE : - AggregateFunction.ALWAYS_NULLABLE_AGGREGATE_FUNCTION_NAME_SET.contains(fnName.getFunction()) ? NullableMode.ALWAYS_NULLABLE : NullableMode.DEPEND_ON_ARGUMENT); + AggregateFunction.NOT_NULLABLE_AGGREGATE_FUNCTION_NAME_SET.contains(fnName.getFunction()) + ? NullableMode.ALWAYS_NOT_NULLABLE : + AggregateFunction.ALWAYS_NULLABLE_AGGREGATE_FUNCTION_NAME_SET.contains(fnName.getFunction()) + ? NullableMode.ALWAYS_NULLABLE : NullableMode.DEPEND_ON_ARGUMENT); setLocation(location); this.intermediateType = (intermediateType.equals(retType)) ? 
null : intermediateType; this.updateFnSymbol = updateFnSymbol; @@ -175,30 +181,31 @@ public class AggregateFunction extends Function { } public static AggregateFunction createBuiltin(String name, - List argTypes, Type retType, Type intermediateType, - String initFnSymbol, String updateFnSymbol, String mergeFnSymbol, - String serializeFnSymbol, String finalizeFnSymbol, boolean ignoresDistinct, - boolean isAnalyticFn, boolean returnsNonNullOnEmpty) { + List argTypes, Type retType, Type intermediateType, + String initFnSymbol, String updateFnSymbol, String mergeFnSymbol, + String serializeFnSymbol, String finalizeFnSymbol, boolean ignoresDistinct, + boolean isAnalyticFn, boolean returnsNonNullOnEmpty) { return createBuiltin(name, argTypes, retType, intermediateType, initFnSymbol, updateFnSymbol, mergeFnSymbol, serializeFnSymbol, finalizeFnSymbol, ignoresDistinct, isAnalyticFn, returnsNonNullOnEmpty, false); } + public static AggregateFunction createBuiltin(String name, - List argTypes, Type retType, Type intermediateType, - String initFnSymbol, String updateFnSymbol, String mergeFnSymbol, - String serializeFnSymbol, String finalizeFnSymbol, boolean ignoresDistinct, - boolean isAnalyticFn, boolean returnsNonNullOnEmpty, boolean vectorized) { + List argTypes, Type retType, Type intermediateType, + String initFnSymbol, String updateFnSymbol, String mergeFnSymbol, + String serializeFnSymbol, String finalizeFnSymbol, boolean ignoresDistinct, + boolean isAnalyticFn, boolean returnsNonNullOnEmpty, boolean vectorized) { return createBuiltin(name, argTypes, retType, intermediateType, initFnSymbol, updateFnSymbol, mergeFnSymbol, serializeFnSymbol, null, null, finalizeFnSymbol, ignoresDistinct, isAnalyticFn, returnsNonNullOnEmpty, vectorized); } public static AggregateFunction createBuiltin(String name, - List argTypes, Type retType, Type intermediateType, - String initFnSymbol, String updateFnSymbol, String mergeFnSymbol, - String serializeFnSymbol, String getValueFnSymbol, String removeFnSymbol, - String finalizeFnSymbol, boolean ignoresDistinct, boolean isAnalyticFn, - boolean returnsNonNullOnEmpty) { + List argTypes, Type retType, Type intermediateType, + String initFnSymbol, String updateFnSymbol, String mergeFnSymbol, + String serializeFnSymbol, String getValueFnSymbol, String removeFnSymbol, + String finalizeFnSymbol, boolean ignoresDistinct, boolean isAnalyticFn, + boolean returnsNonNullOnEmpty) { return createBuiltin(name, argTypes, retType, intermediateType, initFnSymbol, updateFnSymbol, mergeFnSymbol, serializeFnSymbol, getValueFnSymbol, removeFnSymbol, @@ -206,11 +213,11 @@ public class AggregateFunction extends Function { } public static AggregateFunction createBuiltin(String name, - List argTypes, Type retType, Type intermediateType, - String initFnSymbol, String updateFnSymbol, String mergeFnSymbol, - String serializeFnSymbol, String getValueFnSymbol, String removeFnSymbol, - String finalizeFnSymbol, boolean ignoresDistinct, boolean isAnalyticFn, - boolean returnsNonNullOnEmpty, boolean vectorized) { + List argTypes, Type retType, Type intermediateType, + String initFnSymbol, String updateFnSymbol, String mergeFnSymbol, + String serializeFnSymbol, String getValueFnSymbol, String removeFnSymbol, + String finalizeFnSymbol, boolean ignoresDistinct, boolean isAnalyticFn, + boolean returnsNonNullOnEmpty, boolean vectorized) { return createBuiltin(name, argTypes, retType, intermediateType, false, initFnSymbol, updateFnSymbol, mergeFnSymbol, serializeFnSymbol, getValueFnSymbol, removeFnSymbol, 
@@ -218,22 +225,22 @@ public class AggregateFunction extends Function { } public static AggregateFunction createBuiltin(String name, - List argTypes, Type retType, Type intermediateType, boolean hasVarArgs, - String initFnSymbol, String updateFnSymbol, String mergeFnSymbol, - String serializeFnSymbol, String getValueFnSymbol, String removeFnSymbol, - String finalizeFnSymbol, boolean ignoresDistinct, boolean isAnalyticFn, - boolean returnsNonNullOnEmpty) { + List argTypes, Type retType, Type intermediateType, boolean hasVarArgs, + String initFnSymbol, String updateFnSymbol, String mergeFnSymbol, + String serializeFnSymbol, String getValueFnSymbol, String removeFnSymbol, + String finalizeFnSymbol, boolean ignoresDistinct, boolean isAnalyticFn, + boolean returnsNonNullOnEmpty) { return createBuiltin(name, argTypes, retType, intermediateType, hasVarArgs, initFnSymbol, updateFnSymbol, mergeFnSymbol, serializeFnSymbol, getValueFnSymbol, removeFnSymbol, finalizeFnSymbol, ignoresDistinct, isAnalyticFn, returnsNonNullOnEmpty, false); } public static AggregateFunction createBuiltin(String name, - List argTypes, Type retType, Type intermediateType, boolean hasVarArgs, - String initFnSymbol, String updateFnSymbol, String mergeFnSymbol, - String serializeFnSymbol, String getValueFnSymbol, String removeFnSymbol, - String finalizeFnSymbol, boolean ignoresDistinct, boolean isAnalyticFn, - boolean returnsNonNullOnEmpty, boolean vectorized) { + List argTypes, Type retType, Type intermediateType, boolean hasVarArgs, + String initFnSymbol, String updateFnSymbol, String mergeFnSymbol, + String serializeFnSymbol, String getValueFnSymbol, String removeFnSymbol, + String finalizeFnSymbol, boolean ignoresDistinct, boolean isAnalyticFn, + boolean returnsNonNullOnEmpty, boolean vectorized) { AggregateFunction fn = new AggregateFunction(new FunctionName(name), argTypes, retType, intermediateType, hasVarArgs, null, updateFnSymbol, initFnSymbol, serializeFnSymbol, mergeFnSymbol, getValueFnSymbol, removeFnSymbol, @@ -397,6 +404,7 @@ public class AggregateFunction extends Function { this.removeFnSymbol = symbol; return this; } + public AggregateFunctionBuilder binaryType(TFunctionBinaryType binaryType) { this.binaryType = binaryType; return this; diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateType.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateType.java index ec25ade6ee..386e4e6433 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateType.java @@ -109,6 +109,7 @@ public enum AggregateType { compatibilityMap.put(NONE, EnumSet.copyOf(excObjectStored)); } + private final String sqlName; private AggregateType(String sqlName) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/AuthType.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/AuthType.java index c0c97530a0..25097bb24e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/AuthType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/AuthType.java @@ -18,7 +18,7 @@ package org.apache.doris.catalog; /** - * Define different auth type for external table such as hive/iceberg, + * Define different auth type for external table such as hive/iceberg * so that BE could call secured under fileStorageSystem (enable kerberos) */ public enum AuthType { diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/BrokerTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/BrokerTable.java index 
c0c09375c4..8802df9747 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/BrokerTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/BrokerTable.java @@ -190,14 +190,14 @@ public class BrokerTable extends Table { case "parquet": break; default: - throw new DdlException("Invalid file type: " + copiedProps.toString() + ".Only support csv and parquet."); + throw new DdlException("Invalid file type: " + copiedProps + ".Only support csv and parquet."); } } copiedProps.remove(FILE_FORMAT); if (!copiedProps.isEmpty()) { - throw new DdlException("Unknown table properties: " + copiedProps.toString()); + throw new DdlException("Unknown table properties: " + copiedProps); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java index 1d649a29a7..9bf9608a45 100755 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java @@ -555,22 +555,26 @@ public class Catalog { this.metaContext.setThreadLocalInfo(); this.stat = new TabletSchedulerStat(); - this.tabletScheduler = new TabletScheduler(this, systemInfo, tabletInvertedIndex, stat, Config.tablet_rebalancer_type); + this.tabletScheduler = new TabletScheduler(this, systemInfo, + tabletInvertedIndex, stat, Config.tablet_rebalancer_type); this.tabletChecker = new TabletChecker(this, systemInfo, tabletScheduler, stat); // The pendingLoadTaskScheduler's queue size should not less than Config.desired_max_waiting_jobs. // So that we can guarantee that all submitted load jobs can be scheduled without being starved. - this.pendingLoadTaskScheduler = new MasterTaskExecutor("pending_load_task_scheduler", Config.async_pending_load_task_pool_size, + this.pendingLoadTaskScheduler = new MasterTaskExecutor("pending_load_task_scheduler", + Config.async_pending_load_task_pool_size, Config.desired_max_waiting_jobs, !isCheckpointCatalog); // The loadingLoadTaskScheduler's queue size is unlimited, so that it can receive all loading tasks // created after pending tasks finish. And don't worry about the high concurrency, because the // concurrency is limited by Config.desired_max_waiting_jobs and Config.async_loading_load_task_pool_size. - this.loadingLoadTaskScheduler = new MasterTaskExecutor("loading_load_task_scheduler", Config.async_loading_load_task_pool_size, + this.loadingLoadTaskScheduler = new MasterTaskExecutor("loading_load_task_scheduler", + Config.async_loading_load_task_pool_size, Integer.MAX_VALUE, !isCheckpointCatalog); this.loadJobScheduler = new LoadJobScheduler(); this.loadManager = new LoadManager(loadJobScheduler); - this.streamLoadRecordMgr = new StreamLoadRecordMgr("stream_load_record_manager", Config.fetch_stream_load_record_interval_second * 1000); + this.streamLoadRecordMgr = new StreamLoadRecordMgr("stream_load_record_manager", + Config.fetch_stream_load_record_interval_second * 1000L); this.loadEtlChecker = new LoadEtlChecker(loadManager); this.loadLoadingChecker = new LoadLoadingChecker(loadManager); this.routineLoadScheduler = new RoutineLoadScheduler(routineLoadManager); @@ -776,7 +780,8 @@ public class Catalog { // 1. check and create dirs and files File meta = new File(metaDir); if (!meta.exists()) { - LOG.warn("Doris' meta dir {} does not exist. You need to create it before starting FE", meta.getAbsolutePath()); + LOG.warn("Doris' meta dir {} does not exist." 
+ + " You need to create it before starting FE", meta.getAbsolutePath()); throw new Exception(meta.getAbsolutePath() + " does not exist, will exit"); } @@ -894,9 +899,11 @@ public class Catalog { // nodeName should be like "192.168.1.1_9217_1620296111213" // and the selfNode should be the prefix of nodeName. // If not, it means that the ip used last time is different from this time, which is not allowed. - // But is metadata_failure_recovery is true, we will not check it because this may be a FE migration. + // But is metadata_failure_recovery is true, + // we will not check it because this may be a FE migration. String[] split = nodeName.split("_"); - if (Config.metadata_failure_recovery.equals("false") && !selfNode.first.equalsIgnoreCase(split[0])) { + if (Config.metadata_failure_recovery.equals("false") + && !selfNode.first.equalsIgnoreCase(split[0])) { throw new IOException("the self host " + selfNode.first + " does not equal to the host in ROLE" + " file " + split[0] + ". You need to set 'priority_networks' config" @@ -969,7 +976,8 @@ public class Catalog { if (!versionFile.exists()) { // If the version file doesn't exist, download it from helper node if (!getVersionFileFromHelper(rightHelperNode)) { - throw new IOException("fail to download version file from " + rightHelperNode.first + " will exit."); + throw new IOException("fail to download version file from " + + rightHelperNode.first + " will exit."); } // NOTE: cluster_id will be init when Storage object is constructed, @@ -1008,7 +1016,8 @@ public class Catalog { Preconditions.checkNotNull(token); Preconditions.checkNotNull(remoteToken); if (!token.equals(remoteToken)) { - throw new IOException("token is not equal with helper node " + rightHelperNode.first + ". will exit."); + throw new IOException("token is not equal with helper node " + + rightHelperNode.first + ". will exit."); } } } catch (Exception e) { @@ -1533,7 +1542,8 @@ public class Catalog { connection.setReadTimeout(HTTP_TIMEOUT_SECOND * 1000); String response; - try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(connection.getInputStream()))) { + try (BufferedReader bufferedReader + = new BufferedReader(new InputStreamReader(connection.getInputStream()))) { String line; StringBuilder sb = new StringBuilder(); while ((line = bufferedReader.readLine()) != null) { @@ -1612,7 +1622,7 @@ public class Catalog { LOG.info("finished replay masterInfo from image"); return newChecksum; } - + public long loadFrontends(DataInputStream dis, long checksum) throws IOException { int size = dis.readInt(); long newChecksum = checksum ^ size; @@ -1727,14 +1737,16 @@ public class Catalog { long newChecksum = checksum ^ size; if (size > 0) { // There should be no old alter jobs, if exist throw exception, should not use this FE version - throw new IOException("There are [" + size + "] old alter jobs. Please downgrade FE to an older version and handle residual jobs"); + throw new IOException("There are [" + size + "] old alter jobs." + + " Please downgrade FE to an older version and handle residual jobs"); } // finished or cancelled jobs size = dis.readInt(); newChecksum ^= size; if (size > 0) { - throw new IOException("There are [" + size + "] old finished or cancelled alter jobs. Please downgrade FE to an older version and handle residual jobs"); + throw new IOException("There are [" + size + "] old finished or cancelled alter jobs." 
+ + " Please downgrade FE to an older version and handle residual jobs"); } // alter job v2 @@ -2008,7 +2020,8 @@ public class Catalog { public long saveExportJob(CountingDataOutputStream dos, long checksum) throws IOException { long curTime = System.currentTimeMillis(); - List jobs = exportMgr.getJobs().stream().filter(t -> !t.isExpired(curTime)).collect(Collectors.toList()); + List jobs = exportMgr.getJobs().stream() + .filter(t -> !t.isExpired(curTime)).collect(Collectors.toList()); int size = jobs.size(); checksum ^= size; dos.writeInt(size); @@ -2667,8 +2680,11 @@ public class Catalog { // 1.2 other table type sb.append("CREATE "); - if (table.getType() == TableType.ODBC || table.getType() == TableType.MYSQL || table.getType() == TableType.ELASTICSEARCH - || table.getType() == TableType.BROKER || table.getType() == TableType.HIVE) { + if (table.getType() == TableType.ODBC + || table.getType() == TableType.MYSQL + || table.getType() == TableType.ELASTICSEARCH + || table.getType() == TableType.BROKER + || table.getType() == TableType.HIVE) { sb.append("EXTERNAL "); } sb.append("TABLE "); @@ -2864,7 +2880,8 @@ public class Catalog { sb.append("\"password\" = \"").append(hidePassword ? "" : mysqlTable.getPasswd()).append("\",\n"); sb.append("\"charset\" = \"").append(mysqlTable.getCharset()).append("\",\n"); } else { - sb.append("\"odbc_catalog_resource\" = \"").append(mysqlTable.getOdbcCatalogResourceName()).append("\",\n"); + sb.append("\"odbc_catalog_resource\" = \"") + .append(mysqlTable.getOdbcCatalogResourceName()).append("\",\n"); } sb.append("\"database\" = \"").append(mysqlTable.getMysqlDatabaseName()).append("\",\n"); sb.append("\"table\" = \"").append(mysqlTable.getMysqlTableName()).append("\"\n"); @@ -2884,7 +2901,8 @@ public class Catalog { sb.append("\"driver\" = \"").append(odbcTable.getOdbcDriver()).append("\",\n"); sb.append("\"odbc_type\" = \"").append(odbcTable.getOdbcTableTypeName()).append("\",\n"); } else { - sb.append("\"odbc_catalog_resource\" = \"").append(odbcTable.getOdbcCatalogResourceName()).append("\",\n"); + sb.append("\"odbc_catalog_resource\" = \"") + .append(odbcTable.getOdbcCatalogResourceName()).append("\",\n"); } sb.append("\"database\" = \"").append(odbcTable.getOdbcDatabaseName()).append("\",\n"); sb.append("\"table\" = \"").append(odbcTable.getOdbcTableName()).append("\"\n"); @@ -3045,7 +3063,8 @@ public class Catalog { getInternalDataSource().replayCreateTable(dbName, table); } - public void replayAlterExternalTableSchema(String dbName, String tableName, List newSchema) throws MetaNotFoundException { + public void replayAlterExternalTableSchema(String dbName, String tableName, List newSchema) + throws MetaNotFoundException { getInternalDataSource().replayAlterExternalTableSchema(dbName, tableName, newSchema); } @@ -3261,7 +3280,8 @@ public class Catalog { for (Partition partition : olapTable.getAllPartitions()) { long partitionId = partition.getId(); DataProperty dataProperty = partitionInfo.getDataProperty(partition.getId()); - Preconditions.checkNotNull(dataProperty, partition.getName() + ", pId:" + partitionId + ", db: " + dbId + ", tbl: " + tableId); + Preconditions.checkNotNull(dataProperty, partition.getName() + + ", pId:" + partitionId + ", db: " + dbId + ", tbl: " + tableId); if (dataProperty.getStorageMedium() == TStorageMedium.SSD && dataProperty.getCooldownTimeMs() < currentTimeMs) { // expire. change to HDD. @@ -3300,7 +3320,8 @@ public class Catalog { // use try lock to avoid blocking a long time. 
// if block too long, backend report rpc will timeout. if (!olapTable.tryWriteLockIfExist(Table.TRY_LOCK_TIMEOUT_MS, TimeUnit.MILLISECONDS)) { - LOG.warn("try get table {} writelock but failed when checking backend storage medium", table.getName()); + LOG.warn("try get table {} writelock but failed" + + " when checking backend storage medium", table.getName()); continue; } Preconditions.checkState(olapTable.isWriteLockHeldByCurrentThread()); @@ -3763,13 +3784,15 @@ public class Catalog { if (bucketsNum == -1) { bucketsNum = partition.getDistributionInfo().getBucketNum(); } else if (bucketsNum != partition.getDistributionInfo().getBucketNum()) { - throw new DdlException("Partitions in table " + table.getName() + " have different buckets number"); + throw new DdlException("Partitions in table " + table.getName() + + " have different buckets number"); } if (replicaAlloc == null) { replicaAlloc = partitionInfo.getReplicaAllocation(partition.getId()); } else if (!replicaAlloc.equals(partitionInfo.getReplicaAllocation(partition.getId()))) { - throw new DdlException("Partitions in table " + table.getName() + " have different replica allocation."); + throw new DdlException("Partitions in table " + table.getName() + + " have different replica allocation."); } } } @@ -3794,7 +3817,8 @@ public class Catalog { } // set this group as unstable - colocateTableIndex.markGroupUnstable(groupId, "Colocation group modified by user", false /* edit log is along with modify table log */); + colocateTableIndex.markGroupUnstable(groupId, "Colocation group modified by user", + false /* edit log is along with modify table log */); table.setColocateGroup(colocateGroup); } else { // unset colocation group @@ -3908,7 +3932,8 @@ public class Catalog { throw new DdlException("Table[" + table.getName() + "] is under " + table.getState()); } - if (table.getPartitionInfo().getType() != PartitionType.RANGE && table.getPartitionInfo().getType() != PartitionType.LIST) { + if (table.getPartitionInfo().getType() != PartitionType.RANGE + && table.getPartitionInfo().getType() != PartitionType.LIST) { throw new DdlException("Table[" + table.getName() + "] is single partitioned. " + "no need to rename partition name."); } @@ -3978,8 +4003,8 @@ public class Catalog { // Merge the new properties with origin properties, and then analyze them Map origDynamicProperties = tableProperty.getOriginDynamicPartitionProperty(); origDynamicProperties.putAll(properties); - Map analyzedDynamicPartition = DynamicPartitionUtil. 
- analyzeDynamicPartition(origDynamicProperties, table.getPartitionInfo()); + Map analyzedDynamicPartition = DynamicPartitionUtil + .analyzeDynamicPartition(origDynamicProperties, table.getPartitionInfo()); tableProperty.modifyTableProperties(analyzedDynamicPartition); tableProperty.buildDynamicProperty(); } @@ -3987,13 +4012,14 @@ public class Catalog { DynamicPartitionUtil.registerOrRemoveDynamicPartitionTable(db.getId(), table, false); dynamicPartitionScheduler.createOrUpdateRuntimeInfo( table.getId(), DynamicPartitionScheduler.LAST_UPDATE_TIME, TimeUtils.getCurrentFormatTime()); - ModifyTablePropertyOperationLog info = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), logProperties); + ModifyTablePropertyOperationLog info + = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), logProperties); editLog.logDynamicPartition(info); } private void convertDynamicPartitionReplicaNumToReplicaAllocation(Map properties) { if (properties.containsKey(DynamicPartitionProperty.REPLICATION_NUM)) { - Short repNum = Short.valueOf(properties.remove(DynamicPartitionProperty.REPLICATION_NUM)); + short repNum = Short.parseShort(properties.remove(DynamicPartitionProperty.REPLICATION_NUM)); ReplicaAllocation replicaAlloc = new ReplicaAllocation(repNum); properties.put(DynamicPartitionProperty.REPLICATION_ALLOCATION, replicaAlloc.toCreateStmt()); } @@ -4007,7 +4033,8 @@ public class Catalog { * @throws DdlException */ // The caller need to hold the table write lock - public void modifyTableReplicaAllocation(Database db, OlapTable table, Map properties) throws UserException { + public void modifyTableReplicaAllocation(Database db, OlapTable table, + Map properties) throws UserException { Preconditions.checkArgument(table.isWriteLockHeldByCurrentThread()); String defaultReplicationNumName = "default." 
+ PropertyAnalyzer.PROPERTIES_REPLICATION_NUM; PartitionInfo partitionInfo = table.getPartitionInfo(); @@ -4058,7 +4085,8 @@ public class Catalog { tableProperty.buildReplicaAllocation(); // log - ModifyTablePropertyOperationLog info = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties); + ModifyTablePropertyOperationLog info + = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties); editLog.logModifyReplicationNum(info); LOG.debug("modify table[{}] replication num to {}", table.getName(), properties.get(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM)); @@ -4080,11 +4108,13 @@ public class Catalog { table.getPartitionInfo().setIsInMemory(partition.getId(), tableProperty.isInMemory()); } - ModifyTablePropertyOperationLog info = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties); + ModifyTablePropertyOperationLog info + = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties); editLog.logModifyInMemory(info); } - public void replayModifyTableProperty(short opCode, ModifyTablePropertyOperationLog info) throws MetaNotFoundException { + public void replayModifyTableProperty(short opCode, + ModifyTablePropertyOperationLog info) throws MetaNotFoundException { long dbId = info.getDbId(); long tableId = info.getTableId(); Map properties = info.getProperties(); @@ -4113,7 +4143,8 @@ public class Catalog { } } - public void modifyDefaultDistributionBucketNum(Database db, OlapTable olapTable, ModifyDistributionClause modifyDistributionClause) throws DdlException { + public void modifyDefaultDistributionBucketNum(Database db, OlapTable olapTable, + ModifyDistributionClause modifyDistributionClause) throws DdlException { olapTable.writeLockOrDdlException(); try { if (olapTable.isColocateTable()) { @@ -4131,12 +4162,14 @@ public class Catalog { DistributionInfo distributionInfo = distributionDesc.toDistributionInfo(baseSchema); // for now. we only support modify distribution's bucket num if (distributionInfo.getType() != defaultDistributionInfo.getType()) { - throw new DdlException("Cannot change distribution type when modify default distribution bucket num"); + throw new DdlException("Cannot change distribution type when modify" + + " default distribution bucket num"); } if (distributionInfo.getType() == DistributionInfoType.HASH) { HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo; List newDistriCols = hashDistributionInfo.getDistributionColumns(); - List defaultDistriCols = ((HashDistributionInfo) defaultDistributionInfo).getDistributionColumns(); + List defaultDistriCols = ((HashDistributionInfo) defaultDistributionInfo) + .getDistributionColumns(); if (!newDistriCols.equals(defaultDistriCols)) { throw new DdlException("Cannot assign hash distribution with different distribution cols. 
" + "default is: " + defaultDistriCols); @@ -4150,7 +4183,9 @@ public class Catalog { defaultDistributionInfo.setBucketNum(bucketNum); - ModifyTableDefaultDistributionBucketNumOperationLog info = new ModifyTableDefaultDistributionBucketNumOperationLog(db.getId(), olapTable.getId(), bucketNum); + ModifyTableDefaultDistributionBucketNumOperationLog info + = new ModifyTableDefaultDistributionBucketNumOperationLog( + db.getId(), olapTable.getId(), bucketNum); editLog.logModifyDefaultDistributionBucketNum(info); LOG.info("modify table[{}] default bucket num to {}", olapTable.getName(), bucketNum); } @@ -4159,7 +4194,8 @@ public class Catalog { } } - public void replayModifyTableDefaultDistributionBucketNum(ModifyTableDefaultDistributionBucketNumOperationLog info) throws MetaNotFoundException { + public void replayModifyTableDefaultDistributionBucketNum(ModifyTableDefaultDistributionBucketNumOperationLog info) + throws MetaNotFoundException { long dbId = info.getDbId(); long tableId = info.getTableId(); int bucketNum = info.getBucketNum(); @@ -4576,9 +4612,10 @@ public class Catalog { // but it is wrong because we can not get replica from `tabletInvertedIndex` when doing checkpoint, // because when doing checkpoint, the tabletInvertedIndex is not initialized at all. // - // So we can only discard this information, in this case, it is equivalent to losing the record of these operations. - // But it doesn't matter, these records are currently only used to record whether a replica is in a bad state. - // This state has little effect on the system, and it can be restored after the system has processed the bad state replica. + // So we can only discard this information, in this case, it is equivalent to losing the record of these + // operations. But it doesn't matter, these records are currently only used to record whether a replica is + // in a bad state. This state has little effect on the system, and it can be restored after the system + // has processed the bad state replica. for (Pair tabletInfo : tabletsWithSchemaHash) { LOG.warn("find an old backendTabletsInfo for tablet {}, ignore it", tabletInfo.first); } @@ -4653,7 +4690,8 @@ public class Catalog { /* * The entry of replacing partitions with temp partitions. 
*/ - public void replaceTempPartition(Database db, OlapTable olapTable, ReplacePartitionClause clause) throws DdlException { + public void replaceTempPartition(Database db, OlapTable olapTable, ReplacePartitionClause clause) + throws DdlException { Preconditions.checkState(olapTable.isWriteLockHeldByCurrentThread()); List partitionNames = clause.getPartitionNames(); List tempPartitionNames = clause.getTempPartitionNames(); @@ -4680,7 +4718,8 @@ public class Catalog { clause.getPartitionNames(), clause.getTempPartitionNames(), olapTable.getName()); } - public void replayReplaceTempPartition(ReplacePartitionOperationLog replaceTempPartitionLog) throws MetaNotFoundException { + public void replayReplaceTempPartition(ReplacePartitionOperationLog replaceTempPartitionLog) + throws MetaNotFoundException { long dbId = replaceTempPartitionLog.getDbId(); long tableId = replaceTempPartitionLog.getTblId(); Database db = this.getDbOrMetaException(dbId); @@ -4765,7 +4804,8 @@ public class Catalog { setReplicaStatusInternal(log.getTabletId(), log.getBackendId(), log.getReplicaStatus(), true); } - private void setReplicaStatusInternal(long tabletId, long backendId, ReplicaStatus status, boolean isReplay) throws MetaNotFoundException { + private void setReplicaStatusInternal(long tabletId, long backendId, + ReplicaStatus status, boolean isReplay) throws MetaNotFoundException { try { TabletMeta meta = tabletInvertedIndex.getTabletMeta(tabletId); if (meta == null) { @@ -4782,7 +4822,8 @@ public class Catalog { if (status == ReplicaStatus.BAD || status == ReplicaStatus.OK) { if (replica.setBad(status == ReplicaStatus.BAD)) { if (!isReplay) { - SetReplicaStatusOperationLog log = new SetReplicaStatusOperationLog(backendId, tabletId, status); + SetReplicaStatusOperationLog log + = new SetReplicaStatusOperationLog(backendId, tabletId, status); getEditLog().logSetReplicaStatus(log); } LOG.info("set replica {} of tablet {} on backend {} as {}. is replay: {}", diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateGroupSchema.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateGroupSchema.java index ea2d8ca20f..617c08bc4d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateGroupSchema.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateGroupSchema.java @@ -45,7 +45,8 @@ public class ColocateGroupSchema implements Writable { } - public ColocateGroupSchema(GroupId groupId, List distributionCols, int bucketsNum, ReplicaAllocation replicaAlloc) { + public ColocateGroupSchema(GroupId groupId, List distributionCols, + int bucketsNum, ReplicaAllocation replicaAlloc) { this.groupId = groupId; this.distributionColTypes = distributionCols.stream().map(c -> c.getType()).collect(Collectors.toList()); this.bucketsNum = bucketsNum; diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Column.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Column.java index 7f75392090..02892573bc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Column.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Column.java @@ -83,7 +83,8 @@ public class Column implements Writable { @SerializedName(value = "children") private List children; // Define expr may exist in two forms, one is analyzed, and the other is not analyzed. - // Currently, analyzed define expr is only used when creating materialized views, so the define expr in RollupJob must be analyzed. 
+ // Currently, analyzed define expr is only used when creating materialized views, + // so the define expr in RollupJob must be analyzed. // In other cases, such as define expr in `MaterializedIndexMeta`, it may not be analyzed after being replayed. private Expr defineExpr; // use to define column in materialize view @SerializedName(value = "visible") @@ -123,6 +124,7 @@ public class Column implements Writable { String defaultValue, String comment) { this(name, type, isKey, aggregateType, isAllowNull, defaultValue, comment, true, null); } + public Column(String name, Type type, boolean isKey, AggregateType aggregateType, boolean isAllowNull, String defaultValue, String comment, boolean visible, DefaultValueExprDef defaultValueExprDef) { this.name = name; @@ -368,7 +370,8 @@ public class Column implements Writable { // And CreateReplicaTask does not need `defineExpr` field. // The `defineExpr` is only used when creating `TAlterMaterializedViewParam`, which is in `AlterReplicaTask`. // And when creating `TAlterMaterializedViewParam`, the `defineExpr` is certainly analyzed. - // If we need to use `defineExpr` and call defineExpr.treeToThrift(), make sure it is analyzed, or NPE will thrown. + // If we need to use `defineExpr` and call defineExpr.treeToThrift(), + // make sure it is analyzed, or an NPE will be thrown. return tColumn; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ColumnStats.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ColumnStats.java index 995d1603d5..8ba694b42d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ColumnStats.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ColumnStats.java @@ -36,7 +36,7 @@ import java.util.Objects; * Statistics for a single column. */ public class ColumnStats implements Writable { - private final static Logger LOG = LogManager.getLogger(ColumnStats.class); + private static final Logger LOG = LogManager.getLogger(ColumnStats.class); @SerializedName(value = "avgSerializedSize") private float avgSerializedSize; // in bytes; includes serialization overhead @@ -126,6 +126,7 @@ public class ColumnStats implements Writable { out.writeLong(maxSize); out.writeLong(numNulls); } + public void readFields(DataInput in) throws IOException { numDistinctValues = in.readLong(); avgSerializedSize = in.readFloat(); @@ -158,6 +159,7 @@ public class ColumnStats implements Writable { && (maxSize == stats.maxSize) && (numNulls == stats.numNulls); } + /** * For fixed-length type (those which don't need additional storage besides * the slot they occupy), sets avgSerializedSize and maxSize to their slot size. @@ -172,6 +174,7 @@ public class ColumnStats implements Writable { maxSize = colType.getSlotSize(); } } + /** * Creates ColumnStats from the given expr. Sets numDistinctValues and if the expr * is a SlotRef also numNulls.
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Database.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Database.java index 5607f3f59e..a94ebc8022 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Database.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Database.java @@ -325,7 +325,8 @@ public class Database extends MetaObject implements Writable, DatabaseIf } // return pair - public Pair createTableWithLock(Table table, boolean isReplay, boolean setIfNotExist) throws DdlException { + public Pair createTableWithLock( + Table table, boolean isReplay, boolean setIfNotExist) throws DdlException { boolean result = true; // if a table is already exists, then edit log won't be executed // some caller of this method may need to know this message @@ -484,7 +485,8 @@ public class Database extends MetaObject implements Writable, DatabaseIf
return Optional.ofNullable(getTableNullable(tableId)); } - public Table getTableOrException(String tableName, java.util.function.Function e) throws E { + public Table getTableOrException( + String tableName, java.util.function.Function e) throws E { Table table = getTableNullable(tableName); if (table == null) { throw e.apply(tableName); @@ -492,7 +494,8 @@ public class Database extends MetaObject implements Writable, DatabaseIf
return table; } - public Table getTableOrException(long tableId, java.util.function.Function e) throws E { + public Table getTableOrException( + long tableId, java.util.function.Function e) throws E { Table table = getTableNullable(tableId); if (table == null) { throw e.apply(tableId); @@ -512,7 +515,8 @@ public class Database extends MetaObject implements Writable, DatabaseIf
public Table getTableOrMetaException(String tableName, TableType tableType) throws MetaNotFoundException { Table table = getTableOrMetaException(tableName); if (table.getType() != tableType) { - throw new MetaNotFoundException("table type is not " + tableType + ", tableName=" + tableName + ", type=" + table.getType()); + throw new MetaNotFoundException("table type is not " + + tableType + ", tableName=" + tableName + ", type=" + table.getType()); } return table; } @@ -521,7 +525,8 @@ public class Database extends MetaObject implements Writable, DatabaseIf
public Table getTableOrMetaException(long tableId, TableType tableType) throws MetaNotFoundException { Table table = getTableOrMetaException(tableId); if (table.getType() != tableType) { - throw new MetaNotFoundException("table type is not " + tableType + ", tableId=" + tableId + ", type=" + table.getType()); + throw new MetaNotFoundException("table type is not " + tableType + + ", tableId=" + tableId + ", type=" + table.getType()); } return table; } @@ -543,8 +548,8 @@ public class Database extends MetaObject implements Writable, DatabaseIf
} public Table getTableOrAnalysisException(String tableName) throws AnalysisException { - return getTableOrException(tableName, t -> new AnalysisException(ErrorCode.ERR_UNKNOWN_TABLE.formatErrorMsg(t - , fullQualifiedName))); + return getTableOrException(tableName, + t -> new AnalysisException(ErrorCode.ERR_UNKNOWN_TABLE.formatErrorMsg(t, fullQualifiedName))); } public OlapTable getOlapTableOrAnalysisException(String tableName) throws AnalysisException { @@ -556,7 +561,8 @@ public class Database extends MetaObject implements Writable, DatabaseIf
} public Table getTableOrAnalysisException(long tableId) throws AnalysisException { - return getTableOrException(tableId, t -> new AnalysisException(ErrorCode.ERR_BAD_TABLE_ERROR.formatErrorMsg(t))); + return getTableOrException(tableId, + t -> new AnalysisException(ErrorCode.ERR_BAD_TABLE_ERROR.formatErrorMsg(t))); } public int getMaxReplicationNum() { @@ -571,7 +577,8 @@ public class Database extends MetaObject implements Writable, DatabaseIf
table.readLock(); try { for (Partition partition : olapTable.getAllPartitions()) { - short replicationNum = olapTable.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum(); + short replicationNum = olapTable.getPartitionInfo() + .getReplicaAllocation(partition.getId()).getTotalReplicaNum(); if (ret < replicationNum) { ret = replicationNum; } @@ -880,7 +887,8 @@ public class Database extends MetaObject implements Writable, DatabaseIf
if (!isReplay) { if (existKey != null) { if (existKey.isIdentical(encryptKey)) { - throw new UserException("encryptKey [" + existKey.getEncryptKeyName().toString() + "] already exists"); + throw new UserException("encryptKey [" + + existKey.getEncryptKeyName().toString() + "] already exists"); } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/DynamicPartitionProperty.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/DynamicPartitionProperty.java index 83ba5b67a6..0756bc0e85 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/DynamicPartitionProperty.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/DynamicPartitionProperty.java @@ -86,9 +86,11 @@ public class DynamicPartitionProperty { this.buckets = Integer.parseInt(properties.get(BUCKETS)); this.replicaAlloc = analyzeReplicaAllocation(properties); this.createHistoryPartition = Boolean.parseBoolean(properties.get(CREATE_HISTORY_PARTITION)); - this.historyPartitionNum = Integer.parseInt(properties.getOrDefault(HISTORY_PARTITION_NUM, String.valueOf(NOT_SET_HISTORY_PARTITION_NUM))); + this.historyPartitionNum = Integer.parseInt(properties.getOrDefault( + HISTORY_PARTITION_NUM, String.valueOf(NOT_SET_HISTORY_PARTITION_NUM))); this.hotPartitionNum = Integer.parseInt(properties.getOrDefault(HOT_PARTITION_NUM, "0")); - this.reservedHistoryPeriods = properties.getOrDefault(RESERVED_HISTORY_PERIODS, NOT_SET_RESERVED_HISTORY_PERIODS); + this.reservedHistoryPeriods = properties.getOrDefault( + RESERVED_HISTORY_PERIODS, NOT_SET_RESERVED_HISTORY_PERIODS); createStartOfs(properties); } else { this.exist = false; diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/EsTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/EsTable.java index 6981d62857..f34c8b69cb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/EsTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/EsTable.java @@ -93,12 +93,12 @@ public class EsTable extends Table { private boolean httpSslEnabled = false; // Solr doc_values vs stored_fields performance-smackdown indicate: - // It is possible to notice that retrieving an high number of fields leads - // to a sensible worsening of performance if DocValues are used. - // Instead, the (almost) surprising thing is that, by returning less than 20 fields, - // DocValues performs better than stored fields and the difference gets little as the number of fields returned increases. - // Asking for 9 DocValues fields and 1 stored field takes an average query time is 6.86 (more than returning 10 stored fields) - // Here we have a slightly conservative value of 20, but at the same time we also provide configurable parameters for expert-using + // It is possible to notice that retrieving an high number of fields leads to a sensible worsening of performance + // if DocValues are used. Instead, the (almost) surprising thing is that, by returning less than 20 fields, + // DocValues performs better than stored fields and the difference gets little as the number of fields + // returned increases. 
Asking for 9 DocValues fields and 1 stored field takes an average query time of 6.86 + // (more than returning 10 stored fields) Here we have a slightly conservative value of 20, but at the same time + // we also provide configurable parameters for expert-using // @see `MAX_DOCVALUE_FIELDS` private static final int DEFAULT_MAX_DOCVALUE_FIELDS = 20; @@ -230,8 +230,8 @@ public class EsTable extends Table { && !Strings.isNullOrEmpty(properties.get(TRANSPORT).trim())) { transport = properties.get(TRANSPORT).trim(); if (!(TRANSPORT_HTTP.equals(transport) || TRANSPORT_THRIFT.equals(transport))) { - throw new DdlException("transport of ES table must be http/https(recommend) or thrift(reserved inner usage)," + " but value is " + transport); + throw new DdlException("transport of ES table must be http/https(recommend)" + + " or thrift(reserved inner usage), but value is " + transport); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Function.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Function.java index 7e609c5070..9079deaa2d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Function.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Function.java @@ -133,7 +133,8 @@ public class Function implements Writable { this(0, name, args, retType, varArgs, vectorized, NullableMode.DEPEND_ON_ARGUMENT); } - public Function(FunctionName name, List args, Type retType, boolean varArgs, boolean vectorized, NullableMode mode) { + public Function(FunctionName name, List args, Type retType, + boolean varArgs, boolean vectorized, NullableMode mode) { this(0, name, args, retType, varArgs, vectorized, mode); } @@ -606,6 +607,7 @@ public class Function implements Writable { FunctionType(int code) { this.code = code; } + public int getCode() { return code; } @@ -627,10 +629,11 @@ public class Function implements Writable { public void write(DataOutput output) throws IOException { output.writeInt(code); } + public static FunctionType read(DataInput input) throws IOException { return fromCode(input.readInt()); } - }; + } protected void writeFields(DataOutput output) throws IOException { output.writeLong(id); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSet.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSet.java index 5b263589c3..9f8e978fe2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSet.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSet.java @@ -1312,6 +1312,7 @@ public class FunctionSet { public static final String COUNT = "count"; public static final String WINDOW_FUNNEL = "window_funnel"; + // Populate all the aggregate builtins in the catalog. // null symbols indicate the function does not need that step of the evaluation. // An empty symbol indicates a TODO for the BE to implement the function.
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/HashDistributionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/HashDistributionInfo.java index d5cac299f8..80aced6f0a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/HashDistributionInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/HashDistributionInfo.java @@ -73,6 +73,7 @@ public class HashDistributionInfo extends DistributionInfo { } out.writeInt(bucketNum); } + public void readFields(DataInput in) throws IOException { super.readFields(in); int columnCount = in.readInt(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java index 1b46d40e75..46f1ea0b63 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java @@ -37,7 +37,6 @@ import org.apache.doris.thrift.TBrokerFileStatus; import org.apache.doris.thrift.TExprOpcode; import com.google.common.base.Strings; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -200,7 +199,8 @@ public class HiveMetaStoreClientHelper { brokerFileStatus.setIsSplitable(true); brokerFileStatus.setSize(fileStatus.getLen()); // path = "/path/to/partition/file_name" - // eg: /home/work/dev/hive/apache-hive-2.3.7-bin/data/warehouse/dae.db/customer/state=CA/city=SanJose/000000_0 + // eg: /home/work/dev/hive/apache-hive-2.3.7-bin/data/warehouse + // + /dae.db/customer/state=CA/city=SanJose/000000_0 String path = fileStatus.getPath().toUri().getPath(); if (onS3) { // Backend need full s3 path (with s3://bucket at the beginning) to read the data on s3. 
@@ -305,7 +305,7 @@ public class HiveMetaStoreClientHelper { configuration.set(entry.getKey(), entry.getValue()); } if (entry.getKey().equals(BrokerUtil.HADOOP_SECURITY_AUTHENTICATION) - && entry.getValue().equals(AuthType.KERBEROS.getDesc())) { + && entry.getValue().equals(AuthType.KERBEROS.getDesc())) { isSecurityEnabled = true; } } @@ -319,7 +319,7 @@ public class HiveMetaStoreClientHelper { UserGroupInformation.setConfiguration(configuration); // login user from keytab UserGroupInformation.loginUserFromKeytab(properties.get(BrokerUtil.HADOOP_KERBEROS_PRINCIPAL), - properties.get(BrokerUtil.HADOOP_KERBEROS_KEYTAB)); + properties.get(BrokerUtil.HADOOP_KERBEROS_KEYTAB)); } FileSystem fileSystem = path.getFileSystem(configuration); iterators.add(fileSystem.listLocatedStatus(path)); @@ -407,7 +407,8 @@ public class HiveMetaStoreClientHelper { * @throws DdlException * @throws SemanticException */ - public static ExprNodeGenericFuncDesc convertToHivePartitionExpr(Expr dorisExpr, List partitions, String tblName) throws DdlException { + public static ExprNodeGenericFuncDesc convertToHivePartitionExpr(Expr dorisExpr, + List partitions, String tblName) throws DdlException { if (dorisExpr == null) { return null; } @@ -416,8 +417,10 @@ public class HiveMetaStoreClientHelper { CompoundPredicate compoundPredicate = (CompoundPredicate) dorisExpr; switch (compoundPredicate.getOp()) { case AND: { - ExprNodeGenericFuncDesc left = convertToHivePartitionExpr(compoundPredicate.getChild(0), partitions, tblName); - ExprNodeGenericFuncDesc right = convertToHivePartitionExpr(compoundPredicate.getChild(0), partitions, tblName); + ExprNodeGenericFuncDesc left = convertToHivePartitionExpr( + compoundPredicate.getChild(0), partitions, tblName); + ExprNodeGenericFuncDesc right = convertToHivePartitionExpr( + compoundPredicate.getChild(0), partitions, tblName); if (left != null && right != null) { List andArgs = new ArrayList<>(); andArgs.add(left); @@ -431,8 +434,10 @@ public class HiveMetaStoreClientHelper { return null; } case OR: { - ExprNodeGenericFuncDesc left = convertToHivePartitionExpr(compoundPredicate.getChild(0), partitions, tblName); - ExprNodeGenericFuncDesc right = convertToHivePartitionExpr(compoundPredicate.getChild(0), partitions, tblName); + ExprNodeGenericFuncDesc left = convertToHivePartitionExpr( + compoundPredicate.getChild(0), partitions, tblName); + ExprNodeGenericFuncDesc right = convertToHivePartitionExpr( + compoundPredicate.getChild(0), partitions, tblName); if (left != null && right != null) { List orArgs = new ArrayList<>(); orArgs.add(left); @@ -587,6 +592,7 @@ public class HiveMetaStoreClientHelper { } return null; } + /** * Convert from Doris column type to Hive column type * @param dorisType diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveTable.java index 3736548948..b7d3ee013f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveTable.java @@ -39,8 +39,11 @@ import java.util.Map; * Currently only support loading from hive table */ public class HiveTable extends Table { - private static final String PROPERTY_MISSING_MSG = "Hive %s is null. Please add properties('%s'='xxx') when create table"; - private static final String PROPERTY_ERROR_MSG = "Hive table properties('%s'='%s') is illegal or not supported. Please check it"; + + private static final String PROPERTY_MISSING_MSG = "Hive %s is null. 
Please add properties('%s'='xxx')" + + " when create table"; + private static final String PROPERTY_ERROR_MSG = "Hive table properties('%s'='%s')" + + " is illegal or not supported. Please check it"; private String hiveDb; private String hiveTable; @@ -100,7 +103,7 @@ public class HiveTable extends Table { copiedProps.remove(HIVE_TABLE); // check hive properties - // hive.metastore.uris + // hive.metastore.uris String hiveMetaStoreUris = copiedProps.get(HIVE_METASTORE_URIS); if (Strings.isNullOrEmpty(hiveMetaStoreUris)) { throw new DdlException(String.format(PROPERTY_MISSING_MSG, HIVE_METASTORE_URIS, HIVE_METASTORE_URIS)); @@ -114,7 +117,8 @@ public class HiveTable extends Table { authType = AuthType.SIMPLE.getDesc(); } if (!AuthType.isSupportedAuthType(authType)) { - throw new DdlException(String.format(PROPERTY_ERROR_MSG, BrokerUtil.HADOOP_SECURITY_AUTHENTICATION, authType)); + throw new DdlException(String.format(PROPERTY_ERROR_MSG, + BrokerUtil.HADOOP_SECURITY_AUTHENTICATION, authType)); } copiedProps.remove(BrokerUtil.HADOOP_SECURITY_AUTHENTICATION); hiveProperties.put(BrokerUtil.HADOOP_SECURITY_AUTHENTICATION, authType); @@ -123,23 +127,25 @@ public class HiveTable extends Table { // check principal String principal = copiedProps.get(BrokerUtil.HADOOP_KERBEROS_PRINCIPAL); if (Strings.isNullOrEmpty(principal)) { - throw new DdlException(String.format(PROPERTY_MISSING_MSG, BrokerUtil.HADOOP_KERBEROS_PRINCIPAL, BrokerUtil.HADOOP_KERBEROS_PRINCIPAL)); + throw new DdlException(String.format(PROPERTY_MISSING_MSG, + BrokerUtil.HADOOP_KERBEROS_PRINCIPAL, BrokerUtil.HADOOP_KERBEROS_PRINCIPAL)); } hiveProperties.put(BrokerUtil.HADOOP_KERBEROS_PRINCIPAL, principal); copiedProps.remove(BrokerUtil.HADOOP_KERBEROS_PRINCIPAL); // check keytab String keytabPath = copiedProps.get(BrokerUtil.HADOOP_KERBEROS_KEYTAB); if (Strings.isNullOrEmpty(keytabPath)) { - throw new DdlException(String.format(PROPERTY_MISSING_MSG, BrokerUtil.HADOOP_KERBEROS_KEYTAB, BrokerUtil.HADOOP_KERBEROS_KEYTAB)); + throw new DdlException(String.format(PROPERTY_MISSING_MSG, + BrokerUtil.HADOOP_KERBEROS_KEYTAB, BrokerUtil.HADOOP_KERBEROS_KEYTAB)); } if (!Strings.isNullOrEmpty(keytabPath)) { hiveProperties.put(BrokerUtil.HADOOP_KERBEROS_KEYTAB, keytabPath); copiedProps.remove(BrokerUtil.HADOOP_KERBEROS_KEYTAB); } } - String HDFSUserName = copiedProps.get(BrokerUtil.HADOOP_USER_NAME); - if (!Strings.isNullOrEmpty(HDFSUserName)) { - hiveProperties.put(BrokerUtil.HADOOP_USER_NAME, HDFSUserName); + String hdfsUserName = copiedProps.get(BrokerUtil.HADOOP_USER_NAME); + if (!Strings.isNullOrEmpty(hdfsUserName)) { + hiveProperties.put(BrokerUtil.HADOOP_USER_NAME, hdfsUserName); copiedProps.remove(BrokerUtil.HADOOP_USER_NAME); } if (!copiedProps.isEmpty()) { @@ -189,7 +195,7 @@ public class HiveTable extends Table { public TTableDescriptor toThrift() { THiveTable tHiveTable = new THiveTable(getHiveDb(), getHiveTable(), getHiveProperties()); TTableDescriptor tTableDescriptor = new TTableDescriptor(getId(), TTableType.HIVE_TABLE, - fullSchema.size(), 0, getName(), ""); + fullSchema.size(), 0, getName(), ""); tTableDescriptor.setHiveTable(tHiveTable); return tTableDescriptor; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ListPartitionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ListPartitionInfo.java index afa6562f86..8198fbe760 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ListPartitionInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ListPartitionInfo.java @@ -69,8 +69,8 
@@ public class ListPartitionInfo extends PartitionInfo { PartitionKey partitionKey = PartitionKey.createListPartitionKey(values, partitionColumns); checkNewPartitionKey(partitionKey, partitionKeyDesc, isTemp); if (partitionKeys.contains(partitionKey)) { - throw new AnalysisException("The partition key[" + partitionKeyDesc.toSql() + "] has duplicate item [" - + partitionKey.toSql() + "]."); + throw new AnalysisException("The partition key[" + + partitionKeyDesc.toSql() + "] has duplicate item [" + partitionKey.toSql() + "]."); } partitionKeys.add(partitionKey); } @@ -80,7 +80,8 @@ public class ListPartitionInfo extends PartitionInfo { return new ListPartitionItem(partitionKeys); } - private void checkNewPartitionKey(PartitionKey newKey, PartitionKeyDesc keyDesc, boolean isTemp) throws AnalysisException { + private void checkNewPartitionKey(PartitionKey newKey, PartitionKeyDesc keyDesc, + boolean isTemp) throws AnalysisException { Map id2Item = idToItem; if (isTemp) { id2Item = idToTempItem; @@ -103,7 +104,8 @@ public class ListPartitionInfo extends PartitionInfo { } @Override - public void checkPartitionItemListsConflict(List list1, List list2) throws DdlException { + public void checkPartitionItemListsConflict(List list1, + List list2) throws DdlException { ListUtil.checkListsConflict(list1, list2); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/MapType.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/MapType.java index 4547539bb0..1a746a2374 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/MapType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/MapType.java @@ -30,10 +30,12 @@ import com.google.common.base.Strings; public class MapType extends Type { private final Type keyType; private final Type valueType; + public MapType() { this.keyType = NULL; this.valueType = NULL; } + public MapType(Type keyType, Type valueType) { Preconditions.checkNotNull(keyType); Preconditions.checkNotNull(valueType); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/MetadataViewer.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/MetadataViewer.java index 887af28a74..f8babe93b9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/MetadataViewer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/MetadataViewer.java @@ -71,7 +71,8 @@ public class MetadataViewer { for (String partName : partitions) { Partition partition = olapTable.getPartition(partName); long visibleVersion = partition.getVisibleVersion(); - short replicationNum = olapTable.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum(); + short replicationNum = olapTable.getPartitionInfo() + .getReplicaAllocation(partition.getId()).getTotalReplicaNum(); for (MaterializedIndex index : partition.getMaterializedIndices(IndexExtState.VISIBLE)) { int schemaHash = olapTable.getSchemaHashByIndexId(index.getId()); @@ -158,7 +159,8 @@ public class MetadataViewer { return getTabletDistribution(stmt.getDbName(), stmt.getTblName(), stmt.getPartitionNames()); } - private static List> getTabletDistribution(String dbName, String tblName, PartitionNames partitionNames) + private static List> getTabletDistribution( + String dbName, String tblName, PartitionNames partitionNames) throws DdlException { DecimalFormat df = new DecimalFormat("00.00 %"); @@ -209,7 +211,8 @@ public class MetadataViewer { continue; } countMap.put(replica.getBackendId(), countMap.get(replica.getBackendId()) + 1); - sizeMap.put(replica.getBackendId(), 
sizeMap.get(replica.getBackendId()) + replica.getDataSize()); + sizeMap.put(replica.getBackendId(), + sizeMap.get(replica.getBackendId()) + replica.getDataSize()); totalReplicaNum++; totalReplicaSize += replica.getDataSize(); } @@ -225,9 +228,11 @@ public class MetadataViewer { row.add(String.valueOf(countMap.get(beId))); row.add(String.valueOf(sizeMap.get(beId))); row.add(graph(countMap.get(beId), totalReplicaNum)); - row.add(totalReplicaNum == countMap.get(beId) ? "100.00%" : df.format((double) countMap.get(beId) / totalReplicaNum)); + row.add(totalReplicaNum == countMap.get(beId) + ? "100.00%" : df.format((double) countMap.get(beId) / totalReplicaNum)); row.add(graph(sizeMap.get(beId), totalReplicaSize)); - row.add(totalReplicaSize == sizeMap.get(beId) ? "100.00%" : df.format((double) sizeMap.get(beId) / totalReplicaSize)); + row.add(totalReplicaSize == sizeMap.get(beId) + ? "100.00%" : df.format((double) sizeMap.get(beId) / totalReplicaSize)); result.add(row); } @@ -299,7 +304,8 @@ public class MetadataViewer { row.add(String.valueOf(i)); row.add(tabletInfos.get(i).toString()); row.add(graph(tabletInfos.get(i), totalSize)); - row.add(totalSize == tabletInfos.get(i) ? "100.00%" : df.format((double) tabletInfos.get(i) / totalSize)); + row.add(totalSize == tabletInfos.get(i) + ? "100.00%" : df.format((double) tabletInfos.get(i) / totalSize)); result.add(row); } } finally { diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/MysqlTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/MysqlTable.java index d1332d3c33..eacef3de26 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/MysqlTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/MysqlTable.java @@ -158,7 +158,8 @@ public class MysqlTable extends Table { String property = odbcCatalogResource.getProperty(propertyName); if (property == null) { - throw new RuntimeException("The property:" + propertyName + " do not set in resource " + odbcCatalogResourceName); + throw new RuntimeException("The property:" + propertyName + + " do not set in resource " + odbcCatalogResourceName); } return property; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcTable.java index 0fa55265d5..a482553595 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcTable.java @@ -232,10 +232,12 @@ public class OdbcTable extends Table { String property = odbcCatalogResource.getProperty(propertyName); if (property == null) { - throw new RuntimeException("The property:" + propertyName + " do not set in resource " + odbcCatalogResourceName); + throw new RuntimeException("The property:" + propertyName + + " do not set in resource " + odbcCatalogResourceName); } return property; } + public String getExtraParameter(Map extraMap) { if (extraMap == null || extraMap.isEmpty()) { return ""; @@ -252,6 +254,7 @@ public class OdbcTable extends Table { } return getExtraParameter(resourceProperties); } + public String getOdbcCatalogResourceName() { return odbcCatalogResourceName; } @@ -336,7 +339,8 @@ public class OdbcTable extends Table { getCharset()); break; case POSTGRESQL: - connectString = String.format("Driver=%s;Server=%s;Port=%s;DataBase=%s;Uid=%s;Pwd=%s;charset=%s;UseDeclareFetch=1;Fetch=4096", + connectString = String.format("Driver=%s;Server=%s;Port=%s;DataBase=%s;" + + "Uid=%s;Pwd=%s;charset=%s;UseDeclareFetch=1;Fetch=4096", getOdbcDriver(), getHost(), 
getPort(), @@ -346,7 +350,8 @@ public class OdbcTable extends Table { getCharset()); break; case MYSQL: - connectString = String.format("Driver=%s;Server=%s;Port=%s;DataBase=%s;Uid=%s;Pwd=%s;charset=%s;forward_cursor=1;no_cache=1", + connectString = String.format("Driver=%s;Server=%s;Port=%s;DataBase=%s;" + + "Uid=%s;Pwd=%s;charset=%s;forward_cursor=1;no_cache=1", getOdbcDriver(), getHost(), getPort(), diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java index 25eb7f65db..a117762274 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java @@ -340,8 +340,8 @@ public class OlapTable extends Table { this.indexIdToMeta.remove(indexId); // Some column of deleted index should be removed during `deleteIndexInfo` such as `mv_bitmap_union_c1` // If deleted index id == base index id, the schema will not be rebuilt. - // The reason is that the base index has been removed from indexIdToMeta while the new base index hasn't changed. - // The schema could not be rebuild in here with error base index id. + // The reason is that the base index has been removed from indexIdToMeta while the new base index + // hasn't changed. The schema could not be rebuild in here with error base index id. if (indexId != baseIndexId) { rebuildFullSchema(); } @@ -643,10 +643,12 @@ public class OlapTable extends Table { return partitionColumnNames; } else if (partitionInfo instanceof RangePartitionInfo) { RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo; - return rangePartitionInfo.getPartitionColumns().stream().map(c -> c.getName().toLowerCase()).collect(Collectors.toSet()); + return rangePartitionInfo.getPartitionColumns().stream() + .map(c -> c.getName().toLowerCase()).collect(Collectors.toSet()); } else if (partitionInfo instanceof ListPartitionInfo) { ListPartitionInfo listPartitionInfo = (ListPartitionInfo) partitionInfo; - return listPartitionInfo.getPartitionColumns().stream().map(c -> c.getName().toLowerCase()).collect(Collectors.toSet()); + return listPartitionInfo.getPartitionColumns().stream() + .map(c -> c.getName().toLowerCase()).collect(Collectors.toSet()); } else { throw new DdlException("Unknown partition info type: " + partitionInfo.getType().name()); } @@ -1255,7 +1257,8 @@ public class OlapTable extends Table { } // remove shadow index from copied table - List shadowIndex = copied.getPartitions().stream().findFirst().get().getMaterializedIndices(IndexExtState.SHADOW); + List shadowIndex = copied.getPartitions().stream().findFirst() + .get().getMaterializedIndices(IndexExtState.SHADOW); for (MaterializedIndex deleteIndex : shadowIndex) { LOG.debug("copied table delete shadow index : {}", deleteIndex.getId()); copied.deleteIndexInfo(copied.getIndexNameById(deleteIndex.getId())); @@ -1292,7 +1295,8 @@ public class OlapTable extends Table { partNames.addAll(copied.getPartitionNames()); // partition name is case insensitive: - Set lowerReservedPartitionNames = reservedPartitions.stream().map(String::toLowerCase).collect(Collectors.toSet()); + Set lowerReservedPartitionNames = reservedPartitions.stream() + .map(String::toLowerCase).collect(Collectors.toSet()); for (String partName : partNames) { if (!lowerReservedPartitionNames.contains(partName.toLowerCase())) { copied.dropPartitionAndReserveTablet(partName); @@ -1530,7 +1534,8 @@ public class OlapTable extends Table { if (tableProperty == null) { 
tableProperty = new TableProperty(new HashMap<>()); } - tableProperty.modifyTableProperties(PropertyAnalyzer.PROPERTIES_INMEMORY, Boolean.valueOf(isInMemory).toString()); + tableProperty.modifyTableProperties(PropertyAnalyzer.PROPERTIES_INMEMORY, + Boolean.valueOf(isInMemory).toString()); tableProperty.buildInMemory(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java index bee4c382a0..9d95b14b61 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java @@ -197,7 +197,8 @@ public class PartitionInfo implements Writable { public void checkPartitionItemListsMatch(List list1, List list2) throws DdlException { } - public void checkPartitionItemListsConflict(List list1, List list2) throws DdlException { + public void checkPartitionItemListsConflict(List list1, + List list2) throws DdlException { } public DataProperty getDataProperty(long partitionId) { @@ -282,8 +283,8 @@ public class PartitionInfo implements Writable { } } - public void resetPartitionIdForRestore(long newPartitionId, long oldPartitionId, ReplicaAllocation restoreReplicaAlloc, - boolean isSinglePartitioned) { + public void resetPartitionIdForRestore(long newPartitionId, long oldPartitionId, + ReplicaAllocation restoreReplicaAlloc, boolean isSinglePartitioned) { idToDataProperty.put(newPartitionId, idToDataProperty.remove(oldPartitionId)); idToReplicaAllocation.remove(oldPartitionId); idToReplicaAllocation.put(newPartitionId, restoreReplicaAlloc); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/PrimitiveType.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/PrimitiveType.java index a953386630..4c5c4c4c6f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/PrimitiveType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/PrimitiveType.java @@ -353,9 +353,11 @@ public enum PrimitiveType { public static ArrayList getIntegerTypes() { return integerTypes; } + public static ArrayList getNumericTypes() { return numericTypes; } + public static ArrayList getSupportedTypes() { return supportedTypes; } @@ -400,8 +402,10 @@ public enum PrimitiveType { compatibilityMatrix[NULL_TYPE.ordinal()][STRING.ordinal()] = STRING; compatibilityMatrix[NULL_TYPE.ordinal()][DECIMALV2.ordinal()] = DECIMALV2; compatibilityMatrix[NULL_TYPE.ordinal()][TIME.ordinal()] = TIME; - compatibilityMatrix[NULL_TYPE.ordinal()][BITMAP.ordinal()] = BITMAP; //TODO(weixiang): bitmap can be null? - compatibilityMatrix[NULL_TYPE.ordinal()][QUANTILE_STATE.ordinal()] = QUANTILE_STATE; //TODO(weixiang): QUANTILE_STATE can be null? + //TODO(weixiang): bitmap can be null? + compatibilityMatrix[NULL_TYPE.ordinal()][BITMAP.ordinal()] = BITMAP; + //TODO(weixiang): QUANTILE_STATE can be null? 
+ compatibilityMatrix[NULL_TYPE.ordinal()][QUANTILE_STATE.ordinal()] = QUANTILE_STATE; compatibilityMatrix[BOOLEAN.ordinal()][BOOLEAN.ordinal()] = BOOLEAN; compatibilityMatrix[BOOLEAN.ordinal()][TINYINT.ordinal()] = TINYINT; diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/RandomDistributionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/RandomDistributionInfo.java index 5a9589b43d..168b1f9f33 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/RandomDistributionInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/RandomDistributionInfo.java @@ -62,6 +62,7 @@ public class RandomDistributionInfo extends DistributionInfo { super.write(out); out.writeInt(bucketNum); } + public void readFields(DataInput in) throws IOException { super.readFields(in); bucketNum = in.readInt(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java index 090c78c4e2..91d15db09f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java @@ -79,7 +79,8 @@ public class RangePartitionInfo extends PartitionInfo { // create a new range and check it. private Range createAndCheckNewRange(PartitionKeyDesc partKeyDesc, boolean isTemp) throws AnalysisException, DdlException { - boolean isFixedPartitionKeyValueType = partKeyDesc.getPartitionType() == PartitionKeyDesc.PartitionKeyValueType.FIXED; + boolean isFixedPartitionKeyValueType + = partKeyDesc.getPartitionType() == PartitionKeyDesc.PartitionKeyValueType.FIXED; // generate partitionItemEntryList List> partitionItemEntryList = isFixedPartitionKeyValueType @@ -122,7 +123,7 @@ public class RangePartitionInfo extends PartitionInfo { } private Range createNewRangeForFixedPartitionValueType(PartitionKeyDesc partKeyDesc, - List> partitionItemEntryList) + List> partitionItemEntryList) throws AnalysisException, DdlException { PartitionKey lowKey = PartitionKey.createPartitionKey(partKeyDesc.getLowerValues(), partitionColumns); PartitionKey upperKey = PartitionKey.createPartitionKey(partKeyDesc.getUpperValues(), partitionColumns); @@ -169,7 +170,8 @@ public class RangePartitionInfo extends PartitionInfo { } @Override - public void checkPartitionItemListsConflict(List list1, List list2) throws DdlException { + public void checkPartitionItemListsConflict(List list1, List list2) + throws DdlException { RangeUtils.checkRangeConflict(list1, list2); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Replica.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Replica.java index 99c37a0fe7..665cd26482 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Replica.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Replica.java @@ -255,7 +255,8 @@ public class Replica implements Writable { updateReplicaInfo(newVersion, this.lastFailedVersion, this.lastSuccessVersion, newDataSize, newRowCount); } - public synchronized void updateVersionWithFailedInfo(long newVersion, long lastFailedVersion, long lastSuccessVersion) { + public synchronized void updateVersionWithFailedInfo( + long newVersion, long lastFailedVersion, long lastSuccessVersion) { updateReplicaInfo(newVersion, lastFailedVersion, lastSuccessVersion, dataSize, rowCount); } @@ -292,11 +293,13 @@ public class Replica implements Writable { if (newVersion < this.version) { // This case means that replica meta version has been updated by 
ReportHandler before - // For example, the publish version daemon has already sent some publish version tasks to one be to publish version 2, 3, 4, 5, 6, - // and the be finish all publish version tasks, the be's replica version is 6 now, but publish version daemon need to wait + // For example, the publish version daemon has already sent some publish version tasks + // to one be to publish version 2, 3, 4, 5, 6, and the be finish all publish version tasks, + // the be's replica version is 6 now, but publish version daemon need to wait // for other be to finish most of publish version tasks to update replica version in fe. - // At the moment, the replica version in fe is 4, when ReportHandler sync tablet, it find reported replica version in be is 6 and then - // set version to 6 for replica in fe. And then publish version daemon try to finish txn, and use visible version(5) + // At the moment, the replica version in fe is 4, when ReportHandler sync tablet, + // it find reported replica version in be is 6 and then set version to 6 for replica in fe. + // And then publish version daemon try to finish txn, and use visible version(5) // to update replica. Finally, it find the newer version(5) is lower than replica version(6) in fe. if (LOG.isDebugEnabled()) { LOG.debug("replica {} on backend {}'s new version {} is lower than meta version {}," @@ -365,8 +368,10 @@ public class Replica implements Writable { /* * Check whether the replica's version catch up with the expected version. - * If ignoreAlter is true, and state is ALTER, and replica's version is PARTITION_INIT_VERSION, just return true, ignore the version. - * This is for the case that when altering table, the newly created replica's version is PARTITION_INIT_VERSION, + * If ignoreAlter is true, and state is ALTER, and replica's version is + * PARTITION_INIT_VERSION, just return true, ignore the version. + * This is for the case that when altering table, + * the newly created replica's version is PARTITION_INIT_VERSION, * but we need to treat it as a "normal" replica which version is supposed to be "catch-up". * But if state is ALTER but version larger than PARTITION_INIT_VERSION, which means this replica * is already updated by load process, so we need to consider its version. 
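The wrapping applied throughout these hunks follows one convention: when a statement exceeds the configured line-length limit, it is broken before an operator (`+`, `?`, `:`, `=`, `||`, `&&`), and the operator starts the continuation line with an extra level of indentation. A minimal sketch of that style is below; the class and values are illustrative only and not taken from the Doris code base, and the exact line-length limit and `OperatorWrap` configuration are assumptions rather than facts read from this patch.
```
// Illustrative only: demonstrates the wrapping style used in the hunks above, not real Doris code.
public class WrapStyleExample {

    // Long concatenations break before '+', so the operator leads the continuation line.
    public String duplicateKeyMessage(String keyDesc, String item) {
        return "The partition key[" + keyDesc
                + "] has duplicate item [" + item + "].";
    }

    // Ternaries wrap the same way: '?' (and ':' if needed) starts its own line.
    public String priority(int stable, int replicationNum) {
        return stable < (replicationNum / 2) + 1
                ? "NORMAL" : "LOW";
    }

    // Overflowing parameter lists move the remaining parameters to a continuation line.
    public boolean isCatchUp(long version, long expectedVersion,
            boolean ignoreAlter) {
        return ignoreAlter || version >= expectedVersion;
    }
}
```
Breaking before the operator rather than after it is what checkstyle's `OperatorWrap` check enforces in its default `nl` mode; whether this project enables exactly that option is an assumption here.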
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Resource.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Resource.java index d2c2e7f48f..fac6a30e45 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Resource.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Resource.java @@ -137,6 +137,7 @@ public abstract class Resource implements Writable { public abstract Map getCopiedProperties(); + /** * Fill BaseProcResult with different properties in child resources * ResourceMgr.RESOURCE_PROC_NODE_TITLE_NAMES format: diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ResourceGroup.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ResourceGroup.java index 9d1b3c43e9..7c934570b8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ResourceGroup.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ResourceGroup.java @@ -75,6 +75,7 @@ public class ResourceGroup implements Writable { public Map getQuotaMap() { return quotaByType; } + public static Builder builder() { return new Builder(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarFunction.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarFunction.java index c87fd65481..e716fb65e2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarFunction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarFunction.java @@ -70,7 +70,8 @@ public class ScalarFunction extends Function { public ScalarFunction(FunctionName fnName, List argTypes, Type retType, boolean hasVarArgs, TFunctionBinaryType binaryType, boolean userVisible, boolean isVec) { - super(0, fnName, argTypes, retType, hasVarArgs, binaryType, userVisible, isVec, NullableMode.DEPEND_ON_ARGUMENT); + super(0, fnName, argTypes, retType, hasVarArgs, binaryType, userVisible, isVec, + NullableMode.DEPEND_ON_ARGUMENT); } public ScalarFunction(FunctionName fnName, List argTypes, diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarType.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarType.java index bb9a7a9520..11202c8a56 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarType.java @@ -360,9 +360,11 @@ public class ScalarType extends Type { break; case DECIMALV2: if (Strings.isNullOrEmpty(precisionStr)) { - stringBuilder.append("decimal").append("(").append(precision).append(", ").append(scale).append(")"); + stringBuilder.append("decimal").append("(").append(precision) + .append(", ").append(scale).append(")"); } else if (!Strings.isNullOrEmpty(precisionStr) && !Strings.isNullOrEmpty(scaleStr)) { - stringBuilder.append("decimal").append("(`").append(precisionStr).append("`, `").append(scaleStr).append("`)"); + stringBuilder.append("decimal").append("(`").append(precisionStr) + .append("`, `").append(scaleStr).append("`)"); } else { stringBuilder.append("decimal").append("(`").append(precisionStr).append("`)"); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java index a053aa5809..ef04f1e58e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java @@ -42,6 +42,7 @@ public class SchemaTable extends Table { private static final int GRANTEE_len = 81; private static final int PRIVILEGE_TYPE_LEN = 64; private static final int IS_GRANTABLE_LEN = 3; + // Now we 
just mock tables, table_privileges, referential_constraints, key_column_usage and routines table // Because in MySQL ODBC, these tables are used. // TODO(zhaochun): Review some commercial BI to check if we need support where clause in show statement diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/SparkResource.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/SparkResource.java index 1a95031e3e..01b1e0488b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/SparkResource.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/SparkResource.java @@ -162,8 +162,11 @@ public class SparkResource extends Resource { Map copiedProperties = Maps.newHashMap(sparkConfigs); return copiedProperties; } - // Each SparkResource has and only has one SparkRepository. - // This method get the remote archive which matches the dpp version from remote repository + + /** + * Each SparkResource has and only has one SparkRepository. + * This method get the remote archive which matches the dpp version from remote repository + */ public synchronized SparkRepository.SparkArchive prepareArchive() throws LoadException { String remoteRepositoryPath = workingDir + "/" + Catalog.getCurrentCatalog().getClusterId() + "/" + SparkRepository.REPOSITORY_DIR + name; @@ -252,7 +255,8 @@ public class SparkResource extends Resource { throw new DdlException("Missing " + SPARK_SUBMIT_DEPLOY_MODE + " in properties"); } // if deploy machines do not set HADOOP_CONF_DIR env, we should set these configs blow - if ((!sparkConfigs.containsKey(SPARK_YARN_RESOURCE_MANAGER_ADDRESS) || !sparkConfigs.containsKey(SPARK_FS_DEFAULT_FS)) + if ((!sparkConfigs.containsKey(SPARK_YARN_RESOURCE_MANAGER_ADDRESS) + || !sparkConfigs.containsKey(SPARK_FS_DEFAULT_FS)) && isYarnMaster()) { throw new DdlException("Missing (" + SPARK_YARN_RESOURCE_MANAGER_ADDRESS + " and " + SPARK_FS_DEFAULT_FS + ") in yarn master"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/StructType.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/StructType.java index 5d68be8f5c..e3ffa3a521 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/StructType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/StructType.java @@ -114,6 +114,7 @@ public class StructType extends Type { field.toThrift(container, node); } } + @Override public String toString() { return toSql(0); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/TableIf.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/TableIf.java index 4b4a049253..d69bbe3213 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/TableIf.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/TableIf.java @@ -42,8 +42,7 @@ public interface TableIf { boolean isWriteLockHeldByCurrentThread(); - - void writeLockOrException(E e) throws E; + void writeLockOrException(E e) throws E; void writeLockOrDdlException() throws DdlException; @@ -53,8 +52,7 @@ public interface TableIf { boolean tryWriteLockOrMetaException(long timeout, TimeUnit unit) throws MetaNotFoundException; - - boolean tryWriteLockOrException(long timeout, TimeUnit unit, E e) throws E; + boolean tryWriteLockOrException(long timeout, TimeUnit unit, E e) throws E; boolean tryWriteLockIfExist(long timeout, TimeUnit unit); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java index b9c3835c96..fd70e718e9 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java @@ -283,9 +283,11 @@ public class TableProperty implements Writable { return tableProperty; } - // For some historical reason, both "dynamic_partition.replication_num" and "dynamic_partition.replication_allocation" + // For some historical reason, + // both "dynamic_partition.replication_num" and "dynamic_partition.replication_allocation" // may be exist in "properties". we need remove the "dynamic_partition.replication_num", or it will always replace - // the "dynamic_partition.replication_allocation", result in unable to set "dynamic_partition.replication_allocation". + // the "dynamic_partition.replication_allocation", + // result in unable to set "dynamic_partition.replication_allocation". private void removeDuplicateReplicaNumProperty() { if (properties.containsKey(DynamicPartitionProperty.REPLICATION_NUM) && properties.containsKey(DynamicPartitionProperty.REPLICATION_ALLOCATION)) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Tablet.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Tablet.java index c96c59e1c7..5642455c4d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Tablet.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Tablet.java @@ -53,7 +53,8 @@ import java.util.stream.LongStream; */ public class Tablet extends MetaObject implements Writable { private static final Logger LOG = LogManager.getLogger(Tablet.class); - // if current version count of replica is more than QUERYABLE_TIMES_OF_MIN_VERSION_COUNT times the minimum version count, + // if current version count of replica is more than + // QUERYABLE_TIMES_OF_MIN_VERSION_COUNT times the minimum version count, // then the replica would not be considered as queryable. private static final int QUERYABLE_TIMES_OF_MIN_VERSION_COUNT = 3; @@ -472,7 +473,8 @@ public class Tablet extends MetaObject implements Writable { // condition explain: // 1. alive < replicationNum: replica is missing or bad // 2. replicas.size() >= aliveBackendsNum: the existing replicas occupies all available backends - // 3. aliveBackendsNum >= replicationNum: make sure after deleting, there will be at least one backend for new replica. + // 3. aliveBackendsNum >= replicationNum: make sure after deleting, + // there will be at least one backend for new replica. // 4. replicationNum > 1: if replication num is set to 1, do not delete any replica, for safety reason return Pair.create(TabletStatus.FORCE_REDUNDANT, TabletSchedCtx.Priority.VERY_HIGH); } else if (alive < (replicationNum / 2) + 1) { @@ -507,7 +509,8 @@ public class Tablet extends MetaObject implements Writable { && availableBeIds.size() >= replicationNum && replicationNum > 1) { // No BE can be choose to create a new replica return Pair.create(TabletStatus.FORCE_REDUNDANT, - stable < (replicationNum / 2) + 1 ? TabletSchedCtx.Priority.NORMAL : TabletSchedCtx.Priority.LOW); + stable < (replicationNum / 2) + 1 + ? TabletSchedCtx.Priority.NORMAL : TabletSchedCtx.Priority.LOW); } if (stable < (replicationNum / 2) + 1) { return Pair.create(TabletStatus.REPLICA_RELOCATING, TabletSchedCtx.Priority.NORMAL); @@ -576,7 +579,8 @@ public class Tablet extends MetaObject implements Writable { * No need to check if backend is available. We consider all backends in 'backendsSet' are available, * If not, unavailable backends will be relocated by CalocateTableBalancer first.
*/ - public TabletStatus getColocateHealthStatus(long visibleVersion, ReplicaAllocation replicaAlloc, Set backendsSet) { + public TabletStatus getColocateHealthStatus(long visibleVersion, + ReplicaAllocation replicaAlloc, Set backendsSet) { // Here we don't need to care about tag. Because the replicas of the colocate table has been confirmed // in ColocateTableCheckerAndBalancer. Short totalReplicaNum = replicaAlloc.getTotalReplicaNum(); @@ -592,7 +596,8 @@ public class Tablet extends MetaObject implements Writable { for (Replica replica : replicas) { if (!backendsSet.contains(replica.getBackendId())) { // We don't care about replicas that are not in backendsSet. - // eg: replicaBackendIds=(1,2,3,4); backendsSet=(1,2,3), then replica 4 should be skipped here and then goto ```COLOCATE_REDUNDANT``` in step 3 + // eg: replicaBackendIds=(1,2,3,4); backendsSet=(1,2,3), + // then replica 4 should be skipped here and then goto ```COLOCATE_REDUNDANT``` in step 3 continue; } @@ -603,7 +608,8 @@ public class Tablet extends MetaObject implements Writable { return TabletStatus.COLOCATE_REDUNDANT; } else { // maybe in replica's DECOMMISSION state - // Here we return VERSION_INCOMPLETE, and the tablet scheduler will finally set it's state to NORMAL. + // Here we return VERSION_INCOMPLETE, + // and the tablet scheduler will finally set it's state to NORMAL. return TabletStatus.VERSION_INCOMPLETE; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java index 1d843226f8..21c55fb4a8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java @@ -139,9 +139,11 @@ public class TabletInvertedIndex { Replica replica = entry.getValue(); tabletFoundInMeta.add(tabletId); TTabletInfo backendTabletInfo = backendTablet.getTabletInfos().get(0); - if (partitionIdInMemorySet.contains(backendTabletInfo.getPartitionId()) != backendTabletInfo.isIsInMemory()) { + if (partitionIdInMemorySet.contains( + backendTabletInfo.getPartitionId()) != backendTabletInfo.isIsInMemory()) { synchronized (tabletToInMemory) { - tabletToInMemory.add(new ImmutableTriple<>(tabletId, backendTabletInfo.getSchemaHash(), !backendTabletInfo.isIsInMemory())); + tabletToInMemory.add(new ImmutableTriple<>(tabletId, backendTabletInfo.getSchemaHash(), + !backendTabletInfo.isIsInMemory())); } } // 1. 
(intersection) @@ -200,21 +202,27 @@ public class TabletInvertedIndex { List transactionIds = backendTabletInfo.getTransactionIds(); GlobalTransactionMgr transactionMgr = Catalog.getCurrentGlobalTransactionMgr(); for (Long transactionId : transactionIds) { - TransactionState transactionState = transactionMgr.getTransactionState(tabletMeta.getDbId(), transactionId); - if (transactionState == null || transactionState.getTransactionStatus() == TransactionStatus.ABORTED) { + TransactionState transactionState + = transactionMgr.getTransactionState(tabletMeta.getDbId(), transactionId); + if (transactionState == null + || transactionState.getTransactionStatus() == TransactionStatus.ABORTED) { synchronized (transactionsToClear) { transactionsToClear.put(transactionId, tabletMeta.getPartitionId()); } LOG.debug("transaction id [{}] is not valid any more, " + "clear it from backend [{}]", transactionId, backendId); } else if (transactionState.getTransactionStatus() == TransactionStatus.VISIBLE) { - TableCommitInfo tableCommitInfo = transactionState.getTableCommitInfo(tabletMeta.getTableId()); - PartitionCommitInfo partitionCommitInfo = tableCommitInfo == null ? null : tableCommitInfo.getPartitionCommitInfo(partitionId); + TableCommitInfo tableCommitInfo + = transactionState.getTableCommitInfo(tabletMeta.getTableId()); + PartitionCommitInfo partitionCommitInfo = tableCommitInfo == null + ? null : tableCommitInfo.getPartitionCommitInfo(partitionId); if (partitionCommitInfo != null) { - TPartitionVersionInfo versionInfo = new TPartitionVersionInfo(tabletMeta.getPartitionId(), + TPartitionVersionInfo versionInfo + = new TPartitionVersionInfo(tabletMeta.getPartitionId(), partitionCommitInfo.getVersion(), 0); synchronized (transactionsToPublish) { - ListMultimap map = transactionsToPublish.get(transactionState.getDbId()); + ListMultimap map + = transactionsToPublish.get(transactionState.getDbId()); if (map == null) { map = ArrayListMultimap.create(); transactionsToPublish.put(transactionState.getDbId(), map); @@ -246,11 +254,13 @@ public class TabletInvertedIndex { } long end = System.currentTimeMillis(); - LOG.info("finished to do tablet diff with backend[{}]. sync: {}. metaDel: {}. foundInMeta: {}. migration: {}. " + LOG.info("finished to do tablet diff with backend[{}]. sync: {}." + + " metaDel: {}. foundInMeta: {}. migration: {}. " + "found invalid transactions {}. found republish transactions {}. tabletInMemorySync: {}." - + " need recovery: {}. cost: {} ms", backendId, tabletSyncMap.size(), tabletDeleteFromMeta.size(), - tabletFoundInMeta.size(), tabletMigrationMap.size(), transactionsToClear.size(), transactionsToPublish.size(), - tabletToInMemory.size(), tabletRecoveryMap.size(), (end - start)); + + " need recovery: {}. cost: {} ms", backendId, tabletSyncMap.size(), + tabletDeleteFromMeta.size(), tabletFoundInMeta.size(), tabletMigrationMap.size(), + transactionsToClear.size(), transactionsToPublish.size(), tabletToInMemory.size(), + tabletRecoveryMap.size(), (end - start)); } public Long getTabletIdByReplica(long replicaId) { @@ -302,7 +312,8 @@ public class TabletInvertedIndex { // backend replica's version is larger or newer than replica in FE, sync it. 
return true; } else if (versionInFe == backendTabletInfo.getVersion() && replicaInFe.isBad()) { - // backend replica's version is equal to replica in FE, but replica in FE is bad, while backend replica is good, sync it + // backend replica's version is equal to replica in FE, but replica in FE is bad, + // while backend replica is good, sync it return true; } @@ -546,7 +557,8 @@ public class TabletInvertedIndex { } // Only build from available bes, exclude colocate tables - public Map> buildPartitionInfoBySkew(List availableBeIds) { + public Map> buildPartitionInfoBySkew( + List availableBeIds) { readLock(); // 1. gen > @@ -567,12 +579,14 @@ public class TabletInvertedIndex { Preconditions.checkState(availableBeIds.contains(beId), "dead be " + beId); TabletMeta tabletMeta = tabletMetaMap.get(tabletId); Preconditions.checkNotNull(tabletMeta, "invalid tablet " + tabletId); - Preconditions.checkState(!Catalog.getCurrentColocateIndex().isColocateTable(tabletMeta.getTableId()), + Preconditions.checkState( + !Catalog.getCurrentColocateIndex().isColocateTable(tabletMeta.getTableId()), "should not be the colocate table"); TStorageMedium medium = tabletMeta.getStorageMedium(); Table> partitionReplicasInfo = partitionReplicasInfoMaps.get(medium); - Map countMap = partitionReplicasInfo.get(tabletMeta.getPartitionId(), tabletMeta.getIndexId()); + Map countMap = partitionReplicasInfo.get( + tabletMeta.getPartitionId(), tabletMeta.getIndexId()); if (countMap == null) { // If one be doesn't have any replica of one partition, it should be counted too. countMap = availableBeIds.stream().collect(Collectors.toMap(i -> i, i -> 0L)); @@ -597,8 +611,10 @@ public class TabletInvertedIndex { // put to table_info_by_skew Map> skewMaps = Maps.newHashMap(); for (TStorageMedium medium : TStorageMedium.values()) { - TreeMultimap partitionInfoBySkew = TreeMultimap.create(Ordering.natural(), Ordering.arbitrary()); - Set>> mapCells = partitionReplicasInfoMaps.getOrDefault(medium, HashBasedTable.create()).cellSet(); + TreeMultimap partitionInfoBySkew + = TreeMultimap.create(Ordering.natural(), Ordering.arbitrary()); + Set>> mapCells + = partitionReplicasInfoMaps.getOrDefault(medium, HashBasedTable.create()).cellSet(); for (Table.Cell> cell : mapCells) { Map countMap = cell.getValue(); Preconditions.checkNotNull(countMap); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/TempPartitions.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/TempPartitions.java index f389b73f51..21430dd71a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/TempPartitions.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/TempPartitions.java @@ -56,6 +56,7 @@ public class TempPartitions implements Writable, GsonPostProcessable { idToPartition.put(partition.getId(), partition); nameToPartition.put(partition.getName(), partition); } + public long getUpdateTime() { long updateTime = -1L; for (Partition p : idToPartition.values()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Type.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Type.java index 8a89b0ffe3..9bf27226e1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Type.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Type.java @@ -132,9 +132,11 @@ public abstract class Type { public static ArrayList getIntegerTypes() { return integerTypes; } + public static ArrayList getNumericTypes() { return numericTypes; } + public static ArrayList getSupportedTypes() { return supportedTypes; } @@ -323,6 +325,7 @@ 
public abstract class Type { public boolean isDate() { return isScalarType(PrimitiveType.DATE); } + /** * Returns true if Impala supports this type in the metdata. It does not mean we * can manipulate data of this type. For tables that contain columns with these @@ -567,6 +570,7 @@ public abstract class Type { return null; } } + public static List toThrift(Type[] types) { return toThrift(Lists.newArrayList(types)); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/external/ExternalDatabase.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/external/ExternalDatabase.java index 8b604dd142..dd3d78ff9d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/external/ExternalDatabase.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/external/ExternalDatabase.java @@ -17,7 +17,6 @@ package org.apache.doris.catalog.external; -import org.apache.commons.lang.NotImplementedException; import org.apache.doris.catalog.DatabaseIf; import org.apache.doris.catalog.DatabaseProperty; import org.apache.doris.catalog.OlapTable; @@ -28,6 +27,7 @@ import org.apache.doris.common.MetaNotFoundException; import org.apache.doris.datasource.ExternalDataSource; import org.apache.doris.qe.ConnectContext; +import org.apache.commons.lang.NotImplementedException; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/BackendLoadStatistic.java b/fe/fe-core/src/main/java/org/apache/doris/clone/BackendLoadStatistic.java index 824137b2f1..85136dd17a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/BackendLoadStatistic.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/BackendLoadStatistic.java @@ -181,8 +181,10 @@ public class BackendLoadStatistic { TStorageMedium medium = diskInfo.getStorageMedium(); if (diskInfo.getState() == DiskState.ONLINE) { // we only collect online disk's capacity - totalCapacityMap.put(medium, totalCapacityMap.getOrDefault(medium, 0L) + diskInfo.getTotalCapacityB()); - totalUsedCapacityMap.put(medium, totalUsedCapacityMap.getOrDefault(medium, 0L) + (diskInfo.getTotalCapacityB() - diskInfo.getAvailableCapacityB())); + totalCapacityMap.put(medium, totalCapacityMap.getOrDefault(medium, 0L) + + diskInfo.getTotalCapacityB()); + totalUsedCapacityMap.put(medium, totalUsedCapacityMap.getOrDefault(medium, 0L) + + (diskInfo.getTotalCapacityB() - diskInfo.getAvailableCapacityB())); } RootPathLoadStatistic pathStatistic = new RootPathLoadStatistic(beId, diskInfo.getRootPath(), diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/BeLoadRebalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/BeLoadRebalancer.java index fb55091b35..1b03a7044f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/BeLoadRebalancer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/BeLoadRebalancer.java @@ -201,7 +201,8 @@ public class BeLoadRebalancer extends Rebalancer { * 2. Select a low load backend as destination. And tablet should not has replica on this backend. 
*/ @Override - public void completeSchedCtx(TabletSchedCtx tabletCtx, Map backendsWorkingSlots) throws SchedException { + public void completeSchedCtx(TabletSchedCtx tabletCtx, + Map backendsWorkingSlots) throws SchedException { ClusterLoadStatistic clusterStat = statisticMap.get(tabletCtx.getCluster(), tabletCtx.getTag()); if (clusterStat == null) { throw new SchedException(Status.UNRECOVERABLE, "cluster does not exist"); @@ -283,8 +284,9 @@ public class BeLoadRebalancer extends Rebalancer { continue; } - if (!Config.be_rebalancer_fuzzy_test && !clusterStat.isMoreBalanced(tabletCtx.getSrcBackendId(), beStat.getBeId(), - tabletCtx.getTabletId(), tabletCtx.getTabletSize(), tabletCtx.getStorageMedium())) { + if (!Config.be_rebalancer_fuzzy_test && !clusterStat.isMoreBalanced( + tabletCtx.getSrcBackendId(), beStat.getBeId(), tabletCtx.getTabletId(), + tabletCtx.getTabletSize(), tabletCtx.getStorageMedium())) { continue; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/ClusterLoadStatistic.java b/fe/fe-core/src/main/java/org/apache/doris/clone/ClusterLoadStatistic.java index a407347de3..aee2665afc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/ClusterLoadStatistic.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/ClusterLoadStatistic.java @@ -45,23 +45,24 @@ import java.util.stream.Collectors; public class ClusterLoadStatistic { private static final Logger LOG = LogManager.getLogger(ClusterLoadStatistic.class); - private SystemInfoService infoService; - private TabletInvertedIndex invertedIndex; + private final SystemInfoService infoService; + private final TabletInvertedIndex invertedIndex; - private String clusterName; - private Tag tag; + private final String clusterName; + private final Tag tag; - private Map totalCapacityMap = Maps.newHashMap(); - private Map totalUsedCapacityMap = Maps.newHashMap(); - private Map totalReplicaNumMap = Maps.newHashMap(); - private Map avgUsedCapacityPercentMap = Maps.newHashMap(); - private Map avgReplicaNumPercentMap = Maps.newHashMap(); - private Map avgLoadScoreMap = Maps.newHashMap(); + private final Map totalCapacityMap = Maps.newHashMap(); + private final Map totalUsedCapacityMap = Maps.newHashMap(); + private final Map totalReplicaNumMap = Maps.newHashMap(); + private final Map avgUsedCapacityPercentMap = Maps.newHashMap(); + private final Map avgReplicaNumPercentMap = Maps.newHashMap(); + private final Map avgLoadScoreMap = Maps.newHashMap(); // storage medium -> number of backend which has this kind of medium - private Map backendNumMap = Maps.newHashMap(); - private List beLoadStatistics = Lists.newArrayList(); - private Map> beByTotalReplicaCountMaps = Maps.newHashMap(); - private Map> skewMaps = Maps.newHashMap(); + private final Map backendNumMap = Maps.newHashMap(); + private final List beLoadStatistics = Lists.newArrayList(); + private final Map> beByTotalReplicaCountMaps = Maps.newHashMap(); + private Map> skewMaps + = Maps.newHashMap(); public ClusterLoadStatistic(String clusterName, Tag tag, SystemInfoService infoService, TabletInvertedIndex invertedIndex) { @@ -99,20 +100,24 @@ public class ClusterLoadStatistic { } for (TStorageMedium medium : TStorageMedium.values()) { - totalCapacityMap.put(medium, totalCapacityMap.getOrDefault(medium, 0L) + beStatistic.getTotalCapacityB(medium)); - totalUsedCapacityMap.put(medium, totalUsedCapacityMap.getOrDefault(medium, 0L) + beStatistic.getTotalUsedCapacityB(medium)); - totalReplicaNumMap.put(medium, totalReplicaNumMap.getOrDefault(medium, 0L) + 
beStatistic.getReplicaNum(medium)); + totalCapacityMap.put(medium, totalCapacityMap.getOrDefault(medium, 0L) + + beStatistic.getTotalCapacityB(medium)); + totalUsedCapacityMap.put(medium, totalUsedCapacityMap.getOrDefault(medium, 0L) + + beStatistic.getTotalUsedCapacityB(medium)); + totalReplicaNumMap.put(medium, totalReplicaNumMap.getOrDefault(medium, 0L) + + beStatistic.getReplicaNum(medium)); if (beStatistic.hasMedium(medium)) { backendNumMap.put(medium, backendNumMap.getOrDefault(medium, 0) + 1); } } - beLoadStatistics.add(beStatistic); } for (TStorageMedium medium : TStorageMedium.values()) { - avgUsedCapacityPercentMap.put(medium, totalUsedCapacityMap.getOrDefault(medium, 0L) / (double) totalCapacityMap.getOrDefault(medium, 1L)); - avgReplicaNumPercentMap.put(medium, totalReplicaNumMap.getOrDefault(medium, 0L) / (double) backendNumMap.getOrDefault(medium, 1)); + avgUsedCapacityPercentMap.put(medium, totalUsedCapacityMap.getOrDefault(medium, 0L) + / (double) totalCapacityMap.getOrDefault(medium, 1L)); + avgReplicaNumPercentMap.put(medium, totalReplicaNumMap.getOrDefault(medium, 0L) + / (double) backendNumMap.getOrDefault(medium, 1)); } for (BackendLoadStatistic beStatistic : beLoadStatistics) { @@ -141,8 +146,10 @@ public class ClusterLoadStatistic { // Multimap PartitionBalanceInfo> // PartitionBalanceInfo: > // Only count available bes here, aligned with the beByTotalReplicaCountMaps. - skewMaps = invertedIndex.buildPartitionInfoBySkew(beLoadStatistics.stream().filter(BackendLoadStatistic::isAvailable). - map(BackendLoadStatistic::getBeId).collect(Collectors.toList())); + skewMaps = invertedIndex.buildPartitionInfoBySkew(beLoadStatistics.stream() + .filter(BackendLoadStatistic::isAvailable) + .map(BackendLoadStatistic::getBeId) + .collect(Collectors.toList())); } } @@ -168,6 +175,7 @@ public class ClusterLoadStatistic { continue; } + if (Config.be_rebalancer_fuzzy_test) { if (beStat.getLoadScore(medium) > avgLoadScore) { beStat.setClazz(medium, Classification.HIGH); @@ -252,8 +260,10 @@ public class ClusterLoadStatistic { destBeStat.getTotalCapacityB(medium), destBeStat.getReplicaNum(medium) + 1, avgUsedCapacityPercentMap.get(medium), avgReplicaNumPercentMap.get(medium)); - double currentDiff = Math.abs(currentSrcBeScore - avgLoadScoreMap.get(medium)) + Math.abs(currentDestBeScore - avgLoadScoreMap.get(medium)); - double newDiff = Math.abs(newSrcBeScore.score - avgLoadScoreMap.get(medium)) + Math.abs(newDestBeScore.score - avgLoadScoreMap.get(medium)); + double currentDiff = Math.abs(currentSrcBeScore - avgLoadScoreMap.get(medium)) + + Math.abs(currentDestBeScore - avgLoadScoreMap.get(medium)); + double newDiff = Math.abs(newSrcBeScore.score - avgLoadScoreMap.get(medium)) + + Math.abs(newDestBeScore.score - avgLoadScoreMap.get(medium)); LOG.debug("after migrate {}(size: {}) from {} to {}, medium: {}, the load score changed." + " src: {} -> {}, dest: {}->{}, average score: {}. 
current diff: {}, new diff: {}," @@ -294,8 +304,8 @@ public class ClusterLoadStatistic { pathStat.add(pathStatistic.getStorageMedium().name()); pathStat.add(String.valueOf(pathStatistic.getUsedCapacityB())); pathStat.add(String.valueOf(pathStatistic.getCapacityB())); - pathStat.add(String.valueOf(DebugUtil.DECIMAL_FORMAT_SCALE_3.format(pathStatistic.getUsedCapacityB() * 100 - / (double) pathStatistic.getCapacityB()))); + pathStat.add(String.valueOf(DebugUtil.DECIMAL_FORMAT_SCALE_3.format( + pathStatistic.getUsedCapacityB() * 100 / (double) pathStatistic.getCapacityB()))); pathStat.add(pathStatistic.getClazz().name()); pathStat.add(pathStatistic.getDiskState().name()); statistics.add(pathStat); diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java index 111f8d0aa7..240c02ad56 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java @@ -178,7 +178,8 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon { } // get all unavailable backends in the backend bucket sequence of this group - Set unavailableBeIdsInGroup = getUnavailableBeIdsInGroup(infoService, colocateIndex, groupId, tag); + Set unavailableBeIdsInGroup = getUnavailableBeIdsInGroup( + infoService, colocateIndex, groupId, tag); // get all available backends for this group Set beIdsInOtherTag = colocateIndex.getBackendIdsExceptForTag(groupId, tag); List availableBeIds = getAvailableBeIds(db.getClusterName(), tag, beIdsInOtherTag, infoService); @@ -189,7 +190,8 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon { colocateIndex.addBackendsPerBucketSeqByTag(groupId, tag, balancedBackendsPerBucketSeq); Map>> balancedBackendsPerBucketSeqMap = Maps.newHashMap(); balancedBackendsPerBucketSeqMap.put(tag, balancedBackendsPerBucketSeq); - ColocatePersistInfo info = ColocatePersistInfo.createForBackendsPerBucketSeq(groupId, balancedBackendsPerBucketSeqMap); + ColocatePersistInfo info = ColocatePersistInfo + .createForBackendsPerBucketSeq(groupId, balancedBackendsPerBucketSeqMap); catalog.getEditLog().logColocateBackendsPerBucketSeq(info); LOG.info("balance group {}. now backends per bucket sequence for tag {} is: {}", groupId, tag, balancedBackendsPerBucketSeq); @@ -232,7 +234,8 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon { olapTable.readLock(); try { for (Partition partition : olapTable.getPartitions()) { - ReplicaAllocation replicaAlloc = olapTable.getPartitionInfo().getReplicaAllocation(partition.getId()); + ReplicaAllocation replicaAlloc + = olapTable.getPartitionInfo().getReplicaAllocation(partition.getId()); short replicationNum = replicaAlloc.getTotalReplicaNum(); long visibleVersion = partition.getVisibleVersion(); // Here we only get VISIBLE indexes. All other indexes are not queryable. @@ -243,11 +246,14 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon { int idx = 0; for (Long tabletId : index.getTabletIdsInOrder()) { Set bucketsSeq = backendBucketsSeq.get(idx); - Preconditions.checkState(bucketsSeq.size() == replicationNum, bucketsSeq.size() + " vs. " + replicationNum); + Preconditions.checkState(bucketsSeq.size() == replicationNum, + bucketsSeq.size() + " vs. 
" + replicationNum); Tablet tablet = index.getTablet(tabletId); - TabletStatus st = tablet.getColocateHealthStatus(visibleVersion, replicaAlloc, bucketsSeq); + TabletStatus st = tablet.getColocateHealthStatus( + visibleVersion, replicaAlloc, bucketsSeq); if (st != TabletStatus.HEALTHY) { - unstableReason = String.format("get unhealthy tablet %d in colocate table. status: %s", tablet.getId(), st); + unstableReason = String.format("get unhealthy tablet %d in colocate table." + + " status: %s", tablet.getId(), st); LOG.debug(unstableReason); if (!tablet.readyToBeRepaired(Priority.NORMAL)) { @@ -266,7 +272,8 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon { AddResult res = tabletScheduler.addTablet(tabletCtx, false /* not force */); if (res == AddResult.LIMIT_EXCEED || res == AddResult.DISABLED) { - // tablet in scheduler exceed limit, or scheduler is disabled, skip this group and check next one. + // tablet in scheduler exceed limit, or scheduler is disabled, + // skip this group and check next one. LOG.info("tablet scheduler return: {}. stop colocate table check", res.name()); break OUT; } @@ -347,13 +354,15 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon { * Return false if nothing changed. */ private boolean relocateAndBalance(GroupId groupId, Tag tag, Set unavailableBeIds, List availableBeIds, - ColocateTableIndex colocateIndex, SystemInfoService infoService, - ClusterLoadStatistic statistic, List> balancedBackendsPerBucketSeq) { + ColocateTableIndex colocateIndex, SystemInfoService infoService, + ClusterLoadStatistic statistic, List> balancedBackendsPerBucketSeq) { ColocateGroupSchema groupSchema = colocateIndex.getGroupSchema(groupId); short replicaNum = groupSchema.getReplicaAlloc().getReplicaNumByTag(tag); - List> backendsPerBucketSeq = Lists.newArrayList(colocateIndex.getBackendsPerBucketSeqByTag(groupId, tag)); + List> backendsPerBucketSeq = Lists.newArrayList( + colocateIndex.getBackendsPerBucketSeqByTag(groupId, tag)); // [[A,B,C],[B,C,D]] -> [A,B,C,B,C,D] - List flatBackendsPerBucketSeq = backendsPerBucketSeq.stream().flatMap(List::stream).collect(Collectors.toList()); + List flatBackendsPerBucketSeq = backendsPerBucketSeq.stream() + .flatMap(List::stream).collect(Collectors.toList()); boolean isChanged = false; OUT: @@ -381,7 +390,8 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon { } // sort backends with replica num in desc order List> backendWithReplicaNum = - getSortedBackendReplicaNumPairs(availableBeIds, unavailableBeIds, statistic, flatBackendsPerBucketSeq); + getSortedBackendReplicaNumPairs(availableBeIds, + unavailableBeIds, statistic, flatBackendsPerBucketSeq); // if there is only one available backend and no unavailable bucketId to relocate, end the outer loop if (backendWithReplicaNum.size() <= 1) { @@ -484,8 +494,8 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon { return hostsPerBucketSeq; } - private List> getSortedBackendReplicaNumPairs(List allAvailBackendIds, Set unavailBackendIds, - ClusterLoadStatistic statistic, List flatBackendsPerBucketSeq) { + private List> getSortedBackendReplicaNumPairs(List allAvailBackendIds, + Set unavailBackendIds, ClusterLoadStatistic statistic, List flatBackendsPerBucketSeq) { // backend id -> replica num, and sorted by replica num, descending. 
Map backendToReplicaNum = flatBackendsPerBucketSeq.stream() .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())); @@ -544,20 +554,23 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon { Set backends = colocateIndex.getBackendsByGroup(groupId, tag); Set unavailableBeIds = Sets.newHashSet(); for (Long backendId : backends) { - if (!checkBackendAvailable(backendId, tag, Sets.newHashSet(), infoService, Config.colocate_group_relocate_delay_second)) { + if (!checkBackendAvailable(backendId, tag, Sets.newHashSet(), infoService, + Config.colocate_group_relocate_delay_second)) { unavailableBeIds.add(backendId); } } return unavailableBeIds; } - private List getAvailableBeIds(String cluster, Tag tag, Set excludedBeIds, SystemInfoService infoService) { + private List getAvailableBeIds(String cluster, Tag tag, Set excludedBeIds, + SystemInfoService infoService) { // get all backends to allBackendIds, and check be availability using checkBackendAvailable // backend stopped for a short period of time is still considered available List allBackendIds = infoService.getClusterBackendIds(cluster, false); List availableBeIds = Lists.newArrayList(); for (Long backendId : allBackendIds) { - if (checkBackendAvailable(backendId, tag, excludedBeIds, infoService, Config.colocate_group_relocate_delay_second)) { + if (checkBackendAvailable(backendId, tag, excludedBeIds, infoService, + Config.colocate_group_relocate_delay_second)) { availableBeIds.add(backendId); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/DiskRebalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/DiskRebalancer.java index 80bac26717..493e465b40 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/DiskRebalancer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/DiskRebalancer.java @@ -246,7 +246,8 @@ public class DiskRebalancer extends Rebalancer { * 3. Select a low load path from this backend as destination. 
*/ @Override - public void completeSchedCtx(TabletSchedCtx tabletCtx, Map backendsWorkingSlots) throws SchedException { + public void completeSchedCtx(TabletSchedCtx tabletCtx, + Map backendsWorkingSlots) throws SchedException { ClusterLoadStatistic clusterStat = statisticMap.get(tabletCtx.getCluster(), tabletCtx.getTag()); if (clusterStat == null) { throw new SchedException(Status.UNRECOVERABLE, "cluster does not exist"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/DynamicPartitionScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/clone/DynamicPartitionScheduler.java index 0aec8f21f0..fef969f646 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/DynamicPartitionScheduler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/DynamicPartitionScheduler.java @@ -69,8 +69,8 @@ import java.util.Set; /** * This class is used to periodically add or drop partition on an olapTable which specify dynamic partition properties - * Config.dynamic_partition_enable determine whether this feature is enable, Config.dynamic_partition_check_interval_seconds - * determine how often the task is performed + * Config.dynamic_partition_enable determine whether this feature is enable, + * Config.dynamic_partition_check_interval_seconds determine how often the task is performed */ public class DynamicPartitionScheduler extends MasterDaemon { private static final Logger LOG = LogManager.getLogger(DynamicPartitionScheduler.class); @@ -163,16 +163,20 @@ public class DynamicPartitionScheduler extends MasterDaemon { int hotPartitionNum = dynamicPartitionProperty.getHotPartitionNum(); for (; idx <= dynamicPartitionProperty.getEnd(); idx++) { - String prevBorder = DynamicPartitionUtil.getPartitionRangeString(dynamicPartitionProperty, now, idx, partitionFormat); - String nextBorder = DynamicPartitionUtil.getPartitionRangeString(dynamicPartitionProperty, now, idx + 1, partitionFormat); + String prevBorder = DynamicPartitionUtil.getPartitionRangeString( + dynamicPartitionProperty, now, idx, partitionFormat); + String nextBorder = DynamicPartitionUtil.getPartitionRangeString( + dynamicPartitionProperty, now, idx + 1, partitionFormat); PartitionValue lowerValue = new PartitionValue(prevBorder); PartitionValue upperValue = new PartitionValue(nextBorder); boolean isPartitionExists = false; Range addPartitionKeyRange; try { - PartitionKey lowerBound = PartitionKey.createPartitionKey(Collections.singletonList(lowerValue), Collections.singletonList(partitionColumn)); - PartitionKey upperBound = PartitionKey.createPartitionKey(Collections.singletonList(upperValue), Collections.singletonList(partitionColumn)); + PartitionKey lowerBound = PartitionKey.createPartitionKey(Collections.singletonList(lowerValue), + Collections.singletonList(partitionColumn)); + PartitionKey upperBound = PartitionKey.createPartitionKey(Collections.singletonList(upperValue), + Collections.singletonList(partitionColumn)); addPartitionKeyRange = Range.closedOpen(lowerBound, upperBound); } catch (AnalysisException | IllegalArgumentException e) { // AnalysisException: keys.size is always equal to column.size, cannot reach this exception @@ -188,10 +192,12 @@ public class DynamicPartitionScheduler extends MasterDaemon { } catch (Exception e) { isPartitionExists = true; if (addPartitionKeyRange.equals(partitionItem.getItems())) { - LOG.info("partition range {} exist in table {}, clear fail msg", addPartitionKeyRange, olapTable.getName()); + LOG.info("partition range {} exist in table {}, clear fail msg", + 
addPartitionKeyRange, olapTable.getName()); clearCreatePartitionFailedMsg(olapTable.getId()); } else { - recordCreatePartitionFailedMsg(db.getFullName(), olapTable.getName(), e.getMessage(), olapTable.getId()); + recordCreatePartitionFailedMsg(db.getFullName(), olapTable.getName(), + e.getMessage(), olapTable.getId()); } break; } @@ -201,7 +207,8 @@ public class DynamicPartitionScheduler extends MasterDaemon { } // construct partition desc - PartitionKeyDesc partitionKeyDesc = PartitionKeyDesc.createFixed(Collections.singletonList(lowerValue), Collections.singletonList(upperValue)); + PartitionKeyDesc partitionKeyDesc = PartitionKeyDesc.createFixed(Collections.singletonList(lowerValue), + Collections.singletonList(upperValue)); HashMap partitionProperties = new HashMap<>(1); if (dynamicPartitionProperty.getReplicaAllocation().isNotSet()) { partitionProperties.put(PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION, @@ -216,8 +223,9 @@ public class DynamicPartitionScheduler extends MasterDaemon { setStorageMediumProperty(partitionProperties, dynamicPartitionProperty, now, hotPartitionNum, idx); } - String partitionName = dynamicPartitionProperty.getPrefix() + DynamicPartitionUtil.getFormattedPartitionName( - dynamicPartitionProperty.getTimeZone(), prevBorder, dynamicPartitionProperty.getTimeUnit()); + String partitionName = dynamicPartitionProperty.getPrefix() + + DynamicPartitionUtil.getFormattedPartitionName(dynamicPartitionProperty.getTimeZone(), + prevBorder, dynamicPartitionProperty.getTimeUnit()); SinglePartitionDesc rangePartitionDesc = new SinglePartitionDesc(true, partitionName, partitionKeyDesc, partitionProperties); @@ -239,8 +247,8 @@ public class DynamicPartitionScheduler extends MasterDaemon { return addPartitionClauses; } - private void setStorageMediumProperty(HashMap partitionProperties, DynamicPartitionProperty property, - ZonedDateTime now, int hotPartitionNum, int offset) { + private void setStorageMediumProperty(HashMap partitionProperties, + DynamicPartitionProperty property, ZonedDateTime now, int hotPartitionNum, int offset) { if (offset + hotPartitionNum <= 0) { return; } @@ -250,14 +258,16 @@ public class DynamicPartitionScheduler extends MasterDaemon { partitionProperties.put(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TIME, cooldownTime); } - private Range getClosedRange(Database db, OlapTable olapTable, Column partitionColumn, String partitionFormat, - String lowerBorderOfReservedHistory, String upperBorderOfReservedHistory) { + private Range getClosedRange(Database db, OlapTable olapTable, Column partitionColumn, + String partitionFormat, String lowerBorderOfReservedHistory, String upperBorderOfReservedHistory) { Range reservedHistoryPartitionKeyRange = null; PartitionValue lowerBorderPartitionValue = new PartitionValue(lowerBorderOfReservedHistory); PartitionValue upperBorderPartitionValue = new PartitionValue(upperBorderOfReservedHistory); try { - PartitionKey lowerBorderBound = PartitionKey.createPartitionKey(Collections.singletonList(lowerBorderPartitionValue), Collections.singletonList(partitionColumn)); - PartitionKey upperBorderBound = PartitionKey.createPartitionKey(Collections.singletonList(upperBorderPartitionValue), Collections.singletonList(partitionColumn)); + PartitionKey lowerBorderBound = PartitionKey.createPartitionKey( + Collections.singletonList(lowerBorderPartitionValue), Collections.singletonList(partitionColumn)); + PartitionKey upperBorderBound = PartitionKey.createPartitionKey( + Collections.singletonList(upperBorderPartitionValue), 
Collections.singletonList(partitionColumn)); reservedHistoryPartitionKeyRange = Range.closed(lowerBorderBound, upperBorderBound); } catch (AnalysisException e) { // AnalysisException: keys.size is always equal to column.size, cannot reach this exception @@ -272,7 +282,8 @@ public class DynamicPartitionScheduler extends MasterDaemon { * 1. get the range of [start, 0) as a reserved range. * 2. get DropPartitionClause of partitions which range are before this reserved range. */ - private ArrayList getDropPartitionClause(Database db, OlapTable olapTable, Column partitionColumn, String partitionFormat) throws DdlException { + private ArrayList getDropPartitionClause(Database db, OlapTable olapTable, + Column partitionColumn, String partitionFormat) throws DdlException { ArrayList dropPartitionClauses = new ArrayList<>(); DynamicPartitionProperty dynamicPartitionProperty = olapTable.getTableProperty().getDynamicPartitionProperty(); if (dynamicPartitionProperty.getStart() == DynamicPartitionProperty.MIN_START_OFFSET) { @@ -290,8 +301,10 @@ public class DynamicPartitionScheduler extends MasterDaemon { List> reservedHistoryPartitionKeyRangeList = new ArrayList>(); Range reservePartitionKeyRange; try { - PartitionKey lowerBound = PartitionKey.createPartitionKey(Collections.singletonList(lowerPartitionValue), Collections.singletonList(partitionColumn)); - PartitionKey upperBound = PartitionKey.createPartitionKey(Collections.singletonList(upperPartitionValue), Collections.singletonList(partitionColumn)); + PartitionKey lowerBound = PartitionKey.createPartitionKey(Collections.singletonList(lowerPartitionValue), + Collections.singletonList(partitionColumn)); + PartitionKey upperBound = PartitionKey.createPartitionKey(Collections.singletonList(upperPartitionValue), + Collections.singletonList(partitionColumn)); reservePartitionKeyRange = Range.closedOpen(lowerBound, upperBound); reservedHistoryPartitionKeyRangeList.add(reservePartitionKeyRange); } catch (AnalysisException | IllegalArgumentException e) { @@ -303,14 +316,19 @@ public class DynamicPartitionScheduler extends MasterDaemon { } String reservedHistoryPeriods = dynamicPartitionProperty.getReservedHistoryPeriods(); - List ranges = DynamicPartitionUtil.convertStringToPeriodsList(reservedHistoryPeriods, dynamicPartitionProperty.getTimeUnit()); + List ranges = DynamicPartitionUtil.convertStringToPeriodsList(reservedHistoryPeriods, + dynamicPartitionProperty.getTimeUnit()); if (ranges.size() != 0) { for (Range range : ranges) { try { - String lowerBorderOfReservedHistory = DynamicPartitionUtil.getHistoryPartitionRangeString(dynamicPartitionProperty, range.lowerEndpoint().toString(), partitionFormat); - String upperBorderOfReservedHistory = DynamicPartitionUtil.getHistoryPartitionRangeString(dynamicPartitionProperty, range.upperEndpoint().toString(), partitionFormat); - Range reservedHistoryPartitionKeyRange = getClosedRange(db, olapTable, partitionColumn, partitionFormat, lowerBorderOfReservedHistory, upperBorderOfReservedHistory); + String lowerBorderOfReservedHistory = DynamicPartitionUtil.getHistoryPartitionRangeString( + dynamicPartitionProperty, range.lowerEndpoint().toString(), partitionFormat); + String upperBorderOfReservedHistory = DynamicPartitionUtil.getHistoryPartitionRangeString( + dynamicPartitionProperty, range.upperEndpoint().toString(), partitionFormat); + Range reservedHistoryPartitionKeyRange + = getClosedRange(db, olapTable, partitionColumn, partitionFormat, + lowerBorderOfReservedHistory, upperBorderOfReservedHistory); 
reservedHistoryPartitionKeyRangeList.add(reservedHistoryPartitionKeyRange); } catch (IllegalArgumentException e) { return dropPartitionClauses; @@ -395,7 +413,8 @@ public class DynamicPartitionScheduler extends MasterDaemon { try { partitionFormat = DynamicPartitionUtil.getPartitionFormat(partitionColumn); } catch (Exception e) { - recordCreatePartitionFailedMsg(db.getFullName(), olapTable.getName(), e.getMessage(), olapTable.getId()); + recordCreatePartitionFailedMsg(db.getFullName(), olapTable.getName(), + e.getMessage(), olapTable.getId()); continue; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/MovesCacheMap.java b/fe/fe-core/src/main/java/org/apache/doris/clone/MovesCacheMap.java index 51fd8bd930..856c3836e7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/MovesCacheMap.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/MovesCacheMap.java @@ -49,7 +49,8 @@ public class MovesCacheMap { private final Table> cacheMap = HashBasedTable.create(); private long lastExpireConfig = -1L; - // TabletId -> Pair, 'ToDeleteReplicaId == -1' means this move haven't been scheduled successfully. + // TabletId -> Pair, 'ToDeleteReplicaId == -1' + // means this move haven't been scheduled successfully. public static class MovesCache { Cache> cache; @@ -65,7 +66,8 @@ public class MovesCacheMap { // Cyclical update the cache mapping, cuz the cluster may be deleted, we should delete the corresponding cache too. public void updateMapping(Table statisticMap, long expireAfterAccessSecond) { if (expireAfterAccessSecond > 0 && lastExpireConfig != expireAfterAccessSecond) { - LOG.debug("Reset expireAfterAccess, last {} s, now {} s. Moves will be cleared.", lastExpireConfig, expireAfterAccessSecond); + LOG.debug("Reset expireAfterAccess, last {} s, now {} s. 
Moves will be cleared.", + lastExpireConfig, expireAfterAccessSecond); cacheMap.clear(); lastExpireConfig = expireAfterAccessSecond; } @@ -78,7 +80,8 @@ public class MovesCacheMap { .collect(Collectors.toList()); for (Table.Cell cell : toAdd) { Map newCacheMap = Maps.newHashMap(); - Arrays.stream(TStorageMedium.values()).forEach(m -> newCacheMap.put(m, new MovesCache(expireAfterAccessSecond, TimeUnit.SECONDS))); + Arrays.stream(TStorageMedium.values()) + .forEach(m -> newCacheMap.put(m, new MovesCache(expireAfterAccessSecond, TimeUnit.SECONDS))); this.cacheMap.put(cell.getRowKey(), cell.getColumnKey(), newCacheMap); } } @@ -114,7 +117,8 @@ public class MovesCacheMap { } public long size() { - return cacheMap.values().stream().mapToLong(maps -> maps.values().stream().mapToLong(map -> map.get().size()).sum()).sum(); + return cacheMap.values().stream().mapToLong( + maps -> maps.values().stream().mapToLong(map -> map.get().size()).sum()).sum(); } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java index 406bf53bb5..b43af45c29 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java @@ -73,7 +73,8 @@ public class PartitionRebalancer extends Rebalancer { ClusterLoadStatistic clusterStat, TStorageMedium medium) { String clusterName = clusterStat.getClusterName(); MovesCacheMap.MovesCache movesInProgress = movesCacheMap.getCache(clusterName, clusterStat.getTag(), medium); - Preconditions.checkNotNull(movesInProgress, "clusterStat is got from statisticMap, movesCacheMap should have the same entry"); + Preconditions.checkNotNull(movesInProgress, + "clusterStat is got from statisticMap, movesCacheMap should have the same entry"); // Iterating through Cache.asMap().values() does not reset access time for the entries you retrieve. List movesInProgressList = movesInProgress.get().asMap().values() @@ -111,7 +112,8 @@ public class PartitionRebalancer extends Rebalancer { LOG.debug("Cluster {}-{}: peek max skew {}, assume {} in-progress moves are succeeded {}", clusterName, medium, skews.isEmpty() ? 
0 : skews.last(), movesInProgressList.size(), movesInProgressList); - List moves = algo.getNextMoves(clusterBalanceInfo, Config.partition_rebalance_max_moves_num_per_selection); + List moves + = algo.getNextMoves(clusterBalanceInfo, Config.partition_rebalance_max_moves_num_per_selection); List alternativeTablets = Lists.newArrayList(); List inProgressIds = movesInProgressList.stream().map(m -> m.tabletId).collect(Collectors.toList()); @@ -154,7 +156,8 @@ public class PartitionRebalancer extends Rebalancer { tabletCtx.setOrigPriority(TabletSchedCtx.Priority.LOW); alternativeTablets.add(tabletCtx); // Pair, ToDeleteReplicaId should be -1L before scheduled successfully - movesInProgress.get().put(pickedTabletId, new Pair<>(new TabletMove(pickedTabletId, move.fromBe, move.toBe), -1L)); + movesInProgress.get().put(pickedTabletId, + new Pair<>(new TabletMove(pickedTabletId, move.fromBe, move.toBe), -1L)); counterBalanceMoveCreated.incrementAndGet(); // Synchronize with movesInProgress inProgressIds.add(pickedTabletId); @@ -172,7 +175,7 @@ public class PartitionRebalancer extends Rebalancer { } private boolean buildClusterInfo(ClusterLoadStatistic clusterStat, TStorageMedium medium, - List movesInProgress, ClusterBalanceInfo info, List toDeleteKeys) { + List movesInProgress, ClusterBalanceInfo info, List toDeleteKeys) { Preconditions.checkState(info.beByTotalReplicaCount.isEmpty() && info.partitionInfoBySkew.isEmpty(), ""); // If we wanna modify the PartitionBalanceInfo in info.beByTotalReplicaCount, deep-copy it @@ -180,7 +183,8 @@ public class PartitionRebalancer extends Rebalancer { info.partitionInfoBySkew.putAll(clusterStat.getSkewMap(medium)); // Skip the toDeleteKeys - List filteredMoves = movesInProgress.stream().filter(m -> !toDeleteKeys.contains(m.tabletId)).collect(Collectors.toList()); + List filteredMoves = movesInProgress.stream() + .filter(m -> !toDeleteKeys.contains(m.tabletId)).collect(Collectors.toList()); for (TabletMove move : filteredMoves) { TabletMeta meta = invertedIndex.getTabletMeta(move.tabletId); @@ -190,8 +194,11 @@ public class PartitionRebalancer extends Rebalancer { continue; } - TwoDimensionalGreedyRebalanceAlgo.PartitionMove partitionMove = new TwoDimensionalGreedyRebalanceAlgo.PartitionMove(meta.getPartitionId(), meta.getIndexId(), move.fromBe, move.toBe); - boolean st = TwoDimensionalGreedyRebalanceAlgo.applyMove(partitionMove, info.beByTotalReplicaCount, info.partitionInfoBySkew); + TwoDimensionalGreedyRebalanceAlgo.PartitionMove partitionMove + = new TwoDimensionalGreedyRebalanceAlgo.PartitionMove( + meta.getPartitionId(), meta.getIndexId(), move.fromBe, move.toBe); + boolean st = TwoDimensionalGreedyRebalanceAlgo.applyMove( + partitionMove, info.beByTotalReplicaCount, info.partitionInfoBySkew); if (!st) { // Can't apply this move, mark it failed, continue to apply the next. toDeleteKeys.add(move.tabletId); @@ -208,7 +215,8 @@ public class PartitionRebalancer extends Rebalancer { if (moveIsComplete) { toDeleteKeys.add(move.tabletId); LOG.debug("Move {} is completed. 
The cur dist: {}", move, - invertedIndex.getReplicasByTabletId(move.tabletId).stream().map(Replica::getBackendId).collect(Collectors.toList())); + invertedIndex.getReplicasByTabletId(move.tabletId).stream() + .map(Replica::getBackendId).collect(Collectors.toList())); counterBalanceMoveSucceeded.incrementAndGet(); } } @@ -217,15 +225,18 @@ public class PartitionRebalancer extends Rebalancer { // Move completed: fromBe doesn't have a replica and toBe has a replica private boolean checkMoveCompleted(TabletMove move) { Long tabletId = move.tabletId; - List bes = invertedIndex.getReplicasByTabletId(tabletId).stream().map(Replica::getBackendId).collect(Collectors.toList()); + List bes = invertedIndex.getReplicasByTabletId(tabletId).stream() + .map(Replica::getBackendId).collect(Collectors.toList()); return !bes.contains(move.fromBe) && bes.contains(move.toBe); } @Override protected void completeSchedCtx(TabletSchedCtx tabletCtx, Map backendsWorkingSlots) throws SchedException { - MovesCacheMap.MovesCache movesInProgress = movesCacheMap.getCache(tabletCtx.getCluster(), tabletCtx.getTag(), tabletCtx.getStorageMedium()); - Preconditions.checkNotNull(movesInProgress, "clusterStat is got from statisticMap, movesInProgressMap should have the same entry"); + MovesCacheMap.MovesCache movesInProgress = movesCacheMap.getCache( + tabletCtx.getCluster(), tabletCtx.getTag(), tabletCtx.getStorageMedium()); + Preconditions.checkNotNull(movesInProgress, + "clusterStat is got from statisticMap, movesInProgressMap should have the same entry"); try { Pair pair = movesInProgress.get().getIfPresent(tabletCtx.getTabletId()); @@ -242,7 +253,8 @@ public class PartitionRebalancer extends Rebalancer { if (slot.takeBalanceSlot(srcReplica.getPathHash()) != -1) { tabletCtx.setSrc(srcReplica); } else { - throw new SchedException(SchedException.Status.SCHEDULE_FAILED, "no slot for src replica " + srcReplica + ", pathHash " + srcReplica.getPathHash()); + throw new SchedException(SchedException.Status.SCHEDULE_FAILED, + "no slot for src replica " + srcReplica + ", pathHash " + srcReplica.getPathHash()); } // Choose a path in destination @@ -259,7 +271,8 @@ public class PartitionRebalancer extends Rebalancer { .map(RootPathLoadStatistic::getPathHash).collect(Collectors.toSet()); long pathHash = slot.takeAnAvailBalanceSlotFrom(availPath); if (pathHash == -1) { - throw new SchedException(SchedException.Status.SCHEDULE_FAILED, "paths has no available balance slot: " + availPath); + throw new SchedException(SchedException.Status.SCHEDULE_FAILED, + "paths has no available balance slot: " + availPath); } else { tabletCtx.setDest(beStat.getBeId(), pathHash); } @@ -280,7 +293,8 @@ public class PartitionRebalancer extends Rebalancer { private void checkMoveValidation(TabletMove move) throws IllegalStateException { boolean fromAvailable = infoService.checkBackendScheduleAvailable(move.fromBe); boolean toAvailable = infoService.checkBackendScheduleAvailable(move.toBe); - Preconditions.checkState(fromAvailable && toAvailable, move + "'s bes are not all available: from " + fromAvailable + ", to " + toAvailable); + Preconditions.checkState(fromAvailable && toAvailable, + move + "'s bes are not all available: from " + fromAvailable + ", to " + toAvailable); // To be improved } @@ -333,7 +347,8 @@ public class PartitionRebalancer extends Rebalancer { // Balance information for a cluster(one medium), excluding decommissioned/dead bes and replicas on them. // Natural ordering, so the last key is the max key. 
public static class ClusterBalanceInfo { - TreeMultimap partitionInfoBySkew = TreeMultimap.create(Ordering.natural(), Ordering.arbitrary()); + TreeMultimap partitionInfoBySkew + = TreeMultimap.create(Ordering.natural(), Ordering.arbitrary()); TreeMultimap beByTotalReplicaCount = TreeMultimap.create(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/Rebalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/Rebalancer.java index ef7ae27995..09aea8e68a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/Rebalancer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/Rebalancer.java @@ -42,8 +42,9 @@ import java.util.Map; * 3. getToDeleteReplicaId: if the rebalance strategy wants to delete the specified replica, * override this func to let TabletScheduler know in handling redundant replica. * NOTICE: - * 1. Adding the selected tablets by TabletScheduler may not succeed at all. And the move may be failed in some other places. - * So the thing you need to know is, Rebalancer cannot know when the move is failed. + * 1. Adding the selected tablets by TabletScheduler may not succeed at all. + * And the move may be failed in some other places. So the thing you need to know is, + * Rebalancer cannot know when the move is failed. * 2. If you want to make sure the move is succeed, you can assume that it's succeed when getToDeleteReplicaId called. */ public abstract class Rebalancer { diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java index b56d9bc9a5..6bb75a8333 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java @@ -167,7 +167,8 @@ public class TabletChecker extends MasterDaemon { } // we also need to change the priority of tablets which are already in - tabletScheduler.changeTabletsPriorityToVeryHigh(repairTabletInfo.dbId, repairTabletInfo.tblId, repairTabletInfo.partIds); + tabletScheduler.changeTabletsPriorityToVeryHigh( + repairTabletInfo.dbId, repairTabletInfo.tblId, repairTabletInfo.partIds); } private void removePrios(RepairTabletInfo repairTabletInfo) { @@ -333,7 +334,7 @@ public class TabletChecker extends MasterDaemon { } private LoopControlStatus handlePartitionTablet(Database db, OlapTable tbl, Partition partition, boolean isInPrios, - List aliveBeIdsInCluster, long startTime, CheckerCounter counter) { + List aliveBeIdsInCluster, long startTime, CheckerCounter counter) { if (partition.getState() != PartitionState.NORMAL) { // when alter job is in FINISHING state, partition state will be set to NORMAL, // and we can schedule the tablets in it. @@ -478,9 +479,11 @@ public class TabletChecker extends MasterDaemon { * when being scheduled. 
*/ public void repairTable(AdminRepairTableStmt stmt) throws DdlException { - RepairTabletInfo repairTabletInfo = getRepairTabletInfo(stmt.getDbName(), stmt.getTblName(), stmt.getPartitions()); + RepairTabletInfo repairTabletInfo = getRepairTabletInfo( + stmt.getDbName(), stmt.getTblName(), stmt.getPartitions()); addPrios(repairTabletInfo, stmt.getTimeoutS() * 1000); - LOG.info("repair database: {}, table: {}, partition: {}", repairTabletInfo.dbId, repairTabletInfo.tblId, repairTabletInfo.partIds); + LOG.info("repair database: {}, table: {}, partition: {}", + repairTabletInfo.dbId, repairTabletInfo.tblId, repairTabletInfo.partIds); } /* @@ -488,9 +491,11 @@ public class TabletChecker extends MasterDaemon { * This operation will remove the specified partitions from 'prios' */ public void cancelRepairTable(AdminCancelRepairTableStmt stmt) throws DdlException { - RepairTabletInfo repairTabletInfo = getRepairTabletInfo(stmt.getDbName(), stmt.getTblName(), stmt.getPartitions()); + RepairTabletInfo repairTabletInfo + = getRepairTabletInfo(stmt.getDbName(), stmt.getTblName(), stmt.getPartitions()); removePrios(repairTabletInfo); - LOG.info("cancel repair database: {}, table: {}, partition: {}", repairTabletInfo.dbId, repairTabletInfo.tblId, repairTabletInfo.partIds); + LOG.info("cancel repair database: {}, table: {}, partition: {}", + repairTabletInfo.dbId, repairTabletInfo.tblId, repairTabletInfo.partIds); } public int getPrioPartitionNum() { @@ -520,7 +525,8 @@ public class TabletChecker extends MasterDaemon { return infos; } - public static RepairTabletInfo getRepairTabletInfo(String dbName, String tblName, List partitions) throws DdlException { + public static RepairTabletInfo getRepairTabletInfo(String dbName, String tblName, + List partitions) throws DdlException { Catalog catalog = Catalog.getCurrentCatalog(); Database db = catalog.getDbOrDdlException(dbName); diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletSchedCtx.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletSchedCtx.java index d6e8eeeed9..daff5f1e06 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletSchedCtx.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletSchedCtx.java @@ -685,7 +685,8 @@ public class TabletSchedCtx implements Comparable { // So if this replica was previously set to DECOMMISSION, this state needs to be reset to NORMAL. // It may happen as follows: // 1. A tablet of colocation table is in COLOCATION_REDUNDANT state - // 2. The tablet is being scheduled and set one of replica as DECOMMISSION in TabletScheduler.deleteReplicaInternal() + // 2. The tablet is being scheduled and set one of replica as + // DECOMMISSION in TabletScheduler.deleteReplicaInternal() // 3. The tablet will then be scheduled again // 4. But at that time, the BE node of the replica that was // set to the DECOMMISSION state in step 2 is returned to the colocation group. 
@@ -735,7 +736,8 @@ public class TabletSchedCtx implements Comparable { } if (storageMediaMigrationTask != null) { - AgentTaskQueue.removeTask(storageMediaMigrationTask.getBackendId(), TTaskType.STORAGE_MEDIUM_MIGRATE, storageMediaMigrationTask.getSignature()); + AgentTaskQueue.removeTask(storageMediaMigrationTask.getBackendId(), + TTaskType.STORAGE_MEDIUM_MIGRATE, storageMediaMigrationTask.getSignature()); } if (cloneTask != null) { AgentTaskQueue.removeTask(cloneTask.getBackendId(), TTaskType.CLONE, cloneTask.getSignature()); @@ -829,11 +831,13 @@ public class TabletSchedCtx implements Comparable { // That is, we may need to use 2 clone tasks to create a new replica. It is inefficient, // but there is no other way now. - // if this is a balance task, or this is a repair task with REPLICA_MISSING/REPLICA_RELOCATING or REPLICA_MISSING_IN_CLUSTER, + // if this is a balance task, or this is a repair task with + // REPLICA_MISSING/REPLICA_RELOCATING or REPLICA_MISSING_IN_CLUSTER, // we create a new replica with state CLONE if (tabletStatus == TabletStatus.REPLICA_MISSING || tabletStatus == TabletStatus.REPLICA_MISSING_IN_CLUSTER || tabletStatus == TabletStatus.REPLICA_RELOCATING || type == Type.BALANCE - || tabletStatus == TabletStatus.COLOCATE_MISMATCH || tabletStatus == TabletStatus.REPLICA_MISSING_FOR_TAG) { + || tabletStatus == TabletStatus.COLOCATE_MISMATCH + || tabletStatus == TabletStatus.REPLICA_MISSING_FOR_TAG) { Replica cloneReplica = new Replica( Catalog.getCurrentCatalog().getNextId(), destBackendId, -1 /* version */, schemaHash, @@ -920,9 +924,12 @@ public class TabletSchedCtx implements Comparable { } // 1. check the tablet status first - Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, s -> new SchedException(Status.UNRECOVERABLE, "db " + dbId + " does not exist")); - OlapTable olapTable = (OlapTable) db.getTableOrException(tblId, s -> new SchedException(Status.UNRECOVERABLE, "tbl " + tabletId + " does not exist")); - olapTable.writeLockOrException(new SchedException(Status.UNRECOVERABLE, "table " + olapTable.getName() + " does not exist")); + Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, + s -> new SchedException(Status.UNRECOVERABLE, "db " + dbId + " does not exist")); + OlapTable olapTable = (OlapTable) db.getTableOrException(tblId, + s -> new SchedException(Status.UNRECOVERABLE, "tbl " + tabletId + " does not exist")); + olapTable.writeLockOrException(new SchedException(Status.UNRECOVERABLE, "table " + + olapTable.getName() + " does not exist")); try { Partition partition = olapTable.getPartition(partitionId); if (partition == null) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java index 166fdc17a4..8e35c5fa23 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java @@ -207,7 +207,8 @@ public class TabletScheduler extends MasterDaemon { // add new backends for (Backend be : backends.values()) { if (!backendsWorkingSlots.containsKey(be.getId())) { - List pathHashes = be.getDisks().values().stream().map(DiskInfo::getPathHash).collect(Collectors.toList()); + List pathHashes = be.getDisks().values().stream() + .map(DiskInfo::getPathHash).collect(Collectors.toList()); PathSlot slot = new PathSlot(pathHashes, Config.schedule_slot_num_per_path); backendsWorkingSlots.put(be.getId(), slot); LOG.info("add new backend {} with slots num: {}", 
be.getId(), be.getDisks().size()); @@ -287,7 +288,8 @@ public class TabletScheduler extends MasterDaemon { * 3. priority may be upgraded if it is not being schedule for a long time. * 4. every pending task should has a max scheduled time, if schedule fails too many times, if should be removed. * 5. every running task should has a timeout, to avoid running forever. - * 6. every running task should also has a max failure time, if clone task fails too many times, if should be removed. + * 6. every running task should also has a max failure time, + * if clone task fails too many times, if should be removed. * */ @Override @@ -489,7 +491,8 @@ public class TabletScheduler extends MasterDaemon { s -> new SchedException(Status.UNRECOVERABLE, "db " + tabletCtx.getDbId() + " does not exist")); OlapTable tbl = (OlapTable) db.getTableOrException(tabletCtx.getTblId(), s -> new SchedException(Status.UNRECOVERABLE, "tbl " + tabletCtx.getTblId() + " does not exist")); - tbl.writeLockOrException(new SchedException(Status.UNRECOVERABLE, "table " + tbl.getName() + " does not exist")); + tbl.writeLockOrException(new SchedException(Status.UNRECOVERABLE, "table " + + tbl.getName() + " does not exist")); try { boolean isColocateTable = colocateTableIndex.isColocateTable(tbl.getId()); @@ -543,7 +546,8 @@ public class TabletScheduler extends MasterDaemon { if (tabletCtx.getType() == TabletSchedCtx.Type.BALANCE) { try { - DatabaseTransactionMgr dbTransactionMgr = Catalog.getCurrentGlobalTransactionMgr().getDatabaseTransactionMgr(db.getId()); + DatabaseTransactionMgr dbTransactionMgr + = Catalog.getCurrentGlobalTransactionMgr().getDatabaseTransactionMgr(db.getId()); for (TransactionState transactionState : dbTransactionMgr.getPreCommittedTxnList()) { if (transactionState.getTableIdList().contains(tbl.getId())) { // If table releate to transaction with precommitted status, do not allow to do balance. @@ -623,7 +627,8 @@ public class TabletScheduler extends MasterDaemon { handleReplicaMissing(tabletCtx, batchTask); break; case VERSION_INCOMPLETE: - case NEED_FURTHER_REPAIR: // same as version incomplete, it prefer to the dest replica which need further repair + case NEED_FURTHER_REPAIR: + // same as version incomplete, it prefers to the dest replica which need further repair handleReplicaVersionIncomplete(tabletCtx, batchTask); break; case REPLICA_RELOCATING: @@ -743,7 +748,8 @@ public class TabletScheduler extends MasterDaemon { tabletCtx.chooseDestReplicaForVersionIncomplete(backendsWorkingSlots); } catch (SchedException e) { if (e.getMessage().equals("unable to choose dest replica")) { - // This situation may occur when the BE nodes where all replicas of a tablet are located are decommission, + // This situation may occur when the BE nodes + // where all replicas of a tablet are located are decommission, // and this task is a VERSION_INCOMPLETE task. // This will lead to failure to select a suitable dest replica. // At this time, we try to convert this task to a REPLICA_MISSING task, and schedule it again. @@ -752,7 +758,8 @@ public class TabletScheduler extends MasterDaemon { tabletCtx.releaseResource(this, true); tabletCtx.setTabletStatus(TabletStatus.REPLICA_MISSING); handleReplicaMissing(tabletCtx, batchTask); - LOG.debug("succeed to find new backend for VERSION_INCOMPLETE task. tablet id: {}", tabletCtx.getTabletId()); + LOG.debug("succeed to find new backend for VERSION_INCOMPLETE task. 
tablet id: {}", + tabletCtx.getTabletId()); return; } else { throw e; @@ -1085,26 +1092,30 @@ public class TabletScheduler extends MasterDaemon { if (chosenReplica != null && !chosenReplica.equals(minReplica) && minReplica.isAlive() && !minReplica.tooSlow() && normalReplicaCount >= 1) { chosenReplica.setState(ReplicaState.COMPACTION_TOO_SLOW); - LOG.info("set replica id :{} tablet id: {}, backend id: {} to COMPACTION_TOO_SLOW", chosenReplica.getId() - , tabletCtx.getTablet() - .getId(), chosenReplica.getBackendId()); + LOG.info("set replica id :{} tablet id: {}, backend id: {} to COMPACTION_TOO_SLOW", + chosenReplica.getId(), tabletCtx.getTablet().getId(), chosenReplica.getBackendId()); throw new SchedException(Status.FINISHED, "set replica to COMPACTION_TOO_SLOW"); } throw new SchedException(Status.FINISHED, "No replica too slow"); } - private void deleteReplicaInternal(TabletSchedCtx tabletCtx, Replica replica, String reason, boolean force) throws SchedException { + private void deleteReplicaInternal(TabletSchedCtx tabletCtx, + Replica replica, String reason, boolean force) throws SchedException { /* - * Before deleting a replica, we should make sure that there is no running txn on it and no more txns will be on it. + * Before deleting a replica, we should make sure that + * there is no running txn on it and no more txns will be on it. * So we do followings: - * 1. If replica is loadable, set a watermark txn id on it and set it state as DECOMMISSION, but not deleting it this time. + * 1. If replica is loadable, set a watermark txn id on it and set it state as DECOMMISSION, + * but not deleting it this time. * The DECOMMISSION state will ensure that no more txns will be on this replicas. - * 2. Wait for any txns before the watermark txn id to be finished. If all are finished, which means this replica is + * 2. Wait for any txns before the watermark txn id to be finished. + * If all are finished, which means this replica is * safe to be deleted. */ if (!force && !Config.enable_force_drop_redundant_replica && replica.getState().canLoad() && replica.getWatermarkTxnId() == -1 && !FeConstants.runningUnitTest) { - long nextTxnId = Catalog.getCurrentGlobalTransactionMgr().getTransactionIDGenerator().getNextTransactionId(); + long nextTxnId = Catalog.getCurrentGlobalTransactionMgr() + .getTransactionIDGenerator().getNextTransactionId(); replica.setWatermarkTxnId(nextTxnId); replica.setState(ReplicaState.DECOMMISSION); // set priority to normal because it may wait for a long time. Remain it as VERY_HIGH may block other task. @@ -1176,7 +1187,8 @@ public class TabletScheduler extends MasterDaemon { /** * Missing for tag, which means some of replicas of this tablet are allocated in wrong backend with specified tag. - * Treat it as replica missing, and in handleReplicaMissing(), it will find a property backend to create new replica. + * Treat it as replica missing, and in handleReplicaMissing(), + * it will find a property backend to create new replica. */ private void handleReplicaMissingForTag(TabletSchedCtx tabletCtx, AgentBatchTask batchTask) throws SchedException { @@ -1329,7 +1341,8 @@ public class TabletScheduler extends MasterDaemon { // This is to solve, when we decommission some BEs with SSD disks, // if there are no SSD disks on the remaining BEs, it will be impossible to select a // suitable destination path. - // In this case, we need to ignore the storage medium property and try to select the destination path again. 
+ // In this case, we need to ignore the storage medium property + // and try to select the destination path again. // Set `isSupplement` to true will ignore the storage medium property. st = bes.isFit(tabletCtx.getTabletSize(), tabletCtx.getStorageMedium(), resultPaths, true); @@ -1463,6 +1476,7 @@ public class TabletScheduler extends MasterDaemon { finalizeTabletCtx(tabletCtx, TabletSchedCtx.State.FINISHED, Status.FINISHED, "finished"); return true; } + /** * return true if we want to remove the clone task from AgentTaskQueue */ diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgo.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgo.java index 6dcb401f43..b589b3f157 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgo.java @@ -35,11 +35,13 @@ import java.util.Random; import java.util.Set; import java.util.stream.Collectors; -/* - * A two-dimensional greedy rebalancing algorithm. The two dims are cluster and partition. It'll generate multiple `PartitionMove`, - * only decide which partition to move, fromBe, toBe. The next step is to select a tablet to move. +/** + * A two-dimensional greedy rebalancing algorithm. The two dims are cluster and partition. + * It'll generate multiple `PartitionMove`, only decide which partition to move, fromBe, toBe. + * The next step is to select a tablet to move. * - * From among moves that decrease the skew of a most skewed partition, it prefers ones that reduce the skew of the cluster. + *

From among moves that decrease the skew of a most skewed partition, + * it prefers ones that reduce the skew of the cluster. * A cluster is considered balanced when the skew of every partition is <= 1 and the skew of the cluster is <= 1. * The skew of the cluster is defined as the difference between the maximum total replica count over all bes and the * minimum total replica count over all bes. @@ -194,18 +196,24 @@ public class TwoDimensionalGreedyRebalanceAlgo { Long minReplicaCount = pbi.beByReplicaCount.keySet().first(); Long maxReplicaCount = pbi.beByReplicaCount.keySet().last(); - LOG.debug("balancing partition {}-{} with replica count skew {} (min_replica_count: {}, max_replica_count: {})", + LOG.debug("balancing partition {}-{} with replica count skew {}" + + " (min_replica_count: {}, max_replica_count: {})", pbi.partitionId, pbi.indexId, maxPartitionSkew, minReplicaCount, maxReplicaCount); // Compute the intersection of the bes most loaded for the table // with the bes most loaded overall, and likewise for least loaded. // These are our ideal candidates for moving from and to, respectively. - IntersectionResult maxLoaded = getIntersection(ExtremumType.MAX, pbi.beByReplicaCount, beByTotalReplicaCount); - IntersectionResult minLoaded = getIntersection(ExtremumType.MIN, pbi.beByReplicaCount, beByTotalReplicaCount); - LOG.debug("partition-wise: min_count: {}, max_count: {}", minLoaded.replicaCountPartition, maxLoaded.replicaCountPartition); - LOG.debug("cluster-wise: min_count: {}, max_count: {}", minLoaded.replicaCountTotal, maxLoaded.replicaCountTotal); - LOG.debug("min_loaded_intersection: {}, max_loaded_intersection: {}", minLoaded.intersection.toString(), maxLoaded.intersection.toString()); + IntersectionResult maxLoaded = getIntersection(ExtremumType.MAX, + pbi.beByReplicaCount, beByTotalReplicaCount); + IntersectionResult minLoaded = getIntersection(ExtremumType.MIN, + pbi.beByReplicaCount, beByTotalReplicaCount); + LOG.debug("partition-wise: min_count: {}, max_count: {}", + minLoaded.replicaCountPartition, maxLoaded.replicaCountPartition); + LOG.debug("cluster-wise: min_count: {}, max_count: {}", + minLoaded.replicaCountTotal, maxLoaded.replicaCountTotal); + LOG.debug("min_loaded_intersection: {}, max_loaded_intersection: {}", + minLoaded.intersection.toString(), maxLoaded.intersection.toString()); // Do not move replicas of a balanced table if the least (most) loaded // servers overall do not intersect the servers hosting the least (most) @@ -220,8 +228,10 @@ public class TwoDimensionalGreedyRebalanceAlgo { Long maxLoadedBe; if (equalSkewOption == EqualSkewOption.PICK_FIRST) { // beWithExtremumCount lists & intersection lists are natural ordering - minLoadedBe = minLoaded.intersection.isEmpty() ? minLoaded.beWithExtremumCount.get(0) : minLoaded.intersection.get(0); - maxLoadedBe = maxLoaded.intersection.isEmpty() ? maxLoaded.beWithExtremumCount.get(0) : maxLoaded.intersection.get(0); + minLoadedBe = minLoaded.intersection.isEmpty() + ? minLoaded.beWithExtremumCount.get(0) : minLoaded.intersection.get(0); + maxLoadedBe = maxLoaded.intersection.isEmpty() + ? maxLoaded.beWithExtremumCount.get(0) : maxLoaded.intersection.get(0); } else { minLoadedBe = minLoaded.intersection.isEmpty() ? 
getRandomListElement(minLoaded.beWithExtremumCount) : getRandomListElement(minLoaded.intersection); @@ -247,8 +257,8 @@ public class TwoDimensionalGreedyRebalanceAlgo { return items.get(rand.nextInt(items.size())); } - public static IntersectionResult getIntersection(ExtremumType extremumType, TreeMultimap beByReplicaCount, - TreeMultimap beByTotalReplicaCount) { + public static IntersectionResult getIntersection(ExtremumType extremumType, + TreeMultimap beByReplicaCount, TreeMultimap beByTotalReplicaCount) { Pair> beSelectedByPartition = getMinMaxLoadedServers(beByReplicaCount, extremumType); Pair> beSelectedByTotal = getMinMaxLoadedServers(beByTotalReplicaCount, extremumType); Preconditions.checkNotNull(beSelectedByPartition); @@ -258,11 +268,13 @@ public class TwoDimensionalGreedyRebalanceAlgo { res.replicaCountPartition = beSelectedByPartition.first; res.replicaCountTotal = beSelectedByTotal.first; res.beWithExtremumCount = Lists.newArrayList(beSelectedByPartition.second); - res.intersection = Lists.newArrayList(Sets.intersection(beSelectedByPartition.second, beSelectedByTotal.second)); + res.intersection = Lists.newArrayList( + Sets.intersection(beSelectedByPartition.second, beSelectedByTotal.second)); return res; } - private static Pair> getMinMaxLoadedServers(TreeMultimap multimap, ExtremumType extremumType) { + private static Pair> getMinMaxLoadedServers( + TreeMultimap multimap, ExtremumType extremumType) { if (multimap.isEmpty()) { return null; } @@ -270,8 +282,10 @@ public class TwoDimensionalGreedyRebalanceAlgo { return new Pair<>(count, multimap.get(count)); } - // Update the balance state in 'ClusterBalanceInfo'(the two maps) with the outcome of the move 'move'. - // To support apply in-progress moves to current cluster balance info, if apply failed, the maps should not be modified. + /** Update the balance state in 'ClusterBalanceInfo'(the two maps) with the outcome of the move 'move'. + * To support apply in-progress moves to current cluster balance info, + * if apply failed, the maps should not be modified. 
+ */ public static boolean applyMove(PartitionMove move, TreeMultimap beByTotalReplicaCount, TreeMultimap skewMap) { // Update the total counts @@ -282,8 +296,9 @@ public class TwoDimensionalGreedyRebalanceAlgo { Long skew = -1L; for (Long key : skewMap.keySet()) { NavigableSet pbiSet = skewMap.get(key); - List pbis = pbiSet.stream().filter(info -> - info.partitionId.equals(move.partitionId) && info.indexId.equals(move.indexId)).collect(Collectors.toList()); + List pbis = pbiSet.stream() + .filter(info -> info.partitionId.equals(move.partitionId) && info.indexId.equals(move.indexId)) + .collect(Collectors.toList()); Preconditions.checkState(pbis.size() <= 1, "skew map has dup partition info"); if (pbis.size() == 1) { partitionBalanceInfo = pbis.get(0); @@ -301,7 +316,8 @@ public class TwoDimensionalGreedyRebalanceAlgo { long maxCount = newInfo.beByReplicaCount.keySet().last(); skewMap.put(maxCount - minCount, newInfo); } catch (IllegalStateException e) { - // If touch IllegalState, the skew map doesn't be modified, so we should rollback the move of beByTotalReplicaCount + // If touch IllegalState, the skew map doesn't be modified, + // so we should rollback the move of beByTotalReplicaCount moveOneReplica(move.toBe, move.fromBe, beByTotalReplicaCount); LOG.info("{} apply failed, {}", move, e.getMessage()); return false; @@ -316,7 +332,8 @@ public class TwoDimensionalGreedyRebalanceAlgo { // Applies to 'm' a move of a replica from the be with id 'src' to the be with id 'dst' by decrementing // the count of 'src' and incrementing the count of 'dst'. // If check failed, won't modify the map. - private static void moveOneReplica(Long fromBe, Long toBe, TreeMultimap m) throws IllegalStateException { + private static void moveOneReplica(Long fromBe, Long toBe, + TreeMultimap m) throws IllegalStateException { boolean foundSrc = false; boolean foundDst = false; Long countSrc = 0L; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/CIDR.java b/fe/fe-core/src/main/java/org/apache/doris/common/CIDR.java index 4cef7be025..e927b621d9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/CIDR.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/CIDR.java @@ -40,6 +40,7 @@ public class CIDR { // Count the number of 1-bits in a 32-bit integer private static ImmutableMap maskBitNumMap; + static { ImmutableMap.Builder builder = ImmutableMap.builder(); builder.put(0, 0); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/CheckedMath.java b/fe/fe-core/src/main/java/org/apache/doris/common/CheckedMath.java index 2d92e24948..02857f42d7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/CheckedMath.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/CheckedMath.java @@ -23,7 +23,7 @@ import org.apache.logging.log4j.Logger; public class CheckedMath { - private final static Logger LOG = LogManager.getLogger(CheckedMath.class); + private static final Logger LOG = LogManager.getLogger(CheckedMath.class); /** * Computes and returns the multiply of two longs. 
If an overflow occurs, diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/Config.java b/fe/fe-core/src/main/java/org/apache/doris/common/Config.java index e5f109686d..2b473d7ac0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/Config.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/Config.java @@ -156,7 +156,8 @@ public class Config extends ConfigBase { public static int history_job_keep_max_second = 7 * 24 * 3600; // 7 days /** - * the transaction will be cleaned after transaction_clean_interval_second seconds if the transaction is visible or aborted + * the transaction will be cleaned after transaction_clean_interval_second seconds + * if the transaction is visible or aborted * we should make this interval as short as possible and each clean cycle as soon as possible */ @ConfField @@ -729,7 +730,8 @@ public class Config extends ConfigBase { public static int max_running_txn_num_per_db = 100; /** - * This configuration is just for compatible with old version, this config has been replaced by async_loading_load_task_pool_size, + * This configuration is just for compatible with old version, + * this config has been replaced by async_loading_load_task_pool_size, * it will be removed in the future. */ @ConfField(mutable = false, masterOnly = true) @@ -1071,7 +1073,8 @@ public class Config extends ConfigBase { public static boolean disable_load_job = false; /* - * One master daemon thread will update database used data quota for db txn manager every db_used_data_quota_update_interval_secs + * One master daemon thread will update database used data quota for db txn manager + * every db_used_data_quota_update_interval_secs */ @ConfField(mutable = false, masterOnly = true) public static int db_used_data_quota_update_interval_secs = 300; @@ -1486,7 +1489,8 @@ public class Config extends ConfigBase { public static int default_max_query_instances = -1; /* - * One master daemon thread will update global partition in memory info every partition_in_memory_update_interval_secs + * One master daemon thread will update global partition in memory + * info every partition_in_memory_update_interval_secs */ @ConfField(mutable = false, masterOnly = true) public static int partition_in_memory_update_interval_secs = 300; @@ -1538,7 +1542,8 @@ public class Config extends ConfigBase { * is to wait until the loading task finished before dropping them. * But the default strategy may takes very long time to handle these redundant replicas. * So we can set this config to true to not wait any loading task. - * Set this config to true may cause loading task failed, but will speed up the process of tablet balance and repair. + * Set this config to true may cause loading task failed, but will + * speed up the process of tablet balance and repair. */ @ConfField(mutable = true, masterOnly = true) public static boolean enable_force_drop_redundant_replica = false; @@ -1551,7 +1556,8 @@ public class Config extends ConfigBase { /* * The relocation of a colocation group may involve a large number of tablets moving within the cluster. - * Therefore, we should use a more conservative strategy to avoid relocation of colocation groups as much as possible. + * Therefore, we should use a more conservative strategy to avoid relocation + * of colocation groups as much as possible. * Reloaction usually occurs after a BE node goes offline or goes down. * This parameter is used to delay the determination of BE node unavailability. 
* The default is 30 minutes, i.e., if a BE node recovers within 30 minutes, relocation of the colocation group @@ -1577,8 +1583,8 @@ public class Config extends ConfigBase { public static int min_version_count_indicate_replica_compaction_too_slow = 300; /** - * The valid ratio threshold of the difference between the version count of the slowest replica and the fastest replica. - * If repair_slow_replica is set to true, it is used to determine whether to repair the slowest replica + * The valid ratio threshold of the difference between the version count of the slowest replicaand the fastest + * replica. If repair_slow_replica is set to true, it is used to determine whether to repair the slowest replica */ @ConfField(mutable = true, masterOnly = true) public static double valid_version_count_delta_ratio_between_replicas = 0.5; @@ -1649,13 +1655,13 @@ public class Config extends ConfigBase { public static boolean enable_multi_catalog = false; // 1 min /** - * If set to TRUE, FE will: + * If set to TRUE, FE will: * 1. divide BE into high load and low load(no mid load) to force triggering tablet scheduling; * 2. ignore whether the cluster can be more balanced during tablet scheduling; * - * It's used to test the reliability in single replica case when tablet scheduling are frequent. + * It's used to test the reliability in single replica case when tablet scheduling are frequent. * Default is false. - */ + */ @ConfField(mutable = false, masterOnly = true) public static boolean be_rebalancer_fuzzy_test = false; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/ConfigBase.java b/fe/fe-core/src/main/java/org/apache/doris/common/ConfigBase.java index 213e8f5907..4ae2e15220 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/ConfigBase.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/ConfigBase.java @@ -289,7 +289,7 @@ public class ConfigBase { throw new IllegalArgumentException("type mismatch"); } - public synchronized static void setMutableConfig(String key, String value) throws DdlException { + public static synchronized void setMutableConfig(String key, String value) throws DdlException { Field field = confFields.get(key); if (field == null) { throw new DdlException("Config '" + key + "' does not exist"); @@ -312,7 +312,7 @@ public class ConfigBase { LOG.info("set config {} to {}", key, value); } - public synchronized static List> getConfigInfo(PatternMatcher matcher) { + public static synchronized List> getConfigInfo(PatternMatcher matcher) { return confFields.entrySet().stream().sorted(Map.Entry.comparingByKey()).flatMap(e -> { String confKey = e.getKey(); Field f = e.getValue(); @@ -332,7 +332,7 @@ public class ConfigBase { }).collect(Collectors.toList()); } - public synchronized static boolean checkIsMasterOnly(String key) { + public static synchronized boolean checkIsMasterOnly(String key) { Field f = confFields.get(key); if (f == null) { return false; @@ -343,7 +343,8 @@ public class ConfigBase { } // use synchronized to make sure only one thread modify this file - public synchronized static void persistConfig(Map customConf, boolean resetPersist) throws IOException { + public static synchronized void persistConfig(Map customConf, boolean resetPersist) + throws IOException { File file = new File(customConfFile); if (!file.exists()) { file.createNewFile(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/DdlException.java b/fe/fe-core/src/main/java/org/apache/doris/common/DdlException.java index eb96451cd0..2c7310ad0b 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/common/DdlException.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/DdlException.java @@ -21,6 +21,7 @@ public class DdlException extends UserException { public DdlException(String msg) { super(msg); } + public DdlException(String msg, ErrorCode mysqlErrorCode) { super(msg); setMysqlErrorCode(mysqlErrorCode); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/ErrorReport.java b/fe/fe-core/src/main/java/org/apache/doris/common/ErrorReport.java index 23c65b33b5..e2f55a34ba 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/ErrorReport.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/ErrorReport.java @@ -39,7 +39,8 @@ public class ErrorReport { public static void reportAnalysisException(String pattern, Object... objs) throws AnalysisException { - throw new AnalysisException(reportCommon(pattern, ErrorCode.ERR_UNKNOWN_ERROR, objs), ErrorCode.ERR_UNKNOWN_ERROR); + throw new AnalysisException(reportCommon(pattern, ErrorCode.ERR_UNKNOWN_ERROR, objs), + ErrorCode.ERR_UNKNOWN_ERROR); } public static void reportAnalysisException(ErrorCode errorCode, Object... objs) diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/GenericPool.java b/fe/fe-core/src/main/java/org/apache/doris/common/GenericPool.java index 04de9901a4..ee52f10577 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/GenericPool.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/GenericPool.java @@ -137,8 +137,9 @@ public class GenericPool { LOG.debug("before create socket hostname={} key.port={} timeoutMs={}", key.hostname, key.port, timeoutMs); } - TTransport transport = isNonBlockingIO ? new TFramedTransport(new TSocket(key.hostname, key.port, timeoutMs)) : - new TSocket(key.hostname, key.port, timeoutMs); + TTransport transport = isNonBlockingIO + ? new TFramedTransport(new TSocket(key.hostname, key.port, timeoutMs)) + : new TSocket(key.hostname, key.port, timeoutMs); transport.open(); TProtocol protocol = new TBinaryProtocol(transport); VALUE client = (VALUE) newInstance(className, protocol); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/Id.java b/fe/fe-core/src/main/java/org/apache/doris/common/Id.java index 52187d6fb5..79372c52a0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/Id.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/Id.java @@ -26,7 +26,7 @@ import java.util.ArrayList; * Integer ids that cannot accidentally be compared with ints. 
*/ public class Id> { - static private int INVALID_ID = -1; + private static final int INVALID_ID = -1; protected final int id; public Id() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/IdGenerator.java b/fe/fe-core/src/main/java/org/apache/doris/common/IdGenerator.java index 6f025a396c..120a9a924e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/IdGenerator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/IdGenerator.java @@ -26,6 +26,8 @@ package org.apache.doris.common; */ public abstract class IdGenerator> { protected int nextId = 0; + public abstract IdType getNextId(); + public abstract IdType getMaxId(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/Log4jConfig.java b/fe/fe-core/src/main/java/org/apache/doris/common/Log4jConfig.java index 87105d56b9..7bf38fd786 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/Log4jConfig.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/Log4jConfig.java @@ -37,6 +37,7 @@ import java.util.Map; public class Log4jConfig extends XmlConfiguration { private static final long serialVersionUID = 1L; + // CHECKSTYLE OFF private static String xmlConfTemplate = "\n" + "\n\n" + "\n" @@ -104,6 +105,7 @@ public class Log4jConfig extends XmlConfiguration { + " \n" + " \n" + ""; + // CHECKSTYLE ON private static StrSubstitutor strSub; private static String sysLogLevel; @@ -235,7 +237,7 @@ public class Log4jConfig extends XmlConfiguration { super(LoggerContext.getContext(), configSource); } - public synchronized static void initLogging(String dorisConfDir) throws IOException { + public static synchronized void initLogging(String dorisConfDir) throws IOException { sysLogLevel = Config.sys_log_level; verboseModules = Config.sys_log_verbose_modules; auditModules = Config.audit_log_modules; @@ -244,7 +246,7 @@ public class Log4jConfig extends XmlConfiguration { reconfig(); } - public synchronized static Tuple updateLogging( + public static synchronized Tuple updateLogging( String level, String[] verboseNames, String[] auditNames) throws IOException { boolean toReconfig = false; if (level != null) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/Pair.java b/fe/fe-core/src/main/java/org/apache/doris/common/Pair.java index 3894632d0b..5cb522ac7b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/Pair.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/Pair.java @@ -42,7 +42,7 @@ public class Pair { this.second = second; } - static public Pair create(F first, S second) { + public static Pair create(F first, S second) { return new Pair(first, second); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/ThreadPoolManager.java b/fe/fe-core/src/main/java/org/apache/doris/common/ThreadPoolManager.java index bbabf26abf..4e5ae73b29 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/ThreadPoolManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/ThreadPoolManager.java @@ -54,7 +54,8 @@ import java.util.concurrent.TimeUnit; * * All thread pool constructed by ThreadPoolManager will be added to the nameToThreadPoolMap, * so the thread pool name in fe must be unique. 
- * when all thread pools are constructed, ThreadPoolManager will register some metrics of all thread pool to MetricRepo, + * when all thread pools are constructed, + * ThreadPoolManager will register some metrics of all thread pool to MetricRepo, * so we can know the runtime state for all thread pool by prometheus metrics */ @@ -64,7 +65,7 @@ public class ThreadPoolManager { private static String[] poolMetricTypes = {"pool_size", "active_thread_num", "task_in_queue"}; - private final static long KEEP_ALIVE_TIME = 60L; + private static final long KEEP_ALIVE_TIME = 60L; public static void registerAllThreadPoolMetric() { for (Map.Entry entry : nameToThreadPoolMap.entrySet()) { @@ -75,7 +76,8 @@ public class ThreadPoolManager { public static void registerThreadPoolMetric(String poolName, ThreadPoolExecutor threadPool) { for (String poolMetricType : poolMetricTypes) { - GaugeMetric gauge = new GaugeMetric("thread_pool", MetricUnit.NOUNIT, "thread_pool statistics") { + GaugeMetric gauge = new GaugeMetric( + "thread_pool", MetricUnit.NOUNIT, "thread_pool statistics") { @Override public Integer getValue() { String metricType = this.getLabels().get(1).getValue(); @@ -97,14 +99,18 @@ public class ThreadPoolManager { } } - public static ThreadPoolExecutor newDaemonCacheThreadPool(int maxNumThread, String poolName, boolean needRegisterMetric) { - return newDaemonThreadPool(0, maxNumThread, KEEP_ALIVE_TIME, TimeUnit.SECONDS, new SynchronousQueue(), + public static ThreadPoolExecutor newDaemonCacheThreadPool(int maxNumThread, + String poolName, boolean needRegisterMetric) { + return newDaemonThreadPool(0, maxNumThread, KEEP_ALIVE_TIME, + TimeUnit.SECONDS, new SynchronousQueue(), new LogDiscardPolicy(poolName), poolName, needRegisterMetric); } - public static ThreadPoolExecutor newDaemonFixedThreadPool(int numThread, int queueSize, String poolName, boolean needRegisterMetric) { - return newDaemonThreadPool(numThread, numThread, KEEP_ALIVE_TIME, TimeUnit.SECONDS, new LinkedBlockingQueue<>(queueSize), - new BlockedPolicy(poolName, 60), poolName, needRegisterMetric); + public static ThreadPoolExecutor newDaemonFixedThreadPool(int numThread, + int queueSize, String poolName, boolean needRegisterMetric) { + return newDaemonThreadPool(numThread, numThread, KEEP_ALIVE_TIME, TimeUnit.SECONDS, + new LinkedBlockingQueue<>(queueSize), new BlockedPolicy(poolName, 60), + poolName, needRegisterMetric); } public static ThreadPoolExecutor newDaemonProfileThreadPool(int numThread, int queueSize, String poolName, @@ -123,7 +129,8 @@ public class ThreadPoolManager { String poolName, boolean needRegisterMetric) { ThreadFactory threadFactory = namedThreadFactory(poolName); - ThreadPoolExecutor threadPool = new ThreadPoolExecutor(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler); + ThreadPoolExecutor threadPool = new ThreadPoolExecutor(corePoolSize, maximumPoolSize, + keepAliveTime, unit, workQueue, threadFactory, handler); if (needRegisterMetric) { nameToThreadPoolMap.put(poolName, threadPool); } @@ -133,9 +140,11 @@ public class ThreadPoolManager { // Now, we have no delay task num limit and thread num limit in ScheduledThreadPoolExecutor, // so it may cause oom when there are too many delay tasks or threads in ScheduledThreadPoolExecutor // Please use this api only for scheduling short task at fix rate. 
- public static ScheduledThreadPoolExecutor newDaemonScheduledThreadPool(int corePoolSize, String poolName, boolean needRegisterMetric) { + public static ScheduledThreadPoolExecutor newDaemonScheduledThreadPool( + int corePoolSize, String poolName, boolean needRegisterMetric) { ThreadFactory threadFactory = namedThreadFactory(poolName); - ScheduledThreadPoolExecutor scheduledThreadPoolExecutor = new ScheduledThreadPoolExecutor(corePoolSize, threadFactory); + ScheduledThreadPoolExecutor scheduledThreadPoolExecutor + = new ScheduledThreadPoolExecutor(corePoolSize, threadFactory); if (needRegisterMetric) { nameToThreadPoolMap.put(poolName, scheduledThreadPoolExecutor); } @@ -169,7 +178,8 @@ public class ThreadPoolManager { } /** - * A handler for rejected task that try to be blocked until the pool enqueue task succeed or timeout, used for fixed thread pool + * A handler for rejected task that try to be blocked until the pool enqueue task succeed or timeout, + * used for fixed thread pool */ static class BlockedPolicy implements RejectedExecutionHandler { @@ -189,7 +199,8 @@ public class ThreadPoolManager { try { boolean ret = executor.getQueue().offer(r, timeoutSeconds, TimeUnit.SECONDS); if (!ret) { - throw new RejectedExecutionException("submit task failed, queue size is full: " + this.threadPoolName); + throw new RejectedExecutionException("submit task failed, queue size is full: " + + this.threadPoolName); } } catch (InterruptedException e) { String errMsg = String.format("Task %s wait to enqueue in %s %s failed", diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/ThriftServer.java b/fe/fe-core/src/main/java/org/apache/doris/common/ThriftServer.java index 40ee36606a..d101f5d227 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/ThriftServer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/ThriftServer.java @@ -99,7 +99,8 @@ public class ThriftServer { TThreadedSelectorServer.Args args = new TThreadedSelectorServer.Args( new TNonblockingServerSocket(port, Config.thrift_client_timeout_ms)).protocolFactory( new TBinaryProtocol.Factory()).processor(processor); - ThreadPoolExecutor threadPoolExecutor = ThreadPoolManager.newDaemonCacheThreadPool(Config.thrift_server_max_worker_threads, "thrift-server-pool", true); + ThreadPoolExecutor threadPoolExecutor = ThreadPoolManager.newDaemonCacheThreadPool( + Config.thrift_server_max_worker_threads, "thrift-server-pool", true); args.executorService(threadPoolExecutor); server = new TThreadedSelectorServer(args); } @@ -114,7 +115,8 @@ public class ThriftServer { TThreadPoolServer.Args serverArgs = new TThreadPoolServer.Args(new TServerSocket(socketTransportArgs)).protocolFactory( new TBinaryProtocol.Factory()).processor(processor); - ThreadPoolExecutor threadPoolExecutor = ThreadPoolManager.newDaemonCacheThreadPool(Config.thrift_server_max_worker_threads, "thrift-server-pool", true); + ThreadPoolExecutor threadPoolExecutor = ThreadPoolManager.newDaemonCacheThreadPool( + Config.thrift_server_max_worker_threads, "thrift-server-pool", true); serverArgs.executorService(threadPoolExecutor); server = new TThreadPoolServer(serverArgs); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/UserException.java b/fe/fe-core/src/main/java/org/apache/doris/common/UserException.java index e67eec7033..0ccb2b1c17 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/UserException.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/UserException.java @@ -25,6 +25,7 @@ import com.google.common.base.Strings; public 
class UserException extends Exception { private InternalErrorCode errorCode; private ErrorCode mysqlErrorCode; + public UserException(String msg, Throwable cause) { super(Strings.nullToEmpty(msg), cause); errorCode = InternalErrorCode.INTERNAL_ERR; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/parquet/BrokerInputFile.java b/fe/fe-core/src/main/java/org/apache/doris/common/parquet/BrokerInputFile.java index 20ad46a8ee..d13c924a56 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/parquet/BrokerInputFile.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/parquet/BrokerInputFile.java @@ -48,7 +48,8 @@ public class BrokerInputFile implements InputFile { } // For test only. ip port is broker ip port - public static BrokerInputFile create(String filePath, BrokerDesc brokerDesc, String ip, int port) throws IOException { + public static BrokerInputFile create(String filePath, BrokerDesc brokerDesc, + String ip, int port) throws IOException { BrokerInputFile inputFile = new BrokerInputFile(filePath, brokerDesc); inputFile.init(ip, port); return inputFile; @@ -95,7 +96,8 @@ public class BrokerInputFile implements InputFile { fill(); } if (currentPos > bufferLimit) { - LOG.warn("current pos {} is larger than buffer limit {}. should not happen.", currentPos, bufferLimit); + LOG.warn("current pos {} is larger than buffer limit {}." + + " should not happen.", currentPos, bufferLimit); return -1; } @@ -146,7 +148,8 @@ public class BrokerInputFile implements InputFile { } if (currentPos > bufferLimit) { - LOG.warn("current pos {} is larger than buffer limit {}. should not happen.", currentPos, bufferLimit); + LOG.warn("current pos {} is larger than buffer limit {}." + + " should not happen.", currentPos, bufferLimit); return -1; } @@ -260,7 +263,8 @@ public class BrokerInputFile implements InputFile { currentPos += data.length; } catch (BrokerReader.EOFException e) { if (byteBuffer.remaining() > 0) { - throw new EOFException("Reach the end of file with " + byteBuffer.remaining() + " bytes left to read. " + throw new EOFException("Reach the end of file with " + + byteBuffer.remaining() + " bytes left to read. 
" + "read len: " + (currentPos - markCurPos)); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendLoadStatisticProcNode.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendLoadStatisticProcNode.java index cd6a4691ab..86a5437456 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendLoadStatisticProcNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendLoadStatisticProcNode.java @@ -25,10 +25,10 @@ import com.google.common.collect.ImmutableList; public class BackendLoadStatisticProcNode implements ProcNodeInterface { public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("RootPath").add("PathHash").add("StorageMedium") - .add("DataUsedCapacity").add("TotalCapacity").add("TotalUsedPct") - .add("Class").add("State") - .build(); + .add("RootPath").add("PathHash").add("StorageMedium") + .add("DataUsedCapacity").add("TotalCapacity").add("TotalUsedPct") + .add("Class").add("State") + .build(); private final ClusterLoadStatistic statistic; private final long beId; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ClusterLoadStatisticProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ClusterLoadStatisticProcDir.java index 2d2842bdb0..aabee58fc5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ClusterLoadStatisticProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ClusterLoadStatisticProcDir.java @@ -25,7 +25,6 @@ import org.apache.doris.system.Backend; import org.apache.doris.thrift.TStorageMedium; import com.google.common.collect.ImmutableList; -import com.google.common.collect.Table; import java.util.List; import java.util.Map; @@ -51,7 +50,8 @@ public class ClusterLoadStatisticProcDir implements ProcDirInterface { BaseProcResult result = new BaseProcResult(); result.setNames(TITLE_NAMES); - Map map = Catalog.getCurrentCatalog().getTabletScheduler().getStatisticMap().column(tag); + Map map = Catalog.getCurrentCatalog() + .getTabletScheduler().getStatisticMap().column(tag); map.values().forEach(t -> { List> statistics = t.getClusterStatistic(medium); @@ -80,7 +80,8 @@ public class ClusterLoadStatisticProcDir implements ProcDirInterface { throw new AnalysisException("backend " + beId + " does not exist"); } - Map map = Catalog.getCurrentCatalog().getTabletScheduler().getStatisticMap().column(tag); + Map map = Catalog.getCurrentCatalog() + .getTabletScheduler().getStatisticMap().column(tag); return new BackendLoadStatisticProcNode(map.get(be.getOwnerClusterName()), beId); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupBackendSeqsProcNode.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupBackendSeqsProcNode.java index 2a164cba6d..4ef502b119 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupBackendSeqsProcNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupBackendSeqsProcNode.java @@ -47,7 +47,8 @@ public class ColocationGroupBackendSeqsProcNode implements ProcNodeInterface { if (bucketNum == 0) { bucketNum = backendsSeq.get(tag).size(); } else if (bucketNum != backendsSeq.get(tag).size()) { - throw new AnalysisException("Invalid bucket number: " + bucketNum + " vs. " + backendsSeq.get(tag).size()); + throw new AnalysisException("Invalid bucket number: " + + bucketNum + " vs. 
" + backendsSeq.get(tag).size()); } } result.setNames(titleNames); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/CurrentQueryInfoProvider.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/CurrentQueryInfoProvider.java index 3fa789989a..2b6d8f6702 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/CurrentQueryInfoProvider.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/CurrentQueryInfoProvider.java @@ -80,7 +80,8 @@ public class CurrentQueryInfoProvider { final Map instanceProfiles = collectInstanceProfile(item.getQueryProfile()); final List instanceStatisticsList = Lists.newArrayList(); for (QueryStatisticsItem.FragmentInstanceInfo instanceInfo : item.getFragmentInstanceInfos()) { - final RuntimeProfile instanceProfile = instanceProfiles.get(DebugUtil.printId(instanceInfo.getInstanceId())); + final RuntimeProfile instanceProfile + = instanceProfiles.get(DebugUtil.printId(instanceInfo.getInstanceId())); Preconditions.checkNotNull(instanceProfile); final InstanceStatistics Statistics = new InstanceStatistics( @@ -102,7 +103,8 @@ public class CurrentQueryInfoProvider { final Map instanceProfiles = Maps.newHashMap(); for (RuntimeProfile fragmentProfile : queryProfile.getChildMap().values()) { for (Map.Entry entry : fragmentProfile.getChildMap().entrySet()) { - Preconditions.checkState(instanceProfiles.put(parseInstanceId(entry.getKey()), entry.getValue()) == null); + Preconditions.checkState(instanceProfiles.put( + parseInstanceId(entry.getKey()), entry.getValue()) == null); } } return instanceProfiles; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/EsPartitionsProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/EsPartitionsProcDir.java index aaee7d7e6f..5919855723 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/EsPartitionsProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/EsPartitionsProcDir.java @@ -69,8 +69,10 @@ public class EsPartitionsProcDir implements ProcDirInterface { rangePartitionInfo = (RangePartitionInfo) esTable.getEsTablePartitions().getPartitionInfo(); } Joiner joiner = Joiner.on(", "); - Map unPartitionedIndices = esTable.getEsTablePartitions().getUnPartitionedIndexStates(); - Map partitionedIndices = esTable.getEsTablePartitions().getPartitionedIndexStates(); + Map unPartitionedIndices + = esTable.getEsTablePartitions().getUnPartitionedIndexStates(); + Map partitionedIndices + = esTable.getEsTablePartitions().getPartitionedIndexStates(); for (EsShardPartitions esShardPartitions : unPartitionedIndices.values()) { List partitionInfo = new ArrayList(); partitionInfo.add(esShardPartitions.getIndexName()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/JobsProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/JobsProcDir.java index dfe272ff10..6bda28968a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/JobsProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/JobsProcDir.java @@ -121,10 +121,13 @@ public class JobsProcDir implements ProcDirInterface { // rollup MaterializedViewHandler materializedViewHandler = Catalog.getCurrentCatalog().getMaterializedViewHandler(); pendingNum = materializedViewHandler.getAlterJobV2Num(org.apache.doris.alter.AlterJobV2.JobState.PENDING, dbId); - runningNum = materializedViewHandler.getAlterJobV2Num(org.apache.doris.alter.AlterJobV2.JobState.WAITING_TXN, dbId) + runningNum = materializedViewHandler.getAlterJobV2Num( + 
org.apache.doris.alter.AlterJobV2.JobState.WAITING_TXN, dbId) + materializedViewHandler.getAlterJobV2Num(org.apache.doris.alter.AlterJobV2.JobState.RUNNING, dbId); - finishedNum = materializedViewHandler.getAlterJobV2Num(org.apache.doris.alter.AlterJobV2.JobState.FINISHED, dbId); - cancelledNum = materializedViewHandler.getAlterJobV2Num(org.apache.doris.alter.AlterJobV2.JobState.CANCELLED, dbId); + finishedNum = materializedViewHandler.getAlterJobV2Num( + org.apache.doris.alter.AlterJobV2.JobState.FINISHED, dbId); + cancelledNum = materializedViewHandler.getAlterJobV2Num( + org.apache.doris.alter.AlterJobV2.JobState.CANCELLED, dbId); totalNum = pendingNum + runningNum + finishedNum + cancelledNum; result.addRow(Lists.newArrayList(ROLLUP, pendingNum.toString(), runningNum.toString(), finishedNum.toString(), cancelledNum.toString(), totalNum.toString())); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/JvmProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/JvmProcDir.java index 8ec5060573..7cacc69746 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/JvmProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/JvmProcDir.java @@ -51,12 +51,12 @@ public class JvmProcDir implements ProcNodeInterface { JvmInfo jvmInfo = jvmService.info(); result.addRow(genRow("jvm start time", TimeUtils.longToTimeString(jvmInfo.getStartTime()))); result.addRow(genRow("jvm version info", Joiner.on(" ").join(jvmInfo.getVersion(), - jvmInfo.getVmName(), - jvmInfo.getVmVendor(), - jvmInfo.getVmVersion()))); + jvmInfo.getVmName(), jvmInfo.getVmVendor(), jvmInfo.getVmVersion()))); - result.addRow(genRow("configured init heap size", DebugUtil.printByteWithUnit(jvmInfo.getConfiguredInitialHeapSize()))); - result.addRow(genRow("configured max heap size", DebugUtil.printByteWithUnit(jvmInfo.getConfiguredMaxHeapSize()))); + result.addRow(genRow("configured init heap size", + DebugUtil.printByteWithUnit(jvmInfo.getConfiguredInitialHeapSize()))); + result.addRow(genRow("configured max heap size", + DebugUtil.printByteWithUnit(jvmInfo.getConfiguredMaxHeapSize()))); result.addRow(genRow("frontend pid", jvmInfo.getPid())); // 2. 
jvm stats @@ -65,24 +65,34 @@ public class JvmProcDir implements ProcNodeInterface { result.addRow(genRow("classes total loaded", jvmStats.getClasses().getTotalLoadedClassCount())); result.addRow(genRow("classes unloaded", jvmStats.getClasses().getUnloadedClassCount())); - result.addRow(genRow("mem heap committed", DebugUtil.printByteWithUnit(jvmStats.getMem().getHeapCommitted().getBytes()))); - result.addRow(genRow("mem heap used", DebugUtil.printByteWithUnit(jvmStats.getMem().getHeapUsed().getBytes()))); - result.addRow(genRow("mem non heap committed", DebugUtil.printByteWithUnit(jvmStats.getMem().getNonHeapCommitted().getBytes()))); - result.addRow(genRow("mem non heap used", DebugUtil.printByteWithUnit(jvmStats.getMem().getNonHeapUsed().getBytes()))); + result.addRow(genRow("mem heap committed", + DebugUtil.printByteWithUnit(jvmStats.getMem().getHeapCommitted().getBytes()))); + result.addRow(genRow("mem heap used", + DebugUtil.printByteWithUnit(jvmStats.getMem().getHeapUsed().getBytes()))); + result.addRow(genRow("mem non heap committed", + DebugUtil.printByteWithUnit(jvmStats.getMem().getNonHeapCommitted().getBytes()))); + result.addRow(genRow("mem non heap used", + DebugUtil.printByteWithUnit(jvmStats.getMem().getNonHeapUsed().getBytes()))); Iterator memIter = jvmStats.getMem().iterator(); while (memIter.hasNext()) { MemoryPool memPool = memIter.next(); - result.addRow(genRow("mem pool " + memPool.getName() + " used", DebugUtil.printByteWithUnit(memPool.getUsed().getBytes()))); - result.addRow(genRow("mem pool " + memPool.getName() + " max", DebugUtil.printByteWithUnit(memPool.getMax().getBytes()))); - result.addRow(genRow("mem pool " + memPool.getName() + " peak used", DebugUtil.printByteWithUnit(memPool.getPeakUsed().getBytes()))); - result.addRow(genRow("mem pool " + memPool.getName() + " peak max", DebugUtil.printByteWithUnit(memPool.getPeakMax().getBytes()))); + result.addRow(genRow("mem pool " + memPool.getName() + " used", + DebugUtil.printByteWithUnit(memPool.getUsed().getBytes()))); + result.addRow(genRow("mem pool " + memPool.getName() + " max", + DebugUtil.printByteWithUnit(memPool.getMax().getBytes()))); + result.addRow(genRow("mem pool " + memPool.getName() + " peak used", + DebugUtil.printByteWithUnit(memPool.getPeakUsed().getBytes()))); + result.addRow(genRow("mem pool " + memPool.getName() + " peak max", + DebugUtil.printByteWithUnit(memPool.getPeakMax().getBytes()))); } for (BufferPool bp : jvmStats.getBufferPools()) { result.addRow(genRow("buffer pool " + bp.getName() + " count", bp.getCount())); - result.addRow(genRow("buffer pool " + bp.getName() + " used", DebugUtil.printByteWithUnit(bp.getUsed().getBytes()))); - result.addRow(genRow("buffer pool " + bp.getName() + " capacity", DebugUtil.printByteWithUnit(bp.getTotalCapacity().getBytes()))); + result.addRow(genRow("buffer pool " + bp.getName() + " used", + DebugUtil.printByteWithUnit(bp.getUsed().getBytes()))); + result.addRow(genRow("buffer pool " + bp.getName() + " capacity", + DebugUtil.printByteWithUnit(bp.getTotalCapacity().getBytes()))); } Iterator gcIter = jvmStats.getGc().iterator(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java index 543755b8c5..c29a8cbfd4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java @@ -91,7 +91,8 @@ public class PartitionsProcDir 
implements ProcDirInterface { } if (subExpr instanceof BinaryPredicate) { BinaryPredicate binaryPredicate = (BinaryPredicate) subExpr; - if (subExpr.getChild(1) instanceof StringLiteral && binaryPredicate.getOp() == BinaryPredicate.Operator.EQ) { + if (subExpr.getChild(1) instanceof StringLiteral + && binaryPredicate.getOp() == BinaryPredicate.Operator.EQ) { return ((StringLiteral) subExpr.getChild(1)).getValue().equals(element); } long leftVal; @@ -135,7 +136,8 @@ public class PartitionsProcDir implements ProcDirInterface { return str.matches(expr); } - public ProcResult fetchResultByFilter(Map filterMap, List orderByPairs, LimitElement limitElement) throws AnalysisException { + public ProcResult fetchResultByFilter(Map filterMap, List orderByPairs, + LimitElement limitElement) throws AnalysisException { List> partitionInfos = getPartitionInfos(); List> filterPartitionInfos; //where @@ -216,7 +218,8 @@ public class PartitionsProcDir implements ProcDirInterface { partitionIds = tblPartitionInfo.getPartitionItemEntryList(isTempPartition, true).stream() .map(Map.Entry::getKey).collect(Collectors.toList()); } else { - Collection partitions = isTempPartition ? olapTable.getTempPartitions() : olapTable.getPartitions(); + Collection partitions = isTempPartition + ? olapTable.getTempPartitions() : olapTable.getPartitions(); partitionIds = partitions.stream().map(Partition::getId).collect(Collectors.toList()); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ProcResult.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ProcResult.java index 40c66fc599..ddc4872410 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ProcResult.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ProcResult.java @@ -23,5 +23,6 @@ import java.util.List; // TODO(zhaochun): merge proc result to show result public interface ProcResult { List getColumnNames(); + List> getRows(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/StatisticProcNode.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/StatisticProcNode.java index 4ff5efb9b5..a585a29f76 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/StatisticProcNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/StatisticProcNode.java @@ -95,7 +95,8 @@ public class StatisticProcNode implements ProcNodeInterface { try { for (Partition partition : olapTable.getAllPartitions()) { ++partitionNum; - for (MaterializedIndex materializedIndex : partition.getMaterializedIndices(IndexExtState.VISIBLE)) { + for (MaterializedIndex materializedIndex + : partition.getMaterializedIndices(IndexExtState.VISIBLE)) { ++indexNum; List tablets = materializedIndex.getTablets(); for (int i = 0; i < tablets.size(); ++i) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletHealthProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletHealthProcDir.java index 403cda9120..bf847c84af 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletHealthProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletHealthProcDir.java @@ -178,8 +178,10 @@ public class TabletHealthProcDir implements ProcDirInterface { olapTable.readLock(); try { for (Partition partition : olapTable.getAllPartitions()) { - ReplicaAllocation replicaAlloc = olapTable.getPartitionInfo().getReplicaAllocation(partition.getId()); - for (MaterializedIndex materializedIndex : 
partition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { + ReplicaAllocation replicaAlloc = olapTable.getPartitionInfo() + .getReplicaAllocation(partition.getId()); + for (MaterializedIndex materializedIndex : partition + .getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { List tablets = materializedIndex.getTablets(); for (int i = 0; i < tablets.size(); ++i) { Tablet tablet = tablets.get(i); @@ -187,12 +189,12 @@ public class TabletHealthProcDir implements ProcDirInterface { Tablet.TabletStatus res = null; if (groupId != null) { Set backendsSet = colocateTableIndex.getTabletBackendsByGroup(groupId, i); - res = tablet.getColocateHealthStatus(partition.getVisibleVersion(), replicaAlloc, backendsSet); + res = tablet.getColocateHealthStatus( + partition.getVisibleVersion(), replicaAlloc, backendsSet); } else { - Pair pair = tablet.getHealthStatusWithPriority( - infoService, db.getClusterName(), - partition.getVisibleVersion(), - replicaAlloc, aliveBeIdsInCluster); + Pair pair + = tablet.getHealthStatusWithPriority(infoService, db.getClusterName(), + partition.getVisibleVersion(), replicaAlloc, aliveBeIdsInCluster); res = pair.first; } switch (res) { // CHECKSTYLE IGNORE THIS LINE: missing switch default @@ -256,7 +258,8 @@ public class TabletHealthProcDir implements ProcDirInterface { oversizeTabletIds.add(tablet.getId()); } for (Replica replica : tablet.getReplicas()) { - if (replica.getVersionCount() > Config.min_version_count_indicate_replica_compaction_too_slow) { + if (replica.getVersionCount() + > Config.min_version_count_indicate_replica_compaction_too_slow) { replicaCompactionTooSlowNum++; replicaCompactionTooSlowTabletIds.add(tablet.getId()); break; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeBuilder.java b/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeBuilder.java index c5720d76ab..42bced258e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeBuilder.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeBuilder.java @@ -91,7 +91,8 @@ public class ProfileTreeBuilder { private static final Pattern FRAGMENT_ID_PATTERN; // Match string like: - // Instance e0f7390f5363419e-b416a2a7999608b6 (host=TNetworkAddress(hostname:192.168.1.1, port:9060)):(Active: 1s858ms, % non-child: 0.02%) + // Instance e0f7390f5363419e-b416a2a7999608b6 + // (host=TNetworkAddress(hostname:192.168.1.1, port:9060)):(Active: 1s858ms, % non-child: 0.02%) // Extract "e0f7390f5363419e-b416a2a7999608b6", "192.168.1.1", "9060" private static final String INSTANCE_PATTERN_STR = "^Instance (.*) \\(.*hostname:(.*), port:([0-9]+).*"; private static final Pattern INSTANCE_PATTERN; @@ -250,7 +251,8 @@ public class ProfileTreeBuilder { String extractName; String extractId; if ((!m.find() && finalSenderName == null) || m.groupCount() != 2) { - // DataStreamBuffer name like: "DataBufferSender (dst_fragment_instance_id=d95356f9219b4831-986b4602b41683ca):" + // DataStreamBuffer name like: + // "DataBufferSender (dst_fragment_instance_id=d95356f9219b4831-986b4602b41683ca):" // So it has no id. 
// Other profile should has id like: // EXCHANGE_NODE (id=3):(Active: 103.899ms, % non-child: 2.27%) @@ -330,7 +332,8 @@ public class ProfileTreeBuilder { if (root != null) { root.addChild(counterNode); } - counterNode.setCounter(childCounterName, RuntimeProfile.printCounter(counter.getValue(), counter.getType())); + counterNode.setCounter(childCounterName, + RuntimeProfile.printCounter(counter.getValue(), counter.getType())); buildCounterNode(profile, childCounterName, counterNode); } return; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeNode.java b/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeNode.java index aa31682904..45cf25d603 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeNode.java @@ -18,6 +18,7 @@ package org.apache.doris.common.profile; import org.apache.doris.common.TreeNode; + import com.google.common.base.Strings; import com.google.common.collect.Lists; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/publish/ClusterStatePublisher.java b/fe/fe-core/src/main/java/org/apache/doris/common/publish/ClusterStatePublisher.java index ff7e225937..da5acc0188 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/publish/ClusterStatePublisher.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/publish/ClusterStatePublisher.java @@ -41,7 +41,8 @@ public class ClusterStatePublisher { private static final Logger LOG = LogManager.getLogger(ClusterStatePublisher.class); private static volatile ClusterStatePublisher INSTANCE; - private ExecutorService executor = ThreadPoolManager.newDaemonFixedThreadPool(5, 256, "cluster-state-publisher", true); + private ExecutorService executor = ThreadPoolManager + .newDaemonFixedThreadPool(5, 256, "cluster-state-publisher", true); private SystemInfoService clusterInfoService; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/BrokerUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/BrokerUtil.java index f5ecef57cf..60dd5a8b33 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/BrokerUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/BrokerUtil.java @@ -172,7 +172,7 @@ public class BrokerUtil { } } else if (brokerDesc.getStorageType() == StorageBackend.StorageType.HDFS) { if (!brokerDesc.getProperties().containsKey(HADOOP_FS_NAME) - || !brokerDesc.getProperties().containsKey(HADOOP_USER_NAME)) { + || !brokerDesc.getProperties().containsKey(HADOOP_USER_NAME)) { throw new UserException(String.format( "The properties of hdfs is invalid. 
%s and %s are needed", HADOOP_FS_NAME, HADOOP_USER_NAME)); } @@ -183,7 +183,7 @@ public class BrokerUtil { for (Map.Entry propEntry : brokerDesc.getProperties().entrySet()) { conf.set(propEntry.getKey(), propEntry.getValue()); if (propEntry.getKey().equals(BrokerUtil.HADOOP_SECURITY_AUTHENTICATION) - && propEntry.getValue().equals(AuthType.KERBEROS.getDesc())) { + && propEntry.getValue().equals(AuthType.KERBEROS.getDesc())) { isSecurityEnabled = true; } } @@ -191,15 +191,15 @@ public class BrokerUtil { if (isSecurityEnabled) { UserGroupInformation.setConfiguration(conf); UserGroupInformation.loginUserFromKeytab( - brokerDesc.getProperties().get(BrokerUtil.HADOOP_KERBEROS_PRINCIPAL), - brokerDesc.getProperties().get(BrokerUtil.HADOOP_KERBEROS_KEYTAB)); + brokerDesc.getProperties().get(BrokerUtil.HADOOP_KERBEROS_PRINCIPAL), + brokerDesc.getProperties().get(BrokerUtil.HADOOP_KERBEROS_KEYTAB)); } FileSystem fs = FileSystem.get(new URI(fsName), conf, userName); FileStatus[] statusList = fs.globStatus(new Path(path)); for (FileStatus status : statusList) { if (status.isFile()) { fileStatuses.add(new TBrokerFileStatus(status.getPath().toUri().getPath(), - status.isDirectory(), status.getLen(), status.isFile())); + status.isDirectory(), status.getLen(), status.isFile())); } } } catch (IOException | InterruptedException | URISyntaxException e) { @@ -213,13 +213,15 @@ public class BrokerUtil { return brokerName + "[" + address.toString() + "]"; } - public static List parseColumnsFromPath(String filePath, List columnsFromPath) throws UserException { + public static List parseColumnsFromPath(String filePath, List columnsFromPath) + throws UserException { if (columnsFromPath == null || columnsFromPath.isEmpty()) { return Collections.emptyList(); } String[] strings = filePath.split("/"); if (strings.length < 2) { - throw new UserException("Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath); + throw new UserException("Fail to parse columnsFromPath, expected: " + + columnsFromPath + ", filePath: " + filePath); } String[] columns = new String[columnsFromPath.size()]; int size = 0; @@ -229,11 +231,13 @@ public class BrokerUtil { continue; } if (str == null || !str.contains("=")) { - throw new UserException("Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath); + throw new UserException("Fail to parse columnsFromPath, expected: " + + columnsFromPath + ", filePath: " + filePath); } String[] pair = str.split("=", 2); if (pair.length != 2) { - throw new UserException("Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath); + throw new UserException("Fail to parse columnsFromPath, expected: " + + columnsFromPath + ", filePath: " + filePath); } int index = columnsFromPath.indexOf(pair[0]); if (index == -1) { @@ -246,7 +250,8 @@ public class BrokerUtil { } } if (size != columnsFromPath.size()) { - throw new UserException("Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath); + throw new UserException("Fail to parse columnsFromPath, expected: " + + columnsFromPath + ", filePath: " + filePath); } return Lists.newArrayList(columns); } @@ -503,8 +508,9 @@ public class BrokerUtil { } } - public static Pair getBrokerAddressAndClient(BrokerDesc brokerDesc) throws UserException { - Pair pair = new Pair(null, null); + public static Pair getBrokerAddressAndClient(BrokerDesc brokerDesc) + throws UserException { + Pair pair = new Pair<>(null, null); TNetworkAddress address = 
getAddress(brokerDesc); TPaloBrokerService.Client client = borrowClient(address); pair.first = client; @@ -600,7 +606,8 @@ public class BrokerUtil { public void write(ByteBuffer byteBuffer, long bufferSize) throws UserException { if (!isReady) { - throw new UserException("Broker writer is not ready. filePath=" + brokerFilePath + ", broker=" + address); + throw new UserException("Broker writer is not ready. filePath=" + + brokerFilePath + ", broker=" + address); } failed = true; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/DynamicPartitionUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/DynamicPartitionUtil.java index b632fb26c7..30cd75cd52 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/DynamicPartitionUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/DynamicPartitionUtil.java @@ -149,14 +149,16 @@ public class DynamicPartitionUtil { private static void checkEnable(String enable) throws DdlException { if (Strings.isNullOrEmpty(enable) - || (!Boolean.TRUE.toString().equalsIgnoreCase(enable) && !Boolean.FALSE.toString().equalsIgnoreCase(enable))) { + || (!Boolean.TRUE.toString().equalsIgnoreCase(enable) + && !Boolean.FALSE.toString().equalsIgnoreCase(enable))) { ErrorReport.reportDdlException(ErrorCode.ERROR_DYNAMIC_PARTITION_ENABLE, enable); } } private static boolean checkCreateHistoryPartition(String create) throws DdlException { if (Strings.isNullOrEmpty(create) - || (!Boolean.TRUE.toString().equalsIgnoreCase(create) && !Boolean.FALSE.toString().equalsIgnoreCase(create))) { + || (!Boolean.TRUE.toString().equalsIgnoreCase(create) + && !Boolean.FALSE.toString().equalsIgnoreCase(create))) { ErrorReport.reportDdlException(ErrorCode.ERROR_DYNAMIC_PARTITION_CREATE_HISTORY_PARTITION, create); } return Boolean.valueOf(create); @@ -168,7 +170,8 @@ public class DynamicPartitionUtil { } try { int historyPartitionNum = Integer.parseInt(val); - if (historyPartitionNum < 0 && historyPartitionNum != DynamicPartitionProperty.NOT_SET_HISTORY_PARTITION_NUM) { + if (historyPartitionNum < 0 + && historyPartitionNum != DynamicPartitionProperty.NOT_SET_HISTORY_PARTITION_NUM) { ErrorReport.reportDdlException(ErrorCode.ERROR_DYNAMIC_PARTITION_HISTORY_PARTITION_NUM_ZERO); } } catch (NumberFormatException e) { @@ -238,7 +241,8 @@ public class DynamicPartitionUtil { } } - public static List convertStringToPeriodsList(String reservedHistoryPeriods, String timeUnit) throws DdlException { + public static List convertStringToPeriodsList(String reservedHistoryPeriods, String timeUnit) + throws DdlException { List reservedHistoryPeriodsToRangeList = new ArrayList(); if (DynamicPartitionProperty.NOT_SET_RESERVED_HISTORY_PERIODS.equals(reservedHistoryPeriods)) { return reservedHistoryPeriodsToRangeList; @@ -250,9 +254,12 @@ public class DynamicPartitionUtil { String lowerBorderOfReservedHistory = matcher.group(1); String upperBorderOfReservedHistory = matcher.group(2); if (lowerBorderOfReservedHistory.compareTo(upperBorderOfReservedHistory) > 0) { - ErrorReport.reportDdlException(ErrorCode.ERROR_DYNAMIC_PARTITION_RESERVED_HISTORY_PERIODS_START_LARGER_THAN_ENDS, lowerBorderOfReservedHistory, upperBorderOfReservedHistory); + ErrorReport.reportDdlException( + ErrorCode.ERROR_DYNAMIC_PARTITION_RESERVED_HISTORY_PERIODS_START_LARGER_THAN_ENDS, + lowerBorderOfReservedHistory, upperBorderOfReservedHistory); } else { - reservedHistoryPeriodsToRangeList.add(Range.closed(lowerBorderOfReservedHistory, upperBorderOfReservedHistory)); + 
reservedHistoryPeriodsToRangeList.add( + Range.closed(lowerBorderOfReservedHistory, upperBorderOfReservedHistory)); } } return reservedHistoryPeriodsToRangeList; @@ -260,7 +267,8 @@ public class DynamicPartitionUtil { private static Pattern getPattern(String timeUnit) { if (timeUnit.equalsIgnoreCase(TimeUnit.HOUR.toString())) { - return Pattern.compile("\\[([0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}),([0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2})\\]"); + return Pattern.compile("\\[([0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2})" + + ",([0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2})\\]"); } else { return Pattern.compile("\\[([0-9]{4}-[0-9]{2}-[0-9]{2}),([0-9]{4}-[0-9]{2}-[0-9]{2})\\]"); } @@ -277,13 +285,14 @@ public class DynamicPartitionUtil { return o1.lowerEndpoint().compareTo(o2.lowerEndpoint()); } }); - List sortedReservedHistoryPeriods = reservedHistoryPeriodsToRangeList.stream(). - map(e -> "[" + e.lowerEndpoint() + "," + e.upperEndpoint() + "]").collect(Collectors.toList()); + List sortedReservedHistoryPeriods = reservedHistoryPeriodsToRangeList.stream() + .map(e -> "[" + e.lowerEndpoint() + "," + e.upperEndpoint() + "]").collect(Collectors.toList()); return String.join(",", sortedReservedHistoryPeriods); } - private static void checkReservedHistoryPeriodValidate(String reservedHistoryPeriods, String timeUnit) throws DdlException { + private static void checkReservedHistoryPeriodValidate(String reservedHistoryPeriods, + String timeUnit) throws DdlException { if (Strings.isNullOrEmpty(reservedHistoryPeriods)) { ErrorReport.reportDdlException(ErrorCode.ERROR_DYNAMIC_PARTITION_RESERVED_HISTORY_PERIODS_EMPTY); } @@ -297,7 +306,8 @@ public class DynamicPartitionUtil { // 2. "dynamic_partition.reserved_history_periods" = "[,2021-08-01]" invalid one, needs pairs of values // 3. 
"dynamic_partition.reserved_history_periods" = "[2021-07-01,2020-08-01,]" invalid format if (!reservedHistoryPeriods.startsWith("[") || !reservedHistoryPeriods.endsWith("]")) { - ErrorReport.reportDdlException(ErrorCode.ERROR_DYNAMIC_PARTITION_RESERVED_HISTORY_PERIODS_INVALID, DynamicPartitionProperty.RESERVED_HISTORY_PERIODS, reservedHistoryPeriods); + ErrorReport.reportDdlException(ErrorCode.ERROR_DYNAMIC_PARTITION_RESERVED_HISTORY_PERIODS_INVALID, + DynamicPartitionProperty.RESERVED_HISTORY_PERIODS, reservedHistoryPeriods); } List reservedHistoryPeriodsToRangeList = convertStringToPeriodsList(reservedHistoryPeriods, timeUnit); @@ -305,13 +315,15 @@ public class DynamicPartitionUtil { SimpleDateFormat sdf = getSimpleDateFormat(timeUnit); if (reservedHistoryPeriodsToRangeList.size() != sizeOfPeriods) { - ErrorReport.reportDdlException(ErrorCode.ERROR_DYNAMIC_PARTITION_RESERVED_HISTORY_PERIODS_INVALID, DynamicPartitionProperty.RESERVED_HISTORY_PERIODS, reservedHistoryPeriods); + ErrorReport.reportDdlException(ErrorCode.ERROR_DYNAMIC_PARTITION_RESERVED_HISTORY_PERIODS_INVALID, + DynamicPartitionProperty.RESERVED_HISTORY_PERIODS, reservedHistoryPeriods); } else { try { for (Range range : reservedHistoryPeriodsToRangeList) { String formattedLowerBound = sdf.format(sdf.parse(range.lowerEndpoint().toString())); String formattedUpperBound = sdf.format(sdf.parse(range.upperEndpoint().toString())); - if (!range.lowerEndpoint().toString().equals(formattedLowerBound) || !range.upperEndpoint().toString().equals(formattedUpperBound)) { + if (!range.lowerEndpoint().toString().equals(formattedLowerBound) + || !range.upperEndpoint().toString().equals(formattedUpperBound)) { throw new DdlException("Invalid " + DynamicPartitionProperty.RESERVED_HISTORY_PERIODS + " value. It must be correct DATE value \"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\"" + " while time_unit is DAY/WEEK/MONTH or" @@ -350,7 +362,8 @@ public class DynamicPartitionUtil { // Check if all requried properties has been set. // And also check all optional properties, if not set, set them to default value. 
- public static boolean checkInputDynamicPartitionProperties(Map properties, PartitionInfo partitionInfo) throws DdlException { + public static boolean checkInputDynamicPartitionProperties(Map properties, + PartitionInfo partitionInfo) throws DdlException { if (properties == null || properties.isEmpty()) { return false; } @@ -421,18 +434,21 @@ public class DynamicPartitionUtil { if (!isReplay) { // execute create partition first time only in master of FE, So no need execute // when it's replay - Catalog.getCurrentCatalog().getDynamicPartitionScheduler().executeDynamicPartitionFirstTime(dbId, olapTable.getId()); + Catalog.getCurrentCatalog().getDynamicPartitionScheduler() + .executeDynamicPartitionFirstTime(dbId, olapTable.getId()); } - Catalog.getCurrentCatalog().getDynamicPartitionScheduler().registerDynamicPartitionTable(dbId, olapTable.getId()); + Catalog.getCurrentCatalog().getDynamicPartitionScheduler() + .registerDynamicPartitionTable(dbId, olapTable.getId()); } else { - Catalog.getCurrentCatalog().getDynamicPartitionScheduler().removeDynamicPartitionTable(dbId, olapTable.getId()); + Catalog.getCurrentCatalog().getDynamicPartitionScheduler() + .removeDynamicPartitionTable(dbId, olapTable.getId()); } } } // Analyze all properties to check their validation - public static Map analyzeDynamicPartition(Map properties, PartitionInfo partitionInfo) - throws UserException { + public static Map analyzeDynamicPartition(Map properties, + PartitionInfo partitionInfo) throws UserException { // properties should not be empty, check properties before call this function Map analyzedProperties = new HashMap<>(); if (properties.containsKey(DynamicPartitionProperty.TIME_UNIT)) { @@ -505,19 +521,22 @@ public class DynamicPartitionUtil { start = 0; expectCreatePartitionNum = end - start; } else { - int historyPartitionNum = Integer.valueOf(analyzedProperties.getOrDefault(DynamicPartitionProperty.HISTORY_PARTITION_NUM, + int historyPartitionNum = Integer.parseInt(analyzedProperties.getOrDefault( + DynamicPartitionProperty.HISTORY_PARTITION_NUM, String.valueOf(DynamicPartitionProperty.NOT_SET_HISTORY_PARTITION_NUM))); if (historyPartitionNum != DynamicPartitionProperty.NOT_SET_HISTORY_PARTITION_NUM) { expectCreatePartitionNum = end - Math.max(start, -historyPartitionNum); } else { if (start == Integer.MIN_VALUE) { - throw new DdlException("Provide start or history_partition_num property when creating history partition"); + throw new DdlException("Provide start or history_partition_num property" + + " when creating history partition"); } expectCreatePartitionNum = end - start; } } if (hasEnd && (expectCreatePartitionNum > Config.max_dynamic_partition_num)) { - throw new DdlException("Too many dynamic partitions: " + expectCreatePartitionNum + ". Limit: " + Config.max_dynamic_partition_num); + throw new DdlException("Too many dynamic partitions: " + + expectCreatePartitionNum + ". 
Limit: " + Config.max_dynamic_partition_num); } if (properties.containsKey(DynamicPartitionProperty.START_DAY_OF_MONTH)) { @@ -564,7 +583,8 @@ public class DynamicPartitionUtil { } if (properties.containsKey(DynamicPartitionProperty.RESERVED_HISTORY_PERIODS)) { String reservedHistoryPeriods = properties.get(DynamicPartitionProperty.RESERVED_HISTORY_PERIODS); - checkReservedHistoryPeriodValidate(reservedHistoryPeriods, analyzedProperties.get(DynamicPartitionProperty.TIME_UNIT)); + checkReservedHistoryPeriodValidate(reservedHistoryPeriods, + analyzedProperties.get(DynamicPartitionProperty.TIME_UNIT)); properties.remove(DynamicPartitionProperty.RESERVED_HISTORY_PERIODS); analyzedProperties.put(DynamicPartitionProperty.RESERVED_HISTORY_PERIODS, reservedHistoryPeriods); } @@ -592,7 +612,8 @@ public class DynamicPartitionUtil { return false; } - return rangePartitionInfo.getPartitionColumns().size() == 1 && tableProperty.getDynamicPartitionProperty().getEnable(); + return rangePartitionInfo.getPartitionColumns().size() == 1 + && tableProperty.getDynamicPartitionProperty().getEnable(); } /** @@ -670,7 +691,8 @@ public class DynamicPartitionUtil { } } - public static String getHistoryPartitionRangeString(DynamicPartitionProperty dynamicPartitionProperty, String time, String format) { + public static String getHistoryPartitionRangeString(DynamicPartitionProperty dynamicPartitionProperty, + String time, String format) { ZoneId zoneId = dynamicPartitionProperty.getTimeZone().toZoneId(); Date date = null; Timestamp timestamp = null; @@ -681,10 +703,12 @@ public class DynamicPartitionUtil { date = simpleDateFormat.parse(time); } catch (ParseException e) { LOG.warn("Parse dynamic partition periods error. Error={}", e.getMessage()); - return getFormattedTimeWithoutMinuteSecond(ZonedDateTime.parse(timestamp.toString(), dateTimeFormatter), format); + return getFormattedTimeWithoutMinuteSecond( + ZonedDateTime.parse(timestamp.toString(), dateTimeFormatter), format); } timestamp = new Timestamp(date.getTime()); - return getFormattedTimeWithoutMinuteSecond(ZonedDateTime.parse(timestamp.toString(), dateTimeFormatter), format); + return getFormattedTimeWithoutMinuteSecond( + ZonedDateTime.parse(timestamp.toString(), dateTimeFormatter), format); } /** @@ -725,7 +749,8 @@ public class DynamicPartitionUtil { * Today is 2020-05-24, offset = -1, startOf.dayOfWeek = 3 * It will return 2020-05-20 (Wednesday of last week) */ - private static String getPartitionRangeOfWeek(ZonedDateTime current, int offset, StartOfDate startOf, String format) { + private static String getPartitionRangeOfWeek(ZonedDateTime current, int offset, + StartOfDate startOf, String format) { Preconditions.checkArgument(startOf.isStartOfWeek()); // 1. get the offset week ZonedDateTime offsetWeek = current.plusWeeks(offset); @@ -745,7 +770,8 @@ public class DynamicPartitionUtil { * Today is 2020-05-24, offset = 1, startOf.month = 3 * It will return 2020-06-03 */ - private static String getPartitionRangeOfMonth(ZonedDateTime current, int offset, StartOfDate startOf, String format) { + private static String getPartitionRangeOfMonth(ZonedDateTime current, + int offset, StartOfDate startOf, String format) { Preconditions.checkArgument(startOf.isStartOfMonth()); // 1. Get the offset date. 
int realOffset = offset; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/KafkaUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/KafkaUtil.java index b6cd819afc..e25ebf5a57 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/KafkaUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/KafkaUtil.java @@ -87,8 +87,8 @@ public class KafkaUtil { // The input parameter "timestampOffsets" is // Tne return value is public static List> getOffsetsForTimes(String brokerList, String topic, - Map convertedCustomProperties, - List> timestampOffsets) throws LoadException { + Map convertedCustomProperties, List> timestampOffsets) + throws LoadException { TNetworkAddress address = null; LOG.debug("begin to get offsets for times of topic: {}, {}", topic, timestampOffsets); try { diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/ListUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/ListUtil.java index e203377b98..1fb8bc999a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/ListUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/ListUtil.java @@ -25,7 +25,6 @@ import org.apache.doris.common.DdlException; import com.google.common.base.Preconditions; import java.util.ArrayList; -import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Map; @@ -70,16 +69,15 @@ public class ListUtil { return result; } - public static void checkPartitionKeyListsMatch(List list1, List list2) throws DdlException { - Collections.sort(list1, PARTITION_KEY_COMPARATOR); - Collections.sort(list2, PARTITION_KEY_COMPARATOR); + public static void checkPartitionKeyListsMatch(List list1, + List list2) throws DdlException { + list1.sort(PARTITION_KEY_COMPARATOR); + list2.sort(PARTITION_KEY_COMPARATOR); int idx1 = 0; int idx2 = 0; - List keys1 = new ArrayList<>(); - List keys2 = new ArrayList<>(); - keys1.addAll(list1.get(idx1).getItems()); - keys2.addAll(list2.get(idx2).getItems()); + List keys1 = new ArrayList<>(list1.get(idx1).getItems()); + List keys2 = new ArrayList<>(list2.get(idx2).getItems()); while (true) { int size = Math.min(keys1.size(), keys2.size()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/MetaLockUtils.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/MetaLockUtils.java index 42fa8cd10b..82954dce5c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/MetaLockUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/MetaLockUtils.java @@ -86,7 +86,8 @@ public class MetaLockUtils { } } - public static boolean tryWriteLockTablesOrMetaException(List
<Table> tableList, long timeout, TimeUnit unit) throws MetaNotFoundException { + public static boolean tryWriteLockTablesOrMetaException(List<Table>
tableList, + long timeout, TimeUnit unit) throws MetaNotFoundException { for (int i = 0; i < tableList.size(); i++) { try { if (!tableList.get(i).tryWriteLockOrMetaException(timeout, unit)) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/PrintableMap.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/PrintableMap.java index 2eaa0a0533..5f6412bf96 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/PrintableMap.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/PrintableMap.java @@ -32,6 +32,7 @@ public class PrintableMap { private String entryDelimiter = ","; public static final Set SENSITIVE_KEY; + static { SENSITIVE_KEY = Sets.newTreeSet(String.CASE_INSENSITIVE_ORDER); SENSITIVE_KEY.add("password"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/ProfileManager.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/ProfileManager.java index dfd5e2a13d..4f1a705920 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/ProfileManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/ProfileManager.java @@ -224,7 +224,8 @@ public class ProfileManager { return builder.getFragmentTreeRoot(executionId); } - public List> getFragmentInstanceList(String queryID, String executionId, String fragmentId) + public List> getFragmentInstanceList(String queryID, + String executionId, String fragmentId) throws AnalysisException { MultiProfileTreeBuilder builder; readLock.lock(); @@ -242,7 +243,8 @@ public class ProfileManager { return builder.getInstanceList(executionId, fragmentId); } - public ProfileTreeNode getInstanceProfileTree(String queryID, String executionId, String fragmentId, String instanceId) + public ProfileTreeNode getInstanceProfileTree(String queryID, String executionId, + String fragmentId, String instanceId) throws AnalysisException { MultiProfileTreeBuilder builder; readLock.lock(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java index 5202122410..88ac119aeb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java @@ -207,7 +207,8 @@ public class PropertyAnalyzer { throw new AnalysisException("Remote storage cool down time should later than now"); } if (hasCooldown && (remoteCooldownTimeStamp <= cooldownTimeStamp)) { - throw new AnalysisException("`remote_storage_cooldown_time` should later than `storage_cooldown_time`."); + throw new AnalysisException( + "`remote_storage_cooldown_time` should later than `storage_cooldown_time`."); } } @@ -238,7 +239,8 @@ public class PropertyAnalyzer { private static Short analyzeReplicationNum(Map properties, String prefix, short oldReplicationNum) throws AnalysisException { Short replicationNum = oldReplicationNum; - String propKey = Strings.isNullOrEmpty(prefix) ? PROPERTIES_REPLICATION_NUM : prefix + "." + PROPERTIES_REPLICATION_NUM; + String propKey = Strings.isNullOrEmpty(prefix) + ? PROPERTIES_REPLICATION_NUM : prefix + "." 
+ PROPERTIES_REPLICATION_NUM; if (properties != null && properties.containsKey(propKey)) { try { replicationNum = Short.valueOf(properties.get(propKey)); @@ -246,7 +248,8 @@ public class PropertyAnalyzer { throw new AnalysisException(e.getMessage()); } - if (replicationNum < Config.min_replication_num_per_tablet || replicationNum > Config.max_replication_num_per_tablet) { + if (replicationNum < Config.min_replication_num_per_tablet + || replicationNum > Config.max_replication_num_per_tablet) { throw new AnalysisException("Replication num should between " + Config.min_replication_num_per_tablet + " and " + Config.max_replication_num_per_tablet); } @@ -543,7 +546,8 @@ public class PropertyAnalyzer { return ScalarType.createType(type); } - public static Boolean analyzeBackendDisableProperties(Map properties, String key, Boolean defaultValue) throws AnalysisException { + public static Boolean analyzeBackendDisableProperties(Map properties, + String key, Boolean defaultValue) { if (properties.containsKey(key)) { String value = properties.remove(key); return Boolean.valueOf(value); @@ -551,7 +555,8 @@ public class PropertyAnalyzer { return defaultValue; } - public static Tag analyzeBackendTagProperties(Map properties, Tag defaultValue) throws AnalysisException { + public static Tag analyzeBackendTagProperties(Map properties, Tag defaultValue) + throws AnalysisException { if (properties.containsKey(TAG_LOCATION)) { String tagVal = properties.remove(TAG_LOCATION); return Tag.create(Tag.TYPE_LOCATION, tagVal); @@ -608,7 +613,8 @@ public class PropertyAnalyzer { replicaAlloc.put(Tag.create(Tag.TYPE_LOCATION, locationVal), replicationNum); totalReplicaNum += replicationNum; } - if (totalReplicaNum < Config.min_replication_num_per_tablet || totalReplicaNum > Config.max_replication_num_per_tablet) { + if (totalReplicaNum < Config.min_replication_num_per_tablet + || totalReplicaNum > Config.max_replication_num_per_tablet) { throw new AnalysisException("Total replication num should between " + Config.min_replication_num_per_tablet + " and " + Config.max_replication_num_per_tablet); } @@ -620,7 +626,7 @@ public class PropertyAnalyzer { } public static DataSortInfo analyzeDataSortInfo(Map properties, KeysType keyType, - int keyCount, TStorageFormat storageFormat) throws AnalysisException { + int keyCount, TStorageFormat storageFormat) throws AnalysisException { if (properties == null || properties.isEmpty()) { return new DataSortInfo(TSortType.LEXICAL, keyCount); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/RangeUtils.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/RangeUtils.java index 0a40cec59b..d3e659ee8e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/RangeUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/RangeUtils.java @@ -57,7 +57,8 @@ public class RangeUtils { } return false; } - /* + + /** * Pass only if the 2 range lists are exactly same * What is "exactly same"? * 1. {[0, 10), [10, 20)} exactly same as {[0, 20)} @@ -82,7 +83,8 @@ public class RangeUtils { * 4.2 upper bounds (20 and 20) are equal. * 5. Not more next ranges, so 2 lists are equal. 
*/ - public static void checkPartitionItemListsMatch(List list1, List list2) throws DdlException { + public static void checkPartitionItemListsMatch(List list1, List list2) + throws DdlException { Collections.sort(list1, RangeUtils.RANGE_COMPARATOR); Collections.sort(list2, RangeUtils.RANGE_COMPARATOR); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/ReflectionUtils.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/ReflectionUtils.java index 0f341f6397..0671f04405 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/ReflectionUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/ReflectionUtils.java @@ -58,7 +58,7 @@ public class ReflectionUtils { return result; } - static private ThreadMXBean threadBean = + private static ThreadMXBean threadBean = ManagementFactory.getThreadMXBean(); public static void setContentionTracing(boolean val) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/RuntimeProfile.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/RuntimeProfile.java index f44ce0b672..b81db22f52 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/RuntimeProfile.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/RuntimeProfile.java @@ -398,8 +398,7 @@ public class RuntimeProfile { // Because the profile of summary and child fragment is not a real parent-child relationship // Each child profile needs to calculate the time proportion consumed by itself public void computeTimeInChildProfile() { - childMap.values(). - forEach(RuntimeProfile::computeTimeInProfile); + childMap.values().forEach(RuntimeProfile::computeTimeInProfile); } public void computeTimeInProfile() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/SmallFileMgr.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/SmallFileMgr.java index ff4a476856..e96c527906 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/SmallFileMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/SmallFileMgr.java @@ -300,7 +300,8 @@ public class SmallFileMgr implements Writable { int contentLength = urlConnection.getContentLength(); if (contentLength == -1 || contentLength > Config.max_small_file_size_bytes) { - throw new DdlException("Failed to download file from url: " + url + ", invalid content length: " + contentLength); + throw new DdlException("Failed to download file from url: " + url + + ", invalid content length: " + contentLength); } int bytesRead = 0; @@ -309,7 +310,7 @@ public class SmallFileMgr implements Writable { if (saveContent) { // download from url, and check file size bytesRead = 0; - byte buf[] = new byte[contentLength]; + byte[] buf = new byte[contentLength]; try (BufferedInputStream in = new BufferedInputStream(url.openStream())) { while (bytesRead < contentLength) { bytesRead += in.read(buf, bytesRead, contentLength - bytesRead); @@ -449,7 +450,8 @@ public class SmallFileMgr implements Writable { } private File getAbsoluteFile(long dbId, String catalog, String fileName) { - return Paths.get(Config.small_file_dir, String.valueOf(dbId), catalog, fileName).normalize().toAbsolutePath().toFile(); + return Paths.get(Config.small_file_dir, String.valueOf(dbId), catalog, fileName) + .normalize().toAbsolutePath().toFile(); } public List> getInfo(String dbName) throws DdlException { diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/SqlBlockUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/SqlBlockUtil.java index 
50c60386b8..7fb2a7d8a8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/SqlBlockUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/SqlBlockUtil.java @@ -40,7 +40,7 @@ public class SqlBlockUtil { // check (sql or sqlHash) and (limitations: partitioNum, tabletNum, cardinality) are not set both public static void checkSqlAndLimitationsSetBoth(String sql, String sqlHash, - String partitionNumString, String tabletNumString, String cardinalityString) throws AnalysisException { + String partitionNumString, String tabletNumString, String cardinalityString) throws AnalysisException { if ((!STRING_DEFAULT.equals(sql) || !STRING_DEFAULT.equals(sqlHash)) && !isSqlBlockLimitationsEmpty(partitionNumString, tabletNumString, cardinalityString)) { ErrorReport.reportAnalysisException(ErrorCode.ERROR_SQL_AND_LIMITATIONS_SET_IN_ONE_RULE); @@ -50,7 +50,7 @@ public class SqlBlockUtil { // 1. check (sql or sqlHash) and (limitations: partitioNum, tabletNum, cardinality) are not set both // 2. check any of limitations is set while sql or sqlHash is not set public static void checkPropertiesValidate(String sql, String sqlHash, - String partitionNumString, String tabletNumString, String cardinalityString) throws AnalysisException { + String partitionNumString, String tabletNumString, String cardinalityString) throws AnalysisException { if (((!STRING_DEFAULT.equals(sql) || !STRING_DEFAULT.equals(sqlHash)) && !isSqlBlockLimitationsEmpty(partitionNumString, tabletNumString, cardinalityString)) || ((STRING_DEFAULT.equals(sql) && STRING_DEFAULT.equals(sqlHash)) @@ -60,8 +60,10 @@ public class SqlBlockUtil { } // check at least one of the (limitations: partitioNum, tabletNum, cardinality) is not empty - public static Boolean isSqlBlockLimitationsEmpty(String partitionNumString, String tabletNumString, String cardinalityString) { - return StringUtils.isEmpty(partitionNumString) && StringUtils.isEmpty(tabletNumString) && StringUtils.isEmpty(cardinalityString); + public static Boolean isSqlBlockLimitationsEmpty(String partitionNumString, + String tabletNumString, String cardinalityString) { + return StringUtils.isEmpty(partitionNumString) + && StringUtils.isEmpty(tabletNumString) && StringUtils.isEmpty(cardinalityString); } public static Boolean isSqlBlockLimitationsDefault(Long partitionNum, Long tabletNum, Long cardinality) { @@ -87,11 +89,14 @@ public class SqlBlockUtil { } else if (!STRING_DEFAULT.equals(sqlBlockRule.getSqlHash())) { if (!STRING_DEFAULT.equals(sqlBlockRule.getSql()) && StringUtils.isNotEmpty(sqlBlockRule.getSql())) { throw new AnalysisException("Only sql or sqlHash can be configured"); - } else if (!isSqlBlockLimitationsDefault(sqlBlockRule.getPartitionNum(), sqlBlockRule.getTabletNum(), sqlBlockRule.getCardinality()) - && !isSqlBlockLimitationsNull(sqlBlockRule.getPartitionNum(), sqlBlockRule.getTabletNum(), sqlBlockRule.getCardinality())) { + } else if (!isSqlBlockLimitationsDefault(sqlBlockRule.getPartitionNum(), + sqlBlockRule.getTabletNum(), sqlBlockRule.getCardinality()) + && !isSqlBlockLimitationsNull(sqlBlockRule.getPartitionNum(), + sqlBlockRule.getTabletNum(), sqlBlockRule.getCardinality())) { ErrorReport.reportAnalysisException(ErrorCode.ERROR_SQL_AND_LIMITATIONS_SET_IN_ONE_RULE); } - } else if (!isSqlBlockLimitationsDefault(sqlBlockRule.getPartitionNum(), sqlBlockRule.getTabletNum(), sqlBlockRule.getCardinality())) { + } else if (!isSqlBlockLimitationsDefault(sqlBlockRule.getPartitionNum(), + sqlBlockRule.getTabletNum(), sqlBlockRule.getCardinality())) { 
if (!STRING_DEFAULT.equals(sqlBlockRule.getSql()) || !STRING_DEFAULT.equals(sqlBlockRule.getSqlHash())) { ErrorReport.reportAnalysisException(ErrorCode.ERROR_SQL_AND_LIMITATIONS_SET_IN_ONE_RULE); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/URI.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/URI.java index 4f4409e2a8..ab26fe41b8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/URI.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/URI.java @@ -63,6 +63,7 @@ public class URI { public String getLocation() { return location; } + public String getScheme() { return scheme; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java index 4ca18e3ce0..a63c40223e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java @@ -51,9 +51,11 @@ public class Util { private static final long DEFAULT_EXEC_CMD_TIMEOUT_MS = 600000L; - private static final String[] ORDINAL_SUFFIX = new String[] { "th", "st", "nd", "rd", "th", "th", "th", "th", "th", "th" }; + private static final String[] ORDINAL_SUFFIX + = new String[] { "th", "st", "nd", "rd", "th", "th", "th", "th", "th", "th" }; - private static final List REGEX_ESCAPES = Lists.newArrayList("\\", "$", "(", ")", "*", "+", ".", "[", "]", "?", "^", "{", "}", "|"); + private static final List REGEX_ESCAPES + = Lists.newArrayList("\\", "$", "(", ")", "*", "+", ".", "[", "]", "?", "^", "{", "}", "|"); static { TYPE_STRING_MAP.put(PrimitiveType.TINYINT, "tinyint(4)"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/consistency/CheckConsistencyJob.java b/fe/fe-core/src/main/java/org/apache/doris/consistency/CheckConsistencyJob.java index c78fba3925..0d0327d8b3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/consistency/CheckConsistencyJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/consistency/CheckConsistencyJob.java @@ -144,7 +144,8 @@ public class CheckConsistencyJob { } // check partition's replication num. if 1 replication. skip - short replicaNum = olapTable.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum(); + short replicaNum = olapTable.getPartitionInfo() + .getReplicaAllocation(partition.getId()).getTotalReplicaNum(); if (replicaNum == (short) 1) { LOG.debug("partition[{}]'s replication num is 1. skip consistency check", partition.getId()); return false; diff --git a/fe/fe-core/src/main/java/org/apache/doris/consistency/ConsistencyChecker.java b/fe/fe-core/src/main/java/org/apache/doris/consistency/ConsistencyChecker.java index b5e261083e..8b7741f724 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/consistency/ConsistencyChecker.java +++ b/fe/fe-core/src/main/java/org/apache/doris/consistency/ConsistencyChecker.java @@ -279,7 +279,8 @@ public class ConsistencyChecker extends MasterDaemon { new PriorityQueue<>(Math.max(table.getAllPartitions().size(), 1), COMPARATOR); for (Partition partition : table.getPartitions()) { // check partition's replication num. if 1 replication. skip - if (table.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum() == (short) 1) { + if (table.getPartitionInfo().getReplicaAllocation( + partition.getId()).getTotalReplicaNum() == (short) 1) { LOG.debug("partition[{}]'s replication num is 1. 
ignore", partition.getId()); continue; } @@ -297,15 +298,18 @@ public class ConsistencyChecker extends MasterDaemon { Partition partition = (Partition) chosenOne; // sort materializedIndices - List visibleIndexes = partition.getMaterializedIndices(IndexExtState.VISIBLE); - Queue indexQueue = new PriorityQueue<>(Math.max(visibleIndexes.size(), 1), COMPARATOR); + List visibleIndexes + = partition.getMaterializedIndices(IndexExtState.VISIBLE); + Queue indexQueue + = new PriorityQueue<>(Math.max(visibleIndexes.size(), 1), COMPARATOR); indexQueue.addAll(visibleIndexes); while ((chosenOne = indexQueue.poll()) != null) { MaterializedIndex index = (MaterializedIndex) chosenOne; // sort tablets - Queue tabletQueue = new PriorityQueue<>(Math.max(index.getTablets().size(), 1), COMPARATOR); + Queue tabletQueue + = new PriorityQueue<>(Math.max(index.getTablets().size(), 1), COMPARATOR); tabletQueue.addAll(index.getTablets()); while ((chosenOne = tabletQueue.poll()) != null) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalDataSource.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalDataSource.java index 8ea01770ca..d9c4c711bb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalDataSource.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalDataSource.java @@ -578,7 +578,8 @@ public class InternalDataSource implements DataSourceIf { if (olapTable.getState() != OlapTableState.NORMAL) { throw new DdlException("The table [" + olapTable.getState() + "]'s state is " + olapTable.getState() + ", cannot be dropped." - + " please cancel the operation on olap table firstly. If you want to forcibly drop(cannot be recovered)," + + " please cancel the operation on olap table firstly." + + " If you want to forcibly drop(cannot be recovered)," + " please use \"DROP table FORCE\"."); } } @@ -909,7 +910,8 @@ public class InternalDataSource implements DataSourceIf { if ((olapTable.getState() != OlapTableState.NORMAL)) { throw new DdlException("The table [" + tableName + "]'s state is " + olapTable.getState() + ", cannot be dropped." - + " please cancel the operation on olap table firstly. If you want to forcibly drop(cannot be recovered)," + + " please cancel the operation on olap table firstly." + + " If you want to forcibly drop(cannot be recovered)," + " please use \"DROP table FORCE\"."); } } @@ -1839,7 +1841,8 @@ public class InternalDataSource implements DataSourceIf { if (partitionInfo.getType() == PartitionType.UNPARTITIONED) { // if this is an unpartitioned table, we should analyze data property and replication num here. - // if this is a partitioned table, there properties are already analyzed in RangePartitionDesc analyze phase. + // if this is a partitioned table, there properties are already analyzed + // in RangePartitionDesc analyze phase. 
// use table name as this single partition name long partitionId = partitionNameToId.get(tableName); diff --git a/fe/fe-core/src/main/java/org/apache/doris/deploy/DeployManager.java b/fe/fe-core/src/main/java/org/apache/doris/deploy/DeployManager.java index 08ccf49641..66c3a81df1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/deploy/DeployManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/deploy/DeployManager.java @@ -366,7 +366,8 @@ public class DeployManager extends MasterDaemon { break BE_BLOCK; } LOG.debug("get remote backend hosts: {}", remoteBackendHosts); - List localBackends = Catalog.getCurrentSystemInfo().getClusterBackends(SystemInfoService.DEFAULT_CLUSTER); + List localBackends = Catalog.getCurrentSystemInfo() + .getClusterBackends(SystemInfoService.DEFAULT_CLUSTER); List> localBackendHosts = Lists.newArrayList(); for (Backend backend : localBackends) { localBackendHosts.add(Pair.create(backend.getHost(), backend.getHeartbeatPort())); diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsNodeInfo.java b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsNodeInfo.java index 1504e7cb34..1893c49734 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsNodeInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsNodeInfo.java @@ -24,6 +24,7 @@ import org.apache.logging.log4j.Logger; import java.util.List; import java.util.Map; +import java.util.Objects; /** * This class represents one node with the http and potential thrift publish address @@ -197,8 +198,8 @@ public class EsNodeInfo { if (hasThrift != nodeInfo.hasThrift) { return false; } - return (publishAddress != null ? publishAddress.equals(nodeInfo.publishAddress) : nodeInfo.publishAddress == null) - && (thriftAddress != null ? 
thriftAddress.equals(nodeInfo.thriftAddress) : nodeInfo.thriftAddress == null); + return (Objects.equals(publishAddress, nodeInfo.publishAddress)) + && (Objects.equals(thriftAddress, nodeInfo.thriftAddress)); } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsRepository.java b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsRepository.java index e92d06ec3f..2671485272 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsRepository.java +++ b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsRepository.java @@ -58,7 +58,8 @@ public class EsRepository extends MasterDaemon { } esTables.put(esTable.getId(), esTable); esClients.put(esTable.getId(), - new EsRestClient(esTable.getSeeds(), esTable.getUserName(), esTable.getPasswd(), esTable.isHttpSslEnabled())); + new EsRestClient(esTable.getSeeds(), esTable.getUserName(), esTable.getPasswd(), + esTable.isHttpSslEnabled())); LOG.info("register a new table [{}] to sync list", esTable); } @@ -74,7 +75,8 @@ public class EsRepository extends MasterDaemon { try { esTable.syncTableMetaData(esClients.get(esTable.getId())); } catch (Throwable e) { - LOG.warn("Exception happens when fetch index [{}] meta data from remote es cluster", esTable.getName(), e); + LOG.warn("Exception happens when fetch index [{}] meta data from remote es cluster", + esTable.getName(), e); esTable.setEsTablePartitions(null); esTable.setLastMetaDataSyncException(e); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsShardPartitions.java b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsShardPartitions.java index e2db385df2..7b967b0fcc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsShardPartitions.java +++ b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsShardPartitions.java @@ -82,7 +82,8 @@ public class EsShardPartitions { (JSONObject) jsonObject.get("nodes"))); } catch (Exception e) { LOG.error("fetch index [{}] shard partitions failure", indexName, e); - throw new DorisEsException("fetch [" + indexName + "] shard partitions failure [" + e.getMessage() + "]"); + throw new DorisEsException("fetch [" + indexName + + "] shard partitions failure [" + e.getMessage() + "]"); } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsUtil.java b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsUtil.java index dc1f6b60dc..4db3e96b0a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsUtil.java @@ -92,7 +92,8 @@ public class EsUtil { try { return Boolean.parseBoolean(property); } catch (Exception e) { - throw new DdlException(String.format("fail to parse %s, %s = %s, `%s` should be like 'true' or 'false', value should be double quotation marks", name, name, property, name)); + throw new DdlException(String.format("fail to parse %s, %s = %s, `%s` should be like 'true' or 'false', " + + "value should be double quotation marks", name, name, property, name)); } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/MappingPhase.java b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/MappingPhase.java index f736a9ee2f..680ab2d1ce 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/MappingPhase.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/MappingPhase.java @@ -92,7 +92,8 @@ public class MappingPhase implements SearchPhase { properties = (JSONObject) rootSchema.get("properties"); } if (properties == null) { - throw new DorisEsException("index[" + searchContext.sourceIndex() + "] type[" + searchContext.type() + "] mapping not found for the ES Cluster"); + throw new DorisEsException("index[" + searchContext.sourceIndex() + "] type[" + searchContext.type() + + "] mapping not found for the ES Cluster"); } for (Column col : searchContext.columns()) { String colName = col.getName(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/iceberg/IcebergCatalogMgr.java b/fe/fe-core/src/main/java/org/apache/doris/external/iceberg/IcebergCatalogMgr.java index 96fe439a19..322140fb65 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/external/iceberg/IcebergCatalogMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/external/iceberg/IcebergCatalogMgr.java @@ -112,7 +112,8 @@ public class IcebergCatalogMgr { } if (!Enums.getIfPresent(IcebergCatalogMgr.CatalogType.class, icebergCatalogType).isPresent()) { - throw new DdlException("Unknown catalog type: " + icebergCatalogType + ". Current only support HiveCatalog."); + throw new DdlException("Unknown catalog type: " + icebergCatalogType + + ". Current only support HiveCatalog."); } // only check table property when it's an iceberg table diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/iceberg/IcebergTableCreationRecordMgr.java b/fe/fe-core/src/main/java/org/apache/doris/external/iceberg/IcebergTableCreationRecordMgr.java index 1765c9946b..e726ef9fa3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/external/iceberg/IcebergTableCreationRecordMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/external/iceberg/IcebergTableCreationRecordMgr.java @@ -58,16 +58,17 @@ public class IcebergTableCreationRecordMgr extends MasterDaemon { // Iceberg databases, used to list remote iceberg tables // dbId -> database - private Map icebergDbs = new ConcurrentHashMap<>(); + private final Map icebergDbs = new ConcurrentHashMap<>(); // database -> table identifier -> properties // used to create table - private Map> dbToTableIdentifiers = Maps.newConcurrentMap(); + private final Map> dbToTableIdentifiers = Maps.newConcurrentMap(); // table creation records, used for show stmt // dbId -> tableId -> create msg - private Map> dbToTableToCreationRecord = Maps.newConcurrentMap(); + private final Map> dbToTableToCreationRecord = Maps.newConcurrentMap(); - private Queue tableCreationRecordQueue = new PriorityQueue<>(new TableCreationComparator()); - private ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + private final Queue tableCreationRecordQueue + = new PriorityQueue<>(new TableCreationComparator()); + private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); public IcebergTableCreationRecordMgr() { @@ -199,8 +200,10 @@ public class IcebergTableCreationRecordMgr extends MasterDaemon { while (isQueueFull()) { IcebergTableCreationRecord record = tableCreationRecordQueue.poll(); if (record != null) { - Map tableRecords = dbToTableToCreationRecord.get(record.getDbId()); - Iterator> tableRecordsIterator = tableRecords.entrySet().iterator(); + Map tableRecords + = dbToTableToCreationRecord.get(record.getDbId()); + Iterator> tableRecordsIterator + = tableRecords.entrySet().iterator(); while (tableRecordsIterator.hasNext()) { long t = tableRecordsIterator.next().getKey(); if (t == 
record.getTableId()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/config/WebConfigurer.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/config/WebConfigurer.java index 47c20b8505..ca79c3a6f5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/config/WebConfigurer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/config/WebConfigurer.java @@ -65,6 +65,7 @@ public class WebConfigurer implements WebMvcConfigurer { "/notFound")); }; } + @Bean(name = "multipartResolver") public CommonsMultipartResolver multipartResolver() { CommonsMultipartResolver multipartResolver = new CommonsMultipartResolver(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/BaseController.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/BaseController.java index d831e34723..c58359d9b7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/BaseController.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/BaseController.java @@ -60,7 +60,8 @@ public class BaseController { checkWithCookie(request, response, true); } - public ActionAuthorizationInfo checkWithCookie(HttpServletRequest request, HttpServletResponse response, boolean checkAuth) { + public ActionAuthorizationInfo checkWithCookie(HttpServletRequest request, + HttpServletResponse response, boolean checkAuth) { // First we check if the request has Authorization header. String encodedAuthString = request.getHeader("Authorization"); if (encodedAuthString != null) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/HardwareInfoController.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/HardwareInfoController.java index c8f29cf30f..8b3802adfa 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/HardwareInfoController.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/HardwareInfoController.java @@ -113,24 +113,34 @@ public class HardwareInfoController { Util.sleep(1000); long[] ticks = processor.getSystemCpuLoadTicks(); processorInfo.add("CPU, IOWait, and IRQ ticks @ 1 sec:  " + Arrays.toString(ticks)); - long user = ticks[CentralProcessor.TickType.USER.getIndex()] - prevTicks[CentralProcessor.TickType.USER.getIndex()]; - long nice = ticks[CentralProcessor.TickType.NICE.getIndex()] - prevTicks[CentralProcessor.TickType.NICE.getIndex()]; - long sys = ticks[CentralProcessor.TickType.SYSTEM.getIndex()] - prevTicks[CentralProcessor.TickType.SYSTEM.getIndex()]; - long idle = ticks[CentralProcessor.TickType.IDLE.getIndex()] - prevTicks[CentralProcessor.TickType.IDLE.getIndex()]; - long iowait = ticks[CentralProcessor.TickType.IOWAIT.getIndex()] - prevTicks[CentralProcessor.TickType.IOWAIT.getIndex()]; - long irq = ticks[CentralProcessor.TickType.IRQ.getIndex()] - prevTicks[CentralProcessor.TickType.IRQ.getIndex()]; - long softirq = ticks[CentralProcessor.TickType.SOFTIRQ.getIndex()] - prevTicks[CentralProcessor.TickType.SOFTIRQ.getIndex()]; - long steal = ticks[CentralProcessor.TickType.STEAL.getIndex()] - prevTicks[CentralProcessor.TickType.STEAL.getIndex()]; + long user = ticks[CentralProcessor.TickType.USER.getIndex()] + - prevTicks[CentralProcessor.TickType.USER.getIndex()]; + long nice = ticks[CentralProcessor.TickType.NICE.getIndex()] + - prevTicks[CentralProcessor.TickType.NICE.getIndex()]; + long sys = ticks[CentralProcessor.TickType.SYSTEM.getIndex()] + - prevTicks[CentralProcessor.TickType.SYSTEM.getIndex()]; + long idle = 
ticks[CentralProcessor.TickType.IDLE.getIndex()] + - prevTicks[CentralProcessor.TickType.IDLE.getIndex()]; + long iowait = ticks[CentralProcessor.TickType.IOWAIT.getIndex()] + - prevTicks[CentralProcessor.TickType.IOWAIT.getIndex()]; + long irq = ticks[CentralProcessor.TickType.IRQ.getIndex()] + - prevTicks[CentralProcessor.TickType.IRQ.getIndex()]; + long softirq = ticks[CentralProcessor.TickType.SOFTIRQ.getIndex()] + - prevTicks[CentralProcessor.TickType.SOFTIRQ.getIndex()]; + long steal = ticks[CentralProcessor.TickType.STEAL.getIndex()] + - prevTicks[CentralProcessor.TickType.STEAL.getIndex()]; long totalCpu = user + nice + sys + idle + iowait + irq + softirq + steal; processorInfo.add(String.format( - "User: %.1f%% Nice: %.1f%% System: %.1f%% Idle: %.1f%% IOwait: %.1f%% IRQ: %.1f%% SoftIRQ: %.1f%% Steal: %.1f%%", + "User: %.1f%% Nice: %.1f%% System: %.1f%% Idle:" + + " %.1f%% IOwait: %.1f%% IRQ: %.1f%% SoftIRQ: %.1f%% Steal: %.1f%%", 100d * user / totalCpu, 100d * nice / totalCpu, 100d * sys / totalCpu, 100d * idle / totalCpu, 100d * iowait / totalCpu, 100d * irq / totalCpu, 100d * softirq / totalCpu, 100d * steal / totalCpu)); processorInfo.add(String.format("CPU load:   %.1f%%", processor.getSystemCpuLoadBetweenTicks(prevTicks) * 100)); double[] loadAverage = processor.getSystemLoadAverage(3); - processorInfo.add("CPU load averages:  " + (loadAverage[0] < 0 ? " N/A" : String.format(" %.2f", loadAverage[0])) + processorInfo.add("CPU load averages:  " + + (loadAverage[0] < 0 ? " N/A" : String.format(" %.2f", loadAverage[0])) + (loadAverage[1] < 0 ? " N/A" : String.format(" %.2f", loadAverage[1])) + (loadAverage[2] < 0 ? " N/A" : String.format(" %.2f", loadAverage[2]))); // per core CPU @@ -174,14 +184,16 @@ public class HardwareInfoController { private List getProcesses(OperatingSystem os, GlobalMemory memory) { List processInfo = new ArrayList<>(); - processInfo.add("Processes:   " + os.getProcessCount() + ", Threads:   " + os.getThreadCount()); + processInfo.add("Processes:   " + os.getProcessCount() + + ", Threads:   " + os.getThreadCount()); // Sort by highest CPU List procs = Arrays.asList(os.getProcesses(5, OperatingSystem.ProcessSort.CPU)); processInfo.add("         PID %CPU %MEM VSZ RSS Name"); for (int i = 0; i < procs.size() && i < 5; i++) { OSProcess p = procs.get(i); - processInfo.add(String.format("         %5d %5.1f %4.1f %9s %9s %s", p.getProcessID(), + processInfo.add(String.format("         %5d %5.1f %4.1f %9s %9s %s", + p.getProcessID(), 100d * (p.getKernelTime() + p.getUserTime()) / p.getUpTime(), 100d * p.getResidentSetSize() / memory.getTotal(), FormatUtil.formatBytes(p.getVirtualSize()), FormatUtil.formatBytes(p.getResidentSetSize()), p.getName())); @@ -194,7 +206,8 @@ public class HardwareInfoController { diskInfo.add("Disks:  "); for (HWDiskStore disk : diskStores) { boolean readwrite = disk.getReads() > 0 || disk.getWrites() > 0; - diskInfo.add(String.format("         %s: (model: %s - S/N: %s) size: %s, reads: %s (%s), writes: %s (%s), xfer: %s ms", + diskInfo.add(String.format("         %s:" + + " (model: %s - S/N: %s) size: %s, reads: %s (%s), writes: %s (%s), xfer: %s ms", disk.getName(), disk.getModel(), disk.getSerial(), disk.getSize() > 0 ? FormatUtil.formatBytesDecimal(disk.getSize()) : "?", readwrite ? disk.getReads() : "?", readwrite ? FormatUtil.formatBytes(disk.getReadBytes()) : "?", @@ -202,7 +215,8 @@ public class HardwareInfoController { readwrite ? 
disk.getTransferTime() : "?")); HWPartition[] partitions = disk.getPartitions(); for (HWPartition part : partitions) { - diskInfo.add(String.format("         |-- %s: %s (%s) Maj:Min=%d:%d, size: %s%s", part.getIdentification(), + diskInfo.add(String.format("        " + + " |-- %s: %s (%s) Maj:Min=%d:%d, size: %s%s", part.getIdentification(), part.getName(), part.getType(), part.getMajor(), part.getMinor(), FormatUtil.formatBytesDecimal(part.getSize()), part.getMountPoint().isEmpty() ? "" : " @ " + part.getMountPoint())); @@ -239,14 +253,20 @@ public class HardwareInfoController { List getNetwork = new ArrayList<>(); getNetwork.add("Network interfaces:  "); for (NetworkIF net : networkIFs) { - getNetwork.add(String.format("    Name: %s (%s)", net.getName(), net.getDisplayName())); - getNetwork.add(String.format("        MAC Address: %s", net.getMacaddr())); - getNetwork.add(String.format("        MTU: %s, Speed: %s", net.getMTU(), FormatUtil.formatValue(net.getSpeed(), "bps"))); - getNetwork.add(String.format("        IPv4: %s", Arrays.toString(net.getIPv4addr()))); - getNetwork.add(String.format("        IPv6: %s", Arrays.toString(net.getIPv6addr()))); + getNetwork.add(String.format("    Name: %s (%s)", + net.getName(), net.getDisplayName())); + getNetwork.add(String.format("        MAC Address: %s", + net.getMacaddr())); + getNetwork.add(String.format("        MTU: %s, Speed: %s", + net.getMTU(), FormatUtil.formatValue(net.getSpeed(), "bps"))); + getNetwork.add(String.format("        IPv4: %s", + Arrays.toString(net.getIPv4addr()))); + getNetwork.add(String.format("        IPv6: %s", + Arrays.toString(net.getIPv6addr()))); boolean hasData = net.getBytesRecv() > 0 || net.getBytesSent() > 0 || net.getPacketsRecv() > 0 || net.getPacketsSent() > 0; - getNetwork.add(String.format("        Traffic: received %s/%s%s; transmitted %s/%s%s", + getNetwork.add(String.format("        Traffic:" + + " received %s/%s%s; transmitted %s/%s%s", hasData ? net.getPacketsRecv() + " packets" : "?", hasData ? FormatUtil.formatBytes(net.getBytesRecv()) : "?", hasData ? 
" (" + net.getInErrors() + " err)" : "", @@ -260,11 +280,16 @@ public class HardwareInfoController { private List getNetworkParameters(NetworkParams networkParams) { List networkParameterInfo = new ArrayList<>(); networkParameterInfo.add("Network parameters:    "); - networkParameterInfo.add(String.format("        Host name: %s", networkParams.getHostName())); - networkParameterInfo.add(String.format("         Domain name: %s", networkParams.getDomainName())); - networkParameterInfo.add(String.format("         DNS servers: %s", Arrays.toString(networkParams.getDnsServers()))); - networkParameterInfo.add(String.format("         IPv4 Gateway: %s", networkParams.getIpv4DefaultGateway())); - networkParameterInfo.add(String.format("         IPv6 Gateway: %s", networkParams.getIpv6DefaultGateway())); + networkParameterInfo.add(String.format("        Host name: %s", + networkParams.getHostName())); + networkParameterInfo.add(String.format("         Domain name: %s", + networkParams.getDomainName())); + networkParameterInfo.add(String.format("         DNS servers: %s", + Arrays.toString(networkParams.getDnsServers()))); + networkParameterInfo.add(String.format("         IPv4 Gateway: %s", + networkParams.getIpv4DefaultGateway())); + networkParameterInfo.add(String.format("         IPv6 Gateway: %s", + networkParams.getIpv6DefaultGateway())); return networkParameterInfo; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/entity/ResponseEntityBuilder.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/entity/ResponseEntityBuilder.java index 52db10efae..3056494550 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/entity/ResponseEntityBuilder.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/entity/ResponseEntityBuilder.java @@ -58,7 +58,8 @@ public class ResponseEntityBuilder { } public static ResponseEntity internalError(Object data) { - ResponseBody body = new ResponseBody().code(RestApiStatusCode.INTERNAL_SERVER_ERROR).msg("Internal Error").data(data); + ResponseBody body = new ResponseBody().code(RestApiStatusCode.INTERNAL_SERVER_ERROR) + .msg("Internal Error").data(data); return ResponseEntity.status(HttpStatus.OK).body(body); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/interceptor/AuthInterceptor.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/interceptor/AuthInterceptor.java index 554f003478..a857834f60 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/interceptor/AuthInterceptor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/interceptor/AuthInterceptor.java @@ -49,10 +49,12 @@ public class AuthInterceptor extends BaseController implements HandlerIntercepto } @Override - public void postHandle(HttpServletRequest request, HttpServletResponse response, Object handler, ModelAndView modelAndView) throws Exception { + public void postHandle(HttpServletRequest request, HttpServletResponse response, + Object handler, ModelAndView modelAndView) throws Exception { } @Override - public void afterCompletion(HttpServletRequest request, HttpServletResponse response, Object handler, Exception ex) throws Exception { + public void afterCompletion(HttpServletRequest request, HttpServletResponse response, + Object handler, Exception ex) throws Exception { } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/interceptor/ServletTraceIterceptor.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/interceptor/ServletTraceIterceptor.java index 379a33eea4..7eb724d4d8 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/httpv2/interceptor/ServletTraceIterceptor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/interceptor/ServletTraceIterceptor.java @@ -35,6 +35,7 @@ import javax.servlet.http.HttpServletResponse; public class ServletTraceIterceptor implements Filter { private static final Logger LOG = LogManager.getLogger(ServletTraceIterceptor.class); + @Override public void init(FilterConfig filterConfig) throws ServletException { diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/CheckDecommissionAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/CheckDecommissionAction.java index f24f852b9d..a2fded9481 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/CheckDecommissionAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/CheckDecommissionAction.java @@ -83,7 +83,8 @@ public class CheckDecommissionAction extends RestBaseController { try { List backends = SystemHandler.checkDecommission(hostPortPairs); - List backendsList = backends.stream().map(b -> b.getHost() + ":" + b.getHeartbeatPort()).collect(Collectors.toList()); + List backendsList = backends.stream().map(b -> b.getHost() + ":" + + b.getHeartbeatPort()).collect(Collectors.toList()); return ResponseEntityBuilder.ok(backendsList); } catch (DdlException e) { return ResponseEntityBuilder.okWithCommonError(e.getMessage()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/GetDdlStmtAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/GetDdlStmtAction.java index fc64544803..601adfd60e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/GetDdlStmtAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/GetDdlStmtAction.java @@ -76,7 +76,8 @@ public class GetDdlStmtAction extends RestBaseController { table.readLock(); try { - Catalog.getDdlStmt(table, createTableStmt, addPartitionStmt, createRollupStmt, true, false /* show password */); + Catalog.getDdlStmt(table, createTableStmt, addPartitionStmt, + createRollupStmt, true, false /* show password */); } finally { table.readUnlock(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/LoadAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/LoadAction.java index 72b088300e..9fad1da2da 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/LoadAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/LoadAction.java @@ -60,7 +60,8 @@ public class LoadAction extends RestBaseController { public Object load(HttpServletRequest request, HttpServletResponse response, @PathVariable(value = DB_KEY) String db, @PathVariable(value = TABLE_KEY) String table) { if (Config.disable_mini_load) { - ResponseEntity entity = ResponseEntityBuilder.notFound("The mini load operation has been disabled by default, if you need to add disable_mini_load=false in fe.conf."); + ResponseEntity entity = ResponseEntityBuilder.notFound("The mini load operation has been" + + " disabled by default, if you need to add disable_mini_load=false in fe.conf."); return entity; } else { executeCheckPassword(request, response); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/RestBaseController.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/RestBaseController.java index 7354ca0a33..d48dcecd6b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/RestBaseController.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/RestBaseController.java @@ -155,7 
+155,8 @@ public class RestBaseController extends BaseController { } } - public void writeFileResponse(HttpServletRequest request, HttpServletResponse response, File imageFile) throws IOException { + public void writeFileResponse(HttpServletRequest request, + HttpServletResponse response, File imageFile) throws IOException { Preconditions.checkArgument(imageFile != null && imageFile.exists()); response.setHeader("Content-type", "application/octet-stream"); response.addHeader("Content-Disposition", "attachment;fileName=" + imageFile.getName()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/TableQueryPlanAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/TableQueryPlanAction.java index 302ef1b4e1..61ca6bc947 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/TableQueryPlanAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/TableQueryPlanAction.java @@ -78,7 +78,8 @@ import javax.servlet.http.HttpServletResponse; public class TableQueryPlanAction extends RestBaseController { public static final Logger LOG = LogManager.getLogger(TableQueryPlanAction.class); - @RequestMapping(path = "/api/{" + DB_KEY + "}/{" + TABLE_KEY + "}/_query_plan", method = {RequestMethod.GET, RequestMethod.POST}) + @RequestMapping(path = "/api/{" + DB_KEY + "}/{" + TABLE_KEY + "}/_query_plan", + method = {RequestMethod.GET, RequestMethod.POST}) public Object query_plan( @PathVariable(value = DB_KEY) final String dbName, @PathVariable(value = TABLE_KEY) final String tblName, @@ -160,29 +161,34 @@ public class TableQueryPlanAction extends RestBaseController { StatementBase query = stmtExecutor.getParsedStmt(); // only process select semantic if (!(query instanceof SelectStmt)) { - throw new DorisHttpException(HttpResponseStatus.BAD_REQUEST, "Select statement needed, but found [" + sql + " ]"); + throw new DorisHttpException(HttpResponseStatus.BAD_REQUEST, + "Select statement needed, but found [" + sql + " ]"); } SelectStmt stmt = (SelectStmt) query; // just only process sql like `select * from table where `, only support executing scan semantic if (stmt.hasAggInfo() || stmt.hasAnalyticInfo() || stmt.hasOrderByClause() || stmt.hasOffset() || stmt.hasLimit() || stmt.isExplain()) { - throw new DorisHttpException(HttpResponseStatus.BAD_REQUEST, "only support single table filter-prune-scan, but found [ " + sql + "]"); + throw new DorisHttpException(HttpResponseStatus.BAD_REQUEST, + "only support single table filter-prune-scan, but found [ " + sql + "]"); } // process only one table by one http query List fromTables = stmt.getTableRefs(); if (fromTables.size() != 1) { - throw new DorisHttpException(HttpResponseStatus.BAD_REQUEST, "Select statement must have only one table"); + throw new DorisHttpException(HttpResponseStatus.BAD_REQUEST, + "Select statement must have only one table"); } TableRef fromTable = fromTables.get(0); if (fromTable instanceof InlineViewRef) { - throw new DorisHttpException(HttpResponseStatus.BAD_REQUEST, "Select statement must not embed another statement"); + throw new DorisHttpException(HttpResponseStatus.BAD_REQUEST, + "Select statement must not embed another statement"); } // check consistent http requested resource with sql referenced // if consistent in this way, can avoid check privilege TableName tableAndDb = fromTables.get(0).getName(); if (!(tableAndDb.getDb().equals(requestDb) && tableAndDb.getTbl().equals(requestTable))) { - throw new DorisHttpException(HttpResponseStatus.BAD_REQUEST, "requested database and table must 
consistent with sql: request [ " + throw new DorisHttpException(HttpResponseStatus.BAD_REQUEST, + "requested database and table must consistent with sql: request [ " + requestDb + "." + requestTable + "]" + "and sql [" + tableAndDb.toString() + "]"); } @@ -192,13 +198,15 @@ public class TableQueryPlanAction extends RestBaseController { // in this way, just retrieve only one scannode List scanNodes = planner.getScanNodes(); if (scanNodes.size() != 1) { - throw new DorisHttpException(HttpResponseStatus.INTERNAL_SERVER_ERROR, "Planner should plan just only one ScanNode but found [ " + scanNodes.size() + "]"); + throw new DorisHttpException(HttpResponseStatus.INTERNAL_SERVER_ERROR, + "Planner should plan just only one ScanNode but found [ " + scanNodes.size() + "]"); } List scanRangeLocations = scanNodes.get(0).getScanRangeLocations(0); // acquire the PlanFragment which the executable template List fragments = planner.getFragments(); if (fragments.size() != 1) { - throw new DorisHttpException(HttpResponseStatus.INTERNAL_SERVER_ERROR, "Planner should plan just only one PlanFragment but found [ " + fragments.size() + "]"); + throw new DorisHttpException(HttpResponseStatus.INTERNAL_SERVER_ERROR, + "Planner should plan just only one PlanFragment but found [ " + fragments.size() + "]"); } TQueryPlanInfo tQueryPlanInfo = new TQueryPlanInfo(); @@ -234,7 +242,8 @@ public class TableQueryPlanAction extends RestBaseController { byte[] queryPlanStream = serializer.serialize(tQueryPlanInfo); opaquedQueryPlan = Base64.getEncoder().encodeToString(queryPlanStream); } catch (TException e) { - throw new DorisHttpException(HttpResponseStatus.INTERNAL_SERVER_ERROR, "TSerializer failed to serialize PlanFragment, reason [ " + e.getMessage() + " ]"); + throw new DorisHttpException(HttpResponseStatus.INTERNAL_SERVER_ERROR, + "TSerializer failed to serialize PlanFragment, reason [ " + e.getMessage() + " ]"); } result.put("partitions", tabletRoutings); result.put("opaqued_query_plan", opaquedQueryPlan); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/TableSchemaAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/TableSchemaAction.java index a2a8ff4fea..94f6c8e578 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/TableSchemaAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/TableSchemaAction.java @@ -88,7 +88,8 @@ public class TableSchemaAction extends RestBaseController { baseInfo.put("comment", column.getComment()); baseInfo.put("name", column.getDisplayName()); Optional aggregationType = Optional.ofNullable(column.getAggregationType()); - baseInfo.put("aggregation_type", aggregationType.isPresent() ? column.getAggregationType().toSql() : ""); + baseInfo.put("aggregation_type", aggregationType.isPresent() + ? 
column.getAggregationType().toSql() : ""); propList.add(baseInfo); } resultMap.put("status", 200); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/UploadAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/UploadAction.java index adb9d7d9d3..8431bd5195 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/UploadAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/UploadAction.java @@ -67,7 +67,8 @@ public class UploadAction extends RestBaseController { * @param response * @return */ - @RequestMapping(path = "/api/{" + NS_KEY + "}/{" + DB_KEY + "}/{" + TABLE_KEY + "}/upload", method = {RequestMethod.POST}) + @RequestMapping(path = "/api/{" + NS_KEY + "}/{" + DB_KEY + "}/{" + TABLE_KEY + "}/upload", + method = {RequestMethod.POST}) public Object upload( @PathVariable(value = NS_KEY) String ns, @PathVariable(value = DB_KEY) String dbName, @@ -119,7 +120,8 @@ public class UploadAction extends RestBaseController { * @param response * @return */ - @RequestMapping(path = "/api/{" + NS_KEY + "}/{" + DB_KEY + "}/{" + TABLE_KEY + "}/upload", method = {RequestMethod.PUT}) + @RequestMapping(path = "/api/{" + NS_KEY + "}/{" + DB_KEY + "}/{" + TABLE_KEY + "}/upload", + method = {RequestMethod.PUT}) public Object submit( @PathVariable(value = NS_KEY) String ns, @PathVariable(value = DB_KEY) String dbName, @@ -152,7 +154,8 @@ public class UploadAction extends RestBaseController { } Preconditions.checkNotNull(tmpFile, fileIdStr); - LoadContext loadContext = new LoadContext(request, dbName, tblName, authInfo.fullUserName, authInfo.password, tmpFile); + LoadContext loadContext = new LoadContext(request, dbName, tblName, + authInfo.fullUserName, authInfo.password, tmpFile); Future future = loadSubmitter.submit(loadContext); try { @@ -173,7 +176,8 @@ public class UploadAction extends RestBaseController { * @param response * @return */ - @RequestMapping(path = "/api/{" + NS_KEY + "}/{" + DB_KEY + "}/{" + TABLE_KEY + "}/upload", method = {RequestMethod.GET}) + @RequestMapping(path = "/api/{" + NS_KEY + "}/{" + DB_KEY + "}/{" + TABLE_KEY + "}/upload", + method = {RequestMethod.GET}) public Object list( @PathVariable(value = NS_KEY) String ns, @PathVariable(value = DB_KEY) String dbName, @@ -216,7 +220,8 @@ public class UploadAction extends RestBaseController { } } - @RequestMapping(path = "/api/{" + NS_KEY + "}/{" + DB_KEY + "}/{" + TABLE_KEY + "}/upload", method = {RequestMethod.DELETE}) + @RequestMapping(path = "/api/{" + NS_KEY + "}/{" + DB_KEY + "}/{" + TABLE_KEY + "}/upload", + method = {RequestMethod.DELETE}) public Object delete( @PathVariable(value = NS_KEY) String ns, @PathVariable(value = DB_KEY) String dbName, @@ -273,7 +278,8 @@ public class UploadAction extends RestBaseController { public String fuzzyParse; - public LoadContext(HttpServletRequest request, String db, String tbl, String user, String passwd, TmpFileMgr.TmpFile file) { + public LoadContext(HttpServletRequest request, String db, + String tbl, String user, String passwd, TmpFileMgr.TmpFile file) { this.db = db; this.tbl = tbl; this.user = user; diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/NodeAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/NodeAction.java index 3f1469a86e..bedc21ef5c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/NodeAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/NodeAction.java @@ -96,7 +96,7 @@ public class NodeAction extends 
RestBaseController { .build(); private Object httpExecutorLock = new Object(); - private volatile static ExecutorService httpExecutor = null; + private static volatile ExecutorService httpExecutor = null; // Returns all fe information, similar to 'show frontends'. @RequestMapping(path = "/frontends", method = RequestMethod.GET) @@ -535,8 +535,8 @@ public class NodeAction extends RestBaseController { if (jsonObject.get("code").getAsInt() != HttpUtils.REQUEST_SUCCESS_CODE) { throw new Exception(jsonObject.get("msg").getAsString()); } - SetConfigAction.SetConfigEntity setConfigEntity = GsonUtils.GSON.fromJson(jsonObject.get("data").getAsJsonObject(), - SetConfigAction.SetConfigEntity.class); + SetConfigAction.SetConfigEntity setConfigEntity = GsonUtils.GSON.fromJson( + jsonObject.get("data").getAsJsonObject(), SetConfigAction.SetConfigEntity.class); for (SetConfigAction.ErrConfig errConfig : setConfigEntity.getErrConfigs()) { Map failed = Maps.newHashMap(); addFailedConfig(errConfig.getConfigName(), errConfig.getConfigValue(), diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/restv2/ImportAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/restv2/ImportAction.java index a52362c451..a82cf4aa37 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/restv2/ImportAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/restv2/ImportAction.java @@ -72,7 +72,8 @@ public class ImportAction { * } */ @RequestMapping(path = "/api/import/file_review", method = RequestMethod.POST) - public Object fileReview(@RequestBody FileReviewRequestVo body, HttpServletRequest request, HttpServletResponse response) { + public Object fileReview(@RequestBody FileReviewRequestVo body, + HttpServletRequest request, HttpServletResponse response) { FileInfo fileInfo = body.getFileInfo(); ConnectInfo connectInfo = body.getConnectInfo(); BrokerDesc brokerDesc = new BrokerDesc(connectInfo.getBrokerName(), connectInfo.getBrokerProps()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/LoadSubmitter.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/LoadSubmitter.java index 1627b75513..293f250f99 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/LoadSubmitter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/LoadSubmitter.java @@ -83,11 +83,13 @@ public class LoadSubmitter { // choose a backend to submit the stream load Backend be = selectOneBackend(); - String loadUrlStr = String.format(STREAM_LOAD_URL_PATTERN, be.getHost(), be.getHttpPort(), loadContext.db, loadContext.tbl); + String loadUrlStr = String.format(STREAM_LOAD_URL_PATTERN, be.getHost(), + be.getHttpPort(), loadContext.db, loadContext.tbl); URL loadUrl = new URL(loadUrlStr); HttpURLConnection conn = (HttpURLConnection) loadUrl.openConnection(); conn.setRequestMethod("PUT"); - String auth = String.format("%s:%s", ClusterNamespace.getNameFromFullName(loadContext.user), loadContext.passwd); + String auth = String.format("%s:%s", ClusterNamespace.getNameFromFullName(loadContext.user), + loadContext.passwd); String authEncoding = Base64.getEncoder().encodeToString(auth.getBytes(StandardCharsets.UTF_8)); conn.setRequestProperty("Authorization", "Basic " + authEncoding); conn.addRequestProperty("Expect", "100-continue"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/StatementSubmitter.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/StatementSubmitter.java index e29db73d6f..c96879741c 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/StatementSubmitter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/StatementSubmitter.java @@ -20,7 +20,6 @@ package org.apache.doris.httpv2.util; import org.apache.doris.analysis.DdlStmt; import org.apache.doris.analysis.ExportStmt; -import org.apache.doris.analysis.InsertStmt; import org.apache.doris.analysis.QueryStmt; import org.apache.doris.analysis.ShowStmt; import org.apache.doris.analysis.SqlParser; @@ -100,14 +99,15 @@ public class StatementSubmitter { conn = DriverManager.getConnection(dbUrl, queryCtx.user, queryCtx.passwd); long startTime = System.currentTimeMillis(); if (stmtBase instanceof QueryStmt || stmtBase instanceof ShowStmt) { - stmt = conn.prepareStatement(queryCtx.stmt, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); + stmt = conn.prepareStatement( + queryCtx.stmt, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); // set fetch size to 1 to enable streaming result set to avoid OOM. ((PreparedStatement) stmt).setFetchSize(1); ResultSet rs = ((PreparedStatement) stmt).executeQuery(); ExecutionResultSet resultSet = generateResultSet(rs, startTime); rs.close(); return resultSet; - } else if (stmtBase instanceof InsertStmt || stmtBase instanceof DdlStmt || stmtBase instanceof ExportStmt) { + } else if (stmtBase instanceof DdlStmt || stmtBase instanceof ExportStmt) { stmt = conn.createStatement(); stmt.execute(queryCtx.stmt); ExecutionResultSet resultSet = generateExecStatus(startTime); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/TmpFileMgr.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/TmpFileMgr.java index 302341452a..dcdbc4bac6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/TmpFileMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/util/TmpFileMgr.java @@ -88,7 +88,8 @@ public class TmpFileMgr { */ public synchronized TmpFile upload(UploadFile uploadFile) throws TmpFileException { if (uploadFile.file.getSize() > MAX_SINGLE_FILE_SIZE) { - throw new TmpFileException("File size " + uploadFile.file.getSize() + " exceed limit " + MAX_SINGLE_FILE_SIZE); + throw new TmpFileException("File size " + uploadFile.file.getSize() + + " exceed limit " + MAX_SINGLE_FILE_SIZE); } if (totalFileSize + uploadFile.file.getSize() > MAX_TOTAL_FILE_SIZE_BYTES) { @@ -204,7 +205,8 @@ public class TmpFileMgr { // make a copy without lines and maxColNum. 
// so that can call `setPreview` and will not affect other instance public TmpFile copy() { - TmpFile copiedFile = new TmpFile(this.id, this.uuid, this.originFileName, this.fileSize, this.columnSeparator); + TmpFile copiedFile = new TmpFile(this.id, this.uuid, this.originFileName, + this.fileSize, this.columnSeparator); copiedFile.absPath = this.absPath; return copiedFile; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBDebugger.java b/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBDebugger.java index cbaf1d7757..313f4becb3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBDebugger.java +++ b/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBDebugger.java @@ -89,7 +89,8 @@ public class BDBDebugger { httpServer.start(); // MySQl server - QeService qeService = new QeService(Config.query_port, Config.mysql_service_nio_enabled, ExecuteEnv.getInstance().getScheduler()); + QeService qeService = new QeService(Config.query_port, + Config.mysql_service_nio_enabled, ExecuteEnv.getInstance().getScheduler()); qeService.start(); ThreadPoolManager.registerAllThreadPoolMetric(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBEnvironment.java b/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBEnvironment.java index 638a0204d4..ab8e7fb12c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBEnvironment.java +++ b/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBEnvironment.java @@ -92,8 +92,8 @@ public class BDBEnvironment { LOG.error("Current node is not in the electable_nodes list. will exit"); System.exit(-1); } - DbResetRepGroup resetUtility = new DbResetRepGroup(envHome, PALO_JOURNAL_GROUP, selfNodeName, - selfNodeHostPort); + DbResetRepGroup resetUtility = new DbResetRepGroup( + envHome, PALO_JOURNAL_GROUP, selfNodeName, selfNodeHostPort); resetUtility.reset(); LOG.info("group has been reset."); } @@ -108,8 +108,10 @@ public class BDBEnvironment { replicationConfig.setMaxClockDelta(Config.max_bdbje_clock_delta_ms, TimeUnit.MILLISECONDS); replicationConfig.setConfigParam(ReplicationConfig.TXN_ROLLBACK_LIMIT, String.valueOf(Config.txn_rollback_limit)); - replicationConfig.setConfigParam(ReplicationConfig.REPLICA_TIMEOUT, Config.bdbje_heartbeat_timeout_second + " s"); - replicationConfig.setConfigParam(ReplicationConfig.FEEDER_TIMEOUT, Config.bdbje_heartbeat_timeout_second + " s"); + replicationConfig.setConfigParam(ReplicationConfig.REPLICA_TIMEOUT, + Config.bdbje_heartbeat_timeout_second + " s"); + replicationConfig.setConfigParam(ReplicationConfig.FEEDER_TIMEOUT, + Config.bdbje_heartbeat_timeout_second + " s"); if (isElectable) { replicationConfig.setReplicaAckTimeout(Config.bdbje_replica_ack_timeout_second, TimeUnit.SECONDS); diff --git a/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapClient.java b/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapClient.java index 7c1f26f9de..053c10e348 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapClient.java +++ b/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapClient.java @@ -43,7 +43,7 @@ import java.util.List; public class LdapClient { private static final Logger LOG = LogManager.getLogger(LdapClient.class); - private volatile static ClientInfo clientInfo; + private static volatile ClientInfo clientInfo; @Data private static class ClientInfo { @@ -172,9 +172,8 @@ public class LdapClient { } private static String getUserDn(String userName) { - List userDns = 
getDn(org.springframework.ldap.query.LdapQueryBuilder.query(). - base(LdapConfig.ldap_user_basedn) - .filter(getUserFilter(LdapConfig.ldap_user_filter, userName))); + List userDns = getDn(org.springframework.ldap.query.LdapQueryBuilder.query() + .base(LdapConfig.ldap_user_basedn).filter(getUserFilter(LdapConfig.ldap_user_filter, userName))); if (userDns == null || userDns.isEmpty()) { return null; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapPrivsChecker.java b/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapPrivsChecker.java index 62c4897adb..a26c3168ee 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapPrivsChecker.java +++ b/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapPrivsChecker.java @@ -38,9 +38,9 @@ import org.apache.logging.log4j.Logger; import java.util.Map; /** - * If the user logs in with LDAP authentication, the user LDAP group privileges will be saved in 'ldapGroupsPrivs' of ConnectContext. - * When checking user privileges, Doris need to check both the privileges granted by Doris and LDAP group privileges. - * This class is used for checking current user LDAP group privileges. + * If the user logs in with LDAP authentication, the user LDAP group privileges will be saved in 'ldapGroupsPrivs' of + * ConnectContext. When checking user privileges, Doris need to check both the privileges granted by Doris + * and LDAP group privileges. This class is used for checking current user LDAP group privileges. */ public class LdapPrivsChecker { private static final Logger LOG = LogManager.getLogger(LdapPrivsChecker.class); @@ -145,13 +145,14 @@ public class LdapPrivsChecker { } } - private static void getCurrentUserResourcePrivs(UserIdentity currentUser, String resourceName, PrivBitSet savedPrivs, - PaloAuth.PrivLevel level) { + private static void getCurrentUserResourcePrivs(UserIdentity currentUser, + String resourceName, PrivBitSet savedPrivs, PaloAuth.PrivLevel level) { if (!hasLdapPrivs(currentUser)) { return; } PaloRole currentUserLdapPrivs = ConnectContext.get().getLdapGroupsPrivs(); - for (Map.Entry entry : currentUserLdapPrivs.getResourcePatternToPrivs().entrySet()) { + for (Map.Entry entry + : currentUserLdapPrivs.getResourcePatternToPrivs().entrySet()) { switch (entry.getKey().getPrivLevel()) { case GLOBAL: if (level.equals(PaloAuth.PrivLevel.GLOBAL)) { @@ -192,7 +193,8 @@ public class LdapPrivsChecker { } PaloRole currentUserLdapPrivs = ConnectContext.get().getLdapGroupsPrivs(); for (Map.Entry entry : currentUserLdapPrivs.getTblPatternToPrivs().entrySet()) { - if (entry.getKey().getPrivLevel().equals(PaloAuth.PrivLevel.TABLE) && entry.getKey().getQualifiedDb().equals(db)) { + if (entry.getKey().getPrivLevel().equals(PaloAuth.PrivLevel.TABLE) + && entry.getKey().getQualifiedDb().equals(db)) { return true; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroup.java b/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroup.java index ab402d728c..c379b268a2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroup.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroup.java @@ -174,7 +174,8 @@ public class BrokerFileGroup implements Writable { for (String pName : partitionNames.getPartitionNames()) { Partition partition = olapTable.getPartition(pName, partitionNames.isTemp()); if (partition == null) { - throw new DdlException("Unknown partition '" + pName + "' in table '" + olapTable.getName() + "'"); + throw new DdlException("Unknown partition '" + pName + + "' in table 
'" + olapTable.getName() + "'"); } partitionIds.add(partition.getId()); } @@ -257,7 +258,8 @@ public class BrokerFileGroup implements Writable { jsonPaths = dataDescription.getJsonPaths(); jsonRoot = dataDescription.getJsonRoot(); fuzzyParse = dataDescription.isFuzzyParse(); - // For broker load, we only support reading json format data line by line, so we set readJsonByLine to true here. + // For broker load, we only support reading json format data line by line, + // so we set readJsonByLine to true here. readJsonByLine = true; numAsString = dataDescription.isNumAsString(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroupAggInfo.java b/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroupAggInfo.java index 2a1cf4ff0b..144ab8f3ac 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroupAggInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroupAggInfo.java @@ -178,8 +178,10 @@ public class BrokerFileGroupAggInfo implements Writable { if (fileGroupList == null) { // check if there are overlapping partitions of same table if (tableIdToPartitionIds.containsKey(fileGroup.getTableId()) - && tableIdToPartitionIds.get(fileGroup.getTableId()).stream().anyMatch(id -> fileGroup.getPartitionIds().contains(id))) { - throw new DdlException("There are overlapping partitions of same table in data description of load job stmt"); + && tableIdToPartitionIds.get(fileGroup.getTableId()).stream() + .anyMatch(id -> fileGroup.getPartitionIds().contains(id))) { + throw new DdlException("There are overlapping partitions of same table" + + " in data description of load job stmt"); } fileGroupList = Lists.newArrayList(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/DeleteHandler.java b/fe/fe-core/src/main/java/org/apache/doris/load/DeleteHandler.java index ef7bd7e973..d25bc8b2b3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/DeleteHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/DeleteHandler.java @@ -102,13 +102,13 @@ public class DeleteHandler implements Writable { private static final Logger LOG = LogManager.getLogger(DeleteHandler.class); // TransactionId -> DeleteJob - private Map idToDeleteJob; + private final Map idToDeleteJob; // Db -> DeleteInfo list @SerializedName(value = "dbToDeleteInfos") - private Map> dbToDeleteInfos; + private final Map> dbToDeleteInfos; - private ReentrantReadWriteLock lock; + private final ReentrantReadWriteLock lock; public DeleteHandler() { idToDeleteJob = Maps.newConcurrentMap(); @@ -164,7 +164,8 @@ public class DeleteHandler implements Writable { || olapTable.getPartitionInfo().getType() == PartitionType.LIST) { if (!ConnectContext.get().getSessionVariable().isDeleteWithoutPartition()) { throw new DdlException("This is a range or list partitioned table." 
- + " You should specify partition in delete stmt, or set delete_without_partition to true"); + + " You should specify partition in delete stmt," + + " or set delete_without_partition to true"); } else { partitionNames.addAll(olapTable.getPartitionNames()); } @@ -205,12 +206,14 @@ public class DeleteHandler implements Writable { DeleteInfo deleteInfo = new DeleteInfo(db.getId(), olapTable.getId(), tableName, deleteConditions); - deleteInfo.setPartitions(noPartitionSpecified, partitions.stream().map(p -> p.getId()).collect(Collectors.toList()), partitionNames); + deleteInfo.setPartitions(noPartitionSpecified, partitions.stream().map(Partition::getId) + .collect(Collectors.toList()), partitionNames); deleteJob = new DeleteJob(jobId, transactionId, label, partitionReplicaNum, deleteInfo); idToDeleteJob.put(deleteJob.getTransactionId(), deleteJob); Catalog.getCurrentGlobalTransactionMgr().getCallbackFactory().addCallback(deleteJob); - TransactionState txnState = Catalog.getCurrentGlobalTransactionMgr().getTransactionState(db.getId(), transactionId); + TransactionState txnState = Catalog.getCurrentGlobalTransactionMgr() + .getTransactionState(db.getId(), transactionId); // must call this to make sure we only handle the tablet in the mIndex we saw here. // table may be under schema changge or rollup, and the newly created tablets will not be checked later, // to make sure that the delete transaction can be done successfully. @@ -256,7 +259,8 @@ public class DeleteHandler implements Writable { true, TPriority.NORMAL, TTaskType.REALTIME_PUSH, transactionId, - Catalog.getCurrentGlobalTransactionMgr().getTransactionIDGenerator().getNextTransactionId()); + Catalog.getCurrentGlobalTransactionMgr() + .getTransactionIDGenerator().getNextTransactionId()); pushTask.setIsSchemaChanging(false); pushTask.setCountDownLatch(countDownLatch); @@ -315,7 +319,8 @@ public class DeleteHandler implements Writable { DeleteState state = deleteJob.getState(); switch (state) { case UN_QUORUM: - LOG.warn("delete job timeout: transactionId {}, timeout {}, {}", transactionId, timeoutMs, errMsg); + LOG.warn("delete job timeout: transactionId {}, timeout {}, {}", + transactionId, timeoutMs, errMsg); cancelJob(deleteJob, CancelType.TIMEOUT, "delete job timeout"); throw new DdlException("failed to execute delete. transaction id " + transactionId + ", timeout(ms) " + timeoutMs + ", " + errMsg); @@ -325,11 +330,13 @@ public class DeleteHandler implements Writable { long nowQuorumTimeMs = System.currentTimeMillis(); long endQuorumTimeoutMs = nowQuorumTimeMs + timeoutMs / 2; // if job's state is quorum_finished then wait for a period of time and commit it. 
- while (deleteJob.getState() == DeleteState.QUORUM_FINISHED && endQuorumTimeoutMs > nowQuorumTimeMs) { + while (deleteJob.getState() == DeleteState.QUORUM_FINISHED + && endQuorumTimeoutMs > nowQuorumTimeMs) { deleteJob.checkAndUpdateQuorum(); Thread.sleep(1000); nowQuorumTimeMs = System.currentTimeMillis(); - LOG.debug("wait for quorum finished delete job: {}, txn id: {}" + deleteJob.getId(), transactionId); + LOG.debug("wait for quorum finished delete job: {}, txn id: {}", + deleteJob.getId(), transactionId); } } catch (MetaNotFoundException e) { cancelJob(deleteJob, CancelType.METADATA_MISSING, e.getMessage()); @@ -354,12 +361,13 @@ public class DeleteHandler implements Writable { } } - private void commitJob(DeleteJob job, Database db, Table table, long timeoutMs) throws DdlException, QueryStateException { + private void commitJob(DeleteJob job, Database db, Table table, long timeoutMs) + throws DdlException, QueryStateException { TransactionStatus status = null; try { unprotectedCommitJob(job, db, table, timeoutMs); - status = Catalog.getCurrentGlobalTransactionMgr(). - getTransactionState(db.getId(), job.getTransactionId()).getTransactionStatus(); + status = Catalog.getCurrentGlobalTransactionMgr() + .getTransactionState(db.getId(), job.getTransactionId()).getTransactionStatus(); } catch (UserException e) { if (cancelJob(job, CancelType.COMMIT_FAIL, e.getMessage())) { throw new DdlException(e.getMessage(), e); @@ -415,7 +423,8 @@ public class DeleteHandler implements Writable { tabletCommitInfos.add(new TabletCommitInfo(tabletId, replica.getBackendId())); } } - return globalTransactionMgr.commitAndPublishTransaction(db, Lists.newArrayList(table), transactionId, tabletCommitInfos, timeoutMs); + return globalTransactionMgr.commitAndPublishTransaction(db, Lists.newArrayList(table), + transactionId, tabletCommitInfos, timeoutMs); } /** @@ -467,18 +476,23 @@ public class DeleteHandler implements Writable { * @return */ public boolean cancelJob(DeleteJob job, CancelType cancelType, String reason) { - LOG.info("start to cancel delete job, transactionId: {}, cancelType: {}", job.getTransactionId(), cancelType.name()); + LOG.info("start to cancel delete job, transactionId: {}, cancelType: {}", + job.getTransactionId(), cancelType.name()); GlobalTransactionMgr globalTransactionMgr = Catalog.getCurrentGlobalTransactionMgr(); try { if (job != null) { globalTransactionMgr.abortTransaction(job.getDeleteInfo().getDbId(), job.getTransactionId(), reason); } } catch (Exception e) { - TransactionState state = globalTransactionMgr.getTransactionState(job.getDeleteInfo().getDbId(), job.getTransactionId()); + TransactionState state = globalTransactionMgr.getTransactionState( + job.getDeleteInfo().getDbId(), job.getTransactionId()); if (state == null) { - LOG.warn("cancel delete job failed because txn not found, transactionId: {}", job.getTransactionId()); - } else if (state.getTransactionStatus() == TransactionStatus.COMMITTED || state.getTransactionStatus() == TransactionStatus.VISIBLE) { - LOG.warn("cancel delete job {} failed because it has been committed, transactionId: {}", job.getTransactionId()); + LOG.warn("cancel delete job failed because txn not found, transactionId: {}", + job.getTransactionId()); + } else if (state.getTransactionStatus() == TransactionStatus.COMMITTED + || state.getTransactionStatus() == TransactionStatus.VISIBLE) { + LOG.warn("cancel delete job failed because it has been committed, transactionId: {}", + job.getTransactionId()); return false; } else { LOG.warn("errors while 
abort transaction", e); @@ -506,9 +520,9 @@ public class DeleteHandler implements Writable { return slotRef; } - private void checkDeleteV2(OlapTable table, List partitions, List conditions, List deleteConditions) + private void checkDeleteV2(OlapTable table, List partitions, + List conditions, List deleteConditions) throws DdlException { - // check condition column is key column and condition value // Here we use "getFullSchema()" to get all columns including VISIBLE and SHADOW columns Map nameToColumn = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER); @@ -524,7 +538,8 @@ public class DeleteHandler implements Writable { } if (Column.isShadowColumn(columnName)) { - ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR, "Can not apply delete condition to shadow column"); + ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR, + "Can not apply delete condition to shadow column"); } // Check if this column is under schema change, if yes, there will be a shadow column related to it. @@ -559,10 +574,12 @@ public class DeleteHandler implements Writable { } else if (value.toLowerCase().equals("false")) { binaryPredicate.setChild(1, LiteralExpr.create("0", Type.TINYINT)); } - } else if (column.getDataType() == PrimitiveType.DATE || column.getDataType() == PrimitiveType.DATETIME) { + } else if (column.getDataType() == PrimitiveType.DATE + || column.getDataType() == PrimitiveType.DATETIME) { DateLiteral dateLiteral = new DateLiteral(value, Type.fromPrimitiveType(column.getDataType())); value = dateLiteral.getStringValue(); - binaryPredicate.setChild(1, LiteralExpr.create(value, Type.fromPrimitiveType(column.getDataType()))); + binaryPredicate.setChild(1, LiteralExpr.create(value, + Type.fromPrimitiveType(column.getDataType()))); } LiteralExpr.create(value, Type.fromPrimitiveType(column.getDataType())); } catch (AnalysisException e) { @@ -575,12 +592,16 @@ public class DeleteHandler implements Writable { InPredicate inPredicate = (InPredicate) condition; for (int i = 1; i <= inPredicate.getInElementNum(); i++) { value = ((LiteralExpr) inPredicate.getChild(i)).getStringValue(); - if (column.getDataType() == PrimitiveType.DATE || column.getDataType() == PrimitiveType.DATETIME) { - DateLiteral dateLiteral = new DateLiteral(value, Type.fromPrimitiveType(column.getDataType())); + if (column.getDataType() == PrimitiveType.DATE + || column.getDataType() == PrimitiveType.DATETIME) { + DateLiteral dateLiteral = new DateLiteral(value, + Type.fromPrimitiveType(column.getDataType())); value = dateLiteral.getStringValue(); - inPredicate.setChild(i, LiteralExpr.create(value, Type.fromPrimitiveType(column.getDataType()))); + inPredicate.setChild(i, LiteralExpr.create(value, + Type.fromPrimitiveType(column.getDataType()))); } else { - LiteralExpr.create(value, Type.fromPrimitiveType(column.getDataType())); + LiteralExpr.create(value, + Type.fromPrimitiveType(column.getDataType())); } } } catch (AnalysisException e) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/DeleteJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/DeleteJob.java index b462d31cfa..c53b731025 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/DeleteJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/DeleteJob.java @@ -91,7 +91,8 @@ public class DeleteJob extends AbstractTxnStateChangeCallback { Short replicaNum = partitionReplicaNum.get(tDeleteInfo.getPartitionId()); if (replicaNum == null) { // should not happen - throw new MetaNotFoundException("Unknown partition " + tDeleteInfo.getPartitionId() + 
" when commit delete job"); + throw new MetaNotFoundException("Unknown partition " + + tDeleteInfo.getPartitionId() + " when commit delete job"); } if (tDeleteInfo.getFinishedReplicas().size() == replicaNum) { finishedTablets.add(tDeleteInfo.getTabletId()); @@ -116,7 +117,8 @@ public class DeleteJob extends AbstractTxnStateChangeCallback { } } - LOG.info("check delete job quorum, transaction id: {}, total tablets: {}, quorum tablets: {}, dropped tablets: {}", + LOG.info("check delete job quorum, transaction id: {}, total tablets: {}," + + " quorum tablets: {}, dropped tablets: {}", signature, totalTablets.size(), quorumTablets.size(), dropCounter); if (finishedTablets.containsAll(totalTablets)) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/EtlStatus.java b/fe/fe-core/src/main/java/org/apache/doris/load/EtlStatus.java index 263140db87..24cd08100e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/EtlStatus.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/EtlStatus.java @@ -177,6 +177,7 @@ public class EtlStatus implements Writable { Text.writeString(out, entry.getValue()); } } + public void readFields(DataInput in) throws IOException { state = TEtlState.valueOf(Text.readString(in)); trackingUrl = Text.readString(in); diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/FailMsg.java b/fe/fe-core/src/main/java/org/apache/doris/load/FailMsg.java index d54ab92d85..ac227c0f32 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/FailMsg.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/FailMsg.java @@ -77,6 +77,7 @@ public class FailMsg implements Writable { Text.writeString(out, cancelType.name()); Text.writeString(out, msg); } + public void readFields(DataInput in) throws IOException { cancelType = CancelType.valueOf(Text.readString(in)); msg = Text.readString(in); diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/Load.java b/fe/fe-core/src/main/java/org/apache/doris/load/Load.java index df65545ce0..97ba9e20db 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/Load.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/Load.java @@ -608,7 +608,7 @@ public class Load { * This is only used for hadoop load */ public static void checkAndCreateSource(Database db, DataDescription dataDescription, - Map>> tableToPartitionSources, EtlJobType jobType) throws DdlException { + Map>> tableToPartitionSources, EtlJobType jobType) throws DdlException { Source source = new Source(dataDescription.getFilePaths()); long tableId = -1; Set sourcePartitionIds = Sets.newHashSet(); @@ -673,7 +673,8 @@ public class Load { source.setColumnNames(columnNames); // check default value - Map>> columnToHadoopFunction = dataDescription.getColumnToHadoopFunction(); + Map>> columnToHadoopFunction + = dataDescription.getColumnToHadoopFunction(); List parsedColumnExprList = dataDescription.getParsedColumnExprList(); Map parsedColumnExprMap = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER); for (ImportColumnDesc importColumnDesc : parsedColumnExprList) { @@ -722,7 +723,8 @@ public class Load { // their names. These columns are invisible to user, but we need to generate data for these columns. // So we add column mappings for these column. 
// eg1: - // base schema is (A, B, C), and B is under schema change, so there will be a shadow column: '__doris_shadow_B' + // base schema is (A, B, C), and B is under schema change, + // so there will be a shadow column: '__doris_shadow_B' // So the final column mapping should looks like: (A, B, C, __doris_shadow_B = substitute(B)); for (Column column : table.getFullSchema()) { if (column.isNameWithPrefix(SchemaChangeHandler.SHADOW_NAME_PRFIX)) { @@ -748,16 +750,21 @@ public class Load { * -> * (A, B, C) SET (__doris_shadow_B = substitute(B)) */ - columnToHadoopFunction.put(column.getName(), Pair.create("substitute", Lists.newArrayList(originCol))); - ImportColumnDesc importColumnDesc = new ImportColumnDesc(column.getName(), new SlotRef(null, originCol)); + columnToHadoopFunction.put(column.getName(), + Pair.create("substitute", Lists.newArrayList(originCol))); + ImportColumnDesc importColumnDesc + = new ImportColumnDesc(column.getName(), new SlotRef(null, originCol)); parsedColumnExprList.add(importColumnDesc); } } else { /* * There is a case that if user does not specify the related origin column, eg: - * COLUMNS (A, C), and B is not specified, but B is being modified so there is a shadow column '__doris_shadow_B'. - * We can not just add a mapping function "__doris_shadow_B = substitute(B)", because Doris can not find column B. - * In this case, __doris_shadow_B can use its default value, so no need to add it to column mapping + * COLUMNS (A, C), and B is not specified, but B is being modified + * so there is a shadow column '__doris_shadow_B'. + * We can not just add a mapping function "__doris_shadow_B = substitute(B)", + * because Doris can not find column B. + * In this case, __doris_shadow_B can use its default value, + * so no need to add it to column mapping */ // do nothing } @@ -770,7 +777,8 @@ public class Load { * -> * (A, B, C) SET (__DORIS_DELETE_SIGN__ = 0) */ - columnToHadoopFunction.put(column.getName(), Pair.create("default_value", Lists.newArrayList(column.getDefaultValue()))); + columnToHadoopFunction.put(column.getName(), Pair.create("default_value", + Lists.newArrayList(column.getDefaultValue()))); ImportColumnDesc importColumnDesc = null; try { importColumnDesc = new ImportColumnDesc(column.getName(), @@ -913,8 +921,10 @@ public class Load { } else { /* * There is a case that if user does not specify the related origin column, eg: - * COLUMNS (A, C), and B is not specified, but B is being modified so there is a shadow column '__doris_shadow_B'. - * We can not just add a mapping function "__doris_shadow_B = substitute(B)", because Doris can not find column B. + * COLUMNS (A, C), and B is not specified, but B is being modified + * so there is a shadow column '__doris_shadow_B'. + * We can not just add a mapping function "__doris_shadow_B = substitute(B)", + * because Doris can not find column B. 
* In this case, __doris_shadow_B can use its default value, so no need to add it to column mapping */ // do nothing @@ -928,7 +938,7 @@ public class Load { * not init slot desc and analyze exprs */ public static void initColumns(Table tbl, List columnExprs, - Map>> columnToHadoopFunction) throws UserException { + Map>> columnToHadoopFunction) throws UserException { initColumns(tbl, columnExprs, columnToHadoopFunction, null, null, null, null, null, null, false, false); } @@ -1862,7 +1872,7 @@ public class Load { } public LinkedList> getLoadJobInfosByDb(long dbId, String dbName, String labelValue, - boolean accurateMatch, Set states) throws AnalysisException { + boolean accurateMatch, Set states) throws AnalysisException { LinkedList> loadJobInfos = new LinkedList>(); readLock(); try { @@ -2330,7 +2340,8 @@ public class Load { updatePartitionVersion(partition, partitionLoadInfo.getVersion(), jobId); // update table row count - for (MaterializedIndex materializedIndex : partition.getMaterializedIndices(IndexExtState.ALL)) { + for (MaterializedIndex materializedIndex + : partition.getMaterializedIndices(IndexExtState.ALL)) { long indexRowCount = 0L; for (Tablet tablet : materializedIndex.getTablets()) { long tabletRowCount = 0L; @@ -2868,8 +2879,8 @@ public class Load { long jobId = job.getId(); JobState srcState = job.getState(); CancelType tmpCancelType = CancelType.UNKNOWN; - // should abort in transaction manager first because it maybe abort job successfully and abort in transaction manager failed - // then there will be rubbish transactions in transaction manager + // should abort in transaction manager first because it maybe aborts job successfully + // and abort in transaction manager failed then there will be rubbish transactions in transaction manager try { Catalog.getCurrentGlobalTransactionMgr().abortTransaction( job.getDbId(), diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/LoadChecker.java b/fe/fe-core/src/main/java/org/apache/doris/load/LoadChecker.java index 3e79763ce9..f829e31e01 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/LoadChecker.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/LoadChecker.java @@ -90,14 +90,18 @@ public class LoadChecker extends MasterDaemon { Map pendingPriorityMap = Maps.newHashMap(); pendingPriorityMap.put(TPriority.NORMAL, - new MasterTaskExecutor("load_pending_thread_num_normal_priority", Config.load_pending_thread_num_normal_priority, true)); + new MasterTaskExecutor("load_pending_thread_num_normal_priority", + Config.load_pending_thread_num_normal_priority, true)); pendingPriorityMap.put(TPriority.HIGH, - new MasterTaskExecutor("load_pending_thread_num_high_priority", Config.load_pending_thread_num_high_priority, true)); + new MasterTaskExecutor("load_pending_thread_num_high_priority", + Config.load_pending_thread_num_high_priority, true)); executors.put(JobState.PENDING, pendingPriorityMap); Map etlPriorityMap = Maps.newHashMap(); - etlPriorityMap.put(TPriority.NORMAL, new MasterTaskExecutor("load_etl_thread_num_normal_priority", Config.load_etl_thread_num_normal_priority, true)); - etlPriorityMap.put(TPriority.HIGH, new MasterTaskExecutor("load_etl_thread_num_high_priority", Config.load_etl_thread_num_high_priority, true)); + etlPriorityMap.put(TPriority.NORMAL, new MasterTaskExecutor("load_etl_thread_num_normal_priority", + Config.load_etl_thread_num_normal_priority, true)); + etlPriorityMap.put(TPriority.HIGH, new MasterTaskExecutor("load_etl_thread_num_high_priority", + 
Config.load_etl_thread_num_high_priority, true)); executors.put(JobState.ETL, etlPriorityMap); } @@ -246,13 +250,15 @@ public class LoadChecker extends MasterDaemon { try { tables = db.getTablesOnIdOrderOrThrowException(tableIds); } catch (UserException e) { - load.cancelLoadJob(job, CancelType.LOAD_RUN_FAIL, "table does not exist. dbId: " + dbId + ", err: " + e.getMessage()); + load.cancelLoadJob(job, CancelType.LOAD_RUN_FAIL, + "table does not exist. dbId: " + dbId + ", err: " + e.getMessage()); return; } if (job.getTransactionId() < 0) { LOG.warn("cancel load job {} because it is an old type job, user should resubmit it", job); - load.cancelLoadJob(job, CancelType.UNKNOWN, "cancelled because system is during upgrade, user should resubmit it"); + load.cancelLoadJob(job, CancelType.UNKNOWN, + "cancelled because system is during upgrade, user should resubmit it"); return; } // check if the job is aborted in transaction manager @@ -323,7 +329,8 @@ public class LoadChecker extends MasterDaemon { // check transaction state Load load = Catalog.getCurrentCatalog().getLoadInstance(); GlobalTransactionMgr globalTransactionMgr = Catalog.getCurrentGlobalTransactionMgr(); - TransactionState transactionState = globalTransactionMgr.getTransactionState(job.getDbId(), job.getTransactionId()); + TransactionState transactionState = globalTransactionMgr.getTransactionState( + job.getDbId(), job.getTransactionId()); List tabletCommitInfos = new ArrayList(); // when be finish load task, fe will update job's finish task info, use lock here to prevent // concurrent problems @@ -332,7 +339,8 @@ public class LoadChecker extends MasterDaemon { try { MetaLockUtils.writeLockTablesOrMetaException(tables); } catch (UserException e) { - load.cancelLoadJob(job, CancelType.LOAD_RUN_FAIL, "table does not exist. dbId: " + job.getDbId() + ", err: " + e.getMessage()); + load.cancelLoadJob(job, CancelType.LOAD_RUN_FAIL, "table does not exist. 
dbId: " + + job.getDbId() + ", err: " + e.getMessage()); return; } try { @@ -393,7 +401,8 @@ public class LoadChecker extends MasterDaemon { return null; } - short replicationNum = table.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum(); + short replicationNum = table.getPartitionInfo() + .getReplicaAllocation(partition.getId()).getTotalReplicaNum(); // check all indices (base + roll up (not include ROLLUP state index)) List indices = partition.getMaterializedIndices(IndexExtState.ALL); for (MaterializedIndex index : indices) { @@ -417,8 +426,8 @@ public class LoadChecker extends MasterDaemon { for (Tablet tablet : index.getTablets()) { // the job is submitted before rollup finished and try to finish after rollup finished // then the job's tablet load info does not contain the new rollup index's tablet - // not deal with this case because the finished replica will include new rollup index's replica - // and check it at commit time + // not deal with this case because the finished replica will include new rollup index's + // replica and check it at commit time if (tabletLoadInfos.containsKey(tablet.getId())) { jobTotalTablets.add(tablet.getId()); } @@ -430,11 +439,11 @@ public class LoadChecker extends MasterDaemon { long tabletId = tablet.getId(); // get tablet file path TabletLoadInfo tabletLoadInfo = tabletLoadInfos.get(tabletId); - // the tabletinfo maybe null, in this case: + // the tablet info maybe null, in this case: // the job is submitted before rollup finished and try to finish after rollup finished // then the job's tablet load info does not contain the new rollup index's tablet - // not deal with this case because the finished replica will include new rollup index's replica - // and check it at commit time + // not deal with this case because the finished replica will include new rollup index's + // replica and check it at commit time if (tabletLoadInfo == null) { continue; } @@ -457,15 +466,12 @@ public class LoadChecker extends MasterDaemon { // check replica state and replica version if (!tabletLoadInfo.isReplicaSent(replicaId)) { PushTask pushTask = new PushTask(job.getResourceInfo(), - replica.getBackendId(), db.getId(), tableId, - partitionId, indexId, - tabletId, replicaId, schemaHash, - -1, filePath, fileSize, 0, - job.getId(), type, job.getConditions(), - needDecompress, job.getPriority(), - TTaskType.REALTIME_PUSH, - job.getTransactionId(), - Catalog.getCurrentGlobalTransactionMgr().getTransactionIDGenerator().getNextTransactionId()); + replica.getBackendId(), db.getId(), tableId, partitionId, indexId, + tabletId, replicaId, schemaHash, -1, filePath, fileSize, 0, + job.getId(), type, job.getConditions(), needDecompress, job.getPriority(), + TTaskType.REALTIME_PUSH, job.getTransactionId(), + Catalog.getCurrentGlobalTransactionMgr() + .getTransactionIDGenerator().getNextTransactionId()); pushTask.setIsSchemaChanging(autoLoadToTwoTablet); if (AgentTaskQueue.addTask(pushTask)) { batchTask.addTask(pushTask); @@ -473,8 +479,10 @@ public class LoadChecker extends MasterDaemon { tabletLoadInfo.addSentReplica(replicaId); } } - // yiguolei: wait here to check if quorum finished, should exclude the replica that is in clone state - // for example, there are 3 replicas, A normal B normal C clone, if A and C finish loading, we should not commit + // yiguolei: wait here to check if quorum finished, + // should exclude the replica that is in clone state + // for example, there are 3 replicas, A normal B normal C clone, + // if A and C finish loading, we 
should not commit // because commit will failed, then the job is failed if (job.isReplicaFinished(replicaId) && replica.getLastFailedVersion() < 0) { finishedReplicas.add(replicaId); diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/LoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/LoadJob.java index aec316a6ad..689c13ca5b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/LoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/LoadJob.java @@ -648,7 +648,8 @@ public class LoadJob implements Writable { + ", loadFinishTimeMs=" + loadFinishTimeMs + ", failMsg=" + failMsg + ", etlJobType=" + etlJobType + ", etlJobInfo=" + etlJobInfo + ", priority=" + priority + ", transactionId=" + transactionId + ", quorumFinishTimeMs=" + quorumFinishTimeMs - + ", unfinished tablets=[" + this.unfinishedTablets.subList(0, Math.min(3, this.unfinishedTablets.size())) + "]" + + ", unfinished tablets=[" + this.unfinishedTablets.subList( + 0, Math.min(3, this.unfinishedTablets.size())) + "]" + "]"; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/PartitionLoadInfo.java b/fe/fe-core/src/main/java/org/apache/doris/load/PartitionLoadInfo.java index 4b7ce8a1f2..fd12adbdae 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/PartitionLoadInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/PartitionLoadInfo.java @@ -79,6 +79,7 @@ public class PartitionLoadInfo implements Writable { out.writeBoolean(needLoad); } + public void readFields(DataInput in) throws IOException { version = in.readLong(); // Versionhash useless just for compatible diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/Source.java b/fe/fe-core/src/main/java/org/apache/doris/load/Source.java index 7115e4c9ce..a901503489 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/Source.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/Source.java @@ -170,6 +170,7 @@ public class Source implements Writable { } } } + public void readFields(DataInput in) throws IOException { int count = 0; diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/StreamLoadRecord.java b/fe/fe-core/src/main/java/org/apache/doris/load/StreamLoadRecord.java index 3c4bf8dd6f..9ce323c914 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/StreamLoadRecord.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/StreamLoadRecord.java @@ -43,7 +43,9 @@ public class StreamLoadRecord { private String finishTime; - public StreamLoadRecord(String label, String db, String table, String user, String clientIp, String status, String message, String url, String totalRows, String loadedRows, String filteredRows, String unselectedRows, String loadBytes, String startTime, String finishTime) { + public StreamLoadRecord(String label, String db, String table, String user, String clientIp, String status, + String message, String url, String totalRows, String loadedRows, String filteredRows, String unselectedRows, + String loadBytes, String startTime, String finishTime) { this.label = label; this.db = db; this.table = table; diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/StreamLoadRecordMgr.java b/fe/fe-core/src/main/java/org/apache/doris/load/StreamLoadRecordMgr.java index 961723c0ef..f5edc57077 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/StreamLoadRecordMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/StreamLoadRecordMgr.java @@ -122,7 +122,8 @@ public class StreamLoadRecordMgr extends MasterDaemon { long deDbId = record.getDbId(); Map 
labelToStreamLoadRecord = dbIdToLabelToStreamLoadRecord.get(deDbId); - Iterator> iterRecord = labelToStreamLoadRecord.entrySet().iterator(); + Iterator> iterRecord + = labelToStreamLoadRecord.entrySet().iterator(); while (iterRecord.hasNext()) { String labelInMap = iterRecord.next().getKey(); if (labelInMap.equals(deLabel)) { @@ -150,7 +151,8 @@ public class StreamLoadRecordMgr extends MasterDaemon { return new ArrayList<>(streamLoadRecordHeap); } - public List> getStreamLoadRecordByDb(long dbId, String label, boolean accurateMatch, StreamLoadState state) { + public List> getStreamLoadRecordByDb( + long dbId, String label, boolean accurateMatch, StreamLoadState state) { LinkedList> streamLoadRecords = new LinkedList>(); readLock(); @@ -348,7 +350,8 @@ public class StreamLoadRecordMgr extends MasterDaemon { for (Backend backend : backends.values()) { if (beIdToLastStreamLoad.containsKey(backend.getId())) { long lastStreamLoadTime = beIdToLastStreamLoad.get(backend.getId()); - LOG.info("Replay stream load bdbje. backend: {}, last stream load time: {}", backend.getHost(), lastStreamLoadTime); + LOG.info("Replay stream load bdbje. backend: {}, last stream load time: {}", + backend.getHost(), lastStreamLoadTime); backend.setLastStreamLoadTime(lastStreamLoadTime); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadJob.java index 5e02b6891c..6e642cb1a3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadJob.java @@ -111,7 +111,8 @@ public class BrokerLoadJob extends BulkLoadJob { } /** - * Situation1: When attachment is instance of BrokerPendingTaskAttachment, this method is called by broker pending task. + * Situation1: When attachment is instance of BrokerPendingTaskAttachment, + * this method is called by broker pending task. * LoadLoadingTask will be created after BrokerPendingTask is finished. * Situation2: When attachment is instance of BrokerLoadingTaskAttachment, this method is called by LoadLoadingTask. * CommitTxn will be called after all of LoadingTasks are finished. @@ -183,13 +184,15 @@ public class BrokerLoadJob extends BulkLoadJob { } private void createLoadingTask(Database db, BrokerPendingTaskAttachment attachment) throws UserException { - List
<Table> tableList = db.getTablesOnIdOrderOrThrowException(Lists.newArrayList(fileGroupAggInfo.getAllTableIds())); + List<Table>
tableList = db.getTablesOnIdOrderOrThrowException( + Lists.newArrayList(fileGroupAggInfo.getAllTableIds())); // divide job into broker loading task by table List newLoadingTasks = Lists.newArrayList(); this.jobProfile = new RuntimeProfile("BrokerLoadJob " + id + ". " + label); MetaLockUtils.readLockTables(tableList); try { - for (Map.Entry> entry : fileGroupAggInfo.getAggKeyToFileGroups().entrySet()) { + for (Map.Entry> entry + : fileGroupAggInfo.getAggKeyToFileGroups().entrySet()) { FileGroupAggKey aggKey = entry.getKey(); List brokerFileGroups = entry.getValue(); long tableId = aggKey.getTableId(); @@ -212,7 +215,8 @@ public class BrokerLoadJob extends BulkLoadJob { // load id will be added to loadStatistic when executing this task // save all related tables and rollups in transaction state - TransactionState txnState = Catalog.getCurrentGlobalTransactionMgr().getTransactionState(dbId, transactionId); + TransactionState txnState = Catalog.getCurrentGlobalTransactionMgr() + .getTransactionState(dbId, transactionId); if (txnState == null) { throw new UserException("txn does not exist: " + transactionId); } @@ -267,8 +271,8 @@ public class BrokerLoadJob extends BulkLoadJob { // check data quality if (!checkDataQuality()) { - cancelJobWithoutCheck(new FailMsg(FailMsg.CancelType.ETL_QUALITY_UNSATISFIED, DataQualityException.QUALITY_FAIL_MSG), - true, true); + cancelJobWithoutCheck(new FailMsg(FailMsg.CancelType.ETL_QUALITY_UNSATISFIED, + DataQualityException.QUALITY_FAIL_MSG), true, true); return; } Database db = null; @@ -315,7 +319,8 @@ public class BrokerLoadJob extends BulkLoadJob { summaryProfile.addInfoString(ProfileManager.QUERY_ID, String.valueOf(id)); summaryProfile.addInfoString(ProfileManager.START_TIME, TimeUtils.longToTimeString(createTimestamp)); summaryProfile.addInfoString(ProfileManager.END_TIME, TimeUtils.longToTimeString(finishTimestamp)); - summaryProfile.addInfoString(ProfileManager.TOTAL_TIME, DebugUtil.getPrettyStringMs(finishTimestamp - createTimestamp)); + summaryProfile.addInfoString(ProfileManager.TOTAL_TIME, + DebugUtil.getPrettyStringMs(finishTimestamp - createTimestamp)); summaryProfile.addInfoString(ProfileManager.QUERY_TYPE, "Load"); summaryProfile.addInfoString(ProfileManager.QUERY_STATE, "N/A"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadPendingTask.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadPendingTask.java index 2602ef837a..b295a8f31e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadPendingTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadPendingTask.java @@ -132,7 +132,8 @@ public class BrokerLoadPendingTask extends LoadTask { totalFileNum += tableTotalFileNum; ((BrokerPendingTaskAttachment) attachment).addFileStatus(aggKey, fileStatusList); LOG.info("get {} files to be loaded. total size: {}. 
cost: {} ms, job: {}", - tableTotalFileNum, tableTotalFileSize, (System.currentTimeMillis() - start), callback.getCallbackId()); + tableTotalFileNum, tableTotalFileSize, (System.currentTimeMillis() - start), + callback.getCallbackId()); } ((BrokerLoadJob) callback).setLoadFileInfo(totalFileNum, totalFileSize); diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BulkLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BulkLoadJob.java index 07e8c6aa35..6f4ad21fc1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BulkLoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BulkLoadJob.java @@ -92,7 +92,8 @@ public abstract class BulkLoadJob extends LoadJob { super(jobType); } - public BulkLoadJob(EtlJobType jobType, long dbId, String label, OriginStatement originStmt, UserIdentity userInfo) throws MetaNotFoundException { + public BulkLoadJob(EtlJobType jobType, long dbId, String label, + OriginStatement originStmt, UserIdentity userInfo) throws MetaNotFoundException { super(jobType, dbId, label); this.originStmt = originStmt; this.authorizationInfo = gatherAuthInfo(); @@ -169,7 +170,8 @@ public abstract class BulkLoadJob extends LoadJob { public Set getTableNamesForShow() { Optional db = Catalog.getCurrentCatalog().getDb(dbId); return fileGroupAggInfo.getAllTableIds().stream() - .map(tableId -> db.flatMap(d -> d.getTable(tableId)).map(Table::getName).orElse(String.valueOf(tableId))) + .map(tableId -> db.flatMap(d -> d.getTable(tableId)) + .map(Table::getName).orElse(String.valueOf(tableId))) .collect(Collectors.toSet()); } @@ -334,7 +336,8 @@ public abstract class BulkLoadJob extends LoadJob { } String filePathListName = StringUtils.join(filePathList, ","); String brokerUserName = getBrokerUserName(); - AuditEvent auditEvent = new LoadAuditEvent.AuditEventBuilder().setEventType(AuditEvent.EventType.LOAD_SUCCEED) + AuditEvent auditEvent = new LoadAuditEvent.AuditEventBuilder() + .setEventType(AuditEvent.EventType.LOAD_SUCCEED) .setJobId(id).setLabel(label).setLoadType(jobType.name()).setDb(dbName).setTableList(tableListName) .setFilePathList(filePathListName).setBrokerUser(brokerUserName).setTimestamp(createTimestamp) .setLoadStartTime(loadStartTimestamp).setLoadFinishTime(finishTimestamp) diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/ConfigFile.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/ConfigFile.java index 17384a72cb..26c6f0bf92 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/ConfigFile.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/ConfigFile.java @@ -23,6 +23,7 @@ import org.apache.doris.common.LoadException; // Each time before running the yarn command, we need to check that the // config file exists in the specified path, and if not, create them. 
public interface ConfigFile { - public void createFile() throws LoadException; - public String getFilePath(); + void createFile() throws LoadException; + + String getFilePath(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/InsertLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/InsertLoadJob.java index e11868e26a..d638c0d9d6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/InsertLoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/InsertLoadJob.java @@ -48,8 +48,8 @@ public class InsertLoadJob extends LoadJob { super(EtlJobType.INSERT); } - public InsertLoadJob(String label, long transactionId, long dbId, long tableId, long createTimestamp, String failMsg, - String trackingUrl) throws MetaNotFoundException { + public InsertLoadJob(String label, long transactionId, long dbId, long tableId, + long createTimestamp, String failMsg, String trackingUrl) throws MetaNotFoundException { super(EtlJobType.INSERT, dbId, label); this.tableId = tableId; this.transactionId = transactionId; diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJob.java index a39a79195a..845aa35ec3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJob.java @@ -359,7 +359,8 @@ public abstract class LoadJob extends AbstractTxnStateChangeCallback implements if (ConnectContext.get() != null) { jobProperties.put(LoadStmt.EXEC_MEM_LIMIT, ConnectContext.get().getSessionVariable().getMaxExecMemByte()); jobProperties.put(LoadStmt.TIMEZONE, ConnectContext.get().getSessionVariable().getTimeZone()); - jobProperties.put(LoadStmt.SEND_BATCH_PARALLELISM, ConnectContext.get().getSessionVariable().getSendBatchParallelism()); + jobProperties.put(LoadStmt.SEND_BATCH_PARALLELISM, + ConnectContext.get().getSessionVariable().getSendBatchParallelism()); } if (properties == null || properties.isEmpty()) { @@ -414,7 +415,8 @@ public abstract class LoadJob extends AbstractTxnStateChangeCallback implements isJobTypeRead = jobTypeRead; } - public void beginTxn() throws LabelAlreadyUsedException, BeginTransactionException, AnalysisException, DuplicatedRequestException, QuotaExceedException, MetaNotFoundException { + public void beginTxn() throws LabelAlreadyUsedException, BeginTransactionException, + AnalysisException, DuplicatedRequestException, QuotaExceedException, MetaNotFoundException { } /** diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadLoadingTask.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadLoadingTask.java index ae8aafa9cf..900d3db1c8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadLoadingTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadLoadingTask.java @@ -101,10 +101,11 @@ public class LoadLoadingTask extends LoadTask { this.singleTabletLoadPerSink = singleTabletLoadPerSink; } - public void init(TUniqueId loadId, List> fileStatusList, int fileNum, UserIdentity userInfo) throws UserException { + public void init(TUniqueId loadId, List> fileStatusList, + int fileNum, UserIdentity userInfo) throws UserException { this.loadId = loadId; - planner = new LoadingTaskPlanner(callback.getCallbackId(), txnId, db.getId(), table, - brokerDesc, fileGroups, strictMode, timezone, this.timeoutS, this.loadParallelism, this.sendBatchParallelism, userInfo); + planner = new 
LoadingTaskPlanner(callback.getCallbackId(), txnId, db.getId(), table, brokerDesc, fileGroups, + strictMode, timezone, this.timeoutS, this.loadParallelism, this.sendBatchParallelism, userInfo); planner.plan(loadId, fileStatusList, fileNum); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkEtlJobHandler.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkEtlJobHandler.java index 0e55cc999f..eb7ac15595 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkEtlJobHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkEtlJobHandler.java @@ -77,7 +77,8 @@ public class SparkEtlJobHandler { private static final String YARN_KILL_CMD = "%s --config %s application -kill %s"; public void submitEtlJob(long loadJobId, String loadLabel, EtlJobConfig etlJobConfig, SparkResource resource, - BrokerDesc brokerDesc, SparkLoadAppHandle handle, SparkPendingTaskAttachment attachment) throws LoadException { + BrokerDesc brokerDesc, SparkLoadAppHandle handle, SparkPendingTaskAttachment attachment) + throws LoadException { // delete outputPath deleteEtlOutputPath(etlJobConfig.outputPath, brokerDesc); @@ -262,7 +263,8 @@ public class SparkEtlJobHandler { return status; } - public void killEtlJob(SparkLoadAppHandle handle, String appId, long loadJobId, SparkResource resource) throws LoadException { + public void killEtlJob(SparkLoadAppHandle handle, String appId, + long loadJobId, SparkResource resource) throws LoadException { if (resource.isYarnMaster()) { // The appId may be empty when the load job is in PENDING phase. This is because the appId is // parsed from the spark launcher process's output (spark launcher process submit job and then @@ -287,7 +289,8 @@ public class SparkEtlJobHandler { LOG.info("yarn application -kill {}, output: {}", appId, result.getStdout()); if (result.getReturnCode() != 0) { String stderr = result.getStderr(); - LOG.warn("yarn application kill failed. app id: {}, load job id: {}, msg: {}", appId, loadJobId, stderr); + LOG.warn("yarn application kill failed. 
app id: {}, load job id: {}, msg: {}", + appId, loadJobId, stderr); } } else { if (handle != null) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLauncherMonitor.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLauncherMonitor.java index fc1d2a4303..012b7ad6d5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLauncherMonitor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLauncherMonitor.java @@ -140,13 +140,14 @@ public class SparkLauncherMonitor { } } - LOG.debug("spark appId that handle get is {}, state: {}", handle.getAppId(), handle.getState().toString()); + LOG.debug("spark appId that handle get is {}, state: {}", + handle.getAppId(), handle.getState().toString()); switch (newState) { case UNKNOWN: case CONNECTED: case SUBMITTED: - // If the app stays in the UNKNOWN/CONNECTED/SUBMITTED state for more than submitTimeoutMs - // stop monitoring and kill the process + // If the app stays in the UNKNOWN/CONNECTED/SUBMITTED state + // for more than submitTimeoutMs stop monitoring and kill the process if (System.currentTimeMillis() - startTime > submitTimeoutMs) { isStop = true; handle.kill(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadJob.java index cbc8f64f1f..7b1397c859 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadJob.java @@ -107,7 +107,8 @@ import java.util.Set; /** * There are 4 steps in SparkLoadJob: * Step1: SparkLoadPendingTask will be created by unprotectedExecuteJob method and submit spark etl job. - * Step2: LoadEtlChecker will check spark etl job status periodically and send push tasks to be when spark etl job is finished. + * Step2: LoadEtlChecker will check spark etl job status periodically + * and send push tasks to be when spark etl job is finished. * Step3: LoadLoadingChecker will check loading status periodically and commit transaction when push tasks are finished. * Step4: PublishVersionDaemon will send publish version tasks to be and finish transaction. */ @@ -294,7 +295,8 @@ public class SparkLoadJob extends BulkLoadJob { // get etl status SparkEtlJobHandler handler = new SparkEtlJobHandler(); - EtlStatus status = handler.getEtlJobStatus(sparkLoadAppHandle, appId, id, etlOutputPath, sparkResource, brokerDesc); + EtlStatus status = handler.getEtlJobStatus(sparkLoadAppHandle, + appId, id, etlOutputPath, sparkResource, brokerDesc); writeLock(); try { switch (status.getState()) { @@ -361,7 +363,8 @@ public class SparkLoadJob extends BulkLoadJob { unprotectedPrepareLoadingInfos(); } - private void unprotectedUpdateToLoadingState(EtlStatus etlStatus, Map filePathToSize) throws LoadException { + private void unprotectedUpdateToLoadingState(EtlStatus etlStatus, + Map filePathToSize) throws LoadException { try { for (Map.Entry entry : filePathToSize.entrySet()) { String filePath = entry.getKey(); @@ -427,13 +430,15 @@ public class SparkLoadJob extends BulkLoadJob { AgentBatchTask batchTask = new AgentBatchTask(); boolean hasLoadPartitions = false; Set totalTablets = Sets.newHashSet(); - List
<Table> tableList = db.getTablesOnIdOrderOrThrowException(Lists.newArrayList(tableToLoadPartitions.keySet())); + List<Table>
tableList = db.getTablesOnIdOrderOrThrowException( + Lists.newArrayList(tableToLoadPartitions.keySet())); MetaLockUtils.readLockTables(tableList); try { writeLock(); try { // check state is still loading. If state is cancelled or finished, return. - // if state is cancelled or finished and not return, this would throw all partitions have no load data exception, + // if state is cancelled or finished and not return, + // this would throw all partitions have no load data exception, // because tableToLoadPartitions was already cleaned up, if (state != JobState.LOADING) { LOG.warn("job state is not loading. job id: {}, state: {}", id, state); @@ -451,7 +456,8 @@ public class SparkLoadJob extends BulkLoadJob { } hasLoadPartitions = true; - int quorumReplicaNum = olapTable.getPartitionInfo().getReplicaAllocation(partitionId).getTotalReplicaNum() / 2 + 1; + int quorumReplicaNum = olapTable.getPartitionInfo() + .getReplicaAllocation(partitionId).getTotalReplicaNum() / 2 + 1; List indexes = partition.getMaterializedIndices(IndexExtState.ALL); for (MaterializedIndex index : indexes) { @@ -478,7 +484,8 @@ public class SparkLoadJob extends BulkLoadJob { PushBrokerReaderParams params = getPushBrokerReaderParams(olapTable, indexId); // deep copy TBrokerScanRange because filePath and fileSize will be updated // in different tablet push task - TBrokerScanRange tBrokerScanRange = new TBrokerScanRange(params.tBrokerScanRange); + TBrokerScanRange tBrokerScanRange + = new TBrokerScanRange(params.tBrokerScanRange); // update filePath fileSize TBrokerRangeDesc tBrokerRangeDesc = tBrokerScanRange.getRanges().get(0); tBrokerRangeDesc.setPath(""); @@ -503,11 +510,11 @@ public class SparkLoadJob extends BulkLoadJob { tBrokerRangeDesc.path, tBrokerRangeDesc.file_size); - PushTask pushTask = new PushTask(backendId, dbId, olapTable.getId(), partitionId, - indexId, tabletId, replicaId, schemaHash, - 0, id, TPushType.LOAD_V2, - TPriority.NORMAL, transactionId, taskSignature, - tBrokerScanRange, params.tDescriptorTable); + PushTask pushTask = new PushTask( + backendId, dbId, olapTable.getId(), partitionId, indexId, tabletId, + replicaId, schemaHash, 0, id, TPushType.LOAD_V2, + TPriority.NORMAL, transactionId, taskSignature, + tBrokerScanRange, params.tDescriptorTable); if (AgentTaskQueue.addTask(pushTask)) { batchTask.addTask(pushTask); if (!tabletToSentReplicaPushTask.containsKey(tabletId)) { @@ -632,7 +639,8 @@ public class SparkLoadJob extends BulkLoadJob { .add("msg", "Load job try to commit txn") .build()); Database db = getDb(); - List
<Table> tableList = db.getTablesOnIdOrderOrThrowException(Lists.newArrayList(tableToLoadPartitions.keySet())); + List<Table>
tableList = db.getTablesOnIdOrderOrThrowException( + Lists.newArrayList(tableToLoadPartitions.keySet())); MetaLockUtils.writeLockTablesOrMetaException(tableList); try { Catalog.getCurrentGlobalTransactionMgr().commitTransaction( @@ -824,9 +832,10 @@ public class SparkLoadJob extends BulkLoadJob { @SerializedName(value = "tabletMetaToFileInfo") private Map> tabletMetaToFileInfo; - public SparkLoadJobStateUpdateInfo(long jobId, JobState state, long transactionId, SparkLoadAppHandle sparkLoadAppHandle, - long etlStartTimestamp, String appId, String etlOutputPath, long loadStartTimestamp, - Map> tabletMetaToFileInfo) { + public SparkLoadJobStateUpdateInfo( + long jobId, JobState state, long transactionId, SparkLoadAppHandle sparkLoadAppHandle, + long etlStartTimestamp, String appId, String etlOutputPath, long loadStartTimestamp, + Map> tabletMetaToFileInfo) { super(jobId, state, transactionId, loadStartTimestamp); this.sparkLoadAppHandle = sparkLoadAppHandle; this.etlStartTimestamp = etlStartTimestamp; diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadPendingTask.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadPendingTask.java index 4429f9e059..33590743ab 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadPendingTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadPendingTask.java @@ -120,7 +120,8 @@ public class SparkLoadPendingTask extends LoadTask { // handler submit etl job SparkEtlJobHandler handler = new SparkEtlJobHandler(); - handler.submitEtlJob(loadJobId, loadLabel, etlJobConfig, resource, brokerDesc, sparkLoadAppHandle, sparkAttachment); + handler.submitEtlJob(loadJobId, loadLabel, etlJobConfig, resource, + brokerDesc, sparkLoadAppHandle, sparkAttachment); LOG.info("submit spark etl job success. load job id: {}, attachment: {}", loadJobId, sparkAttachment); } @@ -130,7 +131,8 @@ public class SparkLoadPendingTask extends LoadTask { } private void createEtlJobConf() throws LoadException { - Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, s -> new LoadException("db does not exist. id: " + s)); + Database db = Catalog.getCurrentCatalog().getDbOrException( + dbId, s -> new LoadException("db does not exist. id: " + s)); Map tables = Maps.newHashMap(); Map> tableIdToPartitionIds = Maps.newHashMap(); @@ -149,7 +151,8 @@ public class SparkLoadPendingTask extends LoadTask { FileGroupAggKey aggKey = entry.getKey(); long tableId = aggKey.getTableId(); - OlapTable table = (OlapTable) db.getTableOrException(tableId, s -> new LoadException("table does not exist. id: " + s)); + OlapTable table = (OlapTable) db.getTableOrException( + tableId, s -> new LoadException("table does not exist. id: " + s)); EtlTable etlTable = null; if (tables.containsKey(tableId)) { @@ -197,7 +200,8 @@ public class SparkLoadPendingTask extends LoadTask { continue; } - OlapTable table = (OlapTable) db.getTableOrException(tableId, s -> new LoadException("table does not exist. id: " + s)); + OlapTable table = (OlapTable) db.getTableOrException( + tableId, s -> new LoadException("table does not exist. id: " + s)); table.readLock(); try { Set partitionIds; @@ -476,7 +480,8 @@ public class SparkLoadPendingTask extends LoadTask { Map hiveTableProperties = Maps.newHashMap(); if (fileGroup.isLoadFromTable()) { long srcTableId = fileGroup.getSrcTableId(); - HiveTable srcHiveTable = (HiveTable) db.getTableOrException(srcTableId, s -> new LoadException("table does not exist. 
id: " + s)); + HiveTable srcHiveTable = (HiveTable) db.getTableOrException( + srcTableId, s -> new LoadException("table does not exist. id: " + s)); hiveDbTableName = srcHiveTable.getHiveDbTable(); hiveTableProperties.putAll(srcHiveTable.getHiveProperties()); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkRepository.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkRepository.java index 4476e0d7a6..753a1fc192 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkRepository.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkRepository.java @@ -199,7 +199,8 @@ public class SparkRepository { String destFilePath = remoteArchivePath + PATH_DELIMITER + assemblyFileName(PREFIX_LIB, md5sum, SPARK_2X, ".zip"); rename(origFilePath, destFilePath); - currentArchive.libraries.add(new SparkLibrary(destFilePath, md5sum, SparkLibrary.LibType.SPARK2X, size)); + currentArchive.libraries.add(new SparkLibrary( + destFilePath, md5sum, SparkLibrary.LibType.SPARK2X, size)); } // CHECKSTYLE IGNORE THIS LINE LOG.info("finished to upload archive to repository, currentDppVersion={}, path={}", currentDppVersion, remoteArchivePath); @@ -342,8 +343,8 @@ public class SparkRepository { public SparkLibrary getDppLibrary() { SparkLibrary result = null; - Optional library = libraries.stream(). - filter(lib -> lib.libType == SparkLibrary.LibType.DPP).findFirst(); + Optional library = libraries.stream() + .filter(lib -> lib.libType == SparkLibrary.LibType.DPP).findFirst(); if (library.isPresent()) { result = library.get(); } @@ -352,8 +353,8 @@ public class SparkRepository { public SparkLibrary getSpark2xLibrary() { SparkLibrary result = null; - Optional library = libraries.stream(). - filter(lib -> lib.libType == SparkLibrary.LibType.SPARK2X).findFirst(); + Optional library = libraries.stream() + .filter(lib -> lib.libType == SparkLibrary.LibType.SPARK2X).findFirst(); if (library.isPresent()) { result = library.get(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaRoutineLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaRoutineLoadJob.java index de74087351..f1401e5884 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaRoutineLoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaRoutineLoadJob.java @@ -182,9 +182,10 @@ public class KafkaRoutineLoadJob extends RoutineLoadJob { } // This is mainly for compatibility. In the previous version, we directly obtained the value of the - // KAFKA_DEFAULT_OFFSETS attribute. In the new version, we support date time as the value of KAFKA_DEFAULT_OFFSETS, - // and this attribute will be converted into a timestamp during the analyzing phase, thus losing some information. - // So we use KAFKA_ORIGIN_DEFAULT_OFFSETS to store the original datetime formatted KAFKA_DEFAULT_OFFSETS value + // KAFKA_DEFAULT_OFFSETS attribute. In the new version, we support date time as the value of + // KAFKA_DEFAULT_OFFSETS, and this attribute will be converted into a timestamp during the analyzing phase, + // thus losing some information. 
So we use KAFKA_ORIGIN_DEFAULT_OFFSETS to store the original datetime + // formatted KAFKA_DEFAULT_OFFSETS value if (convertedCustomProperties.containsKey(CreateRoutineLoadStmt.KAFKA_ORIGIN_DEFAULT_OFFSETS)) { kafkaDefaultOffSet = convertedCustomProperties.remove(CreateRoutineLoadStmt.KAFKA_ORIGIN_DEFAULT_OFFSETS); } else if (convertedCustomProperties.containsKey(CreateRoutineLoadStmt.KAFKA_DEFAULT_OFFSETS)) { @@ -435,7 +436,8 @@ public class KafkaRoutineLoadJob extends RoutineLoadJob { if (!((KafkaProgress) progress).containsPartition(kafkaPartition)) { List newPartitions = Lists.newArrayList(); newPartitions.add(kafkaPartition); - List> newPartitionsOffsets = getNewPartitionOffsetsFromDefaultOffset(newPartitions); + List> newPartitionsOffsets + = getNewPartitionOffsetsFromDefaultOffset(newPartitions); Preconditions.checkState(newPartitionsOffsets.size() == 1); for (Pair partitionOffset : newPartitionsOffsets) { ((KafkaProgress) progress).addPartitionOffset(partitionOffset); @@ -455,7 +457,8 @@ public class KafkaRoutineLoadJob extends RoutineLoadJob { } } - private List> getNewPartitionOffsetsFromDefaultOffset(List newPartitions) throws UserException { + private List> getNewPartitionOffsetsFromDefaultOffset(List newPartitions) + throws UserException { List> partitionOffsets = Lists.newArrayList(); // get default offset long beginOffset = convertedDefaultOffsetToLong(); @@ -464,7 +467,8 @@ public class KafkaRoutineLoadJob extends RoutineLoadJob { } if (isOffsetForTimes()) { try { - partitionOffsets = KafkaUtil.getOffsetsForTimes(this.brokerList, this.topic, convertedCustomProperties, partitionOffsets); + partitionOffsets = KafkaUtil.getOffsetsForTimes(this.brokerList, + this.topic, convertedCustomProperties, partitionOffsets); } catch (LoadException e) { LOG.warn(new LogBuilder(LogKey.ROUTINE_LOAD_JOB, id) .add("partition:timestamp", Joiner.on(",").join(partitionOffsets)) diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaTaskInfo.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaTaskInfo.java index e22b42a210..d522bd9ebe 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaTaskInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaTaskInfo.java @@ -53,7 +53,8 @@ public class KafkaTaskInfo extends RoutineLoadTaskInfo { // Last fetched and cached latest partition offsets. 
private List<Pair<Integer, Long>> cachedPartitionWithLatestOffsets = Lists.newArrayList(); - public KafkaTaskInfo(UUID id, long jobId, String clusterName, long timeoutMs, Map<Integer, Long> partitionIdToOffset) { + public KafkaTaskInfo(UUID id, long jobId, String clusterName, + long timeoutMs, Map<Integer, Long> partitionIdToOffset) { super(id, jobId, clusterName, timeoutMs); this.partitionIdToOffset = partitionIdToOffset; } @@ -83,7 +84,8 @@ public class KafkaTaskInfo extends RoutineLoadTaskInfo { tRoutineLoadTask.setDb(database.getFullName()); tRoutineLoadTask.setTbl(tbl.getName()); // label = job_name+job_id+task_id+txn_id - String label = Joiner.on("-").join(routineLoadJob.getName(), routineLoadJob.getId(), DebugUtil.printId(id), txnId); + String label = Joiner.on("-").join(routineLoadJob.getName(), + routineLoadJob.getId(), DebugUtil.printId(id), txnId); tRoutineLoadTask.setLabel(label); tRoutineLoadTask.setAuthCode(routineLoadJob.getAuthCode()); TKafkaLoadInfo tKafkaLoadInfo = new TKafkaLoadInfo(); @@ -125,6 +127,7 @@ public class KafkaTaskInfo extends RoutineLoadTaskInfo { tPlanFragment.getOutputSink().getOlapTableSink().setTxnId(txnId); return tExecPlanFragmentParams; } + // implement method for compatibility public String getHeaderType() { return ""; diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java index eb956f43dd..940715c096 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java @@ -110,6 +110,7 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl public static final boolean DEFAULT_LOAD_TO_SINGLE_TABLET = false; protected static final String STAR_STRING = "*"; + /* +-----------------+ fe schedule job | NEED_SCHEDULE | user resume job @@ -731,9 +732,8 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl // if this is a replay thread, the update state should already be replayed by OP_CHANGE_ROUTINE_LOAD_JOB if (!isReplay) { // remove all of task in jobs and change job state to paused - updateState(JobState.PAUSED, - new ErrorReason(InternalErrorCode.TOO_MANY_FAILURE_ROWS_ERR, "current error rows of job is more than max error num"), - isReplay); + updateState(JobState.PAUSED, new ErrorReason(InternalErrorCode.TOO_MANY_FAILURE_ROWS_ERR, + "current error rows of job is more than max error num"), isReplay); } } @@ -758,9 +758,8 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl .build()); if (!isReplay) { // remove all of task in jobs and change job state to paused - updateState(JobState.PAUSED, - new ErrorReason(InternalErrorCode.TOO_MANY_FAILURE_ROWS_ERR, "current error rows is more than max error num"), - isReplay); + updateState(JobState.PAUSED, new ErrorReason(InternalErrorCode.TOO_MANY_FAILURE_ROWS_ERR, + "current error rows is more than max error num"), isReplay); } // reset currentTotalNum and currentErrorNum this.jobStatistic.currentErrorRows = 0; @@ -897,7 +896,8 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl String errmsg = "be " + taskBeId + " commit task failed " + txnState.getLabel() + " with error " + e.getMessage() + " while transaction " + txnState.getTransactionId() + " has been committed"; - updateState(JobState.PAUSED, new ErrorReason(InternalErrorCode.INTERNAL_ERR, errmsg), false /* not replay */); + updateState(JobState.PAUSED, + new
ErrorReason(InternalErrorCode.INTERNAL_ERR, errmsg), false /* not replay */); } finally { writeUnlock(); LOG.debug("unlock write lock of routine load job after committed: {}", id); @@ -919,8 +919,8 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl public void afterVisible(TransactionState txnState, boolean txnOperated) { if (!txnOperated) { String msg = String.format( - "should not happen, we find that txnOperated if false when handling afterVisble. job id: %d, txn id: %d", - id, txnState.getTransactionId()); + "should not happen, we find that txnOperated if false when handling afterVisble." + + " job id: %d, txn id: %d", id, txnState.getTransactionId()); LOG.warn(msg); // print a log and return. // if this really happen, the job will be blocked, and this task can be seen by @@ -946,18 +946,22 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl // The routineLoadTaskInfoList will be cleared when job being paused. // So the task can not be found here. // This is a normal case, we just print a log here to observe. - LOG.info("Can not find task with transaction {} after visible, job: {}", txnState.getTransactionId(), id); + LOG.info("Can not find task with transaction {} after visible, job: {}", + txnState.getTransactionId(), id); return; } RoutineLoadTaskInfo routineLoadTaskInfo = routineLoadTaskInfoOptional.get(); if (routineLoadTaskInfo.getTxnStatus() != TransactionStatus.COMMITTED) { // TODO(cmy): Normally, this should not happen. But for safe reason, just pause the job String msg = String.format( - "should not happen, we find that task %s is not COMMITTED when handling afterVisble. job id: %d, txn id: %d, txn status: %s", - DebugUtil.printId(routineLoadTaskInfo.getId()), id, txnState.getTransactionId(), routineLoadTaskInfo.getTxnStatus().name()); + "should not happen, we find that task %s is not COMMITTED when handling afterVisble." + + " job id: %d, txn id: %d, txn status: %s", + DebugUtil.printId(routineLoadTaskInfo.getId()), id, txnState.getTransactionId(), + routineLoadTaskInfo.getTxnStatus().name()); LOG.warn(msg); try { - updateState(JobState.PAUSED, new ErrorReason(InternalErrorCode.IMPOSSIBLE_ERROR_ERR, msg), false /* not replay */); + updateState(JobState.PAUSED, + new ErrorReason(InternalErrorCode.IMPOSSIBLE_ERROR_ERR, msg), false /* not replay */); } catch (UserException e) { // should not happen LOG.warn("failed to pause the job {}. 
this should not happen", id, e); @@ -1023,16 +1027,19 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl // TODO(ml): use previous be id depend on change reason } // step2: commit task , update progress, maybe create a new task - executeTaskOnTxnStatusChanged(routineLoadTaskInfo, txnState, TransactionStatus.ABORTED, txnStatusChangeReason); + executeTaskOnTxnStatusChanged(routineLoadTaskInfo, txnState, + TransactionStatus.ABORTED, txnStatusChangeReason); } } catch (Exception e) { - String msg = "be " + taskBeId + " abort task " + txnState.getLabel() + " failed with error " + e.getMessage(); + String msg = "be " + taskBeId + " abort task " + txnState.getLabel() + + " failed with error " + e.getMessage(); updateState(JobState.PAUSED, new ErrorReason(InternalErrorCode.TASKS_ABORT_ERR, msg), false /* not replay */); LOG.warn(new LogBuilder(LogKey.ROUTINE_LOAD_JOB, id) - .add("task_id", txnState.getLabel()) - .add("error_msg", "change job state to paused when task has been aborted with error " + e.getMessage()) - .build(), e); + .add("task_id", txnState.getLabel()) + .add("error_msg", "change job state to paused" + + " when task has been aborted with error " + e.getMessage()) + .build(), e); } finally { writeUnlock(); LOG.debug("unlock write lock of routine load job after aborted: {}", id); @@ -1051,9 +1058,11 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl // check task exists or not before call method private void executeTaskOnTxnStatusChanged(RoutineLoadTaskInfo routineLoadTaskInfo, TransactionState txnState, - TransactionStatus txnStatus, TransactionState.TxnStatusChangeReason txnStatusChangeReason) throws UserException { + TransactionStatus txnStatus, TransactionState.TxnStatusChangeReason txnStatusChangeReason) + throws UserException { // step0: get progress from transaction state - RLTaskTxnCommitAttachment rlTaskTxnCommitAttachment = (RLTaskTxnCommitAttachment) txnState.getTxnCommitAttachment(); + RLTaskTxnCommitAttachment rlTaskTxnCommitAttachment + = (RLTaskTxnCommitAttachment) txnState.getTxnCommitAttachment(); if (rlTaskTxnCommitAttachment == null) { if (LOG.isDebugEnabled()) { LOG.debug(new LogBuilder(LogKey.ROUTINE_LOAD_TASK, routineLoadTaskInfo.getId()) @@ -1080,7 +1089,8 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl Catalog.getCurrentCatalog().getRoutineLoadTaskScheduler().addTaskInQueue(newRoutineLoadTaskInfo); } else if (txnStatus == TransactionStatus.COMMITTED) { // this txn is just COMMITTED, create new task when the this txn is VISIBLE - // or if publish version task has some error, there will be lots of COMMITTED txns in GlobalTransactionMgr + // or if publish version task has some error, + // there will be lots of COMMITTED txns in GlobalTransactionMgr } } } @@ -1221,8 +1231,8 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl writeLock(); try { if (!state.isFinalState()) { - unprotectUpdateState(JobState.CANCELLED, - new ErrorReason(InternalErrorCode.TABLE_ERR, "table does not exist"), false /* not replay */); + unprotectUpdateState(JobState.CANCELLED, new ErrorReason(InternalErrorCode.TABLE_ERR, + "table does not exist"), false /* not replay */); } return; } finally { @@ -1320,7 +1330,8 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl List> rows = Lists.newArrayList(); routineLoadTaskInfoList.forEach(entity -> { try { - 
entity.setTxnStatus(Catalog.getCurrentCatalog().getGlobalTransactionMgr().getDatabaseTransactionMgr(dbId).getTransactionState(entity.getTxnId()).getTransactionStatus()); + entity.setTxnStatus(Catalog.getCurrentCatalog().getGlobalTransactionMgr() + .getDatabaseTransactionMgr(dbId).getTransactionState(entity.getTxnId()).getTransactionStatus()); rows.add(entity.getTaskShowInfo()); } catch (AnalysisException e) { LOG.warn("failed to setTxnStatus db: {}, txnId: {}, err: {}", dbId, entity.getTxnId(), e.getMessage()); @@ -1446,8 +1457,10 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl private String jobPropertiesToJsonString() { Map<String, String> jobProperties = Maps.newHashMap(); - jobProperties.put("partitions", partitions == null ? STAR_STRING : Joiner.on(",").join(partitions.getPartitionNames())); - jobProperties.put("columnToColumnExpr", columnDescs == null ? STAR_STRING : Joiner.on(",").join(columnDescs.descs)); + jobProperties.put("partitions", partitions == null + ? STAR_STRING : Joiner.on(",").join(partitions.getPartitionNames())); + jobProperties.put("columnToColumnExpr", columnDescs == null + ? STAR_STRING : Joiner.on(",").join(columnDescs.descs)); jobProperties.put("precedingFilter", precedingFilter == null ? STAR_STRING : precedingFilter.toSql()); jobProperties.put("whereExpr", whereExpr == null ? STAR_STRING : whereExpr.toSql()); if (getFormat().equalsIgnoreCase("json")) { @@ -1633,9 +1646,9 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl } } - abstract public void modifyProperties(AlterRoutineLoadStmt stmt) throws UserException; + public abstract void modifyProperties(AlterRoutineLoadStmt stmt) throws UserException; - abstract public void replayModifyProperties(AlterRoutineLoadJobOperationLog log); + public abstract void replayModifyProperties(AlterRoutineLoadJobOperationLog log); // for ALTER ROUTINE LOAD protected void modifyCommonJobProperties(Map<String, String> jobProperties) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadManager.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadManager.java index cae82d0c24..24e1b19037 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadManager.java @@ -114,7 +114,8 @@ public class RoutineLoadManager implements Writable { // return the map of be id -> running tasks num private Map<Long, Integer> getBeCurrentTasksNumMap() { Map<Long, Integer> beCurrentTaskNumMap = Maps.newHashMap(); - for (RoutineLoadJob routineLoadJob : getRoutineLoadJobByState(Sets.newHashSet(RoutineLoadJob.JobState.RUNNING))) { + for (RoutineLoadJob routineLoadJob : getRoutineLoadJobByState( + Sets.newHashSet(RoutineLoadJob.JobState.RUNNING))) { Map<Long, Integer> jobBeCurrentTasksNumMap = routineLoadJob.getBeCurrentTasksNumMap(); for (Map.Entry<Long, Integer> entry : jobBeCurrentTasksNumMap.entrySet()) { if (beCurrentTaskNumMap.containsKey(entry.getKey())) { @@ -165,7 +166,8 @@ public class RoutineLoadManager implements Writable { + dbName); } if (getRoutineLoadJobByState(Sets.newHashSet(RoutineLoadJob.JobState.NEED_SCHEDULE, - RoutineLoadJob.JobState.RUNNING, RoutineLoadJob.JobState.PAUSED)).size() > Config.max_routine_load_job_num) { + RoutineLoadJob.JobState.RUNNING, RoutineLoadJob.JobState.PAUSED)).size() + > Config.max_routine_load_job_num) { throw new DdlException("There are more than " + Config.max_routine_load_job_num + " routine load jobs are running.
exceed limit."); } @@ -436,7 +438,8 @@ public class RoutineLoadManager implements Writable { if (!beIdToMaxConcurrentTasks.containsKey(previousBeId)) { idleTaskNum = 0; } else if (beIdToConcurrentTasks.containsKey(previousBeId)) { - idleTaskNum = beIdToMaxConcurrentTasks.get(previousBeId) - beIdToConcurrentTasks.get(previousBeId); + idleTaskNum = beIdToMaxConcurrentTasks.get(previousBeId) + - beIdToConcurrentTasks.get(previousBeId); } else { idleTaskNum = beIdToMaxConcurrentTasks.get(previousBeId); } @@ -547,7 +550,8 @@ public class RoutineLoadManager implements Writable { if includeHistory is false, filter not running job in result else return all of result */ - public List getJob(String dbFullName, String jobName, boolean includeHistory, PatternMatcher matcher) + public List getJob(String dbFullName, String jobName, + boolean includeHistory, PatternMatcher matcher) throws MetaNotFoundException { Preconditions.checkArgument(jobName == null || matcher == null, "jobName and matcher cannot be not null at the same time"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadScheduler.java index e6662427ba..e734fa68f8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadScheduler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadScheduler.java @@ -102,10 +102,11 @@ public class RoutineLoadScheduler extends MasterDaemon { if (errorJobState != null) { LOG.warn(new LogBuilder(LogKey.ROUTINE_LOAD_JOB, routineLoadJob.getId()) - .add("current_state", routineLoadJob.getState()) - .add("desired_state", errorJobState) - .add("warn_msg", "failed to scheduler job, change job state to desired_state with error reason " + userException.getMessage()) - .build(), userException); + .add("current_state", routineLoadJob.getState()) + .add("desired_state", errorJobState) + .add("warn_msg", "failed to scheduler job," + + " change job state to desired_state with error reason " + userException.getMessage()) + .build(), userException); try { ErrorReason reason = new ErrorReason(userException.getErrorCode(), userException.getMessage()); routineLoadJob.updateState(errorJobState, reason, false); diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskInfo.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskInfo.java index ade282f207..c159e83d74 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskInfo.java @@ -168,8 +168,8 @@ public abstract class RoutineLoadTaskInfo { RoutineLoadJob routineLoadJob = routineLoadManager.getJob(jobId); try { MetricRepo.COUNTER_LOAD_ADD.increase(1L); - txnId = Catalog.getCurrentGlobalTransactionMgr().beginTransaction( - routineLoadJob.getDbId(), Lists.newArrayList(routineLoadJob.getTableId()), DebugUtil.printId(id), null, + txnId = Catalog.getCurrentGlobalTransactionMgr().beginTransaction(routineLoadJob.getDbId(), + Lists.newArrayList(routineLoadJob.getTableId()), DebugUtil.printId(id), null, new TxnCoordinator(TxnSourceType.FE, FrontendOptions.getLocalHostAddress()), TransactionState.LoadJobSourceType.ROUTINE_LOAD_TASK, routineLoadJob.getId(), timeoutMs / 1000); diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskScheduler.java 
b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskScheduler.java index 589f72dc8f..f25831d145 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskScheduler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskScheduler.java @@ -100,7 +100,8 @@ public class RoutineLoadTaskScheduler extends MasterDaemon { try { // This step will be blocked when queue is empty RoutineLoadTaskInfo routineLoadTaskInfo = needScheduleTasksQueue.take(); - if (System.currentTimeMillis() - routineLoadTaskInfo.getLastScheduledTime() < routineLoadTaskInfo.getTimeoutMs()) { + if (System.currentTimeMillis() - routineLoadTaskInfo.getLastScheduledTime() + < routineLoadTaskInfo.getTimeoutMs()) { // try to delay scheduling this task for 'timeout', to void too many failure needScheduleTasksQueue.put(routineLoadTaskInfo); return; @@ -114,7 +115,8 @@ public class RoutineLoadTaskScheduler extends MasterDaemon { private void scheduleOneTask(RoutineLoadTaskInfo routineLoadTaskInfo) throws Exception { routineLoadTaskInfo.setLastScheduledTime(System.currentTimeMillis()); - LOG.debug("schedule routine load task info {} for job {}", routineLoadTaskInfo.id, routineLoadTaskInfo.getJobId()); + LOG.debug("schedule routine load task info {} for job {}", + routineLoadTaskInfo.id, routineLoadTaskInfo.getJobId()); // check if task has been abandoned if (!routineLoadManager.checkTaskInJob(routineLoadTaskInfo)) { // task has been abandoned while renew task has been added in queue @@ -140,13 +142,14 @@ public class RoutineLoadTaskScheduler extends MasterDaemon { return; } } catch (UserException e) { - routineLoadManager.getJob(routineLoadTaskInfo.getJobId()). - updateState(JobState.PAUSED, new ErrorReason(e.getErrorCode(), e.getMessage()), false); + routineLoadManager.getJob(routineLoadTaskInfo.getJobId()) + .updateState(JobState.PAUSED, new ErrorReason(e.getErrorCode(), e.getMessage()), false); throw e; } catch (Exception e) { // exception happens, PAUSE the job routineLoadManager.getJob(routineLoadTaskInfo.getJobId()).updateState(JobState.PAUSED, - new ErrorReason(InternalErrorCode.CREATE_TASKS_ERR, "failed to allocate task: " + e.getMessage()), false); + new ErrorReason(InternalErrorCode.CREATE_TASKS_ERR, + "failed to allocate task: " + e.getMessage()), false); LOG.warn(new LogBuilder(LogKey.ROUTINE_LOAD_TASK, routineLoadTaskInfo.getId()).add("error_msg", "allocate task encounter exception: " + e.getMessage()).build(), e); throw e; @@ -206,7 +209,8 @@ public class RoutineLoadTaskScheduler extends MasterDaemon { (System.currentTimeMillis() - startTime), routineLoadTaskInfo.getJobId()); if (tRoutineLoadTask.isSetKafkaLoadInfo()) { LOG.debug("send kafka routine load task {} with partition offset: {}, job: {}", - tRoutineLoadTask.label, tRoutineLoadTask.kafka_load_info.partition_begin_offset, tRoutineLoadTask.getJobId()); + tRoutineLoadTask.label, tRoutineLoadTask.kafka_load_info.partition_begin_offset, + tRoutineLoadTask.getJobId()); } } catch (LoadException e) { // submit task failed (such as TOO_MANY_TASKS error), but txn has already begun. 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncChannel.java b/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncChannel.java index 19f49447a5..cc0bf3b4ef 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncChannel.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncChannel.java @@ -44,7 +44,8 @@ public class SyncChannel { protected String srcTable; protected SyncChannelCallback callback; - public SyncChannel(long id, SyncJob syncJob, Database db, OlapTable table, List columns, String srcDataBase, String srcTable) { + public SyncChannel(long id, SyncJob syncJob, Database db, OlapTable table, + List columns, String srcDataBase, String srcTable) { this.id = id; this.jobId = syncJob.getId(); this.db = db; diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncChannelHandle.java b/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncChannelHandle.java index ded37f2dde..4c85fe49fc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncChannelHandle.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncChannelHandle.java @@ -25,7 +25,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; public class SyncChannelHandle implements SyncChannelCallback { - private final static Logger LOG = LogManager.getLogger(SyncChannelHandle.class); + private static final Logger LOG = LogManager.getLogger(SyncChannelHandle.class); // channel id -> dummy value(-1) private MarkedCountDownLatch latch; diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncJob.java index 399026a37c..cf0094fa5d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncJob.java @@ -294,7 +294,8 @@ public abstract class SyncJob implements Writable { @SerializedName(value = "failMsg") protected SyncFailMsg failMsg; - public SyncJobUpdateStateInfo(long id, JobState jobState, long lastStartTimeMs, long lastStopTimeMs, long finishTimeMs, SyncFailMsg failMsg) { + public SyncJobUpdateStateInfo(long id, JobState jobState, long lastStartTimeMs, + long lastStopTimeMs, long finishTimeMs, SyncFailMsg failMsg) { this.id = id; this.jobState = jobState; this.lastStartTimeMs = lastStartTimeMs; diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalSyncChannel.java b/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalSyncChannel.java index cb101e6891..a63ca2ad73 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalSyncChannel.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalSyncChannel.java @@ -76,7 +76,8 @@ public class CanalSyncChannel extends SyncChannel { private Data batchBuffer; private InsertStreamTxnExecutor txnExecutor; - public CanalSyncChannel(long id, SyncJob syncJob, Database db, OlapTable table, List columns, String srcDataBase, String srcTable) { + public CanalSyncChannel(long id, SyncJob syncJob, Database db, OlapTable table, List columns, + String srcDataBase, String srcTable) { super(id, syncJob, db, table, columns, srcDataBase, srcTable); this.index = SyncTaskPool.getNextIndex(); this.batchBuffer = new Data<>(); @@ -84,11 +85,12 @@ public class CanalSyncChannel extends SyncChannel { this.timeoutSecond = -1L; } - private final static class SendTask extends SyncTask { + private static final class SendTask extends SyncTask { private final InsertStreamTxnExecutor 
executor; private final Data rows; - public SendTask(long signature, int index, SyncChannelCallback callback, Data rows, InsertStreamTxnExecutor executor) { + public SendTask(long signature, int index, SyncChannelCallback callback, Data rows, + InsertStreamTxnExecutor executor) { super(signature, index, callback); this.executor = executor; this.rows = rows; @@ -101,7 +103,7 @@ public class CanalSyncChannel extends SyncChannel { } } - private final static class EOFTask extends SyncTask { + private static final class EOFTask extends SyncTask { public EOFTask(long signature, int index, SyncChannelCallback callback) { super(signature, index, callback); @@ -201,7 +203,8 @@ public class CanalSyncChannel extends SyncChannel { LOG.info("abort txn in channel {}, table: {}, txn id: {}, last batch: {}, reason: {}", id, targetTable, txnExecutor.getTxnId(), lastBatchId, reason); } catch (TException e) { - LOG.warn("Failed to abort txn in channel {}, table: {}, txn: {}, msg:{}", id, targetTable, txnExecutor.getTxnId(), e.getMessage()); + LOG.warn("Failed to abort txn in channel {}, table: {}, txn: {}, msg:{}", + id, targetTable, txnExecutor.getTxnId(), e.getMessage()); throw e; } catch (InterruptedException | ExecutionException | TimeoutException e) { LOG.warn("Error occur while waiting abort txn response in channel {}, table: {}, txn: {}, msg:{}", @@ -225,7 +228,8 @@ public class CanalSyncChannel extends SyncChannel { LOG.info("commit txn in channel {}, table: {}, txn id: {}, last batch: {}", id, targetTable, txnExecutor.getTxnId(), lastBatchId); } catch (TException e) { - LOG.warn("Failed to commit txn in channel {}, table: {}, txn: {}, msg:{}", id, targetTable, txnExecutor.getTxnId(), e.getMessage()); + LOG.warn("Failed to commit txn in channel {}, table: {}, txn: {}, msg:{}", + id, targetTable, txnExecutor.getTxnId(), e.getMessage()); throw e; } catch (InterruptedException | ExecutionException | TimeoutException e) { LOG.warn("Error occur while waiting commit txn return in channel {}, table: {}, txn: {}, msg:{}", diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalSyncJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalSyncJob.java index 71c004ba19..3b440f783e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalSyncJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalSyncJob.java @@ -49,13 +49,13 @@ import java.util.Map; public class CanalSyncJob extends SyncJob { private static final Logger LOG = LogManager.getLogger(CanalSyncJob.class); - protected final static String CANAL_SERVER_IP = "canal.server.ip"; - protected final static String CANAL_SERVER_PORT = "canal.server.port"; - protected final static String CANAL_DESTINATION = "canal.destination"; - protected final static String CANAL_USERNAME = "canal.username"; - protected final static String CANAL_PASSWORD = "canal.password"; - protected final static String CANAL_BATCH_SIZE = "canal.batchSize"; - protected final static String CANAL_DEBUG = "canal.debug"; + protected static final String CANAL_SERVER_IP = "canal.server.ip"; + protected static final String CANAL_SERVER_PORT = "canal.server.port"; + protected static final String CANAL_DESTINATION = "canal.destination"; + protected static final String CANAL_USERNAME = "canal.username"; + protected static final String CANAL_PASSWORD = "canal.password"; + protected static final String CANAL_BATCH_SIZE = "canal.batchSize"; + protected static final String CANAL_DEBUG = "canal.debug"; @SerializedName(value = 
"remote") private final CanalDestination remote; diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalUtils.java b/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalUtils.java index caced40563..b5e9dc70c0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/CanalUtils.java @@ -53,7 +53,8 @@ public class CanalUtils { context_format += "| End : [{}] " + SEP; context_format += "----------------------------------------------------------" + SEP; row_format = SEP - + "----------------> binlog[{}:{}] , name[{},{}] , eventType : {} , executeTime : {}({}) , gtid : ({}) , delay : {} ms" + + "----------------> binlog[{}:{}] , name[{},{}] , eventType : {} ," + + " executeTime : {}({}) , gtid : ({}) , delay : {} ms" + SEP; transaction_format = SEP + "================> binlog[{}:{}] , executeTime : {}({}) , gtid : ({}) , delay : {}ms" @@ -68,7 +69,8 @@ public class CanalUtils { String startPosition = buildPositionForDump(entries.get(0)); String endPosition = buildPositionForDump(entries.get(entries.size() - 1)); SimpleDateFormat format = new SimpleDateFormat(DATE_FORMAT); - logger.info(context_format, dataEvents.getId(), entries.size(), dataEvents.getMemSize(), format.format(new Date()), startPosition, endPosition); + logger.info(context_format, dataEvents.getId(), entries.size(), dataEvents.getMemSize(), + format.format(new Date()), startPosition, endPosition); } public static void printSummary(Message message, int size, long memsize) { @@ -79,7 +81,8 @@ public class CanalUtils { String startPosition = buildPositionForDump(message.getEntries().get(0)); String endPosition = buildPositionForDump(message.getEntries().get(message.getEntries().size() - 1)); SimpleDateFormat format = new SimpleDateFormat(DATE_FORMAT); - logger.info(context_format, message.getId(), size, memsize, format.format(new Date()), startPosition, endPosition); + logger.info(context_format, message.getId(), size, memsize, + format.format(new Date()), startPosition, endPosition); } public static String buildPositionForDump(CanalEntry.Entry entry) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/SyncCanalClient.java b/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/SyncCanalClient.java index 11ef95bebb..d41def0465 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/SyncCanalClient.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/sync/canal/SyncCanalClient.java @@ -59,7 +59,8 @@ public class SyncCanalClient { int batchSize, boolean debug, String filter) { this.connector = connector; this.consumer = new CanalSyncDataConsumer(syncJob, connector, getLock, debug); - this.receiver = new CanalSyncDataReceiver(syncJob, connector, destination, filter, consumer, batchSize, getLock); + this.receiver = new CanalSyncDataReceiver(syncJob, connector, destination, + filter, consumer, batchSize, getLock); this.idToChannels = Maps.newHashMap(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/sync/position/EntryPosition.java b/fe/fe-core/src/main/java/org/apache/doris/load/sync/position/EntryPosition.java index 9486713280..52300b4816 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/sync/position/EntryPosition.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/sync/position/EntryPosition.java @@ -117,6 +117,7 @@ public class EntryPosition { return true; } } + @Override public String toString() { return "[" + journalName + ":" + 
position + "]"; @@ -163,7 +164,8 @@ public class EntryPosition { public static boolean checkPosition(CanalEntry.Header header, EntryPosition entryPosition) { boolean result = entryPosition.getExecuteTime().equals(header.getExecuteTime()); - boolean isEmptyPosition = (Strings.isNullOrEmpty(entryPosition.getJournalName()) && entryPosition.getPosition() == null); + boolean isEmptyPosition = (Strings.isNullOrEmpty(entryPosition.getJournalName()) + && entryPosition.getPosition() == null); if (!isEmptyPosition) { result &= entryPosition.getPosition().equals(header.getLogfileOffset()); if (result) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/update/UpdateManager.java b/fe/fe-core/src/main/java/org/apache/doris/load/update/UpdateManager.java index 3199df7928..697709fd4c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/update/UpdateManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/update/UpdateManager.java @@ -55,7 +55,8 @@ public class UpdateManager { private UpdateStmtExecutor addUpdateExecutor(UpdateStmt updateStmt) throws AnalysisException, DdlException { writeLock(); try { - List currentUpdateList = tableIdToCurrentUpdate.get(updateStmt.getTargetTable().getId()); + List currentUpdateList + = tableIdToCurrentUpdate.get(updateStmt.getTargetTable().getId()); if (!enableConcurrentUpdate && currentUpdateList != null && currentUpdateList.size() > 0) { throw new DdlException("There is an update operation in progress for the current table. " + "Please try again later, or set enable_concurrent_update in fe.conf to true"); @@ -75,7 +76,8 @@ public class UpdateManager { private void removeUpdateExecutor(UpdateStmtExecutor updateStmtExecutor) { writeLock(); try { - List currentUpdateList = tableIdToCurrentUpdate.get(updateStmtExecutor.getTargetTableId()); + List currentUpdateList + = tableIdToCurrentUpdate.get(updateStmtExecutor.getTargetTableId()); if (currentUpdateList == null) { return; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/update/UpdateStmtExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/load/update/UpdateStmtExecutor.java index 779dcb043c..400aab727b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/update/UpdateStmtExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/update/UpdateStmtExecutor.java @@ -99,7 +99,8 @@ public class UpdateStmtExecutor { LOG.warn("failed to plan update stmt, query id:{}", DebugUtil.printId(queryId), e); Catalog.getCurrentGlobalTransactionMgr().abortTransaction(dbId, txnId, e.getMessage()); QeProcessorImpl.INSTANCE.unregisterQuery(queryId); - throw new DdlException("failed to plan update stmt, query id: " + DebugUtil.printId(queryId) + ", err: " + e.getMessage()); + throw new DdlException("failed to plan update stmt, query id: " + + DebugUtil.printId(queryId) + ", err: " + e.getMessage()); } finally { targetTable.readUnlock(); } @@ -114,7 +115,8 @@ public class UpdateStmtExecutor { } catch (Throwable e) { LOG.warn("failed to execute update stmt, query id:{}", DebugUtil.printId(queryId), e); Catalog.getCurrentGlobalTransactionMgr().abortTransaction(dbId, txnId, e.getMessage()); - throw new DdlException("failed to execute update stmt, query id: " + DebugUtil.printId(queryId) + ", err: " + e.getMessage()); + throw new DdlException("failed to execute update stmt, query id: " + + DebugUtil.printId(queryId) + ", err: " + e.getMessage()); } finally { QeProcessorImpl.INSTANCE.unregisterQuery(queryId); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/master/Checkpoint.java 
b/fe/fe-core/src/main/java/org/apache/doris/master/Checkpoint.java index 98ec95c148..1ebe4c1dca 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/master/Checkpoint.java +++ b/fe/fe-core/src/main/java/org/apache/doris/master/Checkpoint.java @@ -91,7 +91,8 @@ public class Checkpoint extends MasterDaemon { imageVersion = storage.getLatestImageSeq(); // get max finalized journal id checkPointVersion = editLog.getFinalizedJournalId(); - LOG.info("last checkpoint journal id: {}, current finalized journal id: {}", imageVersion, checkPointVersion); + LOG.info("last checkpoint journal id: {}, current finalized journal id: {}", + imageVersion, checkPointVersion); if (imageVersion >= checkPointVersion) { return; } @@ -122,8 +123,8 @@ public class Checkpoint extends MasterDaemon { catalog.loadImage(imageDir); catalog.replayJournal(checkPointVersion); if (catalog.getReplayedJournalId() != checkPointVersion) { - throw new CheckpointException(String.format("checkpoint version should be %d, actual replayed journal id is %d", - checkPointVersion, catalog.getReplayedJournalId())); + throw new CheckpointException(String.format("checkpoint version should be %d," + + " actual replayed journal id is %d", checkPointVersion, catalog.getReplayedJournalId())); } catalog.fixBugAfterMetadataReplayed(false); latestImageFilePath = catalog.saveImage(); @@ -249,8 +250,8 @@ public class Checkpoint extends MasterDaemon { minOtherNodesJournalId = id; } } catch (Throwable e) { - throw new CheckpointException(String.format("Exception when getting current replayed journal id. host=%s, port=%d", - host, port), e); + throw new CheckpointException(String.format("Exception when getting current replayed" + + " journal id. host=%s, port=%d", host, port), e); } finally { if (conn != null) { conn.disconnect(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/master/MasterImpl.java b/fe/fe-core/src/main/java/org/apache/doris/master/MasterImpl.java index 8f7cc064d9..2810156003 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/master/MasterImpl.java +++ b/fe/fe-core/src/main/java/org/apache/doris/master/MasterImpl.java @@ -171,7 +171,8 @@ public class MasterImpl { break; case SCHEMA_CHANGE: case ROLLUP: - throw new RuntimeException("Schema change and rollup job is not used any more, use alter task instead"); + throw new RuntimeException("Schema change and rollup job is not used any more," + + " use alter task instead"); case CLONE: finishClone(task, request); break; @@ -234,7 +235,8 @@ public class MasterImpl { try { CreateReplicaTask createReplicaTask = (CreateReplicaTask) task; if (request.getTaskStatus().getStatusCode() != TStatusCode.OK) { - createReplicaTask.countDownToZero(task.getBackendId() + ": " + request.getTaskStatus().getErrorMsgs().toString()); + createReplicaTask.countDownToZero(task.getBackendId() + ": " + + request.getTaskStatus().getErrorMsgs().toString()); } else { long tabletId = createReplicaTask.getTabletId(); @@ -245,17 +247,19 @@ public class MasterImpl { if (createReplicaTask.isRecoverTask()) { /** - * This create replica task may be generated by recovery(See comment of Config.recover_with_empty_tablet) + * This create replica task may be generated by recovery + * (See comment of Config.recover_with_empty_tablet) * So we set replica back to good. */ replica.setBad(false); - LOG.info("finish recover create replica task. set replica to good. tablet {}, replica {}, backend {}", - tabletId, task.getBackendId(), replica.getId()); + LOG.info("finish recover create replica task. set replica to good. 
tablet {}," + + " replica {}, backend {}", tabletId, task.getBackendId(), replica.getId()); } } // this should be called before 'countDownLatch()' - Catalog.getCurrentSystemInfo().updateBackendReportVersion(task.getBackendId(), request.getReportVersion(), task.getDbId(), task.getTableId()); + Catalog.getCurrentSystemInfo().updateBackendReportVersion(task.getBackendId(), + request.getReportVersion(), task.getDbId(), task.getTableId()); createReplicaTask.countDownLatch(task.getBackendId(), task.getSignature()); LOG.debug("finish create replica. tablet id: {}, be: {}, report version: {}", @@ -273,10 +277,12 @@ public class MasterImpl { try { UpdateTabletMetaInfoTask tabletTask = (UpdateTabletMetaInfoTask) task; if (request.getTaskStatus().getStatusCode() != TStatusCode.OK) { - tabletTask.countDownToZero(task.getBackendId() + ": " + request.getTaskStatus().getErrorMsgs().toString()); + tabletTask.countDownToZero(task.getBackendId() + ": " + + request.getTaskStatus().getErrorMsgs().toString()); } else { tabletTask.countDownLatch(task.getBackendId(), tabletTask.getTablets()); - LOG.debug("finish update tablet meta. tablet id: {}, be: {}", tabletTask.getTablets(), task.getBackendId()); + LOG.debug("finish update tablet meta. tablet id: {}, be: {}", + tabletTask.getTablets(), task.getBackendId()); } } finally { AgentTaskQueue.removeTask(task.getBackendId(), TTaskType.UPDATE_TABLET_META_INFO, task.getSignature()); @@ -366,7 +372,8 @@ public class MasterImpl { TabletMeta tabletMeta = tabletMetaList.get(i); checkReplica(finishTabletInfos.get(i), tabletMeta); long tabletId = tabletIds.get(i); - Replica replica = findRelatedReplica(olapTable, partition, backendId, tabletId, tabletMeta.getIndexId()); + Replica replica = findRelatedReplica( + olapTable, partition, backendId, tabletId, tabletMeta.getIndexId()); // if the replica is under schema change, could not find the replica with aim schema hash if (replica != null) { job.addFinishedReplica(replica); @@ -389,7 +396,8 @@ public class MasterImpl { } } else if (pushTask.getPushType() == TPushType.LOAD_V2) { long loadJobId = pushTask.getLoadJobId(); - org.apache.doris.load.loadv2.LoadJob job = Catalog.getCurrentCatalog().getLoadManager().getLoadJob(loadJobId); + org.apache.doris.load.loadv2.LoadJob job + = Catalog.getCurrentCatalog().getLoadManager().getLoadJob(loadJobId); if (job == null) { throw new MetaNotFoundException("cannot find load job, job[" + loadJobId + "]"); } @@ -397,7 +405,8 @@ public class MasterImpl { TabletMeta tabletMeta = tabletMetaList.get(i); checkReplica(finishTabletInfos.get(i), tabletMeta); long tabletId = tabletIds.get(i); - Replica replica = findRelatedReplica(olapTable, partition, backendId, tabletId, tabletMeta.getIndexId()); + Replica replica = findRelatedReplica( + olapTable, partition, backendId, tabletId, tabletMeta.getIndexId()); // if the replica is under schema change, could not find the replica with aim schema hash if (replica != null) { ((SparkLoadJob) job).addFinishedReplica(replica.getId(), pushTabletId, backendId); @@ -606,7 +615,8 @@ public class MasterImpl { if (request.isSetReportVersion()) { // report version is required. here we check if set, for compatibility. 
long reportVersion = request.getReportVersion(); - Catalog.getCurrentSystemInfo().updateBackendReportVersion(task.getBackendId(), reportVersion, task.getDbId(), task.getTableId()); + Catalog.getCurrentSystemInfo().updateBackendReportVersion( + task.getBackendId(), reportVersion, task.getDbId(), task.getTableId()); } PublishVersionTask publishVersionTask = (PublishVersionTask) task; diff --git a/fe/fe-core/src/main/java/org/apache/doris/master/PartitionInMemoryInfoCollector.java b/fe/fe-core/src/main/java/org/apache/doris/master/PartitionInMemoryInfoCollector.java index 5eebbb6c61..78e2c7ffa6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/master/PartitionInMemoryInfoCollector.java +++ b/fe/fe-core/src/main/java/org/apache/doris/master/PartitionInMemoryInfoCollector.java @@ -79,7 +79,8 @@ public class PartitionInMemoryInfoCollector extends MasterDaemon { } } if (LOG.isDebugEnabled()) { - LOG.debug("Update database[{}] partition in memory info, partitionInMemoryCount : {}.", db.getFullName(), partitionInMemoryCount); + LOG.debug("Update database[{}] partition in memory info, partitionInMemoryCount : {}.", + db.getFullName(), partitionInMemoryCount); } } catch (Exception e) { LOG.warn("Update database[" + db.getFullName() + "] partition in memory info failed", e); diff --git a/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java b/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java index 7fb54271d1..1f5e146bd4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java @@ -188,7 +188,8 @@ public class ReportHandler extends Daemon { if (currentSize > Config.report_queue_size) { LOG.warn("the report queue size exceeds the limit: {}. current: {}", Config.report_queue_size, currentSize); throw new Exception( - "the report queue size exceeds the limit: " + Config.report_queue_size + ". current: " + currentSize); + "the report queue size exceeds the limit: " + + Config.report_queue_size + ". 
current: " + currentSize); } reportQueue.put(reportTask); } @@ -454,7 +455,8 @@ public class ReportHandler extends Daemon { long backendVersion = -1L; long rowCount = -1L; long dataSize = -1L; - // schema change maybe successfully in fe, but not inform be, then be will report two schema hash + // schema change maybe successfully in fe, but not inform be, + // then be will report two schema hash // just select the dest schema hash for (TTabletInfo tabletInfo : backendTablets.get(tabletId).getTabletInfos()) { if (tabletInfo.getSchemaHash() == schemaHash) { @@ -542,7 +544,8 @@ public class ReportHandler extends Daemon { continue; } - short replicationNum = olapTable.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum(); + short replicationNum = olapTable.getPartitionInfo() + .getReplicaAllocation(partition.getId()).getTotalReplicaNum(); long indexId = tabletMeta.getIndexId(); MaterializedIndex index = partition.getIndex(indexId); @@ -566,7 +569,8 @@ public class ReportHandler extends Daemon { } // check report version again - long currentBackendReportVersion = Catalog.getCurrentSystemInfo().getBackendReportVersion(backendId); + long currentBackendReportVersion = Catalog.getCurrentSystemInfo() + .getBackendReportVersion(backendId); if (backendReportVersion < currentBackendReportVersion) { continue; } @@ -614,8 +618,8 @@ public class ReportHandler extends Daemon { tabletId, replica.getId(), backendId); BackendTabletsInfo tabletsInfo = new BackendTabletsInfo(backendId); tabletsInfo.setBad(true); - ReplicaPersistInfo replicaPersistInfo = ReplicaPersistInfo.createForReport( - dbId, tableId, partitionId, indexId, tabletId, backendId, replica.getId()); + ReplicaPersistInfo replicaPersistInfo = ReplicaPersistInfo.createForReport(dbId, + tableId, partitionId, indexId, tabletId, backendId, replica.getId()); tabletsInfo.addReplicaInfo(replicaPersistInfo); Catalog.getCurrentCatalog().getEditLog().logBackendTabletsInfo(tabletsInfo); } @@ -753,14 +757,15 @@ public class ReportHandler extends Daemon { AgentTaskExecutor.submit(batchTask); } - private static void handleRepublishVersionInfo(Map> transactionsToPublish, - long backendId) { + private static void handleRepublishVersionInfo( + Map> transactionsToPublish, long backendId) { AgentBatchTask batchTask = new AgentBatchTask(); long createPublishVersionTaskTime = System.currentTimeMillis(); for (Long dbId : transactionsToPublish.keySet()) { ListMultimap map = transactionsToPublish.get(dbId); for (long txnId : map.keySet()) { - PublishVersionTask task = new PublishVersionTask(backendId, txnId, dbId, map.get(txnId), createPublishVersionTaskTime); + PublishVersionTask task = new PublishVersionTask(backendId, txnId, dbId, + map.get(txnId), createPublishVersionTaskTime); batchTask.addTask(task); // add to AgentTaskQueue for handling finish report. AgentTaskQueue.addTask(task); @@ -947,7 +952,8 @@ public class ReportHandler extends Daemon { // The init partition's version in FE is (1-0), the tablet's version in BE is (2-0) // If the BE report version is (2-0) and partition's version is (1-0), // we should add the tablet to meta. - // But old version doris is too old, we should not consider them any more, just throw exception in this case + // But old version doris is too old, we should not consider them any more, + // just throw exception in this case if (version > partition.getNextVersion() - 1) { // this is a fatal error throw new MetaNotFoundException("version is invalid. 
tablet[" + version + "]" diff --git a/fe/fe-core/src/main/java/org/apache/doris/metric/CounterMetric.java b/fe/fe-core/src/main/java/org/apache/doris/metric/CounterMetric.java index ecc96a490a..21b1090f2b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/metric/CounterMetric.java +++ b/fe/fe-core/src/main/java/org/apache/doris/metric/CounterMetric.java @@ -26,5 +26,5 @@ public abstract class CounterMetric extends Metric { super(name, MetricType.COUNTER, unit, description); } - abstract public void increase(T delta); + public abstract void increase(T delta); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/metric/Metric.java b/fe/fe-core/src/main/java/org/apache/doris/metric/Metric.java index b6e2a0e50b..aa26ecde1e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/metric/Metric.java +++ b/fe/fe-core/src/main/java/org/apache/doris/metric/Metric.java @@ -41,7 +41,7 @@ public abstract class Metric { CONNECTIONS, PACKETS, NOUNIT - }; + } protected String name; protected MetricType type; diff --git a/fe/fe-core/src/main/java/org/apache/doris/metric/MetricRepo.java b/fe/fe-core/src/main/java/org/apache/doris/metric/MetricRepo.java index 1772197e46..e7a6a4c3df 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/metric/MetricRepo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/metric/MetricRepo.java @@ -109,7 +109,8 @@ public final class MetricRepo { public static GaugeMetricImpl GAUGE_QUERY_ERR_RATE; public static GaugeMetricImpl GAUGE_MAX_TABLET_COMPACTION_SCORE; - private static ScheduledThreadPoolExecutor metricTimer = ThreadPoolManager.newDaemonScheduledThreadPool(1, "Metric-Timer-Pool", true); + private static ScheduledThreadPoolExecutor metricTimer = ThreadPoolManager + .newDaemonScheduledThreadPool(1, "Metric-Timer-Pool", true); private static MetricCalculator metricCalculator = new MetricCalculator(); // init() should only be called after catalog is contructed. 
@@ -177,9 +178,11 @@ public final class MetricRepo { return 0L; } if (jobType == JobType.SCHEMA_CHANGE) { - return alter.getSchemaChangeHandler().getAlterJobV2Num(org.apache.doris.alter.AlterJobV2.JobState.RUNNING); + return alter.getSchemaChangeHandler().getAlterJobV2Num( + org.apache.doris.alter.AlterJobV2.JobState.RUNNING); } else { - return alter.getMaterializedViewHandler().getAlterJobV2Num(org.apache.doris.alter.AlterJobV2.JobState.RUNNING); + return alter.getMaterializedViewHandler().getAlterJobV2Num( + org.apache.doris.alter.AlterJobV2.JobState.RUNNING); } } }; @@ -231,13 +234,16 @@ public final class MetricRepo { // qps, rps and error rate // these metrics should be set an init value, in case that metric calculator is not running - GAUGE_QUERY_PER_SECOND = new GaugeMetricImpl<>("qps", MetricUnit.NOUNIT, "query per second"); + GAUGE_QUERY_PER_SECOND = new GaugeMetricImpl<>("qps", MetricUnit.NOUNIT, + "query per second"); GAUGE_QUERY_PER_SECOND.setValue(0.0); PALO_METRIC_REGISTER.addPaloMetrics(GAUGE_QUERY_PER_SECOND); - GAUGE_REQUEST_PER_SECOND = new GaugeMetricImpl<>("rps", MetricUnit.NOUNIT, "request per second"); + GAUGE_REQUEST_PER_SECOND = new GaugeMetricImpl<>("rps", MetricUnit.NOUNIT, + "request per second"); GAUGE_REQUEST_PER_SECOND.setValue(0.0); PALO_METRIC_REGISTER.addPaloMetrics(GAUGE_REQUEST_PER_SECOND); - GAUGE_QUERY_ERR_RATE = new GaugeMetricImpl<>("query_err_rate", MetricUnit.NOUNIT, "query error rate"); + GAUGE_QUERY_ERR_RATE = new GaugeMetricImpl<>("query_err_rate", MetricUnit.NOUNIT, + "query error rate"); PALO_METRIC_REGISTER.addPaloMetrics(GAUGE_QUERY_ERR_RATE); GAUGE_QUERY_ERR_RATE.setValue(0.0); GAUGE_MAX_TABLET_COMPACTION_SCORE = new GaugeMetricImpl<>("max_tablet_compaction_score", @@ -246,24 +252,33 @@ public final class MetricRepo { GAUGE_MAX_TABLET_COMPACTION_SCORE.setValue(0L); // 2. 
counter - COUNTER_REQUEST_ALL = new LongCounterMetric("request_total", MetricUnit.REQUESTS, "total request"); + COUNTER_REQUEST_ALL = new LongCounterMetric("request_total", MetricUnit.REQUESTS, + "total request"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_REQUEST_ALL); - COUNTER_QUERY_ALL = new LongCounterMetric("query_total", MetricUnit.REQUESTS, "total query"); + COUNTER_QUERY_ALL = new LongCounterMetric("query_total", MetricUnit.REQUESTS, + "total query"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_QUERY_ALL); - COUNTER_QUERY_BEGIN = new LongCounterMetric("query_begin", MetricUnit.REQUESTS, "query begin"); + COUNTER_QUERY_BEGIN = new LongCounterMetric("query_begin", MetricUnit.REQUESTS, + "query begin"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_QUERY_BEGIN); - COUNTER_QUERY_ERR = new LongCounterMetric("query_err", MetricUnit.REQUESTS, "total error query"); + COUNTER_QUERY_ERR = new LongCounterMetric("query_err", MetricUnit.REQUESTS, + "total error query"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_QUERY_ERR); - COUNTER_LOAD_ADD = new LongCounterMetric("load_add", MetricUnit.REQUESTS, "total load submit"); + COUNTER_LOAD_ADD = new LongCounterMetric("load_add", MetricUnit.REQUESTS, + "total load submit"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_LOAD_ADD); - COUNTER_QUERY_TABLE = new LongCounterMetric("query_table", MetricUnit.REQUESTS, "total query from table"); + COUNTER_QUERY_TABLE = new LongCounterMetric("query_table", MetricUnit.REQUESTS, + "total query from table"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_QUERY_TABLE); - COUNTER_QUERY_OLAP_TABLE = new LongCounterMetric("query_olap_table", MetricUnit.REQUESTS, "total query from olap table"); + COUNTER_QUERY_OLAP_TABLE = new LongCounterMetric("query_olap_table", MetricUnit.REQUESTS, + "total query from olap table"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_QUERY_OLAP_TABLE); - COUNTER_CACHE_MODE_SQL = new LongCounterMetric("cache_mode_sql", MetricUnit.REQUESTS, "total query of sql mode"); + COUNTER_CACHE_MODE_SQL = new LongCounterMetric("cache_mode_sql", MetricUnit.REQUESTS, + "total query of sql mode"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_CACHE_MODE_SQL); - COUNTER_CACHE_HIT_SQL = new LongCounterMetric("cache_hit_sql", MetricUnit.REQUESTS, "total hits query by sql model"); + COUNTER_CACHE_HIT_SQL = new LongCounterMetric("cache_hit_sql", MetricUnit.REQUESTS, + "total hits query by sql model"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_CACHE_HIT_SQL); COUNTER_CACHE_MODE_PARTITION = new LongCounterMetric("query_mode_partition", MetricUnit.REQUESTS, "total query of partition mode"); @@ -278,56 +293,73 @@ public final class MetricRepo { "hit partition of cache partition model"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_CACHE_PARTITION_HIT); - COUNTER_LOAD_FINISHED = new LongCounterMetric("load_finished", MetricUnit.REQUESTS, "total load finished"); + COUNTER_LOAD_FINISHED = new LongCounterMetric("load_finished", MetricUnit.REQUESTS, + "total load finished"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_LOAD_FINISHED); - COUNTER_EDIT_LOG_WRITE = new LongCounterMetric("edit_log_write", MetricUnit.OPERATIONS, "counter of edit log write into bdbje"); + COUNTER_EDIT_LOG_WRITE = new LongCounterMetric("edit_log_write", MetricUnit.OPERATIONS, + "counter of edit log write into bdbje"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_EDIT_LOG_WRITE); - COUNTER_EDIT_LOG_READ = new LongCounterMetric("edit_log_read", MetricUnit.OPERATIONS, "counter of edit log read from bdbje"); + COUNTER_EDIT_LOG_READ = new 
LongCounterMetric("edit_log_read", MetricUnit.OPERATIONS, + "counter of edit log read from bdbje"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_EDIT_LOG_READ); - COUNTER_EDIT_LOG_SIZE_BYTES = new LongCounterMetric("edit_log_size_bytes", MetricUnit.BYTES, "size of edit log"); + COUNTER_EDIT_LOG_SIZE_BYTES = new LongCounterMetric("edit_log_size_bytes", MetricUnit.BYTES, + "size of edit log"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_EDIT_LOG_SIZE_BYTES); // image generate - COUNTER_IMAGE_WRITE_SUCCESS = new LongCounterMetric("image_write", MetricUnit.OPERATIONS, "counter of image succeed in write"); + COUNTER_IMAGE_WRITE_SUCCESS = new LongCounterMetric("image_write", MetricUnit.OPERATIONS, + "counter of image succeed in write"); COUNTER_IMAGE_WRITE_SUCCESS.addLabel(new MetricLabel("type", "success")); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_IMAGE_WRITE_SUCCESS); - COUNTER_IMAGE_WRITE_FAILED = new LongCounterMetric("image_write", MetricUnit.OPERATIONS, "counter of image failed to write"); + COUNTER_IMAGE_WRITE_FAILED = new LongCounterMetric("image_write", MetricUnit.OPERATIONS, + "counter of image failed to write"); COUNTER_IMAGE_WRITE_FAILED.addLabel(new MetricLabel("type", "failed")); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_IMAGE_WRITE_FAILED); - COUNTER_IMAGE_PUSH_SUCCESS = new LongCounterMetric("image_push", MetricUnit.OPERATIONS, "counter of image succeeded in pushing to other frontends"); + COUNTER_IMAGE_PUSH_SUCCESS = new LongCounterMetric("image_push", MetricUnit.OPERATIONS, + "counter of image succeeded in pushing to other frontends"); COUNTER_IMAGE_PUSH_SUCCESS.addLabel(new MetricLabel("type", "success")); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_IMAGE_PUSH_SUCCESS); - COUNTER_IMAGE_PUSH_FAILED = new LongCounterMetric("image_push", MetricUnit.OPERATIONS, "counter of image failed to other frontends"); + COUNTER_IMAGE_PUSH_FAILED = new LongCounterMetric("image_push", MetricUnit.OPERATIONS, + "counter of image failed to other frontends"); COUNTER_IMAGE_PUSH_FAILED.addLabel(new MetricLabel("type", "failed")); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_IMAGE_PUSH_FAILED); // image clean - COUNTER_IMAGE_CLEAN_SUCCESS = new LongCounterMetric("image_clean", MetricUnit.OPERATIONS, "counter of image succeeded in cleaning"); + COUNTER_IMAGE_CLEAN_SUCCESS = new LongCounterMetric("image_clean", MetricUnit.OPERATIONS, + "counter of image succeeded in cleaning"); COUNTER_IMAGE_CLEAN_SUCCESS.addLabel(new MetricLabel("type", "success")); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_IMAGE_CLEAN_SUCCESS); - COUNTER_IMAGE_CLEAN_FAILED = new LongCounterMetric("image_clean", MetricUnit.OPERATIONS, "counter of image failed to clean"); + COUNTER_IMAGE_CLEAN_FAILED = new LongCounterMetric("image_clean", MetricUnit.OPERATIONS, + "counter of image failed to clean"); COUNTER_IMAGE_CLEAN_FAILED.addLabel(new MetricLabel("type", "failed")); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_IMAGE_CLEAN_FAILED); // edit log clean - COUNTER_EDIT_LOG_CLEAN_SUCCESS = new LongCounterMetric("edit_log_clean", MetricUnit.OPERATIONS, "counter of edit log succeed in cleaning"); + COUNTER_EDIT_LOG_CLEAN_SUCCESS = new LongCounterMetric("edit_log_clean", MetricUnit.OPERATIONS, + "counter of edit log succeed in cleaning"); COUNTER_EDIT_LOG_CLEAN_SUCCESS.addLabel(new MetricLabel("type", "success")); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_EDIT_LOG_CLEAN_SUCCESS); - COUNTER_EDIT_LOG_CLEAN_FAILED = new LongCounterMetric("edit_log_clean", MetricUnit.OPERATIONS, "counter of edit log failed to clean"); + 
COUNTER_EDIT_LOG_CLEAN_FAILED = new LongCounterMetric("edit_log_clean", MetricUnit.OPERATIONS, + "counter of edit log failed to clean"); COUNTER_EDIT_LOG_CLEAN_FAILED.addLabel(new MetricLabel("type", "failed")); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_EDIT_LOG_CLEAN_FAILED); - COUNTER_TXN_REJECT = new LongCounterMetric("txn_reject", MetricUnit.REQUESTS, "counter of rejected transactions"); + COUNTER_TXN_REJECT = new LongCounterMetric("txn_reject", MetricUnit.REQUESTS, + "counter of rejected transactions"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_TXN_REJECT); - COUNTER_TXN_BEGIN = new LongCounterMetric("txn_begin", MetricUnit.REQUESTS, "counter of beginning transactions"); + COUNTER_TXN_BEGIN = new LongCounterMetric("txn_begin", MetricUnit.REQUESTS, + "counter of beginning transactions"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_TXN_BEGIN); - COUNTER_TXN_SUCCESS = new LongCounterMetric("txn_success", MetricUnit.REQUESTS, "counter of success transactions"); + COUNTER_TXN_SUCCESS = new LongCounterMetric("txn_success", MetricUnit.REQUESTS, + "counter of success transactions"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_TXN_SUCCESS); - COUNTER_TXN_FAILED = new LongCounterMetric("txn_failed", MetricUnit.REQUESTS, "counter of failed transactions"); + COUNTER_TXN_FAILED = new LongCounterMetric("txn_failed", MetricUnit.REQUESTS, + "counter of failed transactions"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_TXN_FAILED); - COUNTER_ROUTINE_LOAD_ROWS = new LongCounterMetric("routine_load_rows", MetricUnit.ROWS, "total rows of routine load"); + COUNTER_ROUTINE_LOAD_ROWS = new LongCounterMetric("routine_load_rows", + MetricUnit.ROWS, "total rows of routine load"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_ROUTINE_LOAD_ROWS); COUNTER_ROUTINE_LOAD_RECEIVED_BYTES = new LongCounterMetric("routine_load_receive_bytes", MetricUnit.BYTES, "total received bytes of routine load"); @@ -340,18 +372,22 @@ public final class MetricRepo { "total hit sql block rule query"); PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_HIT_SQL_BLOCK_RULE); // 3. 
histogram - HISTO_QUERY_LATENCY = METRIC_REGISTER.histogram(MetricRegistry.name("query", "latency", "ms")); - HISTO_EDIT_LOG_WRITE_LATENCY = METRIC_REGISTER.histogram(MetricRegistry.name("editlog", "write", "latency", "ms")); + HISTO_QUERY_LATENCY = METRIC_REGISTER.histogram( + MetricRegistry.name("query", "latency", "ms")); + HISTO_EDIT_LOG_WRITE_LATENCY = METRIC_REGISTER.histogram( + MetricRegistry.name("editlog", "write", "latency", "ms")); - METRIC_REGISTER.register(MetricRegistry.name("palo", "fe", "query", "max_instances_num_per_user"), (Gauge) () -> { - try { - return ((QeProcessorImpl) QeProcessorImpl.INSTANCE).getInstancesNumPerUser().values().stream() - .reduce(-1, BinaryOperator.maxBy(Integer::compareTo)); - } catch (Throwable ex) { - LOG.warn("Get max_instances_num_per_user error", ex); - return -2; + METRIC_REGISTER.register(MetricRegistry.name("palo", "fe", "query", "max_instances_num_per_user"), + (Gauge) () -> { + try { + return ((QeProcessorImpl) QeProcessorImpl.INSTANCE).getInstancesNumPerUser().values().stream() + .reduce(-1, BinaryOperator.maxBy(Integer::compareTo)); + } catch (Throwable ex) { + LOG.warn("Get max_instances_num_per_user error", ex); + return -2; + } } - }); + ); // init system metrics initSystemMetrics(); @@ -432,8 +468,8 @@ public final class MetricRepo { PALO_METRIC_REGISTER.addPaloMetrics(memFree); // Memory Total - GaugeMetric memAvailable = (GaugeMetric) new GaugeMetric( - "meminfo", MetricUnit.BYTES, "An estimate of how much memory is available for starting new applications, without swapping") { + GaugeMetric memAvailable = (GaugeMetric) new GaugeMetric("meminfo", MetricUnit.BYTES, + "An estimate of how much memory is available for starting new applications, without swapping") { @Override public Long getValue() { return SYSTEM_METRICS.memAvailable; @@ -443,8 +479,8 @@ public final class MetricRepo { PALO_METRIC_REGISTER.addPaloMetrics(memAvailable); // Buffers - GaugeMetric buffers = (GaugeMetric) new GaugeMetric( - "meminfo", MetricUnit.BYTES, "Memory in buffer cache, so relatively temporary storage for raw disk blocks") { + GaugeMetric buffers = (GaugeMetric) new GaugeMetric("meminfo", MetricUnit.BYTES, + "Memory in buffer cache, so relatively temporary storage for raw disk blocks") { @Override public Long getValue() { return SYSTEM_METRICS.buffers; diff --git a/fe/fe-core/src/main/java/org/apache/doris/metric/PrometheusMetricVisitor.java b/fe/fe-core/src/main/java/org/apache/doris/metric/PrometheusMetricVisitor.java index 3cf0eccaaa..507c0817c2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/metric/PrometheusMetricVisitor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/metric/PrometheusMetricVisitor.java @@ -72,14 +72,19 @@ public class PrometheusMetricVisitor extends MetricVisitor { // heap sb.append(Joiner.on(" ").join(HELP, JVM_HEAP_SIZE_BYTES, "jvm heap stat\n")); sb.append(Joiner.on(" ").join(TYPE, JVM_HEAP_SIZE_BYTES, "gauge\n")); - sb.append(JVM_HEAP_SIZE_BYTES).append("{type=\"max\"} ").append(jvmStats.getMem().getHeapMax().getBytes()).append("\n"); - sb.append(JVM_HEAP_SIZE_BYTES).append("{type=\"committed\"} ").append(jvmStats.getMem().getHeapCommitted().getBytes()).append("\n"); - sb.append(JVM_HEAP_SIZE_BYTES).append("{type=\"used\"} ").append(jvmStats.getMem().getHeapUsed().getBytes()).append("\n"); + sb.append(JVM_HEAP_SIZE_BYTES).append("{type=\"max\"} ") + .append(jvmStats.getMem().getHeapMax().getBytes()).append("\n"); + sb.append(JVM_HEAP_SIZE_BYTES).append("{type=\"committed\"} ") + 
.append(jvmStats.getMem().getHeapCommitted().getBytes()).append("\n"); + sb.append(JVM_HEAP_SIZE_BYTES).append("{type=\"used\"} ") + .append(jvmStats.getMem().getHeapUsed().getBytes()).append("\n"); // non heap sb.append(Joiner.on(" ").join(HELP, JVM_NON_HEAP_SIZE_BYTES, "jvm non heap stat\n")); sb.append(Joiner.on(" ").join(TYPE, JVM_NON_HEAP_SIZE_BYTES, "gauge\n")); - sb.append(JVM_NON_HEAP_SIZE_BYTES).append("{type=\"committed\"} ").append(jvmStats.getMem().getNonHeapCommitted().getBytes()).append("\n"); - sb.append(JVM_NON_HEAP_SIZE_BYTES).append("{type=\"used\"} ").append(jvmStats.getMem().getNonHeapUsed().getBytes()).append("\n"); + sb.append(JVM_NON_HEAP_SIZE_BYTES).append("{type=\"committed\"} ") + .append(jvmStats.getMem().getNonHeapCommitted().getBytes()).append("\n"); + sb.append(JVM_NON_HEAP_SIZE_BYTES).append("{type=\"used\"} ") + .append(jvmStats.getMem().getNonHeapUsed().getBytes()).append("\n"); // mem pool Iterator memIter = jvmStats.getMem().iterator(); @@ -88,15 +93,21 @@ public class PrometheusMetricVisitor extends MetricVisitor { if (memPool.getName().equalsIgnoreCase("young")) { sb.append(Joiner.on(" ").join(HELP, JVM_YOUNG_SIZE_BYTES, "jvm young mem pool stat\n")); sb.append(Joiner.on(" ").join(TYPE, JVM_YOUNG_SIZE_BYTES, "gauge\n")); - sb.append(JVM_YOUNG_SIZE_BYTES).append("{type=\"used\"} ").append(memPool.getUsed().getBytes()).append("\n"); - sb.append(JVM_YOUNG_SIZE_BYTES).append("{type=\"peak_used\"} ").append(memPool.getPeakUsed().getBytes()).append("\n"); - sb.append(JVM_YOUNG_SIZE_BYTES).append("{type=\"max\"} ").append(memPool.getMax().getBytes()).append("\n"); + sb.append(JVM_YOUNG_SIZE_BYTES).append("{type=\"used\"} ") + .append(memPool.getUsed().getBytes()).append("\n"); + sb.append(JVM_YOUNG_SIZE_BYTES).append("{type=\"peak_used\"} ") + .append(memPool.getPeakUsed().getBytes()).append("\n"); + sb.append(JVM_YOUNG_SIZE_BYTES).append("{type=\"max\"} ") + .append(memPool.getMax().getBytes()).append("\n"); } else if (memPool.getName().equalsIgnoreCase("old")) { sb.append(Joiner.on(" ").join(HELP, JVM_OLD_SIZE_BYTES, "jvm old mem pool stat\n")); sb.append(Joiner.on(" ").join(TYPE, JVM_OLD_SIZE_BYTES, "gauge\n")); - sb.append(JVM_OLD_SIZE_BYTES).append("{type=\"used\"} ").append(memPool.getUsed().getBytes()).append("\n"); - sb.append(JVM_OLD_SIZE_BYTES).append("{type=\"peak_used\"} ").append(memPool.getPeakUsed().getBytes()).append("\n"); - sb.append(JVM_OLD_SIZE_BYTES).append("{type=\"max\"} ").append(memPool.getMax().getBytes()).append("\n"); + sb.append(JVM_OLD_SIZE_BYTES).append("{type=\"used\"} ") + .append(memPool.getUsed().getBytes()).append("\n"); + sb.append(JVM_OLD_SIZE_BYTES).append("{type=\"peak_used\"} ") + .append(memPool.getPeakUsed().getBytes()).append("\n"); + sb.append(JVM_OLD_SIZE_BYTES).append("{type=\"max\"} " + ).append(memPool.getMax().getBytes()).append("\n"); } } @@ -108,9 +119,12 @@ public class PrometheusMetricVisitor extends MetricVisitor { sb.append(Joiner.on(" ").join(HELP, JVM_DIRECT_BUFFER_POOL_SIZE_BYTES, "jvm direct buffer pool stat\n")); sb.append(Joiner.on(" ").join(TYPE, JVM_DIRECT_BUFFER_POOL_SIZE_BYTES, "gauge\n")); - sb.append(JVM_DIRECT_BUFFER_POOL_SIZE_BYTES).append("{type=\"count\"} ").append(pool.getCount()).append("\n"); - sb.append(JVM_DIRECT_BUFFER_POOL_SIZE_BYTES).append("{type=\"used\"} ").append(pool.getUsed().getBytes()).append("\n"); - sb.append(JVM_DIRECT_BUFFER_POOL_SIZE_BYTES).append("{type=\"capacity\"} ").append(pool.getTotalCapacity().getBytes()).append("\n"); + 
sb.append(JVM_DIRECT_BUFFER_POOL_SIZE_BYTES).append("{type=\"count\"} ") + .append(pool.getCount()).append("\n"); + sb.append(JVM_DIRECT_BUFFER_POOL_SIZE_BYTES).append("{type=\"used\"} ") + .append(pool.getUsed().getBytes()).append("\n"); + sb.append(JVM_DIRECT_BUFFER_POOL_SIZE_BYTES).append("{type=\"capacity\"} ") + .append(pool.getTotalCapacity().getBytes()).append("\n"); } } @@ -122,12 +136,14 @@ public class PrometheusMetricVisitor extends MetricVisitor { sb.append(Joiner.on(" ").join(HELP, JVM_YOUNG_GC, "jvm young gc stat\n")); sb.append(Joiner.on(" ").join(TYPE, JVM_YOUNG_GC, "gauge\n")); sb.append(JVM_YOUNG_GC).append("{type=\"count\"} ").append(gc.getCollectionCount()).append("\n"); - sb.append(JVM_YOUNG_GC).append("{type=\"time\"} ").append(gc.getCollectionTime().getMillis()).append("\n"); + sb.append(JVM_YOUNG_GC).append("{type=\"time\"} ") + .append(gc.getCollectionTime().getMillis()).append("\n"); } else if (gc.getName().equalsIgnoreCase("old")) { sb.append(Joiner.on(" ").join(HELP, JVM_OLD_GC, "jvm old gc stat\n")); sb.append(Joiner.on(" ").join(TYPE, JVM_OLD_GC, "gauge\n")); sb.append(JVM_OLD_GC).append("{type=\"count\"} ").append(gc.getCollectionCount()).append("\n"); - sb.append(JVM_OLD_GC).append("{type=\"time\"} ").append(gc.getCollectionTime().getMillis()).append("\n"); + sb.append(JVM_OLD_GC).append("{type=\"time\"} ") + .append(gc.getCollectionTime().getMillis()).append("\n"); } } @@ -135,14 +151,22 @@ public class PrometheusMetricVisitor extends MetricVisitor { Threads threads = jvmStats.getThreads(); sb.append(Joiner.on(" ").join(HELP, JVM_THREAD, "jvm thread stat\n")); sb.append(Joiner.on(" ").join(TYPE, JVM_THREAD, "gauge\n")); - sb.append(JVM_THREAD).append("{type=\"count\"} ").append(threads.getCount()).append("\n"); - sb.append(JVM_THREAD).append("{type=\"peak_count\"} ").append(threads.getPeakCount()).append("\n"); - sb.append(JVM_THREAD).append("{type=\"new_count\"} ").append(threads.getThreadsNewCount()).append("\n"); - sb.append(JVM_THREAD).append("{type=\"runnable_count\"} ").append(threads.getThreadsRunnableCount()).append("\n"); - sb.append(JVM_THREAD).append("{type=\"blocked_count\"} ").append(threads.getThreadsBlockedCount()).append("\n"); - sb.append(JVM_THREAD).append("{type=\"waiting_count\"} ").append(threads.getThreadsWaitingCount()).append("\n"); - sb.append(JVM_THREAD).append("{type=\"timed_waiting_count\"} ").append(threads.getThreadsTimedWaitingCount()).append("\n"); - sb.append(JVM_THREAD).append("{type=\"terminated_count\"} ").append(threads.getThreadsTerminatedCount()).append("\n"); + sb.append(JVM_THREAD).append("{type=\"count\"} ") + .append(threads.getCount()).append("\n"); + sb.append(JVM_THREAD).append("{type=\"peak_count\"} ") + .append(threads.getPeakCount()).append("\n"); + sb.append(JVM_THREAD).append("{type=\"new_count\"} ") + .append(threads.getThreadsNewCount()).append("\n"); + sb.append(JVM_THREAD).append("{type=\"runnable_count\"} ") + .append(threads.getThreadsRunnableCount()).append("\n"); + sb.append(JVM_THREAD).append("{type=\"blocked_count\"} ") + .append(threads.getThreadsBlockedCount()).append("\n"); + sb.append(JVM_THREAD).append("{type=\"waiting_count\"} ") + .append(threads.getThreadsWaitingCount()).append("\n"); + sb.append(JVM_THREAD).append("{type=\"timed_waiting_count\"} ") + .append(threads.getThreadsTimedWaitingCount()).append("\n"); + sb.append(JVM_THREAD).append("{type=\"terminated_count\"} ") + .append(threads.getThreadsTerminatedCount()).append("\n"); return; } @@ -203,7 +227,8 @@ public class 
PrometheusMetricVisitor extends MetricVisitor { sb.append(NODE_INFO).append("{type=\"be_node_num\", state=\"decommissioned\"} ") .append(Catalog.getCurrentSystemInfo().getDecommissionedBackendIds().size()).append("\n"); sb.append(NODE_INFO).append("{type=\"broker_node_num\", state=\"dead\"} ").append( - Catalog.getCurrentCatalog().getBrokerMgr().getAllBrokers().stream().filter(b -> !b.isAlive).count()).append("\n"); + Catalog.getCurrentCatalog().getBrokerMgr().getAllBrokers() + .stream().filter(b -> !b.isAlive).count()).append("\n"); // only master FE has this metrics, to help the Grafana knows who is the master if (Catalog.getCurrentCatalog().isMaster()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/metric/SimpleCoreMetricVisitor.java b/fe/fe-core/src/main/java/org/apache/doris/metric/SimpleCoreMetricVisitor.java index a65f742148..a4064619c6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/metric/SimpleCoreMetricVisitor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/metric/SimpleCoreMetricVisitor.java @@ -60,6 +60,7 @@ public class SimpleCoreMetricVisitor extends MetricVisitor { private int metricNumber = 0; private static final Map CORE_METRICS = Maps.newHashMap(); + static { CORE_METRICS.put(MAX_JOURMAL_ID, TYPE_LONG); CORE_METRICS.put(CONNECTION_TOTAL, TYPE_LONG); @@ -135,9 +136,12 @@ public class SimpleCoreMetricVisitor extends MetricVisitor { @Override public void getNodeInfo(StringBuilder sb) { - long feDeadNum = Catalog.getCurrentCatalog().getFrontends(null).stream().filter(f -> !f.isAlive()).count(); - long beDeadNum = Catalog.getCurrentSystemInfo().getIdToBackend().values().stream().filter(b -> !b.isAlive()).count(); - long brokerDeadNum = Catalog.getCurrentCatalog().getBrokerMgr().getAllBrokers().stream().filter(b -> !b.isAlive).count(); + long feDeadNum = Catalog.getCurrentCatalog() + .getFrontends(null).stream().filter(f -> !f.isAlive()).count(); + long beDeadNum = Catalog.getCurrentSystemInfo().getIdToBackend() + .values().stream().filter(b -> !b.isAlive()).count(); + long brokerDeadNum = Catalog.getCurrentCatalog().getBrokerMgr() + .getAllBrokers().stream().filter(b -> !b.isAlive).count(); sb.append(prefix + "_frontend_dead_num").append(" ").append(String.valueOf(feDeadNum)).append("\n"); sb.append(prefix + "_backend_dead_num").append(" ").append(String.valueOf(beDeadNum)).append("\n"); sb.append(prefix + "_broker_dead_num").append(" ").append(String.valueOf(brokerDeadNum)).append("\n"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/metric/SystemMetrics.java b/fe/fe-core/src/main/java/org/apache/doris/metric/SystemMetrics.java index 53d14e3cf1..7e138af036 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/metric/SystemMetrics.java +++ b/fe/fe-core/src/main/java/org/apache/doris/metric/SystemMetrics.java @@ -92,7 +92,8 @@ public class SystemMetrics { throw new Exception("failed to read metrics of TCP"); } - // eg: Tcp: 1 200 120000 -1 38920626 10487279 105581903 300009 305 18079291213 15411998945 11808180 22905 4174570 0 + // eg: Tcp: 1 200 120000 -1 38920626 10487279 105581903 300009 305 + // 18079291213 15411998945 11808180 22905 4174570 0 String[] parts = line.split(" "); if (parts.length != headerMap.size()) { throw new Exception("invalid tcp metrics: " + line + ". 
header size: " + headerMap.size()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmPauseMonitor.java b/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmPauseMonitor.java index d4948f6fda..9158ece8a8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmPauseMonitor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmPauseMonitor.java @@ -234,9 +234,9 @@ public class JvmPauseMonitor { */ private void checkForDeadlocks() { ThreadMXBean threadMx = ManagementFactory.getThreadMXBean(); - long deadlockedTids[] = threadMx.findDeadlockedThreads(); + long[] deadlockedTids = threadMx.findDeadlockedThreads(); if (deadlockedTids != null) { - ThreadInfo deadlockedThreads[] = + ThreadInfo[] deadlockedThreads = threadMx.getThreadInfo(deadlockedTids, true, true); // Log diagnostics with error before aborting the process with a FATAL log. LOG.error("Found " + deadlockedThreads.length + " threads in deadlock: "); diff --git a/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmStats.java b/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmStats.java index ae5375df89..1d39bb1c38 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmStats.java +++ b/fe/fe-core/src/main/java/org/apache/doris/monitor/jvm/JvmStats.java @@ -90,7 +90,7 @@ public class JvmStats { int threadsWaiting = 0; int threadsTimedWaiting = 0; int threadsTerminated = 0; - long threadIds[] = threadMXBean.getAllThreadIds(); + long[] threadIds = threadMXBean.getAllThreadIds(); for (ThreadInfo threadInfo : threadMXBean.getThreadInfo(threadIds, 0)) { if (threadInfo == null) { continue; // race protection diff --git a/fe/fe-core/src/main/java/org/apache/doris/monitor/unit/TimeValue.java b/fe/fe-core/src/main/java/org/apache/doris/monitor/unit/TimeValue.java index 0e411665b6..4c4d099aeb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/monitor/unit/TimeValue.java +++ b/fe/fe-core/src/main/java/org/apache/doris/monitor/unit/TimeValue.java @@ -300,7 +300,8 @@ public class TimeValue implements Comparable { } else if (normalized.endsWith("s")) { return new TimeValue(parse(sValue, normalized, "s"), TimeUnit.SECONDS); } else if (sValue.endsWith("m")) { - // parsing minutes should be case-sensitive as 'M' means "months", not "minutes"; this is the only special case. + // parsing minutes should be case-sensitive as 'M' means "months", not "minutes"; + // this is the only special case. 
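
The JvmPauseMonitor and JvmStats hunks a little further up switch C-style array declarations (`long deadlockedTids[]`) to the Java style (`long[] deadlockedTids`) that Checkstyle's `ArrayTypeStyle` check expects. Below is a minimal compilable sketch of the same pattern; the class, field, and method names are invented for illustration and are not taken from the patch:

```
public class ArrayStyleExample {
    // C-style brackets on the variable ("long tids[]") are flagged by ArrayTypeStyle.
    // Putting the brackets on the type makes it clear the type itself is "long[]".
    private static final long[] EMPTY = new long[0];

    // Copies the first n elements (or fewer, if the input is shorter).
    public static long[] firstN(long[] input, int n) {
        long[] result = new long[Math.min(n, input.length)];
        System.arraycopy(input, 0, result, 0, result.length);
        return result;
    }

    public static void main(String[] args) {
        long[] tids = {1L, 2L, 3L};
        System.out.println(firstN(tids, 2).length); // prints 2
        System.out.println(EMPTY.length);           // prints 0
    }
}
```
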
return new TimeValue(parse(sValue, normalized, "m"), TimeUnit.MINUTES); } else if (normalized.endsWith("h")) { return new TimeValue(parse(sValue, normalized, "h"), TimeUnit.HOURS); diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlPacket.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlPacket.java index f18a646226..e667ce3e51 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlPacket.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlPacket.java @@ -26,5 +26,5 @@ public abstract class MysqlPacket { return false; } - abstract public void writeTo(MysqlSerializer serializer); + public abstract void writeTo(MysqlSerializer serializer); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlPassword.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlPassword.java index 691f89d481..3c8bafa3ce 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlPassword.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlPassword.java @@ -75,11 +75,11 @@ import java.util.Random; public class MysqlPassword { private static final Logger LOG = LogManager.getLogger(MysqlPassword.class); // TODO(zhaochun): this is duplicated with handshake packet. - public static final byte EMPTY_PASSWORD[] = new byte[0]; + public static final byte[] EMPTY_PASSWORD = new byte[0]; public static final int SCRAMBLE_LENGTH = 20; public static final int SCRAMBLE_LENGTH_HEX_LENGTH = 2 * SCRAMBLE_LENGTH + 1; public static final byte PVERSION41_CHAR = '*'; - private static final byte DIG_VEC_UPPER[] = {'0', '1', '2', '3', '4', '5', '6', '7', + private static final byte[] DIG_VEC_UPPER = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'}; private static Random random = new Random(System.currentTimeMillis()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlProto.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlProto.java index 7751d3f783..2db7fcf81e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlProto.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlProto.java @@ -48,7 +48,8 @@ public class MysqlProto { // scramble: data receive from server. // randomString: data send by server in plug-in data field // user_name#HIGH@cluster_name - private static boolean authenticate(ConnectContext context, byte[] scramble, byte[] randomString, String qualifiedUser) { + private static boolean authenticate(ConnectContext context, byte[] scramble, + byte[] randomString, String qualifiedUser) { String usePasswd = scramble.length == 0 ? "NO" : "YES"; String remoteIp = context.getMysqlChannel().getRemoteIp(); @@ -242,8 +243,8 @@ public class MysqlProto { // with password. // So Doris support the Protocol::AuthSwitchRequest to tell client to keep the default password plugin // which Doris is using now. - // Note: Check the authPacket whether support plugin auth firstly, before we check AuthPlugin between doris and client - // to compatible with older version: like mysql 5.1 + // Note: Check the authPacket whether support plugin auth firstly, + // before we check AuthPlugin between doris and client to compatible with older version: like mysql 5.1 if (authPacket.getCapability().isPluginAuth() && !handshakePacket.checkAuthPluginSameAsDoris(authPacket.getPluginName())) { // 1. 
clear the serializer diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/nio/AcceptListener.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/nio/AcceptListener.java index 10a4d36f19..6eefd2ad36 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/nio/AcceptListener.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/nio/AcceptListener.java @@ -36,7 +36,7 @@ import java.io.IOException; * listener for accept mysql connections. */ public class AcceptListener implements ChannelListener> { - private final static Logger LOG = LogManager.getLogger(AcceptListener.class); + private static final Logger LOG = LogManager.getLogger(AcceptListener.class); private final ConnectScheduler connectScheduler; public AcceptListener(ConnectScheduler connectScheduler) { @@ -70,7 +70,8 @@ public class AcceptListener implements ChannelListener connectScheduler.unregisterConnection(context)); } else { - context.getState().setError(ErrorCode.ERR_TOO_MANY_USER_CONNECTIONS, "Reach limit of connections"); + context.getState().setError(ErrorCode.ERR_TOO_MANY_USER_CONNECTIONS, + "Reach limit of connections"); MysqlProto.sendResponsePacket(context); throw new AfterConnectedException("Reach limit of connections"); } @@ -88,7 +89,8 @@ public class AcceptListener implements ChannelListener server; // default task service. - private ExecutorService taskService = ThreadPoolManager.newDaemonCacheThreadPool(Config.max_mysql_service_task_threads_num, "doris-mysql-nio-pool", true); + private ExecutorService taskService = ThreadPoolManager.newDaemonCacheThreadPool( + Config.max_mysql_service_task_threads_num, "doris-mysql-nio-pool", true); public NMysqlServer(int port, ConnectScheduler connectScheduler) { this.port = port; @@ -65,8 +66,8 @@ public class NMysqlServer extends MysqlServer { @Override public boolean start() { try { - server = xnioWorker.createStreamConnectionServer(new InetSocketAddress(port), - acceptListener, OptionMap.create(Options.TCP_NODELAY, true, Options.BACKLOG, Config.mysql_nio_backlog_num)); + server = xnioWorker.createStreamConnectionServer(new InetSocketAddress(port), acceptListener, + OptionMap.create(Options.TCP_NODELAY, true, Options.BACKLOG, Config.mysql_nio_backlog_num)); server.resumeAccepts(); running = true; LOG.info("Open mysql server success on {}", port); diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PaloAuth.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PaloAuth.java index 92a0538766..2dcb317ff8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PaloAuth.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PaloAuth.java @@ -435,7 +435,8 @@ public class PaloAuth implements Writable { * This method will check the given privilege levels */ public boolean checkHasPriv(ConnectContext ctx, PrivPredicate priv, PrivLevel... 
levels) { - return checkHasPrivInternal(ctx.getCurrentUserIdentity(), ctx.getRemoteIP(), ctx.getQualifiedUser(), priv, levels); + return checkHasPrivInternal(ctx.getCurrentUserIdentity(), + ctx.getRemoteIP(), ctx.getQualifiedUser(), priv, levels); } private boolean checkHasPrivInternal(UserIdentity currentUser, String host, String user, PrivPredicate priv, @@ -553,7 +554,8 @@ public class PaloAuth implements Writable { // create user public void createUser(CreateUserStmt stmt) throws DdlException { - createUserInternal(stmt.getUserIdent(), stmt.getQualifiedRole(), stmt.getPassword(), stmt.isIfNotExist(), false); + createUserInternal(stmt.getUserIdent(), stmt.getQualifiedRole(), + stmt.getPassword(), stmt.isIfNotExist(), false); } public void replayCreateUser(PrivInfo privInfo) { @@ -662,7 +664,8 @@ public class PaloAuth implements Writable { dropUserInternal(userIdentity, false /* ignore if non exists */, true /* is replay */); } - private void dropUserInternal(UserIdentity userIdent, boolean ignoreIfNonExists, boolean isReplay) throws DdlException { + private void dropUserInternal(UserIdentity userIdent, boolean ignoreIfNonExists, boolean isReplay) + throws DdlException { writeLock(); try { // check if user exists @@ -773,7 +776,8 @@ public class PaloAuth implements Writable { // update users' privs of this role for (UserIdentity user : existingRole.getUsers()) { - for (Map.Entry entry : existingRole.getResourcePatternToPrivs().entrySet()) { + for (Map.Entry entry + : existingRole.getResourcePatternToPrivs().entrySet()) { // copy the PrivBitSet grantPrivs(user, entry.getKey(), entry.getValue().copy(), errOnNonExist); } @@ -834,7 +838,8 @@ public class PaloAuth implements Writable { public void grantPrivs(UserIdentity userIdent, ResourcePattern resourcePattern, PrivBitSet privs, boolean errOnNonExist) throws DdlException { - LOG.debug("grant {} on resource {} to {}, err on non exist: {}", privs, resourcePattern, userIdent, errOnNonExist); + LOG.debug("grant {} on resource {} to {}, err on non exist: {}", + privs, resourcePattern, userIdent, errOnNonExist); writeLock(); try { @@ -1020,7 +1025,8 @@ public class PaloAuth implements Writable { try { if (userIdent.isDomain()) { // throw exception if this user already contains this domain - propertyMgr.setPasswordForDomain(userIdent, password, true /* err on exist */, errOnNonExist /* err on non exist */); + propertyMgr.setPasswordForDomain(userIdent, password, + true /* err on exist */, errOnNonExist /* err on non exist */); } else { GlobalPrivEntry passwdEntry; try { @@ -1328,7 +1334,8 @@ public class PaloAuth implements Writable { Map ldapDbPrivs = LdapPrivsChecker.getLdapAllDbPrivs(userIdent); for (Map.Entry entry : ldapDbPrivs.entrySet()) { if (!addedDbs.contains(entry.getKey().getQualifiedDb())) { - dbPrivs.add(entry.getKey().getQualifiedDb() + ": " + entry.getValue().toString() + " (" + false + ")"); + dbPrivs.add(entry.getKey().getQualifiedDb() + ": " + + entry.getValue().toString() + " (" + false + ")"); } } } @@ -1502,10 +1509,12 @@ public class PaloAuth implements Writable { try { UserIdentity rootUser = new UserIdentity(ROOT_USER, "%"); rootUser.setIsAnalyzed(); - createUserInternal(rootUser, PaloRole.OPERATOR_ROLE, new byte[0], false /* ignore if exists */, true /* is replay */); + createUserInternal(rootUser, PaloRole.OPERATOR_ROLE, new byte[0], + false /* ignore if exists */, true /* is replay */); UserIdentity adminUser = new UserIdentity(ADMIN_USER, "%"); adminUser.setIsAnalyzed(); - createUserInternal(adminUser, 
PaloRole.ADMIN_ROLE, new byte[0], false /* ignore if exists */, true /* is replay */); + createUserInternal(adminUser, PaloRole.ADMIN_ROLE, new byte[0], + false /* ignore if exists */, true /* is replay */); } catch (DdlException e) { LOG.error("should not happened", e); } @@ -1545,7 +1554,8 @@ public class PaloAuth implements Writable { continue; } - String grantee = new String("\'").concat(ClusterNamespace.getNameFromFullName(tblPrivEntry.getOrigUser())) + String grantee = new String("\'") + .concat(ClusterNamespace.getNameFromFullName(tblPrivEntry.getOrigUser())) .concat("\'@\'").concat(tblPrivEntry.getOrigHost()).concat("\'"); String isGrantable = tblPrivEntry.getPrivSet().get(2) ? "YES" : "NO"; // GRANT_PRIV for (PaloPrivilege paloPriv : tblPrivEntry.getPrivSet().toPrivilegeList()) { @@ -1580,7 +1590,8 @@ public class PaloAuth implements Writable { continue; } - String grantee = new String("\'").concat(ClusterNamespace.getNameFromFullName(dbPrivEntry.getOrigUser())) + String grantee = new String("\'") + .concat(ClusterNamespace.getNameFromFullName(dbPrivEntry.getOrigUser())) .concat("\'@\'").concat(dbPrivEntry.getOrigHost()).concat("\'"); String isGrantable = dbPrivEntry.getPrivSet().get(2) ? "YES" : "NO"; // GRANT_PRIV for (PaloPrivilege paloPriv : dbPrivEntry.getPrivSet().toPrivilegeList()) { @@ -1609,12 +1620,14 @@ public class PaloAuth implements Writable { } for (PrivEntry userPrivEntry : userPrivTable.getEntries()) { - String grantee = new String("\'").concat(ClusterNamespace.getNameFromFullName(userPrivEntry.getOrigUser())) + String grantee = new String("\'") + .concat(ClusterNamespace.getNameFromFullName(userPrivEntry.getOrigUser())) .concat("\'@\'").concat(userPrivEntry.getOrigHost()).concat("\'"); String isGrantable = userPrivEntry.getPrivSet().get(2) ? "YES" : "NO"; // GRANT_PRIV for (PaloPrivilege paloPriv : userPrivEntry.getPrivSet().toPrivilegeList()) { if (paloPriv == PaloPrivilege.ADMIN_PRIV) { - for (String priv : PaloPrivilege.privInPaloToMysql.values()) { // ADMIN_PRIV includes all privileges of table and resource. + // ADMIN_PRIV includes all privileges of table and resource. 
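
The PaloAuth hunks around here all apply the same fix for over-long lines: split a long call after a comma and indent the continuation an extra level. The sketch below shows that wrapping style in isolation, assuming a line-length limit like the one the Checkstyle config enforces; `Metric`, `MetricRegistry`, `Unit`, and the metric name are stand-ins made up for the example, not types from the patch:

```
public class LineWrapExample {

    // Minimal stand-ins so the sketch compiles on its own.
    enum Unit { OPERATIONS }

    static class Metric {
        final String name;
        final Unit unit;
        final String description;

        Metric(String name, Unit unit, String description) {
            this.name = name;
            this.unit = unit;
            this.description = description;
        }
    }

    static class MetricRegistry {
        void register(Metric m) {
            System.out.println("registered " + m.name);
        }
    }

    // Before (everything on one over-long line):
    //   registry.register(new Metric("edit_log_clean", Unit.OPERATIONS, "counter of edit log failed to clean"));
    // After: break after a comma and indent the continuation one extra level.
    static void register(MetricRegistry registry) {
        registry.register(new Metric("edit_log_clean", Unit.OPERATIONS,
                "counter of edit log failed to clean"));
    }

    public static void main(String[] args) {
        register(new MetricRegistry());
    }
}
```
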
+ for (String priv : PaloPrivilege.privInPaloToMysql.values()) { TPrivilegeStatus status = new TPrivilegeStatus(); status.setPrivilegeType(priv); status.setGrantee(grantee); diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PaloRole.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PaloRole.java index 6cfa3069a7..e0b293a456 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PaloRole.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PaloRole.java @@ -41,11 +41,11 @@ public class PaloRole implements Writable { public static String ADMIN_ROLE = "admin"; public static PaloRole OPERATOR = new PaloRole(OPERATOR_ROLE, - TablePattern.ALL, PrivBitSet.of(PaloPrivilege.NODE_PRIV, PaloPrivilege.ADMIN_PRIV), - ResourcePattern.ALL, PrivBitSet.of(PaloPrivilege.NODE_PRIV, PaloPrivilege.ADMIN_PRIV)); + TablePattern.ALL, PrivBitSet.of(PaloPrivilege.NODE_PRIV, PaloPrivilege.ADMIN_PRIV), + ResourcePattern.ALL, PrivBitSet.of(PaloPrivilege.NODE_PRIV, PaloPrivilege.ADMIN_PRIV)); public static PaloRole ADMIN = new PaloRole(ADMIN_ROLE, - TablePattern.ALL, PrivBitSet.of(PaloPrivilege.ADMIN_PRIV), - ResourcePattern.ALL, PrivBitSet.of(PaloPrivilege.ADMIN_PRIV)); + TablePattern.ALL, PrivBitSet.of(PaloPrivilege.ADMIN_PRIV), + ResourcePattern.ALL, PrivBitSet.of(PaloPrivilege.ADMIN_PRIV)); private String roleName; private Map tblPatternToPrivs = Maps.newConcurrentMap(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/ResourcePrivEntry.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/ResourcePrivEntry.java index 0137e332ac..3e62b8ef91 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/ResourcePrivEntry.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/ResourcePrivEntry.java @@ -36,8 +36,8 @@ public class ResourcePrivEntry extends PrivEntry { protected ResourcePrivEntry() { } - protected ResourcePrivEntry(PatternMatcher hostPattern, String origHost, PatternMatcher resourcePattern, String origResource, - PatternMatcher userPattern, String user, boolean isDomain, PrivBitSet privSet) { + protected ResourcePrivEntry(PatternMatcher hostPattern, String origHost, PatternMatcher resourcePattern, + String origResource, PatternMatcher userPattern, String user, boolean isDomain, PrivBitSet privSet) { super(hostPattern, origHost, userPattern, user, isDomain, privSet); this.resourcePattern = resourcePattern; this.origResource = origResource; @@ -46,16 +46,19 @@ public class ResourcePrivEntry extends PrivEntry { } } - public static ResourcePrivEntry create(String host, String resourceName, String user, boolean isDomain, PrivBitSet privs) + public static ResourcePrivEntry create(String host, String resourceName, + String user, boolean isDomain, PrivBitSet privs) throws AnalysisException { PatternMatcher hostPattern = PatternMatcher.createMysqlPattern(host, CaseSensibility.HOST.getCaseSensibility()); - PatternMatcher resourcePattern = PatternMatcher.createMysqlPattern(resourceName.equals(ANY_RESOURCE) ? "%" : resourceName, - CaseSensibility.RESOURCE.getCaseSensibility()); + PatternMatcher resourcePattern = PatternMatcher.createMysqlPattern( + resourceName.equals(ANY_RESOURCE) ? 
"%" : resourceName, + CaseSensibility.RESOURCE.getCaseSensibility()); PatternMatcher userPattern = PatternMatcher.createMysqlPattern(user, CaseSensibility.USER.getCaseSensibility()); if (privs.containsNodePriv() || privs.containsDbTablePriv()) { throw new AnalysisException("Resource privilege can not contains node or db table privileges: " + privs); } - return new ResourcePrivEntry(hostPattern, host, resourcePattern, resourceName, userPattern, user, isDomain, privs); + return new ResourcePrivEntry(hostPattern, host, resourcePattern, + resourceName, userPattern, user, isDomain, privs); } public PatternMatcher getResourcePattern() { @@ -125,7 +128,8 @@ public class ResourcePrivEntry extends PrivEntry { super.readFields(in); origResource = Text.readString(in); try { - resourcePattern = PatternMatcher.createMysqlPattern(origResource, CaseSensibility.RESOURCE.getCaseSensibility()); + resourcePattern = PatternMatcher.createMysqlPattern(origResource, + CaseSensibility.RESOURCE.getCaseSensibility()); } catch (AnalysisException e) { throw new IOException(e); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/TablePrivEntry.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/TablePrivEntry.java index 5343f2908a..82752c7ba3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/TablePrivEntry.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/TablePrivEntry.java @@ -61,7 +61,8 @@ public class TablePrivEntry extends DbPrivEntry { throw new AnalysisException("Table privilege can not contains global or resource privileges: " + privs); } - return new TablePrivEntry(hostPattern, host, dbPattern, db, userPattern, user, tblPattern, tbl, isDomain, privs); + return new TablePrivEntry(hostPattern, host, dbPattern, db, + userPattern, user, tblPattern, tbl, isDomain, privs); } public PatternMatcher getTblPattern() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java index 22df74a925..5abeaed7e6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java @@ -450,7 +450,8 @@ public class UserProperty implements Writable { result.add(Lists.newArrayList(PROP_MAX_USER_CONNECTIONS, String.valueOf(commonProperties.getMaxConn()))); // max query instance - result.add(Lists.newArrayList(PROP_MAX_QUERY_INSTANCES, String.valueOf(commonProperties.getMaxQueryInstances()))); + result.add(Lists.newArrayList(PROP_MAX_QUERY_INSTANCES, + String.valueOf(commonProperties.getMaxQueryInstances()))); // sql block rules result.add(Lists.newArrayList(PROP_SQL_BLOCK_RULES, commonProperties.getSqlBlockRules())); diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserPropertyMgr.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserPropertyMgr.java index 2bda17e8a3..aadf3061ce 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserPropertyMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserPropertyMgr.java @@ -225,7 +225,8 @@ public class UserPropertyMgr implements Writable { public void addUserPrivEntriesByResolvedIPs(Map> resolvedIPsMap) { for (UserProperty userProperty : propertyMap.values()) { - userProperty.getWhiteList().addUserPrivEntriesByResolvedIPs(userProperty.getQualifiedUser(), resolvedIPsMap); + userProperty.getWhiteList() + 
.addUserPrivEntriesByResolvedIPs(userProperty.getQualifiedUser(), resolvedIPsMap); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/operators/plans/physical/PhysicalBroadcastHashJoin.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/operators/plans/physical/PhysicalBroadcastHashJoin.java index 3ccbf70657..a5e9f41a48 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/operators/plans/physical/PhysicalBroadcastHashJoin.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/operators/plans/physical/PhysicalBroadcastHashJoin.java @@ -20,7 +20,6 @@ package org.apache.doris.nereids.operators.plans.physical; import org.apache.doris.nereids.operators.OperatorType; import org.apache.doris.nereids.operators.plans.JoinType; import org.apache.doris.nereids.trees.expressions.Expression; -import org.apache.doris.nereids.trees.plans.Plan; import java.util.Objects; import java.util.Optional; diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/ColocatePersistInfo.java b/fe/fe-core/src/main/java/org/apache/doris/persist/ColocatePersistInfo.java index 8016a38cc3..f9e68eeac7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/ColocatePersistInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/ColocatePersistInfo.java @@ -50,7 +50,8 @@ public class ColocatePersistInfo implements Writable { } - public static ColocatePersistInfo createForAddTable(GroupId groupId, long tableId, Map>> backendsPerBucketSeq) { + public static ColocatePersistInfo createForAddTable(GroupId groupId, + long tableId, Map>> backendsPerBucketSeq) { return new ColocatePersistInfo(groupId, tableId, backendsPerBucketSeq); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/CreateTableInfo.java b/fe/fe-core/src/main/java/org/apache/doris/persist/CreateTableInfo.java index a59a68e5f5..5ecf43e147 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/CreateTableInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/CreateTableInfo.java @@ -56,6 +56,7 @@ public class CreateTableInfo implements Writable { Text.writeString(out, dbName); table.write(out); } + public void readFields(DataInput in) throws IOException { dbName = Text.readString(in); table = Table.read(in); diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/DropPartitionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/persist/DropPartitionInfo.java index cbc57dcfbd..3c425c7c29 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/DropPartitionInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/DropPartitionInfo.java @@ -42,7 +42,8 @@ public class DropPartitionInfo implements Writable { private DropPartitionInfo() { } - public DropPartitionInfo(Long dbId, Long tableId, String partitionName, boolean isTempPartition, boolean forceDrop) { + public DropPartitionInfo(Long dbId, Long tableId, String partitionName, + boolean isTempPartition, boolean forceDrop) { this.dbId = dbId; this.tableId = tableId; this.partitionName = partitionName; diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java b/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java index 219143957a..6f1995677f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java @@ -284,8 +284,8 @@ public class EditLog { case OperationType.OP_BATCH_DROP_ROLLUP: { BatchDropInfo batchDropInfo = (BatchDropInfo) journal.getData(); for (long indexId : batchDropInfo.getIndexIdSet()) { - 
catalog.getMaterializedViewHandler().replayDropRollup( - new DropInfo(batchDropInfo.getDbId(), batchDropInfo.getTableId(), indexId, false), catalog); + catalog.getMaterializedViewHandler().replayDropRollup(new DropInfo(batchDropInfo.getDbId(), + batchDropInfo.getTableId(), indexId, false), catalog); } break; } @@ -545,7 +545,8 @@ public class EditLog { break; } case OperationType.OP_BATCH_REMOVE_TXNS: { - final BatchRemoveTransactionsOperation operation = (BatchRemoveTransactionsOperation) journal.getData(); + final BatchRemoveTransactionsOperation operation + = (BatchRemoveTransactionsOperation) journal.getData(); Catalog.getCurrentGlobalTransactionMgr().replayBatchRemoveTransactions(operation); break; } @@ -729,17 +730,19 @@ public class EditLog { case OperationType.OP_DYNAMIC_PARTITION: case OperationType.OP_MODIFY_IN_MEMORY: case OperationType.OP_MODIFY_REPLICATION_NUM: { - ModifyTablePropertyOperationLog modifyTablePropertyOperationLog = (ModifyTablePropertyOperationLog) journal.getData(); - catalog.replayModifyTableProperty(opCode, modifyTablePropertyOperationLog); + ModifyTablePropertyOperationLog log = (ModifyTablePropertyOperationLog) journal.getData(); + catalog.replayModifyTableProperty(opCode, log); break; } case OperationType.OP_MODIFY_DISTRIBUTION_BUCKET_NUM: { - ModifyTableDefaultDistributionBucketNumOperationLog modifyTableDefaultDistributionBucketNumOperationLog = (ModifyTableDefaultDistributionBucketNumOperationLog) journal.getData(); - catalog.replayModifyTableDefaultDistributionBucketNum(modifyTableDefaultDistributionBucketNumOperationLog); + ModifyTableDefaultDistributionBucketNumOperationLog log + = (ModifyTableDefaultDistributionBucketNumOperationLog) journal.getData(); + catalog.replayModifyTableDefaultDistributionBucketNum(log); break; } case OperationType.OP_REPLACE_TEMP_PARTITION: { - ReplacePartitionOperationLog replaceTempPartitionLog = (ReplacePartitionOperationLog) journal.getData(); + ReplacePartitionOperationLog replaceTempPartitionLog + = (ReplacePartitionOperationLog) journal.getData(); catalog.replayReplaceTempPartition(replaceTempPartitionLog); break; } @@ -850,7 +853,8 @@ public class EditLog { } } catch (MetaNotFoundException e) { /** - * In the following cases, doris may record metadata modification information for a table that no longer exists. + * In the following cases, doris may record metadata modification information + * for a table that no longer exists. * 1. Thread 1: get TableA object * 2. Thread 2: lock db and drop table and record edit log of the dropped TableA * 3. 
Thread 1: lock table, modify table and record edit log of the modified TableA diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/ModifyCommentOperationLog.java b/fe/fe-core/src/main/java/org/apache/doris/persist/ModifyCommentOperationLog.java index ed3d737f2a..0b6f1f9845 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/ModifyCommentOperationLog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/ModifyCommentOperationLog.java @@ -47,7 +47,8 @@ public class ModifyCommentOperationLog implements Writable { @SerializedName(value = "tblComment") private String tblComment; - private ModifyCommentOperationLog(Type type, long dbId, long tblId, Map colToComment, String tblComment) { + private ModifyCommentOperationLog(Type type, long dbId, long tblId, + Map colToComment, String tblComment) { this.type = type; this.dbId = dbId; this.tblId = tblId; diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/PartitionPersistInfo.java b/fe/fe-core/src/main/java/org/apache/doris/persist/PartitionPersistInfo.java index f740c3ad74..01cc9e8c6e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/PartitionPersistInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/PartitionPersistInfo.java @@ -50,8 +50,8 @@ public class PartitionPersistInfo implements Writable { } public PartitionPersistInfo(long dbId, long tableId, Partition partition, Range range, - PartitionItem listPartitionItem, DataProperty dataProperty, ReplicaAllocation replicaAlloc, - boolean isInMemory, boolean isTempPartition) { + PartitionItem listPartitionItem, DataProperty dataProperty, ReplicaAllocation replicaAlloc, + boolean isInMemory, boolean isTempPartition) { this.dbId = dbId; this.tableId = tableId; this.partition = partition; diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/ReplicaPersistInfo.java b/fe/fe-core/src/main/java/org/apache/doris/persist/ReplicaPersistInfo.java index be999316ad..6a4fe11ead 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/ReplicaPersistInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/ReplicaPersistInfo.java @@ -36,7 +36,8 @@ public class ReplicaPersistInfo implements Writable { CLEAR_ROLLUPINFO(7), // this default op is used for upgrate to femeta_45, add default op to solve this scenario // the old image and old persist log does not have op field, so the op field is null when upgrate to fe meta 45 - // then fe will dump image and want to write op type to image, op type is null and then throw null pointer exception + // then fe will dump image and want to write op type to image, + // op type is null and then throw null pointer exception // add the default op, when read from image and op type == null ,set op type to default op to skip the exception DEFAULT_OP(8), TABLET_INFO(9); @@ -188,10 +189,10 @@ public class ReplicaPersistInfo implements Writable { dbId, tableId, partitionId, indexId, -1L, -1L, -1L, -1L, -1, -1L, -1L, -1L, -1L); } - public static ReplicaPersistInfo createForReport(long dbId, long tblId, long partitionId, long indexId, long tabletId, - long backendId, long replicaId) { - return new ReplicaPersistInfo(ReplicaOperationType.TABLET_INFO, dbId, tblId, partitionId, indexId, tabletId, backendId, replicaId, - -1L, -1, -1L, -1L, -1L, -1L); + public static ReplicaPersistInfo createForReport(long dbId, long tblId, long partitionId, long indexId, + long tabletId, long backendId, long replicaId) { + return new ReplicaPersistInfo(ReplicaOperationType.TABLET_INFO, dbId, tblId, partitionId, + indexId, 
tabletId, backendId, replicaId, -1L, -1, -1L, -1L, -1L, -1L); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/gson/GsonUtils.java b/fe/fe-core/src/main/java/org/apache/doris/persist/gson/GsonUtils.java index 1fcb098b96..2192a4edbb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/gson/GsonUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/gson/GsonUtils.java @@ -95,7 +95,8 @@ import java.util.concurrent.atomic.AtomicBoolean; public class GsonUtils { // runtime adapter for class "Type" - private static RuntimeTypeAdapterFactory columnTypeAdapterFactory = RuntimeTypeAdapterFactory + private static RuntimeTypeAdapterFactory columnTypeAdapterFactory + = RuntimeTypeAdapterFactory .of(org.apache.doris.catalog.Type.class, "clazz") // TODO: register other sub type after Doris support more types. .registerSubtype(ScalarType.class, ScalarType.class.getSimpleName()) @@ -104,7 +105,8 @@ public class GsonUtils { .registerSubtype(StructType.class, StructType.class.getSimpleName()); // runtime adapter for class "DistributionInfo" - private static RuntimeTypeAdapterFactory distributionInfoTypeAdapterFactory = RuntimeTypeAdapterFactory + private static RuntimeTypeAdapterFactory distributionInfoTypeAdapterFactory + = RuntimeTypeAdapterFactory .of(DistributionInfo.class, "clazz") .registerSubtype(HashDistributionInfo.class, HashDistributionInfo.class.getSimpleName()) .registerSubtype(RandomDistributionInfo.class, RandomDistributionInfo.class.getSimpleName()); @@ -383,7 +385,7 @@ public class GsonUtils { } } - public final static class ImmutableMapDeserializer implements JsonDeserializer> { + public static final class ImmutableMapDeserializer implements JsonDeserializer> { @Override public ImmutableMap deserialize(final JsonElement json, final Type type, final JsonDeserializationContext context) throws JsonParseException { diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaFooter.java b/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaFooter.java index bc35027208..0857e30909 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaFooter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaFooter.java @@ -64,7 +64,8 @@ public class MetaFooter { MetaMagicNumber magicNumber = MetaMagicNumber.read(raf); if (!Arrays.equals(MetaMagicNumber.MAGIC, magicNumber.getBytes())) { LOG.warn("Image file {} format mismatch. Expected magic number is {}, actual is {}", - imageFile.getPath(), Arrays.toString(MetaMagicNumber.MAGIC), Arrays.toString(magicNumber.getBytes())); + imageFile.getPath(), Arrays.toString(MetaMagicNumber.MAGIC), + Arrays.toString(magicNumber.getBytes())); // this will compatible with old image long footerIndex = fileLength - CHECKSUM_LENGTH_SIZE; raf.seek(footerIndex); diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaHeader.java b/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaHeader.java index 0528f2dc2a..8f21210154 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaHeader.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaHeader.java @@ -56,7 +56,8 @@ public class MetaHeader { MetaMagicNumber magicNumber = MetaMagicNumber.read(raf); if (!Arrays.equals(MetaMagicNumber.MAGIC, magicNumber.getBytes())) { LOG.warn("Image file {} format mismatch. 
Expected magic number is {}, actual is {}", - imageFile.getPath(), Arrays.toString(MetaMagicNumber.MAGIC), Arrays.toString(magicNumber.getBytes())); + imageFile.getPath(), Arrays.toString(MetaMagicNumber.MAGIC), + Arrays.toString(magicNumber.getBytes())); return EMPTY_HEADER; } MetaJsonHeader metaJsonHeader = MetaJsonHeader.read(raf); diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaPersistMethod.java b/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaPersistMethod.java index ded0478baa..52daed72e6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaPersistMethod.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaPersistMethod.java @@ -130,7 +130,8 @@ public class MetaPersistMethod { metaPersistMethod.readMethod = Catalog.class.getDeclaredMethod("loadBackupHandler", DataInputStream.class, long.class); metaPersistMethod.writeMethod = - Catalog.class.getDeclaredMethod("saveBackupHandler", CountingDataOutputStream.class, long.class); + Catalog.class.getDeclaredMethod("saveBackupHandler", + CountingDataOutputStream.class, long.class); break; case "paloAuth": metaPersistMethod.readMethod = @@ -181,7 +182,8 @@ public class MetaPersistMethod { metaPersistMethod.readMethod = Catalog.class.getDeclaredMethod("loadDeleteHandler", DataInputStream.class, long.class); metaPersistMethod.writeMethod = - Catalog.class.getDeclaredMethod("saveDeleteHandler", CountingDataOutputStream.class, long.class); + Catalog.class.getDeclaredMethod("saveDeleteHandler", + CountingDataOutputStream.class, long.class); break; case "sqlBlockRule": metaPersistMethod.readMethod = diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaWriter.java b/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaWriter.java index 781394f078..fc0f87e652 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaWriter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaWriter.java @@ -95,7 +95,8 @@ public class MetaWriter { public static void write(File imageFile, Catalog catalog) throws IOException { // save image does not need any lock. because only checkpoint thread will call this method. - LOG.info("start to save image to {}. is ckpt: {}", imageFile.getAbsolutePath(), Catalog.isCheckpointThread()); + LOG.info("start to save image to {}. is ckpt: {}", + imageFile.getAbsolutePath(), Catalog.isCheckpointThread()); final Reference checksum = new Reference<>(0L); long saveImageStartTime = System.currentTimeMillis(); // MetaHeader should use output stream in the future. diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticEvalNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticEvalNode.java index a439ed3f39..5e76b8c9b1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticEvalNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticEvalNode.java @@ -48,7 +48,7 @@ import java.util.List; * Computation of analytic exprs. 
*/ public class AnalyticEvalNode extends PlanNode { - private final static Logger LOG = LoggerFactory.getLogger(AnalyticEvalNode.class); + private static final Logger LOG = LoggerFactory.getLogger(AnalyticEvalNode.class); private List analyticFnCalls; @@ -102,6 +102,7 @@ public class AnalyticEvalNode extends PlanNode { public List getPartitionExprs() { return partitionExprs; } + public List getOrderByElements() { return orderByElements; } @@ -261,6 +262,7 @@ public class AnalyticEvalNode extends PlanNode { return output.toString(); } + public void computeCosts(TQueryOptions queryOptions) { Preconditions.checkNotNull(fragmentId, "PlanNode must be placed into a fragment before calling this method."); diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticPlanner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticPlanner.java index e842b57ba9..ef54011bca 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticPlanner.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/AnalyticPlanner.java @@ -71,7 +71,7 @@ import java.util.List; * ... */ public class AnalyticPlanner { - private final static Logger LOG = LoggerFactory.getLogger(AnalyticPlanner.class); + private static final Logger LOG = LoggerFactory.getLogger(AnalyticPlanner.class); private final AnalyticInfo analyticInfo; private final Analyzer analyzer; @@ -654,11 +654,10 @@ public class AnalyticPlanner { for (int i = 0; i < analyticExprs.size(); ++i) { SlotDescriptor logicalOutputSlot = logicalOutputSlots.get(i); - // SlotDescriptor physicalOutputSlot =analyzer.getDescTbl().copySlotDescriptor(logicalOutputSlot, physicalOutputTuple); SlotDescriptor physicalOutputSlot = analyzer.getDescTbl().copySlotDescriptor(physicalOutputTuple, logicalOutputSlot); physicalOutputSlot.setIsMaterialized(true); - // in impala setIntermediateType only used in uda + // in impala setIntermediateType only used in uda if (requiresIntermediateTuple) { SlotDescriptor logicalIntermediateSlot = logicalIntermediateSlots.get(i); @@ -806,6 +805,7 @@ public class AnalyticPlanner { } private static final SizeLt SIZE_LT; + static { SIZE_LT = new SizeLt(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/BrokerScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/BrokerScanNode.java index 07ec26cae3..f76cdfc425 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/BrokerScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/BrokerScanNode.java @@ -69,9 +69,6 @@ import com.google.common.collect.Sets; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - import java.nio.charset.Charset; import java.util.ArrayList; import java.util.Collections; @@ -316,7 +313,8 @@ public class BrokerScanNode extends LoadScanNode { if (brokerDesc.getStorageType() == StorageBackend.StorageType.BROKER) { FsBroker broker = null; try { - broker = Catalog.getCurrentCatalog().getBrokerMgr().getBroker(brokerDesc.getName(), selectedBackend.getHost()); + broker = Catalog.getCurrentCatalog().getBrokerMgr() + .getBroker(brokerDesc.getName(), selectedBackend.getHost()); } catch (AnalysisException e) { throw new UserException(e.getMessage()); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/CrossJoinNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/CrossJoinNode.java index 81accd9810..59c4c15d61 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/planner/CrossJoinNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/CrossJoinNode.java @@ -33,11 +33,11 @@ import org.apache.logging.log4j.Logger; * Cross join between left child and right child. */ public class CrossJoinNode extends PlanNode { - private final static Logger LOG = LogManager.getLogger(CrossJoinNode.class); + private static final Logger LOG = LogManager.getLogger(CrossJoinNode.class); // Default per-host memory requirement used if no valid stats are available. // TODO: Come up with a more useful heuristic (e.g., based on scanned partitions). - private final static long DEFAULT_PER_HOST_MEM = 2L * 1024L * 1024L * 1024L; + private static final long DEFAULT_PER_HOST_MEM = 2L * 1024L * 1024L * 1024L; private final TableRef innerRef; public CrossJoinNode(PlanNodeId id, PlanNode outer, PlanNode inner, TableRef innerRef) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/DistributedPlanColocateRule.java b/fe/fe-core/src/main/java/org/apache/doris/planner/DistributedPlanColocateRule.java index dc864499b8..1ff492d0b6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/DistributedPlanColocateRule.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/DistributedPlanColocateRule.java @@ -25,5 +25,6 @@ public class DistributedPlanColocateRule { public static final String SUPPORT_ONLY_OLAP_TABLE = "Only olap table support colocate plan"; public static final String TABLE_NOT_IN_THE_SAME_GROUP = "Tables are not in the same group"; public static final String COLOCATE_GROUP_IS_NOT_STABLE = "Colocate group is not stable"; - public static final String INCONSISTENT_DISTRIBUTION_OF_TABLE_AND_QUERY = "Inconsistent distribution of table and querie"; + public static final String INCONSISTENT_DISTRIBUTION_OF_TABLE_AND_QUERY + = "Inconsistent distribution of table and queries"; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/DistributedPlanner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/DistributedPlanner.java index 0e9f511c7f..46f168ab40 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/DistributedPlanner.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/DistributedPlanner.java @@ -59,7 +59,7 @@ import java.util.stream.Collectors; * from a single-node plan that can be sent to the backend. 
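
The CrossJoinNode hunk just above (and the AcceptListener, AnalyticEvalNode, and AnalyticPlanner ones earlier) reorders `final static` to `static final`, which is the modifier order recommended by the Java Language Specification and enforced by Checkstyle's `ModifierOrder` check. A tiny sketch of the corrected form, with a placeholder class name:

```
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class ModifierOrderExample {
    // Before:  private final static Logger LOG = ...;   (flagged by ModifierOrder)
    // After:   "static" comes before "final", matching the JLS-recommended order.
    private static final Logger LOG = LogManager.getLogger(ModifierOrderExample.class);

    public static void main(String[] args) {
        LOG.info("modifier order example");
    }
}
```
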
*/ public class DistributedPlanner { - private final static Logger LOG = LogManager.getLogger(DistributedPlanner.class); + private static final Logger LOG = LogManager.getLogger(DistributedPlanner.class); private final PlannerContext ctx; @@ -491,11 +491,12 @@ public class DistributedPlanner { } } - private boolean dataDistributionMatchEqPredicate(Map, List> scanNodeWithJoinConjuncts, - List cannotReason) { + private boolean dataDistributionMatchEqPredicate(Map, + List> scanNodeWithJoinConjuncts, List cannotReason) { // If left table and right table is same table and they select same single partition or no partition // they are naturally colocate relationship no need to check colocate group - for (Map.Entry, List> entry : scanNodeWithJoinConjuncts.entrySet()) { + for (Map.Entry, List> entry + : scanNodeWithJoinConjuncts.entrySet()) { OlapScanNode leftScanNode = entry.getKey().first; OlapScanNode rightScanNode = entry.getKey().second; List eqPredicates = entry.getValue(); @@ -626,8 +627,8 @@ public class DistributedPlanner { if (leftDistribution instanceof HashDistributionInfo) { // use the table_name + '-' + column_name as check condition List leftDistributeColumns = ((HashDistributionInfo) leftDistribution).getDistributionColumns(); - List leftDistributeColumnNames = leftDistributeColumns.stream(). - map(col -> leftTable.getName() + "." + col.getName()).collect(Collectors.toList()); + List leftDistributeColumnNames = leftDistributeColumns.stream() + .map(col -> leftTable.getName() + "." + col.getName()).collect(Collectors.toList()); List leftJoinColumnNames = new ArrayList<>(); List rightExprs = new ArrayList<>(); @@ -1105,8 +1106,8 @@ public class DistributedPlanner { childFragment.addPlanRoot(node); mergeFragment = childFragment; } else { - DataPartition mergePartition = - partitionExprs == null ? DataPartition.UNPARTITIONED : DataPartition.hashPartitioned(partitionExprs); + DataPartition mergePartition = partitionExprs == null + ? DataPartition.UNPARTITIONED : DataPartition.hashPartitioned(partitionExprs); // Convert the existing node to a preaggregation. 
AggregationNode preaggNode = (AggregationNode) node.getChild(0); diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/EsScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/EsScanNode.java index cad5eeefd0..9efacdf813 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/EsScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/EsScanNode.java @@ -176,10 +176,12 @@ public class EsScanNode extends ScanNode { // only do partition(es index level) prune private List getShardLocations() throws UserException { - // has to get partition info from es state not from table because the partition info is generated from es cluster state dynamically + // has to get partition info from es state not from table because the partition + // info is generated from es cluster state dynamically if (esTablePartitions == null) { if (table.getLastMetaDataSyncException() != null) { - throw new UserException("fetch es table [" + table.getName() + "] metadata failure: " + table.getLastMetaDataSyncException().getLocalizedMessage()); + throw new UserException("fetch es table [" + table.getName() + + "] metadata failure: " + table.getLastMetaDataSyncException().getLocalizedMessage()); } throw new UserException("EsTable metadata has not been synced, Try it later"); } @@ -214,7 +216,8 @@ public class EsScanNode extends ScanNode { int numBe = Math.min(3, size); List shardAllocations = new ArrayList<>(); for (EsShardRouting item : shardRouting) { - shardAllocations.add(EsTable.TRANSPORT_HTTP.equals(table.getTransport()) ? item.getHttpAddress() : item.getAddress()); + shardAllocations.add(EsTable.TRANSPORT_HTTP.equals(table.getTransport()) + ? item.getHttpAddress() : item.getAddress()); } Collections.shuffle(shardAllocations, random); diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/HashDistributionPruner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/HashDistributionPruner.java index 9fc4fa5406..7ec05c002f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/HashDistributionPruner.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/HashDistributionPruner.java @@ -82,7 +82,8 @@ public class HashDistributionPruner implements DistributionPruner { return Lists.newArrayList(bucketsList); } InPredicate inPredicate = filter.getInPredicate(); - if (null == inPredicate || inPredicate.getInElementNum() * complex > Config.max_distribution_pruner_recursion_depth) { + if (null == inPredicate + || inPredicate.getInElementNum() * complex > Config.max_distribution_pruner_recursion_depth) { // equal one value if (filter.lowerBoundInclusive && filter.upperBoundInclusive && filter.lowerBound != null && filter.upperBound != null diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/HashJoinNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/HashJoinNode.java index 0bc86e724a..4d149e6cf2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/HashJoinNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/HashJoinNode.java @@ -65,7 +65,7 @@ import java.util.stream.Collectors; * a single input tuple. 
*/ public class HashJoinNode extends PlanNode { - private final static Logger LOG = LogManager.getLogger(HashJoinNode.class); + private static final Logger LOG = LogManager.getLogger(HashJoinNode.class); private TableRef innerRef; private final JoinOperator joinOp; @@ -477,8 +477,8 @@ public class HashJoinNode extends PlanNode { * - we adjust the NDVs from both sides to account for predicates that may * might have reduce the cardinality and NDVs */ - private long getGenericJoinCardinality(List eqJoinConjunctSlots, long lhsCard, - long rhsCard) { + private long getGenericJoinCardinality(List eqJoinConjunctSlots, + long lhsCard, long rhsCard) { Preconditions.checkState(joinOp.isInnerJoin() || joinOp.isOuterJoin()); Preconditions.checkState(!eqJoinConjunctSlots.isEmpty()); Preconditions.checkState(lhsCard >= 0 && rhsCard >= 0); @@ -768,8 +768,8 @@ public class HashJoinNode extends PlanNode { output.append(detailPrefix).append("equal join conjunct: ").append(eqJoinPredicate.toSql()).append("\n"); } if (!otherJoinConjuncts.isEmpty()) { - output.append(detailPrefix).append("other join predicates: ").append(getExplainString(otherJoinConjuncts)) - .append("\n"); + output.append(detailPrefix).append("other join predicates: ") + .append(getExplainString(otherJoinConjuncts)).append("\n"); } if (!conjuncts.isEmpty()) { output.append(detailPrefix).append("other predicates: ").append(getExplainString(conjuncts)).append("\n"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/HiveScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/HiveScanNode.java index de4f3b5261..6902aa4c11 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/HiveScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/HiveScanNode.java @@ -17,7 +17,6 @@ package org.apache.doris.planner; -import org.apache.commons.lang3.StringUtils; import org.apache.doris.analysis.Analyzer; import org.apache.doris.analysis.BrokerDesc; import org.apache.doris.analysis.Expr; @@ -34,7 +33,7 @@ import org.apache.doris.thrift.TExplainLevel; import com.google.common.base.Strings; import com.google.common.collect.Lists; - +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/JoinCostEvaluation.java b/fe/fe-core/src/main/java/org/apache/doris/planner/JoinCostEvaluation.java index e3612c6c5a..c79c3d416f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/JoinCostEvaluation.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/JoinCostEvaluation.java @@ -37,7 +37,7 @@ import org.apache.logging.log4j.Logger; * and result in both "broadcastCost" and "partitionCost" be 0. And this will lead to a SHUFFLE join. 
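The JoinCostEvaluation javadoc above compares a broadcast cost against a partition (shuffle) cost and notes that missing statistics drive both to 0, which leads to a SHUFFLE join. A rough, generic sketch of that comparison follows; the formulas and names are illustrative assumptions, not the exact ones used by this class:

```
// Rough sketch of broadcast-vs-shuffle join costing; not the exact Doris formula.
public final class JoinCostSketch {
    /** Bytes shipped if the right (build) side is broadcast to every executor node. */
    static long broadcastCost(long rhsBytes, int numNodes) {
        return rhsBytes * numNodes;
    }

    /** Bytes shipped if both sides are hash-partitioned (shuffled) once. */
    static long partitionCost(long lhsBytes, long rhsBytes) {
        return lhsBytes + rhsBytes;
    }

    public static void main(String[] args) {
        long lhsBytes = 10L << 30;   // 10 GiB probe side
        long rhsBytes = 100L << 20;  // 100 MiB build side
        int numNodes = 8;
        // With valid stats the cheaper strategy wins. As the javadoc above notes,
        // unknown cardinality makes both costs 0 in the real class, so the planner
        // falls back to a SHUFFLE join.
        boolean broadcast = broadcastCost(rhsBytes, numNodes) < partitionCost(lhsBytes, rhsBytes);
        System.out.println(broadcast ? "BROADCAST" : "SHUFFLE");
    }
}
```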
*/ public class JoinCostEvaluation { - private final static Logger LOG = LogManager.getLogger(JoinCostEvaluation.class); + private static final Logger LOG = LogManager.getLogger(JoinCostEvaluation.class); private final long rhsTreeCardinality; private final float rhsTreeAvgRowSize; diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/LoadScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/LoadScanNode.java index 980876c630..5be209db0a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/LoadScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/LoadScanNode.java @@ -62,14 +62,16 @@ public abstract class LoadScanNode extends ScanNode { super(id, desc, planNodeName, nodeType); } - protected void initAndSetWhereExpr(Expr whereExpr, TupleDescriptor tupleDesc, Analyzer analyzer) throws UserException { + protected void initAndSetWhereExpr(Expr whereExpr, TupleDescriptor tupleDesc, + Analyzer analyzer) throws UserException { Expr newWhereExpr = initWhereExpr(whereExpr, tupleDesc, analyzer); if (newWhereExpr != null) { addConjuncts(newWhereExpr.getConjuncts()); } } - protected void initAndSetPrecedingFilter(Expr whereExpr, TupleDescriptor tupleDesc, Analyzer analyzer) throws UserException { + protected void initAndSetPrecedingFilter(Expr whereExpr, + TupleDescriptor tupleDesc, Analyzer analyzer) throws UserException { Expr newWhereExpr = initWhereExpr(whereExpr, tupleDesc, analyzer); if (newWhereExpr != null) { addPreFilterConjuncts(newWhereExpr.getConjuncts()); @@ -88,7 +90,8 @@ public abstract class LoadScanNode extends ScanNode { // substitute SlotRef in filter expression // where expr must be equal first to transfer some predicates(eg: BetweenPredicate to BinaryPredicate) - Expr newWhereExpr = analyzer.getExprRewriter().rewrite(whereExpr, analyzer, ExprRewriter.ClauseType.WHERE_CLAUSE); + Expr newWhereExpr = analyzer.getExprRewriter() + .rewrite(whereExpr, analyzer, ExprRewriter.ClauseType.WHERE_CLAUSE); List slots = Lists.newArrayList(); newWhereExpr.collect(SlotRef.class, slots); @@ -110,7 +113,8 @@ public abstract class LoadScanNode extends ScanNode { return newWhereExpr; } - protected void checkBitmapCompatibility(Analyzer analyzer, SlotDescriptor slotDesc, Expr expr) throws AnalysisException { + protected void checkBitmapCompatibility(Analyzer analyzer, + SlotDescriptor slotDesc, Expr expr) throws AnalysisException { if (slotDesc.getColumn().getAggregationType() == AggregateType.BITMAP_UNION) { expr.analyze(analyzer); if (!expr.getType().isBitmapType()) { @@ -121,11 +125,12 @@ public abstract class LoadScanNode extends ScanNode { } } - protected void checkQuantileStateCompatibility(Analyzer analyzer, SlotDescriptor slotDesc, Expr expr) throws AnalysisException { + protected void checkQuantileStateCompatibility(Analyzer analyzer, + SlotDescriptor slotDesc, Expr expr) throws AnalysisException { if (slotDesc.getColumn().getAggregationType() == AggregateType.QUANTILE_UNION) { expr.analyze(analyzer); if (!expr.getType().isQuantileStateType()) { - String errorMsg = String.format("quantile_state column %s require the function return type is QUANTILE_STATE"); + String errorMsg = "quantile_state column %s require the function return type is QUANTILE_STATE"; throw new AnalysisException(errorMsg); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java index e2e6ec0d41..82196efa9e 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java @@ -90,7 +90,7 @@ public class OlapScanNode extends ScanNode { private static final Logger LOG = LogManager.getLogger(OlapScanNode.class); // average compression ratio in doris storage engine - private final static int COMPRESSION_RATIO = 5; + private static final int COMPRESSION_RATIO = 5; private List result = new ArrayList<>(); /* @@ -238,7 +238,8 @@ public class OlapScanNode extends ScanNode { * @param reasonOfDisable * @throws UserException */ - public void updateScanRangeInfoByNewMVSelector(long selectedIndexId, boolean isPreAggregation, String reasonOfDisable) + public void updateScanRangeInfoByNewMVSelector(long selectedIndexId, + boolean isPreAggregation, String reasonOfDisable) throws UserException { if (selectedIndexId == this.selectedIndexId && isPreAggregation == this.isPreAggregation) { return; @@ -366,7 +367,8 @@ public class OlapScanNode extends ScanNode { final MaterializedIndex baseIndex = partition.getBaseIndex(); cardinality += baseIndex.getRowCount(); } - Catalog.getCurrentCatalog().getStatisticsManager().getStatistics().mockTableStatsWithRowCount(tableId, cardinality); + Catalog.getCurrentCatalog().getStatisticsManager() + .getStatistics().mockTableStatsWithRowCount(tableId, cardinality); } @Override @@ -427,7 +429,8 @@ public class OlapScanNode extends ScanNode { cardinality = statsDeriveResult.getRowCount(); } - private Collection partitionPrune(PartitionInfo partitionInfo, PartitionNames partitionNames) throws AnalysisException { + private Collection partitionPrune(PartitionInfo partitionInfo, + PartitionNames partitionNames) throws AnalysisException { PartitionPruner partitionPruner = null; Map keyItemMap; if (partitionNames != null) { @@ -532,7 +535,8 @@ public class OlapScanNode extends ScanNode { continue; } if (needCheckTags && !allowedTags.isEmpty() && !allowedTags.contains(backend.getTag())) { - String err = String.format("Replica on backend %d with tag %s, which is not in user's resource tags: %s", + String err = String.format("Replica on backend %d with tag %s," + + " which is not in user's resource tags: %s", backend.getId(), backend.getTag(), allowedTags); if (LOG.isDebugEnabled()) { LOG.debug(err); @@ -604,7 +608,8 @@ public class OlapScanNode extends ScanNode { for (long id : selectedPartitionIds) { Partition partition = olapTable.getPartition(id); if (partition.getState() == PartitionState.RESTORE) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_BAD_PARTITION_STATE, partition.getName(), "RESTORING"); + ErrorReport.reportAnalysisException(ErrorCode.ERR_BAD_PARTITION_STATE, + partition.getName(), "RESTORING"); } } LOG.debug("partition prune cost: {} ms, partitions: {}", diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/OlapTableSink.java b/fe/fe-core/src/main/java/org/apache/doris/planner/OlapTableSink.java index b9863033c0..3b462f6d20 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/OlapTableSink.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/OlapTableSink.java @@ -102,7 +102,8 @@ public class OlapTableSink extends DataSink { tSink.setLoadChannelTimeoutS(loadChannelTimeoutS); tSink.setSendBatchParallelism(sendBatchParallelism); if (loadToSingleTablet && !(dstTable.getDefaultDistributionInfo() instanceof RandomDistributionInfo)) { - throw new AnalysisException("if load_to_single_tablet set to true, the olap table must be with random distribution"); + throw new 
AnalysisException("if load_to_single_tablet set to true," + + " the olap table must be with random distribution"); } tSink.setLoadToSingleTablet(loadToSingleTablet); tDataSink = new TDataSink(TDataSinkType.OLAP_TABLE_SINK); @@ -327,11 +328,13 @@ public class OlapTableSink extends DataSink { for (Tablet tablet : index.getTablets()) { Multimap bePathsMap = tablet.getNormalReplicaBackendPathMap(); if (bePathsMap.keySet().size() < quorum) { - throw new UserException(InternalErrorCode.REPLICA_FEW_ERR, - "tablet " + tablet.getId() + " has few replicas: " + bePathsMap.keySet().size() - + ", alive backends: [" + StringUtils.join(bePathsMap.keySet(), ",") + "]"); + throw new UserException(InternalErrorCode.REPLICA_FEW_ERR, "tablet " + tablet.getId() + + " has few replicas: " + bePathsMap.keySet().size() + + ", alive backends: [" + StringUtils.join(bePathsMap.keySet(), ",") + + "]"); } - locationParam.addToTablets(new TTabletLocation(tablet.getId(), Lists.newArrayList(bePathsMap.keySet()))); + locationParam.addToTablets(new TTabletLocation(tablet.getId(), + Lists.newArrayList(bePathsMap.keySet()))); allBePathsMap.putAll(bePathsMap); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionColumnFilter.java b/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionColumnFilter.java index 5fa1af7832..0e08e06316 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionColumnFilter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionColumnFilter.java @@ -133,5 +133,4 @@ public class PartitionColumnFilter { } return str; } -}; -/* vim: set ts=4 sw=4 sts=4 tw=100 noet: */ +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionPruner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionPruner.java index 2bcd149b18..358dd20a40 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionPruner.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionPruner.java @@ -24,4 +24,4 @@ import java.util.Collection; public interface PartitionPruner { // return partition after pruning Collection prune() throws AnalysisException; -}; +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragment.java b/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragment.java index 1ecc3342f3..b8720efe70 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragment.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragment.java @@ -78,7 +78,7 @@ import java.util.stream.Collectors; * fix that */ public class PlanFragment extends TreeNode { - private final static Logger LOG = LogManager.getLogger(PlanFragment.class); + private static final Logger LOG = LogManager.getLogger(PlanFragment.class); // id for this plan fragment private PlanFragmentId fragmentId; diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/PlanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/PlanNode.java index 140e6d32c6..6ddca7ac81 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/PlanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/PlanNode.java @@ -71,8 +71,8 @@ import java.util.Set; * this node, ie, they only reference tuples materialized by this node or one of * its children (= are bound by tupleIds). 
*/ -abstract public class PlanNode extends TreeNode { - private final static Logger LOG = LogManager.getLogger(PlanNode.class); +public abstract class PlanNode extends TreeNode { + private static final Logger LOG = LogManager.getLogger(PlanNode.class); protected String planNodeName; @@ -360,7 +360,8 @@ abstract public class PlanNode extends TreeNode { List args = new ArrayList<>(); args.add(Type.BOOLEAN); args.add(Type.BOOLEAN); - Function function = new Function(new FunctionName("", compoundPredicate.getOp().toString()), args, Type.BOOLEAN, false); + Function function = new Function(new FunctionName("", compoundPredicate.getOp().toString()), + args, Type.BOOLEAN, false); function.setBinaryType(TFunctionBinaryType.BUILTIN); expr.setFn(function); } @@ -375,7 +376,8 @@ abstract public class PlanNode extends TreeNode { while (targetConjuncts.size() > 1) { List newTargetConjuncts = Lists.newArrayList(); for (int i = 0; i < targetConjuncts.size(); i += 2) { - Expr expr = i + 1 < targetConjuncts.size() ? new CompoundPredicate(CompoundPredicate.Operator.AND, targetConjuncts.get(i), + Expr expr = i + 1 < targetConjuncts.size() + ? new CompoundPredicate(CompoundPredicate.Operator.AND, targetConjuncts.get(i), targetConjuncts.get(i + 1)) : targetConjuncts.get(i); newTargetConjuncts.add(expr); } @@ -797,7 +799,7 @@ abstract public class PlanNode extends TreeNode { * The second issue is addressed by an exponential backoff when multiplying each * additional selectivity into the final result. */ - static protected double computeCombinedSelectivity(List conjuncts) { + protected static double computeCombinedSelectivity(List conjuncts) { // Collect all estimated selectivities. List selectivities = new ArrayList<>(); for (Expr e : conjuncts) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/Planner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/Planner.java index b29e581e6d..c563ae126c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/Planner.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/Planner.java @@ -232,7 +232,8 @@ public class Planner { // Optimize the transfer of query statistic when query doesn't contain limit. PlanFragment rootFragment = fragments.get(fragments.size() - 1); - QueryStatisticsTransferOptimizer queryStatisticTransferOptimizer = new QueryStatisticsTransferOptimizer(rootFragment); + QueryStatisticsTransferOptimizer queryStatisticTransferOptimizer + = new QueryStatisticsTransferOptimizer(rootFragment); queryStatisticTransferOptimizer.optimizeQueryStatisticsTransfer(); // Create runtime filters. diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/PlannerContext.java b/fe/fe-core/src/main/java/org/apache/doris/planner/PlannerContext.java index 86a228eb9f..3ff4d5e7b7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/PlannerContext.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/PlannerContext.java @@ -35,11 +35,11 @@ import org.apache.logging.log4j.Logger; * parameters and state such as plan-node and plan-fragment id generators. */ public class PlannerContext { - private final static Logger LOG = LogManager.getLogger(PlannerContext.class); + private static final Logger LOG = LogManager.getLogger(PlannerContext.class); // Estimate of the overhead imposed by storing data in a hash tbl; // used for determining whether a broadcast join is feasible. 
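Earlier in this hunk, the computeCombinedSelectivity javadoc mentions an exponential backoff when multiplying each additional conjunct's selectivity into the result, so correlated predicates do not drive the estimate toward zero. A generic sketch of that idea follows; the damping exponent is an assumption for illustration, not necessarily the one used here:

```
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Generic sketch of "exponential backoff" when combining conjunct selectivities.
// The 1 / 2^i damping exponent is an illustrative assumption.
public final class SelectivityBackoff {
    static double combine(List<Double> selectivities) {
        List<Double> sorted = new ArrayList<>(selectivities);
        Collections.sort(sorted); // most selective (smallest) first
        double result = 1.0;
        for (int i = 0; i < sorted.size(); i++) {
            // Each additional conjunct contributes progressively less.
            result *= Math.pow(sorted.get(i), 1.0 / (1 << i));
        }
        return result;
    }

    public static void main(String[] args) {
        // ~0.018 instead of the naive 0.001 for three selectivities of 0.1.
        System.out.println(combine(Arrays.asList(0.1, 0.1, 0.1)));
    }
}
```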
- public final static double HASH_TBL_SPACE_OVERHEAD = 1.1; + public static final double HASH_TBL_SPACE_OVERHEAD = 1.1; private final IdGenerator nodeIdGenerator = PlanNodeId.createGenerator(); private final IdGenerator fragmentIdGenerator = diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/ProjectPlanner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/ProjectPlanner.java index 094091fd22..649c6d5270 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/ProjectPlanner.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/ProjectPlanner.java @@ -31,9 +31,9 @@ import java.util.List; import java.util.Set; public class ProjectPlanner { - private final static Logger LOG = LogManager.getLogger(PlanNode.class); + private static final Logger LOG = LogManager.getLogger(PlanNode.class); - private Analyzer analyzer; + private final Analyzer analyzer; public ProjectPlanner(Analyzer analyzer) { this.analyzer = analyzer; diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/RollupSelector.java b/fe/fe-core/src/main/java/org/apache/doris/planner/RollupSelector.java index 98a8bf2128..c1926bb982 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/RollupSelector.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/RollupSelector.java @@ -74,7 +74,8 @@ public final class RollupSelector { return v2RollupIndexId; } } - // Get first partition to select best prefix index rollups, because MaterializedIndex ids in one rollup's partitions are all same. + // Get first partition to select best prefix index rollups, + // because MaterializedIndex ids in one rollup's partitions are all same. final List bestPrefixIndexRollups = selectBestPrefixIndexRollup(conjuncts, isPreAggregation); return selectBestRowCountRollup(bestPrefixIndexRollups, partitionIds); } @@ -113,7 +114,7 @@ public final class RollupSelector { return selectedIndexId; } - private List selectBestPrefixIndexRollup(List conjuncts, boolean isPreAggregation) throws UserException { + private List selectBestPrefixIndexRollup(List conjuncts, boolean isPreAggregation) { final List outputColumns = Lists.newArrayList(); for (SlotDescriptor slot : tupleDesc.getMaterializedSlots()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/RuntimeFilter.java b/fe/fe-core/src/main/java/org/apache/doris/planner/RuntimeFilter.java index 48708e3a27..ea58657155 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/RuntimeFilter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/RuntimeFilter.java @@ -54,7 +54,7 @@ import java.util.Map; * the filter and the scan nodes that apply the filter (destination nodes). */ public final class RuntimeFilter { - private final static Logger LOG = LogManager.getLogger(RuntimeFilter.class); + private static final Logger LOG = LogManager.getLogger(RuntimeFilter.class); // Identifier of the filter (unique within a query) private final RuntimeFilterId id; @@ -226,8 +226,8 @@ public final class RuntimeFilter { * or null if a runtime filter cannot be generated from the specified predicate. 
*/ public static RuntimeFilter create(IdGenerator idGen, Analyzer analyzer, - Expr joinPredicate, int exprOrder, HashJoinNode filterSrcNode, - TRuntimeFilterType type, RuntimeFilterGenerator.FilterSizeLimits filterSizeLimits) { + Expr joinPredicate, int exprOrder, HashJoinNode filterSrcNode, + TRuntimeFilterType type, RuntimeFilterGenerator.FilterSizeLimits filterSizeLimits) { Preconditions.checkNotNull(idGen); Preconditions.checkNotNull(joinPredicate); Preconditions.checkNotNull(filterSrcNode); diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/RuntimeFilterGenerator.java b/fe/fe-core/src/main/java/org/apache/doris/planner/RuntimeFilterGenerator.java index 3a05dc2fbf..47bd11d874 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/RuntimeFilterGenerator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/RuntimeFilterGenerator.java @@ -70,7 +70,7 @@ import java.util.Set; * to prune tuples of T2 that cannot be part of the join result. */ public final class RuntimeFilterGenerator { - private final static Logger LOG = LogManager.getLogger(RuntimeFilterGenerator.class); + private static final Logger LOG = LogManager.getLogger(RuntimeFilterGenerator.class); // Map of base table tuple ids to a list of runtime filters that // can be applied at the corresponding scan nodes. @@ -131,9 +131,8 @@ public final class RuntimeFilterGenerator { Preconditions.checkState(maxNumBloomFilters >= 0); RuntimeFilterGenerator filterGenerator = new RuntimeFilterGenerator(analyzer); Preconditions.checkState(runtimeFilterType >= 0, "runtimeFilterType not expected"); - Preconditions.checkState(runtimeFilterType - <= Arrays.stream(TRuntimeFilterType.values()).mapToInt(TRuntimeFilterType::getValue).sum() - , "runtimeFilterType not expected"); + Preconditions.checkState(runtimeFilterType <= Arrays.stream(TRuntimeFilterType.values()) + .mapToInt(TRuntimeFilterType::getValue).sum(), "runtimeFilterType not expected"); filterGenerator.generateFilters(plan); List filters = filterGenerator.getRuntimeFilters(); if (filters.size() > maxNumBloomFilters) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/ScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/ScanNode.java index e2392ddc7a..2dea7d4c6b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/ScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/ScanNode.java @@ -53,8 +53,8 @@ import java.util.Set; /** * Representation of the common elements of all scan nodes. */ -abstract public class ScanNode extends PlanNode { - private final static Logger LOG = LogManager.getLogger(ScanNode.class); +public abstract class ScanNode extends PlanNode { + private static final Logger LOG = LogManager.getLogger(ScanNode.class); protected final TupleDescriptor desc; // Use this if partition_prune_algorithm_version is 1. protected Map columnFilters = Maps.newHashMap(); @@ -117,7 +117,7 @@ abstract public class ScanNode extends PlanNode { * only applicable to HDFS; less than or equal to zero means no * maximum. 
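The RuntimeFilterGenerator javadoc in this hunk describes filters built from one side of a join and applied at scan nodes to prune rows that cannot be part of the join result. A toy, self-contained illustration of that idea is below; the real filters here are IN / Bloom / min-max filters (see TRuntimeFilterType) pushed down to backend scan nodes, not an in-memory HashSet:

```
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

// Toy version of a runtime filter: the build side publishes the join keys it
// actually contains, and the probe-side scan drops rows that cannot match.
public final class RuntimeFilterSketch {
    public static void main(String[] args) {
        List<Integer> buildSideKeys = Arrays.asList(3, 5, 8);          // small dimension table
        List<Integer> probeSideRows = Arrays.asList(1, 2, 3, 4, 5, 6); // large fact table scan

        Set<Integer> filter = new HashSet<>(buildSideKeys);            // "runtime filter"
        List<Integer> survivors = probeSideRows.stream()
                .filter(filter::contains)                              // pruned at the scan
                .collect(Collectors.toList());

        System.out.println(survivors); // [3, 5]
    }
}
```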
*/ - abstract public List getScanRangeLocations(long maxScanRangeLength); + public abstract List getScanRangeLocations(long maxScanRangeLength); // TODO(ML): move it into PrunerOptimizer public void computeColumnFilter() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/SelectNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/SelectNode.java index b56880c889..284ad73a49 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/SelectNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/SelectNode.java @@ -37,13 +37,14 @@ import java.util.List; * Node that applies conjuncts and a limit clause. Has exactly one child. */ public class SelectNode extends PlanNode { - private final static Logger LOG = LogManager.getLogger(SelectNode.class); + private static final Logger LOG = LogManager.getLogger(SelectNode.class); protected SelectNode(PlanNodeId id, PlanNode child) { super(id, child.getTupleIds(), "SELECT", NodeType.SELECT_NODE); addChild(child); this.nullableTupleIds = child.nullableTupleIds; } + protected SelectNode(PlanNodeId id, PlanNode child, List conjuncts) { super(id, child.getTupleIds(), "SELECT", NodeType.SELECT_NODE); addChild(child); diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/SetOperationNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/SetOperationNode.java index 257ff517cc..7a91ca5b3c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/SetOperationNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/SetOperationNode.java @@ -55,7 +55,7 @@ import java.util.stream.Collectors; * tuples. */ public abstract class SetOperationNode extends PlanNode { - private final static Logger LOG = LoggerFactory.getLogger(SetOperationNode.class); + private static final Logger LOG = LoggerFactory.getLogger(SetOperationNode.class); // List of set operation result exprs of the originating SetOperationStmt. Used for // determining passthrough-compatibility of children. @@ -143,8 +143,9 @@ public abstract class SetOperationNode extends PlanNode { @Override public void finalize(Analyzer analyzer) throws UserException { super.finalize(analyzer); - // In Doris-6380, moved computePassthrough() and the materialized position of resultExprs/constExprs from this.init() - // to this.finalize(), and will not call SetOperationNode::init() again at the end of createSetOperationNodeFragment(). + // In Doris-6380, moved computePassthrough() and the materialized position of resultExprs/constExprs + // from this.init() to this.finalize(), and will not call SetOperationNode::init() again at the end + // of createSetOperationNodeFragment(). // // Reasons for move computePassthrough(): // Because the byteSize of the tuple corresponding to OlapScanNode is updated after @@ -154,10 +155,10 @@ public abstract class SetOperationNode extends PlanNode { // at the end of createSetOperationNodeFragment(). // // Reasons for move materialized position of resultExprs/constExprs: - // Because the output slot is materialized at various positions in the planner stage, this is to ensure that - // eventually the resultExprs/constExprs and the corresponding output slot have the same materialized state. - // And the order of materialized resultExprs must be the same as the order of child adjusted by - // computePassthrough(), so resultExprs materialized must be placed after computePassthrough(). 
+ // Because the output slot is materialized at various positions in the planner stage, this is to ensure that + // eventually the resultExprs/constExprs and the corresponding output slot have the same materialized state. + // And the order of materialized resultExprs must be the same as the order of child adjusted by + // computePassthrough(), so resultExprs materialized must be placed after computePassthrough(). // except Node must not reorder the child if (!(this instanceof ExceptNode)) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java index 6a0dc17b25..38711d025f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java @@ -2018,7 +2018,8 @@ public class SingleNodePlanner { * TODO: Simplify the plan of unions with only a single non-empty operand to not * use a union node (this is tricky because a union materializes a new tuple). */ - private PlanNode createSetOperationPlan(SetOperationStmt setOperationStmt, Analyzer analyzer, long defaultOrderByLimit) + private PlanNode createSetOperationPlan( + SetOperationStmt setOperationStmt, Analyzer analyzer, long defaultOrderByLimit) throws UserException, AnalysisException { // TODO(zc): get unassigned conjuncts // List conjuncts = diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/SortNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/SortNode.java index 0e223c4f1a..02c0150104 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/SortNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/SortNode.java @@ -52,7 +52,7 @@ import java.util.Set; * Sorting. 
*/ public class SortNode extends PlanNode { - private final static Logger LOG = LogManager.getLogger(SortNode.class); + private static final Logger LOG = LogManager.getLogger(SortNode.class); private final SortInfo info; private final boolean useTopN; private final boolean isDefaultLimit; @@ -67,13 +67,17 @@ public class SortNode extends PlanNode { public void setIsAnalyticSort(boolean v) { isAnalyticSort = v; } + public boolean isAnalyticSort() { return isAnalyticSort; } + private DataPartition inputPartition; + public void setInputPartition(DataPartition inputPartition) { this.inputPartition = inputPartition; } + public DataPartition getInputPartition() { return inputPartition; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java index d5a02ddaae..4f19833302 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java @@ -115,7 +115,8 @@ public class StreamLoadPlanner { } if (destTable.hasSequenceCol() && !taskInfo.hasSequenceCol()) { - throw new UserException("Table " + destTable.getName() + " has sequence column, need to specify the sequence column"); + throw new UserException("Table " + destTable.getName() + + " has sequence column, need to specify the sequence column"); } if (!destTable.hasSequenceCol() && taskInfo.hasSequenceCol()) { throw new UserException("There is no sequence column in the table " + destTable.getName()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadScanNode.java index 0049eeaa0e..2391ab7e7b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadScanNode.java @@ -136,7 +136,8 @@ public class StreamLoadScanNode extends LoadScanNode { columnExprDescs.descs.add(ImportColumnDesc.newDeleteSignImportColumnDesc(new IntLiteral(1))); } if (taskInfo.hasSequenceCol()) { - columnExprDescs.descs.add(new ImportColumnDesc(Column.SEQUENCE_COL, new SlotRef(null, taskInfo.getSequenceCol()))); + columnExprDescs.descs.add(new ImportColumnDesc(Column.SEQUENCE_COL, + new SlotRef(null, taskInfo.getSequenceCol()))); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/plugin/DynamicPluginLoader.java b/fe/fe-core/src/main/java/org/apache/doris/plugin/DynamicPluginLoader.java index 40a42fc035..e075e16b97 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/plugin/DynamicPluginLoader.java +++ b/fe/fe-core/src/main/java/org/apache/doris/plugin/DynamicPluginLoader.java @@ -39,9 +39,9 @@ import java.util.Map; import java.util.Set; public class DynamicPluginLoader extends PluginLoader { - private final static Logger LOG = LogManager.getLogger(DynamicPluginLoader.class); + private static final Logger LOG = LogManager.getLogger(DynamicPluginLoader.class); - public final static String MD5SUM_KEY = "md5sum"; + public static final String MD5SUM_KEY = "md5sum"; // the final dir which contains all plugin files. 
// eg: @@ -49,6 +49,7 @@ public class DynamicPluginLoader extends PluginLoader { protected Path installPath; protected String expectedMd5sum; + // for processing install stmt DynamicPluginLoader(String pluginDir, String source, String expectedMd5sum) { super(pluginDir, source); @@ -258,8 +259,8 @@ public class DynamicPluginLoader extends PluginLoader { */ public void movePlugin() throws UserException, IOException { if (installPath == null || !Files.exists(installPath)) { - throw new PluginException("Install plugin " + pluginInfo.getName() + " failed, because install path doesn't " - + "exist."); + throw new PluginException("Install plugin " + pluginInfo.getName() + + " failed, because install path doesn't exist."); } Path targetPath = FileSystems.getDefault().getPath(pluginDir.toString(), pluginInfo.getName()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/plugin/PluginMgr.java b/fe/fe-core/src/main/java/org/apache/doris/plugin/PluginMgr.java index 412b59fbff..730e44a2d4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/plugin/PluginMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/plugin/PluginMgr.java @@ -46,9 +46,9 @@ import java.util.Objects; import java.util.Set; public class PluginMgr implements Writable { - private final static Logger LOG = LogManager.getLogger(PluginMgr.class); + private static final Logger LOG = LogManager.getLogger(PluginMgr.class); - public final static String BUILTIN_PLUGIN_PREFIX = "__builtin_"; + public static final String BUILTIN_PLUGIN_PREFIX = "__builtin_"; private final Map[] plugins; // all dynamic plugins should have unique names, @@ -295,7 +295,8 @@ public class PluginMgr implements Writable { } r.add(loader.getStatus().toString()); - r.add(pi != null ? "{" + new PrintableMap<>(pi.getProperties(), "=", true, false, true).toString() + "}" : "UNKNOWN"); + r.add(pi != null ? "{" + new PrintableMap<>(pi.getProperties(), + "=", true, false, true) + "}" : "UNKNOWN"); rows.add(r); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/plugin/PluginZip.java b/fe/fe-core/src/main/java/org/apache/doris/plugin/PluginZip.java index 1ef190f405..7037b8a7a6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/plugin/PluginZip.java +++ b/fe/fe-core/src/main/java/org/apache/doris/plugin/PluginZip.java @@ -48,7 +48,7 @@ import java.util.zip.ZipInputStream; * */ class PluginZip { - private final static Logger LOG = LogManager.getLogger(PluginZip.class); + private static final Logger LOG = LogManager.getLogger(PluginZip.class); private static final List DEFAULT_PROTOCOL = ImmutableList.of("https://", "http://"); @@ -128,7 +128,8 @@ class PluginZip { BufferedReader br = new BufferedReader(new InputStreamReader(in)); expectedChecksum = br.readLine(); } catch (IOException e) { - throw new UserException(e.getMessage() + ". you should set md5sum in plugin properties or provide a md5 URI to check plugin file"); + throw new UserException(e.getMessage() + + ". 
you should set md5sum in plugin properties or provide a md5 URI to check plugin file"); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/policy/PolicyMgr.java b/fe/fe-core/src/main/java/org/apache/doris/policy/PolicyMgr.java index ab567117aa..bb72d75c48 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/policy/PolicyMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/policy/PolicyMgr.java @@ -174,7 +174,6 @@ public class PolicyMgr implements Writable { } private void unprotectedDrop(DropPolicyLog log) { - long dbId = log.getDbId(); List policies = getPoliciesByType(log.getType()); policies.removeIf(policy -> policy.matchPolicy(log)); typeToPolicyMap.put(log.getType(), policies); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/AuditLogBuilder.java b/fe/fe-core/src/main/java/org/apache/doris/qe/AuditLogBuilder.java index b9e1855ca5..b0bd2275e7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/AuditLogBuilder.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/AuditLogBuilder.java @@ -45,13 +45,13 @@ public class AuditLogBuilder extends Plugin implements AuditPlugin { private final PluginInfo pluginInfo; - private final static String[] LOAD_ANNONATION_NAMES = {"JobId", "Label", "LoadType", "Db", "TableList", + private static final String[] LOAD_ANNONATION_NAMES = {"JobId", "Label", "LoadType", "Db", "TableList", "FilePathList", "BrokerUser", "Timestamp", "LoadStartTime", "LoadFinishTime", "ScanRows", "ScanBytes", "FileNumber"}; private final Set loadAnnotationSet; - private final static String[] STREAM_LOAD_ANNONATION_NAMES = {"Label", "Db", "Table", "User", "ClientIp", + private static final String[] STREAM_LOAD_ANNONATION_NAMES = {"Label", "Db", "Table", "User", "ClientIp", "Status", "Message", "Url", "TotalRows", "LoadedRows", "FilteredRows", "UnselectedRows", "LoadBytes", "StartTime", "FinishTime"}; diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java index 4f467649ae..31f71ecb40 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java @@ -80,7 +80,8 @@ public class ConnectContext { protected volatile String clusterName = ""; // username@host of current login user protected volatile String qualifiedUser; - // LDAP authenticated but the Doris account does not exist, set the flag, and the user login Doris as Temporary user. + // LDAP authenticated but the Doris account does not exist, + // set the flag, and the user login Doris as Temporary user. protected volatile boolean isTempUser = false; // Save the privs from the ldap groups. protected volatile PaloRole ldapGroupsPrivs = null; diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java index f3b683de51..391ebf0164 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java @@ -502,8 +502,8 @@ public class ConnectProcessor { // and tell the follower the current journalID. TMasterOpResult result = new TMasterOpResult(); if (ctx.queryId() != null - // If none master FE not set query id or query id was reset in StmtExecutor when a query exec more than once, - // return it to none master FE. + // If none master FE not set query id or query id was reset in StmtExecutor + // when a query exec more than once, return it to none master FE. 
&& (!request.isSetQueryId() || !request.getQueryId().equals(ctx.queryId())) ) { result.setQueryId(ctx.queryId()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectScheduler.java index 9a5dd0418b..d5ec0d8462 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectScheduler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectScheduler.java @@ -44,18 +44,19 @@ import java.util.concurrent.atomic.AtomicInteger; // TODO(zhaochun): We should consider if the number of local file connection can >= maximum connections later. public class ConnectScheduler { private static final Logger LOG = LogManager.getLogger(ConnectScheduler.class); - private int maxConnections; - private AtomicInteger numberConnection; - private AtomicInteger nextConnectionId; - private Map connectionMap = Maps.newConcurrentMap(); - private Map connByUser = Maps.newConcurrentMap(); - private ExecutorService executor = ThreadPoolManager.newDaemonCacheThreadPool(Config.max_connection_scheduler_threads_num, "connect-scheduler-pool", true); + private final int maxConnections; + private final AtomicInteger numberConnection; + private final AtomicInteger nextConnectionId; + private final Map connectionMap = Maps.newConcurrentMap(); + private final Map connByUser = Maps.newConcurrentMap(); + private final ExecutorService executor = ThreadPoolManager.newDaemonCacheThreadPool( + Config.max_connection_scheduler_threads_num, "connect-scheduler-pool", true); // Use a thread to check whether connection is timeout. Because // 1. If use a scheduler, the task maybe a huge number when query is messy. // Let timeout is 10m, and 5000 qps, then there are up to 3000000 tasks in scheduler. // 2. Use a thread to poll maybe lose some accurate, but is enough to us. 
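The comment above explains why ConnectScheduler uses a single polling thread for connection timeouts rather than scheduling one task per connection: with a 10-minute timeout at 5000 qps, per-connection tasks would pile up into the millions. A minimal sketch of that pattern; the types and field names are hypothetical stand-ins for the real ConnectContext bookkeeping:

```
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Minimal sketch of the "one sweeper thread" pattern described above.
public final class TimeoutSweeper {
    static final class Connection {
        volatile long lastActiveMs = System.currentTimeMillis();
        void kill() { /* close the underlying channel */ }
    }

    private final Map<Integer, Connection> connections = new ConcurrentHashMap<>();
    private final ScheduledExecutorService sweeper = Executors.newSingleThreadScheduledExecutor();

    void register(int connectionId, Connection conn) {
        connections.put(connectionId, conn);
    }

    void start(long timeoutMs) {
        // One periodic task sweeps all connections: O(connections) per tick,
        // instead of one scheduled task per connection.
        sweeper.scheduleAtFixedRate(() -> {
            long now = System.currentTimeMillis();
            connections.values().removeIf(c -> {
                boolean expired = now - c.lastActiveMs > timeoutMs;
                if (expired) {
                    c.kill();
                }
                return expired;
            });
        }, 1, 1, TimeUnit.SECONDS);
    }
}
```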
- private ScheduledExecutorService checkTimer = ThreadPoolManager.newDaemonScheduledThreadPool(1, + private final ScheduledExecutorService checkTimer = ThreadPoolManager.newDaemonScheduledThreadPool(1, "Connect-Scheduler-Check-Timer", true); public ConnectScheduler(int maxConnections) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java b/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java index 555b68aefa..8585872c84 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java @@ -102,7 +102,6 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Multiset; import com.google.common.collect.Sets; -import org.apache.commons.collections.map.HashedMap; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.thrift.TException; @@ -115,9 +114,9 @@ import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Random; import java.util.Set; import java.util.concurrent.ExecutionException; @@ -132,10 +131,10 @@ public class Coordinator { private static final DateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); - private static String localIP = FrontendOptions.getLocalHostAddress(); + private static final String localIP = FrontendOptions.getLocalHostAddress(); // Random is used to shuffle instances of partitioned - private static Random instanceRandom = new Random(); + private static final Random instanceRandom = new Random(); // Overall status of the entire query; set to the first reported fragment error // status or to CANCELLED, if Cancel() is called. @@ -147,20 +146,20 @@ public class Coordinator { private ImmutableMap idToBackend = ImmutableMap.of(); // copied from TQueryExecRequest; constant across all fragments - private TDescriptorTable descTable; + private final TDescriptorTable descTable; - private Set alreadySentBackendIds = Sets.newHashSet(); + private final Set alreadySentBackendIds = Sets.newHashSet(); // Why do we use query global? // When `NOW()` function is in sql, we need only one now(), // but, we execute `NOW()` distributed. // So we make a query global value here to make one `now()` value in one query process. - private TQueryGlobals queryGlobals = new TQueryGlobals(); + private final TQueryGlobals queryGlobals = new TQueryGlobals(); private TQueryOptions queryOptions; private TNetworkAddress coordAddress; // protects all fields below - private Lock lock = new ReentrantLock(); + private final Lock lock = new ReentrantLock(); // If true, the query is done returning all results. It is possible that the // coordinator still needs to wait for cleanup on remote fragments (e.g. queries @@ -175,25 +174,25 @@ public class Coordinator { private ProfileWriter profileWriter; // populated in computeFragmentExecParams() - private Map fragmentExecParamsMap = Maps.newHashMap(); + private final Map fragmentExecParamsMap = Maps.newHashMap(); - private List fragments; + private final List fragments; // backend execute state - private List backendExecStates = Lists.newArrayList(); + private final List backendExecStates = Lists.newArrayList(); // backend which state need to be checked when joining this coordinator. // It is supposed to be the subset of backendExecStates. 
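The queryGlobals comment in this hunk explains that `NOW()` must evaluate to a single value per query even though fragments execute on many backends, so the coordinator captures one timestamp and ships it with the query. A hedged sketch of that idea with hypothetical names (QueryGlobalsSketch stands in for TQueryGlobals):

```
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

// Sketch of "evaluate NOW() once per query": the coordinator freezes a single
// timestamp and every fragment reads it, instead of each backend calling now().
public final class QueryGlobalsSketch {
    private final String nowString;

    private QueryGlobalsSketch(String nowString) {
        this.nowString = nowString;
    }

    /** Called once by the coordinator when the query starts. */
    static QueryGlobalsSketch capture() {
        return new QueryGlobalsSketch(
                LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")));
    }

    /** Every fragment, on every backend, sees the same value. */
    String now() {
        return nowString;
    }

    public static void main(String[] args) throws InterruptedException {
        QueryGlobalsSketch globals = capture();
        String first = globals.now();
        Thread.sleep(1500);
        // Still the captured coordinator time, not the backend's local clock.
        System.out.println(first.equals(globals.now())); // true
    }
}
```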
- private List needCheckBackendExecStates = Lists.newArrayList(); + private final List needCheckBackendExecStates = Lists.newArrayList(); private ResultReceiver receiver; - private List scanNodes; + private final List scanNodes; // number of instances of this query, equals to // number of backends executing plan fragments on behalf of this query; // set in computeFragmentExecParams(); // same as backend_exec_states_.size() after Exec() - private Set instanceIds = Sets.newHashSet(); + private final Set instanceIds = Sets.newHashSet(); // instance id -> dummy value private MarkedCountDownLatch profileDoneSignal; - private boolean isBlockQuery; + private final boolean isBlockQuery; private int numReceivedRows = 0; @@ -204,14 +203,14 @@ public class Coordinator { // for export private List exportFiles; - private List commitInfos = Lists.newArrayList(); - private List errorTabletInfos = Lists.newArrayList(); + private final List commitInfos = Lists.newArrayList(); + private final List errorTabletInfos = Lists.newArrayList(); // Input parameter private long jobId = -1; // job which this task belongs to private TUniqueId queryId; - private TResourceInfo tResourceInfo; - private boolean needReport; + private final TResourceInfo tResourceInfo; + private final boolean needReport; // parallel execute private final TUniqueId nextInstanceId; @@ -499,7 +498,7 @@ public class Coordinator { PlanFragmentId topId = fragments.get(0).getFragmentId(); FragmentExecParams topParams = fragmentExecParamsMap.get(topId); DataSink topDataSink = topParams.fragment.getSink(); - this.timeoutDeadline = System.currentTimeMillis() + queryOptions.query_timeout * 1000; + this.timeoutDeadline = System.currentTimeMillis() + queryOptions.query_timeout * 1000L; if (topDataSink instanceof ResultSink || topDataSink instanceof ResultFileSink) { TNetworkAddress execBeAddr = topParams.instanceExecParams.get(0).host; receiver = new ResultReceiver(topParams.instanceExecParams.get(0).instanceId, @@ -612,7 +611,8 @@ public class Coordinator { int instanceId = 0; for (TExecPlanFragmentParams tParam : tParams) { BackendExecState execState = - new BackendExecState(fragment.getFragmentId(), instanceId++, profileFragmentId, tParam, this.addressToBackendID); + new BackendExecState(fragment.getFragmentId(), instanceId++, + profileFragmentId, tParam, this.addressToBackendID); // Each tParam will set the total number of Fragments that need to be executed on the same BE, // and the BE will determine whether all Fragments have been executed based on this information. tParam.setFragmentNumOnHost(hostCounter.count(execState.address)); @@ -623,8 +623,8 @@ public class Coordinator { if (needCheckBackendState) { needCheckBackendExecStates.add(execState); if (LOG.isDebugEnabled()) { - LOG.debug("add need check backend {} for fragment, {} job: {}", execState.backend.getId(), - fragment.getFragmentId().asInt(), jobId); + LOG.debug("add need check backend {} for fragment, {} job: {}", + execState.backend.getId(), fragment.getFragmentId().asInt(), jobId); } } @@ -641,7 +641,8 @@ public class Coordinator { } // end for fragments // 4. send and wait fragments rpc - List>> futures = Lists.newArrayList(); + List>> futures + = Lists.newArrayList(); for (BackendExecStates states : beToExecStates.values()) { states.unsetFields(); futures.add(Pair.create(states, states.execRemoteFragmentsAsync())); @@ -669,7 +670,7 @@ public class Coordinator { throw new UserException("timeout before waiting for " + operation + " RPC. 
Elapse(sec): " + ( (System.currentTimeMillis() - timeoutDeadline) / 1000 + queryOptions.query_timeout)); } - + for (Pair> pair : futures) { TStatusCode code; String errMsg = null; @@ -751,31 +752,31 @@ public class Coordinator { long numRowsNormal = 0L; String value = this.loadCounters.get(LoadEtlTask.DPP_NORMAL_ALL); if (value != null) { - numRowsNormal = Long.valueOf(value); + numRowsNormal = Long.parseLong(value); } long numRowsAbnormal = 0L; value = this.loadCounters.get(LoadEtlTask.DPP_ABNORMAL_ALL); if (value != null) { - numRowsAbnormal = Long.valueOf(value); + numRowsAbnormal = Long.parseLong(value); } long numRowsUnselected = 0L; value = this.loadCounters.get(LoadJob.UNSELECTED_ROWS); if (value != null) { - numRowsUnselected = Long.valueOf(value); + numRowsUnselected = Long.parseLong(value); } // new load counters value = newLoadCounters.get(LoadEtlTask.DPP_NORMAL_ALL); if (value != null) { - numRowsNormal += Long.valueOf(value); + numRowsNormal += Long.parseLong(value); } value = newLoadCounters.get(LoadEtlTask.DPP_ABNORMAL_ALL); if (value != null) { - numRowsAbnormal += Long.valueOf(value); + numRowsAbnormal += Long.parseLong(value); } value = newLoadCounters.get(LoadJob.UNSELECTED_ROWS); if (value != null) { - numRowsUnselected += Long.valueOf(value); + numRowsUnselected += Long.parseLong(value); } this.loadCounters.put(LoadEtlTask.DPP_NORMAL_ALL, "" + numRowsNormal); @@ -823,7 +824,8 @@ public class Coordinator { } queryStatus.setStatus(status); - LOG.warn("one instance report fail throw updateStatus(), need cancel. job id: {}, query id: {}, instance id: {}", + LOG.warn("one instance report fail throw updateStatus(), need cancel. job id: {}," + + " query id: {}, instance id: {}", jobId, DebugUtil.printId(queryId), instanceId != null ? DebugUtil.printId(instanceId) : "NaN"); cancelInternal(Types.PPlanFragmentCancelReason.INTERNAL_ERROR); } finally { @@ -916,7 +918,8 @@ public class Coordinator { if (profileDoneSignal != null) { // count down to zero to notify all objects waiting for this profileDoneSignal.countDownToZero(new Status()); - LOG.info("unfinished instance: {}", profileDoneSignal.getLeftMarks().stream().map(e -> DebugUtil.printId(e.getKey())).toArray()); + LOG.info("unfinished instance: {}", profileDoneSignal.getLeftMarks() + .stream().map(e -> DebugUtil.printId(e.getKey())).toArray()); } } @@ -934,7 +937,8 @@ public class Coordinator { instanceIds.clear(); for (FragmentExecParams params : fragmentExecParamsMap.values()) { if (LOG.isDebugEnabled()) { - LOG.debug("fragment {} has instances {}", params.fragment.getFragmentId(), params.instanceExecParams.size()); + LOG.debug("fragment {} has instances {}", + params.fragment.getFragmentId(), params.instanceExecParams.size()); } for (int j = 0; j < params.instanceExecParams.size(); ++j) { @@ -980,8 +984,8 @@ public class Coordinator { if (sink.getOutputPartition() != null && sink.getOutputPartition().isBucketShuffleHashPartition()) { // the destFragment must be bucket shuffle - Preconditions.checkState(bucketShuffleJoinController. - isBucketShuffleJoin(destFragment.getFragmentId().asInt()), "Sink is" + Preconditions.checkState(bucketShuffleJoinController + .isBucketShuffleJoin(destFragment.getFragmentId().asInt()), "Sink is" + "Bucket Shuffle Partition, The destFragment must have bucket shuffle join node "); int bucketSeq = 0; @@ -990,7 +994,8 @@ public class Coordinator { // when left table is empty, it's bucketset is empty. 
// set right table destination address to the address of left table - if (destParams.instanceExecParams.size() == 1 && destParams.instanceExecParams.get(0).bucketSeqSet.isEmpty()) { + if (destParams.instanceExecParams.size() == 1 + && destParams.instanceExecParams.get(0).bucketSeqSet.isEmpty()) { bucketNum = 1; destParams.instanceExecParams.get(0).bucketSeqSet.add(0); } @@ -1139,9 +1144,10 @@ public class Coordinator { TNetworkAddress execHostport; if (ConnectContext.get() != null && ConnectContext.get().isResourceTagsSet() && !addressToBackendID.isEmpty()) { - // In this case, we only use the BE where the replica selected by the tag is located to execute this query. - // Otherwise, except for the scan node, the rest of the execution nodes of the query can be executed on any BE. - // addressToBackendID can be empty when this is a constant select stmt like: + // In this case, we only use the BE where the replica selected by the tag is located to execute + // this query. Otherwise, except for the scan node, the rest of the execution nodes of the query + // can be executed on any BE. addressToBackendID can be empty when this is a constant + // select stmt like: // SELECT @@session.auto_increment_increment AS auto_increment_increment; execHostport = SimpleScheduler.getHostByCurrentBackend(addressToBackendID); } else { @@ -1180,12 +1186,15 @@ public class Coordinator { int inputFragmentIndex = 0; int maxParallelism = 0; - // If the fragment has three children, then the first child and the second child are the children(both exchange node) of shuffle HashJoinNode, + // If the fragment has three children, then the first child and the second child are + // the children(both exchange node) of shuffle HashJoinNode, // and the third child is the right child(ExchangeNode) of broadcast HashJoinNode. - // We only need to pay attention to the maximum parallelism among the two ExchangeNodes of shuffle HashJoinNode. + // We only need to pay attention to the maximum parallelism among + // the two ExchangeNodes of shuffle HashJoinNode. int childrenCount = (fatherNode != null) ? 
fatherNode.getChildren().size() : 1; for (int j = 0; j < childrenCount; j++) { - int currentChildFragmentParallelism = fragmentExecParamsMap.get(fragment.getChild(j).getFragmentId()).instanceExecParams.size(); + int currentChildFragmentParallelism + = fragmentExecParamsMap.get(fragment.getChild(j).getFragmentId()).instanceExecParams.size(); if (currentChildFragmentParallelism > maxParallelism) { maxParallelism = currentChildFragmentParallelism; inputFragmentIndex = j; @@ -1198,21 +1207,26 @@ public class Coordinator { if (ConnectContext.get() != null && ConnectContext.get().getSessionVariable() != null) { exchangeInstances = ConnectContext.get().getSessionVariable().getExchangeInstanceParallel(); } - if (exchangeInstances > 0 && fragmentExecParamsMap.get(inputFragmentId).instanceExecParams.size() > exchangeInstances) { + if (exchangeInstances > 0 && fragmentExecParamsMap.get(inputFragmentId) + .instanceExecParams.size() > exchangeInstances) { // random select some instance - // get distinct host, when parallel_fragment_exec_instance_num > 1, single host may execute several instances + // get distinct host, when parallel_fragment_exec_instance_num > 1, + // single host may execute several instances Set hostSet = Sets.newHashSet(); - for (FInstanceExecParam execParams : fragmentExecParamsMap.get(inputFragmentId).instanceExecParams) { + for (FInstanceExecParam execParams : + fragmentExecParamsMap.get(inputFragmentId).instanceExecParams) { hostSet.add(execParams.host); } List hosts = Lists.newArrayList(hostSet); Collections.shuffle(hosts, instanceRandom); for (int index = 0; index < exchangeInstances; index++) { - FInstanceExecParam instanceParam = new FInstanceExecParam(null, hosts.get(index % hosts.size()), 0, params); + FInstanceExecParam instanceParam = new FInstanceExecParam(null, + hosts.get(index % hosts.size()), 0, params); params.instanceExecParams.add(instanceParam); } } else { - for (FInstanceExecParam execParams : fragmentExecParamsMap.get(inputFragmentId).instanceExecParams) { + for (FInstanceExecParam execParams + : fragmentExecParamsMap.get(inputFragmentId).instanceExecParams) { FInstanceExecParam instanceParam = new FInstanceExecParam(null, execParams.host, 0, params); params.instanceExecParams.add(instanceParam); } @@ -1230,18 +1244,19 @@ public class Coordinator { int parallelExecInstanceNum = fragment.getParallelExecNum(); //for ColocateJoin fragment - if ((isColocateFragment(fragment, fragment.getPlanRoot()) && fragmentIdToSeqToAddressMap.containsKey(fragment.getFragmentId()) + if ((isColocateFragment(fragment, fragment.getPlanRoot()) + && fragmentIdToSeqToAddressMap.containsKey(fragment.getFragmentId()) && fragmentIdToSeqToAddressMap.get(fragment.getFragmentId()).size() > 0)) { computeColocateJoinInstanceParam(fragment.getFragmentId(), parallelExecInstanceNum, params); } else if (bucketShuffleJoinController.isBucketShuffleJoin(fragment.getFragmentId().asInt())) { - bucketShuffleJoinController.computeInstanceParam(fragment.getFragmentId(), parallelExecInstanceNum, params); + bucketShuffleJoinController.computeInstanceParam(fragment.getFragmentId(), + parallelExecInstanceNum, params); } else { // case A - Iterator iter = fragmentExecParamsMap.get(fragment.getFragmentId()).scanRangeAssignment.entrySet().iterator(); - while (iter.hasNext()) { - Map.Entry entry = (Map.Entry) iter.next(); - TNetworkAddress key = (TNetworkAddress) entry.getKey(); - Map> value = (Map>) entry.getValue(); + for (Entry>> entry : fragmentExecParamsMap.get( + 
fragment.getFragmentId()).scanRangeAssignment.entrySet()) { + TNetworkAddress key = entry.getKey(); + Map> value = entry.getValue(); for (Integer planNodeId : value.keySet()) { List perNodeScanRanges = value.get(planNodeId); @@ -1267,10 +1282,13 @@ public class Coordinator { if (params.instanceExecParams.isEmpty()) { Reference backendIdRef = new Reference(); TNetworkAddress execHostport; - if (ConnectContext.get() != null && !ConnectContext.get().isResourceTagsSet() && !addressToBackendID.isEmpty()) { - // In this case, we only use the BE where the replica selected by the tag is located to execute this query. - // Otherwise, except for the scan node, the rest of the execution nodes of the query can be executed on any BE. - // addressToBackendID can be empty when this is a constant select stmt like: + if (ConnectContext.get() != null + && !ConnectContext.get().isResourceTagsSet() + && !addressToBackendID.isEmpty()) { + // In this case, we only use the BE where the replica selected by the tag is located to + // execute this query. Otherwise, except for the scan node, the rest of the execution nodes + // of the query can be executed on any BE. addressToBackendID can be empty when this is a constant + // select stmt like: // SELECT @@session.auto_increment_increment AS auto_increment_increment; execHostport = SimpleScheduler.getHostByCurrentBackend(addressToBackendID); } else { @@ -1371,13 +1389,15 @@ public class Coordinator { return value; } - private void computeColocateJoinInstanceParam(PlanFragmentId fragmentId, int parallelExecInstanceNum, FragmentExecParams params) { + private void computeColocateJoinInstanceParam(PlanFragmentId fragmentId, + int parallelExecInstanceNum, FragmentExecParams params) { Map bucketSeqToAddress = fragmentIdToSeqToAddressMap.get(fragmentId); BucketSeqToScanRange bucketSeqToScanRange = fragmentIdTobucketSeqToScanRangeMap.get(fragmentId); Set scanNodeIds = fragmentIdToScanNodeIds.get(fragmentId); // 1. count each node in one fragment should scan how many tablet, gather them in one list - Map>>>> addressToScanRanges = Maps.newHashMap(); + Map>>>> addressToScanRanges + = Maps.newHashMap(); for (Map.Entry>> scanRanges : bucketSeqToScanRange.entrySet()) { TNetworkAddress address = bucketSeqToAddress.get(scanRanges.getKey()); Map> nodeScanRanges = scanRanges.getValue(); @@ -1395,7 +1415,8 @@ public class Coordinator { // 1. same bucket in some address be // 2. different scanNode id scan different scanRange which belong to the scanNode id // 3. 
split how many scanRange one instance should scan, same bucket do not spilt to different instance - Pair>> filteredScanRanges = Pair.create(scanRanges.getKey(), filteredNodeScanRanges); + Pair>> filteredScanRanges + = Pair.create(scanRanges.getKey(), filteredNodeScanRanges); if (!addressToScanRanges.containsKey(address)) { addressToScanRanges.put(address, Lists.newArrayList()); @@ -1403,9 +1424,11 @@ public class Coordinator { addressToScanRanges.get(address).add(filteredScanRanges); } FragmentScanRangeAssignment assignment = params.scanRangeAssignment; - for (Map.Entry>>>> addressScanRange : addressToScanRanges.entrySet()) { + for (Map.Entry>>>> addressScanRange + : addressToScanRanges.entrySet()) { List>>> scanRange = addressScanRange.getValue(); - Map> range = findOrInsert(assignment, addressScanRange.getKey(), new HashMap>()); + Map> range + = findOrInsert(assignment, addressScanRange.getKey(), new HashMap<>()); int expectedInstanceNum = 1; if (parallelExecInstanceNum > 1) { //the scan instance num should not larger than the tablets num @@ -1413,16 +1436,18 @@ public class Coordinator { } // 2.split how many scanRange one instance should scan - List>>>> perInstanceScanRanges = ListUtil.splitBySize(scanRange, - expectedInstanceNum); + List>>>> perInstanceScanRanges + = ListUtil.splitBySize(scanRange, expectedInstanceNum); // 3.construct instanceExecParam add the scanRange should be scan by instance - for (List>>> perInstanceScanRange : perInstanceScanRanges) { + for (List>>> perInstanceScanRange + : perInstanceScanRanges) { FInstanceExecParam instanceParam = new FInstanceExecParam(null, addressScanRange.getKey(), 0, params); for (Pair>> nodeScanRangeMap : perInstanceScanRange) { instanceParam.bucketSeqSet.add(nodeScanRangeMap.first); - for (Map.Entry> nodeScanRange : nodeScanRangeMap.second.entrySet()) { + for (Map.Entry> nodeScanRange + : nodeScanRangeMap.second.entrySet()) { if (!instanceParam.perNodeScanRanges.containsKey(nodeScanRange.getKey())) { range.put(nodeScanRange.getKey(), Lists.newArrayList()); instanceParam.perNodeScanRanges.put(nodeScanRange.getKey(), Lists.newArrayList()); @@ -1449,16 +1474,16 @@ public class Coordinator { continue; } - Set scanNodeIds = fragmentIdToScanNodeIds.get(scanNode.getFragmentId()); - if (scanNodeIds == null) { - scanNodeIds = Sets.newHashSet(); - fragmentIdToScanNodeIds.put(scanNode.getFragmentId(), scanNodeIds); - } + Set scanNodeIds = fragmentIdToScanNodeIds.computeIfAbsent(scanNode.getFragmentId(), + k -> Sets.newHashSet()); scanNodeIds.add(scanNode.getId().asInt()); - FragmentScanRangeAssignment assignment = fragmentExecParamsMap.get(scanNode.getFragmentId()).scanRangeAssignment; - boolean fragmentContainsColocateJoin = isColocateFragment(scanNode.getFragment(), scanNode.getFragment().getPlanRoot()); - boolean fragmentContainsBucketShuffleJoin = bucketShuffleJoinController.isBucketShuffleJoin(scanNode.getFragmentId().asInt(), scanNode.getFragment().getPlanRoot()); + FragmentScanRangeAssignment assignment + = fragmentExecParamsMap.get(scanNode.getFragmentId()).scanRangeAssignment; + boolean fragmentContainsColocateJoin = isColocateFragment(scanNode.getFragment(), + scanNode.getFragment().getPlanRoot()); + boolean fragmentContainsBucketShuffleJoin = bucketShuffleJoinController + .isBucketShuffleJoin(scanNode.getFragmentId().asInt(), scanNode.getFragment().getPlanRoot()); // A fragment may contain both colocate join and bucket shuffle join // on need both compute scanRange to init basic data for query coordinator @@ -1466,7 +1491,8 @@ public 
class Coordinator { computeScanRangeAssignmentByColocate((OlapScanNode) scanNode); } if (fragmentContainsBucketShuffleJoin) { - bucketShuffleJoinController.computeScanRangeAssignmentByBucket((OlapScanNode) scanNode, idToBackend, addressToBackendID); + bucketShuffleJoinController.computeScanRangeAssignmentByBucket((OlapScanNode) scanNode, + idToBackend, addressToBackendID); } if (!(fragmentContainsColocateJoin | fragmentContainsBucketShuffleJoin)) { computeScanRangeAssignmentByScheduler(scanNode, locations, assignment, assignedBytesPerHost); @@ -1478,7 +1504,7 @@ public class Coordinator { private void computeScanRangeAssignmentByColocate( final OlapScanNode scanNode) throws Exception { if (!fragmentIdToSeqToAddressMap.containsKey(scanNode.getFragmentId())) { - fragmentIdToSeqToAddressMap.put(scanNode.getFragmentId(), new HashedMap()); + fragmentIdToSeqToAddressMap.put(scanNode.getFragmentId(), new HashMap<>()); fragmentIdTobucketSeqToScanRangeMap.put(scanNode.getFragmentId(), new BucketSeqToScanRange()); } Map bucketSeqToAddress = fragmentIdToSeqToAddressMap.get(scanNode.getFragmentId()); @@ -1489,15 +1515,16 @@ public class Coordinator { //fill scanRangeParamsList List locations = scanNode.bucketSeq2locations.get(bucketSeq); if (!bucketSeqToAddress.containsKey(bucketSeq)) { - getExecHostPortForFragmentIDAndBucketSeq(locations.get(0), scanNode.getFragmentId(), bucketSeq, assignedBytesPerHost); + getExecHostPortForFragmentIDAndBucketSeq(locations.get(0), + scanNode.getFragmentId(), bucketSeq, assignedBytesPerHost); } for (TScanRangeLocations location : locations) { Map> scanRanges = - findOrInsert(bucketSeqToScanRange, bucketSeq, new HashMap>()); + findOrInsert(bucketSeqToScanRange, bucketSeq, new HashMap<>()); List scanRangeParamsList = - findOrInsert(scanRanges, scanNode.getId().asInt(), new ArrayList()); + findOrInsert(scanRanges, scanNode.getId().asInt(), new ArrayList<>()); // add scan range TScanRangeParams scanRangeParams = new TScanRangeParams(); @@ -1508,8 +1535,9 @@ public class Coordinator { } //ensure bucket sequence distribued to every host evenly - private void getExecHostPortForFragmentIDAndBucketSeq(TScanRangeLocations seqLocation, PlanFragmentId fragmentId, Integer bucketSeq, - HashMap assignedBytesPerHost) throws Exception { + private void getExecHostPortForFragmentIDAndBucketSeq(TScanRangeLocations seqLocation, + PlanFragmentId fragmentId, Integer bucketSeq, HashMap assignedBytesPerHost) + throws Exception { Reference backendIdRef = new Reference(); selectBackendsByRoundRobin(seqLocation, assignedBytesPerHost, backendIdRef); Backend backend = this.idToBackend.get(backendIdRef.getRef()); @@ -1547,8 +1575,7 @@ public class Coordinator { } public TScanRangeLocation selectBackendsByRoundRobin(List locations, - HashMap assignedBytesPerHost, - Reference backendIdRef) throws UserException { + HashMap assignedBytesPerHost, Reference backendIdRef) throws UserException { Long minAssignedBytes = Long.MAX_VALUE; TScanRangeLocation minLocation = null; Long step = 1L; @@ -1559,7 +1586,8 @@ public class Coordinator { minLocation = location; } } - TScanRangeLocation location = SimpleScheduler.getLocation(minLocation, locations, this.idToBackend, backendIdRef); + TScanRangeLocation location = SimpleScheduler.getLocation(minLocation, locations, + this.idToBackend, backendIdRef); if (assignedBytesPerHost.containsKey(location.server)) { assignedBytesPerHost.put(location.server, assignedBytesPerHost.get(location.server) + step); @@ -1576,7 +1604,8 @@ public class Coordinator { HashMap 
assignedBytesPerHost) throws Exception { for (TScanRangeLocations scanRangeLocations : locations) { Reference backendIdRef = new Reference(); - TScanRangeLocation minLocation = selectBackendsByRoundRobin(scanRangeLocations, assignedBytesPerHost, backendIdRef); + TScanRangeLocation minLocation = selectBackendsByRoundRobin(scanRangeLocations, + assignedBytesPerHost, backendIdRef); Backend backend = this.idToBackend.get(backendIdRef.getRef()); TNetworkAddress execHostPort = new TNetworkAddress(backend.getHost(), backend.getBePort()); this.addressToBackendID.put(execHostPort, backendIdRef.getRef()); @@ -1723,7 +1752,8 @@ public class Coordinator { private boolean checkBackendState() { for (BackendExecState backendExecState : needCheckBackendExecStates) { if (!backendExecState.isBackendStateHealthy()) { - queryStatus = new Status(TStatusCode.INTERNAL_ERROR, "backend " + backendExecState.backend.getId() + " is down"); + queryStatus = new Status(TStatusCode.INTERNAL_ERROR, "backend " + + backendExecState.backend.getId() + " is down"); return false; } } @@ -1747,18 +1777,19 @@ public class Coordinator { class BucketShuffleJoinController { // fragment_id -> < bucket_seq -> < scannode_id -> scan_range_params >> - private Map fragmentIdBucketSeqToScanRangeMap = Maps.newHashMap(); + private final Map fragmentIdBucketSeqToScanRangeMap = Maps.newHashMap(); // fragment_id -> < bucket_seq -> be_addresss > - private Map> fragmentIdToSeqToAddressMap = Maps.newHashMap(); + private final Map> fragmentIdToSeqToAddressMap + = Maps.newHashMap(); // fragment_id -> < be_id -> bucket_count > - private Map> fragmentIdToBuckendIdBucketCountMap = Maps.newHashMap(); + private final Map> fragmentIdToBuckendIdBucketCountMap = Maps.newHashMap(); // fragment_id -> bucket_num - private Map fragmentIdToBucketNumMap = Maps.newHashMap(); + private final Map fragmentIdToBucketNumMap = Maps.newHashMap(); // cache the bucketShuffleFragmentIds - private Set bucketShuffleFragmentIds = new HashSet<>(); + private final Set bucketShuffleFragmentIds = new HashSet<>(); - private Map> fragmentIdToScanNodeIds; + private final Map> fragmentIdToScanNodeIds; // TODO(cmy): Should refactor this Controller to unify bucket shuffle join and colocate join public BucketShuffleJoinController(Map> fragmentIdToScanNodeIds) { @@ -1808,8 +1839,9 @@ public class Coordinator { } // make sure each host have average bucket to scan - private void getExecHostPortForFragmentIDAndBucketSeq(TScanRangeLocations seqLocation, PlanFragmentId fragmentId, Integer bucketSeq, - ImmutableMap idToBackend, Map addressToBackendID) throws Exception { + private void getExecHostPortForFragmentIDAndBucketSeq(TScanRangeLocations seqLocation, + PlanFragmentId fragmentId, Integer bucketSeq, ImmutableMap idToBackend, + Map addressToBackendID) throws Exception { Map buckendIdToBucketCountMap = fragmentIdToBuckendIdBucketCountMap.get(fragmentId); int maxBucketNum = Integer.MAX_VALUE; long buckendId = Long.MAX_VALUE; @@ -1827,7 +1859,8 @@ public class Coordinator { } } Reference backendIdRef = new Reference(); - TNetworkAddress execHostPort = SimpleScheduler.getHost(buckendId, seqLocation.locations, idToBackend, backendIdRef); + TNetworkAddress execHostPort = SimpleScheduler.getHost(buckendId, + seqLocation.locations, idToBackend, backendIdRef); if (execHostPort == null) { throw new UserException(SystemInfoService.NO_SCAN_NODE_BACKEND_AVAILABLE_MSG); } @@ -1837,7 +1870,8 @@ public class Coordinator { if (!buckendIdToBucketCountMap.containsKey(backendIdRef.getRef())) { 
buckendIdToBucketCountMap.put(backendIdRef.getRef(), 1); } else { //buckendIdToBucketCountMap contains the new backend, update it - buckendIdToBucketCountMap.put(backendIdRef.getRef(), buckendIdToBucketCountMap.get(backendIdRef.getRef()) + 1); + buckendIdToBucketCountMap.put(backendIdRef.getRef(), + buckendIdToBucketCountMap.get(backendIdRef.getRef()) + 1); } } else { //the backend with buckendId is alive, update buckendIdToBucketCountMap directly buckendIdToBucketCountMap.put(buckendId, buckendIdToBucketCountMap.get(buckendId) + 1); @@ -1848,7 +1882,8 @@ public class Coordinator { // to ensure the same bucketSeq tablet to the same execHostPort private void computeScanRangeAssignmentByBucket( - final OlapScanNode scanNode, ImmutableMap idToBackend, Map addressToBackendID) throws Exception { + final OlapScanNode scanNode, ImmutableMap idToBackend, + Map addressToBackendID) throws Exception { if (!fragmentIdToSeqToAddressMap.containsKey(scanNode.getFragmentId())) { // In bucket shuffle join, we have 2 situation. // 1. Only one partition: in this case, we use scanNode.getTotalTabletsNum() to get the right bucket num @@ -1863,26 +1898,28 @@ public class Coordinator { bucketNum = (int) (scanNode.getTotalTabletsNum()); } fragmentIdToBucketNumMap.put(scanNode.getFragmentId(), bucketNum); - fragmentIdToSeqToAddressMap.put(scanNode.getFragmentId(), new HashedMap()); + fragmentIdToSeqToAddressMap.put(scanNode.getFragmentId(), new HashMap<>()); fragmentIdBucketSeqToScanRangeMap.put(scanNode.getFragmentId(), new BucketSeqToScanRange()); fragmentIdToBuckendIdBucketCountMap.put(scanNode.getFragmentId(), new HashMap<>()); } - Map bucketSeqToAddress = fragmentIdToSeqToAddressMap.get(scanNode.getFragmentId()); + Map bucketSeqToAddress + = fragmentIdToSeqToAddressMap.get(scanNode.getFragmentId()); BucketSeqToScanRange bucketSeqToScanRange = fragmentIdBucketSeqToScanRangeMap.get(scanNode.getFragmentId()); for (Integer bucketSeq : scanNode.bucketSeq2locations.keySet()) { //fill scanRangeParamsList List locations = scanNode.bucketSeq2locations.get(bucketSeq); if (!bucketSeqToAddress.containsKey(bucketSeq)) { - getExecHostPortForFragmentIDAndBucketSeq(locations.get(0), scanNode.getFragmentId(), bucketSeq, idToBackend, addressToBackendID); + getExecHostPortForFragmentIDAndBucketSeq(locations.get(0), scanNode.getFragmentId(), + bucketSeq, idToBackend, addressToBackendID); } for (TScanRangeLocations location : locations) { Map> scanRanges = - findOrInsert(bucketSeqToScanRange, bucketSeq, new HashMap>()); + findOrInsert(bucketSeqToScanRange, bucketSeq, new HashMap<>()); List scanRangeParamsList = - findOrInsert(scanRanges, scanNode.getId().asInt(), new ArrayList()); + findOrInsert(scanRanges, scanNode.getId().asInt(), new ArrayList<>()); // add scan range TScanRangeParams scanRangeParams = new TScanRangeParams(); @@ -1892,14 +1929,17 @@ public class Coordinator { } } - private void computeInstanceParam(PlanFragmentId fragmentId, int parallelExecInstanceNum, FragmentExecParams params) { + private void computeInstanceParam(PlanFragmentId fragmentId, + int parallelExecInstanceNum, FragmentExecParams params) { Map bucketSeqToAddress = fragmentIdToSeqToAddressMap.get(fragmentId); BucketSeqToScanRange bucketSeqToScanRange = fragmentIdBucketSeqToScanRangeMap.get(fragmentId); Set scanNodeIds = fragmentIdToScanNodeIds.get(fragmentId); // 1. 
count each node in one fragment should scan how many tablet, gather them in one list - Map>>>> addressToScanRanges = Maps.newHashMap(); - for (Map.Entry>> scanRanges : bucketSeqToScanRange.entrySet()) { + Map>>>> addressToScanRanges + = Maps.newHashMap(); + for (Map.Entry>> scanRanges + : bucketSeqToScanRange.entrySet()) { TNetworkAddress address = bucketSeqToAddress.get(scanRanges.getKey()); Map> nodeScanRanges = scanRanges.getValue(); // We only care about the node scan ranges of scan nodes which belong to this fragment @@ -1909,7 +1949,8 @@ public class Coordinator { filteredNodeScanRanges.put(scanNodeId, nodeScanRanges.get(scanNodeId)); } } - Pair>> filteredScanRanges = Pair.create(scanRanges.getKey(), filteredNodeScanRanges); + Pair>> filteredScanRanges + = Pair.create(scanRanges.getKey(), filteredNodeScanRanges); if (!addressToScanRanges.containsKey(address)) { addressToScanRanges.put(address, Lists.newArrayList()); @@ -1917,9 +1958,11 @@ public class Coordinator { addressToScanRanges.get(address).add(filteredScanRanges); } FragmentScanRangeAssignment assignment = params.scanRangeAssignment; - for (Map.Entry>>>> addressScanRange : addressToScanRanges.entrySet()) { + for (Map.Entry>>>> addressScanRange + : addressToScanRanges.entrySet()) { List>>> scanRange = addressScanRange.getValue(); - Map> range = findOrInsert(assignment, addressScanRange.getKey(), new HashMap>()); + Map> range + = findOrInsert(assignment, addressScanRange.getKey(), new HashMap<>()); int expectedInstanceNum = 1; if (parallelExecInstanceNum > 1) { //the scan instance num should not larger than the tablets num @@ -1927,22 +1970,26 @@ public class Coordinator { } // 2. split how many scanRange one instance should scan - List>>>> perInstanceScanRanges = ListUtil.splitBySize(scanRange, - expectedInstanceNum); + List>>>> perInstanceScanRanges + = ListUtil.splitBySize(scanRange, expectedInstanceNum); // 3.construct instanceExecParam add the scanRange should be scan by instance - for (List>>> perInstanceScanRange : perInstanceScanRanges) { - FInstanceExecParam instanceParam = new FInstanceExecParam(null, addressScanRange.getKey(), 0, params); + for (List>>> perInstanceScanRange + : perInstanceScanRanges) { + FInstanceExecParam instanceParam = new FInstanceExecParam( + null, addressScanRange.getKey(), 0, params); for (Pair>> nodeScanRangeMap : perInstanceScanRange) { instanceParam.addBucketSeq(nodeScanRangeMap.first); - for (Map.Entry> nodeScanRange : nodeScanRangeMap.second.entrySet()) { + for (Map.Entry> nodeScanRange + : nodeScanRangeMap.second.entrySet()) { if (!instanceParam.perNodeScanRanges.containsKey(nodeScanRange.getKey())) { range.put(nodeScanRange.getKey(), Lists.newArrayList()); instanceParam.perNodeScanRanges.put(nodeScanRange.getKey(), Lists.newArrayList()); } range.get(nodeScanRange.getKey()).addAll(nodeScanRange.getValue()); - instanceParam.perNodeScanRanges.get(nodeScanRange.getKey()).addAll(nodeScanRange.getValue()); + instanceParam.perNodeScanRanges.get(nodeScanRange.getKey()) + .addAll(nodeScanRange.getValue()); } } params.instanceExecParams.add(instanceParam); @@ -1951,12 +1998,13 @@ public class Coordinator { } } - private Map fragmentIdTobucketSeqToScanRangeMap = Maps.newHashMap(); - private Map> fragmentIdToSeqToAddressMap = Maps.newHashMap(); + private final Map fragmentIdTobucketSeqToScanRangeMap = Maps.newHashMap(); + private final Map> fragmentIdToSeqToAddressMap = Maps.newHashMap(); // cache the fragment id to its scan node ids. Used for colocate join. 
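Both `computeColocateJoinInstanceParam` and the bucket-shuffle `computeInstanceParam` above group one host's `(bucketSeq, scanRanges)` pairs and then divide them into `expectedInstanceNum` lists with `ListUtil.splitBySize`, one list per fragment instance. The sketch below only illustrates that splitting idea with a round-robin stand-in; it is not the actual `ListUtil` implementation, whose chunking strategy may differ.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class SplitDemo {
    // Divide items into `groups` lists of near-equal size (round-robin stand-in).
    static <T> List<List<T>> splitBySize(List<T> items, int groups) {
        List<List<T>> result = new ArrayList<>();
        for (int i = 0; i < groups; i++) {
            result.add(new ArrayList<>());
        }
        for (int i = 0; i < items.size(); i++) {
            result.get(i % groups).add(items.get(i));
        }
        return result;
    }

    public static void main(String[] args) {
        List<Integer> bucketSeqs = Arrays.asList(0, 1, 2, 3, 4, 5, 6);
        // With a parallel instance count of 3, seven buckets spread over three instances.
        System.out.println(splitBySize(bucketSeqs, 3)); // [[0, 3, 6], [1, 4], [2, 5]]
    }
}
```

As the surrounding comment notes, the instance count is also capped so it is "not larger than the tablets num", which keeps an instance from being created with nothing to scan.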
- private Map> fragmentIdToScanNodeIds = Maps.newHashMap(); - private Set colocateFragmentIds = new HashSet<>(); - private BucketShuffleJoinController bucketShuffleJoinController = new BucketShuffleJoinController(fragmentIdToScanNodeIds); + private final Map> fragmentIdToScanNodeIds = Maps.newHashMap(); + private final Set colocateFragmentIds = new HashSet<>(); + private final BucketShuffleJoinController bucketShuffleJoinController + = new BucketShuffleJoinController(fragmentIdToScanNodeIds); // record backend execute state // TODO(zhaochun): add profile information and others @@ -2034,7 +2082,8 @@ public class Coordinator { // return true if cancel success. Otherwise, return false public synchronized boolean cancelFragmentInstance(Types.PPlanFragmentCancelReason cancelReason) { if (LOG.isDebugEnabled()) { - LOG.debug("cancelRemoteFragments initiated={} done={} hasCanceled={} backend: {}, fragment instance id={}, reason: {}", + LOG.debug("cancelRemoteFragments initiated={} done={} hasCanceled={} backend: {}," + + " fragment instance id={}, reason: {}", this.initiated, this.done, this.hasCanceled, backend.getId(), DebugUtil.printId(fragmentInstanceId()), cancelReason.name()); } @@ -2078,7 +2127,8 @@ public class Coordinator { public boolean isBackendStateHealthy() { if (backend.getLastMissingHeartbeatTime() > lastMissingHeartbeatTime) { - LOG.warn("backend {} is down while joining the coordinator. job id: {}", backend.getId(), jobId); + LOG.warn("backend {} is down while joining the coordinator. job id: {}", + backend.getId(), jobId); return false; } return true; @@ -2240,7 +2290,8 @@ public class Coordinator { params.params.setRuntimeFilterParams(new TRuntimeFilterParams()); params.params.runtime_filter_params.setRuntimeFilterMergeAddr(runtimeFilterMergeAddr); if (instanceExecParam.instanceId.equals(runtimeFilterMergeInstanceId)) { - for (Map.Entry> entry : ridToTargetParam.entrySet()) { + for (Map.Entry> entry + : ridToTargetParam.entrySet()) { List targetParams = Lists.newArrayList(); for (FRuntimeFilterTargetParam targetParam : entry.getValue()) { targetParams.add(new TRuntimeFilterTargetParams(targetParam.targetFragmentInstanceId, @@ -2249,10 +2300,12 @@ public class Coordinator { params.params.runtime_filter_params.putToRidToTargetParam(entry.getKey().asInt(), targetParams); } for (Map.Entry entry : ridToBuilderNum.entrySet()) { - params.params.runtime_filter_params.putToRuntimeFilterBuilderNum(entry.getKey().asInt(), entry.getValue()); + params.params.runtime_filter_params.putToRuntimeFilterBuilderNum( + entry.getKey().asInt(), entry.getValue()); } for (RuntimeFilter rf : assignedRuntimeFilters) { - params.params.runtime_filter_params.putToRidToRuntimeFilter(rf.getFilterId().asInt(), rf.toThrift()); + params.params.runtime_filter_params.putToRidToRuntimeFilter( + rf.getFilterId().asInt(), rf.toThrift()); } } if (queryOptions.getQueryType() == TQueryType.LOAD) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/GlobalVariable.java b/fe/fe-core/src/main/java/org/apache/doris/qe/GlobalVariable.java index 1974384dac..cbf048d959 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/GlobalVariable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/GlobalVariable.java @@ -64,7 +64,7 @@ public final class GlobalVariable { // A string to be executed by the server for each client that connects @VariableMgr.VarAttr(name = INIT_CONNECT, flag = VariableMgr.GLOBAL) - public volatile static String initConnect = ""; + public static volatile String initConnect = ""; // A string to be 
executed by the server for each client that connects @VariableMgr.VarAttr(name = SYSTEM_TIME_ZONE, flag = VariableMgr.READ_ONLY) @@ -72,10 +72,10 @@ public final class GlobalVariable { // The amount of memory allocated for caching query results @VariableMgr.VarAttr(name = QUERY_CACHE_SIZE, flag = VariableMgr.GLOBAL) - public volatile static long queryCacheSize = 1048576; + public static volatile long queryCacheSize = 1048576; @VariableMgr.VarAttr(name = DEFAULT_ROWSET_TYPE, flag = VariableMgr.GLOBAL) - public volatile static String defaultRowsetType = "beta"; + public static volatile String defaultRowsetType = "beta"; // add performance schema to support MYSQL JDBC 8.0.16 or later versions. @VariableMgr.VarAttr(name = PERFORMANCE_SCHEMA, flag = VariableMgr.READ_ONLY) diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/HelpObjectIface.java b/fe/fe-core/src/main/java/org/apache/doris/qe/HelpObjectIface.java index 79cab5cc02..7541971cc3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/HelpObjectIface.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/HelpObjectIface.java @@ -22,5 +22,6 @@ import java.util.Map; // Help object interface public interface HelpObjectIface { public String getName(); + public void loadFrom(Map.Entry> doc); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/InsertStreamTxnExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/InsertStreamTxnExecutor.java index 41689a9b6b..3c826b6db5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/InsertStreamTxnExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/InsertStreamTxnExecutor.java @@ -62,7 +62,8 @@ public class InsertStreamTxnExecutor { InterruptedException, ExecutionException { TTxnParams txnConf = txnEntry.getTxnConf(); StreamLoadTask streamLoadTask = StreamLoadTask.fromTStreamLoadPutRequest(request); - StreamLoadPlanner planner = new StreamLoadPlanner(txnEntry.getDb(), (OlapTable) txnEntry.getTable(), streamLoadTask); + StreamLoadPlanner planner = new StreamLoadPlanner( + txnEntry.getDb(), (OlapTable) txnEntry.getTable(), streamLoadTask); TExecPlanFragmentParams tRequest = planner.plan(streamLoadTask.getId()); BeSelectionPolicy policy = new BeSelectionPolicy.Builder().setCluster(txnEntry.getDb().getClusterName()) .needLoadAvailable().needQueryAvailable().build(); @@ -110,7 +111,8 @@ public class InsertStreamTxnExecutor { Backend backend = txnEntry.getBackend(); TNetworkAddress address = new TNetworkAddress(backend.getHost(), backend.getBrpcPort()); try { - Future future = BackendServiceProxy.getInstance().commit(address, fragmentInstanceId); + Future future = BackendServiceProxy + .getInstance().commit(address, fragmentInstanceId); InternalService.PCommitResult result = future.get(5, TimeUnit.SECONDS); TStatusCode code = TStatusCode.findByValue(result.getStatus().getStatusCode()); if (code != TStatusCode.OK) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/MasterTxnExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/MasterTxnExecutor.java index 67efb64fb7..f4594a57a0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/MasterTxnExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/MasterTxnExecutor.java @@ -62,6 +62,7 @@ public class MasterTxnExecutor { throw new TException("Failed to get master client.", e); } } + // Send request to Master public TLoadTxnBeginResult beginTxn(TLoadTxnBeginRequest request) throws TException { TNetworkAddress thriftAddress = getMasterAddress(); diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/qe/MultiLoadMgr.java b/fe/fe-core/src/main/java/org/apache/doris/qe/MultiLoadMgr.java index a9cc6713ca..da945704eb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/MultiLoadMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/MultiLoadMgr.java @@ -113,8 +113,8 @@ public class MultiLoadMgr { throw new DdlException("files count and file size count not match: [" + request.getFileSize().size() + "!=" + request.getFiles().size() + "]"); } - List> files = Streams.zip(request.getFiles().stream(), request.getFileSize().stream(), Pair::create) - .collect(Collectors.toList()); + List> files = Streams.zip(request.getFiles().stream(), + request.getFileSize().stream(), Pair::create).collect(Collectors.toList()); load(request.getDb(), request.getLabel(), request.getSubLabel(), request.getTbl(), files, request.getBackend(), request.getProperties(), request.getTimestamp()); } @@ -472,10 +472,12 @@ public class MultiLoadMgr { } } if (properties.get(LoadStmt.KEY_IN_PARAM_PARTITIONS) != null) { - String[] partNames = properties.get(LoadStmt.KEY_IN_PARAM_PARTITIONS).trim().split("\\s*,\\s*"); + String[] partNames = properties.get(LoadStmt.KEY_IN_PARAM_PARTITIONS) + .trim().split("\\s*,\\s*"); partitionNames = new PartitionNames(false, Lists.newArrayList(partNames)); } else if (properties.get(LoadStmt.KEY_IN_PARAM_TEMP_PARTITIONS) != null) { - String[] partNames = properties.get(LoadStmt.KEY_IN_PARAM_TEMP_PARTITIONS).trim().split("\\s*,\\s*"); + String[] partNames = properties.get(LoadStmt.KEY_IN_PARAM_TEMP_PARTITIONS) + .trim().split("\\s*,\\s*"); partitionNames = new PartitionNames(true, Lists.newArrayList(partNames)); } if (properties.get(LoadStmt.KEY_IN_PARAM_MERGE_TYPE) != null) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/QueryDetail.java b/fe/fe-core/src/main/java/org/apache/doris/qe/QueryDetail.java index 0f5bc2c707..0884782ba9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/QueryDetail.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/QueryDetail.java @@ -23,7 +23,7 @@ public class QueryDetail { FINISHED, FAILED, CANCELLED - }; + } // When query received, FE will construct a QueryDetail // object. 
This object will set queryId, startTime, sql diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/QueryDetailQueue.java b/fe/fe-core/src/main/java/org/apache/doris/qe/QueryDetailQueue.java index 859225acb5..85f09798a3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/QueryDetailQueue.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/QueryDetailQueue.java @@ -66,4 +66,4 @@ public class QueryDetailQueue { return results; } -}; +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/QueryStateException.java b/fe/fe-core/src/main/java/org/apache/doris/qe/QueryStateException.java index ae46d9bc4d..8ad3161ac1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/QueryStateException.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/QueryStateException.java @@ -24,6 +24,7 @@ import com.google.common.base.Strings; public class QueryStateException extends UserException { private QueryState queryState; + public QueryStateException(MysqlStateType stateType, String msg) { super(Strings.nullToEmpty(msg)); createQueryState(stateType, msg); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ResultReceiver.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ResultReceiver.java index 04b4850cf5..d7c9070421 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/ResultReceiver.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ResultReceiver.java @@ -68,7 +68,8 @@ public class ResultReceiver { .build(); currentThread = Thread.currentThread(); - Future future = BackendServiceProxy.getInstance().fetchDataAsync(address, request); + Future future + = BackendServiceProxy.getInstance().fetchDataAsync(address, request); InternalService.PFetchDataResult pResult = null; while (pResult == null) { long currentTs = System.currentTimeMillis(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/RuntimeFilterTypeHelper.java b/fe/fe-core/src/main/java/org/apache/doris/qe/RuntimeFilterTypeHelper.java index 94aaa81802..64a136fca1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/RuntimeFilterTypeHelper.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/RuntimeFilterTypeHelper.java @@ -39,12 +39,12 @@ import java.util.Map; public class RuntimeFilterTypeHelper { private static final Logger LOG = LogManager.getLogger(RuntimeFilterTypeHelper.class); - public final static long ALLOWED_MASK = (TRuntimeFilterType.IN.getValue() + public static final long ALLOWED_MASK = (TRuntimeFilterType.IN.getValue() | TRuntimeFilterType.BLOOM.getValue() | TRuntimeFilterType.MIN_MAX.getValue() | TRuntimeFilterType.IN_OR_BLOOM.getValue()); - private final static Map varValueSet = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER); + private static final Map varValueSet = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER); static { varValueSet.put("IN", (long) TRuntimeFilterType.IN.getValue()); @@ -60,7 +60,8 @@ public class RuntimeFilterTypeHelper { return ""; } if ((varValue & ~ALLOWED_MASK) != 0) { - ErrorReport.reportDdlException(ErrorCode.ERR_WRONG_VALUE_FOR_VAR, SessionVariable.RUNTIME_FILTER_TYPE, varValue); + ErrorReport.reportDdlException( + ErrorCode.ERR_WRONG_VALUE_FOR_VAR, SessionVariable.RUNTIME_FILTER_TYPE, varValue); } List names = new ArrayList(); @@ -86,12 +87,14 @@ public class RuntimeFilterTypeHelper { } else { code = getCodeFromString(key); if (code == 0) { - ErrorReport.reportDdlException(ErrorCode.ERR_WRONG_VALUE_FOR_VAR, SessionVariable.RUNTIME_FILTER_TYPE, key); + ErrorReport.reportDdlException( + ErrorCode.ERR_WRONG_VALUE_FOR_VAR, SessionVariable.RUNTIME_FILTER_TYPE, 
key); } } resultCode |= code; if ((resultCode & ~ALLOWED_MASK) != 0) { - ErrorReport.reportDdlException(ErrorCode.ERR_WRONG_VALUE_FOR_VAR, SessionVariable.RUNTIME_FILTER_TYPE, key); + ErrorReport.reportDdlException( + ErrorCode.ERR_WRONG_VALUE_FOR_VAR, SessionVariable.RUNTIME_FILTER_TYPE, key); } } return resultCode; diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java index 0e3870da32..384401e5b3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java @@ -187,8 +187,8 @@ public class SessionVariable implements Serializable, Writable { public static final String ENABLE_PROJECTION = "enable_projection"; - public static final String TRIM_TAILING_SPACES_FOR_EXTERNAL_TABLE_QUERY = - "trim_tailing_spaces_for_external_table_query"; + public static final String TRIM_TAILING_SPACES_FOR_EXTERNAL_TABLE_QUERY + = "trim_tailing_spaces_for_external_table_query"; static final String ENABLE_ARRAY_TYPE = "enable_array_type"; diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java index ac5770956b..7b1cba596e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java @@ -397,7 +397,8 @@ public class ShowExecutor { List> rowSet = Lists.newArrayList(); rowSet.add(Lists.newArrayList("Olap engine", "YES", "Default storage engine of palo", "NO", "NO", "NO")); rowSet.add(Lists.newArrayList("MySQL", "YES", "MySQL server which data is in it", "NO", "NO", "NO")); - rowSet.add(Lists.newArrayList("ELASTICSEARCH", "YES", "ELASTICSEARCH cluster which data is in it", "NO", "NO", "NO")); + rowSet.add(Lists.newArrayList("ELASTICSEARCH", "YES", "ELASTICSEARCH cluster which data is in it", + "NO", "NO", "NO")); rowSet.add(Lists.newArrayList("HIVE", "YES", "HIVE database which data is in it", "NO", "NO", "NO")); rowSet.add(Lists.newArrayList("ICEBERG", "YES", "ICEBERG data lake which data is in it", "NO", "NO", "NO")); rowSet.add(Lists.newArrayList("ODBC", "YES", "ODBC driver which data we can connect", "NO", "NO", "NO")); @@ -1062,7 +1063,8 @@ public class ShowExecutor { Database db = catalog.getDbOrAnalysisException(showStmt.getDbName()); long dbId = db.getId(); - List> streamLoadRecords = catalog.getStreamLoadRecordMgr().getStreamLoadRecordByDb(dbId, showStmt.getLabelValue(), showStmt.isAccurateMatch(), showStmt.getState()); + List> streamLoadRecords = catalog.getStreamLoadRecordMgr().getStreamLoadRecordByDb( + dbId, showStmt.getLabelValue(), showStmt.isAccurateMatch(), showStmt.getState()); // order the result of List by orderByPairs in show stmt List orderByPairs = showStmt.getOrderByPairs(); @@ -1292,8 +1294,9 @@ public class ShowExecutor { // if job exists RoutineLoadJob routineLoadJob; try { - routineLoadJob = Catalog.getCurrentCatalog().getRoutineLoadManager().getJob(showRoutineLoadTaskStmt.getDbFullName(), - showRoutineLoadTaskStmt.getJobName()); + routineLoadJob = Catalog.getCurrentCatalog().getRoutineLoadManager().getJob( + showRoutineLoadTaskStmt.getDbFullName(), + showRoutineLoadTaskStmt.getJobName()); } catch (MetaNotFoundException e) { LOG.warn(e.getMessage(), e); throw new AnalysisException(e.getMessage()); @@ -1309,7 +1312,8 @@ public class ShowExecutor { try { tableName = routineLoadJob.getTableName(); } catch (MetaNotFoundException e) { - throw new 
AnalysisException("The table metadata of job has been changed. The job will be cancelled automatically", e); + throw new AnalysisException("The table metadata of job has been changed." + + " The job will be cancelled automatically", e); } if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), dbFullName, @@ -1699,7 +1703,8 @@ public class ShowExecutor { ShowBackupStmt showStmt = (ShowBackupStmt) stmt; Database db = Catalog.getCurrentCatalog().getDbOrAnalysisException(showStmt.getDbName()); - List jobs = Catalog.getCurrentCatalog().getBackupHandler().getJobs(db.getId(), showStmt.getLabelPredicate()); + List jobs = Catalog.getCurrentCatalog().getBackupHandler() + .getJobs(db.getId(), showStmt.getLabelPredicate()); List backupJobs = jobs.stream().filter(job -> job instanceof BackupJob) .map(job -> (BackupJob) job).collect(Collectors.toList()); @@ -1713,7 +1718,8 @@ public class ShowExecutor { ShowRestoreStmt showStmt = (ShowRestoreStmt) stmt; Database db = Catalog.getCurrentCatalog().getDbOrAnalysisException(showStmt.getDbName()); - List jobs = Catalog.getCurrentCatalog().getBackupHandler().getJobs(db.getId(), showStmt.getLabelPredicate()); + List jobs = Catalog.getCurrentCatalog().getBackupHandler() + .getJobs(db.getId(), showStmt.getLabelPredicate()); List restoreJobs = jobs.stream().filter(job -> job instanceof RestoreJob) .map(job -> (RestoreJob) job).collect(Collectors.toList()); @@ -1828,7 +1834,8 @@ public class ShowExecutor { continue; } - DynamicPartitionScheduler dynamicPartitionScheduler = Catalog.getCurrentCatalog().getDynamicPartitionScheduler(); + DynamicPartitionScheduler dynamicPartitionScheduler + = Catalog.getCurrentCatalog().getDynamicPartitionScheduler(); OlapTable olapTable = (OlapTable) tbl; olapTable.readLock(); try { @@ -1843,7 +1850,8 @@ public class ShowExecutor { PrivPredicate.SHOW)) { continue; } - DynamicPartitionProperty dynamicPartitionProperty = olapTable.getTableProperty().getDynamicPartitionProperty(); + DynamicPartitionProperty dynamicPartitionProperty + = olapTable.getTableProperty().getDynamicPartitionProperty(); String tableName = olapTable.getName(); ReplicaAllocation replicaAlloc = dynamicPartitionProperty.getReplicaAllocation(); if (replicaAlloc.isNotSet()) { @@ -1861,12 +1869,18 @@ public class ShowExecutor { String.valueOf(replicaAlloc.getTotalReplicaNum()), replicaAlloc.toCreateStmt(), dynamicPartitionProperty.getStartOfInfo(), - dynamicPartitionScheduler.getRuntimeInfo(olapTable.getId(), DynamicPartitionScheduler.LAST_UPDATE_TIME), - dynamicPartitionScheduler.getRuntimeInfo(olapTable.getId(), DynamicPartitionScheduler.LAST_SCHEDULER_TIME), - dynamicPartitionScheduler.getRuntimeInfo(olapTable.getId(), DynamicPartitionScheduler.DYNAMIC_PARTITION_STATE), - dynamicPartitionScheduler.getRuntimeInfo(olapTable.getId(), DynamicPartitionScheduler.CREATE_PARTITION_MSG), - dynamicPartitionScheduler.getRuntimeInfo(olapTable.getId(), DynamicPartitionScheduler.DROP_PARTITION_MSG), - dynamicPartitionProperty.getSortedReservedHistoryPeriods(unsortedReservedHistoryPeriods, dynamicPartitionProperty.getTimeUnit().toUpperCase()))); + dynamicPartitionScheduler.getRuntimeInfo(olapTable.getId(), + DynamicPartitionScheduler.LAST_UPDATE_TIME), + dynamicPartitionScheduler.getRuntimeInfo(olapTable.getId(), + DynamicPartitionScheduler.LAST_SCHEDULER_TIME), + dynamicPartitionScheduler.getRuntimeInfo(olapTable.getId(), + DynamicPartitionScheduler.DYNAMIC_PARTITION_STATE), + dynamicPartitionScheduler.getRuntimeInfo(olapTable.getId(), + 
DynamicPartitionScheduler.CREATE_PARTITION_MSG), + dynamicPartitionScheduler.getRuntimeInfo(olapTable.getId(), + DynamicPartitionScheduler.DROP_PARTITION_MSG), + dynamicPartitionProperty.getSortedReservedHistoryPeriods(unsortedReservedHistoryPeriods, + dynamicPartitionProperty.getTimeUnit().toUpperCase()))); } catch (DdlException e) { e.printStackTrace(); } finally { @@ -1886,7 +1900,8 @@ public class ShowExecutor { TransactionStatus status = showStmt.getStatus(); GlobalTransactionMgr transactionMgr = Catalog.getCurrentGlobalTransactionMgr(); if (status != TransactionStatus.UNKNOWN) { - resultSet = new ShowResultSet(showStmt.getMetaData(), transactionMgr.getDbTransInfoByStatus(db.getId(), status)); + resultSet = new ShowResultSet(showStmt.getMetaData(), + transactionMgr.getDbTransInfoByStatus(db.getId(), status)); } else { Long txnId = showStmt.getTxnId(); String label = showStmt.getLabel(); @@ -1931,7 +1946,8 @@ public class ShowExecutor { = ProfileManager.getInstance().getFragmentInstanceList( showStmt.getQueryId(), showStmt.getQueryId(), showStmt.getFragmentId()); if (instanceList == null) { - throw new AnalysisException("Failed to get instance list for fragment: " + showStmt.getFragmentId()); + throw new AnalysisException("Failed to get instance list for fragment: " + + showStmt.getFragmentId()); } for (Triple triple : instanceList) { List row = Lists.newArrayList(triple.getLeft(), triple.getMiddle(), @@ -1946,7 +1962,8 @@ public class ShowExecutor { ProfileTreeNode treeRoot = ProfileManager.getInstance().getInstanceProfileTree(showStmt.getQueryId(), showStmt.getQueryId(), showStmt.getFragmentId(), showStmt.getInstanceId()); if (treeRoot == null) { - throw new AnalysisException("Failed to get instance tree for instance: " + showStmt.getInstanceId()); + throw new AnalysisException("Failed to get instance tree for instance: " + + showStmt.getInstanceId()); } List row = Lists.newArrayList(ProfileTreePrinter.printInstanceTree(treeRoot)); rows.add(row); @@ -1993,7 +2010,8 @@ public class ShowExecutor { ProfileTreeNode treeRoot = ProfileManager.getInstance().getInstanceProfileTree(showStmt.getJobId(), showStmt.getTaskId(), "0", showStmt.getInstanceId()); if (treeRoot == null) { - throw new AnalysisException("Failed to get instance tree for instance: " + showStmt.getInstanceId()); + throw new AnalysisException("Failed to get instance tree for instance: " + + showStmt.getInstanceId()); } List row = Lists.newArrayList(ProfileTreePrinter.printInstanceTree(treeRoot)); rows.add(row); @@ -2015,7 +2033,8 @@ public class ShowExecutor { if (showCreateRoutineLoadStmt.isIncludeHistory()) { List routineLoadJobList = new ArrayList<>(); try { - routineLoadJobList = Catalog.getCurrentCatalog().getRoutineLoadManager().getJob(dbName, labelName, true, null); + routineLoadJobList = Catalog.getCurrentCatalog() + .getRoutineLoadManager().getJob(dbName, labelName, true, null); } catch (MetaNotFoundException e) { LOG.warn(new LogBuilder(LogKey.ROUTINE_LOAD_JOB, labelName) .add("error_msg", "Routine load cannot be found by this name") @@ -2041,15 +2060,18 @@ public class ShowExecutor { resultSet = new ShowResultSet(showCreateRoutineLoadStmt.getMetaData(), rows); continue; } - rows.add(Lists.newArrayList(String.valueOf(job.getId()), showCreateRoutineLoadStmt.getLabel(), job.getShowCreateInfo())); + rows.add(Lists.newArrayList(String.valueOf(job.getId()), + showCreateRoutineLoadStmt.getLabel(), job.getShowCreateInfo())); } } else { // if job exists RoutineLoadJob routineLoadJob; try { - routineLoadJob = 
Catalog.getCurrentCatalog().getRoutineLoadManager().checkPrivAndGetJob(dbName, labelName); + routineLoadJob = Catalog.getCurrentCatalog() + .getRoutineLoadManager().checkPrivAndGetJob(dbName, labelName); // get routine load info - rows.add(Lists.newArrayList(String.valueOf(routineLoadJob.getId()), showCreateRoutineLoadStmt.getLabel(), routineLoadJob.getShowCreateInfo())); + rows.add(Lists.newArrayList(String.valueOf(routineLoadJob.getId()), + showCreateRoutineLoadStmt.getLabel(), routineLoadJob.getShowCreateInfo())); } catch (MetaNotFoundException | DdlException e) { LOG.warn(e.getMessage(), e); throw new AnalysisException(e.getMessage()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/SimpleScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/qe/SimpleScheduler.java index d69b7e2efb..bee05f98d3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/SimpleScheduler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/SimpleScheduler.java @@ -237,7 +237,8 @@ public class SimpleScheduler { iterator.remove(); LOG.warn("remove backend {} from black list. reach max try time", backendId); } else { - LOG.debug("blacklistBackends backendID={} retryTimes={}", backendId, entry.getValue().first); + LOG.debug("blacklistBackends backendID={} retryTimes={}", + backendId, entry.getValue().first); } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/SqlModeHelper.java b/fe/fe-core/src/main/java/org/apache/doris/qe/SqlModeHelper.java index 80e4597300..f014ebd631 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/SqlModeHelper.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/SqlModeHelper.java @@ -69,12 +69,12 @@ public class SqlModeHelper { public static final long MODE_ANSI = 1L << 18; public static final long MODE_TRADITIONAL = 1L << 27; - public final static long MODE_LAST = 1L << 33; + public static final long MODE_LAST = 1L << 33; /* When a new session is create, its sql mode is set to MODE_DEFAULT */ - public final static long MODE_DEFAULT = 0L; + public static final long MODE_DEFAULT = 0L; - public final static long MODE_ALLOWED_MASK = + public static final long MODE_ALLOWED_MASK = (MODE_REAL_AS_FLOAT | MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | MODE_IGNORE_SPACE | MODE_NOT_USED | MODE_ONLY_FULL_GROUP_BY | MODE_NO_UNSIGNED_SUBTRACTION | MODE_NO_DIR_IN_CREATE | MODE_NO_AUTO_VALUE_ON_ZERO | MODE_NO_BACKSLASH_ESCAPES | MODE_STRICT_TRANS_TABLES @@ -82,11 +82,11 @@ public class SqlModeHelper { | MODE_ERROR_FOR_DIVISION_BY_ZERO | MODE_HIGH_NOT_PRECEDENCE | MODE_NO_ENGINE_SUBSTITUTION | MODE_PAD_CHAR_TO_FULL_LENGTH | MODE_TRADITIONAL | MODE_ANSI | MODE_TIME_TRUNCATE_FRACTIONAL); - public final static long MODE_COMBINE_MASK = (MODE_ANSI | MODE_TRADITIONAL); + public static final long MODE_COMBINE_MASK = (MODE_ANSI | MODE_TRADITIONAL); - private final static Map sqlModeSet = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER); + private static final Map sqlModeSet = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER); - private final static Map combineModeSet = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER); + private static final Map combineModeSet = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER); static { sqlModeSet.put("REAL_AS_FLOAT", MODE_REAL_AS_FLOAT); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java index a3fd1fed5e..b93573689d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java @@ 
-359,7 +359,8 @@ public class StmtExecutor implements ProfileWriter { if (!((QueryStmt) parsedStmt).isExplain()) { // sql/sqlHash block try { - Catalog.getCurrentCatalog().getSqlBlockRuleMgr().matchSql(originStmt.originStmt, context.getSqlHash(), context.getQualifiedUser()); + Catalog.getCurrentCatalog().getSqlBlockRuleMgr().matchSql( + originStmt.originStmt, context.getSqlHash(), context.getQualifiedUser()); } catch (AnalysisException e) { LOG.warn(e.getMessage()); context.getState().setError(e.getMysqlErrorCode(), e.getMessage()); @@ -386,8 +387,10 @@ public class StmtExecutor implements ProfileWriter { //reset query id for each retry if (i > 0) { UUID uuid = UUID.randomUUID(); - TUniqueId newQueryId = new TUniqueId(uuid.getMostSignificantBits(), uuid.getLeastSignificantBits()); - AuditLog.getQueryAudit().log("Query {} {} times with new query id: {}", DebugUtil.printId(queryId), i, DebugUtil.printId(newQueryId)); + TUniqueId newQueryId = new TUniqueId(uuid.getMostSignificantBits(), + uuid.getLeastSignificantBits()); + AuditLog.getQueryAudit().log("Query {} {} times with new query id: {}", + DebugUtil.printId(queryId), i, DebugUtil.printId(newQueryId)); context.setQueryId(newQueryId); } handleQueryStmt(); @@ -539,12 +542,14 @@ public class StmtExecutor implements ProfileWriter { // Analyze one statement to structure in memory. public void analyze(TQueryOptions tQueryOptions) throws UserException { if (LOG.isDebugEnabled()) { - LOG.debug("begin to analyze stmt: {}, forwarded stmt id: {}", context.getStmtId(), context.getForwardedStmtId()); + LOG.debug("begin to analyze stmt: {}, forwarded stmt id: {}", + context.getStmtId(), context.getForwardedStmtId()); } parse(); - // yiguolei: insert stmt's grammar analysis will write editlog, so that we check if the stmt should be forward to master here + // yiguolei: insert stmt's grammar analysis will write editlog, + // so that we check if the stmt should be forward to master here // if the stmt should be forward to master, then just return here and the master will do analysis again if (isForwardToMaster()) { return; @@ -585,8 +590,9 @@ public class StmtExecutor implements ProfileWriter { analyzeAndGenerateQueryPlan(tQueryOptions); break; } catch (MVSelectFailedException e) { - /** - * If there is MVSelectFailedException after the first planner, there will be error mv rewritten in query. + /* + * If there is MVSelectFailedException after the first planner, + * there will be error mv rewritten in query. * So, the query should be reanalyzed without mv rewritten and planner again. * Attention: Only error rewritten tuple is forbidden to mv rewrite in the second time. 
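One change visible in the `StmtExecutor` hunk above is that each automatic retry resets the query id from a fresh random `UUID`, and the new id is written to the audit log so the attempts can be told apart. A small stand-alone sketch of that id-generation pattern, with a local class standing in for the Thrift `TUniqueId`:

```java
import java.util.UUID;

public class QueryIdDemo {
    // Hypothetical stand-in for the Thrift-generated TUniqueId(hi, lo).
    static final class UniqueId {
        final long hi;
        final long lo;

        UniqueId(long hi, long lo) {
            this.hi = hi;
            this.lo = lo;
        }

        @Override
        public String toString() {
            return Long.toHexString(hi) + "-" + Long.toHexString(lo);
        }
    }

    public static void main(String[] args) {
        // Split a random UUID into its two 64-bit halves, as the executor does on retry.
        UUID uuid = UUID.randomUUID();
        UniqueId newQueryId = new UniqueId(uuid.getMostSignificantBits(), uuid.getLeastSignificantBits());
        System.out.println(newQueryId);
    }
}
```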
*/ @@ -627,7 +633,8 @@ public class StmtExecutor implements ProfileWriter { // parsedStmt may already by set when constructing this StmtExecutor(); if (parsedStmt == null) { // Parse statement with parser generated by CUP&FLEX - SqlScanner input = new SqlScanner(new StringReader(originStmt.originStmt), context.getSessionVariable().getSqlMode()); + SqlScanner input = new SqlScanner(new StringReader(originStmt.originStmt), + context.getSessionVariable().getSqlMode()); SqlParser parser = new SqlParser(input); try { parsedStmt = SqlParserUtils.getStmt(parser, originStmt.idx); @@ -745,7 +752,8 @@ public class StmtExecutor implements ProfileWriter { parsedStmt.reset(); // DORIS-7361 - // Need to reset selectList before second-round analyze, because exprs in selectList could be rewritten by mvExprRewriter + // Need to reset selectList before second-round analyze, + // because exprs in selectList could be rewritten by mvExprRewriter // in first-round analyze, which could cause analyze failure. if (parsedStmt instanceof QueryStmt) { ((QueryStmt) parsedStmt).resetSelectList(); @@ -835,7 +843,8 @@ public class StmtExecutor implements ProfileWriter { if (isEos) { if (batch != null) { - statisticsForAuditLog = batch.getQueryStatistics() == null ? null : batch.getQueryStatistics().toBuilder(); + statisticsForAuditLog = batch.getQueryStatistics() == null + ? null : batch.getQueryStatistics().toBuilder(); } if (!isSend) { sendFields(selectStmt.getColLabels(), exprToType(selectStmt.getResultExprs())); @@ -849,7 +858,8 @@ public class StmtExecutor implements ProfileWriter { /** * Handle the SelectStmt via Cache. */ - private void handleCacheStmt(CacheAnalyzer cacheAnalyzer, MysqlChannel channel, SelectStmt selectStmt) throws Exception { + private void handleCacheStmt(CacheAnalyzer cacheAnalyzer, + MysqlChannel channel, SelectStmt selectStmt) throws Exception { RowBatch batch = null; InternalService.PFetchCacheResult cacheResult = cacheAnalyzer.getCacheData(); CacheMode mode = cacheAnalyzer.getCacheMode(); @@ -864,7 +874,8 @@ public class StmtExecutor implements ProfileWriter { // rewrite sql if (mode == CacheMode.Partition) { if (cacheAnalyzer.getHitRange() == Cache.HitRange.Left) { - isSendFields = sendCachedValues(channel, cacheResult.getValuesList(), newSelectStmt, isSendFields, false); + isSendFields = sendCachedValues(channel, cacheResult.getValuesList(), + newSelectStmt, isSendFields, false); } newSelectStmt = cacheAnalyzer.getRewriteStmt(); newSelectStmt.reset(); @@ -998,7 +1009,8 @@ public class StmtExecutor implements ProfileWriter { batch = coord.getNext(); // for outfile query, there will be only one empty batch send back with eos flag if (batch.getBatch() != null) { - // For some language driver, getting error packet after fields packet will be recognized as a success result + // For some language driver, getting error packet after fields packet + // will be recognized as a success result // so We need to send fields after first batch arrived if (!isSendFields) { if (!isOutfileQuery) { @@ -1194,7 +1206,8 @@ public class StmtExecutor implements ProfileWriter { TTxnParams txnConf = txnEntry.getTxnConf(); long timeoutSecond = ConnectContext.get().getSessionVariable().getQueryTimeoutS(); TransactionState.LoadJobSourceType sourceType = TransactionState.LoadJobSourceType.INSERT_STREAMING; - Database dbObj = Catalog.getCurrentCatalog().getDbOrException(dbName, s -> new TException("database is invalid for dbName: " + s)); + Database dbObj = Catalog.getCurrentCatalog().getDbOrException( + dbName, s -> 
new TException("database is invalid for dbName: " + s)); Table tblObj = dbObj.getTableOrException(tblName, s -> new TException("table is invalid: " + s)); txnConf.setDbId(dbObj.getId()).setTbl(tblName).setDb(dbName); txnEntry.setTable(tblObj); @@ -1303,7 +1316,8 @@ public class StmtExecutor implements ProfileWriter { coord.cancel(); if (notTimeout) { errMsg = coord.getExecStatus().getErrorMsg(); - ErrorReport.reportDdlException("There exists unhealthy backend. " + errMsg, ErrorCode.ERR_FAILED_WHEN_INSERT); + ErrorReport.reportDdlException("There exists unhealthy backend. " + + errMsg, ErrorCode.ERR_FAILED_WHEN_INSERT); } else { ErrorReport.reportDdlException(ErrorCode.ERR_EXECUTE_TIMEOUT); } @@ -1341,7 +1355,8 @@ public class StmtExecutor implements ProfileWriter { } if (Catalog.getCurrentGlobalTransactionMgr().commitAndPublishTransaction( - insertStmt.getDbObj(), Lists.newArrayList(insertStmt.getTargetTable()), insertStmt.getTransactionId(), + insertStmt.getDbObj(), Lists.newArrayList(insertStmt.getTargetTable()), + insertStmt.getTransactionId(), TabletCommitInfo.fromThrift(coord.getCommitInfos()), context.getSessionVariable().getInsertVisibleTimeoutMs())) { txnStatus = TransactionStatus.VISIBLE; diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/VariableMgr.java b/fe/fe-core/src/main/java/org/apache/doris/qe/VariableMgr.java index 522c6bef0c..6800df1797 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/VariableMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/VariableMgr.java @@ -552,7 +552,8 @@ public class VariableMgr { public static void createDefaultSessionVariableForCkpt() { defaultSessionVariableForCkpt = new SessionVariable(); - ImmutableSortedMap.Builder builder = getStringVarContextBuilder(defaultSessionVariableForCkpt); + ImmutableSortedMap.Builder builder + = getStringVarContextBuilder(defaultSessionVariableForCkpt); ctxByVarNameForCkpt = builder.build(); } @@ -562,7 +563,8 @@ public class VariableMgr { } @NotNull - private static ImmutableSortedMap.Builder getStringVarContextBuilder(SessionVariable sessionVariable) { + private static ImmutableSortedMap.Builder getStringVarContextBuilder( + SessionVariable sessionVariable) { ImmutableSortedMap.Builder builder = ImmutableSortedMap.orderedBy(String.CASE_INSENSITIVE_ORDER); for (Field field : SessionVariable.class.getDeclaredFields()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java index 8c7d794123..51ae3e2977 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java @@ -153,7 +153,8 @@ public class CacheAnalyzer { } public void debug() { - LOG.debug("table {}, partition id {}, ver {}, time {}", olapTable.getName(), latestPartitionId, latestVersion, latestTime); + LOG.debug("table {}, partition id {}, ver {}, time {}", olapTable.getName(), + latestPartitionId, latestVersion, latestTime); } } @@ -239,7 +240,7 @@ public class CacheAnalyzer { //Check if selectStmt matches partition key //Only one table can be updated in Config.cache_last_version_interval_second range for (int i = 1; i < tblTimeList.size(); i++) { - if ((now - tblTimeList.get(i).latestTime) < Config.cache_last_version_interval_second * 1000) { + if ((now - tblTimeList.get(i).latestTime) < Config.cache_last_version_interval_second * 1000L) { LOG.debug("the time of other tables is newer than {} s, queryid {}", 
Config.cache_last_version_interval_second, DebugUtil.printId(queryId)); return CacheMode.None; @@ -254,20 +255,23 @@ public class CacheAnalyzer { List columns = partitionInfo.getPartitionColumns(); //Partition key has only one column if (columns.size() != 1) { - LOG.debug("more than one partition column, queryid {}", columns.size(), DebugUtil.printId(queryId)); + LOG.debug("more than one partition column {}, queryid {}", columns.size(), + DebugUtil.printId(queryId)); return CacheMode.None; } partColumn = columns.get(0); //Check if group expr contain partition column if (!checkGroupByPartitionKey(this.selectStmt, partColumn)) { - LOG.debug("group by columns does not contains all partition column, queryid {}", DebugUtil.printId(queryId)); + LOG.debug("group by columns does not contains all partition column, queryid {}", + DebugUtil.printId(queryId)); return CacheMode.None; } //Check if whereClause have one CompoundPredicate of partition column List compoundPredicates = Lists.newArrayList(); getPartitionKeyFromSelectStmt(this.selectStmt, partColumn, compoundPredicates); if (compoundPredicates.size() != 1) { - LOG.debug("empty or more than one predicates contain partition column, queryid {}", DebugUtil.printId(queryId)); + LOG.debug("empty or more than one predicates contain partition column, queryid {}", + DebugUtil.printId(queryId)); return CacheMode.None; } partitionPredicate = compoundPredicates.get(0); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheBeProxy.java b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheBeProxy.java index bb65e69bcc..9be5aacef1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheBeProxy.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheBeProxy.java @@ -120,12 +120,14 @@ public class CacheBeProxy extends CacheProxy { } } - protected boolean clearCache(InternalService.PClearCacheRequest request, Backend backend, int timeoutMs, Status status) { + protected boolean clearCache(InternalService.PClearCacheRequest request, + Backend backend, int timeoutMs, Status status) { TNetworkAddress address = new TNetworkAddress(backend.getHost(), backend.getBrpcPort()); try { request = request.toBuilder().setClearType(InternalService.PClearType.CLEAR_ALL).build(); LOG.info("clear all backend cache, backendId {}", backend.getId()); - Future future = BackendServiceProxy.getInstance().clearCache(address, request); + Future future + = BackendServiceProxy.getInstance().clearCache(address, request); InternalService.PCacheResponse response = future.get(timeoutMs, TimeUnit.MILLISECONDS); if (response.getStatus() == InternalService.PCacheStatus.CACHE_OK) { status.setStatus(new Status(TStatusCode.OK, "CACHE_OK")); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheCoordinator.java b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheCoordinator.java index 91f3f93660..8a30ad82a1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheCoordinator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheCoordinator.java @@ -37,7 +37,8 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; /** - * Use consistent hashing to find the BE corresponding to the key to avoid the change of BE leading to failure to hit the Cache + * Use consistent hashing to find the BE corresponding to the key to + * avoid the change of BE leading to failure to hit the Cache */ public class CacheCoordinator { private static final Logger LOG = 
LogManager.getLogger(CacheCoordinator.class); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/RowBatchBuilder.java b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/RowBatchBuilder.java index 37b91b6f89..6d5a635a30 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/RowBatchBuilder.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/RowBatchBuilder.java @@ -99,7 +99,8 @@ public class RowBatchBuilder { } } - public InternalService.PUpdateCacheRequest buildSqlUpdateRequest(String sql, long partitionKey, long lastVersion, long lastestTime) { + public InternalService.PUpdateCacheRequest buildSqlUpdateRequest( + String sql, long partitionKey, long lastVersion, long lastestTime) { if (updateRequest == null) { updateRequest = InternalService.PUpdateCacheRequest.newBuilder() .setSqlKey(CacheProxy.getMd5(sql)) diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/SqlCache.java b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/SqlCache.java index 64b4d95f6f..9cd6c365dc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/SqlCache.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/SqlCache.java @@ -98,7 +98,8 @@ public class SqlCache extends Cache { dataSize += value.getDataSize(); } LOG.info("update cache model {}, queryid {}, sqlkey {}, value count {}, row count {}, data size {}", - CacheAnalyzer.CacheMode.Sql, DebugUtil.printId(queryId), DebugUtil.printId(updateRequest.getSqlKey()), + CacheAnalyzer.CacheMode.Sql, DebugUtil.printId(queryId), + DebugUtil.printId(updateRequest.getSqlKey()), updateRequest.getValuesCount(), rowCount, dataSize); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExprRewriter.java b/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExprRewriter.java index 1daa160671..2b54b986f5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExprRewriter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExprRewriter.java @@ -86,7 +86,7 @@ public class ExprRewriter { do { oldNumChanges = numChanges; for (ExprRewriteRule rule : rules) { - // when foldConstantByBe is on, fold all constant expr by BE instead of applying FoldConstantsRule in FE. + // when foldConstantByBe is on, fold all constant expr by BE instead of applying FoldConstantsRule in FE if (rule instanceof FoldConstantsRule && analyzer.safeIsEnableFoldConstantByBe()) { continue; } @@ -100,7 +100,8 @@ public class ExprRewriter { return rewrittenExpr; } - private Expr applyRuleOnce(Expr expr, ExprRewriteRule rule, Analyzer analyzer, ClauseType clauseType) throws AnalysisException { + private Expr applyRuleOnce(Expr expr, ExprRewriteRule rule, Analyzer analyzer, ClauseType clauseType) + throws AnalysisException { Expr rewrittenExpr = rule.apply(expr, analyzer, clauseType); if (rewrittenExpr != expr) { numChanges++; diff --git a/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExtractCommonFactorsRule.java b/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExtractCommonFactorsRule.java index a7d7d7fa7c..0ec42b7c59 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExtractCommonFactorsRule.java +++ b/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExtractCommonFactorsRule.java @@ -61,7 +61,7 @@ import java.util.Set; * But, we should remove redundant conjuncts generated by redundant conjuncts in this rule. 
*/ public class ExtractCommonFactorsRule implements ExprRewriteRule { - private final static Logger LOG = LogManager.getLogger(ExtractCommonFactorsRule.class); + private static final Logger LOG = LogManager.getLogger(ExtractCommonFactorsRule.class); public static ExtractCommonFactorsRule INSTANCE = new ExtractCommonFactorsRule(); @Override @@ -132,13 +132,16 @@ public class ExtractCommonFactorsRule implements ExprRewriteRule { exprList.removeAll(commonFactorList); if (exprList.size() == 0) { // For example, the sql is "where (a = 1) or (a = 1 and B = 2)" - // if "(a = 1)" is extracted as a common factor expression, then the first expression "(a = 1)" has no expression - // other than a common factor expression, and the second expression "(a = 1 and B = 2)" has an expression of "(B = 2)" + // if "(a = 1)" is extracted as a common factor expression, then the first expression "(a = 1)" + // has no expression other than a common factor expression, and the second expression + // "(a = 1 and B = 2)" has an expression of "(B = 2)" // - // In this case, the common factor expression ("a = 1") can be directly used to replace the whole CompoundOrPredicate. - // In Fact, the common factor expression is actually the parent set of expression "(a = 1)" and expression "(a = 1 and B = 2)" + // In this case, the common factor expression ("a = 1") can be directly used to + // replace the whole CompoundOrPredicate. In Fact, the common factor expression is actually + // the parent set of expression "(a = 1)" and expression "(a = 1 and B = 2)" // - // exprList.size() == 0 means one child of CompoundOrPredicate has no expression other than a common factor expression. + // exprList.size() == 0 means one child of CompoundOrPredicate has no expression + // other than a common factor expression. isReturnCommonFactorExpr = true; break; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/rewrite/FEFunctions.java b/fe/fe-core/src/main/java/org/apache/doris/rewrite/FEFunctions.java index 1f9f695a34..fb69a19a4c 100755 --- a/fe/fe-core/src/main/java/org/apache/doris/rewrite/FEFunctions.java +++ b/fe/fe-core/src/main/java/org/apache/doris/rewrite/FEFunctions.java @@ -44,7 +44,8 @@ import java.math.BigInteger; /** * compute functions in FE. 
* - * when you add a new function, please ensure the name, argTypes , returnType and compute logic are consistent with BE's function + * when you add a new function, please ensure the name, argTypes, + * returnType and compute logic are consistent with BE's function */ public class FEFunctions { private static final Logger LOG = LogManager.getLogger(FEFunctions.class); @@ -71,7 +72,8 @@ public class FEFunctions { // DATEDIFF function only uses the date part for calculations and ignores the time part firstDate.castToDate(); secondDate.castToDate(); - long datediff = (firstDate.unixTimestamp(TimeUtils.getTimeZone()) - secondDate.unixTimestamp(TimeUtils.getTimeZone())) / 1000 / 60 / 60 / 24; + long datediff = (firstDate.unixTimestamp(TimeUtils.getTimeZone()) + - secondDate.unixTimestamp(TimeUtils.getTimeZone())) / 1000 / 60 / 60 / 24; return new IntLiteral(datediff, Type.INT); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/rewrite/FoldConstantsRule.java b/fe/fe-core/src/main/java/org/apache/doris/rewrite/FoldConstantsRule.java index 0db26aadf0..355dcef2f8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/rewrite/FoldConstantsRule.java +++ b/fe/fe-core/src/main/java/org/apache/doris/rewrite/FoldConstantsRule.java @@ -231,9 +231,9 @@ public class FoldConstantsRule implements ExprRewriteRule { } } - private void recursiveGetChildrenConstExpr(Expr expr, Map constExprMap, Map oriConstMap, - Analyzer analyzer, Map sysVarMap, - Map infoFnMap)throws AnalysisException { + private void recursiveGetChildrenConstExpr(Expr expr, Map constExprMap, + Map oriConstMap, Analyzer analyzer, Map sysVarMap, Map infoFnMap) + throws AnalysisException { for (int i = 0; i < expr.getChildren().size(); i++) { final Expr child = expr.getChildren().get(i); getConstExpr(child, constExprMap, oriConstMap, analyzer, sysVarMap, infoFnMap); @@ -363,13 +363,16 @@ public class FoldConstantsRule implements ExprRewriteRule { TFoldConstantParams tParams = new TFoldConstantParams(map, queryGlobals); tParams.setVecExec(VectorizedUtil.isVectorized()); - Future future = BackendServiceProxy.getInstance().foldConstantExpr(brpcAddress, tParams); + Future future + = BackendServiceProxy.getInstance().foldConstantExpr(brpcAddress, tParams); InternalService.PConstantExprResult result = future.get(5, TimeUnit.SECONDS); if (result.getStatus().getStatusCode() == 0) { - for (Map.Entry entry : result.getExprResultMapMap().entrySet()) { + for (Map.Entry entry + : result.getExprResultMapMap().entrySet()) { Map tmp = new HashMap<>(); - for (Map.Entry entry1 : entry.getValue().getMapMap().entrySet()) { + for (Map.Entry entry1 + : entry.getValue().getMapMap().entrySet()) { TPrimitiveType type = TPrimitiveType.findByValue(entry1.getValue().getType().getType()); Expr retExpr = null; if (entry1.getValue().getSuccess()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/rewrite/InferFiltersRule.java b/fe/fe-core/src/main/java/org/apache/doris/rewrite/InferFiltersRule.java index ef8f12e5b3..4332714e31 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/rewrite/InferFiltersRule.java +++ b/fe/fe-core/src/main/java/org/apache/doris/rewrite/InferFiltersRule.java @@ -55,7 +55,7 @@ import java.util.Set; * 4. Construct additional numerical connections and isNullPredicate. 
*/ public class InferFiltersRule implements ExprRewriteRule { - private final static Logger LOG = LogManager.getLogger(InferFiltersRule.class); + private static final Logger LOG = LogManager.getLogger(InferFiltersRule.class); public static InferFiltersRule INSTANCE = new InferFiltersRule(); @Override @@ -72,15 +72,15 @@ public class InferFiltersRule implements ExprRewriteRule { List slotEqSlotExpr = analyzer.getOnSlotEqSlotExpr(); // slotEqSlotDeDuplication: De-Duplication for slotEqSlotExpr - Set> slotEqSlotDeDuplication = - (clauseType == ExprRewriter.ClauseType.ON_CLAUSE) ? analyzer.getOnSlotEqSlotDeDuplication() : Sets.newHashSet(); + Set> slotEqSlotDeDuplication = (clauseType == ExprRewriter.ClauseType.ON_CLAUSE) + ? analyzer.getOnSlotEqSlotDeDuplication() : Sets.newHashSet(); // slotToLiteralExpr: Record existing and infer expr which slot and literal are equal List slotToLiteralExpr = analyzer.getOnSlotToLiteralExpr(); // slotToLiteralDeDuplication: De-Duplication for slotToLiteralExpr - Set> slotToLiteralDeDuplication = - (clauseType == ExprRewriter.ClauseType.ON_CLAUSE) ? analyzer.getOnSlotToLiteralDeDuplication() : Sets.newHashSet(); + Set> slotToLiteralDeDuplication = (clauseType == ExprRewriter.ClauseType.ON_CLAUSE) + ? analyzer.getOnSlotToLiteralDeDuplication() : Sets.newHashSet(); // newExprWithState: just record infer expr which slot and literal are equal and which is not null predicate // false : Unexecutable intermediate results will be produced during the derivation process. @@ -91,8 +91,8 @@ public class InferFiltersRule implements ExprRewriteRule { List isNullExpr = analyzer.getOnIsNullExpr(); // isNullDeDuplication: De-Duplication for isNullExpr - Set isNullDeDuplication = - (clauseType == ExprRewriter.ClauseType.ON_CLAUSE) ? analyzer.getOnIsNullDeDuplication() : Sets.newHashSet(); + Set isNullDeDuplication = (clauseType == ExprRewriter.ClauseType.ON_CLAUSE) + ? analyzer.getOnIsNullDeDuplication() : Sets.newHashSet(); // inExpr: Record existing and infer in predicate List inExpr = analyzer.getInExpr(); @@ -101,7 +101,8 @@ public class InferFiltersRule implements ExprRewriteRule { Set inDeDuplication = (clauseType == ExprRewriter.ClauseType.ON_CLAUSE) ? analyzer.getInDeDuplication() : Sets.newHashSet(); - // exprToWarshallArraySubscript/warshallArraySubscriptToExpr: function is easy to build warshall and newExprWithState + // exprToWarshallArraySubscript/warshallArraySubscriptToExpr: + // function is easy to build warshall and newExprWithState Map exprToWarshallArraySubscript = new HashMap<>(); Map warshallArraySubscriptToExpr = new HashMap<>(); @@ -244,7 +245,7 @@ public class InferFiltersRule implements ExprRewriteRule { Analyzer analyzer, ExprRewriter.ClauseType clauseType) { int arrayMaxSize = slotEqSlotExpr.size() * 2; - int warshall[][] = new int[arrayMaxSize][arrayMaxSize]; + int[][] warshall = new int[arrayMaxSize][arrayMaxSize]; for (int index = 0; index < arrayMaxSize; index++) { warshall[index] = new int[arrayMaxSize]; Arrays.fill(warshall[index], 0); @@ -261,7 +262,8 @@ public class InferFiltersRule implements ExprRewriteRule { /** * Initialize warshall array. 
- * Specify a corresponding array_id for each slot, and add the two slots in slotEqSlotExpr to the array in rows and columns + * Specify a corresponding array_id for each slot, and add the two slots in slotEqSlotExpr + * to the array in rows and columns * * @param warshall: Two-dimensional array * @param arrayMaxSize: slotEqSlotExpr.size() * 2 @@ -270,7 +272,7 @@ public class InferFiltersRule implements ExprRewriteRule { * @param warshallArraySubscriptToExpr * @return needGenWarshallArray. True:needGen; False:don't needGen */ - private boolean initWarshallArray(int warshall[][], + private boolean initWarshallArray(int[][] warshall, int arrayMaxSize, List slotEqSlotExpr, Map exprToWarshallArraySubscript, @@ -312,7 +314,7 @@ public class InferFiltersRule implements ExprRewriteRule { return needGenWarshallArray; } - private void genWarshallArray(int warshall[][], int arrayMaxSize, List> newSlotsArray) { + private void genWarshallArray(int[][] warshall, int arrayMaxSize, List> newSlotsArray) { for (int k = 0; k < arrayMaxSize; k++) { for (int i = 0; i < arrayMaxSize; i++) { if (warshall[i][k] == 0) { @@ -418,7 +420,8 @@ public class InferFiltersRule implements ExprRewriteRule { * @param checkSlot: t2.id * @return needInfer. True: needInfer. False: not needInfer */ - private boolean isNeedInfer(SlotRef newSlot, SlotRef checkSlot, Analyzer analyzer, ExprRewriter.ClauseType clauseType) { + private boolean isNeedInfer(SlotRef newSlot, SlotRef checkSlot, Analyzer analyzer, + ExprRewriter.ClauseType clauseType) { boolean ret = false; TupleId newTid = newSlot.getDesc().getParent().getRef().getId(); TupleId checkTid = checkSlot.getDesc().getParent().getRef().getId(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/rewrite/RewriteBinaryPredicatesRule.java b/fe/fe-core/src/main/java/org/apache/doris/rewrite/RewriteBinaryPredicatesRule.java index de3bae9057..4ed232b4b1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/rewrite/RewriteBinaryPredicatesRule.java +++ b/fe/fe-core/src/main/java/org/apache/doris/rewrite/RewriteBinaryPredicatesRule.java @@ -34,15 +34,16 @@ public class RewriteBinaryPredicatesRule implements ExprRewriteRule { public static ExprRewriteRule INSTANCE = new RewriteBinaryPredicatesRule(); /** - * Convert the binary predicate of the form > to the binary + * Convert the binary predicate of the form + * > to the binary * predicate of , thereby allowing the binary predicate * The predicate pushes down and completes the bucket clipped. * * Examples & background * For query "select * from T where t1 = 2.0", when the ResultType of column t1 is equal to BIGINT, in the binary * predicate analyze, the type will be unified to DECIMALV2, so the binary predicate will be converted to - * > , because Cast wraps the t1 column, it cannot be pushed down, - * resulting in poor performance. + * > , + * because Cast wraps the t1 column, it cannot be pushed down, resulting in poor performance. * We convert it to the equivalent query "select * from T where t1 = 2" to push down and improve performance. 
* * Applicable scene: diff --git a/fe/fe-core/src/main/java/org/apache/doris/rewrite/RewriteDateLiteralRule.java b/fe/fe-core/src/main/java/org/apache/doris/rewrite/RewriteDateLiteralRule.java index c3eee2f3f6..cda54130de 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/rewrite/RewriteDateLiteralRule.java +++ b/fe/fe-core/src/main/java/org/apache/doris/rewrite/RewriteDateLiteralRule.java @@ -36,7 +36,7 @@ import org.apache.doris.common.AnalysisException; * and be converted to be NULL when in other clause */ public class RewriteDateLiteralRule implements ExprRewriteRule { - public final static ExprRewriteRule INSTANCE = new RewriteDateLiteralRule(); + public static final ExprRewriteRule INSTANCE = new RewriteDateLiteralRule(); @Override public Expr apply(Expr expr, Analyzer analyzer, ExprRewriter.ClauseType clauseType) throws AnalysisException { @@ -54,7 +54,8 @@ public class RewriteDateLiteralRule implements ExprRewriteRule { if (!valueExpr.isConstant()) { return expr; } - // Only consider CastExpr and try our best to convert non-date_literal to date_literal,to be compatible with MySQL + // Only consider CastExpr and try our best to convert non-date_literal + // to date_literal,to be compatible with MySQL if (valueExpr instanceof CastExpr) { Expr childExpr = valueExpr.getChild(0); if (childExpr instanceof LiteralExpr) { @@ -67,7 +68,8 @@ public class RewriteDateLiteralRule implements ExprRewriteRule { if (clauseType == ExprRewriter.ClauseType.OTHER_CLAUSE) { return new NullLiteral(); } else { - throw new AnalysisException("Incorrect datetime value: " + valueExpr.toSql() + " in expression: " + expr.toSql()); + throw new AnalysisException("Incorrect datetime value: " + + valueExpr.toSql() + " in expression: " + expr.toSql()); } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/rewrite/RewriteFromUnixTimeRule.java b/fe/fe-core/src/main/java/org/apache/doris/rewrite/RewriteFromUnixTimeRule.java index 0377fde9d8..a40b33b2a1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/rewrite/RewriteFromUnixTimeRule.java +++ b/fe/fe-core/src/main/java/org/apache/doris/rewrite/RewriteFromUnixTimeRule.java @@ -55,6 +55,7 @@ public class RewriteFromUnixTimeRule implements ExprRewriteRule { // Here, we just support these three format. 
private final ImmutableMap beSupportFormatMap; private final ImmutableMap> parseMillisFunctionMap; + public RewriteFromUnixTimeRule() { beSupportFormatMap = ImmutableMap.builder() .put("%Y%m%d", "yyyyMMdd") @@ -62,11 +63,13 @@ public class RewriteFromUnixTimeRule implements ExprRewriteRule { .put("%Y-%m-%d %H:%i:%s", "yyyy-MM-dd HH:mm:ss") .build(); parseMillisFunctionMap = ImmutableMap.>builder() - .put("yyyyMMdd", (str) -> LocalDate.parse(str, DateTimeFormatter.ofPattern("yyyyMMdd")).atStartOfDay().toEpochSecond(OffsetDateTime.now().getOffset())) + .put("yyyyMMdd", (str) -> LocalDate.parse(str, DateTimeFormatter.ofPattern("yyyyMMdd")).atStartOfDay() + .toEpochSecond(OffsetDateTime.now().getOffset())) .put("yyyy-MM-dd", (str) -> LocalDate.parse(str, DateTimeFormatter.ofPattern("yyyy-MM-dd")) .atStartOfDay().toEpochSecond(OffsetDateTime.now().getOffset())) - .put("yyyy-MM-dd HH:mm:ss", (str) -> LocalDateTime.parse(str, DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")) - .toEpochSecond(OffsetDateTime.now().getOffset())) + .put("yyyy-MM-dd HH:mm:ss", + (str) -> LocalDateTime.parse(str, DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")) + .toEpochSecond(OffsetDateTime.now().getOffset())) .build(); } @@ -132,16 +135,21 @@ public class RewriteFromUnixTimeRule implements ExprRewriteRule { } try { - Expr literalExpr = LiteralExpr.create(String.valueOf(parseSecondsFunction.apply(le.getStringValue())), Type.BIGINT); - // it must adds low bound 0, because when a field contains negative data like -100, it will be queried as a result + Expr literalExpr = LiteralExpr.create( + String.valueOf(parseSecondsFunction.apply(le.getStringValue())), Type.BIGINT); + // it must adds low bound 0, because when a field contains negative data like -100, + // it will be queried as a result if (bp.getOp() == BinaryPredicate.Operator.LT || bp.getOp() == BinaryPredicate.Operator.LE) { BinaryPredicate r = new BinaryPredicate(bp.getOp(), sr, literalExpr); - BinaryPredicate l = new BinaryPredicate(BinaryPredicate.Operator.GE, sr, LiteralExpr.create("0", Type.BIGINT)); + BinaryPredicate l = new BinaryPredicate(BinaryPredicate.Operator.GE, sr, + LiteralExpr.create("0", Type.BIGINT)); return new CompoundPredicate(CompoundPredicate.Operator.AND, r, l); } else if (bp.getOp() == BinaryPredicate.Operator.GT || bp.getOp() == BinaryPredicate.Operator.GE) { - // also it must adds upper bound 253402271999, because from_unixtime support time range is [1970-01-01 00:00:00 ~ 9999-12-31 23:59:59] + // also it must adds upper bound 253402271999, because from_unixtime support time range is + // [1970-01-01 00:00:00 ~ 9999-12-31 23:59:59] BinaryPredicate l = new BinaryPredicate(bp.getOp(), sr, literalExpr); - BinaryPredicate r = new BinaryPredicate(BinaryPredicate.Operator.LE, sr, LiteralExpr.create("253402271999", Type.BIGINT)); + BinaryPredicate r = new BinaryPredicate(BinaryPredicate.Operator.LE, sr, + LiteralExpr.create("253402271999", Type.BIGINT)); return new CompoundPredicate(CompoundPredicate.Operator.AND, r, l); } else { return new BinaryPredicate(bp.getOp(), sr, literalExpr); diff --git a/fe/fe-core/src/main/java/org/apache/doris/rpc/AttachmentRequest.java b/fe/fe-core/src/main/java/org/apache/doris/rpc/AttachmentRequest.java index 568ba29527..a67e56c4b1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/rpc/AttachmentRequest.java +++ b/fe/fe-core/src/main/java/org/apache/doris/rpc/AttachmentRequest.java @@ -31,18 +31,23 @@ public class AttachmentRequest { TSerializer serializer = new TSerializer(); serializedRequest = 
serializer.serialize(request); } + public void setSerializedRequest(byte[] request) { this.serializedRequest = request; } + public byte[] getSerializedRequest() { return serializedRequest; } + public void setSerializedResult(byte[] result) { this.serializedResult = result; } + public byte[] getSerializedResult() { return serializedResult; } + public void getResult(TBase result) throws TException { TDeserializer deserializer = new TDeserializer(); deserializer.deserialize(result, serializedResult); diff --git a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java index be9f8b463e..67aa2baff0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java +++ b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java @@ -670,9 +670,10 @@ public class FrontendServiceImpl implements FrontendService.Iface { private void checkAuthCodeUuid(String dbName, long txnId, String authCodeUuid) throws AuthenticationException { - Database db = Catalog.getCurrentCatalog().getDbOrException(dbName, s -> new AuthenticationException("invalid db name: " + s)); - TransactionState transactionState = Catalog.getCurrentGlobalTransactionMgr(). - getTransactionState(db.getId(), txnId); + Database db = Catalog.getCurrentCatalog().getDbOrException( + dbName, s -> new AuthenticationException("invalid db name: " + s)); + TransactionState transactionState = Catalog.getCurrentGlobalTransactionMgr() + .getTransactionState(db.getId(), txnId); if (transactionState == null) { throw new AuthenticationException("invalid transactionState: " + txnId); } @@ -741,7 +742,8 @@ public class FrontendServiceImpl implements FrontendService.Iface { result.setTxnId(tmpRes.getTxnId()).setDbId(tmpRes.getDbId()); } catch (DuplicatedRequestException e) { // this is a duplicate request, just return previous txn id - LOG.warn("duplicate request for stream load. request id: {}, txn: {}", e.getDuplicatedRequestId(), e.getTxnId()); + LOG.warn("duplicate request for stream load. 
request id: {}, txn: {}", + e.getDuplicatedRequestId(), e.getTxnId()); result.setTxnId(e.getTxnId()); } catch (LabelAlreadyUsedException e) { status.setStatusCode(TStatusCode.LABEL_ALREADY_EXISTS); @@ -910,7 +912,8 @@ public class FrontendServiceImpl implements FrontendService.Iface { throw new UserException("unknown database, database=" + fullDbName); } - DatabaseTransactionMgr dbTransactionMgr = Catalog.getCurrentGlobalTransactionMgr().getDatabaseTransactionMgr(database.getId()); + DatabaseTransactionMgr dbTransactionMgr = Catalog.getCurrentGlobalTransactionMgr() + .getDatabaseTransactionMgr(database.getId()); TransactionState transactionState = dbTransactionMgr.getTransactionState(request.getTxnId()); if (transactionState == null) { throw new UserException("transaction [" + request.getTxnId() + "] not found"); @@ -925,7 +928,8 @@ public class FrontendServiceImpl implements FrontendService.Iface { String txnOperation = request.getOperation().trim(); if (txnOperation.equalsIgnoreCase("commit")) { - Catalog.getCurrentGlobalTransactionMgr().commitTransaction2PC(database, tableList, request.getTxnId(), 5000); + Catalog.getCurrentGlobalTransactionMgr() + .commitTransaction2PC(database, tableList, request.getTxnId(), 5000); } else if (txnOperation.equalsIgnoreCase("abort")) { Catalog.getCurrentGlobalTransactionMgr().abortTransaction2PC(database.getId(), request.getTxnId()); } else { @@ -1101,14 +1105,16 @@ public class FrontendServiceImpl implements FrontendService.Iface { long timeoutMs = request.isSetThriftRpcTimeoutMs() ? request.getThriftRpcTimeoutMs() : 5000; Table table = db.getTableOrMetaException(request.getTbl(), TableType.OLAP); if (!table.tryReadLock(timeoutMs, TimeUnit.MILLISECONDS)) { - throw new UserException("get table read lock timeout, database=" + fullDbName + ",table=" + table.getName()); + throw new UserException("get table read lock timeout, database=" + + fullDbName + ",table=" + table.getName()); } try { StreamLoadTask streamLoadTask = StreamLoadTask.fromTStreamLoadPutRequest(request); StreamLoadPlanner planner = new StreamLoadPlanner(db, (OlapTable) table, streamLoadTask); TExecPlanFragmentParams plan = planner.plan(streamLoadTask.getId()); // add table indexes to transaction state - TransactionState txnState = Catalog.getCurrentGlobalTransactionMgr().getTransactionState(db.getId(), request.getTxnId()); + TransactionState txnState = Catalog.getCurrentGlobalTransactionMgr() + .getTransactionState(db.getId(), request.getTxnId()); if (txnState == null) { throw new UserException("txn does not exist: " + request.getTxnId()); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsJobManager.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsJobManager.java index aed76f127e..95d12cd48a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsJobManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsJobManager.java @@ -108,7 +108,8 @@ public class StatisticsJobManager { for (Long tableId : tableIds) { Table table = db.getTableOrAnalysisException(tableId); if (table.getType() != Table.TableType.OLAP) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_NOT_OLAP_TABLE, db.getFullName(), table.getName(), "ANALYZE"); + ErrorReport.reportAnalysisException(ErrorCode.ERR_NOT_OLAP_TABLE, db.getFullName(), + table.getName(), "ANALYZE"); } } } finally { diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsJobScheduler.java 
b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsJobScheduler.java index f78d5ef56b..65862c0e6e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsJobScheduler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsJobScheduler.java @@ -62,7 +62,8 @@ public class StatisticsJobScheduler extends MasterDaemon { * and normal query services may be affected. Therefore, we put the jobs into the queue * and schedule them one by one, and finally divide each job to several subtasks and execute them. */ - public final Queue pendingJobQueue = Queues.newLinkedBlockingQueue(Config.cbo_max_statistics_job_num); + public final Queue pendingJobQueue + = Queues.newLinkedBlockingQueue(Config.cbo_max_statistics_job_num); public StatisticsJobScheduler() { super("Statistics job scheduler", 0); @@ -183,8 +184,10 @@ public class StatisticsJobScheduler extends MasterDaemon { for (Long partitionId : partitionIds) { StatsCategoryDesc columnCategory = getColStatsCategoryDesc(dbId, tblId, columnName); StatsGranularityDesc columnGranularity = getPartitionStatsGranularityDesc(tblId, partitionId); - List statsTypes = Arrays.asList(StatsType.MIN_VALUE, StatsType.MAX_VALUE, StatsType.NDV); - SQLStatisticsTask sqlTask = new SQLStatisticsTask(jobId, columnGranularity, columnCategory, statsTypes); + List statsTypes = Arrays.asList( + StatsType.MIN_VALUE, StatsType.MAX_VALUE, StatsType.NDV); + SQLStatisticsTask sqlTask = new SQLStatisticsTask( + jobId, columnGranularity, columnCategory, statsTypes); tasks.add(sqlTask); } }); @@ -193,7 +196,8 @@ public class StatisticsJobScheduler extends MasterDaemon { StatsCategoryDesc columnCategory = getColStatsCategoryDesc(dbId, tblId, columnName); StatsGranularityDesc columnGranularity = getTblStatsGranularityDesc(tblId); List statsTypes = Arrays.asList(StatsType.MIN_VALUE, StatsType.MAX_VALUE, StatsType.NDV); - SQLStatisticsTask sqlTask = new SQLStatisticsTask(jobId, columnGranularity, columnCategory, statsTypes); + SQLStatisticsTask sqlTask = new SQLStatisticsTask( + jobId, columnGranularity, columnCategory, statsTypes); tasks.add(sqlTask); } } @@ -215,10 +219,12 @@ public class StatisticsJobScheduler extends MasterDaemon { Column column = tbl.getColumn(columnName); Type colType = column.getType(); if (colType.isStringType()) { - SQLStatisticsTask sampleSqlTask = new SampleSQLStatisticsTask(jobId, columnGranularity, columnCategory, statsTypes); + SQLStatisticsTask sampleSqlTask = new SampleSQLStatisticsTask( + jobId, columnGranularity, columnCategory, statsTypes); tasks.add(sampleSqlTask); } else { - MetaStatisticsTask metaTask = new MetaStatisticsTask(jobId, columnGranularity, columnCategory, statsTypes); + MetaStatisticsTask metaTask = new MetaStatisticsTask( + jobId, columnGranularity, columnCategory, statsTypes); tasks.add(metaTask); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsManager.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsManager.java index 8e0f3eff46..3c67f9e9a0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsManager.java @@ -40,7 +40,7 @@ import java.util.List; import java.util.Map; public class StatisticsManager { - private final static Logger LOG = LogManager.getLogger(StatisticsManager.class); + private static final Logger LOG = LogManager.getLogger(StatisticsManager.class); private Statistics statistics; diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsTaskScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsTaskScheduler.java index 17ebc20918..4b644e77be 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsTaskScheduler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsTaskScheduler.java @@ -48,7 +48,7 @@ import java.util.concurrent.TimeoutException; Schedule statistics task */ public class StatisticsTaskScheduler extends MasterDaemon { - private final static Logger LOG = LogManager.getLogger(StatisticsTaskScheduler.class); + private static final Logger LOG = LogManager.getLogger(StatisticsTaskScheduler.class); private final Queue queue = Queues.newLinkedBlockingQueue(); @@ -178,7 +178,8 @@ public class StatisticsTaskScheduler extends MasterDaemon { // update the task and job info statisticsJob.updateJobInfoByTaskId(taskId, errorMsg); } catch (DdlException e) { - LOG.info("Failed to update statistics job info. jobId: {}, taskId: {}, e: {}", jobId, taskId, e); + LOG.info("Failed to update statistics job info. jobId: {}, taskId: {}, e: {}", + jobId, taskId, e); } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatsType.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatsType.java index e805bff1a2..93b1104e5c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatsType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatsType.java @@ -29,9 +29,11 @@ public enum StatsType { MAX_COL_LENS("max_col_lens"), AVG_COL_LENS("avg_col_lens"); private final String value; + StatsType(String value) { this.value = value; } + public String getValue() { return value; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java b/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java index 1388a6acc8..d8267c21e3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java @@ -417,7 +417,8 @@ public class Backend implements Writable { ImmutableMap diskInfos = disksRef; boolean exceedLimit = true; for (DiskInfo diskInfo : diskInfos.values()) { - if (diskInfo.getState() == DiskState.ONLINE && diskInfo.getStorageMedium() == storageMedium && !diskInfo.exceedLimit(true)) { + if (diskInfo.getState() == DiskState.ONLINE && diskInfo.getStorageMedium() + == storageMedium && !diskInfo.exceedLimit(true)) { exceedLimit = false; break; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/BackendHbResponse.java b/fe/fe-core/src/main/java/org/apache/doris/system/BackendHbResponse.java index c7538a533f..6c34af95f7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/BackendHbResponse.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/BackendHbResponse.java @@ -39,7 +39,8 @@ public class BackendHbResponse extends HeartbeatResponse implements Writable { super(HeartbeatResponse.Type.BACKEND); } - public BackendHbResponse(long beId, int bePort, int httpPort, int brpcPort, long hbTime, long beStartTime, String version) { + public BackendHbResponse(long beId, int bePort, int httpPort, int brpcPort, + long hbTime, long beStartTime, String version) { super(HeartbeatResponse.Type.BACKEND); this.beId = beId; this.status = HbStatus.OK; diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/BeSelectionPolicy.java b/fe/fe-core/src/main/java/org/apache/doris/system/BeSelectionPolicy.java index e995c0aff5..e65c3feec3 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/system/BeSelectionPolicy.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/BeSelectionPolicy.java @@ -47,6 +47,7 @@ public class BeSelectionPolicy { public static class Builder { private BeSelectionPolicy policy; + public Builder() { policy = new BeSelectionPolicy(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/Diagnoser.java b/fe/fe-core/src/main/java/org/apache/doris/system/Diagnoser.java index d62e47bdf2..083b476558 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/Diagnoser.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/Diagnoser.java @@ -86,7 +86,8 @@ public class Diagnoser { results.add(Lists.newArrayList("MaterializedIndex", "Not exist", "")); return results; } - results.add(Lists.newArrayList("MaterializedIndex", tbl.getIndexNameById(mIndex.getId()) + ": " + mIndex.getId(), "")); + results.add(Lists.newArrayList("MaterializedIndex", + tbl.getIndexNameById(mIndex.getId()) + ": " + mIndex.getId(), "")); // replica info Tablet tablet = mIndex.getTablet(tabletId); List replicas = tablet.getReplicas(); @@ -98,7 +99,8 @@ public class Diagnoser { // replica short replicaNum = tbl.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum(); if (replicas.size() != replicaNum) { - results.add(Lists.newArrayList("ReplicasNum", "Replica num is " + replicas.size() + ", expected: " + replicaNum, "")); + results.add(Lists.newArrayList("ReplicasNum", "Replica num is " + + replicas.size() + ", expected: " + replicaNum, "")); } else { results.add(Lists.newArrayList("ReplicasNum", "OK", "")); } @@ -153,10 +155,14 @@ public class Diagnoser { + replica.getVersionCount()); } } - results.add(Lists.newArrayList("ReplicaBackendStatus", (backendErr.length() == 0 ? "OK" : backendErr.toString()), "")); - results.add(Lists.newArrayList("ReplicaVersionStatus", (versionErr.length() == 0 ? "OK" : versionErr.toString()), "")); - results.add(Lists.newArrayList("ReplicaStatus", (statusErr.length() == 0 ? "OK" : statusErr.toString()), "")); - results.add(Lists.newArrayList("ReplicaCompactionStatus", (compactionErr.length() == 0 ? "OK" : compactionErr.toString()), "")); + results.add(Lists.newArrayList("ReplicaBackendStatus", (backendErr.length() == 0 + ? "OK" : backendErr.toString()), "")); + results.add(Lists.newArrayList("ReplicaVersionStatus", (versionErr.length() == 0 + ? "OK" : versionErr.toString()), "")); + results.add(Lists.newArrayList("ReplicaStatus", (statusErr.length() == 0 + ? "OK" : statusErr.toString()), "")); + results.add(Lists.newArrayList("ReplicaCompactionStatus", (compactionErr.length() == 0 + ? 
"OK" : compactionErr.toString()), "")); return results; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/FrontendHbResponse.java b/fe/fe-core/src/main/java/org/apache/doris/system/FrontendHbResponse.java index f9b6dbd9cf..6bb77495ea 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/FrontendHbResponse.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/FrontendHbResponse.java @@ -40,7 +40,8 @@ public class FrontendHbResponse extends HeartbeatResponse implements Writable { super(HeartbeatResponse.Type.FRONTEND); } - public FrontendHbResponse(String name, int queryPort, int rpcPort, long replayedJournalId, long hbTime, String version) { + public FrontendHbResponse(String name, int queryPort, int rpcPort, + long replayedJournalId, long hbTime, String version) { super(HeartbeatResponse.Type.FRONTEND); this.status = HbStatus.OK; this.name = name; diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/HeartbeatMgr.java b/fe/fe-core/src/main/java/org/apache/doris/system/HeartbeatMgr.java index 8026f14d96..855e95c0f1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/HeartbeatMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/HeartbeatMgr.java @@ -170,7 +170,8 @@ public class HeartbeatMgr extends MasterDaemon { // invalid all connections cached in ClientPool ClientPool.backendPool.clearPool(new TNetworkAddress(be.getHost(), be.getBePort())); if (!isReplay) { - Catalog.getCurrentCatalog().getGlobalTransactionMgr().abortTxnWhenCoordinateBeDown(be.getHost(), 100); + Catalog.getCurrentCatalog().getGlobalTransactionMgr() + .abortTxnWhenCoordinateBeDown(be.getHost(), 100); } } return isChanged; @@ -248,12 +249,15 @@ public class HeartbeatMgr extends MasterDaemon { if (tBackendInfo.isSetVersion()) { version = tBackendInfo.getVersion(); } - long beStartTime = tBackendInfo.isSetBeStartTime() ? tBackendInfo.getBeStartTime() : System.currentTimeMillis(); + long beStartTime = tBackendInfo.isSetBeStartTime() + ? tBackendInfo.getBeStartTime() : System.currentTimeMillis(); // backend.updateOnce(bePort, httpPort, beRpcPort, brpcPort); - return new BackendHbResponse(backendId, bePort, httpPort, brpcPort, System.currentTimeMillis(), beStartTime, version); + return new BackendHbResponse(backendId, bePort, httpPort, brpcPort, + System.currentTimeMillis(), beStartTime, version); } else { - return new BackendHbResponse(backendId, backend.getHost(), result.getStatus().getErrorMsgs().isEmpty() ? "Unknown error" - : result.getStatus().getErrorMsgs().get(0)); + return new BackendHbResponse(backendId, backend.getHost(), + result.getStatus().getErrorMsgs().isEmpty() + ? 
"Unknown error" : result.getStatus().getErrorMsgs().get(0)); } } catch (Exception e) { LOG.warn("backend heartbeat got exception", e); diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java b/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java index b4b7f8b140..65368abc9b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java @@ -866,7 +866,8 @@ public class SystemInfoService { return; } atomicLong.set(newReportVersion); - LOG.debug("update backend {} report version: {}, db: {}, table: {}", backendId, newReportVersion, dbId, tableId); + LOG.debug("update backend {} report version: {}, db: {}, table: {}", + backendId, newReportVersion, dbId, tableId); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/AgentTask.java b/fe/fe-core/src/main/java/org/apache/doris/task/AgentTask.java index abef257c55..f878fc521f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/AgentTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/task/AgentTask.java @@ -43,7 +43,7 @@ public abstract class AgentTask { protected long createTime; public AgentTask(TResourceInfo resourceInfo, long backendId, TTaskType taskType, - long dbId, long tableId, long partitionId, long indexId, long tabletId, long signature, long createTime) { + long dbId, long tableId, long partitionId, long indexId, long tabletId, long signature, long createTime) { this.backendId = backendId; this.signature = signature; this.taskType = taskType; diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/AgentTaskExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/task/AgentTaskExecutor.java index 28e19dc490..8297ef2fff 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/AgentTaskExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/task/AgentTaskExecutor.java @@ -24,7 +24,8 @@ import java.util.concurrent.ExecutorService; public class AgentTaskExecutor { - private static final ExecutorService EXECUTOR = ThreadPoolManager.newDaemonCacheThreadPool(Config.max_agent_task_threads_num, "agent-task-pool", true); + private static final ExecutorService EXECUTOR = ThreadPoolManager.newDaemonCacheThreadPool( + Config.max_agent_task_threads_num, "agent-task-pool", true); public AgentTaskExecutor() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/CloneTask.java b/fe/fe-core/src/main/java/org/apache/doris/task/CloneTask.java index d17affa8c0..d614bbb24e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/CloneTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/task/CloneTask.java @@ -99,6 +99,8 @@ public class CloneTask extends AgentTask { .append(schemaHash); sb.append(", storageMedium: ").append(storageMedium.name()); sb.append(", visible version(hash): ").append(visibleVersion); + sb.append(", src backend: ").append(srcBackends.get(0).getHost()) + .append(", src path hash: ").append(srcPathHash); sb.append(", src backend: ").append(srcBackends.get(0).getHost()).append(", src path hash: ") .append(srcPathHash); sb.append(", dest backend: ").append(backendId).append(", dest path hash: ").append(destPathHash); diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/HadoopLoadPendingTask.java b/fe/fe-core/src/main/java/org/apache/doris/task/HadoopLoadPendingTask.java index 536ba3caa0..3e40527394 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/HadoopLoadPendingTask.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/task/HadoopLoadPendingTask.java @@ -89,12 +89,14 @@ public class HadoopLoadPendingTask extends LoadPendingTask { Preconditions.checkNotNull(etlTaskConf); // add table indexes to transaction state - TransactionState txnState = Catalog.getCurrentGlobalTransactionMgr().getTransactionState(job.getDbId(), job.getTransactionId()); + TransactionState txnState = Catalog.getCurrentGlobalTransactionMgr() + .getTransactionState(job.getDbId(), job.getTransactionId()); if (txnState == null) { throw new LoadException("txn does not exist: " + job.getTransactionId()); } for (long tableId : job.getIdToTableLoadInfo().keySet()) { - OlapTable table = (OlapTable) db.getTableOrException(tableId, s -> new LoadException("table does not exist. id: " + s)); + OlapTable table = (OlapTable) db.getTableOrException( + tableId, s -> new LoadException("table does not exist. id: " + s)); table.readLock(); try { txnState.addTableIndexes(table); @@ -128,7 +130,8 @@ public class HadoopLoadPendingTask extends LoadPendingTask { long tableId = tableEntry.getKey(); TableLoadInfo tableLoadInfo = tableEntry.getValue(); - OlapTable table = (OlapTable) db.getTableOrException(tableId, s -> new LoadException("table does not exist. id: " + s)); + OlapTable table = (OlapTable) db.getTableOrException( + tableId, s -> new LoadException("table does not exist. id: " + s)); table.readLock(); try { // columns diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/LoadEtlTask.java b/fe/fe-core/src/main/java/org/apache/doris/task/LoadEtlTask.java index dcf6842b67..c7bfbdb19e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/LoadEtlTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/task/LoadEtlTask.java @@ -229,12 +229,14 @@ public abstract class LoadEtlTask extends MasterTask { Map idToTableLoadInfo = job.getIdToTableLoadInfo(); for (Entry tableEntry : idToTableLoadInfo.entrySet()) { long tableId = tableEntry.getKey(); - OlapTable table = (OlapTable) db.getTableOrException(tableId, s -> new LoadException("table does not exist. id: " + s)); + OlapTable table = (OlapTable) db.getTableOrException(tableId, + s -> new LoadException("table does not exist. 
id: " + s)); table.readLock(); try { TableLoadInfo tableLoadInfo = tableEntry.getValue(); - for (Entry partitionEntry : tableLoadInfo.getIdToPartitionLoadInfo().entrySet()) { + for (Entry partitionEntry + : tableLoadInfo.getIdToPartitionLoadInfo().entrySet()) { long partitionId = partitionEntry.getKey(); boolean needLoad = false; @@ -305,6 +307,8 @@ public abstract class LoadEtlTask extends MasterTask { } protected abstract boolean updateJobEtlStatus(); + protected abstract void processEtlRunning() throws LoadException; + protected abstract Map> getFilePathMap() throws LoadException; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/LoadTaskInfo.java b/fe/fe-core/src/main/java/org/apache/doris/task/LoadTaskInfo.java index 0bb4fb885f..cc28a5b15f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/LoadTaskInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/task/LoadTaskInfo.java @@ -30,40 +30,65 @@ import com.google.common.collect.Lists; import java.util.List; public interface LoadTaskInfo { - public boolean getNegative(); - public long getTxnId(); - public int getTimeout(); - public long getMemLimit(); - public String getTimezone(); - public PartitionNames getPartitions(); - public LoadTask.MergeType getMergeType(); - public Expr getDeleteCondition(); - public boolean hasSequenceCol(); - public String getSequenceCol(); - public TFileType getFileType(); - public TFileFormatType getFormatType(); - public String getJsonPaths(); - public String getJsonRoot(); - public boolean isStripOuterArray(); - public boolean isFuzzyParse(); - public boolean isNumAsString(); - public boolean isReadJsonByLine(); - public String getPath(); + boolean getNegative(); - public double getMaxFilterRatio(); + long getTxnId(); - public ImportColumnDescs getColumnExprDescs(); - public boolean isStrictMode(); + int getTimeout(); - public Expr getPrecedingFilter(); - public Expr getWhereExpr(); - public Separator getColumnSeparator(); - public Separator getLineDelimiter(); - public int getSendBatchParallelism(); - public boolean isLoadToSingleTablet(); - public String getHeaderType(); + long getMemLimit(); - public static class ImportColumnDescs { + String getTimezone(); + + PartitionNames getPartitions(); + + LoadTask.MergeType getMergeType(); + + Expr getDeleteCondition(); + + boolean hasSequenceCol(); + + String getSequenceCol(); + + TFileType getFileType(); + + TFileFormatType getFormatType(); + + String getJsonPaths(); + + String getJsonRoot(); + + boolean isStripOuterArray(); + + boolean isFuzzyParse(); + + boolean isNumAsString(); + + boolean isReadJsonByLine(); + + String getPath(); + + double getMaxFilterRatio(); + + ImportColumnDescs getColumnExprDescs(); + + boolean isStrictMode(); + + Expr getPrecedingFilter(); + + Expr getWhereExpr(); + + Separator getColumnSeparator(); + + Separator getLineDelimiter(); + + int getSendBatchParallelism(); + + boolean isLoadToSingleTablet(); + + String getHeaderType(); + + class ImportColumnDescs { public List descs = Lists.newArrayList(); public boolean isColumnDescsRewrited = false; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/MasterTaskExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/task/MasterTaskExecutor.java index 8847cffcc0..938fb44f58 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/MasterTaskExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/task/MasterTaskExecutor.java @@ -40,15 +40,19 @@ public class MasterTaskExecutor { public ScheduledThreadPoolExecutor scheduledThreadPool; public 
MasterTaskExecutor(String name, int threadNum, boolean needRegisterMetric) { - executor = ThreadPoolManager.newDaemonFixedThreadPool(threadNum, threadNum * 2, name + "_pool", needRegisterMetric); + executor = ThreadPoolManager.newDaemonFixedThreadPool( + threadNum, threadNum * 2, name + "_pool", needRegisterMetric); runningTasks = Maps.newHashMap(); - scheduledThreadPool = ThreadPoolManager.newDaemonScheduledThreadPool(1, name + "_scheduler_thread_pool", needRegisterMetric); + scheduledThreadPool = ThreadPoolManager.newDaemonScheduledThreadPool( + 1, name + "_scheduler_thread_pool", needRegisterMetric); } public MasterTaskExecutor(String name, int threadNum, int queueSize, boolean needRegisterMetric) { - executor = ThreadPoolManager.newDaemonFixedThreadPool(threadNum, queueSize, name + "_pool", needRegisterMetric); + executor = ThreadPoolManager.newDaemonFixedThreadPool( + threadNum, queueSize, name + "_pool", needRegisterMetric); runningTasks = Maps.newHashMap(); - scheduledThreadPool = ThreadPoolManager.newDaemonScheduledThreadPool(1, name + "_scheduler_thread_pool", needRegisterMetric); + scheduledThreadPool = ThreadPoolManager.newDaemonScheduledThreadPool( + 1, name + "_scheduler_thread_pool", needRegisterMetric); } public boolean hasIdleThread() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/StreamLoadTask.java b/fe/fe-core/src/main/java/org/apache/doris/task/StreamLoadTask.java index 7a01ff9e35..55cc6b17aa 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/StreamLoadTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/task/StreamLoadTask.java @@ -210,6 +210,7 @@ public class StreamLoadTask implements LoadTaskInfo { public void setJsonRoot(String jsonRoot) { this.jsonRoot = jsonRoot; } + public LoadTask.MergeType getMergeType() { return mergeType; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/UpdateTabletMetaInfoTask.java b/fe/fe-core/src/main/java/org/apache/doris/task/UpdateTabletMetaInfoTask.java index d2bfa158cf..1e1b9d8ec9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/UpdateTabletMetaInfoTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/task/UpdateTabletMetaInfoTask.java @@ -110,7 +110,8 @@ public class UpdateTabletMetaInfoTask extends AgentTask { TTabletMetaInfo metaInfo = new TTabletMetaInfo(); metaInfo.setTabletId(pair.first); metaInfo.setSchemaHash(pair.second); - TabletMeta tabletMeta = Catalog.getCurrentCatalog().getTabletInvertedIndex().getTabletMeta(pair.first); + TabletMeta tabletMeta = Catalog.getCurrentCatalog() + .getTabletInvertedIndex().getTabletMeta(pair.first); if (tabletMeta == null) { LOG.warn("could not find tablet [{}] in meta ignore it", pair.second); continue; diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/AbstractTxnStateChangeCallback.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/AbstractTxnStateChangeCallback.java index 4c3120f258..e8fe483f9c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/AbstractTxnStateChangeCallback.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/AbstractTxnStateChangeCallback.java @@ -41,7 +41,8 @@ public abstract class AbstractTxnStateChangeCallback implements TxnStateChangeCa } @Override - public void afterAborted(TransactionState txnState, boolean txnOperated, String txnStatusChangeReason) throws UserException { + public void afterAborted(TransactionState txnState, + boolean txnOperated, String txnStatusChangeReason) throws UserException { } diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java index 3c00f114a8..f65189cf1c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java @@ -94,24 +94,26 @@ public class DatabaseTransactionMgr { // set it to avoid holding lock too long when removing too many txns per round. private static final int MAX_REMOVE_TXN_PER_ROUND = 10000; - private long dbId; + private final long dbId; // the lock is used to control the access to transaction states // no other locks should be inside this lock - private ReentrantReadWriteLock transactionLock = new ReentrantReadWriteLock(true); + private final ReentrantReadWriteLock transactionLock = new ReentrantReadWriteLock(true); // transactionId -> running TransactionState - private Map idToRunningTransactionState = Maps.newHashMap(); + private final Map idToRunningTransactionState = Maps.newHashMap(); // transactionId -> final status TransactionState - private Map idToFinalStatusTransactionState = Maps.newHashMap(); + private final Map idToFinalStatusTransactionState = Maps.newHashMap(); // The following 2 queues are to store transactionStates with final status - // These queues are mainly used to avoid traversing all txns and speed up the cleaning time when cleaning up expired txs. - // The "Short" queue is used to store the txns of the expire time controlled by Config.streaming_label_keep_max_second. + // These queues are mainly used to avoid traversing all txns and speed up the cleaning time + // when cleaning up expired txs. + // The "Short" queue is used to store the txns of the expire time + // controlled by Config.streaming_label_keep_max_second. // The "Long" queue is used to store the txns of the expire time controlled by Config.label_keep_max_second. - private ArrayDeque finalStatusTransactionStateDequeShort = new ArrayDeque<>(); - private ArrayDeque finalStatusTransactionStateDequeLong = new ArrayDeque<>(); + private final ArrayDeque finalStatusTransactionStateDequeShort = new ArrayDeque<>(); + private final ArrayDeque finalStatusTransactionStateDequeLong = new ArrayDeque<>(); // label -> txn ids // this is used for checking if label already used. 
a label may correspond to multiple txns, @@ -119,7 +121,7 @@ public class DatabaseTransactionMgr { // this member should be consistent with idToTransactionState, // which means if a txn exist in idToRunningTransactionState or idToFinalStatusTransactionState // it must exists in dbIdToTxnLabels, and vice versa - private Map> labelToTxnIds = Maps.newHashMap(); + private final Map> labelToTxnIds = Maps.newHashMap(); // count the number of running txns of database, except for the routine load txn @@ -128,13 +130,13 @@ public class DatabaseTransactionMgr { // count only the number of running routine load txns of database private volatile int runningRoutineLoadTxnNums = 0; - private Catalog catalog; + private final Catalog catalog; - private EditLog editLog; + private final EditLog editLog; - private TransactionIdGenerator idGenerator; + private final TransactionIdGenerator idGenerator; - private List clearTransactionTasks = Lists.newArrayList(); + private final List clearTransactionTasks = Lists.newArrayList(); // not realtime usedQuota value to make a fast check for database data quota private volatile long usedQuotaDataBytes = -1; @@ -276,9 +278,10 @@ public class DatabaseTransactionMgr { } public long beginTransaction(List tableIdList, String label, TUniqueId requestId, - TransactionState.TxnCoordinator coordinator, TransactionState.LoadJobSourceType sourceType, long listenerId, long timeoutSecond) - throws DuplicatedRequestException, LabelAlreadyUsedException, BeginTransactionException, AnalysisException, - QuotaExceedException, MetaNotFoundException { + TransactionState.TxnCoordinator coordinator, TransactionState.LoadJobSourceType sourceType, + long listenerId, long timeoutSecond) + throws DuplicatedRequestException, LabelAlreadyUsedException, BeginTransactionException, + AnalysisException, QuotaExceedException, MetaNotFoundException { checkDatabaseDataQuota(); writeLock(); try { @@ -323,8 +326,8 @@ public class DatabaseTransactionMgr { long tid = idGenerator.getNextTransactionId(); LOG.info("begin transaction: txn id {} with label {} from coordinator {}, listner id: {}", tid, label, coordinator, listenerId); - TransactionState transactionState = new TransactionState(dbId, tableIdList, tid, label, requestId, sourceType, - coordinator, listenerId, timeoutSecond * 1000); + TransactionState transactionState = new TransactionState(dbId, tableIdList, + tid, label, requestId, sourceType, coordinator, listenerId, timeoutSecond * 1000); transactionState.setPrepareTime(System.currentTimeMillis()); unprotectUpsertTransactionState(transactionState, false); @@ -362,8 +365,8 @@ public class DatabaseTransactionMgr { this.usedQuotaDataBytes = usedQuotaDataBytes; } - public void preCommitTransaction2PC(List
tableList, long transactionId, List<TabletCommitInfo> tabletCommitInfos, - TxnCommitAttachment txnCommitAttachment) + public void preCommitTransaction2PC(List<Table>
tableList, long transactionId, + List tabletCommitInfos, TxnCommitAttachment txnCommitAttachment) throws UserException { // check status // the caller method already own db lock, we do not obtain db lock here @@ -403,7 +406,8 @@ public class DatabaseTransactionMgr { checkCommitStatus(tableList, transactionState, tabletCommitInfos, txnCommitAttachment, errorReplicaIds, tableToPartition, totalInvolvedBackends); - unprotectedPreCommitTransaction2PC(transactionState, errorReplicaIds, tableToPartition, totalInvolvedBackends, db); + unprotectedPreCommitTransaction2PC(transactionState, errorReplicaIds, tableToPartition, + totalInvolvedBackends, db); LOG.info("transaction:[{}] successfully pre-committed", transactionState); } @@ -429,8 +433,8 @@ public class DatabaseTransactionMgr { // if index is dropped, it does not matter. // if table or partition is dropped during load, just ignore that tablet, // because we should allow dropping rollup or partition during load - List tabletIds = tabletCommitInfos.stream().map( - tabletCommitInfo -> tabletCommitInfo.getTabletId()).collect(Collectors.toList()); + List tabletIds = tabletCommitInfos.stream() + .map(TabletCommitInfo::getTabletId).collect(Collectors.toList()); List tabletMetaList = tabletInvertedIndex.getTabletMetaList(tabletIds); for (int i = 0; i < tabletMetaList.size(); i++) { TabletMeta tabletMeta = tabletMetaList.get(i); @@ -487,7 +491,8 @@ public class DatabaseTransactionMgr { } } - if (table.getState() == OlapTable.OlapTableState.ROLLUP || table.getState() == OlapTable.OlapTableState.SCHEMA_CHANGE) { + if (table.getState() == OlapTable.OlapTableState.ROLLUP + || table.getState() == OlapTable.OlapTableState.SCHEMA_CHANGE) { /* * This is just a optimization that do our best to not let publish version tasks * timeout if table is under rollup or schema change. Because with a short @@ -505,7 +510,8 @@ public class DatabaseTransactionMgr { transactionState.prolongPublishTimeout(); } - int quorumReplicaNum = table.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum() / 2 + 1; + int quorumReplicaNum = table.getPartitionInfo() + .getReplicaAllocation(partition.getId()).getTotalReplicaNum() / 2 + 1; for (MaterializedIndex index : allIndices) { for (Tablet tablet : index.getTablets()) { int successReplicaNum = 0; @@ -522,10 +528,12 @@ public class DatabaseTransactionMgr { throw new TransactionCommitFailedException("could not find replica for tablet [" + tabletId + "], backend [" + tabletBackend + "]"); } - // if the tablet have no replica's to commit or the tablet is a rolling up tablet, the commit backends maybe null + // if the tablet have no replica's to commit or the tablet is a rolling up tablet, + // the commit backends maybe null // if the commit backends is null, set all replicas as error replicas if (commitBackends != null && commitBackends.contains(tabletBackend)) { - // if the backend load success but the backend has some errors previously, then it is not a normal replica + // if the backend load success but the backend has some errors previously, + // then it is not a normal replica // ignore it but not log it // for example, a replica is in clone state if (replica.getLastFailedVersion() < 0) { @@ -542,8 +550,8 @@ public class DatabaseTransactionMgr { if (successReplicaNum < quorumReplicaNum) { LOG.warn("Failed to commit txn [{}]. 
" + "Tablet [{}] success replica num is {} < quorum replica num {} " - + "while error backends {}", - transactionState.getTransactionId(), tablet.getId(), successReplicaNum, quorumReplicaNum, + + "while error backends {}", transactionState.getTransactionId(), + tablet.getId(), successReplicaNum, quorumReplicaNum, Joiner.on(",").join(errorBackendIdsForTablet)); throw new TabletQuorumFailedException(transactionState.getTransactionId(), tablet.getId(), successReplicaNum, quorumReplicaNum, @@ -629,7 +637,8 @@ public class DatabaseTransactionMgr { if (is2PC) { unprotectedCommitTransaction2PC(transactionState, db); } else { - unprotectedCommitTransaction(transactionState, errorReplicaIds, tableToPartition, totalInvolvedBackends, db); + unprotectedCommitTransaction(transactionState, errorReplicaIds, + tableToPartition, totalInvolvedBackends, db); } txnOperated = true; } finally { @@ -643,7 +652,8 @@ public class DatabaseTransactionMgr { LOG.info("transaction:[{}] successfully committed", transactionState); } - public boolean waitForTransactionFinished(Database db, long transactionId, long timeoutMillis) throws TransactionCommitFailedException { + public boolean waitForTransactionFinished(Database db, long transactionId, long timeoutMillis) + throws TransactionCommitFailedException { TransactionState transactionState = null; readLock(); try { @@ -684,7 +694,8 @@ public class DatabaseTransactionMgr { // it must at the front of the finalStatusTransactionStateDeque. // check both "short" and "long" queue. if (!finalStatusTransactionStateDequeShort.isEmpty() - && transactionState.getTransactionId() == finalStatusTransactionStateDequeShort.getFirst().getTransactionId()) { + && transactionState.getTransactionId() + == finalStatusTransactionStateDequeShort.getFirst().getTransactionId()) { finalStatusTransactionStateDequeShort.pop(); clearTransactionState(transactionState.getTransactionId()); } else if (!finalStatusTransactionStateDequeLong.isEmpty() @@ -754,7 +765,8 @@ public class DatabaseTransactionMgr { try { // only send task to preCommitted transaction return idToRunningTransactionState.values().stream() - .filter(transactionState -> (transactionState.getTransactionStatus() == TransactionStatus.PRECOMMITTED)) + .filter(transactionState + -> (transactionState.getTransactionStatus() == TransactionStatus.PRECOMMITTED)) .sorted(Comparator.comparing(TransactionState::getPreCommitTime)) .collect(Collectors.toList()); } finally { @@ -767,7 +779,8 @@ public class DatabaseTransactionMgr { try { // only send task to committed transaction return idToRunningTransactionState.values().stream() - .filter(transactionState -> (transactionState.getTransactionStatus() == TransactionStatus.COMMITTED)) + .filter(transactionState -> + (transactionState.getTransactionStatus() == TransactionStatus.COMMITTED)) .sorted(Comparator.comparing(TransactionState::getCommitTime)) .collect(Collectors.toList()); } finally { @@ -792,20 +805,22 @@ public class DatabaseTransactionMgr { errorReplicaIds.addAll(originalErrorReplicas); } - // case 1 If database is dropped, then we just throw MetaNotFoundException, because all related tables are already force dropped, - // we just ignore the transaction with all tables been force dropped. - // case 2 If at least one table lock successfully, which means that the transaction should be finished for the existed tables - // while just ignore tables which have been dropped forcefully. 
- // case 3 Database exist and all tables already been dropped, this case is same with case1, just finish the transaction with empty commit info - // only three cases mentioned above may happen, because user cannot drop table without force while there are committed transactions on table - // and writeLockTablesIfExist is a blocking function, the returned result would be the existed table list which hold write lock + // case 1 If database is dropped, then we just throw MetaNotFoundException, because all related tables are + // already force dropped, we just ignore the transaction with all tables been force dropped. + // case 2 If at least one table lock successfully, which means that the transaction should be finished for + // the existed tables while just ignore tables which have been dropped forcefully. + // case 3 Database exist and all tables already been dropped, this case is same with case1, just finish + // the transaction with empty commit info only three cases mentioned above may happen, because user cannot + // drop table without force while there are committed transactions on table and writeLockTablesIfExist is + // a blocking function, the returned result would be the existed table list which hold write lock Database db = catalog.getDbOrMetaException(transactionState.getDbId()); List tableIdList = transactionState.getTableIdList(); List
tableList = db.getTablesOnIdOrderIfExist(tableIdList); tableList = MetaLockUtils.writeLockTablesIfExist(tableList); try { boolean hasError = false; - Iterator tableCommitInfoIterator = transactionState.getIdToTableCommitInfos().values().iterator(); + Iterator tableCommitInfoIterator + = transactionState.getIdToTableCommitInfos().values().iterator(); while (tableCommitInfoIterator.hasNext()) { TableCommitInfo tableCommitInfo = tableCommitInfoIterator.next(); long tableId = tableCommitInfo.getTableId(); @@ -819,7 +834,8 @@ public class DatabaseTransactionMgr { continue; } PartitionInfo partitionInfo = table.getPartitionInfo(); - Iterator partitionCommitInfoIterator = tableCommitInfo.getIdToPartitionCommitInfo().values().iterator(); + Iterator partitionCommitInfoIterator + = tableCommitInfo.getIdToPartitionCommitInfo().values().iterator(); while (partitionCommitInfoIterator.hasNext()) { PartitionCommitInfo partitionCommitInfo = partitionCommitInfoIterator.next(); long partitionId = partitionCommitInfo.getPartitionId(); @@ -827,9 +843,8 @@ public class DatabaseTransactionMgr { // partition maybe dropped between commit and publish version, ignore this error if (partition == null) { partitionCommitInfoIterator.remove(); - LOG.warn("partition {} is dropped, skip version check and remove it from transaction state {}", - partitionId, - transactionState); + LOG.warn("partition {} is dropped, skip version check" + + " and remove it from transaction state {}", partitionId, transactionState); continue; } if (partition.getVisibleVersion() != partitionCommitInfo.getVersion() - 1) { @@ -838,8 +853,9 @@ public class DatabaseTransactionMgr { transactionId, partitionCommitInfo.getVersion(), partition.getVisibleVersion()); - String errMsg = String.format("wait for publishing partition %d version %d. self version: %d. table %d", - partitionId, partition.getVisibleVersion() + 1, partitionCommitInfo.getVersion(), tableId); + String errMsg = String.format("wait for publishing partition %d version %d." + + " self version: %d. table %d", partitionId, partition.getVisibleVersion() + 1, + partitionCommitInfo.getVersion(), tableId); transactionState.setErrorMsg(errMsg); return; } @@ -873,8 +889,8 @@ public class DatabaseTransactionMgr { ++healthReplicaNum; } } else if (replica.getVersion() >= partitionCommitInfo.getVersion()) { - // the replica's version is larger than or equal to current transaction partition's version - // the replica is normal, then remove it from error replica ids + // the replica's version is larger than or equal to current transaction + // partition's version the replica is normal, then remove it from error replica ids // TODO(cmy): actually I have no idea why we need this check errorReplicaIds.remove(replica.getId()); ++healthReplicaNum; @@ -882,11 +898,14 @@ public class DatabaseTransactionMgr { } if (healthReplicaNum < quorumReplicaNum) { - LOG.info("publish version failed for transaction {} on tablet {}, with only {} replicas less than quorum {}", + LOG.info("publish version failed for transaction {} on tablet {}," + + " with only {} replicas less than quorum {}", transactionState, tablet, healthReplicaNum, quorumReplicaNum); - String errMsg = String.format("publish on tablet %d failed. succeed replica num %d less than quorum %d." + String errMsg = String.format("publish on tablet %d failed." + + " succeed replica num %d less than quorum %d." 
+ " table: %d, partition: %d, publish version: %d", - tablet.getId(), healthReplicaNum, quorumReplicaNum, tableId, partitionId, partition.getVisibleVersion() + 1); + tablet.getId(), healthReplicaNum, quorumReplicaNum, tableId, + partitionId, partition.getVisibleVersion() + 1); transactionState.setErrorMsg(errMsg); hasError = true; } @@ -995,7 +1014,8 @@ public class DatabaseTransactionMgr { transactionState.setCommitTime(System.currentTimeMillis()); transactionState.setTransactionStatus(TransactionStatus.COMMITTED); - Iterator tableCommitInfoIterator = transactionState.getIdToTableCommitInfos().values().iterator(); + Iterator tableCommitInfoIterator + = transactionState.getIdToTableCommitInfos().values().iterator(); while (tableCommitInfoIterator.hasNext()) { TableCommitInfo tableCommitInfo = tableCommitInfoIterator.next(); long tableId = tableCommitInfo.getTableId(); @@ -1008,7 +1028,8 @@ public class DatabaseTransactionMgr { transactionState); continue; } - Iterator partitionCommitInfoIterator = tableCommitInfo.getIdToPartitionCommitInfo().values().iterator(); + Iterator partitionCommitInfoIterator + = tableCommitInfo.getIdToPartitionCommitInfo().values().iterator(); while (partitionCommitInfoIterator.hasNext()) { PartitionCommitInfo partitionCommitInfo = partitionCommitInfoIterator.next(); long partitionId = partitionCommitInfo.getPartitionId(); @@ -1107,9 +1128,11 @@ public class DatabaseTransactionMgr { abortTransaction(transactionId, reason, null); } - public void abortTransaction(long transactionId, String reason, TxnCommitAttachment txnCommitAttachment) throws UserException { + public void abortTransaction(long transactionId, String reason, TxnCommitAttachment txnCommitAttachment) + throws UserException { if (transactionId < 0) { - LOG.info("transaction id is {}, less than 0, maybe this is an old type load job, ignore abort operation", transactionId); + LOG.info("transaction id is {}, less than 0, maybe this is an old type load job," + + " ignore abort operation", transactionId); return; } TransactionState transactionState = null; @@ -1154,7 +1177,8 @@ public class DatabaseTransactionMgr { public void abortTransaction2PC(long transactionId) throws UserException { LOG.info("begin to abort txn {}", transactionId); if (transactionId < 0) { - LOG.info("transaction id is {}, less than 0, maybe this is an old type load job, ignore abort operation", transactionId); + LOG.info("transaction id is {}, less than 0, maybe this is an old type load job," + + " ignore abort operation", transactionId); return; } TransactionState transactionState; @@ -1221,7 +1245,8 @@ public class DatabaseTransactionMgr { AgentBatchTask batchTask = null; synchronized (clearTransactionTasks) { for (Long beId : allBeIds) { - ClearTransactionTask task = new ClearTransactionTask(beId, transactionState.getTransactionId(), Lists.newArrayList()); + ClearTransactionTask task = new ClearTransactionTask( + beId, transactionState.getTransactionId(), Lists.newArrayList()); clearTransactionTasks.add(task); } @@ -1292,8 +1317,10 @@ public class DatabaseTransactionMgr { int leftNum = MAX_REMOVE_TXN_PER_ROUND; writeLock(); try { - leftNum = unprotectedRemoveExpiredTxns(currentMillis, expiredTxnIds, finalStatusTransactionStateDequeShort, leftNum); - leftNum = unprotectedRemoveExpiredTxns(currentMillis, expiredTxnIds, finalStatusTransactionStateDequeLong, leftNum); + leftNum = unprotectedRemoveExpiredTxns(currentMillis, expiredTxnIds, + finalStatusTransactionStateDequeShort, leftNum); + leftNum = 
unprotectedRemoveExpiredTxns(currentMillis, expiredTxnIds, + finalStatusTransactionStateDequeLong, leftNum); if (!expiredTxnIds.isEmpty()) { Map> dbExpiredTxnIds = Maps.newHashMap(); @@ -1437,13 +1464,14 @@ public class DatabaseTransactionMgr { return infos; } - protected void checkRunningTxnExceedLimit(TransactionState.LoadJobSourceType sourceType) throws BeginTransactionException { + protected void checkRunningTxnExceedLimit(TransactionState.LoadJobSourceType sourceType) + throws BeginTransactionException { switch (sourceType) { case ROUTINE_LOAD_TASK: // no need to check limit for routine load task: // 1. the number of running routine load tasks is limited by Config.max_routine_load_task_num_per_be - // 2. if we add routine load txn to runningTxnNums, runningTxnNums will always be occupied by routine load, - // and other txn may not be able to submitted. + // 2. if we add routine load txn to runningTxnNums, runningTxnNums will always be occupied by routine + // load, and other txn may not be able to submitted. break; default: if (runningTxnNums >= Config.max_running_txn_num_per_db) { @@ -1468,11 +1496,13 @@ public class DatabaseTransactionMgr { long partitionId = partitionCommitInfo.getPartitionId(); Partition partition = table.getPartition(partitionId); if (partition == null) { - LOG.warn("partition {} of table {} does not exist when update catalog after committed. transaction: {}, db: {}", + LOG.warn("partition {} of table {} does not exist when update catalog after committed." + + " transaction: {}, db: {}", partitionId, tableId, transactionState.getTransactionId(), db.getId()); continue; } - List allIndices = partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL); + List allIndices = partition + .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL); for (MaterializedIndex index : allIndices) { List tablets = index.getTablets(); for (Tablet tablet : tablets) { @@ -1505,11 +1535,13 @@ public class DatabaseTransactionMgr { long newCommitVersion = partitionCommitInfo.getVersion(); Partition partition = table.getPartition(partitionId); if (partition == null) { - LOG.warn("partition {} in table {} does not exist when update catalog after visible. transaction: {}, db: {}", + LOG.warn("partition {} in table {} does not exist when update catalog after visible." 
+ + " transaction: {}, db: {}", partitionId, tableId, transactionState.getTransactionId(), db.getId()); continue; } - List allIndices = partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL); + List allIndices = partition + .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL); for (MaterializedIndex index : allIndices) { for (Tablet tablet : index.getTablets()) { for (Replica replica : tablet.getReplicas()) { @@ -1522,10 +1554,11 @@ public class DatabaseTransactionMgr { newVersion = replica.getVersion(); } else if (!replica.checkVersionCatchUp(partition.getVisibleVersion(), true)) { // this means the replica has error in the past, but we did not observe it - // during upgrade, one job maybe in quorum finished state, for example, A,B,C 3 replica - // A,B 's version is 10, C's version is 10 but C' 10 is abnormal should be rollback - // then we will detect this and set C's last failed version to 10 and last success version to 11 - // this logic has to be replayed in checkpoint thread + // during upgrade, one job maybe in quorum finished state, for example, + // A,B,C 3 replica A,B 's version is 10, C's version is 10 but C' 10 is abnormal + // should be rollback then we will detect this and set C's last failed version to + // 10 and last success version to 11 this logic has to be replayed + // in checkpoint thread lastFailedVersion = partition.getVisibleVersion(); newVersion = replica.getVersion(); } @@ -1533,11 +1566,12 @@ public class DatabaseTransactionMgr { // success version always move forward lastSuccessVersion = newCommitVersion; } else { - // for example, A,B,C 3 replicas, B,C failed during publish version, then B C will be set abnormal - // all loading will failed, B,C will have to recovery by clone, it is very inefficient and maybe lost data - // Using this method, B,C will publish failed, and fe will publish again, not update their last failed version - // if B is publish successfully in next turn, then B is normal and C will be set abnormal so that quorum is maintained - // and loading will go on. + // for example, A,B,C 3 replicas, B,C failed during publish version, + // then B C will be set abnormal all loading will failed, B,C will have to recovery + // by clone, it is very inefficient and maybe lost data Using this method, B,C will + // publish failed, and fe will publish again, not update their last failed version + // if B is publish successfully in next turn, then B is normal and C will be set + // abnormal so that quorum is maintained and loading will go on. newVersion = replica.getVersion(); if (newCommitVersion > lastFailedVersion) { lastFailedVersion = newCommitVersion; diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java index ceb2c857c0..03513c4833 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java @@ -61,7 +61,8 @@ import java.util.concurrent.TimeoutException; * 1. begin * 2. commit * 3. 
abort - * Attention: all api in txn manager should get db lock or load lock first, then get txn manager's lock, or there will be dead lock + * Attention: all api in txn manager should get db lock or load lock first, then get txn manager's lock, + * or there will be dead lock */ public class GlobalTransactionMgr implements Writable { private static final Logger LOG = LogManager.getLogger(GlobalTransactionMgr.class); @@ -90,7 +91,8 @@ public class GlobalTransactionMgr implements Writable { } public void addDatabaseTransactionMgr(Long dbId) { - if (dbIdToDatabaseTransactionMgrs.putIfAbsent(dbId, new DatabaseTransactionMgr(dbId, catalog, idGenerator)) == null) { + if (dbIdToDatabaseTransactionMgrs.putIfAbsent(dbId, + new DatabaseTransactionMgr(dbId, catalog, idGenerator)) == null) { LOG.debug("add database transaction manager for db {}", dbId); } } @@ -101,8 +103,8 @@ public class GlobalTransactionMgr implements Writable { } } - public long beginTransaction(long dbId, List tableIdList, String label, TxnCoordinator coordinator, LoadJobSourceType sourceType, - long timeoutSecond) + public long beginTransaction(long dbId, List tableIdList, String label, TxnCoordinator coordinator, + LoadJobSourceType sourceType, long timeoutSecond) throws AnalysisException, LabelAlreadyUsedException, BeginTransactionException, DuplicatedRequestException, QuotaExceedException, MetaNotFoundException { return beginTransaction(dbId, tableIdList, label, null, coordinator, sourceType, -1, timeoutSecond); @@ -122,7 +124,7 @@ public class GlobalTransactionMgr implements Writable { * @throws IllegalTransactionParameterException */ public long beginTransaction(long dbId, List tableIdList, String label, TUniqueId requestId, - TxnCoordinator coordinator, LoadJobSourceType sourceType, long listenerId, long timeoutSecond) + TxnCoordinator coordinator, LoadJobSourceType sourceType, long listenerId, long timeoutSecond) throws AnalysisException, LabelAlreadyUsedException, BeginTransactionException, DuplicatedRequestException, QuotaExceedException, MetaNotFoundException { @@ -132,17 +134,20 @@ public class GlobalTransactionMgr implements Writable { switch (sourceType) { case BACKEND_STREAMING: - checkValidTimeoutSecond(timeoutSecond, Config.max_stream_load_timeout_second, Config.min_load_timeout_second); + checkValidTimeoutSecond(timeoutSecond, Config.max_stream_load_timeout_second, + Config.min_load_timeout_second); break; default: checkValidTimeoutSecond(timeoutSecond, Config.max_load_timeout_second, Config.min_load_timeout_second); } DatabaseTransactionMgr dbTransactionMgr = getDatabaseTransactionMgr(dbId); - return dbTransactionMgr.beginTransaction(tableIdList, label, requestId, coordinator, sourceType, listenerId, timeoutSecond); + return dbTransactionMgr.beginTransaction(tableIdList, label, requestId, + coordinator, sourceType, listenerId, timeoutSecond); } - private void checkValidTimeoutSecond(long timeoutSecond, int maxLoadTimeoutSecond, int minLoadTimeOutSecond) throws AnalysisException { + private void checkValidTimeoutSecond(long timeoutSecond, int maxLoadTimeoutSecond, + int minLoadTimeOutSecond) throws AnalysisException { if (timeoutSecond > maxLoadTimeoutSecond || timeoutSecond < minLoadTimeOutSecond) { throw new AnalysisException("Invalid timeout: " + timeoutSecond + ". 
Timeout should between " + minLoadTimeOutSecond + " and " + maxLoadTimeoutSecond @@ -176,7 +181,8 @@ public class GlobalTransactionMgr implements Writable { TxnCommitAttachment txnCommitAttachment) throws UserException { if (!MetaLockUtils.tryWriteLockTablesOrMetaException(tableList, timeoutMillis, TimeUnit.MILLISECONDS)) { - throw new UserException("get tableList write lock timeout, tableList=(" + StringUtils.join(tableList, ",") + ")"); + throw new UserException("get tableList write lock timeout, tableList=(" + + StringUtils.join(tableList, ",") + ")"); } try { preCommitTransaction2PC(db.getId(), tableList, transactionId, tabletCommitInfos, txnCommitAttachment); @@ -185,8 +191,8 @@ public class GlobalTransactionMgr implements Writable { } } - public void preCommitTransaction2PC(long dbId, List
tableList, long transactionId, List<TabletCommitInfo> tabletCommitInfos, - TxnCommitAttachment txnCommitAttachment) + public void preCommitTransaction2PC(long dbId, List<Table>
tableList, long transactionId, + List<TabletCommitInfo> tabletCommitInfos, TxnCommitAttachment txnCommitAttachment) throws UserException { if (Config.disable_load_job) { throw new TransactionCommitFailedException("disable_load_job is set to true, all load jobs are prevented"); } @@ -197,7 +203,8 @@ dbTransactionMgr.preCommitTransaction2PC(tableList, transactionId, tabletCommitInfos, txnCommitAttachment); } - public void commitTransaction(long dbId, List<Table>
tableList, long transactionId, List<TabletCommitInfo> tabletCommitInfos) + public void commitTransaction(long dbId, List<Table>
tableList, + long transactionId, List<TabletCommitInfo> tabletCommitInfos) throws UserException { commitTransaction(dbId, tableList, transactionId, tabletCommitInfos, null); } @@ -211,8 +218,8 @@ * @note it is necessary to optimize the `lock` mechanism and `lock` scope resulting from wait lock long time * @note callers should get all tables' write locks before call this api */ - public void commitTransaction(long dbId, List<Table>
tableList, long transactionId, List<TabletCommitInfo> tabletCommitInfos, - TxnCommitAttachment txnCommitAttachment) + public void commitTransaction(long dbId, List<Table>
tableList, long transactionId, + List tabletCommitInfos, TxnCommitAttachment txnCommitAttachment) throws UserException { if (Config.disable_load_job) { throw new TransactionCommitFailedException("disable_load_job is set to true, all load jobs are prevented"); @@ -246,7 +253,8 @@ public class GlobalTransactionMgr implements Writable { StopWatch stopWatch = new StopWatch(); stopWatch.start(); if (!MetaLockUtils.tryWriteLockTablesOrMetaException(tableList, timeoutMillis, TimeUnit.MILLISECONDS)) { - throw new UserException("get tableList write lock timeout, tableList=(" + StringUtils.join(tableList, ",") + ")"); + throw new UserException("get tableList write lock timeout, tableList=(" + + StringUtils.join(tableList, ",") + ")"); } try { commitTransaction(db.getId(), tableList, transactionId, tabletCommitInfos, txnCommitAttachment); @@ -257,7 +265,8 @@ public class GlobalTransactionMgr implements Writable { long publishTimeoutMillis = timeoutMillis - stopWatch.getTime(); DatabaseTransactionMgr dbTransactionMgr = getDatabaseTransactionMgr(db.getId()); if (publishTimeoutMillis < 0) { - // here commit transaction successfully cost too much time to cause that publishTimeoutMillis is less than zero, + // here commit transaction successfully cost too much time + // to cause that publishTimeoutMillis is less than zero, // so we just return false to indicate publish timeout return false; } @@ -269,7 +278,8 @@ public class GlobalTransactionMgr implements Writable { StopWatch stopWatch = new StopWatch(); stopWatch.start(); if (!MetaLockUtils.tryWriteLockTablesOrMetaException(tableList, timeoutMillis, TimeUnit.MILLISECONDS)) { - throw new UserException("get tableList write lock timeout, tableList=(" + StringUtils.join(tableList, ",") + ")"); + throw new UserException("get tableList write lock timeout, tableList=(" + + StringUtils.join(tableList, ",") + ")"); } try { commitTransaction2PC(db.getId(), transactionId); @@ -285,7 +295,8 @@ public class GlobalTransactionMgr implements Writable { abortTransaction(dbId, transactionId, reason, null); } - public void abortTransaction(Long dbId, Long txnId, String reason, TxnCommitAttachment txnCommitAttachment) throws UserException { + public void abortTransaction(Long dbId, Long txnId, String reason, + TxnCommitAttachment txnCommitAttachment) throws UserException { DatabaseTransactionMgr dbTransactionMgr = getDatabaseTransactionMgr(dbId); dbTransactionMgr.abortTransaction(txnId, reason, txnCommitAttachment); } @@ -344,7 +355,8 @@ public class GlobalTransactionMgr implements Writable { } /** - * Check whether a load job already exists before checking all `TransactionId` related with this load job have finished. + * Check whether a load job already exists before + * checking all `TransactionId` related with this load job have finished. 
* finished * * @throws AnalysisException is database does not exist anymore @@ -487,7 +499,8 @@ public class GlobalTransactionMgr implements Writable { } /** - * It is a non thread safe method, only invoked by checkpoint thread without any lock or image dump thread with db lock + * It is a non thread safe method, only invoked by checkpoint thread + * without any lock or image dump thread with db lock */ public int getTransactionNum() { int txnNum = 0; @@ -527,7 +540,8 @@ public class GlobalTransactionMgr implements Writable { idGenerator.readFields(in); } - public TransactionState getTransactionStateByCallbackIdAndStatus(long dbId, long callbackId, Set status) { + public TransactionState getTransactionStateByCallbackIdAndStatus( + long dbId, long callbackId, Set status) { try { DatabaseTransactionMgr dbTransactionMgr = getDatabaseTransactionMgr(dbId); return dbTransactionMgr.getTransactionStateByCallbackIdAndStatus(callbackId, status); @@ -583,7 +597,8 @@ public class GlobalTransactionMgr implements Writable { dbTransactionMgr.updateDatabaseUsedQuotaData(usedQuotaDataBytes); } - public TWaitingTxnStatusResult getWaitingTxnStatus(TWaitingTxnStatusRequest request) throws AnalysisException, TimeoutException { + public TWaitingTxnStatusResult getWaitingTxnStatus(TWaitingTxnStatusRequest request) + throws AnalysisException, TimeoutException { long dbId = request.getDbId(); int commitTimeoutSec = Config.commit_timeout_second; for (int i = 0; i < commitTimeoutSec; ++i) { @@ -593,8 +608,7 @@ public class GlobalTransactionMgr implements Writable { TransactionStatus txnStatus = null; if (request.isSetTxnId()) { long txnId = request.getTxnId(); - TransactionState txnState = Catalog.getCurrentGlobalTransactionMgr(). - getTransactionState(dbId, txnId); + TransactionState txnState = Catalog.getCurrentGlobalTransactionMgr().getTransactionState(dbId, txnId); if (txnState == null) { throw new AnalysisException("txn does not exist: " + txnId); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/PublishVersionDaemon.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/PublishVersionDaemon.java index c44366f1d1..87edcf6885 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/PublishVersionDaemon.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/PublishVersionDaemon.java @@ -144,7 +144,8 @@ public class PublishVersionDaemon extends MasterDaemon { List unfinishedTasks = Lists.newArrayList(); for (PublishVersionTask publishVersionTask : transTasks.values()) { if (publishVersionTask.isFinished()) { - // sometimes backend finish publish version task, but it maybe failed to change transactionid to version for some tablets + // sometimes backend finish publish version task, + // but it maybe failed to change transactionid to version for some tablets // and it will upload the failed tabletinfo to fe and fe will deal with them List errorTablets = publishVersionTask.getErrorTablets(); if (errorTablets == null || errorTablets.isEmpty()) { @@ -157,7 +158,8 @@ public class PublishVersionDaemon extends MasterDaemon { if (tabletInvertedIndex.getTabletMeta(tabletId) == null) { continue; } - Replica replica = tabletInvertedIndex.getReplica(tabletId, publishVersionTask.getBackendId()); + Replica replica = tabletInvertedIndex.getReplica( + tabletId, publishVersionTask.getBackendId()); if (replica != null) { publishErrorReplicaIds.add(replica.getId()); } else { @@ -206,10 +208,12 @@ public class PublishVersionDaemon extends MasterDaemon { for (Long errorPartitionId : 
errorPartitionIds) { Partition partition = olapTable.getPartition(errorPartitionId); if (partition != null) { - List materializedIndexList = partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL); + List materializedIndexList + = partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL); for (MaterializedIndex materializedIndex : materializedIndexList) { for (Tablet tablet : materializedIndex.getTablets()) { - Replica replica = tablet.getReplicaByBackendId(unfinishedTask.getBackendId()); + Replica replica = tablet.getReplicaByBackendId( + unfinishedTask.getBackendId()); if (replica != null) { publishErrorReplicaIds.add(replica.getId()); } @@ -232,7 +236,8 @@ public class PublishVersionDaemon extends MasterDaemon { if (shouldFinishTxn) { try { // one transaction exception should not affect other transaction - globalTransactionMgr.finishTransaction(transactionState.getDbId(), transactionState.getTransactionId(), publishErrorReplicaIds); + globalTransactionMgr.finishTransaction(transactionState.getDbId(), + transactionState.getTransactionId(), publishErrorReplicaIds); } catch (Exception e) { LOG.warn("error happens when finish transaction {}", transactionState.getTransactionId(), e); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/TransactionIdGenerator.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/TransactionIdGenerator.java index dedf679f8c..3e2792f3f8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/TransactionIdGenerator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/TransactionIdGenerator.java @@ -65,6 +65,7 @@ public class TransactionIdGenerator { public void write(DataOutput out) throws IOException { out.writeLong(batchEndId); } + public void readFields(DataInput in) throws IOException { batchEndId = in.readLong(); // maybe a little rough diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/TransactionState.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/TransactionState.java index 988514e5f7..9c549d803f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/TransactionState.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/TransactionState.java @@ -251,7 +251,7 @@ public class TransactionState implements Writable { } public TransactionState(long dbId, List tableIdList, long transactionId, String label, TUniqueId requestId, - LoadJobSourceType sourceType, TxnCoordinator txnCoordinator, long callbackId, long timeoutMs) { + LoadJobSourceType sourceType, TxnCoordinator txnCoordinator, long callbackId, long timeoutMs) { this.dbId = dbId; this.tableIdList = (tableIdList == null ? 
Lists.newArrayList() : tableIdList); this.transactionId = transactionId; @@ -425,8 +425,8 @@ public class TransactionState implements Writable { afterStateTransform(transactionStatus, txnOperated, null); } - public void afterStateTransform(TransactionStatus transactionStatus, boolean txnOperated, String txnStatusChangeReason) - throws UserException { + public void afterStateTransform(TransactionStatus transactionStatus, + boolean txnOperated, String txnStatusChangeReason) throws UserException { // after status changed if (callback == null) { callback = Catalog.getCurrentGlobalTransactionMgr().getCallbackFactory().getCallback(callbackId); diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/TxnStateChangeCallback.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/TxnStateChangeCallback.java index b968ee0be1..a368c54d6a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/TxnStateChangeCallback.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/TxnStateChangeCallback.java @@ -57,7 +57,8 @@ public interface TxnStateChangeCallback { * maybe null * @return */ - void afterAborted(TransactionState txnState, boolean txnOperated, String txnStatusChangeReason) throws UserException; + void afterAborted(TransactionState txnState, boolean txnOperated, + String txnStatusChangeReason) throws UserException; void replayOnAborted(TransactionState txnState); diff --git a/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java b/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java index 767f65a41c..85c7ba577c 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java @@ -958,14 +958,14 @@ public class AlterTest { // external table support reorder column db = Catalog.getCurrentCatalog().getDbOrMetaException("default_cluster:test"); odbcTable = db.getTableOrMetaException("odbc_table"); - Assert.assertTrue(odbcTable.getBaseSchema().stream(). - map(column -> column.getName()). - reduce("", (totalName, columnName) -> totalName + columnName).equals("k1k2k3k4k5k6")); + Assert.assertTrue(odbcTable.getBaseSchema().stream() + .map(column -> column.getName()) + .reduce("", (totalName, columnName) -> totalName + columnName).equals("k1k2k3k4k5k6")); stmt = "alter table test.odbc_table order by (k6, k5, k4, k3, k2, k1)"; alterTable(stmt, false); - Assert.assertTrue(odbcTable.getBaseSchema().stream(). - map(column -> column.getName()). 
- reduce("", (totalName, columnName) -> totalName + columnName).equals("k6k5k4k3k2k1")); + Assert.assertTrue(odbcTable.getBaseSchema().stream() + .map(column -> column.getName()) + .reduce("", (totalName, columnName) -> totalName + columnName).equals("k6k5k4k3k2k1")); // external table support drop column stmt = "alter table test.odbc_table drop column k6"; diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateDataSyncJobStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateDataSyncJobStmtTest.java index 4d78e7393c..7b78058d3f 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateDataSyncJobStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateDataSyncJobStmtTest.java @@ -91,6 +91,7 @@ public class CreateDataSyncJobStmtTest { } }; } + @Test public void testNoDb() { CreateDataSyncJobStmt stmt = new CreateDataSyncJobStmt( diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/GroupByClauseTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/GroupByClauseTest.java index 34c82453e0..01039f62c1 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/GroupByClauseTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/GroupByClauseTest.java @@ -205,6 +205,7 @@ public class GroupByClauseTest { groupingExprs.remove(0); Assert.assertEquals(groupByClause.getGroupingExprs(), groupingExprs); } + @Test public void testReset() { ArrayList groupingExprs = new ArrayList<>(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertArrayStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertArrayStmtTest.java index 04d9968c46..d487c3d148 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertArrayStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertArrayStmtTest.java @@ -21,7 +21,6 @@ import org.apache.doris.catalog.ArrayType; import org.apache.doris.catalog.Catalog; import org.apache.doris.catalog.PrimitiveType; import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.Config; import org.apache.doris.common.ExceptionChecker; import org.apache.doris.common.util.SqlParserUtils; import org.apache.doris.qe.ConnectContext; diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/SetOperationStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/SetOperationStmtTest.java index 8949d2703c..25b881f934 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/SetOperationStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/SetOperationStmtTest.java @@ -43,6 +43,7 @@ public class SetOperationStmtTest { MockedAuth.mockedAuth(auth); MockedAuth.mockedConnectContext(ctx, "root", "192.168.1.1"); } + @Test public void testNormal() throws Exception { String sql = "select k1,k2 from t where k1='a' union select k1,k2 from t where k1='b';"; diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameComparedLowercaseTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameComparedLowercaseTest.java index 6d73f9f51d..0a9cfcc63e 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameComparedLowercaseTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameComparedLowercaseTest.java @@ -67,7 +67,8 @@ public class TableNameComparedLowercaseTest { + "\"storage_format\" = \"V2\"\n" + ")"; String table2 = "create table db1.TABLE2(k1 int, k2 varchar(32), k3 varchar(32), k4 int, k5 largeint) " - + "AGGREGATE KEY(k1, 
k2,k3,k4,k5) distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; + + "AGGREGATE KEY(k1, k2,k3,k4,k5) distributed by hash(k1) buckets 3 " + + "properties('replication_num' = '1');"; dorisAssert = new DorisAssert(); dorisAssert.withDatabase("db1").useDatabase("db1"); dorisAssert.withTable(table1) @@ -81,7 +82,8 @@ public class TableNameComparedLowercaseTest { @Test public void testTableNameLowerCase() { - Set tableNames = Catalog.getCurrentCatalog().getDbNullable("default_cluster:db1").getTableNamesWithLock(); + Set tableNames = Catalog.getCurrentCatalog() + .getDbNullable("default_cluster:db1").getTableNamesWithLock(); Assert.assertEquals(2, tableNames.size()); Assert.assertTrue(tableNames.contains("TABLE1")); Assert.assertTrue(tableNames.contains("TABLE2")); diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameStoredLowercaseTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameStoredLowercaseTest.java index bfd783bacf..89de4fc847 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameStoredLowercaseTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameStoredLowercaseTest.java @@ -66,7 +66,8 @@ public class TableNameStoredLowercaseTest { + "\"storage_format\" = \"V2\"\n" + ")"; String table2 = "create table db1.TABLE2(k1 int, k2 varchar(32), k3 varchar(32), k4 int, k5 largeint) " - + "AGGREGATE KEY(k1, k2,k3,k4,k5) distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; + + "AGGREGATE KEY(k1, k2,k3,k4,k5) distributed by hash(k1) buckets 3 " + + "properties('replication_num' = '1');"; dorisAssert = new DorisAssert(); dorisAssert.withDatabase("db1").useDatabase("db1"); dorisAssert.withTable(table1) @@ -80,7 +81,8 @@ public class TableNameStoredLowercaseTest { @Test public void testTableNameLowerCase() { - Set tableNames = Catalog.getCurrentCatalog().getDbNullable("default_cluster:db1").getTableNamesWithLock(); + Set tableNames = Catalog.getCurrentCatalog() + .getDbNullable("default_cluster:db1").getTableNamesWithLock(); Assert.assertEquals(2, tableNames.size()); Assert.assertTrue(tableNames.contains("table1")); Assert.assertTrue(tableNames.contains("table2")); diff --git a/fe/fe-core/src/test/java/org/apache/doris/backup/BackupJobTest.java b/fe/fe-core/src/test/java/org/apache/doris/backup/BackupJobTest.java index cb3edb8966..2d7ab5e26d 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/backup/BackupJobTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/backup/BackupJobTest.java @@ -94,6 +94,7 @@ public class BackupJobTest { public MockBackupHandler(Catalog catalog) { super(catalog); } + @Override public RepositoryMgr getRepoMgr() { return repoMgr; @@ -105,6 +106,7 @@ public class BackupJobTest { public MockRepositoryMgr() { super(); } + @Override public Repository getRepo(long repoId) { return repo; diff --git a/fe/fe-core/src/test/java/org/apache/doris/backup/RestoreJobTest.java b/fe/fe-core/src/test/java/org/apache/doris/backup/RestoreJobTest.java index 76c5f2e918..4e483e3817 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/backup/RestoreJobTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/backup/RestoreJobTest.java @@ -81,6 +81,7 @@ public class RestoreJobTest { public MockBackupHandler(Catalog catalog) { super(catalog); } + @Override public RepositoryMgr getRepoMgr() { return repoMgr; @@ -92,6 +93,7 @@ public class RestoreJobTest { public MockRepositoryMgr() { super(); } + @Override public Repository getRepo(long repoId) { return repo; diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/blockrule/SqlBlockRuleMgrTest.java b/fe/fe-core/src/test/java/org/apache/doris/blockrule/SqlBlockRuleMgrTest.java index d6fc770de6..d315ac5fbc 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/blockrule/SqlBlockRuleMgrTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/blockrule/SqlBlockRuleMgrTest.java @@ -281,4 +281,4 @@ public class SqlBlockRuleMgrTest extends TestWithFeService { Assertions.assertEquals(sqlBlockRule.getSqlPattern().toString(), read.getSqlPattern().toString()); file.delete(); } -} \ No newline at end of file +} diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/ColumnTypeTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/ColumnTypeTest.java index 1aded1c260..8c8644fc09 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/ColumnTypeTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/ColumnTypeTest.java @@ -33,6 +33,7 @@ import java.io.FileOutputStream; public class ColumnTypeTest { private FakeCatalog fakeCatalog; + @Before public void setUp() { fakeCatalog = new FakeCatalog(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/HiveTableTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/HiveTableTest.java index c7dad72037..983cd2736e 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/HiveTableTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/HiveTableTest.java @@ -21,7 +21,6 @@ import org.apache.doris.common.DdlException; import com.google.common.collect.Lists; import com.google.common.collect.Maps; - import org.junit.Assert; import org.junit.Before; import org.junit.Test; diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/OlapTableTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/OlapTableTest.java index c557992729..35c46a9e43 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/OlapTableTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/OlapTableTest.java @@ -56,8 +56,8 @@ public class OlapTableTest { continue; } OlapTable tbl = (OlapTable) table; - tbl.setIndexes(Lists.newArrayList(new Index("index", Lists.newArrayList("col"), IndexDef.IndexType.BITMAP - , "xxxxxx"))); + tbl.setIndexes(Lists.newArrayList(new Index("index", Lists.newArrayList("col"), + IndexDef.IndexType.BITMAP, "xxxxxx"))); System.out.println("orig table id: " + tbl.getId()); FastByteArrayOutputStream byteArrayOutputStream = new FastByteArrayOutputStream(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/TableTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/TableTest.java index 98cb2febe7..5a23312ae0 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/TableTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/TableTest.java @@ -92,13 +92,17 @@ public class TableTest { public void lockTestWithException() { table.markDropped(); ExceptionChecker.expectThrowsWithMsg(DdlException.class, - "errCode = 2, detailMessage = unknown table, tableName=test", () -> table.writeLockOrDdlException()); + "errCode = 2, detailMessage = unknown table, tableName=test", + () -> table.writeLockOrDdlException()); ExceptionChecker.expectThrowsWithMsg(MetaNotFoundException.class, - "errCode = 7, detailMessage = unknown table, tableName=test", () -> table.writeLockOrMetaException()); + "errCode = 7, detailMessage = unknown table, tableName=test", + () -> table.writeLockOrMetaException()); ExceptionChecker.expectThrowsWithMsg(AlterCancelException.class, - "errCode = 2, detailMessage 
= unknown table, tableName=test", () -> table.writeLockOrAlterCancelException()); + "errCode = 2, detailMessage = unknown table, tableName=test", + () -> table.writeLockOrAlterCancelException()); ExceptionChecker.expectThrowsWithMsg(MetaNotFoundException.class, - "errCode = 7, detailMessage = unknown table, tableName=test", () -> table.tryWriteLockOrMetaException(1000, TimeUnit.MILLISECONDS)); + "errCode = 7, detailMessage = unknown table, tableName=test", + () -> table.tryWriteLockOrMetaException(1000, TimeUnit.MILLISECONDS)); table.unmarkDropped(); } @@ -136,7 +140,7 @@ public class TableTest { List column = Lists.newArrayList(); column.add(column2); - table1.setIndexMeta(new Long(2), "test", column, 1, 1, shortKeyColumnCount, TStorageType.COLUMN, KeysType.AGG_KEYS); + table1.setIndexMeta(2L, "test", column, 1, 1, shortKeyColumnCount, TStorageType.COLUMN, KeysType.AGG_KEYS); Deencapsulation.setField(table1, "baseIndexId", 1000); table1.write(dos); dos.flush(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/TabletTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/TabletTest.java index edf4081701..0d439a587c 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/TabletTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/TabletTest.java @@ -159,7 +159,8 @@ public class TabletTest { * @param backendId2ReplicaIsBad beId -> if replica is a bad replica */ @SafeVarargs - private final void testTabletColocateHealthStatus0(Tablet.TabletStatus exceptedTabletStatus, Pair... backendId2ReplicaIsBad) { + private final void testTabletColocateHealthStatus0(Tablet.TabletStatus exceptedTabletStatus, + Pair... backendId2ReplicaIsBad) { Tablet tablet = new Tablet(1); int replicaId = 1; for (Pair pair : backendId2ReplicaIsBad) { @@ -169,7 +170,8 @@ public class TabletTest { versionAndSuccessVersion = 99L; lastFailVersion = 100L; } - tablet.addReplica(new Replica(replicaId++, pair.first, versionAndSuccessVersion, 0, 200000L, 3000L, ReplicaState.NORMAL, lastFailVersion, versionAndSuccessVersion)); + tablet.addReplica(new Replica(replicaId++, pair.first, versionAndSuccessVersion, 0, 200000L, 3000L, + ReplicaState.NORMAL, lastFailVersion, versionAndSuccessVersion)); } Assert.assertEquals(tablet.getColocateHealthStatus(100L, new ReplicaAllocation((short) 3), Sets.newHashSet(1L, 2L, 3L)), exceptedTabletStatus); diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/TempPartitionTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/TempPartitionTest.java index 33dfc86ce5..9921dd9f39 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/TempPartitionTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/TempPartitionTest.java @@ -135,7 +135,8 @@ public class TempPartitionTest { return tabletMeta.getPartitionId(); } - private void getPartitionNameToTabletIdMap(String tbl, boolean isTemp, Map partNameToTabletId) throws Exception { + private void getPartitionNameToTabletIdMap(String tbl, boolean isTemp, + Map partNameToTabletId) throws Exception { partNameToTabletId.clear(); String showStr = "show " + (isTemp ? 
"temporary" : "") + " partitions from " + tbl; ShowPartitionsStmt showStmt = (ShowPartitionsStmt) UtFrameUtils.parseAndAnalyzeStmt(showStr, ctx); @@ -183,7 +184,8 @@ public class TempPartitionTest { Catalog.getCurrentCatalog().createDb(createDbStmt); System.out.println(Catalog.getCurrentCatalog().getDbNames()); // create table tbl1 - String createTblStmtStr1 = "create table db1.tbl1(k1 int) distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; + String createTblStmtStr1 = "create table db1.tbl1(k1 int) distributed by hash(k1)" + + " buckets 3 properties('replication_num' = '1');"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createTblStmtStr1, ctx); Catalog.getCurrentCatalog().createTable(createTableStmt); @@ -322,20 +324,24 @@ public class TempPartitionTest { stmtStr = "alter table db2.tbl2 replace partition(p1, p2) with temporary partition(tp2, tp3);"; alterTable(stmtStr, true); - stmtStr = "alter table db2.tbl2 replace partition(p1, p2) with temporary partition(tp1, tp2) properties('invalid' = 'invalid');"; + stmtStr = "alter table db2.tbl2 replace partition(p1, p2) with temporary partition(tp1, tp2)" + + " properties('invalid' = 'invalid');"; alterTable(stmtStr, true); - stmtStr = "alter table db2.tbl2 replace partition(p1, p2) with temporary partition(tp2, tp3) properties('strict_range' = 'false');"; + stmtStr = "alter table db2.tbl2 replace partition(p1, p2) with temporary partition(tp2, tp3)" + + " properties('strict_range' = 'false');"; alterTable(stmtStr, true); - stmtStr = "alter table db2.tbl2 replace partition(p1, p2) with temporary partition(tp1, tp2) properties('strict_range' = 'false', 'use_temp_partition_name' = 'true');"; + stmtStr = "alter table db2.tbl2 replace partition(p1, p2) with temporary partition(tp1, tp2)" + + " properties('strict_range' = 'false', 'use_temp_partition_name' = 'true');"; alterTable(stmtStr, false); checkShowPartitionsResultNum("db2.tbl2", true, 1); checkShowPartitionsResultNum("db2.tbl2", false, 3); checkTabletExists(tempPartitionTabletIds2.values(), true); checkTabletExists(Lists.newArrayList(originPartitionTabletIds2.get("p3")), true); - checkTabletExists(Lists.newArrayList(originPartitionTabletIds2.get("p1"), originPartitionTabletIds2.get("p2")), false); + checkTabletExists(Lists.newArrayList(originPartitionTabletIds2.get("p1"), + originPartitionTabletIds2.get("p2")), false); String truncateStr = "truncate table db2.tbl2 partition (p3);"; TruncateTableStmt truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(truncateStr, ctx); @@ -362,7 +368,8 @@ public class TempPartitionTest { alterTable(stmtStr, true); stmtStr = "alter table db2.tbl2 replace partition(p31, p32) with temporary partition(tp3);"; alterTable(stmtStr, true); - stmtStr = "alter table db2.tbl2 replace partition(p31, p32) with temporary partition(tp3) properties('strict_range' = 'false');"; + stmtStr = "alter table db2.tbl2 replace partition(p31, p32) with temporary partition(tp3)" + + " properties('strict_range' = 'false');"; alterTable(stmtStr, false); checkShowPartitionsResultNum("db2.tbl2", false, 3); checkShowPartitionsResultNum("db2.tbl2", true, 0); @@ -385,7 +392,8 @@ public class TempPartitionTest { checkPartitionExist(tbl2, "p2", true, false); checkPartitionExist(tbl2, "p3", true, true); - stmtStr = "alter table db2.tbl2 replace partition(tp3) with temporary partition(p3) properties('use_temp_partition_name' = 'true');"; + stmtStr = "alter table db2.tbl2 replace partition(tp3) with temporary 
partition(p3)" + + " properties('use_temp_partition_name' = 'true');"; alterTable(stmtStr, false); checkPartitionExist(tbl2, "tp1", false, true); checkPartitionExist(tbl2, "tp2", false, true); @@ -450,7 +458,8 @@ public class TempPartitionTest { TempPartitions tempPartitions = Deencapsulation.getField(tbl2, "tempPartitions"); testSerializeTempPartitions(tempPartitions); - stmtStr = "alter table db2.tbl2 replace partition (tp1, tp2) with temporary partition (p2) properties('strict_range' = 'false');"; + stmtStr = "alter table db2.tbl2 replace partition (tp1, tp2) with temporary partition (p2)" + + " properties('strict_range' = 'false');"; alterTable(stmtStr, false); checkShowPartitionsResultNum("db2.tbl2", false, 2); checkShowPartitionsResultNum("db2.tbl2", true, 0); @@ -464,11 +473,14 @@ public class TempPartitionTest { checkTablet("db2.tbl2", "tp3", false, 2); // for now, we have 2 partitions: p2, tp3, [min, 20), [20, 30). 0 temp partition. - stmtStr = "alter table db2.tbl2 add temporary partition tp4 values less than('20') ('in_memory' = 'true') distributed by hash(k1) buckets 3"; + stmtStr = "alter table db2.tbl2 add temporary partition tp4 values less than('20') " + + "('in_memory' = 'true') distributed by hash(k1) buckets 3"; alterTable(stmtStr, true); - stmtStr = "alter table db2.tbl2 add temporary partition tp4 values less than('20') ('in_memory' = 'true', 'replication_num' = '2') distributed by hash(k2) buckets 3"; + stmtStr = "alter table db2.tbl2 add temporary partition tp4 values less than('20') " + + "('in_memory' = 'true', 'replication_num' = '2') distributed by hash(k2) buckets 3"; alterTable(stmtStr, true); - stmtStr = "alter table db2.tbl2 add temporary partition tp4 values less than('20') ('in_memory' = 'true', 'replication_num' = '1') distributed by hash(k2) buckets 3"; + stmtStr = "alter table db2.tbl2 add temporary partition tp4 values less than('20') " + + "('in_memory' = 'true', 'replication_num' = '1') distributed by hash(k2) buckets 3"; alterTable(stmtStr, false); Partition p2 = tbl2.getPartition("p2"); @@ -479,7 +491,8 @@ public class TempPartitionTest { stmtStr = "alter table db2.tbl2 replace partition (p2) with temporary partition (tp4)"; alterTable(stmtStr, false); - // for now, we have 2 partitions: p2, tp3, [min, 20), [20, 30). 0 temp partition. and p2 bucket is 3, 'in_memory' is true. + // for now, we have 2 partitions: p2, tp3, [min, 20), [20, 30). 0 temp partition. + // and p2 bucket is 3, 'in_memory' is true. 
p2 = tbl2.getPartition("p2"); Assert.assertNotNull(p2); Assert.assertTrue(tbl2.getPartitionInfo().getIsInMemory(p2.getId())); @@ -547,9 +560,11 @@ public class TempPartitionTest { alterTable(stmtStr, false); stmtStr = "alter table db3.tbl3 replace partition (tp4) with temporary partition(tp5)"; alterTable(stmtStr, true); - stmtStr = "alter table db3.tbl3 replace partition (tp4) with temporary partition(tp5) properties('strict_range' = 'true', 'use_temp_partition_name' = 'true')"; + stmtStr = "alter table db3.tbl3 replace partition (tp4) with temporary partition(tp5)" + + " properties('strict_range' = 'true', 'use_temp_partition_name' = 'true')"; alterTable(stmtStr, true); - stmtStr = "alter table db3.tbl3 replace partition (tp4) with temporary partition(tp5) properties('strict_range' = 'false', 'use_temp_partition_name' = 'true')"; + stmtStr = "alter table db3.tbl3 replace partition (tp4) with temporary partition(tp5)" + + " properties('strict_range' = 'false', 'use_temp_partition_name' = 'true')"; alterTable(stmtStr, false); // now base range is [min, 10), [50, 60) -> p1,tp5 @@ -684,20 +699,23 @@ public class TempPartitionTest { stmtStr = "alter table db4.tbl4 replace partition(p1, p2) with temporary partition(tp2, tp3);"; alterTable(stmtStr, true); - stmtStr = "alter table db4.tbl4 replace partition(p1, p2) with temporary partition(tp1, tp2) properties('invalid' = 'invalid');"; + stmtStr = "alter table db4.tbl4 replace partition(p1, p2) with temporary partition(tp1, tp2)" + + " properties('invalid' = 'invalid');"; alterTable(stmtStr, true); stmtStr = "alter table db4.tbl4 replace partition(p1, p2) with temporary partition(tp2, tp3);"; alterTable(stmtStr, true); - stmtStr = "alter table db4.tbl4 replace partition(p1, p2) with temporary partition(tp1, tp2) properties('use_temp_partition_name' = 'true');"; + stmtStr = "alter table db4.tbl4 replace partition(p1, p2) with temporary partition(tp1, tp2)" + + " properties('use_temp_partition_name' = 'true');"; alterTable(stmtStr, false); checkShowPartitionsResultNum("db4.tbl4", true, 1); // tp3 checkShowPartitionsResultNum("db4.tbl4", false, 3); // tp1, tp2, p3 checkTabletExists(tempPartitionTabletIds2.values(), true); checkTabletExists(Lists.newArrayList(originPartitionTabletIds2.get("p3")), true); - checkTabletExists(Lists.newArrayList(originPartitionTabletIds2.get("p1"), originPartitionTabletIds2.get("p2")), false); + checkTabletExists(Lists.newArrayList(originPartitionTabletIds2.get("p1"), + originPartitionTabletIds2.get("p2")), false); String truncateStr = "truncate table db4.tbl4 partition (p3);"; TruncateTableStmt truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(truncateStr, ctx); @@ -745,7 +763,8 @@ public class TempPartitionTest { checkPartitionExist(tbl4, "p2", true, false); checkPartitionExist(tbl4, "p3", true, true); - stmtStr = "alter table db4.tbl4 replace partition(tp3) with temporary partition(p3) properties('use_temp_partition_name' = 'true');"; + stmtStr = "alter table db4.tbl4 replace partition(tp3) with temporary partition(p3)" + + " properties('use_temp_partition_name' = 'true');"; alterTable(stmtStr, false); checkPartitionExist(tbl4, "tp1", false, true); checkPartitionExist(tbl4, "tp2", false, true); @@ -825,11 +844,14 @@ public class TempPartitionTest { checkTablet("db4.tbl4", "tp3", false, 2); // for now, we have 2 partitions: p2, tp3, ('1', '2', '3', '4', '5', '6'), ('7', '8', '9'). 0 temp partition. 
- stmtStr = "alter table db4.tbl4 add temporary partition tp4 values in ('1', '2', '3', '4', '5', '6') ('in_memory' = 'true') distributed by hash(k1) buckets 3"; + stmtStr = "alter table db4.tbl4 add temporary partition tp4 values in ('1', '2', '3', '4', '5', '6')" + + " ('in_memory' = 'true') distributed by hash(k1) buckets 3"; alterTable(stmtStr, true); - stmtStr = "alter table db4.tbl4 add temporary partition tp4 values in ('1', '2', '3', '4', '5', '6') ('in_memory' = 'true', 'replication_num' = '2') distributed by hash(k2) buckets 3"; + stmtStr = "alter table db4.tbl4 add temporary partition tp4 values in ('1', '2', '3', '4', '5', '6')" + + " ('in_memory' = 'true', 'replication_num' = '2') distributed by hash(k2) buckets 3"; alterTable(stmtStr, true); - stmtStr = "alter table db4.tbl4 add temporary partition tp4 values in ('1', '2', '3', '4', '5', '6') ('in_memory' = 'true', 'replication_num' = '1') distributed by hash(k2) buckets 3"; + stmtStr = "alter table db4.tbl4 add temporary partition tp4 values in ('1', '2', '3', '4', '5', '6')" + + " ('in_memory' = 'true', 'replication_num' = '1') distributed by hash(k2) buckets 3"; alterTable(stmtStr, false); Partition p2 = tbl4.getPartition("p2"); @@ -840,7 +862,8 @@ public class TempPartitionTest { stmtStr = "alter table db4.tbl4 replace partition (p2) with temporary partition (tp4)"; alterTable(stmtStr, false); - // for now, we have 2 partitions: p2, tp3, ('1', '2', '3', '4', '5', '6'), ('7', '8', '9'). 0 temp partition. and p2 bucket is 3, 'in_memory' is true. + // for now, we have 2 partitions: p2, tp3, ('1', '2', '3', '4', '5', '6'), + // ('7', '8', '9'). 0 temp partition. and p2 bucket is 3, 'in_memory' is true. p2 = tbl4.getPartition("p2"); Assert.assertNotNull(p2); Assert.assertTrue(tbl4.getPartitionInfo().getIsInMemory(p2.getId())); @@ -881,17 +904,21 @@ public class TempPartitionTest { stmtStr = "alter table db4.tbl4 add temporary partition p32 values in ('9')"; alterTable(stmtStr, false); - stmtStr = "alter table db4.tbl4 replace partition (tp2) with temporary partition (p2) properties('strict_range' = 'true');"; + stmtStr = "alter table db4.tbl4 replace partition (tp2) with temporary partition (p2)" + + " properties('strict_range' = 'true');"; alterTable(stmtStr, true); - stmtStr = "alter table db4.tbl4 replace partition (tp2) with temporary partition (p2) properties('strict_range' = 'false', 'use_temp_partition_name' = 'true');"; + stmtStr = "alter table db4.tbl4 replace partition (tp2) with temporary partition (p2)" + + " properties('strict_range' = 'false', 'use_temp_partition_name' = 'true');"; alterTable(stmtStr, false); - stmtStr = "alter table db4.tbl4 replace partition (tp3) with temporary partition (p31, p32) properties('strict_range' = 'true', 'use_temp_partition_name' = 'true');"; + stmtStr = "alter table db4.tbl4 replace partition (tp3) with temporary partition (p31, p32)" + + " properties('strict_range' = 'true', 'use_temp_partition_name' = 'true');"; alterTable(stmtStr, false); stmtStr = "alter table db4.tbl4 add temporary partition p4 values in ('1', '2', '3', '4')"; alterTable(stmtStr, false); - stmtStr = "alter table db4.tbl4 replace partition (tp1) with temporary partition (p4) properties('strict_range' = 'false');"; + stmtStr = "alter table db4.tbl4 replace partition (tp1) with temporary partition (p4)" + + " properties('strict_range' = 'false');"; alterTable(stmtStr, true); } @@ -934,26 +961,33 @@ public class TempPartitionTest { checkShowPartitionsResultNum("db5.tbl5", false, 3); // add temp partition with 
duplicate name - String stmtStr = "alter table db5.tbl5 add temporary partition p1 values in ((\"1\",\"beijing\"), (\"1\", \"shanghai\"));"; + String stmtStr = "alter table db5.tbl5 add temporary partition p1 values in" + + " ((\"1\",\"beijing\"), (\"1\", \"shanghai\"));"; alterTable(stmtStr, true); // add temp partition - stmtStr = "alter table db5.tbl5 add temporary partition tp1 values in ((\"1\",\"beijing\"), (\"1\", \"shanghai\"));"; + stmtStr = "alter table db5.tbl5 add temporary partition tp1 values in" + + " ((\"1\",\"beijing\"), (\"1\", \"shanghai\"));"; alterTable(stmtStr, false); - stmtStr = "alter table db5.tbl5 add temporary partition tp2 values in ((\"1\",\"beijing\"), (\"1\", \"shanghai\");"; + stmtStr = "alter table db5.tbl5 add temporary partition tp2 values in" + + " ((\"1\",\"beijing\"), (\"1\", \"shanghai\");"; alterTable(stmtStr, true); - stmtStr = "alter table db5.tbl5 add temporary partition tp1 values in ((\"2\",\"beijing\"), (\"2\", \"shanghai\"));"; + stmtStr = "alter table db5.tbl5 add temporary partition tp1 values in" + + " ((\"2\",\"beijing\"), (\"2\", \"shanghai\"));"; alterTable(stmtStr, true); - stmtStr = "alter table db5.tbl5 add temporary partition tp2 values in ((\"2\",\"beijing\"), (\"2\", \"shanghai\"));"; + stmtStr = "alter table db5.tbl5 add temporary partition tp2 values in" + + " ((\"2\",\"beijing\"), (\"2\", \"shanghai\"));"; alterTable(stmtStr, false); - stmtStr = "alter table db5.tbl5 add temporary partition tp3 values in ((\"2\",\"beijing\"), (\"3\",\"beijing\"), (\"3\", \"shanghai\"));"; + stmtStr = "alter table db5.tbl5 add temporary partition tp3 values in" + + " ((\"2\",\"beijing\"), (\"3\",\"beijing\"), (\"3\", \"shanghai\"));"; alterTable(stmtStr, true); - stmtStr = "alter table db5.tbl5 add temporary partition tp3 values in ((\"3\",\"beijing\"), (\"3\", \"shanghai\"));"; + stmtStr = "alter table db5.tbl5 add temporary partition tp3 values in" + + " ((\"3\",\"beijing\"), (\"3\", \"shanghai\"));"; alterTable(stmtStr, false); Map tempPartitionTabletIds = Maps.newHashMap(); @@ -987,7 +1021,8 @@ public class TempPartitionTest { checkShowPartitionsResultNum("db5.tbl5", true, 2); checkShowPartitionsResultNum("db5.tbl5", false, 3); - stmtStr = "alter table db5.tbl5 add temporary partition tp3 values in ((\"3\",\"beijing\"), (\"3\", \"shanghai\"));"; + stmtStr = "alter table db5.tbl5 add temporary partition tp3 values in" + + " ((\"3\",\"beijing\"), (\"3\", \"shanghai\"));"; alterTable(stmtStr, false); checkShowPartitionsResultNum("db5.tbl5", true, 3); @@ -1022,20 +1057,23 @@ public class TempPartitionTest { stmtStr = "alter table db5.tbl5 replace partition(p1, p2) with temporary partition(tp2, tp3);"; alterTable(stmtStr, true); - stmtStr = "alter table db5.tbl5 replace partition(p1, p2) with temporary partition(tp1, tp2) properties('invalid' = 'invalid');"; + stmtStr = "alter table db5.tbl5 replace partition(p1, p2) with temporary partition(tp1, tp2)" + + " properties('invalid' = 'invalid');"; alterTable(stmtStr, true); stmtStr = "alter table db5.tbl5 replace partition(p1, p2) with temporary partition(tp2, tp3);"; alterTable(stmtStr, true); - stmtStr = "alter table db5.tbl5 replace partition(p1, p2) with temporary partition(tp1, tp2) properties('use_temp_partition_name' = 'true');"; + stmtStr = "alter table db5.tbl5 replace partition(p1, p2) with temporary partition(tp1, tp2)" + + " properties('use_temp_partition_name' = 'true');"; alterTable(stmtStr, false); checkShowPartitionsResultNum("db5.tbl5", true, 1); // tp3 
checkShowPartitionsResultNum("db5.tbl5", false, 3); // tp1, tp2, p3 checkTabletExists(tempPartitionTabletIds2.values(), true); checkTabletExists(Lists.newArrayList(originPartitionTabletIds2.get("p3")), true); - checkTabletExists(Lists.newArrayList(originPartitionTabletIds2.get("p1"), originPartitionTabletIds2.get("p2")), false); + checkTabletExists(Lists.newArrayList(originPartitionTabletIds2.get("p1"), + originPartitionTabletIds2.get("p2")), false); String truncateStr = "truncate table db5.tbl5 partition (p3);"; TruncateTableStmt truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(truncateStr, ctx); @@ -1068,11 +1106,14 @@ public class TempPartitionTest { checkPartitionExist(tbl5, "tp2", false, true); checkPartitionExist(tbl5, "tp3", false, true); - stmtStr = "alter table db5.tbl5 add temporary partition p1 values in ((\"1\",\"beijing\"), (\"1\", \"shanghai\"));"; + stmtStr = "alter table db5.tbl5 add temporary partition p1 values in" + + " ((\"1\",\"beijing\"), (\"1\", \"shanghai\"));"; alterTable(stmtStr, false); - stmtStr = "alter table db5.tbl5 add temporary partition p2 values in ((\"2\",\"beijing\"), (\"2\", \"shanghai\"));"; + stmtStr = "alter table db5.tbl5 add temporary partition p2 values in" + + " ((\"2\",\"beijing\"), (\"2\", \"shanghai\"));"; alterTable(stmtStr, false); - stmtStr = "alter table db5.tbl5 add temporary partition p3 values in ((\"3\",\"beijing\"), (\"3\", \"shanghai\"));"; + stmtStr = "alter table db5.tbl5 add temporary partition p3 values in" + + " ((\"3\",\"beijing\"), (\"3\", \"shanghai\"));"; alterTable(stmtStr, false); stmtStr = "alter table db5.tbl5 replace partition(tp1, tp2) with temporary partition(p1, p2);"; alterTable(stmtStr, false); @@ -1083,7 +1124,8 @@ public class TempPartitionTest { checkPartitionExist(tbl5, "p2", true, false); checkPartitionExist(tbl5, "p3", true, true); - stmtStr = "alter table db5.tbl5 replace partition(tp3) with temporary partition(p3) properties('use_temp_partition_name' = 'true');"; + stmtStr = "alter table db5.tbl5 replace partition(tp3) with temporary partition(p3)" + + " properties('use_temp_partition_name' = 'true');"; alterTable(stmtStr, false); checkPartitionExist(tbl5, "tp1", false, true); checkPartitionExist(tbl5, "tp2", false, true); @@ -1094,11 +1136,13 @@ public class TempPartitionTest { checkShowPartitionsResultNum("db5.tbl5", false, 3); checkShowPartitionsResultNum("db5.tbl5", true, 0); - stmtStr = "alter table db5.tbl5 add temporary partition tp1 values in ((\"1\",\"beijing\"), (\"1\", \"shanghai\"));"; // name conflict + stmtStr = "alter table db5.tbl5 add temporary partition tp1 values in" + + " ((\"1\",\"beijing\"), (\"1\", \"shanghai\"));"; // name conflict alterTable(stmtStr, true); stmtStr = "alter table db5.tbl5 rename partition p3 tp3;"; alterTable(stmtStr, false); - stmtStr = "alter table db5.tbl5 add temporary partition p1 values in ((\"1\",\"beijing\"), (\"1\", \"shanghai\"));"; + stmtStr = "alter table db5.tbl5 add temporary partition p1 values in" + + " ((\"1\",\"beijing\"), (\"1\", \"shanghai\"));"; alterTable(stmtStr, false); originPartitionTabletIds2 = Maps.newHashMap(); @@ -1126,7 +1170,8 @@ public class TempPartitionTest { stmtStr = "alter table db5.tbl5 add rollup r1(k1);"; alterTable(stmtStr, false); - stmtStr = "alter table db5.tbl5 add temporary partition p2 values in ((\"1\",\"beijing\"), (\"1\", \"shanghai\"), (\"2\",\"beijing\"), (\"2\", \"shanghai\"));"; + stmtStr = "alter table db5.tbl5 add temporary partition p2 values in" + + " ((\"1\",\"beijing\"), (\"1\", 
\"shanghai\"), (\"2\",\"beijing\"), (\"2\", \"shanghai\"));"; alterTable(stmtStr, true); // wait rollup finish @@ -1143,7 +1188,8 @@ public class TempPartitionTest { // waiting table state to normal Thread.sleep(500); - stmtStr = "alter table db5.tbl5 add temporary partition p2 values in ((\"1\",\"beijing\"), (\"1\", \"shanghai\"), (\"2\",\"beijing\"), (\"2\", \"shanghai\"));"; + stmtStr = "alter table db5.tbl5 add temporary partition p2 values in" + + " ((\"1\",\"beijing\"), (\"1\", \"shanghai\"), (\"2\",\"beijing\"), (\"2\", \"shanghai\"));"; alterTable(stmtStr, false); TempPartitions tempPartitions = Deencapsulation.getField(tbl5, "tempPartitions"); @@ -1162,12 +1208,20 @@ public class TempPartitionTest { checkTablet("db5.tbl5", "p2", false, 2); checkTablet("db5.tbl5", "tp3", false, 2); - // for now, we have 2 partitions: p2, tp3, (("1","beijing"), ("1", "shanghai"), ("2","beijing"), ("2", "shanghai")), ('7', '8', '9'). 0 temp partition. - stmtStr = "alter table db5.tbl5 add temporary partition tp4 values in ((\"1\",\"beijing\"), (\"1\", \"shanghai\"), (\"2\",\"beijing\"), (\"2\", \"shanghai\")) ('in_memory' = 'true') distributed by hash(k1) buckets 3"; + // for now, we have 2 partitions: p2, tp3, + // (("1","beijing"), ("1", "shanghai"), ("2","beijing"), ("2", "shanghai")), ('7', '8', '9'). + // 0 temp partition. + stmtStr = "alter table db5.tbl5 add temporary partition tp4 values in" + + " ((\"1\",\"beijing\"), (\"1\", \"shanghai\"), (\"2\",\"beijing\"), (\"2\", \"shanghai\"))" + + " ('in_memory' = 'true') distributed by hash(k1) buckets 3"; alterTable(stmtStr, true); - stmtStr = "alter table db5.tbl5 add temporary partition tp4 values in ((\"1\",\"beijing\"), (\"1\", \"shanghai\"), (\"2\",\"beijing\"), (\"2\", \"shanghai\")) ('in_memory' = 'true', 'replication_num' = '2') distributed by hash(k2) buckets 3"; + stmtStr = "alter table db5.tbl5 add temporary partition tp4 values in" + + " ((\"1\",\"beijing\"), (\"1\", \"shanghai\"), (\"2\",\"beijing\"), (\"2\", \"shanghai\"))" + + " ('in_memory' = 'true', 'replication_num' = '2') distributed by hash(k2) buckets 3"; alterTable(stmtStr, true); - stmtStr = "alter table db5.tbl5 add temporary partition tp4 values in ((\"1\",\"beijing\"), (\"1\", \"shanghai\"), (\"2\",\"beijing\"), (\"2\", \"shanghai\")) ('in_memory' = 'true', 'replication_num' = '1') distributed by hash(k2) buckets 3"; + stmtStr = "alter table db5.tbl5 add temporary partition tp4 values in" + + " ((\"1\",\"beijing\"), (\"1\", \"shanghai\"), (\"2\",\"beijing\"), (\"2\", \"shanghai\"))" + + " ('in_memory' = 'true', 'replication_num' = '1') distributed by hash(k2) buckets 3"; alterTable(stmtStr, false); Partition p2 = tbl5.getPartition("p2"); @@ -1178,15 +1232,19 @@ public class TempPartitionTest { stmtStr = "alter table db5.tbl5 replace partition (p2) with temporary partition (tp4)"; alterTable(stmtStr, false); - // for now, we have 2 partitions: p2, tp3, (("1","beijing"), ("1", "shanghai"), ("2","beijing"), ("2", "shanghai")), ('7', '8', '9'). 0 temp partition. and p2 bucket is 3, 'in_memory' is true. + // for now, we have 2 partitions: p2, tp3, + // (("1","beijing"), ("1", "shanghai"), ("2","beijing"), ("2", "shanghai")), ('7', '8', '9'). + // 0 temp partition. and p2 bucket is 3, 'in_memory' is true. 
p2 = tbl5.getPartition("p2"); Assert.assertNotNull(p2); Assert.assertTrue(tbl5.getPartitionInfo().getIsInMemory(p2.getId())); Assert.assertEquals(3, p2.getDistributionInfo().getBucketNum()); - stmtStr = "alter table db5.tbl5 add temporary partition tp1 values in ((\"1\",\"beijing\"), (\"1\", \"shanghai\"));"; + stmtStr = "alter table db5.tbl5 add temporary partition tp1" + + " values in ((\"1\",\"beijing\"), (\"1\", \"shanghai\"));"; alterTable(stmtStr, false); - stmtStr = "alter table db5.tbl5 add temporary partition tp2 values in ((\"2\",\"beijing\"), (\"2\", \"shanghai\"));"; + stmtStr = "alter table db5.tbl5 add temporary partition tp2" + + " values in ((\"2\",\"beijing\"), (\"2\", \"shanghai\"));"; alterTable(stmtStr, false); checkShowPartitionsResultNum("db5.tbl5", false, 2); @@ -1228,7 +1286,7 @@ public class TempPartitionTest { file.delete(); } - private void testSerializeTempPartitions(TempPartitions tempPartitionsInstance) throws IOException, AnalysisException { + private void testSerializeTempPartitions(TempPartitions tempPartitionsInstance) throws IOException { MetaContext metaContext = new MetaContext(); metaContext.setMetaVersion(FeMetaVersion.VERSION_CURRENT); metaContext.setThreadLocalInfo(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/TruncateTableTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/TruncateTableTest.java index 09732b6a5f..9fe66df7fd 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/TruncateTableTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/TruncateTableTest.java @@ -91,7 +91,8 @@ public class TruncateTableTest { long p20211008Id = tbl.getPartition("p20211008").getId(); // truncate p20211008(real name is P20211008) String truncateStr = "TRUNCATE TABLE test.case_sensitive_table PARTITION p20211008; \n"; - TruncateTableStmt truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(truncateStr, connectContext); + TruncateTableStmt truncateTableStmt + = (TruncateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(truncateStr, connectContext); Catalog.getCurrentCatalog().truncateTable(truncateTableStmt); Assert.assertNotEquals(p20211008Id, tbl.getPartition("p20211008").getId()); // 2. 
truncate P20211007 @@ -107,11 +108,14 @@ public class TruncateTableTest { @Test public void testTruncateTable() throws Exception { - String stmtStr = "ALTER TABLE test.tbl ADD PARTITION p20210902 VALUES [('2021-09-02'), ('2021-09-03')) DISTRIBUTED BY HASH(`k1`) BUCKETS 3;"; + String stmtStr = "ALTER TABLE test.tbl ADD PARTITION p20210902 VALUES [('2021-09-02'), ('2021-09-03'))" + + " DISTRIBUTED BY HASH(`k1`) BUCKETS 3;"; alterTable(stmtStr); - stmtStr = "ALTER TABLE test.tbl ADD PARTITION p20210903 VALUES [('2021-09-03'), ('2021-09-04')) DISTRIBUTED BY HASH(`k1`) BUCKETS 4;"; + stmtStr = "ALTER TABLE test.tbl ADD PARTITION p20210903 VALUES [('2021-09-03'), ('2021-09-04'))" + + " DISTRIBUTED BY HASH(`k1`) BUCKETS 4;"; alterTable(stmtStr); - stmtStr = "ALTER TABLE test.tbl ADD PARTITION p20210904 VALUES [('2021-09-04'), ('2021-09-05')) DISTRIBUTED BY HASH(`k1`) BUCKETS 5;"; + stmtStr = "ALTER TABLE test.tbl ADD PARTITION p20210904 VALUES [('2021-09-04'), ('2021-09-05'))" + + " DISTRIBUTED BY HASH(`k1`) BUCKETS 5;"; alterTable(stmtStr); checkShowTabletResultNum("test.tbl", "p20210901", 2); checkShowTabletResultNum("test.tbl", "p20210902", 3); @@ -119,7 +123,8 @@ public class TruncateTableTest { checkShowTabletResultNum("test.tbl", "p20210904", 5); String truncateStr = "truncate table test.tbl;"; - TruncateTableStmt truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(truncateStr, connectContext); + TruncateTableStmt truncateTableStmt + = (TruncateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(truncateStr, connectContext); Catalog.getCurrentCatalog().truncateTable(truncateTableStmt); checkShowTabletResultNum("test.tbl", "p20210901", 2); checkShowTabletResultNum("test.tbl", "p20210902", 3); diff --git a/fe/fe-core/src/test/java/org/apache/doris/clone/RebalancerTestUtil.java b/fe/fe-core/src/test/java/org/apache/doris/clone/RebalancerTestUtil.java index 2889742187..88f02df4cb 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/clone/RebalancerTestUtil.java +++ b/fe/fe-core/src/test/java/org/apache/doris/clone/RebalancerTestUtil.java @@ -44,7 +44,10 @@ public class RebalancerTestUtil { public static Backend createBackend(long id, long totalCap, long usedCap) { return createBackend(id, totalCap, Lists.newArrayList(usedCap), 1); } - // size of usedCaps should equal to diskNum + + /** + * size of usedCaps should equal to diskNum. + */ public static Backend createBackend(long id, long totalCap, List usedCaps, int diskNum) { // ip:port won't be checked Backend be = new Backend(id, "192.168.0." 
+ id, 9051); @@ -70,6 +73,7 @@ public class RebalancerTestUtil { int tabletId, List beIds) { createTablet(invertedIndex, db, olapTable, partitionName, medium, tabletId, beIds, null); } + public static void createTablet(TabletInvertedIndex invertedIndex, Database db, OlapTable olapTable, String partitionName, TStorageMedium medium, int tabletId, List beIds, List replicaSizes) { Partition partition = olapTable.getPartition(partitionName); diff --git a/fe/fe-core/src/test/java/org/apache/doris/clone/TabletRepairAndBalanceTest.java b/fe/fe-core/src/test/java/org/apache/doris/clone/TabletRepairAndBalanceTest.java index f60e5013b6..73385ab046 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/clone/TabletRepairAndBalanceTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/clone/TabletRepairAndBalanceTest.java @@ -82,7 +82,7 @@ public class TabletRepairAndBalanceTest { // use a unique dir so that it won't be conflict with other unit test which // may also start a Mocked Frontend private static String runningDirBase = "fe"; - private static String runningDir = runningDirBase + "/mocked/TabletRepairAndBalanceTest/" + UUID.randomUUID().toString() + "/"; + private static String runningDir = runningDirBase + "/mocked/TabletRepairAndBalanceTest/" + UUID.randomUUID() + "/"; private static ConnectContext connectContext; private static Random random = new Random(System.currentTimeMillis()); @@ -264,7 +264,8 @@ public class TabletRepairAndBalanceTest { OlapTable tbl = (OlapTable) db.getTableNullable("tbl1"); // alter table's replica allocation failed, tag not enough - String alterStr = "alter table test.tbl1 set (\"replication_allocation\" = \"tag.location.zone1: 2, tag.location.zone2: 3\");"; + String alterStr = "alter table test.tbl1" + + " set (\"replication_allocation\" = \"tag.location.zone1: 2, tag.location.zone2: 3\");"; ExceptionChecker.expectThrows(DdlException.class, () -> alterTable(alterStr)); ReplicaAllocation tblReplicaAlloc = tbl.getDefaultReplicaAllocation(); Assert.assertEquals(3, tblReplicaAlloc.getTotalReplicaNum()); @@ -272,7 +273,8 @@ public class TabletRepairAndBalanceTest { Assert.assertEquals(Short.valueOf((short) 1), tblReplicaAlloc.getReplicaNumByTag(tag2)); // alter partition's replica allocation succeed - String alterStr2 = "alter table test.tbl1 modify partition p1 set (\"replication_allocation\" = \"tag.location.zone1: 1, tag.location.zone2: 2\");"; + String alterStr2 = "alter table test.tbl1 modify partition p1" + + " set (\"replication_allocation\" = \"tag.location.zone1: 1, tag.location.zone2: 2\");"; ExceptionChecker.expectThrowsNoException(() -> alterTable(alterStr2)); Partition p1 = tbl.getPartition("p1"); ReplicaAllocation p1ReplicaAlloc = tbl.getPartitionInfo().getReplicaAllocation(p1.getId()); @@ -395,7 +397,8 @@ public class TabletRepairAndBalanceTest { ExceptionChecker.expectThrowsNoException(() -> alterTable(alterStr3)); // change tbl1's p1's replica allocation to zone1:4, which is forbidden - String alterStr4 = "alter table test.tbl1 modify partition p1 set ('replication_allocation' = 'tag.location.zone1:4')"; + String alterStr4 = "alter table test.tbl1 modify partition p1" + + " set ('replication_allocation' = 'tag.location.zone1:4')"; ExceptionChecker.expectThrows(DdlException.class, () -> alterTable(alterStr4)); // change col_tbl1's default replica allocation to zone2:4, which is allowed @@ -416,7 +419,8 @@ public class TabletRepairAndBalanceTest { Backend backend = backends.get(i); String backendStmt = "alter system modify backend \"" + 
backend.getHost() + ":" + backend.getHeartbeatPort() + "\" set ('tag.location' = 'default')"; - AlterSystemStmt systemStmt = (AlterSystemStmt) UtFrameUtils.parseAndAnalyzeStmt(backendStmt, connectContext); + AlterSystemStmt systemStmt + = (AlterSystemStmt) UtFrameUtils.parseAndAnalyzeStmt(backendStmt, connectContext); DdlExecutor.execute(Catalog.getCurrentCatalog(), systemStmt); } Assert.assertEquals(Tag.DEFAULT_BACKEND_TAG, backends.get(0).getTag()); @@ -448,13 +452,15 @@ public class TabletRepairAndBalanceTest { ExceptionChecker.expectThrowsNoException(() -> alterTable(alterStr6)); Assert.assertEquals(4, tbl2.getPartitionNames().size()); PartitionInfo partitionInfo = tbl2.getPartitionInfo(); - Assert.assertEquals(ReplicaAllocation.DEFAULT_ALLOCATION, partitionInfo.getReplicaAllocation(tbl2.getPartition("p4").getId())); + Assert.assertEquals(ReplicaAllocation.DEFAULT_ALLOCATION, + partitionInfo.getReplicaAllocation(tbl2.getPartition("p4").getId())); // change tbl2 to a colocate table String alterStr7 = "alter table test.tbl2 SET (\"colocate_with\"=\"newg\")"; ExceptionChecker.expectThrowsNoException(() -> alterTable(alterStr7)); ColocateTableIndex.GroupId groupId1 = colocateTableIndex.getGroup(tbl2.getId()); - Assert.assertEquals(ReplicaAllocation.DEFAULT_ALLOCATION, colocateTableIndex.getGroupSchema(groupId1).getReplicaAlloc()); + Assert.assertEquals(ReplicaAllocation.DEFAULT_ALLOCATION, + colocateTableIndex.getGroupSchema(groupId1).getReplicaAlloc()); // test colocate table index persist ExceptionChecker.expectThrowsNoException(() -> testColocateTableIndexSerialization(colocateTableIndex)); diff --git a/fe/fe-core/src/test/java/org/apache/doris/clone/TabletReplicaTooSlowTest.java b/fe/fe-core/src/test/java/org/apache/doris/clone/TabletReplicaTooSlowTest.java index 2a929d8f83..b298723ed8 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/clone/TabletReplicaTooSlowTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/clone/TabletReplicaTooSlowTest.java @@ -137,7 +137,8 @@ public class TabletReplicaTooSlowTest { tabletId = cell.getRowKey(); long beId = cell.getColumnKey(); Backend be = Catalog.getCurrentSystemInfo().getBackend(beId); - List pathHashes = be.getDisks().values().stream().map(DiskInfo::getPathHash).collect(Collectors.toList()); + List pathHashes = be.getDisks().values().stream() + .map(DiskInfo::getPathHash).collect(Collectors.toList()); if (be == null) { continue; } diff --git a/fe/fe-core/src/test/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgoTest.java b/fe/fe-core/src/test/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgoTest.java index 539ce997d6..e20392b796 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgoTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgoTest.java @@ -42,7 +42,8 @@ import java.util.stream.IntStream; public class TwoDimensionalGreedyRebalanceAlgoTest { private static final Logger LOG = LogManager.getLogger(TwoDimensionalGreedyRebalanceAlgoTest.class); - TwoDimensionalGreedyRebalanceAlgo algo = new TwoDimensionalGreedyRebalanceAlgo(TwoDimensionalGreedyRebalanceAlgo.EqualSkewOption.PICK_FIRST); + TwoDimensionalGreedyRebalanceAlgo algo = new TwoDimensionalGreedyRebalanceAlgo( + TwoDimensionalGreedyRebalanceAlgo.EqualSkewOption.PICK_FIRST); // Structure to describe rebalancing-related state of the cluster expressively // enough for the tests. 
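Where the over-long line is a call chain rather than a literal, the surrounding hunks (TabletReplicaTooSlowTest just above, and ColocatePlanTest and CoordinatorTest further down) wrap it before the next `.`, while commas in argument lists stay at the end of the line they terminate. A small, self-contained sketch of that wrapping style follows; `ChainWrapExample` and its sample data are hypothetical and not taken from the Doris sources.
```
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

// Hypothetical illustration only -- ChainWrapExample is not part of the Doris test code.
public class ChainWrapExample {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("alpha", "beta", "gamma");

        // A long stream pipeline is broken before the '.' of the next chained call,
        // mirroring how the test code above splits getDisks().values().stream() chains.
        List<Integer> lengths = names.stream()
                .map(String::length)
                .collect(Collectors.toList());

        System.out.println(lengths); // prints: [5, 4, 5]
    }
}
```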
@@ -112,7 +113,8 @@ public class TwoDimensionalGreedyRebalanceAlgoTest { TestClusterConfig.PartitionPerBeReplicas distribution = tcc.partitionReplicas.get(pIdx); PartitionBalanceInfo info = new PartitionBalanceInfo(distribution.partitionId, distribution.indexId); List replicaCount = distribution.numReplicasByServer; - IntStream.range(0, replicaCount.size()).forEach(i -> info.beByReplicaCount.put(replicaCount.get(i), tcc.beIds.get(i))); + IntStream.range(0, replicaCount.size()) + .forEach(i -> info.beByReplicaCount.put(replicaCount.get(i), tcc.beIds.get(i))); Long maxCount = info.beByReplicaCount.keySet().last(); Long minCount = info.beByReplicaCount.keySet().first(); @@ -142,7 +144,8 @@ public class TwoDimensionalGreedyRebalanceAlgoTest { beByTotalReplicaCount.put(10L, 10001L); beByTotalReplicaCount.put(10L, 10002L); // no info of partition - TreeMultimap skewMap = TreeMultimap.create(Ordering.natural(), Ordering.arbitrary()); + TreeMultimap skewMap + = TreeMultimap.create(Ordering.natural(), Ordering.arbitrary()); try { TwoDimensionalGreedyRebalanceAlgo.applyMove(move, beByTotalReplicaCount, skewMap); } catch (Exception e) { diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/GenericPoolTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/GenericPoolTest.java index 11fa99415c..0fa7161a5e 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/common/GenericPoolTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/common/GenericPoolTest.java @@ -239,6 +239,7 @@ public class GenericPoolTest { // TODO Auto-generated method stub return null; } + @Override public void cleanTrash() throws TException { // TODO Auto-generated method stub diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/ThreadPoolManagerTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/ThreadPoolManagerTest.java index c514fb6764..15c62fb191 100755 --- a/fe/fe-core/src/test/java/org/apache/doris/common/ThreadPoolManagerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/common/ThreadPoolManagerTest.java @@ -40,8 +40,10 @@ public class ThreadPoolManagerTest { List metricList = MetricRepo.getMetricsByName("thread_pool"); Assert.assertEquals(6, metricList.size()); - Assert.assertEquals(ThreadPoolManager.LogDiscardPolicy.class, testCachedPool.getRejectedExecutionHandler().getClass()); - Assert.assertEquals(ThreadPoolManager.BlockedPolicy.class, testFixedThreaddPool.getRejectedExecutionHandler().getClass()); + Assert.assertEquals(ThreadPoolManager.LogDiscardPolicy.class, + testCachedPool.getRejectedExecutionHandler().getClass()); + Assert.assertEquals(ThreadPoolManager.BlockedPolicy.class, + testFixedThreaddPool.getRejectedExecutionHandler().getClass()); Runnable task = () -> { try { diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/util/URITest.java b/fe/fe-core/src/test/java/org/apache/doris/common/util/URITest.java index 073654083c..235880cf63 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/common/util/URITest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/common/util/URITest.java @@ -35,6 +35,7 @@ public class URITest { Assert.assertEquals(javaURI.getFragment(), myURI.getFragment()); Assert.assertEquals(javaURI.getUserInfo(), myURI.getUserInfo()); } + @Test public void testNormal() throws UserException, URISyntaxException { String str1 = "foo://username:password@example.com:8042/over/there/index.dtb?type=animal&name=narwhal#nose"; diff --git a/fe/fe-core/src/test/java/org/apache/doris/http/DorisHttpTestCase.java 
b/fe/fe-core/src/test/java/org/apache/doris/http/DorisHttpTestCase.java index 840b2a1672..eab29c860d 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/http/DorisHttpTestCase.java +++ b/fe/fe-core/src/test/java/org/apache/doris/http/DorisHttpTestCase.java @@ -78,7 +78,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; -abstract public class DorisHttpTestCase { +public abstract class DorisHttpTestCase { public OkHttpClient networkClient = new OkHttpClient.Builder() .readTimeout(100, TimeUnit.SECONDS) @@ -338,18 +338,22 @@ abstract public class DorisHttpTestCase { SchemaChangeHandler getSchemaChangeHandler() { return new SchemaChangeHandler(); } + @Mock MaterializedViewHandler getMaterializedViewHandler() { return new MaterializedViewHandler(); } + @Mock Catalog getCurrentCatalog() { return catalog; } + @Mock SystemInfoService getCurrentSystemInfo() { return systemInfoService; } + @Mock TabletInvertedIndex getCurrentInvertedIndex() { return tabletInvertedIndex; diff --git a/fe/fe-core/src/test/java/org/apache/doris/http/TableQueryPlanActionTest.java b/fe/fe-core/src/test/java/org/apache/doris/http/TableQueryPlanActionTest.java index d8a76fb5ef..581fdfd204 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/http/TableQueryPlanActionTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/http/TableQueryPlanActionTest.java @@ -45,9 +45,11 @@ public class TableQueryPlanActionTest extends DorisHttpTestCase { super.setUp(); ES_TABLE_URL = "http://localhost:" + HTTP_PORT + "/api/" + DB_NAME + "/es_table"; } + @Test public void testQueryPlanAction() throws IOException, TException { - RequestBody body = RequestBody.create("{ \"sql\" : \" select k1,k2 from " + DB_NAME + "." + TABLE_NAME + " \" }", JSON); + RequestBody body = RequestBody.create( + "{ \"sql\" : \" select k1,k2 from " + DB_NAME + "." + TABLE_NAME + " \" }", JSON); Request request = new Request.Builder() .post(body) .addHeader("Authorization", rootAuth) @@ -117,7 +119,8 @@ public class TableQueryPlanActionTest extends DorisHttpTestCase { @Test public void testInconsistentResource() throws IOException { - RequestBody body = RequestBody.create(JSON, "{ \"sql\" : \" select k1,k2 from " + DB_NAME + "." + TABLE_NAME + 1 + " \" }"); + RequestBody body = RequestBody.create(JSON, + "{ \"sql\" : \" select k1,k2 from " + DB_NAME + "." + TABLE_NAME + 1 + " \" }"); Request request = new Request.Builder() .post(body) .addHeader("Authorization", rootAuth) @@ -136,7 +139,8 @@ public class TableQueryPlanActionTest extends DorisHttpTestCase { @Test public void testMalformedJson() throws IOException { - RequestBody body = RequestBody.create(JSON, "{ \"sql\" : \" select k1,k2 from " + DB_NAME + "." + TABLE_NAME + " \""); + RequestBody body = RequestBody.create(JSON, + "{ \"sql\" : \" select k1,k2 from " + DB_NAME + "." 
+ TABLE_NAME + " \""); Request request = new Request.Builder() .post(body) .addHeader("Authorization", rootAuth) @@ -154,7 +158,8 @@ public class TableQueryPlanActionTest extends DorisHttpTestCase { @Test public void testNotOlapTableFailure() throws IOException { - RequestBody body = RequestBody.create("{ \"sql\" : \" select k1,k2 from " + DB_NAME + ".es_table" + " \" }", JSON); + RequestBody body = RequestBody.create( + "{ \"sql\" : \" select k1,k2 from " + DB_NAME + ".es_table" + " \" }", JSON); Request request = new Request.Builder() .post(body) .addHeader("Authorization", rootAuth) diff --git a/fe/fe-core/src/test/java/org/apache/doris/ldap/LdapPrivsCheckerTest.java b/fe/fe-core/src/test/java/org/apache/doris/ldap/LdapPrivsCheckerTest.java index 6087ea9ed0..6e705ab6b1 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/ldap/LdapPrivsCheckerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/ldap/LdapPrivsCheckerTest.java @@ -168,10 +168,10 @@ public class LdapPrivsCheckerTest { @Test public void testIsCurrentUser() { Assert.assertTrue(LdapPrivsChecker.isCurrentUser(userIdent)); - Assert.assertFalse(LdapPrivsChecker.isCurrentUser(UserIdentity. - createAnalyzedUserIdentWithIp("default_cluster:lisi", IP))); - Assert.assertFalse(LdapPrivsChecker.isCurrentUser(UserIdentity. - createAnalyzedUserIdentWithIp(USER, "127.0.0.1"))); + Assert.assertFalse(LdapPrivsChecker.isCurrentUser( + UserIdentity.createAnalyzedUserIdentWithIp("default_cluster:lisi", IP))); + Assert.assertFalse(LdapPrivsChecker.isCurrentUser( + UserIdentity.createAnalyzedUserIdentWithIp(USER, "127.0.0.1"))); } @Test diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/DeleteHandlerTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/DeleteHandlerTest.java index b050b633f1..e9c80d8260 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/DeleteHandlerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/DeleteHandlerTest.java @@ -121,6 +121,7 @@ public class DeleteHandlerTest { @Mock public void logSaveTransactionId(long transactionId) { } + @Mock public void logInsertTransactionState(TransactionState transactionState) { } diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/TabletLoadInfoTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/TabletLoadInfoTest.java index dcd97936ea..40f5b922e5 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/TabletLoadInfoTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/TabletLoadInfoTest.java @@ -31,6 +31,7 @@ import java.io.FileOutputStream; public class TabletLoadInfoTest { private FakeCatalog fakeCatalog; + @Test public void testSerialization() throws Exception { // mock catalog diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkRepositoryTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkRepositoryTest.java index c062353898..01b385be2c 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkRepositoryTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/loadv2/SparkRepositoryTest.java @@ -40,12 +40,12 @@ import java.util.List; public class SparkRepositoryTest { private SparkRepository repository; - private final static String DPP_LOCAL_MD5SUM = "b3cd0ae3a4121e2426532484442e90ec"; - private final static String SPARK_LOCAL_MD5SUM = "6d2b052ffbdf7082c019bd202432739c"; - private final static String DPP_VERSION = Config.spark_dpp_version; - private final static String SPARK_LOAD_WORK_DIR = "hdfs://127.0.0.1/99999/user/doris/etl"; - private final static 
String DPP_NAME = SparkRepository.SPARK_DPP + ".jar"; - private final static String SPARK_NAME = SparkRepository.SPARK_2X + ".zip"; + private static final String DPP_LOCAL_MD5SUM = "b3cd0ae3a4121e2426532484442e90ec"; + private static final String SPARK_LOCAL_MD5SUM = "6d2b052ffbdf7082c019bd202432739c"; + private static final String DPP_VERSION = Config.spark_dpp_version; + private static final String SPARK_LOAD_WORK_DIR = "hdfs://127.0.0.1/99999/user/doris/etl"; + private static final String DPP_NAME = SparkRepository.SPARK_DPP + ".jar"; + private static final String SPARK_NAME = SparkRepository.SPARK_2X + ".zip"; private String remoteRepoPath; private String remoteArchivePath; diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/sync/canal/CanalSyncDataTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/sync/canal/CanalSyncDataTest.java index 6864d1a016..31a650a1c7 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/sync/canal/CanalSyncDataTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/sync/canal/CanalSyncDataTest.java @@ -175,21 +175,26 @@ public class CanalSyncDataTest { @Mock void connect() throws CanalClientException { } + @Mock void disconnect() throws CanalClientException { } + @Mock Message getWithoutAck(int var1, Long var2, TimeUnit var3) throws CanalClientException { offset += batchSize * 1; // Simply set one entry as one byte return CanalTestUtil.fetchMessage( ++nextId, false, batchSize, binlogFile, offset, "mysql_db", "mysql_tbl"); } + @Mock void rollback() throws CanalClientException { } + @Mock void ack(long var1) throws CanalClientException { } + @Mock void subscribe(String var1) throws CanalClientException { } diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/sync/canal/CanalSyncJobTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/sync/canal/CanalSyncJobTest.java index 4dc7eeb909..3995839fef 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/sync/canal/CanalSyncJobTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/sync/canal/CanalSyncJobTest.java @@ -139,9 +139,11 @@ public class CanalSyncJobTest { @Mock public void startup() { } + @Mock public void shutdown(boolean needCleanUp) { } + @Mock public void registerChannels(List channels) { } @@ -300,9 +302,11 @@ public class CanalSyncJobTest { @Mock public void startup() { } + @Mock public void shutdown(boolean needCleanUp) { } + @Mock public void registerChannels(List channels) { } diff --git a/fe/fe-core/src/test/java/org/apache/doris/persist/BatchModifyPartitionsInfoTest.java b/fe/fe-core/src/test/java/org/apache/doris/persist/BatchModifyPartitionsInfoTest.java index 9941079ada..c9e5d2a993 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/persist/BatchModifyPartitionsInfoTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/persist/BatchModifyPartitionsInfoTest.java @@ -35,13 +35,13 @@ import java.io.IOException; import java.util.List; public class BatchModifyPartitionsInfoTest { - private final static String FILE_NAME = "./BatchModifyPartitionsInfoTest"; + private static final String FILE_NAME = "./BatchModifyPartitionsInfoTest"; - private final static long DB_ID = 10000L; - private final static long TB_ID = 30000L; - private final static long PARTITION_ID_1 = 40000L; - private final static long PARTITION_ID_2 = 40001L; - private final static long PARTITION_ID_3 = 40002L; + private static final long DB_ID = 10000L; + private static final long TB_ID = 30000L; + private static final long PARTITION_ID_1 = 40000L; + private static final long 
PARTITION_ID_2 = 40001L; + private static final long PARTITION_ID_3 = 40002L; @After public void tearDown() { diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/ColocatePlanTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/ColocatePlanTest.java index 234a650ced..3abf6fff41 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/ColocatePlanTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/planner/ColocatePlanTest.java @@ -68,8 +68,8 @@ public class ColocatePlanTest { String createMultiPartitionTableStmt = "create table db1.test_multi_partition(k1 int, k2 int)" + "partition by range(k1) (partition p1 values less than(\"1\"), partition p2 values less than (\"2\"))" + "distributed by hash(k2) buckets 10 properties ('replication_num' = '2', 'colocate_with' = 'group2')"; - CreateTableStmt createMultiTableStmt = (CreateTableStmt) UtFrameUtils. - parseAndAnalyzeStmt(createMultiPartitionTableStmt, ctx); + CreateTableStmt createMultiTableStmt = (CreateTableStmt) UtFrameUtils + .parseAndAnalyzeStmt(createMultiPartitionTableStmt, ctx); Catalog.getCurrentCatalog().createTable(createMultiTableStmt); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/RuntimeFilterGeneratorTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/RuntimeFilterGeneratorTest.java index 295037dfb6..2924e820f9 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/RuntimeFilterGeneratorTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/planner/RuntimeFilterGeneratorTest.java @@ -102,10 +102,10 @@ public class RuntimeFilterGeneratorTest { BinaryPredicate eqJoinConjunct = new BinaryPredicate(BinaryPredicate.Operator.EQ, lhsExpr, rhsExpr); testJoinExprs.add(eqJoinConjunct); - hashJoinNode = new HashJoinNode(new PlanNodeId(2), lhsScanNode, rhsScanNode, tableRef, testJoinExprs - , new ArrayList<>()); - testPlanFragment = new PlanFragment(new PlanFragmentId(0), hashJoinNode - , new DataPartition(TPartitionType.UNPARTITIONED)); + hashJoinNode = new HashJoinNode(new PlanNodeId(2), lhsScanNode, rhsScanNode, tableRef, testJoinExprs, + new ArrayList<>()); + testPlanFragment = new PlanFragment(new PlanFragmentId(0), hashJoinNode, + new DataPartition(TPartitionType.UNPARTITIONED)); hashJoinNode.setFragment(testPlanFragment); lhsScanNode.setFragment(testPlanFragment); rhsScanNode.setFragment(testPlanFragment); @@ -245,10 +245,10 @@ public class RuntimeFilterGeneratorTest { } }; RuntimeFilterGenerator.generateRuntimeFilters(analyzer, hashJoinNode); - Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true) - , "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); - Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false) - , "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); + Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true), + "RF000[in] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); + Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false), + "RF000[in] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); Assert.assertEquals(testPlanFragment.getTargetRuntimeFilterIds().size(), 1); Assert.assertEquals(testPlanFragment.getBuilderRuntimeFilterIds().size(), 1); Assert.assertEquals(analyzer.getAssignedRuntimeFilter().size(), 1); @@ -263,10 +263,10 @@ public class RuntimeFilterGeneratorTest { } }; RuntimeFilterGenerator.generateRuntimeFilters(analyzer, hashJoinNode); - Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true) - , 
"RF000[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); - Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false) - , "RF000[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); + Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true), + "RF000[bloom] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); + Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false), + "RF000[bloom] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); Assert.assertEquals(testPlanFragment.getTargetRuntimeFilterIds().size(), 1); Assert.assertEquals(testPlanFragment.getBuilderRuntimeFilterIds().size(), 1); Assert.assertEquals(analyzer.getAssignedRuntimeFilter().size(), 1); @@ -301,10 +301,10 @@ public class RuntimeFilterGeneratorTest { } }; RuntimeFilterGenerator.generateRuntimeFilters(analyzer, hashJoinNode); - Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true) - , "RF000[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); - Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false) - , "RF000[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); + Assert.assertEquals(hashJoinNode.getRuntimeFilterExplainString(true), + "RF000[min_max] <- `default_cluster:test_db`.`test_rhs_tbl`.`test_rhs_col`\n"); + Assert.assertEquals(lhsScanNode.getRuntimeFilterExplainString(false), + "RF000[min_max] -> `default_cluster:test_db`.`test_lhs_tbl`.`test_lhs_col`\n"); Assert.assertEquals(testPlanFragment.getTargetRuntimeFilterIds().size(), 1); Assert.assertEquals(testPlanFragment.getBuilderRuntimeFilterIds().size(), 1); Assert.assertEquals(analyzer.getAssignedRuntimeFilter().size(), 1); diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/StreamLoadScanNodeTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/StreamLoadScanNodeTest.java index 7ffc14dd06..f9ff71a4cf 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/StreamLoadScanNodeTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/planner/StreamLoadScanNodeTest.java @@ -167,8 +167,7 @@ public class StreamLoadScanNodeTest { private StreamLoadScanNode getStreamLoadScanNode(TupleDescriptor dstDesc, TStreamLoadPutRequest request) throws UserException { StreamLoadTask streamLoadTask = StreamLoadTask.fromTStreamLoadPutRequest(request); - StreamLoadScanNode scanNode = new StreamLoadScanNode(streamLoadTask.getId(), new PlanNodeId(1), dstDesc, dstTable, streamLoadTask); - return scanNode; + return new StreamLoadScanNode(streamLoadTask.getId(), new PlanNodeId(1), dstDesc, dstTable, streamLoadTask); } @Test @@ -322,7 +321,8 @@ public class StreamLoadScanNodeTest { new Expectations() { { catalog.getFunction((Function) any, (Function.CompareMode) any); - result = new ScalarFunction(new FunctionName(FunctionSet.HLL_HASH), Lists.newArrayList(), Type.BIGINT, false, true); + result = new ScalarFunction(new FunctionName(FunctionSet.HLL_HASH), + Lists.newArrayList(), Type.BIGINT, false, true); dstTable.getColumn("k1"); result = columns.stream().filter(c -> c.getName().equals("k1")).findFirst().get(); @@ -365,7 +365,8 @@ public class StreamLoadScanNodeTest { new Expectations() { { catalog.getFunction((Function) any, (Function.CompareMode) any); - result = new ScalarFunction(new FunctionName("hll_hash1"), Lists.newArrayList(), Type.BIGINT, false, true); + result = new ScalarFunction(new FunctionName("hll_hash1"), Lists.newArrayList(), + Type.BIGINT, false, true); minTimes = 0; } }; @@ -581,8 +582,8 
@@ public class StreamLoadScanNodeTest { request.setColumns("k1,k2,v1, v2=k2"); request.setWhere("k1 1"); StreamLoadTask streamLoadTask = StreamLoadTask.fromTStreamLoadPutRequest(request); - StreamLoadScanNode scanNode = new StreamLoadScanNode(streamLoadTask.getId(), new PlanNodeId(1), dstDesc, dstTable, - streamLoadTask); + StreamLoadScanNode scanNode = new StreamLoadScanNode(streamLoadTask.getId(), new PlanNodeId(1), + dstDesc, dstTable, streamLoadTask); scanNode.init(analyzer); scanNode.finalize(analyzer); diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/TableFunctionPlanTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/TableFunctionPlanTest.java index ce14f2b2ac..1bc7e2a077 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/TableFunctionPlanTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/planner/TableFunctionPlanTest.java @@ -479,7 +479,8 @@ public class TableFunctionPlanTest { */ @Test public void aggColumnInOuterQuery() throws Exception { - String sql = "desc verbose select min(c1) from (select c1 from (select k1 as c1, min(k2) as c2 from db1.tbl1 group by c1) a " + String sql = "desc verbose select min(c1) from (select c1 from" + + " (select k1 as c1, min(k2) as c2 from db1.tbl1 group by c1) a " + "lateral view explode_split(c2, \",\") tmp1 as e1) tmp2"; String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 2, "TABLE FUNCTION NODE")); @@ -492,7 +493,8 @@ public class TableFunctionPlanTest { @Test public void testLateralViewWithView() throws Exception { // test 1 - String createViewStr = "create view db1.v1 (k1,e1) as select k1,e1 from db1.table_for_view lateral view explode_split(k3,',') tmp as e1;"; + String createViewStr = "create view db1.v1 (k1,e1) as select k1,e1" + + " from db1.table_for_view lateral view explode_split(k3,',') tmp as e1;"; CreateViewStmt createViewStmt = (CreateViewStmt) UtFrameUtils.parseAndAnalyzeStmt(createViewStr, ctx); Catalog.getCurrentCatalog().createView(createViewStmt); @@ -506,7 +508,8 @@ public class TableFunctionPlanTest { @Test public void testLateralViewWithWhere() throws Exception { - String sql = "select k1,e1 from db1.table_for_view lateral view explode_split(k3,',') tmp as e1 where k1 in (select k2 from db1.table_for_view);"; + String sql = "select k1,e1 from db1.table_for_view lateral view explode_split(k3,',') tmp as e1" + + " where k1 in (select k2 from db1.table_for_view);"; String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); Assert.assertTrue(explainString.contains("join op: LEFT SEMI JOIN(BROADCAST)")); Assert.assertTrue(explainString.contains("equal join conjunct: `k1` = `k2`")); @@ -515,16 +518,19 @@ public class TableFunctionPlanTest { @Test public void testLateralViewWithCTE() throws Exception { - String sql = "with tmp as (select k1,e1 from db1.table_for_view lateral view explode_split(k3,',') tmp2 as e1) select * from tmp;"; + String sql = "with tmp as (select k1,e1 from db1.table_for_view lateral view explode_split(k3,',') tmp2 as e1)" + + " select * from tmp;"; String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(explainString.contains("table function: explode_split(`default_cluster:db1`.`table_for_view`.`k3`, ',') ")); + Assert.assertTrue(explainString.contains("table function:" + + " explode_split(`default_cluster:db1`.`table_for_view`.`k3`, ',') ")); } @Test public void testLateralViewWithCTEBug() throws Exception { - String 
sql = "with tmp as (select * from db1.table_for_view where k2=1) select k1,e1 from tmp lateral view explode_split(k3,',') tmp2 as e1;"; + String sql = "with tmp as (select * from db1.table_for_view where k2=1)" + + " select k1,e1 from tmp lateral view explode_split(k3,',') tmp2 as e1;"; String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(!explainString.contains("Unknown column 'e1' in 'table list'")); + Assert.assertFalse(explainString.contains("Unknown column 'e1' in 'table list'")); } @Test @@ -535,7 +541,7 @@ public class TableFunctionPlanTest { Catalog.getCurrentCatalog().createView(createViewStmt); String sql = "select k1,e1 from db1.v2 lateral view explode_split(k3,',') tmp as e1;"; String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(!explainString.contains("Unknown column 'e1' in 'table list'")); + Assert.assertFalse(explainString.contains("Unknown column 'e1' in 'table list'")); } @@ -546,6 +552,7 @@ public class TableFunctionPlanTest { String sql = "with d as (select k1+k1 as k1 from db1.table_for_view ) " + "select k1 from d lateral view explode_split(k1,',') tmp as e1;"; String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(!explainString.contains("Unexpected exception: org.apache.doris.analysis.FunctionCallExpr cannot be cast to org.apache.doris.analysis.SlotRef")); + Assert.assertFalse(explainString.contains("Unexpected exception: org.apache.doris.analysis.FunctionCallExpr" + + " cannot be cast to org.apache.doris.analysis.SlotRef")); } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/qe/ConnectProcessorTest.java b/fe/fe-core/src/test/java/org/apache/doris/qe/ConnectProcessorTest.java index 55cbbb0cb9..ef7b588790 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/qe/ConnectProcessorTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/qe/ConnectProcessorTest.java @@ -159,22 +159,27 @@ public class ConnectProcessorTest { public void setKilled() { myContext.setKilled(); } + @Override public MysqlSerializer getSerializer() { return myContext.getSerializer(); } + @Override public QueryState getState() { return myContext.getState(); } + @Override public void setStartTime() { myContext.setStartTime(); } + @Override public String getDatabase() { return myContext.getDatabase(); } + @Override public void setCommand(MysqlCommand command) { if (firstTimeToSetCommand) { diff --git a/fe/fe-core/src/test/java/org/apache/doris/qe/CoordinatorTest.java b/fe/fe-core/src/test/java/org/apache/doris/qe/CoordinatorTest.java index 67def92ade..8a079f6390 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/qe/CoordinatorTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/qe/CoordinatorTest.java @@ -65,10 +65,12 @@ import java.util.Set; public class CoordinatorTest extends Coordinator { static Planner planner = new Planner(); static ConnectContext context = new ConnectContext(null); + static { context.setQueryId(new TUniqueId(1, 2)); context.setQualifiedUser("root"); } + @Mocked static Catalog catalog; @Mocked @@ -269,8 +271,8 @@ public class CoordinatorTest extends Coordinator { Map> fragmentIdToBuckendIdBucketCountMap = Deencapsulation.getField(bucketShuffleJoinController, "fragmentIdToBuckendIdBucketCountMap"); - long targetBeCount = fragmentIdToBuckendIdBucketCountMap.values(). 
- stream().flatMap(buckend2BucketCountMap -> buckend2BucketCountMap.values().stream()) + long targetBeCount = fragmentIdToBuckendIdBucketCountMap.values() + .stream().flatMap(buckend2BucketCountMap -> buckend2BucketCountMap.values().stream()) .filter(count -> count == 22).count(); Assert.assertEquals(targetBeCount, 3); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/qe/MultiLoadMgrTest.java b/fe/fe-core/src/test/java/org/apache/doris/qe/MultiLoadMgrTest.java index f5058b70c3..da74e4fbb2 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/qe/MultiLoadMgrTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/qe/MultiLoadMgrTest.java @@ -40,6 +40,7 @@ public class MultiLoadMgrTest { private ConnectContext context; @Mocked private SystemInfoService systemInfoService; + @Before public void setUp() throws Exception { new Expectations() { diff --git a/fe/fe-core/src/test/java/org/apache/doris/qe/ShowExecutorTest.java b/fe/fe-core/src/test/java/org/apache/doris/qe/ShowExecutorTest.java index b1eee5ead3..eb2db677e3 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/qe/ShowExecutorTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/qe/ShowExecutorTest.java @@ -297,6 +297,7 @@ public class ShowExecutorTest { Catalog getCurrentCatalog() { return catalog; } + @Mock SystemInfoService getCurrentSystemInfo() { return clusterInfo; diff --git a/fe/fe-core/src/test/java/org/apache/doris/qe/ShowResultSetTest.java b/fe/fe-core/src/test/java/org/apache/doris/qe/ShowResultSetTest.java index e9493cf8cd..fd731920f6 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/qe/ShowResultSetTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/qe/ShowResultSetTest.java @@ -27,6 +27,7 @@ import java.util.List; public class ShowResultSetTest { @Mocked ShowResultSetMetaData metaData; + @Test public void testNormal() { List> rows = Lists.newArrayList(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/qe/StmtExecutorTest.java b/fe/fe-core/src/test/java/org/apache/doris/qe/StmtExecutorTest.java index 945c0fec5a..5b371995de 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/qe/StmtExecutorTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/qe/StmtExecutorTest.java @@ -246,7 +246,8 @@ public class StmtExecutorTest { } @Test - public void testShow(@Mocked ShowStmt showStmt, @Mocked SqlParser parser, @Mocked ShowExecutor executor) throws Exception { + public void testShow(@Mocked ShowStmt showStmt, @Mocked SqlParser parser, + @Mocked ShowExecutor executor) throws Exception { new Expectations() { { showStmt.analyze((Analyzer) any); @@ -281,7 +282,8 @@ public class StmtExecutorTest { } @Test - public void testShowNull(@Mocked ShowStmt showStmt, @Mocked SqlParser parser, @Mocked ShowExecutor executor) throws Exception { + public void testShowNull(@Mocked ShowStmt showStmt, @Mocked SqlParser parser, + @Mocked ShowExecutor executor) throws Exception { new Expectations() { { showStmt.analyze((Analyzer) any); @@ -352,7 +354,8 @@ public class StmtExecutorTest { } @Test - public void testKillOtherFail(@Mocked KillStmt killStmt, @Mocked SqlParser parser, @Mocked ConnectContext killCtx) throws Exception { + public void testKillOtherFail(@Mocked KillStmt killStmt, @Mocked SqlParser parser, + @Mocked ConnectContext killCtx) throws Exception { Catalog killCatalog = AccessTestUtil.fetchAdminCatalog(); new Expectations() { @@ -409,7 +412,8 @@ public class StmtExecutorTest { } @Test - public void testKillOther(@Mocked KillStmt killStmt, @Mocked SqlParser parser, @Mocked ConnectContext killCtx) throws 
Exception { + public void testKillOther(@Mocked KillStmt killStmt, @Mocked SqlParser parser, + @Mocked ConnectContext killCtx) throws Exception { Catalog killCatalog = AccessTestUtil.fetchAdminCatalog(); new Expectations() { { @@ -500,7 +504,8 @@ public class StmtExecutorTest { } @Test - public void testSet(@Mocked SetStmt setStmt, @Mocked SqlParser parser, @Mocked SetExecutor executor) throws Exception { + public void testSet(@Mocked SetStmt setStmt, @Mocked SqlParser parser, + @Mocked SetExecutor executor) throws Exception { new Expectations() { { setStmt.analyze((Analyzer) any); @@ -538,7 +543,8 @@ public class StmtExecutorTest { } @Test - public void testSetFail(@Mocked SetStmt setStmt, @Mocked SqlParser parser, @Mocked SetExecutor executor) throws Exception { + public void testSetFail(@Mocked SetStmt setStmt, @Mocked SqlParser parser, + @Mocked SetExecutor executor) throws Exception { new Expectations() { { setStmt.analyze((Analyzer) any); diff --git a/fe/fe-core/src/test/java/org/apache/doris/resource/TagSerializationTest.java b/fe/fe-core/src/test/java/org/apache/doris/resource/TagSerializationTest.java index c4443ba747..f96ef1712c 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/resource/TagSerializationTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/resource/TagSerializationTest.java @@ -94,8 +94,10 @@ public class TagSerializationTest { DataOutputStream out = new DataOutputStream(new FileOutputStream(file)); TagManager tagManager = new TagManager(); - tagManager.addResourceTag(1L, Tag.create(Tag.TYPE_LOCATION, "rack1")); - tagManager.addResourceTags(2L, TagSet.create(Tag.create(Tag.TYPE_LOCATION, "rack1"), Tag.create(Tag.TYPE_LOCATION, "rack2"))); + tagManager.addResourceTag(1L, + Tag.create(Tag.TYPE_LOCATION, "rack1")); + tagManager.addResourceTags(2L, + TagSet.create(Tag.create(Tag.TYPE_LOCATION, "rack1"), Tag.create(Tag.TYPE_LOCATION, "rack2"))); tagManager.write(out); out.flush(); out.close(); @@ -104,8 +106,10 @@ public class TagSerializationTest { DataInputStream in = new DataInputStream(new FileInputStream(file)); TagManager readTagManager = TagManager.read(in); - Assert.assertEquals(Sets.newHashSet(1L, 2L), readTagManager.getResourceIdsByTag(Tag.create(Tag.TYPE_LOCATION, "rack1"))); - Assert.assertEquals(Sets.newHashSet(2L), readTagManager.getResourceIdsByTags(TagSet.create(Tag.create(Tag.TYPE_LOCATION, "rack2")))); + Assert.assertEquals(Sets.newHashSet(1L, 2L), + readTagManager.getResourceIdsByTag(Tag.create(Tag.TYPE_LOCATION, "rack1"))); + Assert.assertEquals(Sets.newHashSet(2L), + readTagManager.getResourceIdsByTags(TagSet.create(Tag.create(Tag.TYPE_LOCATION, "rack2")))); in.close(); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/rewrite/FEFunctionsTest.java b/fe/fe-core/src/test/java/org/apache/doris/rewrite/FEFunctionsTest.java index 20baf3d39e..e0f27e74a8 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/rewrite/FEFunctionsTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/rewrite/FEFunctionsTest.java @@ -245,6 +245,7 @@ public class FEFunctionsTest { expectedResult = new IntLiteral(1970, Type.INT); Assert.assertEquals(expectedResult, actualResult); } + @Test public void monthTest() throws AnalysisException { IntLiteral actualResult = FEFunctions.month(new DateLiteral("2018-08-08", Type.DATE)); diff --git a/fe/fe-core/src/test/java/org/apache/doris/rewrite/InferFiltersRuleTest.java b/fe/fe-core/src/test/java/org/apache/doris/rewrite/InferFiltersRuleTest.java index b71b36452f..ab132491d0 100644 --- 
a/fe/fe-core/src/test/java/org/apache/doris/rewrite/InferFiltersRuleTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/rewrite/InferFiltersRuleTest.java @@ -130,6 +130,7 @@ public class InferFiltersRuleTest { String planString = dorisAssert.query(query).explainQuery(); Assert.assertFalse(planString.contains("`tb1`.`k1` = 1")); } + @Test public void testOn2TablesLeftAntiJoinEqLiteralAt1st() throws Exception { SessionVariable sessionVariable = dorisAssert.getSessionVariable(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/service/ExecuteEnvTest.java b/fe/fe-core/src/test/java/org/apache/doris/service/ExecuteEnvTest.java index ebd38858fb..3c2370dd88 100755 --- a/fe/fe-core/src/test/java/org/apache/doris/service/ExecuteEnvTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/service/ExecuteEnvTest.java @@ -47,21 +47,21 @@ public class ExecuteEnvTest { Assert.assertEquals(oids[i - 1], oids[i]); } } -} -class MyTest implements Runnable { - public int index; - public int[] oids; + static class MyTest implements Runnable { + public int index; + public int[] oids; - MyTest(int index, int[] oids) { - this.index = index; - this.oids = oids; - } + MyTest(int index, int[] oids) { + this.index = index; + this.oids = oids; + } - @Override - public void run() { - ExecuteEnv instance = ExecuteEnv.getInstance(); - int oid = instance.hashCode(); - oids[index] = oid; + @Override + public void run() { + ExecuteEnv instance = ExecuteEnv.getInstance(); + int oid = instance.hashCode(); + oids[index] = oid; + } } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java b/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java index 2bd2a2ffc9..e8a5aa9bf2 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java @@ -158,20 +158,23 @@ public class SystemInfoServiceTest { be4.setTag(tagb); be5.setTag(tagb); - BeSelectionPolicy policy7 = new BeSelectionPolicy.Builder().needQueryAvailable().addTags(Sets.newHashSet(taga)).build(); + BeSelectionPolicy policy7 = new BeSelectionPolicy.Builder().needQueryAvailable() + .addTags(Sets.newHashSet(taga)).build(); Assert.assertEquals(1, infoService.selectBackendIdsByPolicy(policy7, 1).size()); Assert.assertEquals(2, infoService.selectBackendIdsByPolicy(policy7, 2).size()); Assert.assertTrue(infoService.selectBackendIdsByPolicy(policy7, 2).contains(10001L)); Assert.assertTrue(infoService.selectBackendIdsByPolicy(policy7, 2).contains(10002L)); Assert.assertEquals(0, infoService.selectBackendIdsByPolicy(policy7, 3).size()); - BeSelectionPolicy policy8 = new BeSelectionPolicy.Builder().needQueryAvailable().addTags(Sets.newHashSet(tagb)).build(); + BeSelectionPolicy policy8 = new BeSelectionPolicy.Builder() + .needQueryAvailable().addTags(Sets.newHashSet(tagb)).build(); Assert.assertEquals(3, infoService.selectBackendIdsByPolicy(policy8, 3).size()); Assert.assertTrue(infoService.selectBackendIdsByPolicy(policy8, 3).contains(10003L)); Assert.assertTrue(infoService.selectBackendIdsByPolicy(policy8, 3).contains(10004L)); Assert.assertTrue(infoService.selectBackendIdsByPolicy(policy8, 3).contains(10005L)); - BeSelectionPolicy policy9 = new BeSelectionPolicy.Builder().needQueryAvailable().addTags(Sets.newHashSet(taga, tagb)).build(); + BeSelectionPolicy policy9 = new BeSelectionPolicy.Builder().needQueryAvailable() + .addTags(Sets.newHashSet(taga, tagb)).build(); Assert.assertEquals(5, 
infoService.selectBackendIdsByPolicy(policy9, 5).size()); // 6. check storage medium diff --git a/fe/fe-core/src/test/java/org/apache/doris/task/AgentTaskTest.java b/fe/fe-core/src/test/java/org/apache/doris/task/AgentTaskTest.java index 3fe5d476b3..0cb1a8d4f8 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/task/AgentTaskTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/task/AgentTaskTest.java @@ -97,7 +97,8 @@ public class AgentTaskTest { columns.add(new Column("v1", ScalarType.createType(PrimitiveType.INT), false, AggregateType.SUM, "1", "")); PartitionKey pk1 = PartitionKey.createInfinityPartitionKey(Arrays.asList(columns.get(0)), false); - PartitionKey pk2 = PartitionKey.createPartitionKey(Arrays.asList(new PartitionValue("10")), Arrays.asList(columns.get(0))); + PartitionKey pk2 = PartitionKey.createPartitionKey( + Arrays.asList(new PartitionValue("10")), Arrays.asList(columns.get(0))); range1 = Range.closedOpen(pk1, pk2); PartitionKey pk3 = PartitionKey.createInfinityPartitionKey(Arrays.asList(columns.get(0)), true); diff --git a/fe/fe-core/src/test/java/org/apache/doris/task/SerialExecutorServiceTest.java b/fe/fe-core/src/test/java/org/apache/doris/task/SerialExecutorServiceTest.java index bb90912c58..794b4a4b0c 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/task/SerialExecutorServiceTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/task/SerialExecutorServiceTest.java @@ -122,6 +122,7 @@ public class SerialExecutorServiceTest { @Override public void onFinished(long channelId) { } + @Override public void onFailed(String errMsg) { } diff --git a/fe/fe-core/src/test/java/org/apache/doris/utframe/MockedBackendFactory.java b/fe/fe-core/src/test/java/org/apache/doris/utframe/MockedBackendFactory.java index d93bce3237..6a184f36ab 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/utframe/MockedBackendFactory.java +++ b/fe/fe-core/src/test/java/org/apache/doris/utframe/MockedBackendFactory.java @@ -126,7 +126,7 @@ public class MockedBackendFactory { // abstract BeThriftService. // User can extends this abstract class to create other custom be thrift service - public static abstract class BeThriftService implements BackendService.Iface { + public abstract static class BeThriftService implements BackendService.Iface { protected MockedBackend backend; public void setBackend(MockedBackend backend) { diff --git a/fe/fe-core/src/test/java/org/apache/doris/utframe/UtFrameUtils.java b/fe/fe-core/src/test/java/org/apache/doris/utframe/UtFrameUtils.java index 37a8285003..9bbf6278b7 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/utframe/UtFrameUtils.java +++ b/fe/fe-core/src/test/java/org/apache/doris/utframe/UtFrameUtils.java @@ -192,9 +192,11 @@ public class UtFrameUtils { // Create multi backends with different host for unit test. // the host of BE will be "127.0.0.1", "127.0.0.2" - public static void createDorisClusterWithMultiTag(String runningDir, int backendNum) throws EnvVarNotSetException, IOException, - FeStartException, NotInitException, DdlException, InterruptedException { - // set runningUnitTest to true, so that for ut, the agent task will be send to "127.0.0.1" to make cluster running well. + public static void createDorisClusterWithMultiTag(String runningDir, int backendNum) + throws EnvVarNotSetException, IOException, FeStartException, + NotInitException, DdlException, InterruptedException { + // set runningUnitTest to true, so that for ut, + // the agent task will be sent to "127.0.0.1" to make cluster running well. 
FeConstants.runningUnitTest = true; int feRpcPort = startFEServer(runningDir); for (int i = 0; i < backendNum; i++) { @@ -220,7 +222,8 @@ public class UtFrameUtils { backend.start(); // add be - Backend be = new Backend(Catalog.getCurrentCatalog().getNextId(), backend.getHost(), backend.getHeartbeatPort()); + Backend be = new Backend(Catalog.getCurrentCatalog().getNextId(), + backend.getHost(), backend.getHeartbeatPort()); Map disks = Maps.newHashMap(); DiskInfo diskInfo1 = new DiskInfo("/path" + be.getId()); diskInfo1.setTotalCapacityB(1000000); diff --git a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapAndUDF.java b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapAndUDF.java index 8e32c82e3f..413f3e15b1 100644 --- a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapAndUDF.java +++ b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapAndUDF.java @@ -30,7 +30,8 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectIn import java.io.IOException; -@Description(name = "bitmap_and", value = "a _FUNC_ b - Compute intersection of two or more input bitmaps, return the new bitmap") +@Description(name = "bitmap_and", value = "a _FUNC_ b - Compute intersection of two or more input bitmaps," + + " return the new bitmap") public class BitmapAndUDF extends GenericUDF { private transient BinaryObjectInspector inputOI0; diff --git a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapCountUDF.java b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapCountUDF.java index 2d718433e8..a58041bf5d 100644 --- a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapCountUDF.java +++ b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapCountUDF.java @@ -30,7 +30,8 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectIn import java.io.IOException; -@Description(name = "bitmap_count", value = "a _FUNC_ b - Returns the number of distinct integers added to the bitmap (e.g., number of bits set)") +@Description(name = "bitmap_count", value = "a _FUNC_ b - Returns the number of distinct integers" + + " added to the bitmap (e.g., number of bits set)") public class BitmapCountUDF extends GenericUDF { private transient BinaryObjectInspector inputOI; diff --git a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapOrUDF.java b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapOrUDF.java index a15d3f0faf..2bc5910ed0 100644 --- a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapOrUDF.java +++ b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapOrUDF.java @@ -30,7 +30,8 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectIn import java.io.IOException; -@Description(name = "bitmap_or", value = "a _FUNC_ b - Compute union of two or more input bitmaps, returns the new bitmap") +@Description(name = "bitmap_or", value = "a _FUNC_ b - Compute" + + " union of two or more input bitmaps, returns the new bitmap") public class BitmapOrUDF extends GenericUDF { private transient BinaryObjectInspector inputOI0; diff --git a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapUnionUDAF.java b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapUnionUDAF.java index 0289c28da5..6f37925564 100644 --- a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapUnionUDAF.java +++ b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapUnionUDAF.java @@ -37,7 +37,8 @@ import java.io.IOException; * bitmap_union. 
* */ -@Description(name = "bitmap_union", value = "_FUNC_(expr) - Calculate the grouped bitmap union , Returns an doris bitmap representation of a column.") +@Description(name = "bitmap_union", value = "_FUNC_(expr) - Calculate the grouped bitmap" + + " union , Returns an doris bitmap representation of a column.") public class BitmapUnionUDAF extends AbstractGenericUDAFResolver { @Override diff --git a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapXorUDF.java b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapXorUDF.java index cb9eebc9b2..9096c3cf9d 100644 --- a/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapXorUDF.java +++ b/fe/hive-udf/src/main/java/org/apache/doris/udf/BitmapXorUDF.java @@ -30,7 +30,8 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectIn import java.io.IOException; -@Description(name = "bitmap_xor", value = "a _FUNC_ b - Compute the symmetric union of two or more input bitmaps, return the new bitmap") +@Description(name = "bitmap_xor", value = "a _FUNC_ b - Compute the symmetric" + + " union of two or more input bitmaps, return the new bitmap") public class BitmapXorUDF extends GenericUDF { private transient BinaryObjectInspector inputOI0; diff --git a/fe/java-udf/src/main/java/org/apache/doris/udf/JMXJsonUtil.java b/fe/java-udf/src/main/java/org/apache/doris/udf/JMXJsonUtil.java index 8997dcd85a..fea9bce2dc 100644 --- a/fe/java-udf/src/main/java/org/apache/doris/udf/JMXJsonUtil.java +++ b/fe/java-udf/src/main/java/org/apache/doris/udf/JMXJsonUtil.java @@ -164,7 +164,7 @@ public class JMXJsonUtil { jg.writeStartObject(); jg.writeStringField("name", oname.toString()); jg.writeStringField("modelerType", code); - MBeanAttributeInfo attrs[] = minfo.getAttributes(); + MBeanAttributeInfo[] attrs = minfo.getAttributes(); for (int i = 0; i < attrs.length; i++) { writeAttribute(jg, oname, attrs[i]); } diff --git a/fe/java-udf/src/main/java/org/apache/doris/udf/JniUtil.java b/fe/java-udf/src/main/java/org/apache/doris/udf/JniUtil.java index b7e24116a1..6510e9b80f 100644 --- a/fe/java-udf/src/main/java/org/apache/doris/udf/JniUtil.java +++ b/fe/java-udf/src/main/java/org/apache/doris/udf/JniUtil.java @@ -51,8 +51,7 @@ import java.util.Map; * Utility class with methods intended for JNI clients */ public class JniUtil { - private final static TBinaryProtocol.Factory protocolFactory_ = - new TBinaryProtocol.Factory(); + private static final TBinaryProtocol.Factory protocolFactory_ = new TBinaryProtocol.Factory(); /** * Initializes the JvmPauseMonitor instance. diff --git a/fe/java-udf/src/main/java/org/apache/doris/udf/UdfExecutor.java b/fe/java-udf/src/main/java/org/apache/doris/udf/UdfExecutor.java index 6ab54339ef..2eed3f0221 100644 --- a/fe/java-udf/src/main/java/org/apache/doris/udf/UdfExecutor.java +++ b/fe/java-udf/src/main/java/org/apache/doris/udf/UdfExecutor.java @@ -49,7 +49,7 @@ public class UdfExecutor { public static final String UDF_FUNCTION_NAME = "evaluate"; // Object to deserialize ctor params from BE. - private final static TBinaryProtocol.Factory PROTOCOL_FACTORY = + private static final TBinaryProtocol.Factory PROTOCOL_FACTORY = new TBinaryProtocol.Factory(); private Object udf; @@ -206,13 +206,11 @@ public class UdfExecutor { init(jarFile, className, retType, parameterTypes); } - // CHECKSTYLE OFF @Override protected void finalize() throws Throwable { close(); super.finalize(); } - // CHECKSTYLE ON /** * Close the class loader we may have created. 
@@ -311,7 +309,8 @@ public class UdfExecutor { return false; } outputOffset += 1; - UdfUtils.UNSAFE.putChar(null, UdfUtils.UNSAFE.getLong(null, outputBufferPtr) + outputOffset - 1, UdfUtils.END_OF_STRING); + UdfUtils.UNSAFE.putChar(null, UdfUtils.UNSAFE.getLong(null, outputBufferPtr) + + outputOffset - 1, UdfUtils.END_OF_STRING); UdfUtils.UNSAFE.putInt(null, UdfUtils.UNSAFE.getLong(null, outputOffsetsPtr) + 4L * row, Integer.parseUnsignedInt(String.valueOf(outputOffset))); } @@ -417,7 +416,8 @@ public class UdfExecutor { return false; } outputOffset += (bytes.length + 1); - UdfUtils.UNSAFE.putChar(UdfUtils.UNSAFE.getLong(null, outputBufferPtr) + outputOffset - 1, UdfUtils.END_OF_STRING); + UdfUtils.UNSAFE.putChar(UdfUtils.UNSAFE.getLong(null, outputBufferPtr) + + outputOffset - 1, UdfUtils.END_OF_STRING); UdfUtils.UNSAFE.putInt(null, UdfUtils.UNSAFE.getLong(null, outputOffsetsPtr) + 4L * row, Integer.parseUnsignedInt(String.valueOf(outputOffset))); UdfUtils.copyMemory(bytes, UdfUtils.BYTE_ARRAY_OFFSET, null, diff --git a/fe/pom.xml b/fe/pom.xml index 43bab8adab..f1fbc031bf 100644 --- a/fe/pom.xml +++ b/fe/pom.xml @@ -69,6 +69,10 @@ under the License. flatten-maven-plugin 1.2.5 + + org.apache.maven.plugins + maven-checkstyle-plugin + org.codehaus.mojo license-maven-plugin @@ -110,34 +114,6 @@ under the License. - - com.diffplug.spotless - spotless-maven-plugin - 2.22.2 - - - - **/jmockit/**/* - - - - - true - 4 - - - check/checkstyle/checkstyle-apache-header.txt - \npackage - - - \\#,org.apache.doris,,java - - - UTF-8 - UNIX - - - org.apache.maven.plugins maven-checkstyle-plugin diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DorisRangePartitioner.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DorisRangePartitioner.java index 1c40d58896..e5bb5eefca 100644 --- a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DorisRangePartitioner.java +++ b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DorisRangePartitioner.java @@ -29,6 +29,7 @@ public class DorisRangePartitioner extends Partitioner { private EtlJobConfig.EtlPartitionInfo partitionInfo; private List partitionRangeKeys; List partitionKeyIndexes; + public DorisRangePartitioner(EtlJobConfig.EtlPartitionInfo partitionInfo, List partitionKeyIndexes, List partitionRangeKeys) { diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DppColumns.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DppColumns.java index ff4ebb8da8..5b5e3f5d3d 100644 --- a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DppColumns.java +++ b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DppColumns.java @@ -21,7 +21,6 @@ import com.google.common.base.Preconditions; import java.io.Serializable; import java.util.ArrayList; -import java.util.Comparator; import java.util.Date; import java.util.List; import java.util.Objects; @@ -107,10 +106,3 @@ class DppColumns implements Comparable, Serializable { + '}'; } } - -class DppColumnsComparator implements Comparator { - @Override - public int compare(DppColumns left, DppColumns right) { - return left.compareTo(right); - } -} diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DppUtils.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DppUtils.java index e28c41eb0b..caf79c1a2b 100644 --- a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DppUtils.java +++ b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/DppUtils.java @@ -40,6 +40,7 @@ import 
java.util.zip.CRC32; public class DppUtils { public static final String BUCKET_ID = "__bucketId__"; + public static Class getClassFromDataType(DataType dataType) { if (dataType == null) { return null; @@ -202,16 +203,19 @@ public class DppUtils { List fields = new ArrayList<>(); for (StructField originField : dstSchema.fields()) { if (binaryColumns.contains(originField.name())) { - fields.add(DataTypes.createStructField(originField.name(), DataTypes.BinaryType, originField.nullable())); + fields.add(DataTypes.createStructField(originField.name(), + DataTypes.BinaryType, originField.nullable())); } else { - fields.add(DataTypes.createStructField(originField.name(), originField.dataType(), originField.nullable())); + fields.add(DataTypes.createStructField(originField.name(), + originField.dataType(), originField.nullable())); } } StructType ret = DataTypes.createStructType(fields); return ret; } - public static StructType createDstTableSchema(List columns, boolean addBucketIdColumn, boolean regardDistinctColumnAsBinary) { + public static StructType createDstTableSchema(List columns, + boolean addBucketIdColumn, boolean regardDistinctColumnAsBinary) { List fields = new ArrayList<>(); if (addBucketIdColumn) { StructField bucketIdField = DataTypes.createStructField(BUCKET_ID, DataTypes.StringType, true); @@ -226,14 +230,17 @@ public class DppUtils { return dstSchema; } - public static List parseColumnsFromPath(String filePath, List columnsFromPath) throws SparkDppException { + public static List parseColumnsFromPath(String filePath, List columnsFromPath) + throws SparkDppException { if (columnsFromPath == null || columnsFromPath.isEmpty()) { return Collections.emptyList(); } String[] strings = filePath.split("/"); if (strings.length < 2) { - System.err.println("Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath); - throw new SparkDppException("Reason: Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath); + System.err.println("Fail to parse columnsFromPath, expected: " + columnsFromPath + + ", filePath: " + filePath); + throw new SparkDppException("Reason: Fail to parse columnsFromPath, expected: " + + columnsFromPath + ", filePath: " + filePath); } String[] columns = new String[columnsFromPath.size()]; int size = 0; @@ -243,13 +250,17 @@ public class DppUtils { continue; } if (str == null || !str.contains("=")) { - System.err.println("Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath); - throw new SparkDppException("Reason: Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath); + System.err.println("Fail to parse columnsFromPath, expected: " + columnsFromPath + + ", filePath: " + filePath); + throw new SparkDppException("Reason: Fail to parse columnsFromPath, expected: " + + columnsFromPath + ", filePath: " + filePath); } String[] pair = str.split("=", 2); if (pair.length != 2) { - System.err.println("Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath); - throw new SparkDppException("Reason: Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath); + System.err.println("Fail to parse columnsFromPath, expected: " + columnsFromPath + + ", filePath: " + filePath); + throw new SparkDppException("Reason: Fail to parse columnsFromPath, expected: " + + columnsFromPath + ", filePath: " + filePath); } int index = columnsFromPath.indexOf(pair[0]); if (index == -1) { @@ -262,8 +273,10 @@ 
public class DppUtils { } } if (size != columnsFromPath.size()) { - System.err.println("Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath); - throw new SparkDppException("Reason: Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath); + System.err.println("Fail to parse columnsFromPath, expected: " + columnsFromPath + + ", filePath: " + filePath); + throw new SparkDppException("Reason: Fail to parse columnsFromPath, expected: " + + columnsFromPath + ", filePath: " + filePath); } return Lists.newArrayList(columns); } diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/GlobalDictBuilder.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/GlobalDictBuilder.java index 6fc12edb0b..056455d289 100644 --- a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/GlobalDictBuilder.java +++ b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/GlobalDictBuilder.java @@ -187,7 +187,8 @@ public class GlobalDictBuilder { String distinctColumnNameTmp = distinctColumnNameOrigin.toString(); globalDictBuildWorkers.add(() -> { // get global dict max value - List maxGlobalDictValueRow = spark.sql(getMaxGlobalDictValueSql(distinctColumnNameTmp)).collectAsList(); + List maxGlobalDictValueRow + = spark.sql(getMaxGlobalDictValueSql(distinctColumnNameTmp)).collectAsList(); if (maxGlobalDictValueRow.size() == 0) { throw new RuntimeException(String.format("get max dict value failed: %s", distinctColumnNameTmp)); } @@ -199,13 +200,16 @@ public class GlobalDictBuilder { maxDictValue = (long) row.get(0); minDictValue = (long) row.get(1); } - LOG.info(" column " + distinctColumnNameTmp + " 's max value in dict is " + maxDictValue + ", min value is " + minDictValue); + LOG.info(" column " + distinctColumnNameTmp + " 's max value in dict is " + + maxDictValue + ", min value is " + minDictValue); // maybe never happened, but we need detect it if (minDictValue < 0) { - throw new RuntimeException(String.format(" column %s 's cardinality has exceed bigint's max value", distinctColumnNameTmp)); + throw new RuntimeException(String.format(" column %s 's cardinality has exceed bigint's max value", + distinctColumnNameTmp)); } - if (veryHighCardinalityColumn.contains(distinctColumnNameTmp) && veryHighCardinalityColumnSplitNum > 1) { + if (veryHighCardinalityColumn.contains(distinctColumnNameTmp) + && veryHighCardinalityColumnSplitNum > 1) { // split distinct key first and then encode with count buildGlobalDictBySplit(maxDictValue, distinctColumnNameTmp); } else { @@ -259,7 +263,8 @@ public class GlobalDictBuilder { } private String getCreateDistinctKeyTableSql() { - return "create table if not exists " + distinctKeyTableName + "(dict_key string) partitioned by (dict_column string) stored as sequencefile "; + return "create table if not exists " + distinctKeyTableName + + "(dict_key string) partitioned by (dict_column string) stored as sequencefile "; } private String getInsertDistinctKeyTableSql(String distinctColumnName, String sourceHiveTable) { @@ -278,7 +283,8 @@ public class GlobalDictBuilder { } private String getMaxGlobalDictValueSql(String distinctColumnName) { - return "select max(dict_value) as max_value,min(dict_value) as min_value from " + globalDictTableName + " where dict_column='" + distinctColumnName + "'"; + return "select max(dict_value) as max_value,min(dict_value) as min_value from " + + globalDictTableName + " where dict_column='" + distinctColumnName + "'"; } private void 
buildGlobalDictBySplit(long maxGlobalDictValue, String distinctColumnName) { @@ -294,9 +300,11 @@ public class GlobalDictBuilder { long currentDatasetStartDictValue = currentMaxDictValue; long splitDistinctValueCount = splitedDistinctValue[i].count(); currentMaxDictValue += splitDistinctValueCount; - String tmpDictTableName = String.format("%s_%s_tmp_dict_%s", i, currentDatasetStartDictValue, distinctColumnName); + String tmpDictTableName = String.format("%s_%s_tmp_dict_%s", i, + currentDatasetStartDictValue, distinctColumnName); distinctKeyMap.put(tmpDictTableName, currentDatasetStartDictValue); - Dataset distinctValueFrame = spark.createDataFrame(splitedDistinctValue[i].toJavaRDD(), getDistinctValueSchema()); + Dataset distinctValueFrame = spark.createDataFrame( + splitedDistinctValue[i].toJavaRDD(), getDistinctValueSchema()); distinctValueFrame.createOrReplaceTempView(tmpDictTableName); } @@ -306,8 +314,10 @@ public class GlobalDictBuilder { private String getSplitBuildGlobalDictSql(Map distinctKeyMap, String distinctColumnName) { StringBuilder sql = new StringBuilder(); - sql.append("insert overwrite table ").append(globalDictTableName).append(" partition(dict_column='").append(distinctColumnName).append("') ") - .append(" select dict_key,dict_value from ").append(globalDictTableName).append(" where dict_column='").append(distinctColumnName).append("' "); + sql.append("insert overwrite table ").append(globalDictTableName) + .append(" partition(dict_column='").append(distinctColumnName).append("') ") + .append(" select dict_key,dict_value from ").append(globalDictTableName) + .append(" where dict_column='").append(distinctColumnName).append("' "); for (Map.Entry entry : distinctKeyMap.entrySet()) { sql.append(" union all select dict_key, CAST((row_number() over(order by dict_key)) as BIGINT) ") .append(String.format("+ CAST(%s as BIGINT) as dict_value from %s", @@ -334,12 +344,15 @@ public class GlobalDictBuilder { private String getBuildGlobalDictSql(long maxGlobalDictValue, String distinctColumnName) { return "insert overwrite table " + globalDictTableName + " partition(dict_column='" + distinctColumnName + "') " - + " select dict_key,dict_value from " + globalDictTableName + " where dict_column='" + distinctColumnName + "' " + + " select dict_key,dict_value from " + globalDictTableName + + " where dict_column='" + distinctColumnName + "' " + " union all select t1.dict_key as dict_key," + "CAST((row_number() over(order by t1.dict_key)) as BIGINT) + " + "CAST(" + maxGlobalDictValue + " as BIGINT) as dict_value from " - + "(select dict_key from " + distinctKeyTableName + " where dict_column='" + distinctColumnName + "' and dict_key is not null)t1 left join " - + " (select dict_key,dict_value from " + globalDictTableName + " where dict_column='" + distinctColumnName + "' )t2 " + + "(select dict_key from " + distinctKeyTableName + + " where dict_column='" + distinctColumnName + "' and dict_key is not null)t1 left join " + + " (select dict_key,dict_value from " + globalDictTableName + + " where dict_column='" + distinctColumnName + "' )t2 " + "on t1.dict_key = t2.dict_key where t2.dict_value is null"; } diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/MinimumCoverageRollupTreeBuilder.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/MinimumCoverageRollupTreeBuilder.java index 02d7e79adf..62d9ce99ae 100644 --- a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/MinimumCoverageRollupTreeBuilder.java +++ 
b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/MinimumCoverageRollupTreeBuilder.java @@ -85,8 +85,8 @@ public class MinimumCoverageRollupTreeBuilder implements RollupTreeBuilder { } } if (!insertIndex(root, indexMetas.get(i), keyColumns, valueColumns)) { - throw new RuntimeException(String.format("can't find a parent rollup for rollup %s, rollup tree is %s", indexMetas.get(i).toString(), - root.toString())); + throw new RuntimeException(String.format("can't find a parent rollup for rollup %s," + + " rollup tree is %s", indexMetas.get(i).toString(), root)); } } return root; diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/SparkDpp.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/SparkDpp.java index 76fa3cf5fa..ed1cbe73a7 100644 --- a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/SparkDpp.java +++ b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/SparkDpp.java @@ -59,12 +59,12 @@ import org.slf4j.LoggerFactory; import scala.Tuple2; import java.io.IOException; -import java.io.UnsupportedEncodingException; import java.math.BigDecimal; import java.math.BigInteger; import java.net.URI; import java.net.URISyntaxException; import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -109,6 +109,7 @@ public final class SparkDpp implements java.io.Serializable { private DppResult dppResult = new DppResult(); Map> tableToBitmapDictColumns = new HashMap<>(); Map> tableToBinaryBitmapColumns = new HashMap<>(); + // just for ut public SparkDpp() {} @@ -133,8 +134,8 @@ public final class SparkDpp implements java.io.Serializable { this.serializableHadoopConf = new SerializableConfiguration(spark.sparkContext().hadoopConfiguration()); } - private JavaPairRDD, Object[]> processRDDAggregate(JavaPairRDD, Object[]> currentPairRDD, RollupTreeNode curNode, - SparkRDDAggregator[] sparkRDDAggregators) throws SparkDppException { + private JavaPairRDD, Object[]> processRDDAggregate(JavaPairRDD, Object[]> currentPairRDD, + RollupTreeNode curNode, SparkRDDAggregator[] sparkRDDAggregators) throws SparkDppException { final boolean isDuplicateTable = !StringUtils.equalsIgnoreCase(curNode.indexMeta.indexType, "AGGREGATE") && !StringUtils.equalsIgnoreCase(curNode.indexMeta.indexType, "UNIQUE"); @@ -152,7 +153,8 @@ public final class SparkDpp implements java.io.Serializable { } if (curNode.indexMeta.isBaseIndex) { - JavaPairRDD, Object[]> result = currentPairRDD.mapToPair(new EncodeBaseAggregateTableFunction(sparkRDDAggregators)) + JavaPairRDD, Object[]> result = currentPairRDD.mapToPair( + new EncodeBaseAggregateTableFunction(sparkRDDAggregators)) .reduceByKey(new AggregateReduceFunction(sparkRDDAggregators), aggregateConcurrency); return result; } else { @@ -337,8 +339,9 @@ public final class SparkDpp implements java.io.Serializable { // get column index map from parent rollup to child rollup // not consider bucketId here - private Pair getColumnIndexInParentRollup(List childRollupKeyColumns, List childRollupValueColumns, - List parentRollupKeyColumns, List parentRollupValueColumns) throws SparkDppException { + private Pair getColumnIndexInParentRollup(List childRollupKeyColumns, + List childRollupValueColumns, List parentRollupKeyColumns, + List parentRollupValueColumns) throws SparkDppException { List keyMap = new ArrayList<>(); List valueMap = new ArrayList<>(); // find column index in parent rollup schema @@ -361,7 +364,8 @@ public final 
class SparkDpp implements java.io.Serializable { } if (keyMap.size() != childRollupKeyColumns.size() || valueMap.size() != childRollupValueColumns.size()) { - throw new SparkDppException(String.format("column map index from child to parent has error, key size src: %s, dst: %s; value size src: %s, dst: %s", + throw new SparkDppException(String.format("column map index from child to parent has error," + + " key size src: %s, dst: %s; value size src: %s, dst: %s", childRollupKeyColumns.size(), keyMap.size(), childRollupValueColumns.size(), valueMap.size())); } @@ -378,24 +382,23 @@ public final class SparkDpp implements java.io.Serializable { // TODO(wb): support decimal round; see be DecimalV2Value::round DecimalParser decimalParser = (DecimalParser) columnParser; BigDecimal srcBigDecimal = (BigDecimal) srcValue; - if (srcValue != null && (decimalParser.getMaxValue().compareTo(srcBigDecimal) < 0 || decimalParser.getMinValue().compareTo(srcBigDecimal) > 0)) { - LOG.warn(String.format("decimal value is not valid for defination, column=%s, value=%s,precision=%s,scale=%s", - etlColumn.columnName, srcValue.toString(), srcBigDecimal.precision(), srcBigDecimal.scale())); + if (srcValue != null && (decimalParser.getMaxValue().compareTo(srcBigDecimal) < 0 + || decimalParser.getMinValue().compareTo(srcBigDecimal) > 0)) { + LOG.warn(String.format("decimal value is not valid for defination, column=%s," + + " value=%s,precision=%s,scale=%s", + etlColumn.columnName, srcValue, srcBigDecimal.precision(), srcBigDecimal.scale())); return false; } break; case "CHAR": case "VARCHAR": // TODO(wb) padding char type - try { - int strSize = 0; - if (srcValue != null && (strSize = srcValue.toString().getBytes("UTF-8").length) > etlColumn.stringLength) { - LOG.warn(String.format("the length of input is too long than schema. column_name:%s,input_str[%s],schema length:%s,actual length:%s", - etlColumn.columnName, row.toString(), etlColumn.stringLength, strSize)); - return false; - } - } catch (UnsupportedEncodingException e) { - LOG.warn("input string value can not encode with utf-8,value=" + srcValue.toString()); + int strSize = 0; + if (srcValue != null && (strSize = srcValue.toString().getBytes(StandardCharsets.UTF_8).length) + > etlColumn.stringLength) { + LOG.warn(String.format("the length of input is too long than schema." 
+ + " column_name:%s,input_str[%s],schema length:%s,actual length:%s", + etlColumn.columnName, row.toString(), etlColumn.stringLength, strSize)); return false; } break; @@ -411,14 +414,10 @@ public final class SparkDpp implements java.io.Serializable { * 3 fill tuple with partition column */ private JavaPairRDD, Object[]> fillTupleWithPartitionColumn(Dataset dataframe, - EtlJobConfig.EtlPartitionInfo partitionInfo, - List partitionKeyIndex, - List partitionRangeKeys, - List keyColumnNames, - List valueColumnNames, - StructType dstTableSchema, - EtlJobConfig.EtlIndex baseIndex, - List validPartitionIds) throws SparkDppException { + EtlJobConfig.EtlPartitionInfo partitionInfo, List partitionKeyIndex, + List partitionRangeKeys, + List keyColumnNames, List valueColumnNames, StructType dstTableSchema, + EtlJobConfig.EtlIndex baseIndex, List validPartitionIds) throws SparkDppException { List distributeColumns = partitionInfo.distributionColumnRefs; Partitioner partitioner = new DorisRangePartitioner(partitionInfo, partitionKeyIndex, partitionRangeKeys); Set validPartitionIndex = new HashSet<>(); @@ -441,60 +440,58 @@ public final class SparkDpp implements java.io.Serializable { // use PairFlatMapFunction instead of PairMapFunction because the there will be // 0 or 1 output row for 1 input row - JavaPairRDD, Object[]> resultPairRDD = dataframe.toJavaRDD().flatMapToPair(new PairFlatMapFunction, Object[]>() { - @Override - public Iterator, Object[]>> call(Row row) throws Exception { - List, Object[]>> result = new ArrayList<>(); - List keyColumns = new ArrayList<>(); - List valueColumns = new ArrayList<>(valueColumnNames.size()); - for (int i = 0; i < keyColumnNames.size(); i++) { - String columnName = keyColumnNames.get(i); - Object columnObject = row.get(row.fieldIndex(columnName)); - if (!validateData(columnObject, baseIndex.getColumn(columnName), parsers.get(i), row)) { + JavaPairRDD, Object[]> resultPairRDD = dataframe.toJavaRDD().flatMapToPair( + (PairFlatMapFunction, Object[]>) row -> { + List, Object[]>> result = new ArrayList<>(); + List keyColumns = new ArrayList<>(); + List valueColumns = new ArrayList<>(valueColumnNames.size()); + for (int i = 0; i < keyColumnNames.size(); i++) { + String columnName = keyColumnNames.get(i); + Object columnObject = row.get(row.fieldIndex(columnName)); + if (!validateData(columnObject, baseIndex.getColumn(columnName), parsers.get(i), row)) { + abnormalRowAcc.add(1); + return result.iterator(); + } + keyColumns.add(columnObject); + } + + for (int i = 0; i < valueColumnNames.size(); i++) { + String columnName = valueColumnNames.get(i); + Object columnObject = row.get(row.fieldIndex(columnName)); + if (!validateData(columnObject, baseIndex.getColumn(columnName), + parsers.get(i + keyColumnNames.size()), row)) { + abnormalRowAcc.add(1); + return result.iterator(); + } + valueColumns.add(columnObject); + } + + DppColumns key = new DppColumns(keyColumns); + int pid = partitioner.getPartition(key); + if (!validPartitionIndex.contains(pid)) { + LOG.warn("invalid partition for row:" + row + ", pid:" + pid); abnormalRowAcc.add(1); - return result.iterator(); - } - keyColumns.add(columnObject); - } + LOG.info("abnormalRowAcc:" + abnormalRowAcc); + if (abnormalRowAcc.value() < 5) { + LOG.info("add row to invalidRows:" + row.toString()); + invalidRows.add(row.toString()); + LOG.info("invalid rows contents:" + invalidRows.value()); + } + } else { + // TODO(wb) support lagreint for hash + long hashValue = DppUtils.getHashValue(row, distributeColumns, dstTableSchema); + 
int bucketId = (int) ((hashValue & 0xffffffff) % partitionInfo.partitions.get(pid).bucketNum); + long partitionId = partitionInfo.partitions.get(pid).partitionId; + // bucketKey is partitionId_bucketId + String bucketKey = partitionId + "_" + bucketId; - for (int i = 0; i < valueColumnNames.size(); i++) { - String columnName = valueColumnNames.get(i); - Object columnObject = row.get(row.fieldIndex(columnName)); - if (!validateData(columnObject, baseIndex.getColumn(columnName), - parsers.get(i + keyColumnNames.size()), row)) { - abnormalRowAcc.add(1); - return result.iterator(); + List tuple = new ArrayList<>(); + tuple.add(bucketKey); + tuple.addAll(keyColumns); + result.add(new Tuple2<>(tuple, valueColumns.toArray())); } - valueColumns.add(columnObject); - } - - DppColumns key = new DppColumns(keyColumns); - int pid = partitioner.getPartition(key); - if (!validPartitionIndex.contains(pid)) { - LOG.warn("invalid partition for row:" + row + ", pid:" + pid); - abnormalRowAcc.add(1); - LOG.info("abnormalRowAcc:" + abnormalRowAcc); - if (abnormalRowAcc.value() < 5) { - LOG.info("add row to invalidRows:" + row.toString()); - invalidRows.add(row.toString()); - LOG.info("invalid rows contents:" + invalidRows.value()); - } - } else { - // TODO(wb) support lagreint for hash - long hashValue = DppUtils.getHashValue(row, distributeColumns, dstTableSchema); - int bucketId = (int) ((hashValue & 0xffffffff) % partitionInfo.partitions.get(pid).bucketNum); - long partitionId = partitionInfo.partitions.get(pid).partitionId; - // bucketKey is partitionId_bucketId - String bucketKey = partitionId + "_" + bucketId; - - List tuple = new ArrayList<>(); - tuple.add(bucketKey); - tuple.addAll(keyColumns); - result.add(new Tuple2<>(tuple, valueColumns.toArray())); - } - return result.iterator(); - } - }); + return result.iterator(); + }); // use bucket number as the parallel number int reduceNum = 0; @@ -513,9 +510,8 @@ public final class SparkDpp implements java.io.Serializable { // do the etl process private Dataset convertSrcDataframeToDstDataframe(EtlJobConfig.EtlIndex baseIndex, - Dataset srcDataframe, - StructType dstTableSchema, - EtlJobConfig.EtlFileGroup fileGroup) throws SparkDppException { + Dataset srcDataframe, StructType dstTableSchema, EtlJobConfig.EtlFileGroup fileGroup) + throws SparkDppException { Dataset dataframe = srcDataframe; StructType srcSchema = dataframe.schema(); Set srcColumnNames = new HashSet<>(); @@ -550,18 +546,24 @@ public final class SparkDpp implements java.io.Serializable { } } if (column.columnType.equalsIgnoreCase("DATE")) { - dataframe = dataframe.withColumn(dstField.name(), dataframe.col(dstField.name()).cast(DataTypes.DateType)); + dataframe = dataframe.withColumn(dstField.name(), + dataframe.col(dstField.name()).cast(DataTypes.DateType)); } else if (column.columnType.equalsIgnoreCase("DATETIME")) { - dataframe = dataframe.withColumn(dstField.name(), dataframe.col(dstField.name()).cast(DataTypes.TimestampType)); + dataframe = dataframe.withColumn(dstField.name(), + dataframe.col(dstField.name()).cast(DataTypes.TimestampType)); } else if (column.columnType.equalsIgnoreCase("BOOLEAN")) { dataframe = dataframe.withColumn(dstField.name(), functions.when(functions.lower(dataframe.col(dstField.name())).equalTo("true"), "1") .when(dataframe.col(dstField.name()).equalTo("1"), "1") .otherwise("0")); - } else if (!column.columnType.equalsIgnoreCase(BITMAP_TYPE) && !dstField.dataType().equals(DataTypes.StringType)) { - dataframe = dataframe.withColumn(dstField.name(), 
dataframe.col(dstField.name()).cast(dstField.dataType())); - } else if (column.columnType.equalsIgnoreCase(BITMAP_TYPE) && dstField.dataType().equals(DataTypes.BinaryType)) { - dataframe = dataframe.withColumn(dstField.name(), dataframe.col(dstField.name()).cast(DataTypes.BinaryType)); + } else if (!column.columnType.equalsIgnoreCase(BITMAP_TYPE) + && !dstField.dataType().equals(DataTypes.StringType)) { + dataframe = dataframe.withColumn(dstField.name(), + dataframe.col(dstField.name()).cast(dstField.dataType())); + } else if (column.columnType.equalsIgnoreCase(BITMAP_TYPE) + && dstField.dataType().equals(DataTypes.BinaryType)) { + dataframe = dataframe.withColumn(dstField.name(), + dataframe.col(dstField.name()).cast(DataTypes.BinaryType)); } if (fileGroup.isNegative && !column.isKey) { // negative load @@ -870,23 +872,25 @@ public final class SparkDpp implements java.io.Serializable { } Dataset dataframe = spark.sql(sql.toString()); - // Note(wb): in current spark load implementation, spark load can't be consistent with doris BE; The reason is as follows + // Note(wb): in current spark load implementation, spark load can't be consistent with doris BE; + // The reason is as follows // For stream load in doris BE, it runs as follow steps: // step 1: type check // step 2: expression calculation // step 3: strict mode check // step 4: nullable column check // BE can do the four steps row by row - // but spark load relies on spark to do step2, so it can only do step 1 for whole dataset and then do step 2 for whole dataset and so on; + // but spark load relies on spark to do step2, so it can only do step 1 for whole dataset + // and then do step 2 for whole dataset and so on; // So in spark load, we first do step 1,3,4,and then do step 2. - dataframe = checkDataFromHiveWithStrictMode(dataframe, baseIndex, fileGroup.columnMappings.keySet(), etlJobConfig.properties.strictMode, - dstTableSchema, dictBitmapColumnSet, binaryBitmapColumnsSet); + dataframe = checkDataFromHiveWithStrictMode(dataframe, baseIndex, fileGroup.columnMappings.keySet(), + etlJobConfig.properties.strictMode, dstTableSchema, dictBitmapColumnSet, binaryBitmapColumnsSet); dataframe = convertSrcDataframeToDstDataframe(baseIndex, dataframe, dstTableSchema, fileGroup); return dataframe; } - private Dataset checkDataFromHiveWithStrictMode( - Dataset dataframe, EtlJobConfig.EtlIndex baseIndex, Set mappingColKeys, boolean isStrictMode, StructType dstTableSchema, + private Dataset checkDataFromHiveWithStrictMode(Dataset dataframe, EtlJobConfig.EtlIndex baseIndex, + Set mappingColKeys, boolean isStrictMode, StructType dstTableSchema, Set dictBitmapColumnSet, Set binaryBitmapColumnsSet) throws SparkDppException { List columnNameNeedCheckArrayList = new ArrayList<>(); List columnParserArrayList = new ArrayList<>(); @@ -912,8 +916,8 @@ public final class SparkDpp implements java.io.Serializable { } } - ColumnParser[] columnParserArray = columnParserArrayList.toArray(new ColumnParser[columnParserArrayList.size()]); - EtlJobConfig.EtlColumn[] columnNameArray = columnNameNeedCheckArrayList.toArray(new EtlJobConfig.EtlColumn[columnNameNeedCheckArrayList.size()]); + ColumnParser[] columnParserArray = columnParserArrayList.toArray(new ColumnParser[0]); + EtlJobConfig.EtlColumn[] columnNameArray = columnNameNeedCheckArrayList.toArray(new EtlJobConfig.EtlColumn[0]); StructType srcSchema = dataframe.schema(); JavaRDD result = dataframe.toJavaRDD().flatMap(new FlatMapFunction() { @@ -934,9 +938,11 @@ public final class SparkDpp implements 
java.io.Serializable { if (value != null && !columnParserArray[i].parse(value.toString())) { if (isStrictMode) { validRow = false; - LOG.warn(String.format("row parsed failed in strict mode, column name %s, src row %s", column.columnName, row.toString())); - // a column parsed failed would be filled null, but if doris column is not allowed null, we should skip this row + LOG.warn(String.format("row parsed failed in strict mode, column name %s, src row %s", + column.columnName, row.toString())); } else if (!column.isAllowNull) { + // a column parsed failed would be filled null, + // but if doris column is not allowed null, we should skip this row validRow = false; LOG.warn("column:" + i + " can not be null. row:" + row.toString()); break; @@ -968,7 +974,8 @@ public final class SparkDpp implements java.io.Serializable { } }); - // here we just check data but not do cast, so data type should be same with src schema which is hive table schema + // here we just check data but not do cast, + // so data type should be same with src schema which is hive table schema return spark.createDataFrame(result, srcSchema); } @@ -1013,7 +1020,8 @@ public final class SparkDpp implements java.io.Serializable { } } } - List partitionRangeKeys = createPartitionRangeKeys(partitionInfo, partitionKeySchema); + List partitionRangeKeys + = createPartitionRangeKeys(partitionInfo, partitionKeySchema); StructType dstTableSchema = DppUtils.createDstTableSchema(baseIndex.columns, false, false); dstTableSchema = DppUtils.replaceBinaryColsInSchema(binaryBitmapColumnSet, dstTableSchema); RollupTreeBuilder rollupTreeParser = new MinimumCoverageRollupTreeBuilder(); @@ -1026,10 +1034,11 @@ public final class SparkDpp implements java.io.Serializable { Dataset fileGroupDataframe = null; EtlJobConfig.SourceType sourceType = fileGroup.sourceType; if (sourceType == EtlJobConfig.SourceType.FILE) { - fileGroupDataframe = loadDataFromFilePaths(spark, baseIndex, filePaths, fileGroup, dstTableSchema); + fileGroupDataframe = loadDataFromFilePaths( + spark, baseIndex, filePaths, fileGroup, dstTableSchema); } else if (sourceType == EtlJobConfig.SourceType.HIVE) { - fileGroupDataframe = loadDataFromHiveTable(spark, fileGroup.dppHiveDbTableName, baseIndex, fileGroup, dstTableSchema, - dictBitmapColumnSet, binaryBitmapColumnSet); + fileGroupDataframe = loadDataFromHiveTable(spark, fileGroup.dppHiveDbTableName, + baseIndex, fileGroup, dstTableSchema, dictBitmapColumnSet, binaryBitmapColumnSet); } else { throw new RuntimeException("Unknown source type: " + sourceType.name()); } diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/SparkRDDAggregator.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/SparkRDDAggregator.java index d5b37ec391..57548443ba 100644 --- a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/SparkRDDAggregator.java +++ b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/dpp/SparkRDDAggregator.java @@ -82,7 +82,8 @@ public abstract class SparkRDDAggregator implements Serializable { case "largeint": return new LargeIntMaxAggregator(); default: - throw new SparkDppException(String.format("unsupported max aggregator for column type:%s", columnType)); + throw new SparkDppException( + String.format("unsupported max aggregator for column type:%s", columnType)); } case "min": switch (columnType) { @@ -102,7 +103,8 @@ public abstract class SparkRDDAggregator implements Serializable { case "largeint": return new LargeIntMinAggregator(); default: - throw new 
SparkDppException(String.format("unsupported min aggregator for column type:%s", columnType)); + throw new SparkDppException( + String.format("unsupported min aggregator for column type:%s", columnType)); } case "sum": switch (columnType) { @@ -123,7 +125,8 @@ public abstract class SparkRDDAggregator implements Serializable { case "decimalv2": return new BigDecimalSumAggregator(); default: - throw new SparkDppException(String.format("unsupported sum aggregator for column type:%s", columnType)); + throw new SparkDppException( + String.format("unsupported sum aggregator for column type:%s", columnType)); } case "replace_if_not_null": return new ReplaceIfNotNullAggregator(); @@ -165,7 +168,8 @@ class EncodeBaseAggregateTableFunction implements PairFunction, Object[]>, List, Object[]> { +class EncodeRollupAggregateTableFunction + implements PairFunction, Object[]>, List, Object[]> { Pair columnIndexInParentRollup; @@ -174,7 +178,8 @@ class EncodeRollupAggregateTableFunction implements PairFunction, Object[]> call(Tuple2, Object[]> parentRollupKeyValuePair) throws Exception { + public Tuple2, Object[]> call(Tuple2, Object[]> parentRollupKeyValuePair) + throws Exception { Integer[] keyColumnIndexMap = columnIndexInParentRollup.getKey(); Integer[] valueColumnIndexMap = columnIndexInParentRollup.getValue(); diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/etl/EtlJobConfig.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/etl/EtlJobConfig.java index 43c0b73d15..7c53c1c7ab 100644 --- a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/etl/EtlJobConfig.java +++ b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/etl/EtlJobConfig.java @@ -137,7 +137,8 @@ public class EtlJobConfig implements Serializable { // hdfsEtlPath/jobs/dbId/loadLabel/PendingTaskSignature private static final String ETL_OUTPUT_PATH_FORMAT = "%s/jobs/%d/%s/%d"; - private static final String ETL_OUTPUT_FILE_NAME_DESC_V1 = "version.label.tableId.partitionId.indexId.bucket.schemaHash.parquet"; + private static final String ETL_OUTPUT_FILE_NAME_DESC_V1 + = "version.label.tableId.partitionId.indexId.bucket.schemaHash.parquet"; // tableId.partitionId.indexId.bucket.schemaHash public static final String TABLET_META_FORMAT = "%d.%d.%d.%d.%d"; public static final String ETL_OUTPUT_FILE_FORMAT = "parquet"; @@ -189,7 +190,8 @@ public class EtlJobConfig implements Serializable { } public static String getOutputFilePattern(String loadLabel, FilePatternVersion filePatternVersion) { - return String.format("%s.%s.%s.%s", filePatternVersion.name(), loadLabel, TABLET_META_FORMAT, ETL_OUTPUT_FILE_FORMAT); + return String.format("%s.%s.%s.%s", filePatternVersion.name(), loadLabel, + TABLET_META_FORMAT, ETL_OUTPUT_FILE_FORMAT); } public static String getDppResultFilePath(String outputPath) { @@ -491,7 +493,8 @@ public class EtlJobConfig implements Serializable { public Map hiveTableProperties; // hive db table used in dpp, not serialized - // set with hiveDbTableName (no bitmap column) or IntermediateHiveTable (created by global dict builder) in spark etl job + // set with hiveDbTableName (no bitmap column) or IntermediateHiveTable (created by global dict builder) + // in spark etl job public String dppHiveDbTableName; // for data infile path diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/etl/SparkEtlJob.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/etl/SparkEtlJob.java index 9133a04a21..ab558811f5 100644 --- 
a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/etl/SparkEtlJob.java +++ b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/etl/SparkEtlJob.java @@ -142,10 +142,13 @@ public class SparkEtlJob { tableToBinaryBitmapColumns.put(entry.getKey(), binaryBitmapColumns); } } - LOG.info("init hiveSourceTables: " + hiveSourceTables + ", tableToBitmapDictColumns: " + tableToBitmapDictColumns); + LOG.info("init hiveSourceTables: " + hiveSourceTables + + ", tableToBitmapDictColumns: " + tableToBitmapDictColumns); // spark etl must have only one table with bitmap type column to process. - if (hiveSourceTables.size() > 1 || tableToBitmapDictColumns.size() > 1 || tableToBinaryBitmapColumns.size() > 1) { + if (hiveSourceTables.size() > 1 + || tableToBitmapDictColumns.size() > 1 + || tableToBinaryBitmapColumns.size() > 1) { throw new Exception("spark etl job must have only one hive table with bitmap type column to process"); } } @@ -180,7 +183,8 @@ public class SparkEtlJob { String taskId = etlJobConfig.outputPath.substring(etlJobConfig.outputPath.lastIndexOf("/") + 1); String globalDictTableName = String.format(EtlJobConfig.GLOBAL_DICT_TABLE_NAME, tableId); String distinctKeyTableName = String.format(EtlJobConfig.DISTINCT_KEY_TABLE_NAME, tableId, taskId); - String dorisIntermediateHiveTable = String.format(EtlJobConfig.DORIS_INTERMEDIATE_HIVE_TABLE_NAME, tableId, taskId); + String dorisIntermediateHiveTable = String.format( + EtlJobConfig.DORIS_INTERMEDIATE_HIVE_TABLE_NAME, tableId, taskId); String sourceHiveFilter = fileGroup.where; // others @@ -197,10 +201,10 @@ public class SparkEtlJob { + ", globalDictTableName: " + globalDictTableName + ", dorisIntermediateHiveTable: " + dorisIntermediateHiveTable); try { - GlobalDictBuilder globalDictBuilder = new GlobalDictBuilder( - dictColumnMap, dorisOlapTableColumnList, mapSideJoinColumns, sourceHiveDBTableName, - sourceHiveFilter, dorisHiveDB, distinctKeyTableName, globalDictTableName, dorisIntermediateHiveTable, - buildConcurrency, veryHighCardinalityColumn, veryHighCardinalityColumnSplitNum, spark); + GlobalDictBuilder globalDictBuilder = new GlobalDictBuilder(dictColumnMap, dorisOlapTableColumnList, + mapSideJoinColumns, sourceHiveDBTableName, sourceHiveFilter, dorisHiveDB, distinctKeyTableName, + globalDictTableName, dorisIntermediateHiveTable, buildConcurrency, veryHighCardinalityColumn, + veryHighCardinalityColumnSplitNum, spark); globalDictBuilder.createHiveIntermediateTable(); globalDictBuilder.extractDistinctColumn(); globalDictBuilder.buildGlobalDict();