diff --git a/fe/be-java-extensions/avro-scanner/src/main/java/org/apache/doris/avro/AvroReader.java b/fe/be-java-extensions/avro-scanner/src/main/java/org/apache/doris/avro/AvroReader.java index 50b647361b..ac8d4fd621 100644 --- a/fe/be-java-extensions/avro-scanner/src/main/java/org/apache/doris/avro/AvroReader.java +++ b/fe/be-java-extensions/avro-scanner/src/main/java/org/apache/doris/avro/AvroReader.java @@ -63,7 +63,9 @@ public abstract class AvroReader { protected void openSchemaReader() throws IOException { InputStream inputStream = new BufferedInputStream(fileSystem.open(path)); schemaReader = new DataFileStream<>(inputStream, new GenericDatumReader<>()); - LOG.debug("success open avro schema reader."); + if (LOG.isDebugEnabled()) { + LOG.debug("success open avro schema reader."); + } } protected void openDataReader(AvroFileContext avroFileContext) throws IOException { @@ -72,7 +74,9 @@ public abstract class AvroReader { FileSplit fileSplit = new FileSplit(path, avroFileContext.getSplitStartOffset(), avroFileContext.getSplitSize(), job); dataReader = new AvroRecordReader<>(job, fileSplit); - LOG.debug("success open avro data reader."); + if (LOG.isDebugEnabled()) { + LOG.debug("success open avro data reader."); + } } protected void projectionSchema(JobConf job, AvroFileContext avroFileContext) { @@ -99,7 +103,9 @@ public abstract class AvroReader { projectionSchema = avroSchema; } AvroJob.setInputSchema(job, projectionSchema); - LOG.debug("projection avro schema is:" + projectionSchema.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("projection avro schema is:" + projectionSchema.toString()); + } } } diff --git a/fe/be-java-extensions/java-common/src/main/java/org/apache/doris/common/jni/utils/JMXJsonUtil.java b/fe/be-java-extensions/java-common/src/main/java/org/apache/doris/common/jni/utils/JMXJsonUtil.java index 02cb53232d..7046c9e2cd 100644 --- a/fe/be-java-extensions/java-common/src/main/java/org/apache/doris/common/jni/utils/JMXJsonUtil.java +++ b/fe/be-java-extensions/java-common/src/main/java/org/apache/doris/common/jni/utils/JMXJsonUtil.java @@ -202,7 +202,9 @@ public class JMXJsonUtil { } catch (RuntimeErrorException e) { // RuntimeErrorException happens when an unexpected failure occurs in getAttribute // for example https://issues.apache.org/jira/browse/DAEMON-120 - LOG.debug("getting attribute " + attName + " of " + oname + " threw an exception", e); + if (LOG.isDebugEnabled()) { + LOG.debug("getting attribute " + attName + " of " + oname + " threw an exception", e); + } return; } catch (AttributeNotFoundException e) { //Ignored the attribute was not found, which should never happen because the bean diff --git a/fe/be-java-extensions/java-udf/src/main/java/org/apache/doris/udf/BaseExecutor.java b/fe/be-java-extensions/java-udf/src/main/java/org/apache/doris/udf/BaseExecutor.java index 1e996b720e..8ad171d601 100644 --- a/fe/be-java-extensions/java-udf/src/main/java/org/apache/doris/udf/BaseExecutor.java +++ b/fe/be-java-extensions/java-udf/src/main/java/org/apache/doris/udf/BaseExecutor.java @@ -138,7 +138,9 @@ public abstract class BaseExecutor { classLoader.close(); } catch (IOException e) { // Log and ignore. 
- LOG.debug("Error closing the URLClassloader.", e); + if (LOG.isDebugEnabled()) { + LOG.debug("Error closing the URLClassloader.", e); + } } } // We are now un-usable (because the class loader has been diff --git a/fe/be-java-extensions/java-udf/src/main/java/org/apache/doris/udf/UdafExecutor.java b/fe/be-java-extensions/java-udf/src/main/java/org/apache/doris/udf/UdafExecutor.java index caae5ef820..cf0021c7db 100644 --- a/fe/be-java-extensions/java-udf/src/main/java/org/apache/doris/udf/UdafExecutor.java +++ b/fe/be-java-extensions/java-udf/src/main/java/org/apache/doris/udf/UdafExecutor.java @@ -318,7 +318,9 @@ public class UdafExecutor extends BaseExecutor { Pair returnType = UdfUtils.setReturnType(funcRetType, methods[idx].getReturnType()); if (!returnType.first) { - LOG.debug("result function set return parameterTypes has error"); + if (LOG.isDebugEnabled()) { + LOG.debug("result function set return parameterTypes has error"); + } } else { retType = returnType.second; retClass = methods[idx].getReturnType(); @@ -330,14 +332,18 @@ public class UdafExecutor extends BaseExecutor { addIndex = methodAccess.getIndex(UDAF_ADD_FUNCTION); argClass = methods[idx].getParameterTypes(); if (argClass.length != parameterTypes.length + 1) { - LOG.debug("add function parameterTypes length not equal " + argClass.length + " " - + parameterTypes.length + " " + methods[idx].getName()); + if (LOG.isDebugEnabled()) { + LOG.debug("add function parameterTypes length not equal " + argClass.length + " " + + parameterTypes.length + " " + methods[idx].getName()); + } } if (!(parameterTypes.length == 0)) { Pair inputType = UdfUtils.setArgTypes(parameterTypes, argClass, true); if (!inputType.first) { - LOG.debug("add function set arg parameterTypes has error"); + if (LOG.isDebugEnabled()) { + LOG.debug("add function set arg parameterTypes has error"); + } } else { argTypes = inputType.second; } diff --git a/fe/be-java-extensions/java-udf/src/main/java/org/apache/doris/udf/UdfExecutor.java b/fe/be-java-extensions/java-udf/src/main/java/org/apache/doris/udf/UdfExecutor.java index 76801ba4bb..7e44cd3e42 100644 --- a/fe/be-java-extensions/java-udf/src/main/java/org/apache/doris/udf/UdfExecutor.java +++ b/fe/be-java-extensions/java-udf/src/main/java/org/apache/doris/udf/UdfExecutor.java @@ -142,7 +142,9 @@ public class UdfExecutor extends BaseExecutor { String className = request.fn.scalar_fn.symbol; ArrayList signatures = Lists.newArrayList(); try { - LOG.debug("Loading UDF '" + className + "' from " + jarPath); + if (LOG.isDebugEnabled()) { + LOG.debug("Loading UDF '" + className + "' from " + jarPath); + } ClassLoader loader; if (jarPath != null) { // Save for cleanup. 
@@ -186,7 +188,9 @@ public class UdfExecutor extends BaseExecutor { retType = returnType.second; } argTypes = new JavaUdfDataType[0]; - LOG.debug("Loaded UDF '" + className + "' from " + jarPath); + if (LOG.isDebugEnabled()) { + LOG.debug("Loaded UDF '" + className + "' from " + jarPath); + } return; } returnType = UdfUtils.setReturnType(funcRetType, m.getReturnType()); @@ -203,7 +207,9 @@ public class UdfExecutor extends BaseExecutor { } else { argTypes = inputType.second; } - LOG.debug("Loaded UDF '" + className + "' from " + jarPath); + if (LOG.isDebugEnabled()) { + LOG.debug("Loaded UDF '" + className + "' from " + jarPath); + } retType.setKeyType(keyType); retType.setValueType(valueType); return; diff --git a/fe/be-java-extensions/paimon-scanner/src/main/java/org/apache/doris/paimon/PaimonJniScanner.java b/fe/be-java-extensions/paimon-scanner/src/main/java/org/apache/doris/paimon/PaimonJniScanner.java index fce74a27dc..69ec49e336 100644 --- a/fe/be-java-extensions/paimon-scanner/src/main/java/org/apache/doris/paimon/PaimonJniScanner.java +++ b/fe/be-java-extensions/paimon-scanner/src/main/java/org/apache/doris/paimon/PaimonJniScanner.java @@ -64,7 +64,9 @@ public class PaimonJniScanner extends JniScanner { public PaimonJniScanner(int batchSize, Map params) { this.classLoader = this.getClass().getClassLoader(); - LOG.debug("params:{}", params); + if (LOG.isDebugEnabled()) { + LOG.debug("params:{}", params); + } this.params = params; String[] requiredFields = params.get("required_fields").split(","); String[] requiredTypes = params.get("columns_types").split("#"); @@ -117,13 +119,17 @@ public class PaimonJniScanner extends JniScanner { private List getPredicates() { List predicates = PaimonScannerUtils.decodeStringToObject(paimonPredicate); - LOG.debug("predicates:{}", predicates); + if (LOG.isDebugEnabled()) { + LOG.debug("predicates:{}", predicates); + } return predicates; } private Split getSplit() { Split split = PaimonScannerUtils.decodeStringToObject(paimonSplit); - LOG.debug("split:{}", split); + if (LOG.isDebugEnabled()) { + LOG.debug("split:{}", split); + } return split; } @@ -201,7 +207,9 @@ public class PaimonJniScanner extends JniScanner { } this.table = tableExt.getTable(); paimonAllFieldNames = PaimonScannerUtils.fieldNames(this.table.rowType()); - LOG.debug("paimonAllFieldNames:{}", paimonAllFieldNames); + if (LOG.isDebugEnabled()) { + LOG.debug("paimonAllFieldNames:{}", paimonAllFieldNames); + } } } diff --git a/fe/fe-common/src/main/java/org/apache/doris/catalog/PrimitiveType.java b/fe/fe-common/src/main/java/org/apache/doris/catalog/PrimitiveType.java index 772181447c..cc51a28a41 100644 --- a/fe/fe-common/src/main/java/org/apache/doris/catalog/PrimitiveType.java +++ b/fe/fe-common/src/main/java/org/apache/doris/catalog/PrimitiveType.java @@ -1006,4 +1006,3 @@ public enum PrimitiveType { } } } - diff --git a/fe/fe-common/src/main/java/org/apache/doris/catalog/Type.java b/fe/fe-common/src/main/java/org/apache/doris/catalog/Type.java index ecc394e499..96caba0e4b 100644 --- a/fe/fe-common/src/main/java/org/apache/doris/catalog/Type.java +++ b/fe/fe-common/src/main/java/org/apache/doris/catalog/Type.java @@ -2322,4 +2322,3 @@ public abstract class Type { return typeMap.getOrDefault(typeName, Type.UNSUPPORTED); } } - diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/VariableAnnotation.java b/fe/fe-common/src/main/java/org/apache/doris/common/VariableAnnotation.java index 63ce08a486..1f8c1f3a99 100644 --- 
a/fe/fe-common/src/main/java/org/apache/doris/common/VariableAnnotation.java +++ b/fe/fe-common/src/main/java/org/apache/doris/common/VariableAnnotation.java @@ -38,4 +38,3 @@ public enum VariableAnnotation { return prefix; } } - diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/io/LimitInputStream.java b/fe/fe-common/src/main/java/org/apache/doris/common/io/LimitInputStream.java index 0138b66945..38b9a21fb5 100644 --- a/fe/fe-common/src/main/java/org/apache/doris/common/io/LimitInputStream.java +++ b/fe/fe-common/src/main/java/org/apache/doris/common/io/LimitInputStream.java @@ -51,7 +51,9 @@ public class LimitInputStream extends InputStream { throw new IOException("InputStream is null"); } speed = limitspeed; - LOG.debug("LimitinputStream limit speed: {}", speed); + if (LOG.isDebugEnabled()) { + LOG.debug("LimitinputStream limit speed: {}", speed); + } this.in = in; bytesReadTotal = 0; bstart = false; diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/io/LimitOutputStream.java b/fe/fe-common/src/main/java/org/apache/doris/common/io/LimitOutputStream.java index e71af2dbbd..8b8d781139 100644 --- a/fe/fe-common/src/main/java/org/apache/doris/common/io/LimitOutputStream.java +++ b/fe/fe-common/src/main/java/org/apache/doris/common/io/LimitOutputStream.java @@ -52,7 +52,9 @@ public class LimitOutputStream extends OutputStream { throw new IOException("OutputStream is null"); } speed = limitspeed; - LOG.debug("LimitOutputStream limit speed: {}", speed); + if (LOG.isDebugEnabled()) { + LOG.debug("LimitOutputStream limit speed: {}", speed); + } this.out = out; bytesWriteTotal = 0; bstart = false; diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/ConstructorReflection.java b/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/ConstructorReflection.java index 1636554fbc..2fce0e5617 100644 --- a/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/ConstructorReflection.java +++ b/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/ConstructorReflection.java @@ -158,4 +158,3 @@ public final class ConstructorReflection { } } } - diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/FieldReflection.java b/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/FieldReflection.java index 084b5bec1a..f37aedee00 100644 --- a/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/FieldReflection.java +++ b/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/FieldReflection.java @@ -285,4 +285,3 @@ public final class FieldReflection { } } - diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/ParameterReflection.java b/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/ParameterReflection.java index 9c47ffe5d8..84a54dfde5 100644 --- a/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/ParameterReflection.java +++ b/fe/fe-common/src/main/java/org/apache/doris/common/jmockit/ParameterReflection.java @@ -165,4 +165,3 @@ public final class ParameterReflection { || secondType.isPrimitive() && secondType == AutoType.getPrimitiveType(firstType); } } - diff --git a/fe/fe-core/src/main/java/com/aliyun/datalake/metastore/hive2/ProxyMetaStoreClient.java b/fe/fe-core/src/main/java/com/aliyun/datalake/metastore/hive2/ProxyMetaStoreClient.java index 21b6485c5b..7c91856806 100644 --- a/fe/fe-core/src/main/java/com/aliyun/datalake/metastore/hive2/ProxyMetaStoreClient.java +++ b/fe/fe-core/src/main/java/com/aliyun/datalake/metastore/hive2/ProxyMetaStoreClient.java @@ -2190,4 +2190,4 @@ 
public class ProxyMetaStoreClient implements IMetaStoreClient { return FunctionalUtils.call(client, Optional.empty(), allowFailure, consumer, this.readWriteClientType, actionName, parameters); } -} \ No newline at end of file +} diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueDecoratorBase.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueDecoratorBase.java index 21be9a75bf..c2eeff4f1f 100644 --- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueDecoratorBase.java +++ b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueDecoratorBase.java @@ -1543,4 +1543,3 @@ public class AWSGlueDecoratorBase implements AWSGlue { return null; } } - diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastore.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastore.java index ec74797bfa..0973576ac5 100644 --- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastore.java +++ b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastore.java @@ -130,4 +130,4 @@ public interface AWSGlueMetastore { String tableName, List columnStatistics ); -} \ No newline at end of file +} diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreBaseDecorator.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreBaseDecorator.java index e8da0056b2..1494677c4b 100644 --- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreBaseDecorator.java +++ b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreBaseDecorator.java @@ -195,4 +195,4 @@ public class AWSGlueMetastoreBaseDecorator implements AWSGlueMetastore { return awsGlueMetastore.updateTableColumnStatistics(dbName, tableName, columnStatistics); } -} \ No newline at end of file +} diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreCacheDecorator.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreCacheDecorator.java index 7ef0280e15..158e34a3ac 100644 --- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreCacheDecorator.java +++ b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreCacheDecorator.java @@ -182,4 +182,4 @@ public class AWSGlueMetastoreCacheDecorator extends AWSGlueMetastoreBaseDecorato return Objects.hash(dbName, tableName); } } -} \ No newline at end of file +} diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreFactory.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreFactory.java index ad0353d096..35220726e3 100644 --- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreFactory.java +++ b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/AWSGlueMetastoreFactory.java @@ -44,4 +44,4 @@ public class AWSGlueMetastoreFactory { boolean tableCacheEnabled = conf.getBoolean(AWS_GLUE_TABLE_CACHE_ENABLE, false); return (databaseCacheEnabled || tableCacheEnabled); } -} \ No newline at end of file +} diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/DefaultAWSGlueMetastore.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/DefaultAWSGlueMetastore.java index 7569139251..78fa1bc3fb 100644 --- 
a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/DefaultAWSGlueMetastore.java +++ b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/metastore/DefaultAWSGlueMetastore.java @@ -659,4 +659,4 @@ public class DefaultAWSGlueMetastore implements AWSGlueMetastore { } return columnStatisticsErrors; } -} \ No newline at end of file +} diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/util/AWSGlueConfig.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/util/AWSGlueConfig.java index 16e99ca40e..eeef8ab84d 100644 --- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/util/AWSGlueConfig.java +++ b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/util/AWSGlueConfig.java @@ -61,4 +61,4 @@ public final class AWSGlueConfig { public static final String AWS_GLUE_ACCESS_KEY = "aws.glue.access-key"; public static final String AWS_GLUE_SECRET_KEY = "aws.glue.secret-key"; public static final String AWS_GLUE_SESSION_TOKEN = "aws.glue.session-token"; -} \ No newline at end of file +} diff --git a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/util/BatchCreatePartitionsHelper.java b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/util/BatchCreatePartitionsHelper.java index ba138babf1..f5dd987234 100644 --- a/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/util/BatchCreatePartitionsHelper.java +++ b/fe/fe-core/src/main/java/com/amazonaws/glue/catalog/util/BatchCreatePartitionsHelper.java @@ -150,4 +150,4 @@ public final class BatchCreatePartitionsHelper { return partitionsFailed; } -} \ No newline at end of file +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java b/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java index 6425230b14..c5e7f33f99 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java @@ -445,8 +445,10 @@ public class MaterializedViewHandler extends AlterHandler { mvJob.addMVIndex(partitionId, mvIndex); - LOG.debug("create materialized view index {} based on index {} in partition {}", - mvIndexId, baseIndexId, partitionId); + if (LOG.isDebugEnabled()) { + LOG.debug("create materialized view index {} based on index {} in partition {}", + mvIndexId, baseIndexId, partitionId); + } } // end for partitions LOG.info("finished to create materialized view job: {}", mvJob.getJobId()); @@ -607,7 +609,9 @@ public class MaterializedViewHandler extends AlterHandler { column.setUniqueId(Column.COLUMN_UNIQUE_ID_INIT_VALUE); }); } - LOG.debug("lightSchemaChange:{}, newMVColumns:{}", olapTable.getEnableLightSchemaChange(), newMVColumns); + if (LOG.isDebugEnabled()) { + LOG.debug("lightSchemaChange:{}, newMVColumns:{}", olapTable.getEnableLightSchemaChange(), newMVColumns); + } return newMVColumns; } @@ -851,8 +855,11 @@ public class MaterializedViewHandler extends AlterHandler { column.setUniqueId(Column.COLUMN_UNIQUE_ID_INIT_VALUE); }); } - LOG.debug("lightSchemaChange:{}, rollupSchema:{}, baseSchema:{}", - olapTable.getEnableLightSchemaChange(), rollupSchema, olapTable.getSchemaByIndexId(baseIndexId, true)); + if (LOG.isDebugEnabled()) { + LOG.debug("lightSchemaChange:{}, rollupSchema:{}, baseSchema:{}", + olapTable.getEnableLightSchemaChange(), + rollupSchema, olapTable.getSchemaByIndexId(baseIndexId, true)); + } return rollupSchema; } @@ -1125,9 +1132,11 @@ public class MaterializedViewHandler extends AlterHandler { tableRunningJobSet.add(jobId); shouldJobRun = true; } else 
{ - LOG.debug("number of running alter job {} in table {} exceed limit {}. job {} is suspended", - tableRunningJobSet.size(), rollupJobV2.getTableId(), - Config.max_running_rollup_job_num_per_table, rollupJobV2.getJobId()); + if (LOG.isDebugEnabled()) { + LOG.debug("number of running alter job {} in table {} exceed limit {}. job {} is suspended", + tableRunningJobSet.size(), rollupJobV2.getTableId(), + Config.max_running_rollup_job_num_per_table, rollupJobV2.getJobId()); + } shouldJobRun = false; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java b/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java index 4ad9e0fcb7..f60a47632a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java @@ -609,8 +609,10 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable { tbl.getIndexMetaByIndexId(rollupIndexId).setMaxColUniqueId(maxColUniqueId); - LOG.debug("rollupIndexId:{}, maxColUniqueId:{}, indexIdToSchema:{}", rollupIndexId, maxColUniqueId, - tbl.getIndexIdToSchema(true)); + if (LOG.isDebugEnabled()) { + LOG.debug("rollupIndexId:{}, maxColUniqueId:{}, indexIdToSchema:{}", rollupIndexId, maxColUniqueId, + tbl.getIndexIdToSchema(true)); + } tbl.rebuildFullSchema(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java index 81e8984780..19b2f132d2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java @@ -967,7 +967,9 @@ public class SchemaChangeHandler extends AlterHandler { //type key column do not allow light schema change. if (newColumn.isKey()) { - LOG.debug("newColumn: {}, isKey()==true", newColumn); + if (LOG.isDebugEnabled()) { + LOG.debug("newColumn: {}, isKey()==true", newColumn); + } lightSchemaChange = false; } @@ -1179,7 +1181,9 @@ public class SchemaChangeHandler extends AlterHandler { // value modIndexSchema.add(toAddColumn); } - LOG.debug("newColumn setUniqueId({}), modIndexSchema:{}", newColumnUniqueId, modIndexSchema); + if (LOG.isDebugEnabled()) { + LOG.debug("newColumn setUniqueId({}), modIndexSchema:{}", newColumnUniqueId, modIndexSchema); + } } private void checkIndexExists(OlapTable olapTable, String targetIndexName) throws DdlException { @@ -1376,11 +1380,15 @@ public class SchemaChangeHandler extends AlterHandler { } if (!needAlter) { - LOG.debug("index[{}] is not changed. ignore", alterIndexId); + if (LOG.isDebugEnabled()) { + LOG.debug("index[{}] is not changed. ignore", alterIndexId); + } continue; } - LOG.debug("index[{}] is changed. start checking...", alterIndexId); + if (LOG.isDebugEnabled()) { + LOG.debug("index[{}] is changed. start checking...", alterIndexId); + } // 1. check order: a) has key; b) value after key boolean meetValue = false; boolean hasKey = false; @@ -1473,13 +1481,17 @@ public class SchemaChangeHandler extends AlterHandler { // 5. 
calc short key short newShortKeyColumnCount = Env.calcShortKeyColumnCount(alterSchema, indexIdToProperties.get(alterIndexId), !olapTable.isDuplicateWithoutKey()); - LOG.debug("alter index[{}] short key column count: {}", alterIndexId, newShortKeyColumnCount); + if (LOG.isDebugEnabled()) { + LOG.debug("alter index[{}] short key column count: {}", alterIndexId, newShortKeyColumnCount); + } indexIdToShortKeyColumnCount.put(alterIndexId, newShortKeyColumnCount); // 6. store the changed columns for edit log changedIndexIdToSchema.put(alterIndexId, alterSchema); - LOG.debug("schema change[{}-{}-{}] check pass.", dbId, tableId, alterIndexId); + if (LOG.isDebugEnabled()) { + LOG.debug("schema change[{}-{}-{}] check pass.", dbId, tableId, alterIndexId); + } } // end for indices if (changedIndexIdToSchema.isEmpty() && !hasIndexChange) { @@ -1802,14 +1814,18 @@ public class SchemaChangeHandler extends AlterHandler { @Override public int getAsInt() { pendingMaxColUniqueId++; - LOG.debug("index id:{}, pendingMaxColUniqueId:{}", indexId, pendingMaxColUniqueId); + if (LOG.isDebugEnabled()) { + LOG.debug("index id:{}, pendingMaxColUniqueId:{}", indexId, pendingMaxColUniqueId); + } return pendingMaxColUniqueId; } }; } colUniqueIdSupplierMap.put(entry.getKey(), colUniqueIdSupplier); } - LOG.debug("in process indexSchemaMap:{}", indexSchemaMap); + if (LOG.isDebugEnabled()) { + LOG.debug("in process indexSchemaMap:{}", indexSchemaMap); + } List newIndexes = olapTable.getCopiedIndexes(); List alterIndexes = new ArrayList<>(); @@ -2009,10 +2025,12 @@ public class SchemaChangeHandler extends AlterHandler { } } // end for alter clauses - LOG.debug("table: {}({}), lightSchemaChange: {}, lightIndexChange: {}," - + " buildIndexChange: {}, indexSchemaMap:{}", - olapTable.getName(), olapTable.getId(), lightSchemaChange, - lightIndexChange, buildIndexChange, indexSchemaMap); + if (LOG.isDebugEnabled()) { + LOG.debug("table: {}({}), lightSchemaChange: {}, lightIndexChange: {}," + + " buildIndexChange: {}, indexSchemaMap:{}", + olapTable.getName(), olapTable.getId(), lightSchemaChange, + lightIndexChange, buildIndexChange, indexSchemaMap); + } if (lightSchemaChange) { long jobId = Env.getCurrentEnv().getNextId(); @@ -2448,13 +2466,17 @@ public class SchemaChangeHandler extends AlterHandler { continue; } jobList.add(job); - LOG.debug("add build index job {} on table {} for specific id", jobId, tableName); + if (LOG.isDebugEnabled()) { + LOG.debug("add build index job {} on table {} for specific id", jobId, tableName); + } } } else { for (IndexChangeJob job : indexChangeJobs.values()) { if (!job.isDone() && job.getTableId() == olapTable.getId()) { jobList.add(job); - LOG.debug("add build index job {} on table {} for all", job.getJobId(), tableName); + if (LOG.isDebugEnabled()) { + LOG.debug("add build index job {} on table {} for all", job.getJobId(), tableName); + } } } } @@ -2466,7 +2488,9 @@ public class SchemaChangeHandler extends AlterHandler { if (jobList.size() > 0) { for (IndexChangeJob job : jobList) { long jobId = job.getJobId(); - LOG.debug("cancel build index job {} on table {}", jobId, tableName); + if (LOG.isDebugEnabled()) { + LOG.debug("cancel build index job {} on table {}", jobId, tableName); + } if (!job.cancel("user cancelled")) { LOG.warn("cancel build index job {} on table {} failed", jobId, tableName); throw new DdlException("Job can not be cancelled. 
State: " + job.getJobState()); @@ -2630,7 +2654,9 @@ public class SchemaChangeHandler extends AlterHandler { long jobId, boolean isReplay) throws DdlException { - LOG.debug("indexSchemaMap:{}, indexes:{}", indexSchemaMap, indexes); + if (LOG.isDebugEnabled()) { + LOG.debug("indexSchemaMap:{}, indexes:{}", indexSchemaMap, indexes); + } // for bitmapIndex boolean hasIndexChange = false; Set newSet = new HashSet<>(indexes); @@ -2684,7 +2710,9 @@ public class SchemaChangeHandler extends AlterHandler { if (!isReplay) { TableAddOrDropInvertedIndicesInfo info = new TableAddOrDropInvertedIndicesInfo(rawSql, db.getId(), olapTable.getId(), indexSchemaMap, indexes, alterIndexes, isDropIndex, jobId); - LOG.debug("logModifyTableAddOrDropInvertedIndices info:{}", info); + if (LOG.isDebugEnabled()) { + LOG.debug("logModifyTableAddOrDropInvertedIndices info:{}", info); + } Env.getCurrentEnv().getEditLog().logModifyTableAddOrDropInvertedIndices(info); if (isDropIndex) { @@ -2708,7 +2736,9 @@ public class SchemaChangeHandler extends AlterHandler { if (!isReplay) { TableAddOrDropColumnsInfo info = new TableAddOrDropColumnsInfo(rawSql, db.getId(), olapTable.getId(), indexSchemaMap, indexes, jobId); - LOG.debug("logModifyTableAddOrDropColumns info:{}", info); + if (LOG.isDebugEnabled()) { + LOG.debug("logModifyTableAddOrDropColumns info:{}", info); + } Env.getCurrentEnv().getEditLog().logModifyTableAddOrDropColumns(info); } LOG.info("finished modify table's add or drop or modify columns. table: {}, job: {}, is replay: {}", @@ -2717,7 +2747,9 @@ public class SchemaChangeHandler extends AlterHandler { } public void replayModifyTableLightSchemaChange(TableAddOrDropColumnsInfo info) throws MetaNotFoundException { - LOG.debug("info:{}", info); + if (LOG.isDebugEnabled()) { + LOG.debug("info:{}", info); + } long dbId = info.getDbId(); long tableId = info.getTableId(); Map> indexSchemaMap = info.getIndexSchemaMap(); @@ -2745,7 +2777,9 @@ public class SchemaChangeHandler extends AlterHandler { // Must get all columns including invisible columns. // Because in alter process, all columns must be considered. List alterSchema = indexSchemaMap.get(alterIndexId); - LOG.debug("index[{}] is changed. start checking...", alterIndexId); + if (LOG.isDebugEnabled()) { + LOG.debug("index[{}] is changed. start checking...", alterIndexId); + } // 1. check order: a) has key; b) value after key boolean meetValue = false; boolean hasKey = false; @@ -2811,7 +2845,9 @@ public class SchemaChangeHandler extends AlterHandler { // 5. 
store the changed columns for edit log changedIndexIdToSchema.put(alterIndexId, alterSchema); - LOG.debug("schema change[{}-{}-{}] check pass.", db.getId(), olapTable.getId(), alterIndexId); + if (LOG.isDebugEnabled()) { + LOG.debug("schema change[{}-{}-{}] check pass.", db.getId(), olapTable.getId(), alterIndexId); + } } // end for indices return changedIndexIdToSchema; } @@ -2847,7 +2883,9 @@ public class SchemaChangeHandler extends AlterHandler { public void replayModifyTableAddOrDropInvertedIndices(TableAddOrDropInvertedIndicesInfo info) throws MetaNotFoundException { - LOG.debug("info:{}", info); + if (LOG.isDebugEnabled()) { + LOG.debug("info:{}", info); + } long dbId = info.getDbId(); long tableId = info.getTableId(); Map> indexSchemaMap = info.getIndexSchemaMap(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java index 08e2797341..200532c6d4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java @@ -678,8 +678,10 @@ public class SchemaChangeJobV2 extends AlterJobV2 { } } tbl.getIndexMetaByIndexId(shadowIdxId).setMaxColUniqueId(maxColUniqueId); - LOG.debug("originIdxId:{}, shadowIdxId:{}, maxColUniqueId:{}, indexSchema:{}", - originIdxId, shadowIdxId, maxColUniqueId, indexSchemaMap.get(shadowIdxId)); + if (LOG.isDebugEnabled()) { + LOG.debug("originIdxId:{}, shadowIdxId:{}, maxColUniqueId:{}, indexSchema:{}", + originIdxId, shadowIdxId, maxColUniqueId, indexSchemaMap.get(shadowIdxId)); + } tbl.deleteIndexInfo(originIdxName); // the shadow index name is '__doris_shadow_xxx', rename it to origin name 'xxx' diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AbstractBackupTableRefClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AbstractBackupTableRefClause.java index 8b53b6d196..39e5c3dfe8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AbstractBackupTableRefClause.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AbstractBackupTableRefClause.java @@ -66,7 +66,9 @@ public class AbstractBackupTableRefClause implements ParseNode { tableRefList.add(tableRef); } - LOG.debug("table refs after normalization: {}", Joiner.on(",").join(tableRefList)); + if (LOG.isDebugEnabled()) { + LOG.debug("table refs after normalization: {}", Joiner.on(",").join(tableRefList)); + } } public boolean isExclude() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/Analyzer.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/Analyzer.java index 0b4317f871..1393c3f410 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/Analyzer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/Analyzer.java @@ -1037,7 +1037,9 @@ public class Analyzer { newTblName == null ? 
d.getTable().getName() : newTblName.toString()); } - LOG.debug("register column ref table {}, colName {}, col {}", tblName, colName, col.toSql()); + if (LOG.isDebugEnabled()) { + LOG.debug("register column ref table {}, colName {}, col {}", tblName, colName, col.toSql()); + } if (col.getType().isVariantType() || (subColNames != null && !subColNames.isEmpty())) { if (getContext() != null && !getContext().getSessionVariable().enableVariantAccessInOriginalPlanner && (subColNames != null && !subColNames.isEmpty())) { @@ -1075,7 +1077,9 @@ public class Analyzer { return result; } result = globalState.descTbl.addSlotDescriptor(d); - LOG.debug("register slot descriptor {}", result); + if (LOG.isDebugEnabled()) { + LOG.debug("register slot descriptor {}", result); + } result.setSubColLables(subColNames); result.setColumn(col); if (!subColNames.isEmpty()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ArrayLiteral.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ArrayLiteral.java index 5b4b5a9109..4cc328309d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ArrayLiteral.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ArrayLiteral.java @@ -255,4 +255,3 @@ public class ArrayLiteral extends LiteralExpr { } } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/BinaryPredicate.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/BinaryPredicate.java index 1ce647a473..f9bd72e3f7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/BinaryPredicate.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/BinaryPredicate.java @@ -315,7 +315,9 @@ public class BinaryPredicate extends Predicate implements Writable { Preconditions.checkState(match.getReturnType().getPrimitiveType() == PrimitiveType.BOOLEAN); //todo(dhc): should add oppCode //this.vectorOpcode = match.opcode; - LOG.debug(debugString() + " opcode: " + vectorOpcode); + if (LOG.isDebugEnabled()) { + LOG.debug(debugString() + " opcode: " + vectorOpcode); + } } private boolean canCompareDate(PrimitiveType t1, PrimitiveType t2) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CastExpr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CastExpr.java index 2a96609c27..f069c1cd69 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CastExpr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CastExpr.java @@ -588,4 +588,3 @@ public class CastExpr extends Expr { return this.notFold; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DataDescription.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DataDescription.java index 9c99b73965..75cc514aa8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DataDescription.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DataDescription.java @@ -1212,4 +1212,3 @@ public class DataDescription implements InsertStmt.DataDesc { return toSql(); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DescribeStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DescribeStmt.java index 6fe7fe7578..b0c4ecb252 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DescribeStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DescribeStmt.java @@ -344,7 +344,9 @@ public class DescribeStmt extends ShowStmt { getDb(), getTableName(), Sets.newHashSet(row.get(0)), PrivPredicate.SHOW); res.add(row); } catch (UserException e) { - LOG.debug(e.getMessage()); + if (LOG.isDebugEnabled()) { + 
LOG.debug(e.getMessage()); + } } } return res; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprSubstitutionMap.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprSubstitutionMap.java index f7ef598b75..bcc6030b7e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprSubstitutionMap.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprSubstitutionMap.java @@ -362,4 +362,3 @@ public final class ExprSubstitutionMap { } } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExpressionFunctions.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExpressionFunctions.java index 0ba9122373..7296516907 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExpressionFunctions.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExpressionFunctions.java @@ -131,7 +131,9 @@ public enum ExpressionFunctions { if (ConnectContext.get() != null) { ConnectContext.get().getState().reset(); } - LOG.debug("failed to invoke", e); + if (LOG.isDebugEnabled()) { + LOG.debug("failed to invoke", e); + } return constExpr; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/InlineViewRef.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/InlineViewRef.java index 178b0d0e14..86e93c4206 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/InlineViewRef.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/InlineViewRef.java @@ -102,7 +102,9 @@ public class InlineViewRef extends TableRef { public InlineViewRef(String alias, QueryStmt queryStmt, List colLabels) { this(alias, queryStmt); explicitColLabels = Lists.newArrayList(colLabels); - LOG.debug("inline view explicitColLabels {}", explicitColLabels); + if (LOG.isDebugEnabled()) { + LOG.debug("inline view explicitColLabels {}", explicitColLabels); + } } /** @@ -245,10 +247,14 @@ public class InlineViewRef extends TableRef { // TODO: relax this a bit by allowing propagation out of the inline view (but // not into it) List slots = analyzer.changeSlotToNullableOfOuterJoinedTuples(); - LOG.debug("inline view query {}", queryStmt.toSql()); + if (LOG.isDebugEnabled()) { + LOG.debug("inline view query {}", queryStmt.toSql()); + } for (int i = 0; i < getColLabels().size(); ++i) { String colName = getColLabels().get(i); - LOG.debug("inline view register {}", colName); + if (LOG.isDebugEnabled()) { + LOG.debug("inline view register {}", colName); + } SlotDescriptor slotDesc = analyzer.registerColumnRef(getAliasAsName(), colName, getSubColPath().get(i)); Expr colExpr = queryStmt.getResultExprs().get(i); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/LambdaFunctionCallExpr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/LambdaFunctionCallExpr.java index e1a5cc6cf7..ebf3d1307a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/LambdaFunctionCallExpr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/LambdaFunctionCallExpr.java @@ -247,7 +247,9 @@ public class LambdaFunctionCallExpr extends FunctionCallExpr { LOG.warn("fn {} not exists", this.toSqlImpl()); throw new AnalysisException(getFunctionNotFoundError(collectChildReturnTypes())); } - LOG.debug("fn string: " + fn.signatureString() + ". return type: " + fn.getReturnType()); + if (LOG.isDebugEnabled()) { + LOG.debug("fn string: " + fn.signatureString() + ". 
return type: " + fn.getReturnType()); + } this.type = fn.getReturnType(); if (this.type.isArrayType() && ((ArrayType) this.type).getItemType().isDecimalV3() && getChild(0).getType().isArrayType() diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java index e2fba76b9b..4401347996 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java @@ -799,13 +799,19 @@ public class NativeInsertStmt extends InsertStmt { if (LOG.isDebugEnabled()) { for (Expr expr : queryStmt.getResultExprs()) { - LOG.debug("final result expr: {}, {}", expr, System.identityHashCode(expr)); + if (LOG.isDebugEnabled()) { + LOG.debug("final result expr: {}, {}", expr, System.identityHashCode(expr)); + } } for (Expr expr : queryStmt.getBaseTblResultExprs()) { - LOG.debug("final base table result expr: {}, {}", expr, System.identityHashCode(expr)); + if (LOG.isDebugEnabled()) { + LOG.debug("final base table result expr: {}, {}", expr, System.identityHashCode(expr)); + } } for (String colLabel : queryStmt.getColLabels()) { - LOG.debug("final col label: {}", colLabel); + if (LOG.isDebugEnabled()) { + LOG.debug("final col label: {}", colLabel); + } } } } @@ -1225,7 +1231,9 @@ public class NativeInsertStmt extends InsertStmt { olapTable.readLock(); try { if (groupCommitPlanner != null && olapTable.getBaseSchemaVersion() == baseSchemaVersion) { - LOG.debug("reuse group commit plan, table={}", olapTable); + if (LOG.isDebugEnabled()) { + LOG.debug("reuse group commit plan, table={}", olapTable); + } reuseGroupCommitPlan = true; return groupCommitPlanner; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/OutFileClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/OutFileClause.java index 945a8b0745..cc2ee8ef10 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/OutFileClause.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/OutFileClause.java @@ -588,7 +588,9 @@ public class OutFileClause { } if (processedPropKeys.size() != properties.size()) { - LOG.debug("{} vs {}", processedPropKeys, properties); + if (LOG.isDebugEnabled()) { + LOG.debug("{} vs {}", processedPropKeys, properties); + } throw new AnalysisException("Unknown properties: " + properties.keySet().stream() .filter(k -> !processedPropKeys.contains(k)).collect(Collectors.toList())); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/PrepareStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/PrepareStmt.java index f9bb9e5e05..1545ea67ef 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/PrepareStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/PrepareStmt.java @@ -218,7 +218,9 @@ public class PrepareStmt extends StatementBase { inner.getPlaceHolders().get(i).setLiteral(values.get(i)); } if (!values.isEmpty()) { - LOG.debug("assign values {}", values.get(0).toSql()); + if (LOG.isDebugEnabled()) { + LOG.debug("assign values {}", values.get(0).toSql()); + } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/QueryStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/QueryStmt.java index c1d879fd06..3aded3a5d1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/QueryStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/QueryStmt.java @@ -355,7 +355,9 @@ public abstract class QueryStmt extends StatementBase 
implements Queriable { strBuilder.append("or an insert/ctas statement has no effect on the query result "); strBuilder.append("unless a LIMIT and/or OFFSET is used in conjunction "); strBuilder.append("with the ORDER BY."); - LOG.debug(strBuilder.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug(strBuilder.toString()); + } } } else { evaluateOrderBy = true; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java index bd7da27a67..0b8e607052 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java @@ -726,7 +726,9 @@ public class SelectStmt extends QueryStmt { // be prevent from reading from ScanNode.Those columns will be finally // read by the second fetch phase isTwoPhaseOptEnabled = true; - LOG.debug("two phase read optimize enabled"); + if (LOG.isDebugEnabled()) { + LOG.debug("two phase read optimize enabled"); + } // Expr.analyze(resultExprs, analyzer); Set resultSlots = Sets.newHashSet(); Set orderingSlots = Sets.newHashSet(); @@ -752,9 +754,11 @@ public class SelectStmt extends QueryStmt { slot.setNeedMaterialize(false); } - LOG.debug("resultsSlots {}", resultSlots); - LOG.debug("orderingSlots {}", orderingSlots); - LOG.debug("conjuntSlots {}", conjuntSlots); + if (LOG.isDebugEnabled()) { + LOG.debug("resultsSlots {}", resultSlots); + LOG.debug("orderingSlots {}", orderingSlots); + LOG.debug("conjuntSlots {}", conjuntSlots); + } } if (evaluateOrderBy) { createSortTupleInfo(analyzer); @@ -828,12 +832,16 @@ public class SelectStmt extends QueryStmt { if (tbl.getTable().getType() != Table.TableType.OLAP) { return false; } - LOG.debug("table ref {}", tbl); + if (LOG.isDebugEnabled()) { + LOG.debug("table ref {}", tbl); + } // Need enable light schema change, since opt rely on // column_unique_id of each slot OlapTable olapTable = (OlapTable) tbl.getTable(); if (!olapTable.isDupKeysOrMergeOnWrite()) { - LOG.debug("only support duplicate key or MOW model"); + if (LOG.isDebugEnabled()) { + LOG.debug("only support duplicate key or MOW model"); + } return false; } if (!olapTable.getEnableLightSchemaChange()) { @@ -853,8 +861,10 @@ public class SelectStmt extends QueryStmt { } // Check order by exprs are all slot refs // Rethink? 
implement more generic to support all exprs - LOG.debug("getOrderingExprs {}", sortInfo.getOrderingExprs()); - LOG.debug("getOrderByElements {}", getOrderByElements()); + if (LOG.isDebugEnabled()) { + LOG.debug("getOrderingExprs {}", sortInfo.getOrderingExprs()); + LOG.debug("getOrderByElements {}", getOrderByElements()); + } for (Expr sortExpr : sortInfo.getOrderingExprs()) { if (!(sortExpr instanceof SlotRef)) { return false; @@ -1079,7 +1089,9 @@ public class SelectStmt extends QueryStmt { long rowCount = 0; if (tblRef.getTable().getType() == TableType.OLAP) { rowCount = ((OlapTable) (tblRef.getTable())).getRowCount(); - LOG.debug("tableName={} rowCount={}", tblRef.getAlias(), rowCount); + if (LOG.isDebugEnabled()) { + LOG.debug("tableName={} rowCount={}", tblRef.getAlias(), rowCount); + } } candidates.add(Pair.of(tblRef, rowCount)); } @@ -2804,7 +2816,9 @@ public class SelectStmt extends QueryStmt { OlapTable olapTable = (OlapTable) tbl.getTable(); Preconditions.checkNotNull(eqPredicates); eqPredicates = getExpectedBinaryPredicates(eqPredicates, whereClause, TExprOpcode.EQ); - LOG.debug("predicates {}", eqPredicates); + if (LOG.isDebugEnabled()) { + LOG.debug("predicates {}", eqPredicates); + } if (eqPredicates == null) { return false; } @@ -2864,7 +2878,9 @@ public class SelectStmt extends QueryStmt { if (binaryPredicate.getOpcode() != expected) { return null; } - LOG.debug("binary pred {}", expr); + if (LOG.isDebugEnabled()) { + LOG.debug("binary pred {}", expr); + } Pair p = binaryPredicate.extract(); if (p == null || result.containsKey(p.first)) { return null; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java index 62094733d4..466d958ab1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java @@ -204,7 +204,9 @@ public class ShowAlterStmt extends ShowStmt { throw new UserException("SHOW " + type.name() + " does not implement yet"); } - LOG.debug("process SHOW PROC '{}';", sb.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("process SHOW PROC '{}';", sb.toString()); + } // create show proc stmt // '/jobs/db_name/rollup|schema_change/ node = ProcService.getInstance().open(sb.toString()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBuildIndexStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBuildIndexStmt.java index d804b9b9f8..5b02e306aa 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBuildIndexStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBuildIndexStmt.java @@ -124,7 +124,9 @@ public class ShowBuildIndexStmt extends ShowStmt { sb.append(db.getId()); sb.append("/build_index"); - LOG.debug("process SHOW PROC '{}';", sb.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("process SHOW PROC '{}';", sb.toString()); + } // create show proc stmt // '/jobs/db_name/build_index/ node = ProcService.getInstance().open(sb.toString()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowColumnStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowColumnStmt.java index dfa5c3f2cb..eb7fcaf028 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowColumnStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowColumnStmt.java @@ -173,4 +173,3 @@ public class ShowColumnStmt extends ShowStmt { return metaData; } } - diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowDbStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowDbStmt.java index f88040f4ba..f02ffd35be 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowDbStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowDbStmt.java @@ -111,4 +111,3 @@ public class ShowDbStmt extends ShowStmt { return META_DATA; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowLoadProfileStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowLoadProfileStmt.java index 88b960ce97..b3babe1e36 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowLoadProfileStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowLoadProfileStmt.java @@ -156,4 +156,3 @@ public class ShowLoadProfileStmt extends ShowStmt { return RedirectStatus.FORWARD_NO_SYNC; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowQueryProfileStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowQueryProfileStmt.java index 7477d1673b..f27840faa8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowQueryProfileStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowQueryProfileStmt.java @@ -160,4 +160,3 @@ public class ShowQueryProfileStmt extends ShowStmt { } } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowVariablesStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowVariablesStmt.java index 68d04faefc..efe36311ca 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowVariablesStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowVariablesStmt.java @@ -106,7 +106,9 @@ public class ShowVariablesStmt extends ShowStmt { selectStmt = new SelectStmt(selectList, new FromClause(Lists.newArrayList(new TableRef(tableName, null))), where, null, null, null, LimitElement.NO_LIMIT); - LOG.debug("select stmt is {}", selectStmt.toSql()); + if (LOG.isDebugEnabled()) { + LOG.debug("select stmt is {}", selectStmt.toSql()); + } // DB: type // table: thread id diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotDescriptor.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotDescriptor.java index 6bc544a11f..4615fcbd82 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotDescriptor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotDescriptor.java @@ -311,7 +311,9 @@ public class SlotDescriptor { tSlotDescriptor.setNeedMaterialize(needMaterialize); tSlotDescriptor.setIsAutoIncrement(isAutoInc); if (column != null) { - LOG.debug("column name:{}, column unique id:{}", column.getNonShadowName(), column.getUniqueId()); + if (LOG.isDebugEnabled()) { + LOG.debug("column name:{}, column unique id:{}", column.getNonShadowName(), column.getUniqueId()); + } tSlotDescriptor.setColUniqueId(column.getUniqueId()); tSlotDescriptor.setPrimitiveType(column.getDataType().toThrift()); tSlotDescriptor.setIsKey(column.isKey()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotRef.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotRef.java index 78a39fadc5..29087b4bb8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotRef.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotRef.java @@ -215,7 +215,9 @@ public class SlotRef extends Expr { @Override public void computeOutputColumn(Analyzer analyzer) { outputColumn = desc.getSlotOffset(); - LOG.debug("SlotRef: " + debugString() + " outputColumn: " 
+ outputColumn); + if (LOG.isDebugEnabled()) { + LOG.debug("SlotRef: " + debugString() + " outputColumn: " + outputColumn); + } } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SortInfo.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SortInfo.java index a4d5962bc4..d1e3129244 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SortInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SortInfo.java @@ -282,7 +282,9 @@ public class SortInfo { // Update the tuple descriptor used to materialize the input of the sort. setMaterializedTupleInfo(sortTupleDesc, sortTupleExprs); - LOG.debug("sortTupleDesc {}", sortTupleDesc); + if (LOG.isDebugEnabled()) { + LOG.debug("sortTupleDesc {}", sortTupleDesc); + } return substOrderBy; } @@ -311,7 +313,9 @@ public class SortInfo { materializedDesc.initFromExpr(origOrderingExpr); materializedDesc.setIsMaterialized(true); SlotRef origSlotRef = origOrderingExpr.getSrcSlotRef(); - LOG.debug("origOrderingExpr {}", origOrderingExpr); + if (LOG.isDebugEnabled()) { + LOG.debug("origOrderingExpr {}", origOrderingExpr); + } if (origSlotRef != null) { // need do this for two phase read of topn query optimization // check https://github.com/apache/doris/pull/15642 for detail diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/StmtRewriter.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/StmtRewriter.java index 1ac31c8fc6..0c6c71b7c2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/StmtRewriter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/StmtRewriter.java @@ -224,7 +224,9 @@ public class StmtRewriter { } catch (UserException e) { throw new AnalysisException(e.getMessage()); } - LOG.debug("Outer query is changed to {}", inlineViewRef.tableRefToSql()); + if (LOG.isDebugEnabled()) { + LOG.debug("Outer query is changed to {}", inlineViewRef.tableRefToSql()); + } /* * Columns which belong to outer query can substitute for output columns of inline view @@ -255,7 +257,9 @@ public class StmtRewriter { } havingClause.reset(); Expr newWherePredicate = havingClause.substitute(smap, analyzer, false); - LOG.debug("Having predicate is changed to " + newWherePredicate.toSql()); + if (LOG.isDebugEnabled()) { + LOG.debug("Having predicate is changed to " + newWherePredicate.toSql()); + } ArrayList newOrderByElements = null; if (orderByElements != null) { newOrderByElements = Lists.newArrayList(); @@ -263,7 +267,9 @@ public class StmtRewriter { OrderByElement newOrderByElement = new OrderByElement(orderByElement.getExpr().reset().substitute(smap), orderByElement.getIsAsc(), orderByElement.getNullsFirstParam()); newOrderByElements.add(newOrderByElement); - LOG.debug("Order by element is changed to " + newOrderByElement.toSql()); + if (LOG.isDebugEnabled()) { + LOG.debug("Order by element is changed to " + newOrderByElement.toSql()); + } } } List newSelectItems = Lists.newArrayList(); @@ -271,7 +277,9 @@ public class StmtRewriter { SelectListItem newItem = new SelectListItem(selectList.getItems().get(i).getExpr().reset().substitute(smap), columnLabels.get(i)); newSelectItems.add(newItem); - LOG.debug("New select item is changed to " + newItem.toSql()); + if (LOG.isDebugEnabled()) { + LOG.debug("New select item is changed to " + newItem.toSql()); + } } SelectList newSelectList = new SelectList(newSelectItems, selectList.isDistinct()); @@ -291,7 +299,9 @@ public class StmtRewriter { // equal where subquery result = rewriteSelectStatement(result, analyzer); - LOG.debug("The final 
stmt is " + result.toSql()); + if (LOG.isDebugEnabled()) { + LOG.debug("The final stmt is " + result.toSql()); + } return result; } @@ -634,7 +644,9 @@ public class StmtRewriter { private static boolean mergeExpr(SelectStmt stmt, Expr expr, Analyzer analyzer, TupleDescriptor markTuple) throws AnalysisException { // LOG.warn("dhc mergeExpr stmt={} expr={}", stmt, expr); - LOG.debug("SUBQUERY mergeExpr stmt={} expr={}", stmt.toSql(), expr.toSql()); + if (LOG.isDebugEnabled()) { + LOG.debug("SUBQUERY mergeExpr stmt={} expr={}", stmt.toSql(), expr.toSql()); + } Preconditions.checkNotNull(expr); Preconditions.checkNotNull(analyzer); Preconditions.checkState(expr.getSubquery().getAnalyzer() != null, diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/Subquery.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/Subquery.java index c318e067a0..f9900b8f60 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/Subquery.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/Subquery.java @@ -205,9 +205,11 @@ public class Subquery extends Expr { @Override public Subquery clone() { Subquery ret = new Subquery(this); - LOG.debug("SUBQUERY clone old={} new={}", - System.identityHashCode(this), - System.identityHashCode(ret)); + if (LOG.isDebugEnabled()) { + LOG.debug("SUBQUERY clone old={} new={}", + System.identityHashCode(this), + System.identityHashCode(ret)); + } return ret; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/TimestampArithmeticExpr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/TimestampArithmeticExpr.java index 01ffef09f2..2a44ddebca 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/TimestampArithmeticExpr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/TimestampArithmeticExpr.java @@ -289,7 +289,9 @@ public class TimestampArithmeticExpr extends Expr { } } } - LOG.debug("fn is {} name is {}", fn, funcOpName); + if (LOG.isDebugEnabled()) { + LOG.debug("fn is {} name is {}", fn, funcOpName); + } } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java index bed797b916..adbdc6d0cd 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java @@ -190,8 +190,10 @@ public class BackupJob extends AbstractJob { taskProgress.remove(task.getTabletId()); Long oldValue = unfinishedTaskIds.remove(task.getTabletId()); taskErrMsg.remove(task.getTabletId()); - LOG.debug("get finished snapshot info: {}, unfinished tasks num: {}, remove result: {}. {}", - info, unfinishedTaskIds.size(), (oldValue != null), this); + if (LOG.isDebugEnabled()) { + LOG.debug("get finished snapshot info: {}, unfinished tasks num: {}, remove result: {}. {}", + info, unfinishedTaskIds.size(), (oldValue != null), this); + } return oldValue != null; } @@ -247,8 +249,10 @@ public class BackupJob extends AbstractJob { taskProgress.remove(task.getSignature()); Long oldValue = unfinishedTaskIds.remove(task.getSignature()); taskErrMsg.remove(task.getSignature()); - LOG.debug("get finished upload snapshot task, unfinished tasks num: {}, remove result: {}. {}", - unfinishedTaskIds.size(), (oldValue != null), this); + if (LOG.isDebugEnabled()) { + LOG.debug("get finished upload snapshot task, unfinished tasks num: {}, remove result: {}. 
{}", + unfinishedTaskIds.size(), (oldValue != null), this); + } return oldValue != null; } @@ -319,7 +323,9 @@ public class BackupJob extends AbstractJob { } } - LOG.debug("run backup job: {}", this); + if (LOG.isDebugEnabled()) { + LOG.debug("run backup job: {}", this); + } // run job base on current state switch (state) { @@ -698,7 +704,9 @@ public class BackupJob extends AbstractJob { return; } - LOG.debug("waiting {} tablets to upload snapshot. {}", unfinishedTaskIds.size(), this); + if (LOG.isDebugEnabled()) { + LOG.debug("waiting {} tablets to upload snapshot. {}", unfinishedTaskIds.size(), this); + } } private void saveMetaInfo() { @@ -748,7 +756,9 @@ public class BackupJob extends AbstractJob { } jobInfo = BackupJobInfo.fromCatalog(createTime, label, dbName, dbId, getContent(), backupMeta, snapshotInfos, tableCommitSeqMap); - LOG.debug("job info: {}. {}", jobInfo, this); + if (LOG.isDebugEnabled()) { + LOG.debug("job info: {}. {}", jobInfo, this); + } File jobInfoFile = new File(jobDir, Repository.PREFIX_JOB_INFO + createTimeStr); if (!jobInfoFile.createNewFile()) { status = new Status(ErrCode.COMMON_ERROR, "Failed to create job info file: " + jobInfoFile.toString()); @@ -1054,4 +1064,3 @@ public class BackupJob extends AbstractJob { return sb.toString(); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJobInfo.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJobInfo.java index a199aca7c3..f154ff72d7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJobInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJobInfo.java @@ -483,25 +483,33 @@ public class BackupJobInfo implements Writable { // eg: __db_10001/__tbl_10002/__part_10003/__idx_10002/__10004 public String getFilePath(String db, String tbl, String part, String idx, long tabletId) { if (!db.equalsIgnoreCase(dbName)) { - LOG.debug("db name does not equal: {}-{}", dbName, db); + if (LOG.isDebugEnabled()) { + LOG.debug("db name does not equal: {}-{}", dbName, db); + } return null; } BackupOlapTableInfo tblInfo = backupOlapTableObjects.get(tbl); if (tblInfo == null) { - LOG.debug("tbl {} does not exist", tbl); + if (LOG.isDebugEnabled()) { + LOG.debug("tbl {} does not exist", tbl); + } return null; } BackupPartitionInfo partInfo = tblInfo.getPartInfo(part); if (partInfo == null) { - LOG.debug("part {} does not exist", part); + if (LOG.isDebugEnabled()) { + LOG.debug("part {} does not exist", part); + } return null; } BackupIndexInfo idxInfo = partInfo.getIdx(idx); if (idxInfo == null) { - LOG.debug("idx {} does not exist", idx); + if (LOG.isDebugEnabled()) { + LOG.debug("idx {} does not exist", idx); + } return null; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/Repository.java b/fe/fe-core/src/main/java/org/apache/doris/backup/Repository.java index 3c5fdd3578..a1ede5b373 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/Repository.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/Repository.java @@ -424,7 +424,9 @@ public class Repository implements Writable { for (RemoteFile remoteFile : result) { if (remoteFile.isFile()) { - LOG.debug("get snapshot path{} which is not a dir", remoteFile); + if (LOG.isDebugEnabled()) { + LOG.debug("get snapshot path{} which is not a dir", remoteFile); + } continue; } @@ -453,7 +455,9 @@ public class Repository implements Writable { joinPrefix(PREFIX_IDX, info.getIndexId()), joinPrefix(PREFIX_COMMON, info.getTabletId()), joinPrefix(PREFIX_COMMON, info.getSchemaHash())); - 
LOG.debug("get remote tablet snapshot path: {}", path); + if (LOG.isDebugEnabled()) { + LOG.debug("get remote tablet snapshot path: {}", path); + } return path; } @@ -528,8 +532,10 @@ public class Repository implements Writable { if (fileSystem instanceof BrokerFileSystem) { // this may be a retry, so we should first delete remote file String tmpRemotePath = assembleFileNameWithSuffix(remoteFilePath, SUFFIX_TMP_FILE); - LOG.debug("get md5sum of file: {}. tmp remote path: {}. final remote path: {}", - localFilePath, tmpRemotePath, finalRemotePath); + if (LOG.isDebugEnabled()) { + LOG.debug("get md5sum of file: {}. tmp remote path: {}. final remote path: {}", + localFilePath, tmpRemotePath, finalRemotePath); + } st = fileSystem.delete(tmpRemotePath); if (!st.ok()) { return st; @@ -552,7 +558,9 @@ public class Repository implements Writable { return st; } } else if (fileSystem instanceof S3FileSystem) { - LOG.debug("get md5sum of file: {}. final remote path: {}", localFilePath, finalRemotePath); + if (LOG.isDebugEnabled()) { + LOG.debug("get md5sum of file: {}. final remote path: {}", localFilePath, finalRemotePath); + } st = fileSystem.delete(finalRemotePath); if (!st.ok()) { return st; @@ -564,7 +572,9 @@ public class Repository implements Writable { return st; } } else if (fileSystem instanceof DFSFileSystem) { - LOG.debug("hdfs get md5sum of file: {}. final remote path: {}", localFilePath, finalRemotePath); + if (LOG.isDebugEnabled()) { + LOG.debug("hdfs get md5sum of file: {}. final remote path: {}", localFilePath, finalRemotePath); + } st = fileSystem.delete(finalRemotePath); if (!st.ok()) { return st; @@ -599,7 +609,9 @@ public class Repository implements Writable { String remoteFilePathWithChecksum = replaceFileNameWithChecksumFileName(remoteFilePath, remoteFiles.get(0).getName()); - LOG.debug("get download filename with checksum: " + remoteFilePathWithChecksum); + if (LOG.isDebugEnabled()) { + LOG.debug("get download filename with checksum: " + remoteFilePathWithChecksum); + } // 1. get checksum from remote file name Pair pair = decodeFileNameWithChecksum(remoteFilePathWithChecksum); @@ -743,7 +755,9 @@ public class Repository implements Writable { // get all timestamp // path eg: /location/__palo_repository_repo_name/__ss_my_snap/__info_* String infoFilePath = assembleJobInfoFilePath(snapshotName, -1); - LOG.debug("assemble infoFilePath: {}, snapshot: {}", infoFilePath, snapshotName); + if (LOG.isDebugEnabled()) { + LOG.debug("assemble infoFilePath: {}, snapshot: {}", infoFilePath, snapshotName); + } List results = Lists.newArrayList(); Status st = fileSystem.list(infoFilePath + "*", results); if (!st.ok()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java index c6c6beb335..ec4ca14395 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java @@ -246,8 +246,10 @@ public class RestoreJob extends AbstractJob { if (removedTabletId != null) { taskErrMsg.remove(task.getSignature()); Preconditions.checkState(task.getTabletId() == removedTabletId, removedTabletId); - LOG.debug("get finished snapshot info: {}, unfinished tasks num: {}, remove result: {}. {}", - info, unfinishedSignatureToId.size(), this, removedTabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("get finished snapshot info: {}, unfinished tasks num: {}, remove result: {}. 
{}", + info, unfinishedSignatureToId.size(), this, removedTabletId); + } return true; } return false; @@ -613,7 +615,9 @@ public class RestoreJob extends AbstractJob { status = st; return; } - LOG.debug("get intersect part names: {}, job: {}", intersectPartNames, this); + if (LOG.isDebugEnabled()) { + LOG.debug("get intersect part names: {}, job: {}", intersectPartNames, this); + } if (!localOlapTbl.getSignature(BackupHandler.SIGNATURE_VERSION, intersectPartNames) .equals(remoteOlapTbl.getSignature( BackupHandler.SIGNATURE_VERSION, intersectPartNames))) { @@ -726,7 +730,9 @@ public class RestoreJob extends AbstractJob { // DO NOT set remote table's new name here, cause we will still need the origin name later // remoteOlapTbl.setName(jobInfo.getAliasByOriginNameIfSet(tblInfo.name)); remoteOlapTbl.setState(allowLoad ? OlapTableState.RESTORE_WITH_LOAD : OlapTableState.RESTORE); - LOG.debug("put remote table {} to restoredTbls", remoteOlapTbl.getName()); + if (LOG.isDebugEnabled()) { + LOG.debug("put remote table {} to restoredTbls", remoteOlapTbl.getName()); + } restoredTbls.add(remoteOlapTbl); } } // end of all restore olap tables @@ -773,7 +779,9 @@ public class RestoreJob extends AbstractJob { } } - LOG.debug("finished to prepare restored partitions and tables. {}", this); + if (LOG.isDebugEnabled()) { + LOG.debug("finished to prepare restored partitions and tables. {}", this); + } // for now, nothing is modified in catalog // generate create replica tasks for all restored partitions @@ -807,7 +815,9 @@ public class RestoreJob extends AbstractJob { restoreTbl.setName(jobInfo.getAliasByOriginNameIfSet(restoreTbl.getName())); } - LOG.debug("finished to generate create replica tasks. {}", this); + if (LOG.isDebugEnabled()) { + LOG.debug("finished to generate create replica tasks. {}", this); + } } finally { db.readUnlock(); } @@ -817,7 +827,9 @@ public class RestoreJob extends AbstractJob { if (!status.ok()) { return; } - LOG.debug("finished to restore resources. {}", this.jobId); + if (LOG.isDebugEnabled()) { + LOG.debug("finished to restore resources. {}", this.jobId); + } // Send create replica task to BE outside the db lock boolean ok = false; @@ -845,7 +857,9 @@ public class RestoreJob extends AbstractJob { } if (ok) { - LOG.debug("finished to create all restored replcias. {}", this); + if (LOG.isDebugEnabled()) { + LOG.debug("finished to create all restored replcias. {}", this); + } // add restored partitions. // table should be in State RESTORE, so no other partitions can be // added to or removed from this table during the restore process. 
@@ -1151,14 +1165,18 @@ public class RestoreJob extends AbstractJob { private void genFileMapping(OlapTable localTbl, Partition localPartition, Long remoteTblId, BackupPartitionInfo backupPartInfo, boolean overwrite) { for (MaterializedIndex localIdx : localPartition.getMaterializedIndices(IndexExtState.VISIBLE)) { - LOG.debug("get index id: {}, index name: {}", localIdx.getId(), - localTbl.getIndexNameById(localIdx.getId())); + if (LOG.isDebugEnabled()) { + LOG.debug("get index id: {}, index name: {}", localIdx.getId(), + localTbl.getIndexNameById(localIdx.getId())); + } BackupIndexInfo backupIdxInfo = backupPartInfo.getIdx(localTbl.getIndexNameById(localIdx.getId())); Preconditions.checkState(backupIdxInfo.sortedTabletInfoList.size() == localIdx.getTablets().size()); for (int i = 0; i < localIdx.getTablets().size(); i++) { Tablet localTablet = localIdx.getTablets().get(i); BackupTabletInfo backupTabletInfo = backupIdxInfo.sortedTabletInfoList.get(i); - LOG.debug("get tablet mapping: {} to {}, index {}", backupTabletInfo.id, localTablet.getId(), i); + if (LOG.isDebugEnabled()) { + LOG.debug("get tablet mapping: {} to {}, index {}", backupTabletInfo.id, localTablet.getId(), i); + } for (Replica localReplica : localTablet.getReplicas()) { IdChain src = new IdChain(remoteTblId, backupPartInfo.id, backupIdxInfo.id, backupTabletInfo.id, -1L /* no replica id */); @@ -1364,8 +1382,10 @@ public class RestoreJob extends AbstractJob { int batchNum = Math.min(totalNum, Config.restore_download_task_num_per_be); // each task contains several upload sub tasks int taskNumPerBatch = Math.max(totalNum / batchNum, 1); - LOG.debug("backend {} has {} batch, total {} tasks, {}", - beId, batchNum, totalNum, this); + if (LOG.isDebugEnabled()) { + LOG.debug("backend {} has {} batch, total {} tasks, {}", + beId, batchNum, totalNum, this); + } List brokerAddrs = null; brokerAddrs = Lists.newArrayList(); @@ -1449,7 +1469,9 @@ public class RestoreJob extends AbstractJob { // download to previous exist snapshot dir String dest = snapshotInfo.getTabletPath(); srcToDest.put(src, dest); - LOG.debug("create download src path: {}, dest path: {}", src, dest); + if (LOG.isDebugEnabled()) { + LOG.debug("create download src path: {}, dest path: {}", src, dest); + } } finally { olapTbl.readUnlock(); @@ -1747,8 +1769,10 @@ public class RestoreJob extends AbstractJob { } } - LOG.debug("restore set partition {} version in table {}, version: {}", - partId, tblId, entry.getValue()); + if (LOG.isDebugEnabled()) { + LOG.debug("restore set partition {} version in table {}, version: {}", + partId, tblId, entry.getValue()); + } } } finally { tbl.writeUnlock(); @@ -2183,4 +2207,3 @@ public class RestoreJob extends AbstractJob { return sb.toString(); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/binlog/BinlogGcer.java b/fe/fe-core/src/main/java/org/apache/doris/binlog/BinlogGcer.java index 468e5a0818..7011807611 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/binlog/BinlogGcer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/binlog/BinlogGcer.java @@ -52,7 +52,9 @@ public class BinlogGcer extends MasterDaemon { @Override protected void runAfterCatalogReady() { - LOG.debug("start binlog syncer jobs."); + if (LOG.isDebugEnabled()) { + LOG.debug("start binlog syncer jobs."); + } try { List tombstones = Env.getCurrentEnv().getBinlogManager().gc(); if (tombstones != null && !tombstones.isEmpty()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/BuiltinTableValuedFunctions.java 
b/fe/fe-core/src/main/java/org/apache/doris/catalog/BuiltinTableValuedFunctions.java index b45847088d..ac1b31fcea 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/BuiltinTableValuedFunctions.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/BuiltinTableValuedFunctions.java @@ -67,4 +67,3 @@ public class BuiltinTableValuedFunctions implements FunctionHelper { // Note: Do not add any code here! private BuiltinTableValuedFunctions() {} } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Database.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Database.java index 05ba8fac11..c4475bc3c3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Database.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Database.java @@ -578,7 +578,9 @@ public class Database extends MetaObject implements Writable, DatabaseIf StringBuilder sb = new StringBuilder(signatureVersion); sb.append(fullQualifiedName); String md5 = DigestUtils.md5Hex(sb.toString()); - LOG.debug("get signature of database {}: {}. signature string: {}", fullQualifiedName, md5, sb.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("get signature of database {}: {}. signature string: {}", fullQualifiedName, md5, sb.toString()); + } return md5; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/DiskInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/DiskInfo.java index b49acb2ff8..934e7f75fb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/DiskInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/DiskInfo.java @@ -169,8 +169,10 @@ public class DiskInfo implements Writable { * floodStage threshold means a loosely limit, and we use 'AND' to give a more loosely limit. */ public boolean exceedLimit(boolean floodStage) { - LOG.debug("flood stage: {}, diskAvailableCapacityB: {}, totalCapacityB: {}", - floodStage, diskAvailableCapacityB, totalCapacityB); + if (LOG.isDebugEnabled()) { + LOG.debug("flood stage: {}, diskAvailableCapacityB: {}, totalCapacityB: {}", + floodStage, diskAvailableCapacityB, totalCapacityB); + } if (floodStage) { return diskAvailableCapacityB < Config.storage_flood_stage_left_capacity_bytes && this.getUsedPct() > (Config.storage_flood_stage_usage_percent / 100.0); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/DomainResolver.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/DomainResolver.java index 7fd3f0890a..f5d39a8e8d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/DomainResolver.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/DomainResolver.java @@ -61,12 +61,16 @@ public class DomainResolver extends MasterDaemon { // resolve domain name Map> resolvedIPsMap = Maps.newHashMap(); for (String domain : allDomains) { - LOG.debug("begin to resolve domain: {}", domain); + if (LOG.isDebugEnabled()) { + LOG.debug("begin to resolve domain: {}", domain); + } Set resolvedIPs = Sets.newHashSet(); if (!resolveWithBNS(domain, resolvedIPs) && !resolveWithDNS(domain, resolvedIPs)) { continue; } - LOG.debug("get resolved ip of domain {}: {}", domain, resolvedIPs); + if (LOG.isDebugEnabled()) { + LOG.debug("get resolved ip of domain {}: {}", domain, resolvedIPs); + } resolvedIPsMap.put(domain, resolvedIPs); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java index 793dcb1f76..9a6a6ab8fd 100755 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java @@ -1849,7 +1849,9 @@ public class Env { private boolean isMyself() { Preconditions.checkNotNull(selfNode); Preconditions.checkNotNull(helperNodes); - LOG.debug("self: {}. helpers: {}", selfNode, helperNodes); + if (LOG.isDebugEnabled()) { + LOG.debug("self: {}. helpers: {}", selfNode, helperNodes); + } // if helper nodes contain itself, remove other helpers boolean containSelf = false; for (HostInfo helperNode : helperNodes) { @@ -2754,7 +2756,9 @@ public class Env { EditLog.loadJournal(this, logId, entity); long loadJournalEndTime = System.currentTimeMillis(); replayedJournalId.incrementAndGet(); - LOG.debug("journal {} replayed.", replayedJournalId); + if (LOG.isDebugEnabled()) { + LOG.debug("journal {} replayed.", replayedJournalId); + } if (feType != FrontendNodeType.MASTER) { journalObservable.notifyObservers(replayedJournalId.get()); } @@ -4142,7 +4146,9 @@ public class Env { clusterColumns.put(column.getClusterKeyId(), column); } } - LOG.debug("index column size: {}, cluster column size: {}", indexColumns.size(), clusterColumns.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("index column size: {}, cluster column size: {}", indexColumns.size(), clusterColumns.size()); + } if (isKeysRequired && indexColumns.isEmpty()) { throw new DdlException("The materialized view need key column"); } @@ -4716,7 +4722,9 @@ public class Env { } public void replayRenameColumn(TableRenameColumnInfo info) throws MetaNotFoundException { - LOG.debug("info:{}", info); + if (LOG.isDebugEnabled()) { + LOG.debug("info:{}", info); + } long dbId = info.getDbId(); long tableId = info.getTableId(); String colName = info.getColName(); @@ -4818,8 +4826,10 @@ public class Env { newDataProperty, replicaAlloc, isInMemory, partitionInfo.getStoragePolicy(partition.getId()), tblProperties); editLog.logModifyPartition(info); - LOG.debug("modify partition[{}-{}-{}] replica allocation to {}", db.getId(), table.getId(), partition.getName(), - replicaAlloc.toCreateStmt()); + if (LOG.isDebugEnabled()) { + LOG.debug("modify partition[{}-{}-{}] replica allocation to {}", + db.getId(), table.getId(), partition.getName(), replicaAlloc.toCreateStmt()); + } } /** @@ -4840,8 +4850,10 @@ public class Env { new ModifyTablePropertyOperationLog(db.getId(), table.getId(), table.getName(), properties); editLog.logModifyReplicationNum(info); - LOG.debug("modify table[{}] replication num to {}", table.getName(), - properties.get(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM)); + if (LOG.isDebugEnabled()) { + LOG.debug("modify table[{}] replication num to {}", table.getName(), + properties.get(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM)); + } } // The caller need to hold the table write lock @@ -5379,8 +5391,10 @@ public class Env { Replica replica = tablet.getReplicaById(info.getReplicaId()); if (replica != null) { replica.setBad(true); - LOG.debug("get replica {} of tablet {} on backend {} to bad when replaying", info.getReplicaId(), - info.getTabletId(), info.getBackendId()); + if (LOG.isDebugEnabled()) { + LOG.debug("get replica {} of tablet {} on backend {} to bad when replaying", + info.getReplicaId(), info.getTabletId(), info.getBackendId()); + } } } finally { olapTable.writeUnlock(); @@ -5821,7 +5835,9 @@ public class Env { } private static void getTableMeta(OlapTable olapTable, TGetMetaDBMeta dbMeta) { - LOG.debug("get table meta. table: {}", olapTable.getName()); + if (LOG.isDebugEnabled()) { + LOG.debug("get table meta. 
table: {}", olapTable.getName()); + } TGetMetaTableMeta tableMeta = new TGetMetaTableMeta(); olapTable.readLock(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/EsTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/EsTable.java index be517c7114..0926554f02 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/EsTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/EsTable.java @@ -255,7 +255,9 @@ public class EsTable extends Table { } } String md5 = DigestUtils.md5Hex(sb.toString()); - LOG.debug("get signature of es table {}: {}. signature string: {}", name, md5, sb.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("get signature of es table {}: {}. signature string: {}", name, md5, sb.toString()); + } return md5; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSet.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSet.java index c95f04a5f7..c06e0dcd50 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSet.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSet.java @@ -349,8 +349,10 @@ public class FunctionSet { if (LOG.isDebugEnabled()) { LOG.debug("templateFunction signature: {}, return type: {}", templateFunction.signatureString(), templateFunction.getReturnType()); - LOG.debug("requestFunction signature: {}, return type: {}", - requestFunction.signatureString(), requestFunction.getReturnType()); + if (LOG.isDebugEnabled()) { + LOG.debug("requestFunction signature: {}, return type: {}", + requestFunction.signatureString(), requestFunction.getReturnType()); + } } List newArgTypes = Lists.newArrayList(); List newRetType = Lists.newArrayList(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionUtil.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionUtil.java index 537176628b..e6c7e07357 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionUtil.java @@ -66,7 +66,9 @@ public class FunctionUtil { List existFuncs = name2Function.get(functionName); if (existFuncs == null) { if (ifExists) { - LOG.debug("function name does not exist: " + functionName); + if (LOG.isDebugEnabled()) { + LOG.debug("function name does not exist: " + functionName); + } return false; } throw new UserException("function name does not exist: " + functionName); @@ -82,7 +84,9 @@ public class FunctionUtil { } if (!isFound) { if (ifExists) { - LOG.debug("function does not exist: " + function); + if (LOG.isDebugEnabled()) { + LOG.debug("function does not exist: " + function); + } return false; } throw new UserException("function does not exist: " + function); @@ -113,7 +117,9 @@ public class FunctionUtil { for (Function existFunc : existFuncs) { if (function.compare(existFunc, Function.CompareMode.IS_IDENTICAL)) { if (ifNotExists) { - LOG.debug("function already exists"); + if (LOG.isDebugEnabled()) { + LOG.debug("function already exists"); + } return false; } throw new UserException("function already exists"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveBucketUtil.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveBucketUtil.java index 49823afabf..87bd11acc4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveBucketUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveBucketUtil.java @@ -127,8 +127,10 @@ public class HiveBucketUtil { int bucketId = bucket.getAsInt(); if (bucketId >= numBuckets) { valid = 
false; - LOG.debug("Hive table {} is corrupt for file {}(bucketId={}), skip bucket pruning.", - tableName, fileName, bucketId); + if (LOG.isDebugEnabled()) { + LOG.debug("Hive table {} is corrupt for file {}(bucketId={}), skip bucket pruning.", + tableName, fileName, bucketId); + } break; } if (buckets.contains(bucketId)) { @@ -136,13 +138,18 @@ public class HiveBucketUtil { } } else { valid = false; - LOG.debug("File {} is not a bucket file in hive table {}, skip bucket pruning.", fileName, tableName); + if (LOG.isDebugEnabled()) { + LOG.debug("File {} is not a bucket file in hive table {}, skip bucket pruning.", + fileName, tableName); + } break; } } if (valid) { - LOG.debug("{} / {} input splits in hive table {} after bucket pruning.", - result.size(), splits.size(), tableName); + if (LOG.isDebugEnabled()) { + LOG.debug("{} / {} input splits in hive table {} after bucket pruning.", + result.size(), splits.size(), tableName); + } return result; } else { return splits; diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java index f8ddcb7f0a..1b2fe78f30 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java @@ -835,5 +835,3 @@ public class HiveMetaStoreClientHelper { return conf; } } - - diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/InternalSchemaInitializer.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/InternalSchemaInitializer.java index 169e2fac80..3e3ab3f4ce 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/InternalSchemaInitializer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/InternalSchemaInitializer.java @@ -291,4 +291,3 @@ public class InternalSchemaInitializer extends Thread { } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcTable.java index 4e62fb1acd..c93f666107 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcTable.java @@ -354,7 +354,9 @@ public class JdbcTable extends Table { sb.append(checkSum); String md5 = DigestUtils.md5Hex(sb.toString()); - LOG.debug("get signature of odbc table {}: {}. signature string: {}", name, md5, sb.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("get signature of odbc table {}: {}. 
signature string: {}", name, md5, sb.toString()); + } return md5; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/MaterializedIndexMeta.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/MaterializedIndexMeta.java index 82a49b1490..df682eee92 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/MaterializedIndexMeta.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/MaterializedIndexMeta.java @@ -345,8 +345,10 @@ public class MaterializedIndexMeta implements Writable, GsonPostProcessable { maxColUniqueId = Column.COLUMN_UNIQUE_ID_INIT_VALUE; this.schema.forEach(column -> { column.setUniqueId(incAndGetMaxColUniqueId()); - LOG.debug("indexId: {}, column:{}, uniqueId:{}", - indexId, column, column.getUniqueId()); + if (LOG.isDebugEnabled()) { + LOG.debug("indexId: {}, column:{}, uniqueId:{}", + indexId, column, column.getUniqueId()); + } }); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/MysqlTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/MysqlTable.java index 97d75251fb..15e39adf82 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/MysqlTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/MysqlTable.java @@ -233,7 +233,9 @@ public class MysqlTable extends Table { sb.append(mysqlTableName); sb.append(getCharset()); String md5 = DigestUtils.md5Hex(sb.toString()); - LOG.debug("get signature of mysql table {}: {}. signature string: {}", name, md5, sb.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("get signature of mysql table {}: {}. signature string: {}", name, md5, sb.toString()); + } return md5; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcCatalogResource.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcCatalogResource.java index a3e11387e4..152db3a176 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcCatalogResource.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcCatalogResource.java @@ -140,22 +140,30 @@ public class OdbcCatalogResource extends Resource { try { // table name adler32.update(name.getBytes(charsetName)); - LOG.debug("signature. view name: {}", name); + if (LOG.isDebugEnabled()) { + LOG.debug("signature. view name: {}", name); + } // type adler32.update(type.name().getBytes(charsetName)); - LOG.debug("signature. view type: {}", type.name()); + if (LOG.isDebugEnabled()) { + LOG.debug("signature. view type: {}", type.name()); + } // configs for (Map.Entry config : configs.entrySet()) { adler32.update(config.getKey().getBytes(charsetName)); adler32.update(config.getValue().getBytes(charsetName)); - LOG.debug("signature. view config: {}", config); + if (LOG.isDebugEnabled()) { + LOG.debug("signature. 
view config: {}", config); + } } } catch (UnsupportedEncodingException e) { LOG.error("encoding error", e); return -1; } - LOG.debug("signature: {}", Math.abs((int) adler32.getValue())); + if (LOG.isDebugEnabled()) { + LOG.debug("signature: {}", Math.abs((int) adler32.getValue())); + } return Math.abs((int) adler32.getValue()); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcTable.java index 6cd194b1eb..76444b03de 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcTable.java @@ -416,7 +416,9 @@ public class OdbcTable extends Table { sb.append(extraParam); } String md5 = DigestUtils.md5Hex(sb.toString()); - LOG.debug("get signature of odbc table {}: {}. signature string: {}", name, md5, sb.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("get signature of odbc table {}: {}. signature string: {}", name, md5, sb.toString()); + } return md5; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java index ee8b0830ef..ea90ddcca3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java @@ -422,7 +422,9 @@ public class OlapTable extends Table implements MTMVRelatedTableIf { // Column maybe renamed, rebuild the column name map indexMeta.initColumnNameMap(); } - LOG.debug("after rebuild full schema. table {}, schema size: {}", id, fullSchema.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("after rebuild full schema. table {}, schema size: {}", id, fullSchema.size()); + } } public boolean deleteIndexInfo(String indexName) { @@ -1356,7 +1358,9 @@ public class OlapTable extends Table implements MTMVRelatedTableIf { } String md5 = DigestUtils.md5Hex(sb.toString()); - LOG.debug("get signature of table {}: {}. signature string: {}", name, md5, sb.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("get signature of table {}: {}. 
signature string: {}", name, md5, sb.toString()); + } return md5; } @@ -1595,7 +1599,9 @@ public class OlapTable extends Table implements MTMVRelatedTableIf { } for (MaterializedIndex deleteIndex : shadowIndex) { - LOG.debug("copied table delete shadow index : {}", deleteIndex.getId()); + if (LOG.isDebugEnabled()) { + LOG.debug("copied table delete shadow index : {}", deleteIndex.getId()); + } copied.deleteIndexInfo(copied.getIndexNameById(deleteIndex.getId())); } copied.setState(OlapTableState.NORMAL); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java index 243170cad4..b7dcba3bf9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java @@ -288,7 +288,9 @@ public class PartitionInfo implements Writable { public ReplicaAllocation getReplicaAllocation(long partitionId) { if (!idToReplicaAllocation.containsKey(partitionId)) { - LOG.debug("failed to get replica allocation for partition: {}", partitionId); + if (LOG.isDebugEnabled()) { + LOG.debug("failed to get replica allocation for partition: {}", partitionId); + } return ReplicaAllocation.DEFAULT_ALLOCATION; } return idToReplicaAllocation.get(partitionId); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Replica.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Replica.java index 9cc4c8d9b8..0dad361202 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Replica.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Replica.java @@ -448,9 +448,11 @@ public class Replica implements Writable { // TODO: this case is unknown, add log to observe if (this.version > lastFailedVersion && lastFailedVersion > 0) { - LOG.debug("current version {} is larger than last failed version {}, " - + "maybe a fatal error or be report version, print a stack here ", - this.version, lastFailedVersion, new Exception()); + if (LOG.isDebugEnabled()) { + LOG.debug("current version {} is larger than last failed version {}, " + + "maybe a fatal error or be report version, print a stack here ", + this.version, lastFailedVersion, new Exception()); + } } if (lastFailedVersion != this.lastFailedVersion) { @@ -519,8 +521,10 @@ public class Replica implements Writable { } if (this.version < expectedVersion) { - LOG.debug("replica version does not catch up with version: {}. replica: {}", - expectedVersion, this); + if (LOG.isDebugEnabled()) { + LOG.debug("replica version does not catch up with version: {}. replica: {}", + expectedVersion, this); + } return false; } return true; diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/S3Resource.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/S3Resource.java index b8ef318077..8c7373dde6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/S3Resource.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/S3Resource.java @@ -91,7 +91,9 @@ public class S3Resource extends Resource { S3Properties.requiredS3PingProperties(properties); // default need check resource conf valid, so need fix ut and regression case boolean needCheck = isNeedCheck(properties); - LOG.debug("s3 info need check validity : {}", needCheck); + if (LOG.isDebugEnabled()) { + LOG.debug("s3 info need check validity : {}", needCheck); + } // the endpoint for ping need add uri scheme. 
String pingEndpoint = properties.get(S3Properties.ENDPOINT); @@ -169,7 +171,9 @@ public class S3Resource extends Resource { // compatible with old version, Need convert if modified properties map uses old properties. S3Properties.convertToStdProperties(properties); boolean needCheck = isNeedCheck(properties); - LOG.debug("s3 info need check validity : {}", needCheck); + if (LOG.isDebugEnabled()) { + LOG.debug("s3 info need check validity : {}", needCheck); + } if (needCheck) { S3Properties.requiredS3PingProperties(this.properties); Map changedProperties = new HashMap<>(this.properties); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Table.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Table.java index a5f2a9e55e..d08b5e8aa1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Table.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Table.java @@ -583,7 +583,9 @@ public abstract class Table extends MetaObject implements Writable, TableIf { OlapTable olapTable = (OlapTable) this; if (Env.getCurrentColocateIndex().isColocateTable(olapTable.getId())) { - LOG.debug("table {} is a colocate table, skip tablet checker.", name); + if (LOG.isDebugEnabled()) { + LOG.debug("table {} is a colocate table, skip tablet checker.", name); + } return false; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/TableIf.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/TableIf.java index d0985aa692..54406d0d44 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/TableIf.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/TableIf.java @@ -502,4 +502,3 @@ public interface TableIf { return Sets.newHashSet(); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java index 8137dddae1..b5b8cc6166 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java @@ -135,7 +135,9 @@ public class TabletInvertedIndex { long stamp = readLock(); long start = System.currentTimeMillis(); try { - LOG.debug("begin to do tablet diff with backend[{}]. num: {}", backendId, backendTablets.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("begin to do tablet diff with backend[{}]. num: {}", backendId, backendTablets.size()); + } Map replicaMetaWithBackend = backingReplicaMetaTable.row(backendId); if (replicaMetaWithBackend != null) { taskPool.submit(() -> { @@ -241,8 +243,10 @@ public class TabletInvertedIndex { synchronized (transactionsToClear) { transactionsToClear.put(transactionId, tabletMeta.getPartitionId()); } - LOG.debug("transaction id [{}] is not valid any more, " - + "clear it from backend [{}]", transactionId, backendId); + if (LOG.isDebugEnabled()) { + LOG.debug("transaction id [{}] is not valid any more, " + + "clear it from backend [{}]", transactionId, backendId); + } } else if (transactionState.getTransactionStatus() == TransactionStatus.VISIBLE) { TableCommitInfo tableCommitInfo = transactionState.getTableCommitInfo(tabletMeta.getTableId()); @@ -322,7 +326,9 @@ public class TabletInvertedIndex { } else { // 2. 
(meta - be) // may need delete from meta - LOG.debug("backend[{}] does not report tablet[{}-{}]", backendId, tabletId, tabletMeta); + if (LOG.isDebugEnabled()) { + LOG.debug("backend[{}] does not report tablet[{}-{}]", backendId, tabletId, tabletMeta); + } synchronized (tabletDeleteFromMeta) { tabletDeleteFromMeta.put(tabletMeta.getDbId(), tabletId); } @@ -535,10 +541,14 @@ public class TabletInvertedIndex { tabletMetaMap.put(tabletId, tabletMeta); if (!tabletMetaTable.contains(tabletMeta.getPartitionId(), tabletMeta.getIndexId())) { tabletMetaTable.put(tabletMeta.getPartitionId(), tabletMeta.getIndexId(), tabletMeta); - LOG.debug("add tablet meta: {}", tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("add tablet meta: {}", tabletId); + } } - LOG.debug("add tablet: {}", tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("add tablet: {}", tabletId); + } } finally { writeUnlock(stamp); } @@ -560,10 +570,14 @@ public class TabletInvertedIndex { TabletMeta tabletMeta = tabletMetaMap.remove(tabletId); if (tabletMeta != null) { tabletMetaTable.remove(tabletMeta.getPartitionId(), tabletMeta.getIndexId()); - LOG.debug("delete tablet meta: {}", tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("delete tablet meta: {}", tabletId); + } } - LOG.debug("delete tablet: {}", tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("delete tablet: {}", tabletId); + } } finally { writeUnlock(stamp); } @@ -578,8 +592,10 @@ public class TabletInvertedIndex { replicaMetaTable.put(tabletId, replica.getBackendId(), replica); replicaToTabletMap.put(replica.getId(), tabletId); backingReplicaMetaTable.put(replica.getBackendId(), tabletId, replica); - LOG.debug("add replica {} of tablet {} in backend {}", - replica.getId(), tabletId, replica.getBackendId()); + if (LOG.isDebugEnabled()) { + LOG.debug("add replica {} of tablet {} in backend {}", + replica.getId(), tabletId, replica.getBackendId()); + } } finally { writeUnlock(stamp); } @@ -595,8 +611,10 @@ public class TabletInvertedIndex { replicaToTabletMap.remove(replica.getId()); replicaMetaTable.remove(tabletId, backendId); backingReplicaMetaTable.remove(backendId, tabletId); - LOG.debug("delete replica {} of tablet {} in backend {}", - replica.getId(), tabletId, backendId); + if (LOG.isDebugEnabled()) { + LOG.debug("delete replica {} of tablet {} in backend {}", + replica.getId(), tabletId, backendId); + } } else { // this may happen when fe restart after tablet is empty(bug cause) // add log instead of assertion to observe @@ -780,7 +798,9 @@ public class TabletInvertedIndex { partitionReplicasInfoMaps.put(medium, partitionReplicasInfo); } catch (IllegalStateException | NullPointerException e) { // If the tablet or be has some problem, don't count in - LOG.debug(e.getMessage()); + if (LOG.isDebugEnabled()) { + LOG.debug(e.getMessage()); + } } } } finally { diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletStatMgr.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletStatMgr.java index bc66b97c5e..e2b65a95f6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletStatMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletStatMgr.java @@ -63,8 +63,10 @@ public class TabletStatMgr extends MasterDaemon { address = new TNetworkAddress(backend.getHost(), backend.getBePort()); client = ClientPool.backendPool.borrowObject(address); TTabletStatResult result = client.getTabletStat(); - LOG.debug("get tablet stat from backend: {}, num: {}", backend.getId(), - result.getTabletsStatsSize()); + if 
(LOG.isDebugEnabled()) { + LOG.debug("get tablet stat from backend: {}, num: {}", backend.getId(), + result.getTabletsStatsSize()); + } updateTabletStat(backend.getId(), result); ok = true; } catch (Throwable e) { @@ -82,8 +84,10 @@ public class TabletStatMgr extends MasterDaemon { } }); }).join(); - LOG.debug("finished to get tablet stat of all backends. cost: {} ms", - (System.currentTimeMillis() - start)); + if (LOG.isDebugEnabled()) { + LOG.debug("finished to get tablet stat of all backends. cost: {} ms", + (System.currentTimeMillis() - start)); + } // after update replica in all backends, update index row num start = System.currentTimeMillis(); @@ -127,8 +131,10 @@ public class TabletStatMgr extends MasterDaemon { index.setRowCount(indexRowCount); } // end for indices } // end for partitions - LOG.debug("finished to set row num for table: {} in database: {}", - table.getName(), db.getFullName()); + if (LOG.isDebugEnabled()) { + LOG.debug("finished to set row num for table: {} in database: {}", + table.getName(), db.getFullName()); + } } finally { table.writeUnlock(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/View.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/View.java index ff7c5ae3fc..612d29a995 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/View.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/View.java @@ -220,7 +220,9 @@ public class View extends Table { sb.append(inlineViewDef); sb.append(sqlMode); String md5 = DigestUtils.md5Hex(sb.toString()); - LOG.debug("get signature of view {}: {}. signature string: {}", name, md5, sb.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("get signature of view {}: {}. signature string: {}", name, md5, sb.toString()); + } return md5; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/authorizer/ranger/RangerAccessController.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/authorizer/ranger/RangerAccessController.java index 1712b912cd..30b42e1997 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/authorizer/ranger/RangerAccessController.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/authorizer/ranger/RangerAccessController.java @@ -41,13 +41,17 @@ public abstract class RangerAccessController implements CatalogAccessController } if (result.getIsAllowed()) { - LOG.debug("request {} match policy {}", request, result.getPolicyId()); + if (LOG.isDebugEnabled()) { + LOG.debug("request {} match policy {}", request, result.getPolicyId()); + } return true; } else { - LOG.debug(String.format( - "Permission denied: user [%s] does not have privilege for [%s] command on [%s]", - result.getAccessRequest().getUser(), name, - result.getAccessRequest().getResource().getAsString())); + if (LOG.isDebugEnabled()) { + LOG.debug(String.format( + "Permission denied: user [%s] does not have privilege for [%s] command on [%s]", + result.getAccessRequest().getUser(), name, + result.getAccessRequest().getResource().getAsString())); + } return false; } } @@ -56,9 +60,13 @@ public abstract class RangerAccessController implements CatalogAccessController public static void checkRequestResults(Collection results, String name) throws AuthorizationException { for (RangerAccessResult result : results) { - LOG.debug("request {} match policy {}", result.getAccessRequest(), result.getPolicyId()); + if (LOG.isDebugEnabled()) { + LOG.debug("request {} match policy {}", result.getAccessRequest(), result.getPolicyId()); + } if (!result.getIsAllowed()) { - 
LOG.debug(result.getReason()); + if (LOG.isDebugEnabled()) { + LOG.debug(result.getReason()); + } throw new AuthorizationException(String.format( "Permission denied: user [%s] does not have privilege for [%s] command on [%s]", result.getAccessRequest().getUser(), name, diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/authorizer/ranger/doris/RangerDorisAccessController.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/authorizer/ranger/doris/RangerDorisAccessController.java index 44e2686fe3..1a4c7aa6b5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/authorizer/ranger/doris/RangerDorisAccessController.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/authorizer/ranger/doris/RangerDorisAccessController.java @@ -110,8 +110,10 @@ public class RangerDorisAccessController extends RangerAccessController { request.setResource(resource); RangerAccessResult result = dorisPlugin.isAccessAllowed(request); - LOG.debug(String.format("maskType: %s, maskTypeDef: %s, maskedValue: %s", result.getMaskType(), - result.getMaskTypeDef(), result.getMaskedValue())); + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("maskType: %s, maskTypeDef: %s, maskedValue: %s", result.getMaskType(), + result.getMaskTypeDef(), result.getMaskedValue())); + } } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/authorizer/ranger/hive/RangerHiveAccessController.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/authorizer/ranger/hive/RangerHiveAccessController.java index bbe3273577..b701626317 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/authorizer/ranger/hive/RangerHiveAccessController.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/authorizer/ranger/hive/RangerHiveAccessController.java @@ -118,8 +118,10 @@ public class RangerHiveAccessController extends RangerAccessController { request.setResource(resource); RangerAccessResult result = hivePlugin.isAccessAllowed(request, auditHandler); - LOG.debug(String.format("maskType: %s, maskTypeDef: %s, maskedValue: %s", result.getMaskType(), - result.getMaskTypeDef(), result.getMaskedValue())); + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("maskType: %s, maskTypeDef: %s, maskedValue: %s", result.getMaskType(), + result.getMaskTypeDef(), result.getMaskedValue())); + } } private HiveAccessType convertToAccessType(PrivPredicate predicate) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalDatabase.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalDatabase.java index f586ea7ed8..3fbe5e968e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalDatabase.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalDatabase.java @@ -64,7 +64,9 @@ public class HMSExternalDatabase extends ExternalDatabase { @Override public void dropTable(String tableName) { - LOG.debug("replayDropTableFromEvent [{}]", tableName); + if (LOG.isDebugEnabled()) { + LOG.debug("replayDropTableFromEvent [{}]", tableName); + } Long tableId = tableNameToId.remove(tableName); if (tableId == null) { LOG.warn("replayDropTableFromEvent [{}] failed", tableName); @@ -75,7 +77,9 @@ public class HMSExternalDatabase extends ExternalDatabase { @Override public void createTable(String tableName, long tableId) { - LOG.debug("create table [{}]", tableName); + if (LOG.isDebugEnabled()) { + LOG.debug("create table [{}]", tableName); + } tableNameToId.put(tableName, tableId); HMSExternalTable table = 
getExternalTable(tableName, tableId, extCatalog); idToTbl.put(tableId, table); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalTable.java index c2c302a889..fbc804192a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/external/HMSExternalTable.java @@ -250,7 +250,9 @@ public class HMSExternalTable extends ExternalTable implements MTMVRelatedTableI // NotSupportedException is required by some operation. throw new NotSupportedException("Unsupported hive input format: " + inputFileFormat); } - LOG.debug("hms table {} is {} with file format: {}", name, remoteTable.getTableType(), inputFileFormat); + if (LOG.isDebugEnabled()) { + LOG.debug("hms table {} is {} with file format: {}", name, remoteTable.getTableType(), inputFileFormat); + } return true; } @@ -326,7 +328,9 @@ public class HMSExternalTable extends ExternalTable implements MTMVRelatedTableI makeSureInitialized(); long rowCount = getRowCountFromExternalSource(false); if (rowCount == -1) { - LOG.debug("Will estimate row count from file list."); + if (LOG.isDebugEnabled()) { + LOG.debug("Will estimate row count from file list."); + } rowCount = StatisticsUtil.getRowCountFromFileList(this); } return rowCount; @@ -342,7 +346,9 @@ public class HMSExternalTable extends ExternalTable implements MTMVRelatedTableI rowCount = StatisticsUtil.getIcebergRowCount(this); break; default: - LOG.debug("getRowCount for dlaType {} is not supported.", dlaType); + if (LOG.isDebugEnabled()) { + LOG.debug("getRowCount for dlaType {} is not supported.", dlaType); + } rowCount = -1; } return rowCount; @@ -395,14 +401,18 @@ public class HMSExternalTable extends ExternalTable implements MTMVRelatedTableI } public String getViewExpandedText() { - LOG.debug("View expanded text of hms table [{}.{}.{}] : {}", - this.getCatalog().getName(), this.getDbName(), this.getName(), remoteTable.getViewExpandedText()); + if (LOG.isDebugEnabled()) { + LOG.debug("View expanded text of hms table [{}.{}.{}] : {}", + this.getCatalog().getName(), this.getDbName(), this.getName(), remoteTable.getViewExpandedText()); + } return remoteTable.getViewExpandedText(); } public String getViewOriginalText() { - LOG.debug("View original text of hms table [{}.{}.{}] : {}", - this.getCatalog().getName(), this.getDbName(), this.getName(), remoteTable.getViewOriginalText()); + if (LOG.isDebugEnabled()) { + LOG.debug("View original text of hms table [{}.{}.{}] : {}", + this.getCatalog().getName(), this.getDbName(), this.getName(), remoteTable.getViewOriginalText()); + } return remoteTable.getViewOriginalText(); } @@ -513,7 +523,9 @@ public class HMSExternalTable extends ExternalTable implements MTMVRelatedTableI TableStatsMeta tableStats = Env.getCurrentEnv().getAnalysisManager().findTableStatsStatus(id); if (tableStats != null) { long rowCount = tableStats.rowCount; - LOG.debug("Estimated row count for db {} table {} is {}.", dbName, name, rowCount); + if (LOG.isDebugEnabled()) { + LOG.debug("Estimated row count for db {} table {} is {}.", dbName, name, rowCount); + } return rowCount; } @@ -530,7 +542,9 @@ public class HMSExternalTable extends ExternalTable implements MTMVRelatedTableI TableStatsMeta tableStats = Env.getCurrentEnv().getAnalysisManager().findTableStatsStatus(id); if (tableStats != null) { long rowCount = tableStats.rowCount; - LOG.debug("Estimated row count for db {} 
table {} is {}.", dbName, name, rowCount); + if (LOG.isDebugEnabled()) { + LOG.debug("Estimated row count for db {} table {} is {}.", dbName, name, rowCount); + } return rowCount; } @@ -567,7 +581,9 @@ public class HMSExternalTable extends ExternalTable implements MTMVRelatedTableI } } } - LOG.debug("get {} partition columns for table: {}", partitionColumns.size(), name); + if (LOG.isDebugEnabled()) { + LOG.debug("get {} partition columns for table: {}", partitionColumns.size(), name); + } } public boolean hasColumnStatistics(String colName) { @@ -615,8 +631,10 @@ public class HMSExternalTable extends ExternalTable implements MTMVRelatedTableI private Optional getHiveColumnStats(String colName) { List tableStats = getHiveTableColumnStats(Lists.newArrayList(colName)); if (tableStats == null || tableStats.isEmpty()) { - LOG.debug(String.format("No table stats found in Hive metastore for column %s in table %s.", - colName, name)); + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("No table stats found in Hive metastore for column %s in table %s.", + colName, name)); + } return Optional.empty(); } @@ -638,7 +656,9 @@ public class HMSExternalTable extends ExternalTable implements MTMVRelatedTableI try { setStatData(column, data, columnStatisticBuilder, count); } catch (AnalysisException e) { - LOG.debug(e); + if (LOG.isDebugEnabled()) { + LOG.debug(e); + } return Optional.empty(); } } @@ -713,7 +733,9 @@ public class HMSExternalTable extends ExternalTable implements MTMVRelatedTableI } } } else { - LOG.debug(String.format("Not suitable data type for column %s", col.getName())); + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("Not suitable data type for column %s", col.getName())); + } throw new RuntimeException("Not supported data type."); } builder.setNdv(ndv); @@ -896,5 +918,3 @@ public class HMSExternalTable extends ExternalTable implements MTMVRelatedTableI return true; } } - - diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/external/IcebergExternalDatabase.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/external/IcebergExternalDatabase.java index 1b5cd805a0..86c70a6fc7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/external/IcebergExternalDatabase.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/external/IcebergExternalDatabase.java @@ -50,7 +50,9 @@ public class IcebergExternalDatabase extends ExternalDatabase avgClusterUsedCapacityPercentMap, @@ -344,9 +346,11 @@ public class BackendLoadStatistic { loadScoreMap.put(medium, loadScore); - LOG.debug("backend {}, medium: {}, capacity coefficient: {}, replica coefficient: {}, load score: {}", - beId, medium, loadScore.capacityCoefficient, loadScore.getReplicaNumCoefficient(), - loadScore.score); + if (LOG.isDebugEnabled()) { + LOG.debug("backend {}, medium: {}, capacity coefficient: {}, replica coefficient: {}, load score: {}", + beId, medium, loadScore.capacityCoefficient, loadScore.getReplicaNumCoefficient(), + loadScore.score); + } } } @@ -434,8 +438,10 @@ public class BackendLoadStatistic { RootPathLoadStatistic pathStatistic = pathStatistics.get(i); // if this is a supplement task, ignore the storage medium if (!isSupplement && medium != null && pathStatistic.getStorageMedium() != medium) { - LOG.debug("backend {} path {}'s storage medium {} is not {} storage medium, actual: {}", - beId, pathStatistic.getPath(), pathStatistic.getStorageMedium(), medium); + if (LOG.isDebugEnabled()) { + LOG.debug("backend {} path {}'s storage medium {} is not {} storage medium, actual: {}", 
+ beId, pathStatistic.getPath(), pathStatistic.getStorageMedium(), medium); + } continue; } @@ -500,12 +506,14 @@ public class BackendLoadStatistic { + Math.abs(currentDestPathScore - avgUsedPercent); double newDiff = Math.abs(newSrcPathScore - avgUsedPercent) + Math.abs(newDestPathScore - avgUsedPercent); - LOG.debug("after migrate {}(size: {}) from {} to {}, medium: {}, the load score changed." - + " src: {} -> {}, dest: {}->{}, average score: {}. current diff: {}, new diff: {}," - + " more balanced: {}", - tabletId, tabletSize, srcPath, destPath, medium, currentSrcPathScore, newSrcPathScore, - currentDestPathScore, newDestPathScore, avgUsedPercent, currentDiff, newDiff, - (newDiff < currentDiff)); + if (LOG.isDebugEnabled()) { + LOG.debug("after migrate {}(size: {}) from {} to {}, medium: {}, the load score changed." + + " src: {} -> {}, dest: {}->{}, average score: {}. current diff: {}, new diff: {}," + + " more balanced: {}", + tabletId, tabletSize, srcPath, destPath, medium, currentSrcPathScore, newSrcPathScore, + currentDestPathScore, newDestPathScore, avgUsedPercent, currentDiff, newDiff, + (newDiff < currentDiff)); + } return newDiff < currentDiff; } @@ -541,8 +549,10 @@ public class BackendLoadStatistic { } } - LOG.debug("after adjust, backend {} path classification low/mid/high: {}/{}/{}", - beId, low.size(), mid.size(), high.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("after adjust, backend {} path classification low/mid/high: {}/{}/{}", + beId, low.size(), mid.size(), high.size()); + } } public void getPathStatisticByClass(List low, @@ -562,8 +572,10 @@ public class BackendLoadStatistic { } } - LOG.debug("after adjust, backend {} path classification low/mid/high: {}/{}/{}", - beId, low.size(), mid.size(), high.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("after adjust, backend {} path classification low/mid/high: {}/{}/{}", + beId, low.size(), mid.size(), high.size()); + } } public void incrPathsCopingSize(Map pathsCopingSize) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/BeLoadRebalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/BeLoadRebalancer.java index b40d7f7a51..67b5a6a602 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/BeLoadRebalancer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/BeLoadRebalancer.java @@ -87,7 +87,9 @@ public class BeLoadRebalancer extends Rebalancer { boolean isUrgent = clusterStat.getLowHighBEsWithIsUrgent(lowBEs, highBEs, medium); if (lowBEs.isEmpty() && highBEs.isEmpty()) { - LOG.debug("cluster is balance with medium: {}. skip", medium); + if (LOG.isDebugEnabled()) { + LOG.debug("cluster is balance with medium: {}. 
skip", medium); + } return alternativeTablets; } @@ -173,8 +175,10 @@ public class BeLoadRebalancer extends Rebalancer { } } - LOG.debug("high be {}, medium: {}, path high: {}, remainingPaths: {}, chose high disk: {}", - beStat.getBeId(), medium, pathHigh, remainingPaths, choseHighDisk); + if (LOG.isDebugEnabled()) { + LOG.debug("high be {}, medium: {}, path high: {}, remainingPaths: {}, chose high disk: {}", + beStat.getBeId(), medium, pathHigh, remainingPaths, choseHighDisk); + } if (remainingPaths.isEmpty()) { continue; @@ -373,8 +377,10 @@ public class BeLoadRebalancer extends Rebalancer { BalanceStatus bs = beStat.isFit(tabletCtx.getTabletSize(), tabletCtx.getStorageMedium(), null, false /* not supplement */); if (bs != BalanceStatus.OK) { - LOG.debug("tablet not fit in BE {}, reason: {}, {}", - beStat.getBeId(), bs.getErrMsgs(), isUrgentInfo); + if (LOG.isDebugEnabled()) { + LOG.debug("tablet not fit in BE {}, reason: {}, {}", + beStat.getBeId(), bs.getErrMsgs(), isUrgentInfo); + } continue; } @@ -390,7 +396,9 @@ public class BeLoadRebalancer extends Rebalancer { PathSlot slot = backendsWorkingSlots.get(beStat.getBeId()); if (slot == null) { - LOG.debug("BE does not have slot: {}", beStat.getBeId()); + if (LOG.isDebugEnabled()) { + LOG.debug("BE does not have slot: {}", beStat.getBeId()); + } continue; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java index 89dd13a5c0..945183cc48 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java @@ -527,7 +527,9 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon { counter.unhealthyTabletNum++; unstableReason = String.format("get unhealthy tablet %d in colocate table." 
+ " status: %s", tablet.getId(), st); - LOG.debug(unstableReason); + if (LOG.isDebugEnabled()) { + LOG.debug(unstableReason); + } if (!tablet.readyToBeRepaired(infoService, Priority.NORMAL)) { counter.tabletNotReady++; @@ -868,8 +870,10 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon { resultPaths.clear(); BalanceStatus st = beStat.isFit(bucketDataSize, null, resultPaths, false); if (!st.ok()) { - LOG.debug("backend {} is unable to fit in group {}, tablet order idx {}, data size {}", - destBeId, groupId, bucketIndex, bucketDataSize); + if (LOG.isDebugEnabled()) { + LOG.debug("backend {} is unable to fit in group {}, tablet order idx {}, data size {}", + destBeId, groupId, bucketIndex, bucketDataSize); + } continue; } @@ -896,9 +900,11 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon { long oldDestThisGroup = lowBackend.getValue(); int oldSrcBucketNum = globalColocateStatistic.getBackendTotalBucketNum(srcBeId); int oldDestBucketNum = globalColocateStatistic.getBackendTotalBucketNum(destBeId); - LOG.debug("OneMove: group {}, src {}, this group {}, all group {}, dest {}, this group {}, " - + "all group {}", groupId, srcBeId, oldSrcThisGroup, oldSrcBucketNum, destBeId, - oldDestThisGroup, oldDestBucketNum); + if (LOG.isDebugEnabled()) { + LOG.debug("OneMove: group {}, src {}, this group {}, all group {}, dest {}, this group {}, " + + "all group {}", groupId, srcBeId, oldSrcThisGroup, oldSrcBucketNum, destBeId, + oldDestThisGroup, oldDestBucketNum); + } Preconditions.checkState( globalColocateStatistic.moveTablet(groupId, tabletOrderIdx, srcBeId, destBeId)); Preconditions.checkState(oldSrcBucketNum - 1 diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/DiskRebalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/DiskRebalancer.java index 631933b069..ae2fe44a7b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/DiskRebalancer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/DiskRebalancer.java @@ -150,8 +150,10 @@ public class DiskRebalancer extends Rebalancer { // first we should check if mid backends is available. // if all mid backends is not available, we should not start balance if (midBEs.stream().noneMatch(BackendLoadStatistic::isAvailable)) { - LOG.debug("all mid load backends is dead: {} with medium: {}. skip", - midBEs.stream().mapToLong(BackendLoadStatistic::getBeId).toArray(), medium); + if (LOG.isDebugEnabled()) { + LOG.debug("all mid load backends is dead: {} with medium: {}. 
skip", + midBEs.stream().mapToLong(BackendLoadStatistic::getBeId).toArray(), medium); + } return alternativeTablets; } @@ -315,7 +317,9 @@ public class DiskRebalancer extends Rebalancer { // check src slot PathSlot slot = backendsWorkingSlots.get(replica.getBackendId()); if (slot == null) { - LOG.debug("BE does not have slot: {}", replica.getBackendId()); + if (LOG.isDebugEnabled()) { + LOG.debug("BE does not have slot: {}", replica.getBackendId()); + } throw new SchedException(Status.UNRECOVERABLE, "unable to take src slot"); } long pathHash = slot.takeBalanceSlot(replica.getPathHash()); @@ -347,7 +351,9 @@ public class DiskRebalancer extends Rebalancer { BalanceStatus bs; if ((bs = beStat.isFit(tabletCtx.getTabletSize(), tabletCtx.getStorageMedium(), availPaths, false /* not supplement */)) != BalanceStatus.OK) { - LOG.debug("tablet not fit in BE {}, reason: {}", beStat.getBeId(), bs.getErrMsgs()); + if (LOG.isDebugEnabled()) { + LOG.debug("tablet not fit in BE {}, reason: {}", beStat.getBeId(), bs.getErrMsgs()); + } throw new SchedException(Status.UNRECOVERABLE, "tablet not fit in BE"); } // Select a low load path as destination. @@ -359,12 +365,16 @@ public class DiskRebalancer extends Rebalancer { } // check if avail path is low path if (!pathLow.contains(stat.getPathHash())) { - LOG.debug("the path :{} is not low load", stat.getPathHash()); + if (LOG.isDebugEnabled()) { + LOG.debug("the path :{} is not low load", stat.getPathHash()); + } continue; } if (!beStat.isMoreBalanced(tabletCtx.getSrcPathHash(), stat.getPathHash(), tabletCtx.getTabletId(), tabletCtx.getTabletSize(), tabletCtx.getStorageMedium())) { - LOG.debug("the path :{} can not make more balance", stat.getPathHash()); + if (LOG.isDebugEnabled()) { + LOG.debug("the path :{} can not make more balance", stat.getPathHash()); + } continue; } long destPathHash = slot.takeBalanceSlot(stat.getPathHash()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/LoadStatisticForTag.java b/fe/fe-core/src/main/java/org/apache/doris/clone/LoadStatisticForTag.java index faf9704a90..0b8aac65d2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/LoadStatisticForTag.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/LoadStatisticForTag.java @@ -244,8 +244,10 @@ public class LoadStatisticForTag { } } - LOG.debug("classify backend by load. medium: {} avg load score: {}. low/mid/high: {}/{}/{}", - medium, avgLoadScore, lowCounter, midCounter, highCounter); + if (LOG.isDebugEnabled()) { + LOG.debug("classify backend by load. medium: {} avg load score: {}. low/mid/high: {}/{}/{}", + medium, avgLoadScore, lowCounter, midCounter, highCounter); + } } private void classifyBackendByMaxDiskUsage(TStorageMedium medium) { @@ -393,12 +395,14 @@ public class LoadStatisticForTag { double newDiff = Math.abs(newSrcBeScore.score - avgLoadScoreMap.get(medium)) + Math.abs(newDestBeScore.score - avgLoadScoreMap.get(medium)); - LOG.debug("after migrate {}(size: {}) from {} to {}, medium: {}, the load score changed." - + " src: {} -> {}, dest: {}->{}, average score: {}. current diff: {}, new diff: {}," - + " more balanced: {}", - tabletId, tabletSize, srcBeId, destBeId, medium, currentSrcBeScore, newSrcBeScore.score, - currentDestBeScore, newDestBeScore.score, avgLoadScoreMap.get(medium), currentDiff, newDiff, - (newDiff < currentDiff)); + if (LOG.isDebugEnabled()) { + LOG.debug("after migrate {}(size: {}) from {} to {}, medium: {}, the load score changed." + + " src: {} -> {}, dest: {}->{}, average score: {}. 
current diff: {}, new diff: {}," + + " more balanced: {}", + tabletId, tabletSize, srcBeId, destBeId, medium, currentSrcBeScore, newSrcBeScore.score, + currentDestBeScore, newDestBeScore.score, avgLoadScoreMap.get(medium), currentDiff, newDiff, + (newDiff < currentDiff)); + } return newDiff < currentDiff; } @@ -531,10 +535,12 @@ public class LoadStatisticForTag { resortBeStats.accept(lowBEs, true); resortBeStats.accept(highBEs, false); - LOG.debug("urgent backends' classification lowBe {}, highBe {}, medium: {}", - lowBEs.stream().map(BackendLoadStatistic::getBeId).collect(Collectors.toList()), - highBEs.stream().map(BackendLoadStatistic::getBeId).collect(Collectors.toList()), - medium); + if (LOG.isDebugEnabled()) { + LOG.debug("urgent backends' classification lowBe {}, highBe {}, medium: {}", + lowBEs.stream().map(BackendLoadStatistic::getBeId).collect(Collectors.toList()), + highBEs.stream().map(BackendLoadStatistic::getBeId).collect(Collectors.toList()), + medium); + } return true; } @@ -585,8 +591,10 @@ public class LoadStatisticForTag { sortBeStats(mid, medium); sortBeStats(high, medium); - LOG.debug("after adjust, backends' classification low/mid/high: {}/{}/{}, medium: {}", - low.size(), mid.size(), high.size(), medium); + if (LOG.isDebugEnabled()) { + LOG.debug("after adjust, backends' classification low/mid/high: {}/{}/{}, medium: {}", + low.size(), mid.size(), high.size(), medium); + } } public List getSortedBeLoadStats(TStorageMedium medium) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/MovesCacheMap.java b/fe/fe-core/src/main/java/org/apache/doris/clone/MovesCacheMap.java index b08dc72f77..0bd5c6d803 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/MovesCacheMap.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/MovesCacheMap.java @@ -64,8 +64,10 @@ public class MovesCacheMap { // Cyclical update the cache mapping, cuz the tag may be deleted, we should delete the corresponding cache too. public void updateMapping(Map statisticMap, long expireAfterAccessSecond) { if (expireAfterAccessSecond > 0 && lastExpireConfig != expireAfterAccessSecond) { - LOG.debug("Reset expireAfterAccess, last {} s, now {} s. Moves will be cleared.", - lastExpireConfig, expireAfterAccessSecond); + if (LOG.isDebugEnabled()) { + LOG.debug("Reset expireAfterAccess, last {} s, now {} s. Moves will be cleared.", + lastExpireConfig, expireAfterAccessSecond); + } cacheMap.clear(); lastExpireConfig = expireAfterAccessSecond; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java index a730170b36..5b5221cad6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java @@ -106,13 +106,17 @@ public class PartitionRebalancer extends Rebalancer { // The balancing tasks of other cluster or medium might have failed. We use the upper limit value // `total num of in-progress moves` to avoid useless selections. if (movesCacheMap.size() > Config.max_balancing_tablets) { - LOG.debug("Total in-progress moves > {}", Config.max_balancing_tablets); + if (LOG.isDebugEnabled()) { + LOG.debug("Total in-progress moves > {}", Config.max_balancing_tablets); + } return Lists.newArrayList(); } NavigableSet skews = clusterBalanceInfo.partitionInfoBySkew.keySet(); - LOG.debug("Medium {}: peek max skew {}, assume {} in-progress moves are succeeded {}", medium, - skews.isEmpty() ? 
0 : skews.last(), movesInProgressList.size(), movesInProgressList); + if (LOG.isDebugEnabled()) { + LOG.debug("Medium {}: peek max skew {}, assume {} in-progress moves are succeeded {}", medium, + skews.isEmpty() ? 0 : skews.last(), movesInProgressList.size(), movesInProgressList); + } List moves = algo.getNextMoves(clusterBalanceInfo, Config.partition_rebalance_max_moves_num_per_selection); @@ -137,7 +141,9 @@ public class PartitionRebalancer extends Rebalancer { tabletCandidates.put(tabletId, tabletMeta); } } - LOG.debug("Find {} candidates for move {}", tabletCandidates.size(), move); + if (LOG.isDebugEnabled()) { + LOG.debug("Find {} candidates for move {}", tabletCandidates.size(), move); + } if (tabletCandidates.isEmpty()) { continue; } @@ -146,7 +152,9 @@ public class PartitionRebalancer extends Rebalancer { Random rand = new SecureRandom(); Object[] keys = tabletCandidates.keySet().toArray(); long pickedTabletId = (long) keys[rand.nextInt(keys.length)]; - LOG.debug("Picked tablet id for move {}: {}", move, pickedTabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("Picked tablet id for move {}: {}", move, pickedTabletId); + } TabletMeta tabletMeta = tabletCandidates.get(pickedTabletId); TabletSchedCtx tabletCtx = new TabletSchedCtx(TabletSchedCtx.Type.BALANCE, @@ -167,7 +175,9 @@ public class PartitionRebalancer extends Rebalancer { if (moves.isEmpty()) { // Balanced cluster should not print too much log messages, so we log it with level debug. - LOG.debug("Medium {}: cluster is balanced.", medium); + if (LOG.isDebugEnabled()) { + LOG.debug("Medium {}: cluster is balanced.", medium); + } } else { LOG.info("Medium {}: get {} moves, actually select {} alternative tablets to move. Tablets detail: {}", medium, moves.size(), alternativeTablets.size(), @@ -216,9 +226,11 @@ public class PartitionRebalancer extends Rebalancer { // If the move was completed, remove it if (moveIsComplete) { toDeleteKeys.add(move.tabletId); - LOG.debug("Move {} is completed. The cur dist: {}", move, - invertedIndex.getReplicasByTabletId(move.tabletId).stream() - .map(Replica::getBackendId).collect(Collectors.toList())); + if (LOG.isDebugEnabled()) { + LOG.debug("Move {} is completed. The cur dist: {}", move, + invertedIndex.getReplicasByTabletId(move.tabletId).stream() + .map(Replica::getBackendId).collect(Collectors.toList())); + } counterBalanceMoveSucceeded.incrementAndGet(); } } @@ -324,8 +336,10 @@ public class PartitionRebalancer extends Rebalancer { movesCacheMap.updateMapping(statisticMap, Config.partition_rebalance_move_expire_after_access); // Perform cache maintenance movesCacheMap.maintain(); - LOG.debug("Move succeeded/total :{}/{}, current {}", - counterBalanceMoveSucceeded.get(), counterBalanceMoveCreated.get(), movesCacheMap); + if (LOG.isDebugEnabled()) { + LOG.debug("Move succeeded/total :{}/{}, current {}", + counterBalanceMoveSucceeded.get(), counterBalanceMoveCreated.get(), movesCacheMap); + } } // Represents a concrete move of a tablet from one be to another. 
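The change applied throughout these files is the same mechanical guard. A minimal sketch of the before/after, assuming the Log4j2 Logger API used by the FE code (class name and message are illustrative, not taken from the patch):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    public class DebugGuardSketch {
        private static final Logger LOG = LogManager.getLogger(DebugGuardSketch.class);

        void report(long beId, double loadScore) {
            // Unguarded: the message arguments are evaluated (and here concatenated)
            // even when the DEBUG level is disabled.
            LOG.debug("backend " + beId + " load score: " + loadScore);

            // Guarded: argument evaluation is skipped entirely unless DEBUG is enabled,
            // which is the pattern this patch applies at every LOG.debug call site.
            if (LOG.isDebugEnabled()) {
                LOG.debug("backend " + beId + " load score: " + loadScore);
            }
        }
    }

For the parameterized "{}" calls the guard mainly avoids the varargs array allocation and any toString()/stream work done while building the arguments.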
diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/Rebalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/Rebalancer.java index def9e18a7e..afcce78539 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/Rebalancer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/Rebalancer.java @@ -100,7 +100,9 @@ public abstract class Rebalancer { protected boolean unPickOverLongTime(Tag tag, TStorageMedium medium) { Long lastPickTime = lastPickTimeTable.get(tag, medium); Long now = System.currentTimeMillis(); - LOG.debug("tag={}, medium={}, lastPickTime={}, now={}", tag, medium, lastPickTime, now); + if (LOG.isDebugEnabled()) { + LOG.debug("tag={}, medium={}, lastPickTime={}, now={}", tag, medium, lastPickTime, now); + } return lastPickTime == null || now - lastPickTime >= Config.be_rebalancer_idle_seconds * 1000L; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java index 66497e7d18..afd62ec881 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java @@ -211,7 +211,9 @@ public class TabletChecker extends MasterDaemon { removePriosIfNecessary(); stat.counterTabletCheckRound.incrementAndGet(); - LOG.debug(stat.incrementalBrief()); + if (LOG.isDebugEnabled()) { + LOG.debug(stat.incrementalBrief()); + } } public static class CheckerCounter { diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletSchedCtx.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletSchedCtx.java index 67612c037b..8720340dd2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletSchedCtx.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletSchedCtx.java @@ -519,7 +519,9 @@ public class TabletSchedCtx implements Comparable { if (backend == null) { // containsBE() is currently only used for choosing dest backend to do clone task. // return true so that it won't choose this backend. - LOG.debug("desc backend {} does not exist, skip. tablet: {}", beId, tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("desc backend {} does not exist, skip. tablet: {}", beId, tabletId); + } return true; } String host = backend.getHost(); @@ -527,18 +529,25 @@ public class TabletSchedCtx implements Comparable { Backend be = infoService.getBackend(replica.getBackendId()); if (be == null) { // BE has been dropped, skip it - LOG.debug("replica's backend {} does not exist, skip. tablet: {}", replica.getBackendId(), tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("replica's backend {} does not exist, skip. tablet: {}", + replica.getBackendId(), tabletId); + } continue; } if (!Config.allow_replica_on_same_host && !FeConstants.runningUnitTest && host.equals(be.getHost())) { - LOG.debug("replica's backend {} is on same host {}, skip. tablet: {}", - replica.getBackendId(), host, tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("replica's backend {} is on same host {}, skip. tablet: {}", + replica.getBackendId(), host, tabletId); + } return true; } if (replica.getBackendId() == beId) { - LOG.debug("replica's backend {} is same as dest backend {}, skip. tablet: {}", - replica.getBackendId(), beId, tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("replica's backend {} is same as dest backend {}, skip. 
tablet: {}", + replica.getBackendId(), beId, tabletId); + } return true; } } @@ -594,34 +603,44 @@ public class TabletSchedCtx implements Comparable { List candidates = Lists.newArrayList(); for (Replica replica : tablet.getReplicas()) { if (exceptBeId != -1 && replica.getBackendId() == exceptBeId) { - LOG.debug("replica's backend {} is same as except backend {}, skip. tablet: {}", - replica.getBackendId(), exceptBeId, tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("replica's backend {} is same as except backend {}, skip. tablet: {}", + replica.getBackendId(), exceptBeId, tabletId); + } continue; } if (replica.isBad() || replica.tooSlow()) { - LOG.debug("replica {} is bad({}) or too slow({}), skip. tablet: {}", - replica.getId(), replica.isBad(), replica.tooSlow(), tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("replica {} is bad({}) or too slow({}), skip. tablet: {}", + replica.getId(), replica.isBad(), replica.tooSlow(), tabletId); + } continue; } Backend be = infoService.getBackend(replica.getBackendId()); if (be == null || !be.isAlive()) { // backend which is in decommission can still be the source backend - LOG.debug("replica's backend {} does not exist or is not alive, skip. tablet: {}", - replica.getBackendId(), tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("replica's backend {} does not exist or is not alive, skip. tablet: {}", + replica.getBackendId(), tabletId); + } continue; } if (replica.getLastFailedVersion() > 0) { - LOG.debug("replica {} has failed version {}, skip. tablet: {}", - replica.getId(), replica.getLastFailedVersion(), tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("replica {} has failed version {}, skip. tablet: {}", + replica.getId(), replica.getLastFailedVersion(), tabletId); + } continue; } if (!replica.checkVersionCatchUp(visibleVersion, false)) { - LOG.debug("replica {} version {} has not catch up to visible version {}, skip. tablet: {}", - replica.getId(), replica.getVersion(), visibleVersion, tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("replica {} version {} has not catch up to visible version {}, skip. tablet: {}", + replica.getId(), replica.getVersion(), visibleVersion, tabletId); + } continue; } @@ -638,15 +657,19 @@ public class TabletSchedCtx implements Comparable { for (Replica srcReplica : candidates) { PathSlot slot = backendsWorkingSlots.get(srcReplica.getBackendId()); if (slot == null) { - LOG.debug("replica's backend {} does not have working slot, skip. tablet: {}", - srcReplica.getBackendId(), tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("replica's backend {} does not have working slot, skip. tablet: {}", + srcReplica.getBackendId(), tabletId); + } continue; } long srcPathHash = slot.takeSlot(srcReplica.getPathHash()); if (srcPathHash == -1) { - LOG.debug("replica's backend {} does not have available slot, skip. tablet: {}", - srcReplica.getBackendId(), tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("replica's backend {} does not have available slot, skip. tablet: {}", + srcReplica.getBackendId(), tabletId); + } continue; } setSrc(srcReplica); @@ -685,18 +708,24 @@ public class TabletSchedCtx implements Comparable { List furtherRepairs = Lists.newArrayList(); for (Replica replica : tablet.getReplicas()) { if (replica.isBad()) { - LOG.debug("replica {} is bad, skip. tablet: {}", - replica.getId(), tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("replica {} is bad, skip. 
tablet: {}", + replica.getId(), tabletId); + } continue; } if (!replica.isScheduleAvailable()) { if (Env.getCurrentSystemInfo().checkBackendScheduleAvailable(replica.getBackendId())) { - LOG.debug("replica's backend {} does not exist or is not scheduler available, skip. tablet: {}", - replica.getBackendId(), tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("replica's backend {} does not exist or is not scheduler available, skip. tablet: {}", + replica.getBackendId(), tabletId); + } } else { - LOG.debug("user drop replica {}, skip. tablet: {}", - replica, tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("user drop replica {}, skip. tablet: {}", + replica, tabletId); + } } continue; } @@ -715,8 +744,11 @@ public class TabletSchedCtx implements Comparable { } // skip healthy replica - LOG.debug("replica {} version {} is healthy, visible version {}, replica state {}, skip. tablet: {}", - replica.getId(), replica.getVersion(), visibleVersion, replica.getState(), tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("replica {} version {} is healthy, visible version {}, " + + "replica state {}, skip. tablet: {}", + replica.getId(), replica.getVersion(), visibleVersion, replica.getState(), tabletId); + } continue; } @@ -769,8 +801,10 @@ public class TabletSchedCtx implements Comparable { if (replica.needFurtherRepair()) { chosenReplica = replica; - LOG.debug("replica {} need further repair, choose it. tablet: {}", - replica.getId(), tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("replica {} need further repair, choose it. tablet: {}", + replica.getId(), tabletId); + } break; } @@ -1359,8 +1393,10 @@ public class TabletSchedCtx implements Comparable { replica.setState(ReplicaState.NORMAL); replica.setPreWatermarkTxnId(-1); replica.setPostWatermarkTxnId(-1); - LOG.debug("reset replica {} on backend {} of tablet {} state from DECOMMISSION to NORMAL", - replica.getId(), replica.getBackendId(), tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("reset replica {} on backend {} of tablet {} state from DECOMMISSION to NORMAL", + replica.getId(), replica.getBackendId(), tabletId); + } } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java index 4cab2fd8be..eebc8cec0f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java @@ -357,7 +357,9 @@ public class TabletScheduler extends MasterDaemon { LoadStatisticForTag loadStatistic = new LoadStatisticForTag(tag, infoService, invertedIndex); loadStatistic.init(); newStatisticMap.put(tag, loadStatistic); - LOG.debug("update load statistic for tag {}:\n{}", tag, loadStatistic.getBrief()); + if (LOG.isDebugEnabled()) { + LOG.debug("update load statistic for tag {}:\n{}", tag, loadStatistic.getBrief()); + } } Map pathsCopingSize = getPathsCopingSize(); for (LoadStatisticForTag loadStatistic : newStatisticMap.values()) { @@ -385,7 +387,9 @@ public class TabletScheduler extends MasterDaemon { private void schedulePendingTablets() { long start = System.currentTimeMillis(); List currentBatch = getNextTabletCtxBatch(); - LOG.debug("get {} tablets to schedule", currentBatch.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("get {} tablets to schedule", currentBatch.size()); + } AgentBatchTask batchTask = new AgentBatchTask(); for (TabletSchedCtx tabletCtx : currentBatch) { @@ -775,13 +779,17 @@ public class TabletScheduler extends MasterDaemon { // and 
this task is a VERSION_INCOMPLETE task. // This will lead to failure to select a suitable dest replica. // At this time, we try to convert this task to a REPLICA_MISSING task, and schedule it again. - LOG.debug("failed to find version incomplete replica for VERSION_INCOMPLETE task. tablet id: {}, " - + "try to find a new backend", tabletCtx.getTabletId()); + if (LOG.isDebugEnabled()) { + LOG.debug("failed to find version incomplete replica for VERSION_INCOMPLETE task. tablet id: {}, " + + "try to find a new backend", tabletCtx.getTabletId()); + } tabletCtx.releaseResource(this, true); tabletCtx.setTabletStatus(TabletStatus.REPLICA_MISSING); handleReplicaMissing(tabletCtx, batchTask); - LOG.debug("succeed to find new backend for VERSION_INCOMPLETE task. tablet id: {}", - tabletCtx.getTabletId()); + if (LOG.isDebugEnabled()) { + LOG.debug("succeed to find new backend for VERSION_INCOMPLETE task. tablet id: {}", + tabletCtx.getTabletId()); + } return; } else { throw e; @@ -1402,15 +1410,19 @@ public class TabletScheduler extends MasterDaemon { List allFitPathsDiffMedium = Lists.newArrayList(); for (BackendLoadStatistic bes : beStatistics) { if (!bes.isAvailable()) { - LOG.debug("backend {} is not available, skip. tablet: {}", bes.getBeId(), tabletCtx.getTabletId()); + if (LOG.isDebugEnabled()) { + LOG.debug("backend {} is not available, skip. tablet: {}", bes.getBeId(), tabletCtx.getTabletId()); + } continue; } // exclude BE which already has replica of this tablet or another BE at same host has this replica if (tabletCtx.filterDestBE(bes.getBeId())) { - LOG.debug("backend {} already has replica of this tablet or another BE " - + "at same host has this replica, skip. tablet: {}", - bes.getBeId(), tabletCtx.getTabletId()); + if (LOG.isDebugEnabled()) { + LOG.debug("backend {} already has replica of this tablet or another BE " + + "at same host has this replica, skip. tablet: {}", + bes.getBeId(), tabletCtx.getTabletId()); + } continue; } @@ -1418,13 +1430,17 @@ public class TabletScheduler extends MasterDaemon { // Else, check the tag. if (forColocate) { if (!tabletCtx.getColocateBackendsSet().contains(bes.getBeId())) { - LOG.debug("backend {} is not in colocate backend set, skip. tablet: {}", - bes.getBeId(), tabletCtx.getTabletId()); + if (LOG.isDebugEnabled()) { + LOG.debug("backend {} is not in colocate backend set, skip. tablet: {}", + bes.getBeId(), tabletCtx.getTabletId()); + } continue; } } else if (!bes.getTag().equals(tag)) { - LOG.debug("backend {}'s tag {} is not equal to tablet's tag {}, skip. tablet: {}", - bes.getBeId(), bes.getTag(), tag, tabletCtx.getTabletId()); + if (LOG.isDebugEnabled()) { + LOG.debug("backend {}'s tag {} is not equal to tablet's tag {}, skip. tablet: {}", + bes.getBeId(), bes.getTag(), tag, tabletCtx.getTabletId()); + } continue; } @@ -1434,14 +1450,18 @@ public class TabletScheduler extends MasterDaemon { if (st.ok()) { resultPaths.stream().forEach(path -> allFitPathsSameMedium.add(new BePathLoadStatPair(bes, path))); } else { - LOG.debug("backend {} unable to find path for tablet: {}. {}", bes.getBeId(), tabletCtx, st); + if (LOG.isDebugEnabled()) { + LOG.debug("backend {} unable to find path for tablet: {}. {}", bes.getBeId(), tabletCtx, st); + } resultPaths.clear(); st = bes.isFit(tabletCtx.getTabletSize(), tabletCtx.getStorageMedium(), resultPaths, true); if (st.ok()) { resultPaths.stream().forEach(path -> allFitPathsDiffMedium.add(new BePathLoadStatPair(bes, path))); } else { - LOG.debug("backend {} unable to find path for supplementing tablet: {}. 
{}", - bes.getBeId(), tabletCtx, st); + if (LOG.isDebugEnabled()) { + LOG.debug("backend {} unable to find path for supplementing tablet: {}. {}", + bes.getBeId(), tabletCtx, st); + } } } } @@ -1461,27 +1481,33 @@ public class TabletScheduler extends MasterDaemon { for (BePathLoadStatPair bePathLoadStat : allFitPaths) { RootPathLoadStatistic rootPathLoadStatistic = bePathLoadStat.getPathLoadStatistic(); if (rootPathLoadStatistic.getStorageMedium() != tabletCtx.getStorageMedium()) { - LOG.debug("backend {}'s path {}'s storage medium {} " - + "is not equal to tablet's storage medium {}, skip. tablet: {}", - rootPathLoadStatistic.getBeId(), rootPathLoadStatistic.getPathHash(), - rootPathLoadStatistic.getStorageMedium(), tabletCtx.getStorageMedium(), - tabletCtx.getTabletId()); + if (LOG.isDebugEnabled()) { + LOG.debug("backend {}'s path {}'s storage medium {} " + + "is not equal to tablet's storage medium {}, skip. tablet: {}", + rootPathLoadStatistic.getBeId(), rootPathLoadStatistic.getPathHash(), + rootPathLoadStatistic.getStorageMedium(), tabletCtx.getStorageMedium(), + tabletCtx.getTabletId()); + } continue; } PathSlot slot = backendsWorkingSlots.get(rootPathLoadStatistic.getBeId()); if (slot == null) { - LOG.debug("backend {}'s path {}'s slot is null, skip. tablet: {}", - rootPathLoadStatistic.getBeId(), rootPathLoadStatistic.getPathHash(), - tabletCtx.getTabletId()); + if (LOG.isDebugEnabled()) { + LOG.debug("backend {}'s path {}'s slot is null, skip. tablet: {}", + rootPathLoadStatistic.getBeId(), rootPathLoadStatistic.getPathHash(), + tabletCtx.getTabletId()); + } continue; } long pathHash = slot.takeSlot(rootPathLoadStatistic.getPathHash()); if (pathHash == -1) { - LOG.debug("backend {}'s path {}'s slot is full, skip. tablet: {}", - rootPathLoadStatistic.getBeId(), rootPathLoadStatistic.getPathHash(), - tabletCtx.getTabletId()); + if (LOG.isDebugEnabled()) { + LOG.debug("backend {}'s path {}'s slot is full, skip. tablet: {}", + rootPathLoadStatistic.getBeId(), rootPathLoadStatistic.getPathHash(), + tabletCtx.getTabletId()); + } continue; } return rootPathLoadStatistic; @@ -1494,18 +1520,22 @@ public class TabletScheduler extends MasterDaemon { RootPathLoadStatistic rootPathLoadStatistic = bePathLoadStat.getPathLoadStatistic(); PathSlot slot = backendsWorkingSlots.get(rootPathLoadStatistic.getBeId()); if (slot == null) { - LOG.debug("backend {}'s path {}'s slot is null, skip. tablet: {}", - rootPathLoadStatistic.getBeId(), rootPathLoadStatistic.getPathHash(), - tabletCtx.getTabletId()); + if (LOG.isDebugEnabled()) { + LOG.debug("backend {}'s path {}'s slot is null, skip. tablet: {}", + rootPathLoadStatistic.getBeId(), rootPathLoadStatistic.getPathHash(), + tabletCtx.getTabletId()); + } continue; } hasBePath = true; long pathHash = slot.takeSlot(rootPathLoadStatistic.getPathHash()); if (pathHash == -1) { - LOG.debug("backend {}'s path {}'s slot is full, skip. tablet: {}", - rootPathLoadStatistic.getBeId(), rootPathLoadStatistic.getPathHash(), - tabletCtx.getTabletId()); + if (LOG.isDebugEnabled()) { + LOG.debug("backend {}'s path {}'s slot is full, skip. 
tablet: {}", + rootPathLoadStatistic.getBeId(), rootPathLoadStatistic.getPathHash(), + tabletCtx.getTabletId()); + } continue; } return rootPathLoadStatistic; @@ -2033,11 +2063,15 @@ public class TabletScheduler extends MasterDaemon { Slot slot = pathSlots.get(pathHash); if (slot == null) { - LOG.debug("path {} is not exist", pathHash); + if (LOG.isDebugEnabled()) { + LOG.debug("path {} is not exist", pathHash); + } return -1; } if (slot.used >= slot.getTotal()) { - LOG.debug("path {} has no available slot", pathHash); + if (LOG.isDebugEnabled()) { + LOG.debug("path {} has no available slot", pathHash); + } return -1; } slot.used++; diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgo.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgo.java index 433b0f1539..840c7e0a4b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgo.java @@ -137,7 +137,9 @@ public class TwoDimensionalGreedyRebalanceAlgo { // the partition skew, partition count for all the be should be 0. // Keys are ordered by the natural ordering, so we can get the last(max) key to know if all keys are 0. NavigableSet keySet = info.beByTotalReplicaCount.keySet(); - LOG.debug(keySet); + if (LOG.isDebugEnabled()) { + LOG.debug(keySet); + } Preconditions.checkState(keySet.isEmpty() || keySet.last() == 0L, "non-zero replica count on be while no partition skew information in skewMap"); // Nothing to balance: cluster is empty. @@ -197,10 +199,12 @@ public class TwoDimensionalGreedyRebalanceAlgo { Long minReplicaCount = pbi.beByReplicaCount.keySet().first(); Long maxReplicaCount = pbi.beByReplicaCount.keySet().last(); - LOG.debug("balancing partition {}-{} with replica count skew {}" - + " (min_replica_count: {}, max_replica_count: {})", - pbi.partitionId, pbi.indexId, maxPartitionSkew, - minReplicaCount, maxReplicaCount); + if (LOG.isDebugEnabled()) { + LOG.debug("balancing partition {}-{} with replica count skew {}" + + " (min_replica_count: {}, max_replica_count: {})", + pbi.partitionId, pbi.indexId, maxPartitionSkew, + minReplicaCount, maxReplicaCount); + } // Compute the intersection of the bes most loaded for the table // with the bes most loaded overall, and likewise for least loaded. 
@@ -209,12 +213,14 @@ public class TwoDimensionalGreedyRebalanceAlgo { pbi.beByReplicaCount, beByTotalReplicaCount); IntersectionResult minLoaded = getIntersection(ExtremumType.MIN, pbi.beByReplicaCount, beByTotalReplicaCount); - LOG.debug("partition-wise: min_count: {}, max_count: {}", - minLoaded.replicaCountPartition, maxLoaded.replicaCountPartition); - LOG.debug("cluster-wise: min_count: {}, max_count: {}", - minLoaded.replicaCountTotal, maxLoaded.replicaCountTotal); - LOG.debug("min_loaded_intersection: {}, max_loaded_intersection: {}", - minLoaded.intersection.toString(), maxLoaded.intersection.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("partition-wise: min_count: {}, max_count: {}", + minLoaded.replicaCountPartition, maxLoaded.replicaCountPartition); + LOG.debug("cluster-wise: min_count: {}, max_count: {}", + minLoaded.replicaCountTotal, maxLoaded.replicaCountTotal); + LOG.debug("min_loaded_intersection: {}, max_loaded_intersection: {}", + minLoaded.intersection.toString(), maxLoaded.intersection.toString()); + } // Do not move replicas of a balanced table if the least (most) loaded // servers overall do not intersect the servers hosting the least (most) @@ -240,7 +246,9 @@ public class TwoDimensionalGreedyRebalanceAlgo { : getRandomListElement(maxLoaded.intersection); } - LOG.debug("min_loaded_be: {}, max_loaded_be: {}", minLoadedBe, maxLoadedBe); + if (LOG.isDebugEnabled()) { + LOG.debug("min_loaded_be: {}, max_loaded_be: {}", minLoadedBe, maxLoadedBe); + } if (minLoadedBe.equals(maxLoadedBe)) { // Nothing to move. continue; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/ConfigWatcher.java b/fe/fe-core/src/main/java/org/apache/doris/common/ConfigWatcher.java index 2a2a5cbeae..65530b91f7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/ConfigWatcher.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/ConfigWatcher.java @@ -49,7 +49,9 @@ public class ConfigWatcher extends Daemon { @Override protected void runOneCycle() { - LOG.debug("start config watcher loop"); + if (LOG.isDebugEnabled()) { + LOG.debug("start config watcher loop"); + } try { WatchService watchService = FileSystems.getDefault().newWatchService(); configPath.register(watchService, StandardWatchEventKinds.ENTRY_CREATE, diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/GenericPool.java b/fe/fe-core/src/main/java/org/apache/doris/common/GenericPool.java index 35a3198616..27b347aa3a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/GenericPool.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/GenericPool.java @@ -154,7 +154,9 @@ public class GenericPool { @Override public boolean validateObject(TNetworkAddress key, PooledObject p) { boolean isOpen = p.getObject().getOutputProtocol().getTransport().isOpen(); - LOG.debug("isOpen={}", isOpen); + if (LOG.isDebugEnabled()) { + LOG.debug("isOpen={}", isOpen); + } return isOpen; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/IndexedPriorityQueue.java b/fe/fe-core/src/main/java/org/apache/doris/common/IndexedPriorityQueue.java index f93db510e7..ae3962fb0b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/IndexedPriorityQueue.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/IndexedPriorityQueue.java @@ -225,4 +225,3 @@ public final class IndexedPriorityQueue } } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/Queue.java b/fe/fe-core/src/main/java/org/apache/doris/common/Queue.java index 6dc58a3523..796e5992f9 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/common/Queue.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/Queue.java @@ -33,4 +33,3 @@ interface Queue { boolean isEmpty(); } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/ResettableRandomizedIterator.java b/fe/fe-core/src/main/java/org/apache/doris/common/ResettableRandomizedIterator.java index 11d1860435..b91bccafdb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/ResettableRandomizedIterator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/ResettableRandomizedIterator.java @@ -60,4 +60,3 @@ public class ResettableRandomizedIterator return result; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/ThriftServerEventProcessor.java b/fe/fe-core/src/main/java/org/apache/doris/common/ThriftServerEventProcessor.java index 33228cd1a1..f3f5ba8589 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/ThriftServerEventProcessor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/ThriftServerEventProcessor.java @@ -63,7 +63,9 @@ public class ThriftServerEventProcessor implements TServerEventHandler { Preconditions.checkState(transport instanceof TFramedTransport); // NOTE: we need patch code in TNonblockingServer, we don't use for now. // see https://issues.apache.org/jira/browse/THRI FT-1053 - LOG.debug("TFramedTransport cannot create thrift context. server type: {}", thriftServer.getType()); + if (LOG.isDebugEnabled()) { + LOG.debug("TFramedTransport cannot create thrift context. server type: {}", thriftServer.getType()); + } return null; case SIMPLE: case THREAD_POOL: @@ -92,7 +94,9 @@ public class ThriftServerEventProcessor implements TServerEventHandler { thriftServer.addConnect(clientAddress); - LOG.debug("create thrift context. client: {}, server type: {}", clientAddress, thriftServer.getType()); + if (LOG.isDebugEnabled()) { + LOG.debug("create thrift context. client: {}, server type: {}", clientAddress, thriftServer.getType()); + } return new ThriftServerContext(clientAddress); } @@ -107,7 +111,9 @@ public class ThriftServerEventProcessor implements TServerEventHandler { TNetworkAddress clientAddress = thriftServerContext.getClient(); connectionContext.remove(); thriftServer.removeConnect(clientAddress); - LOG.debug("delete thrift context. client: {}, server type: {}", clientAddress, thriftServer.getType()); + if (LOG.isDebugEnabled()) { + LOG.debug("delete thrift context. client: {}, server type: {}", clientAddress, thriftServer.getType()); + } } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/UpdateablePriorityQueue.java b/fe/fe-core/src/main/java/org/apache/doris/common/UpdateablePriorityQueue.java index c9c0a2b3de..9034ea80d7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/UpdateablePriorityQueue.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/UpdateablePriorityQueue.java @@ -24,4 +24,3 @@ interface UpdateablePriorityQueue extends Queue, Iterable { boolean addOrUpdate(E element, long priority); } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendsProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendsProcDir.java index 2acf93d165..433a12bb71 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendsProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendsProcDir.java @@ -170,8 +170,10 @@ public class BackendsProcDir implements ProcDirInterface { } // backends proc node get result too slow, add log to observer. 
- LOG.debug("backends proc get tablet num cost: {}, total cost: {}", watch.elapsed(TimeUnit.MILLISECONDS), - (System.currentTimeMillis() - start)); + if (LOG.isDebugEnabled()) { + LOG.debug("backends proc get tablet num cost: {}, total cost: {}", watch.elapsed(TimeUnit.MILLISECONDS), + (System.currentTimeMillis() - start)); + } // sort by host name ListComparator> comparator = new ListComparator>(1); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/publish/ClusterStatePublisher.java b/fe/fe-core/src/main/java/org/apache/doris/common/publish/ClusterStatePublisher.java index 03827f6657..70a3721d82 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/publish/ClusterStatePublisher.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/publish/ClusterStatePublisher.java @@ -122,7 +122,9 @@ public class ClusterStatePublisher { LOG.warn("Backend execute publish failed. backend=[{}], message=[{}]", addr, tAgentResult.getStatus().getErrorMsgs()); } - LOG.debug("Success publish to backend([{}])", addr); + if (LOG.isDebugEnabled()) { + LOG.debug("Success publish to backend([{}])", addr); + } // Publish here handler.onResponse(node); } catch (TException e) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/BrokerUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/BrokerUtil.java index 360a59f63a..fef91f6f02 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/BrokerUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/BrokerUtil.java @@ -594,4 +594,3 @@ public class BrokerUtil { } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/ConsistentHash.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/ConsistentHash.java index 85199ad32f..d9e3787fa4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/ConsistentHash.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/ConsistentHash.java @@ -128,4 +128,3 @@ public class ConsistentHash { return ImmutableList.copyOf(uniqueNodes); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/LocationPath.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/LocationPath.java index fd7da29e51..e9fb932b6d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/LocationPath.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/LocationPath.java @@ -220,7 +220,9 @@ public class LocationPath { * @return metadata location path. just convert when storage is compatible with s3 client. 
*/ private static String convertToS3(String location) { - LOG.debug("try convert location to s3 prefix: " + location); + if (LOG.isDebugEnabled()) { + LOG.debug("try convert location to s3 prefix: " + location); + } int pos = findDomainPos(location); return "s3" + location.substring(pos); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/ProfileManager.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/ProfileManager.java index 7212230470..8644fea622 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/ProfileManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/ProfileManager.java @@ -132,7 +132,9 @@ public class ProfileManager { builder.build(); } catch (Exception e) { element.errMsg = e.getMessage(); - LOG.debug("failed to build profile tree", e); + if (LOG.isDebugEnabled()) { + LOG.debug("failed to build profile tree", e); + } return element; } element.builder = builder; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/RuntimeProfile.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/RuntimeProfile.java index c95b5f3738..eab9b3b673 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/RuntimeProfile.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/RuntimeProfile.java @@ -453,7 +453,9 @@ public class RuntimeProfile { if (tmp != null) { ret.add(profile.getChildMap().get(profileName)); } else { - LOG.debug("could not find {} from {}", profileName, profile.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("could not find {} from {}", profileName, profile.toString()); + } } } return ret; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java index 87b67dfa28..c454c6cab5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java @@ -351,7 +351,9 @@ public class Util { } } } - LOG.debug("get result from url {}: {}", urlStr, sb.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("get result from url {}: {}", urlStr, sb.toString()); + } return sb.toString(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/consistency/CheckConsistencyJob.java b/fe/fe-core/src/main/java/org/apache/doris/consistency/CheckConsistencyJob.java index f1518215af..dd9279a607 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/consistency/CheckConsistencyJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/consistency/CheckConsistencyJob.java @@ -106,13 +106,17 @@ public class CheckConsistencyJob { TabletInvertedIndex invertedIndex = Env.getCurrentInvertedIndex(); TabletMeta tabletMeta = invertedIndex.getTabletMeta(tabletId); if (tabletMeta == null) { - LOG.debug("tablet[{}] has been removed", tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("tablet[{}] has been removed", tabletId); + } return false; } Database db = Env.getCurrentInternalCatalog().getDbNullable(tabletMeta.getDbId()); if (db == null) { - LOG.debug("db[{}] does not exist", tabletMeta.getDbId()); + if (LOG.isDebugEnabled()) { + LOG.debug("db[{}] does not exist", tabletMeta.getDbId()); + } return false; } @@ -121,7 +125,9 @@ public class CheckConsistencyJob { AgentBatchTask batchTask = new AgentBatchTask(); Table table = db.getTableNullable(tabletMeta.getTableId()); if (table == null) { - LOG.debug("table[{}] does not exist", tabletMeta.getTableId()); + if (LOG.isDebugEnabled()) { + LOG.debug("table[{}] does not exist", 
tabletMeta.getTableId()); + } return false; } @@ -131,7 +137,9 @@ public class CheckConsistencyJob { Partition partition = olapTable.getPartition(tabletMeta.getPartitionId()); if (partition == null) { - LOG.debug("partition[{}] does not exist", tabletMeta.getPartitionId()); + if (LOG.isDebugEnabled()) { + LOG.debug("partition[{}] does not exist", tabletMeta.getPartitionId()); + } return false; } @@ -139,19 +147,25 @@ public class CheckConsistencyJob { short replicaNum = olapTable.getPartitionInfo() .getReplicaAllocation(partition.getId()).getTotalReplicaNum(); if (replicaNum == (short) 1) { - LOG.debug("partition[{}]'s replication num is 1. skip consistency check", partition.getId()); + if (LOG.isDebugEnabled()) { + LOG.debug("partition[{}]'s replication num is 1. skip consistency check", partition.getId()); + } return false; } MaterializedIndex index = partition.getIndex(tabletMeta.getIndexId()); if (index == null) { - LOG.debug("index[{}] does not exist", tabletMeta.getIndexId()); + if (LOG.isDebugEnabled()) { + LOG.debug("index[{}] does not exist", tabletMeta.getIndexId()); + } return false; } tablet = index.getTablet(tabletId); if (tablet == null) { - LOG.debug("tablet[{}] does not exist", tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("tablet[{}] does not exist", tabletId); + } return false; } @@ -205,7 +219,9 @@ public class CheckConsistencyJob { if (state != JobState.RUNNING) { // failed to send task. set tablet's checked version and version hash to avoid choosing it again if (!table.writeLockIfExist()) { - LOG.debug("table[{}] does not exist", tabletMeta.getTableId()); + if (LOG.isDebugEnabled()) { + LOG.debug("table[{}] does not exist", tabletMeta.getTableId()); + } return false; } try { @@ -222,7 +238,9 @@ public class CheckConsistencyJob { AgentTaskQueue.addTask(task); } AgentTaskExecutor.submit(batchTask); - LOG.debug("tablet[{}] send check consistency task. num: {}", tabletId, batchTask.getTaskNum()); + if (LOG.isDebugEnabled()) { + LOG.debug("tablet[{}] send check consistency task. num: {}", tabletId, batchTask.getTaskNum()); + } return true; } @@ -292,15 +310,19 @@ public class CheckConsistencyJob { while (iter.hasNext()) { Map.Entry entry = iter.next(); if (tablet.getReplicaByBackendId(entry.getKey()) == null) { - LOG.debug("tablet[{}]'s replica in backend[{}] does not exist. remove from checksumMap", - tabletId, entry.getKey()); + if (LOG.isDebugEnabled()) { + LOG.debug("tablet[{}]'s replica in backend[{}] does not exist. remove from checksumMap", + tabletId, entry.getKey()); + } iter.remove(); continue; } if (entry.getValue() == -1) { - LOG.debug("tablet[{}] has unfinished replica check sum task. backend[{}]", - tabletId, entry.getKey()); + if (LOG.isDebugEnabled()) { + LOG.debug("tablet[{}] has unfinished replica check sum task. 
backend[{}]", + tabletId, entry.getKey()); + } isFinished = false; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/consistency/ConsistencyChecker.java b/fe/fe-core/src/main/java/org/apache/doris/consistency/ConsistencyChecker.java index c65a48c256..b052f0d4ad 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/consistency/ConsistencyChecker.java +++ b/fe/fe-core/src/main/java/org/apache/doris/consistency/ConsistencyChecker.java @@ -181,8 +181,10 @@ public class ConsistencyChecker extends MasterDaemon { } if (!isTime) { - LOG.debug("current time is {}:00, waiting to {}:00 to {}:00", - currentTime, startTime, endTime); + if (LOG.isDebugEnabled()) { + LOG.debug("current time is {}:00, waiting to {}:00 to {}:00", + currentTime, startTime, endTime); + } } return isTime; @@ -190,7 +192,9 @@ public class ConsistencyChecker extends MasterDaemon { private void clearJob(CheckConsistencyJob job) { job.clear(); - LOG.debug("tablet[{}] consistency checking job is cleared", job.getTabletId()); + if (LOG.isDebugEnabled()) { + LOG.debug("tablet[{}] consistency checking job is cleared", job.getTabletId()); + } } private boolean addJob(CheckConsistencyJob job) { @@ -281,14 +285,18 @@ public class ConsistencyChecker extends MasterDaemon { // check partition's replication num. if 1 replication. skip if (table.getPartitionInfo().getReplicaAllocation( partition.getId()).getTotalReplicaNum() == (short) 1) { - LOG.debug("partition[{}]'s replication num is 1. ignore", partition.getId()); + if (LOG.isDebugEnabled()) { + LOG.debug("partition[{}]'s replication num is 1. ignore", partition.getId()); + } continue; } // check if this partition has no data if (partition.getVisibleVersion() == Partition.PARTITION_INIT_VERSION) { - LOG.debug("partition[{}]'s version is {}. ignore", partition.getId(), - Partition.PARTITION_INIT_VERSION); + if (LOG.isDebugEnabled()) { + LOG.debug("partition[{}]'s version is {}. ignore", partition.getId(), + Partition.PARTITION_INIT_VERSION); + } continue; } partitionQueue.add(partition); @@ -323,8 +331,10 @@ public class ConsistencyChecker extends MasterDaemon { // check if version has already been checked if (partition.getVisibleVersion() == tablet.getCheckedVersion()) { if (tablet.isConsistent()) { - LOG.debug("tablet[{}]'s version[{}] has been checked. ignore", - chosenTabletId, tablet.getCheckedVersion()); + if (LOG.isDebugEnabled()) { + LOG.debug("tablet[{}]'s version[{}] has been checked. 
ignore", + chosenTabletId, tablet.getCheckedVersion()); + } } } else { LOG.info("chose tablet[{}-{}-{}-{}-{}] to check consistency", db.getId(), diff --git a/fe/fe-core/src/main/java/org/apache/doris/cooldown/CooldownConf.java b/fe/fe-core/src/main/java/org/apache/doris/cooldown/CooldownConf.java index bb0e7debab..f687db3597 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cooldown/CooldownConf.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cooldown/CooldownConf.java @@ -79,4 +79,3 @@ public class CooldownConf implements Writable { return GsonUtils.GSON.fromJson(json, CooldownConf.class); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/cooldown/CooldownDelete.java b/fe/fe-core/src/main/java/org/apache/doris/cooldown/CooldownDelete.java index 4ca8220ba8..ebbaaf9f2e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cooldown/CooldownDelete.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cooldown/CooldownDelete.java @@ -43,4 +43,3 @@ public class CooldownDelete implements Writable { return GsonUtils.GSON.fromJson(json, CooldownConf.class); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogFactory.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogFactory.java index 3bf674a8ab..b56c307bbf 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogFactory.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogFactory.java @@ -155,4 +155,3 @@ public class CatalogFactory { return catalog; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java index d08d4042ac..c9a427d94c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java @@ -109,7 +109,9 @@ public class CatalogMgr implements Writable, GsonPostProcessable { public static CatalogMgr read(DataInput in) throws IOException { String json = Text.readString(in); - LOG.debug("debug: read json: {}", json); + if (LOG.isDebugEnabled()) { + LOG.debug("debug: read json: {}", json); + } return GsonUtils.GSON.fromJson(json, CatalogMgr.class); } @@ -1014,4 +1016,3 @@ public class CatalogMgr implements Writable, GsonPostProcessable { return new HashSet<>(idToCatalog.values()); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalCatalog.java index 26a81f34f0..67508c4bbe 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalCatalog.java @@ -642,4 +642,3 @@ public abstract class ExternalCatalog return new ConcurrentHashMap<>(idToDb); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalMetaCacheMgr.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalMetaCacheMgr.java index 57aff50c62..2b495b647b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalMetaCacheMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalMetaCacheMgr.java @@ -136,7 +136,9 @@ public class ExternalMetaCacheMgr { hudiPartitionMgr.cleanTablePartitions(catalogId, dbName, tblName); icebergMetadataCacheMgr.invalidateTableCache(catalogId, dbName, tblName); maxComputeMetadataCacheMgr.invalidateTableCache(catalogId, dbName, tblName); - LOG.debug("invalid table cache for {}.{} in catalog {}", dbName, tblName, 
catalogId); + if (LOG.isDebugEnabled()) { + LOG.debug("invalid table cache for {}.{} in catalog {}", dbName, tblName, catalogId); + } } public void invalidateDbCache(long catalogId, String dbName) { @@ -152,7 +154,9 @@ public class ExternalMetaCacheMgr { hudiPartitionMgr.cleanDatabasePartitions(catalogId, dbName); icebergMetadataCacheMgr.invalidateDbCache(catalogId, dbName); maxComputeMetadataCacheMgr.invalidateDbCache(catalogId, dbName); - LOG.debug("invalid db cache for {} in catalog {}", dbName, catalogId); + if (LOG.isDebugEnabled()) { + LOG.debug("invalid db cache for {} in catalog {}", dbName, catalogId); + } } public void invalidateCatalogCache(long catalogId) { @@ -167,7 +171,9 @@ public class ExternalMetaCacheMgr { hudiPartitionMgr.cleanPartitionProcess(catalogId); icebergMetadataCacheMgr.invalidateCatalogCache(catalogId); maxComputeMetadataCacheMgr.invalidateCatalogCache(catalogId); - LOG.debug("invalid catalog cache for {}", catalogId); + if (LOG.isDebugEnabled()) { + LOG.debug("invalid catalog cache for {}", catalogId); + } } public void addPartitionsCache(long catalogId, HMSExternalTable table, List partitionNames) { @@ -183,7 +189,9 @@ public class ExternalMetaCacheMgr { } metaCache.addPartitionsCache(dbName, table.getName(), partitionNames, partitionColumnTypes); } - LOG.debug("add partition cache for {}.{} in catalog {}", dbName, table.getName(), catalogId); + if (LOG.isDebugEnabled()) { + LOG.debug("add partition cache for {}.{} in catalog {}", dbName, table.getName(), catalogId); + } } public void dropPartitionsCache(long catalogId, HMSExternalTable table, List partitionNames) { @@ -192,7 +200,9 @@ public class ExternalMetaCacheMgr { if (metaCache != null) { metaCache.dropPartitionsCache(dbName, table.getName(), partitionNames, true); } - LOG.debug("drop partition cache for {}.{} in catalog {}", dbName, table.getName(), catalogId); + if (LOG.isDebugEnabled()) { + LOG.debug("drop partition cache for {}.{} in catalog {}", dbName, table.getName(), catalogId); + } } public void invalidatePartitionsCache(long catalogId, String dbName, String tableName, @@ -205,6 +215,8 @@ public class ExternalMetaCacheMgr { } } - LOG.debug("invalidate partition cache for {}.{} in catalog {}", dbName, tableName, catalogId); + if (LOG.isDebugEnabled()) { + LOG.debug("invalidate partition cache for {}.{} in catalog {}", dbName, tableName, catalogId); + } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalSchemaCache.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalSchemaCache.java index 962f9d977c..d8f2edbbd1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalSchemaCache.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalSchemaCache.java @@ -102,7 +102,9 @@ public class ExternalSchemaCache { public void invalidateTableCache(String dbName, String tblName) { SchemaCacheKey key = new SchemaCacheKey(dbName, tblName); schemaCache.invalidate(key); - LOG.debug("invalid schema cache for {}.{} in catalog {}", dbName, tblName, catalog.getName()); + if (LOG.isDebugEnabled()) { + LOG.debug("invalid schema cache for {}.{} in catalog {}", dbName, tblName, catalog.getName()); + } } public void invalidateDbCache(String dbName) { @@ -113,13 +115,17 @@ public class ExternalSchemaCache { schemaCache.invalidate(key); } } - LOG.debug("invalid schema cache for db {} in catalog {} cost: {} ms", dbName, catalog.getName(), - (System.currentTimeMillis() - start)); + if (LOG.isDebugEnabled()) { + LOG.debug("invalid schema cache for db 
{} in catalog {} cost: {} ms", dbName, catalog.getName(), + (System.currentTimeMillis() - start)); + } } public void invalidateAll() { schemaCache.invalidateAll(); - LOG.debug("invalid all schema cache in catalog {}", catalog.getName()); + if (LOG.isDebugEnabled()) { + LOG.debug("invalid all schema cache in catalog {}", catalog.getName()); + } } @Data diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSExternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSExternalCatalog.java index 92a15badef..3601565aad 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSExternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSExternalCatalog.java @@ -186,7 +186,9 @@ public class HMSExternalCatalog extends ExternalCatalog { @Override public void dropDatabase(String dbName) { - LOG.debug("drop database [{}]", dbName); + if (LOG.isDebugEnabled()) { + LOG.debug("drop database [{}]", dbName); + } Long dbId = dbNameToId.remove(dbName); if (dbId == null) { LOG.warn("drop database [{}] failed", dbName); @@ -196,7 +198,9 @@ public class HMSExternalCatalog extends ExternalCatalog { @Override public void createDatabase(long dbId, String dbName) { - LOG.debug("create database [{}]", dbName); + if (LOG.isDebugEnabled()) { + LOG.debug("create database [{}]", dbName); + } dbNameToId.put(dbName, dbId); ExternalDatabase db = getDbForInit(dbName, dbId, logType); idToDb.put(dbId, db); diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java index 44cbbc8cfa..ad8584a5e7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java @@ -987,7 +987,9 @@ public class InternalCatalog implements CatalogIf { } private void unprotectAddReplica(OlapTable olapTable, ReplicaPersistInfo info) { - LOG.debug("replay add a replica {}", info); + if (LOG.isDebugEnabled()) { + LOG.debug("replay add a replica {}", info); + } Partition partition = olapTable.getPartition(info.getPartitionId()); MaterializedIndex materializedIndex = partition.getIndex(info.getIndexId()); Tablet tablet = materializedIndex.getTablet(info.getTabletId()); @@ -1006,7 +1008,9 @@ public class InternalCatalog implements CatalogIf { } private void unprotectUpdateReplica(OlapTable olapTable, ReplicaPersistInfo info) { - LOG.debug("replay update a replica {}", info); + if (LOG.isDebugEnabled()) { + LOG.debug("replay update a replica {}", info); + } Partition partition = olapTable.getPartition(info.getPartitionId()); MaterializedIndex materializedIndex = partition.getIndex(info.getIndexId()); Tablet tablet = materializedIndex.getTablet(info.getTabletId()); @@ -1980,7 +1984,9 @@ public class InternalCatalog implements CatalogIf { // Create olap table and related base index synchronously. 
private void createOlapTable(Database db, CreateTableStmt stmt) throws UserException { String tableName = stmt.getTableName(); - LOG.debug("begin create olap table: {}", tableName); + if (LOG.isDebugEnabled()) { + LOG.debug("begin create olap table: {}", tableName); + } BinlogConfig dbBinlogConfig; db.readLock(); @@ -2045,7 +2051,9 @@ public class InternalCatalog implements CatalogIf { // calc short key column count short shortKeyColumnCount = Env.calcShortKeyColumnCount(baseSchema, stmt.getProperties(), isKeysRequired); - LOG.debug("create table[{}] short key column count: {}", tableName, shortKeyColumnCount); + if (LOG.isDebugEnabled()) { + LOG.debug("create table[{}] short key column count: {}", tableName, shortKeyColumnCount); + } // create table long tableId = idGeneratorBuffer.getNextId(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/es/EsTablePartitions.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/es/EsTablePartitions.java index cbda7338f9..b00264faf3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/es/EsTablePartitions.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/es/EsTablePartitions.java @@ -73,12 +73,16 @@ public class EsTablePartitions { idx++; } sb.append(")"); - LOG.debug("begin to parse es table [{}] state from search shards," - + " with partition info [{}]", esTable.getName(), sb.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("begin to parse es table [{}] state from search shards," + + " with partition info [{}]", esTable.getName(), sb.toString()); + } } } else if (esTable.getPartitionInfo() instanceof SinglePartitionInfo) { - LOG.debug("begin to parse es table [{}] state from search shards, " - + "with no partition info", esTable.getName()); + if (LOG.isDebugEnabled()) { + LOG.debug("begin to parse es table [{}] state from search shards, " + + "with no partition info", esTable.getName()); + } } else { throw new DorisEsException("es table only support range partition, " + "but current partition type is " @@ -86,7 +90,9 @@ public class EsTablePartitions { } } esTablePartitions.addIndexState(esTable.getIndexName(), shardPartitions); - LOG.debug("add index {} to es table {}", shardPartitions, esTable.getName()); + if (LOG.isDebugEnabled()) { + LOG.debug("add index {} to es table {}", shardPartitions, esTable.getName()); + } if (partitionInfo != null) { // sort the index state according to partition key and then add to range map List esShardPartitionsList = new ArrayList<>( @@ -99,8 +105,10 @@ public class EsTablePartitions { esTablePartitions.addPartition(esShardPartitions.getIndexName(), partitionId); esShardPartitions.setPartitionId(partitionId); ++partitionId; - LOG.debug("add partition to es table [{}] with range [{}]", esTable.getName(), - item.getItems()); + if (LOG.isDebugEnabled()) { + LOG.debug("add partition to es table [{}] with range [{}]", esTable.getName(), + item.getItems()); + } } } return esTablePartitions; diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java index c7bf28cda9..1d1565e5e7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java @@ -522,10 +522,12 @@ public class HiveMetaStoreCache { e, catalog.getName()); } - LOG.debug("get #{} files from #{} partitions in catalog {} cost: {} ms", - fileLists.stream().mapToInt(l -> 
l.getFiles() == null - ? (l.getSplits() == null ? 0 : l.getSplits().size()) : l.getFiles().size()).sum(), - partitions.size(), catalog.getName(), (System.currentTimeMillis() - start)); + if (LOG.isDebugEnabled()) { + LOG.debug("get #{} files from #{} partitions in catalog {} cost: {} ms", + fileLists.stream().mapToInt(l -> l.getFiles() == null + ? (l.getSplits() == null ? 0 : l.getSplits().size()) : l.getFiles().size()).sum(), + partitions.size(), catalog.getName(), (System.currentTimeMillis() - start)); + } return fileLists; } @@ -558,8 +560,10 @@ public class HiveMetaStoreCache { throw new CacheException("failed to get partition in catalog %s", e, catalog.getName()); } - LOG.debug("get #{} partitions in catalog {} cost: {} ms", partitions.size(), catalog.getName(), - (System.currentTimeMillis() - start)); + if (LOG.isDebugEnabled()) { + LOG.debug("get #{} partitions in catalog {} cost: {} ms", partitions.size(), catalog.getName(), + (System.currentTimeMillis() - start)); + } return partitions; } @@ -578,9 +582,11 @@ public class HiveMetaStoreCache { } } partitionValuesCache.invalidate(key); - LOG.debug("invalid table cache for {}.{} in catalog {}, cache num: {}, cost: {} ms", - dbName, tblName, catalog.getName(), partitionValues.partitionValuesMap.size(), - (System.currentTimeMillis() - start)); + if (LOG.isDebugEnabled()) { + LOG.debug("invalid table cache for {}.{} in catalog {}, cache num: {}, cost: {} ms", + dbName, tblName, catalog.getName(), partitionValues.partitionValuesMap.size(), + (System.currentTimeMillis() - start)); + } } else { /** * A file cache entry can be created reference to @@ -620,15 +626,19 @@ public class HiveMetaStoreCache { invalidateTableCache(dbName, key.tblName); } } - LOG.debug("invalid db cache for {} in catalog {}, cache num: {}, cost: {} ms", dbName, catalog.getName(), - keys.size(), (System.currentTimeMillis() - start)); + if (LOG.isDebugEnabled()) { + LOG.debug("invalid db cache for {} in catalog {}, cache num: {}, cost: {} ms", dbName, catalog.getName(), + keys.size(), (System.currentTimeMillis() - start)); + } } public void invalidateAll() { partitionValuesCache.invalidateAll(); partitionCache.invalidateAll(); fileCacheRef.get().invalidateAll(); - LOG.debug("invalid all meta cache in catalog {}", catalog.getName()); + if (LOG.isDebugEnabled()) { + LOG.debug("invalid all meta cache in catalog {}", catalog.getName()); + } } // partition name format: nation=cn/city=beijing @@ -1120,4 +1130,3 @@ public class HiveMetaStoreCache { } } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveTransaction.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveTransaction.java index f6bae24723..35d96068f0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveTransaction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveTransaction.java @@ -89,4 +89,3 @@ public class HiveTransaction { } } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/PostgreSQLJdbcHMSCachedClient.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/PostgreSQLJdbcHMSCachedClient.java index afbd929c5a..062d6394b0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/PostgreSQLJdbcHMSCachedClient.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/PostgreSQLJdbcHMSCachedClient.java @@ -70,7 +70,9 @@ public class PostgreSQLJdbcHMSCachedClient extends JdbcHMSCachedClient { String nameFiled = JdbcTable.databaseProperName(TOdbcTableType.POSTGRESQL, 
"NAME"); String tableName = JdbcTable.databaseProperName(TOdbcTableType.POSTGRESQL, "DBS"); String sql = String.format("SELECT %s FROM %s;", nameFiled, tableName); - LOG.debug("getAllDatabases exec sql: {}", sql); + if (LOG.isDebugEnabled()) { + LOG.debug("getAllDatabases exec sql: {}", sql); + } try (Connection conn = getConnection(); PreparedStatement stmt = conn.prepareStatement(sql); ResultSet rs = stmt.executeQuery()) { @@ -89,7 +91,9 @@ public class PostgreSQLJdbcHMSCachedClient extends JdbcHMSCachedClient { public List getAllTables(String dbName) { String sql = "SELECT \"TBL_NAME\" FROM \"TBLS\" join \"DBS\" on \"TBLS\".\"DB_ID\" = \"DBS\".\"DB_ID\"" + " WHERE \"DBS\".\"NAME\" = '" + dbName + "';"; - LOG.debug("getAllTables exec sql: {}", sql); + if (LOG.isDebugEnabled()) { + LOG.debug("getAllTables exec sql: {}", sql); + } try (Connection conn = getConnection(); PreparedStatement stmt = conn.prepareStatement(sql); @@ -121,7 +125,9 @@ public class PostgreSQLJdbcHMSCachedClient extends JdbcHMSCachedClient { String sql = String.format("SELECT \"PART_NAME\" from \"PARTITIONS\" WHERE \"TBL_ID\" = (" + "SELECT \"TBL_ID\" FROM \"TBLS\" join \"DBS\" on \"TBLS\".\"DB_ID\" = \"DBS\".\"DB_ID\"" + " WHERE \"DBS\".\"NAME\" = '%s' AND \"TBLS\".\"TBL_NAME\"='%s');", dbName, tblName); - LOG.debug("listPartitionNames exec sql: {}", sql); + if (LOG.isDebugEnabled()) { + LOG.debug("listPartitionNames exec sql: {}", sql); + } try (Connection conn = getConnection(); PreparedStatement stmt = conn.prepareStatement(sql); @@ -140,10 +146,14 @@ public class PostgreSQLJdbcHMSCachedClient extends JdbcHMSCachedClient { // not used @Override public Partition getPartition(String dbName, String tblName, List partitionValues) { - LOG.debug("getPartition partitionValues: {}", partitionValues); + if (LOG.isDebugEnabled()) { + LOG.debug("getPartition partitionValues: {}", partitionValues); + } String partitionName = Joiner.on("/").join(partitionValues); ImmutableList partitionNames = ImmutableList.of(partitionName); - LOG.debug("getPartition partitionNames: {}", partitionNames); + if (LOG.isDebugEnabled()) { + LOG.debug("getPartition partitionNames: {}", partitionNames); + } List partitions = getPartitionsByNames(dbName, tblName, partitionNames); if (!partitions.isEmpty()) { return partitions.get(0); @@ -169,7 +179,9 @@ public class PostgreSQLJdbcHMSCachedClient extends JdbcHMSCachedClient { + " WHERE \"DBS\".\"NAME\" = '%s' AND \"TBLS\".\"TBL_NAME\"='%s'" + " AND \"PART_NAME\" in (%s);", dbName, tblName, partitionNamesString); - LOG.debug("getPartitionsByNames exec sql: {}", sql); + if (LOG.isDebugEnabled()) { + LOG.debug("getPartitionsByNames exec sql: {}", sql); + } try (Connection conn = getConnection(); PreparedStatement stmt = conn.prepareStatement(sql); @@ -201,7 +213,9 @@ public class PostgreSQLJdbcHMSCachedClient extends JdbcHMSCachedClient { private List getPartitionValues(int partitionId) { String sql = String.format("SELECT \"PART_KEY_VAL\" FROM \"PARTITION_KEY_VALS\"" + " WHERE \"PART_ID\" = " + partitionId); - LOG.debug("getPartitionValues exec sql: {}", sql); + if (LOG.isDebugEnabled()) { + LOG.debug("getPartitionValues exec sql: {}", sql); + } try (Connection conn = getConnection(); PreparedStatement stmt = conn.prepareStatement(sql); ResultSet rs = stmt.executeQuery()) { @@ -222,7 +236,9 @@ public class PostgreSQLJdbcHMSCachedClient extends JdbcHMSCachedClient { + " \"IS_REWRITE_ENABLED\", \"VIEW_EXPANDED_TEXT\", \"VIEW_ORIGINAL_TEXT\", \"DBS\".\"OWNER_TYPE\"" + " FROM \"TBLS\" join \"DBS\" on 
\"TBLS\".\"DB_ID\" = \"DBS\".\"DB_ID\" " + " WHERE \"DBS\".\"NAME\" = '" + dbName + "' AND \"TBLS\".\"TBL_NAME\"='" + tblName + "';"; - LOG.debug("getTable exec sql: {}", sql); + if (LOG.isDebugEnabled()) { + LOG.debug("getTable exec sql: {}", sql); + } try (Connection conn = getConnection(); PreparedStatement stmt = conn.prepareStatement(sql); @@ -254,7 +270,9 @@ public class PostgreSQLJdbcHMSCachedClient extends JdbcHMSCachedClient { private StorageDescriptor getStorageDescriptor(int sdId) { String sql = "SELECT * from \"SDS\" WHERE \"SD_ID\" = " + sdId; - LOG.debug("getStorageDescriptorByDbAndTable exec sql: {}", sql); + if (LOG.isDebugEnabled()) { + LOG.debug("getStorageDescriptorByDbAndTable exec sql: {}", sql); + } StorageDescriptor sd = new StorageDescriptor(); sd.setCols(getSchemaExcludePartitionKeys(sdId)); @@ -280,7 +298,9 @@ public class PostgreSQLJdbcHMSCachedClient extends JdbcHMSCachedClient { private SerDeInfo getSerdeInfo(int serdeId) { String sql = "SELECT * FROM \"SERDES\" WHERE \"SERDE_ID\" = " + serdeId; - LOG.debug("getSerdeInfo exec sql: {}", sql); + if (LOG.isDebugEnabled()) { + LOG.debug("getSerdeInfo exec sql: {}", sql); + } SerDeInfo serDeInfo = new SerDeInfo(); serDeInfo.setParameters(getSerdeInfoParameters(serdeId)); @@ -301,7 +321,9 @@ public class PostgreSQLJdbcHMSCachedClient extends JdbcHMSCachedClient { private Map getSerdeInfoParameters(int serdeId) { String sql = "SELECT \"PARAM_KEY\", \"PARAM_VALUE\" from \"SERDE_PARAMS\" WHERE \"SERDE_ID\" = " + serdeId; - LOG.debug("getSerdeInfoParameters exec sql: {}", sql); + if (LOG.isDebugEnabled()) { + LOG.debug("getSerdeInfoParameters exec sql: {}", sql); + } try (Connection conn = getConnection(); PreparedStatement stmt = conn.prepareStatement(sql); @@ -319,7 +341,9 @@ public class PostgreSQLJdbcHMSCachedClient extends JdbcHMSCachedClient { private List getTablePartitionKeys(int tableId) { String sql = "SELECT \"PKEY_NAME\", \"PKEY_TYPE\", \"PKEY_COMMENT\" from \"PARTITION_KEYS\"" + " WHERE \"TBL_ID\"= " + tableId; - LOG.debug("getTablePartitionKeys exec sql: {}", sql); + if (LOG.isDebugEnabled()) { + LOG.debug("getTablePartitionKeys exec sql: {}", sql); + } try (Connection conn = getConnection(); PreparedStatement stmt = conn.prepareStatement(sql); @@ -343,7 +367,9 @@ public class PostgreSQLJdbcHMSCachedClient extends JdbcHMSCachedClient { private Map getTableParameters(int tableId) { String sql = "SELECT \"PARAM_KEY\", \"PARAM_VALUE\" from \"TABLE_PARAMS\" WHERE \"TBL_ID\" = " + tableId; - LOG.debug("getParameters exec sql: {}", sql); + if (LOG.isDebugEnabled()) { + LOG.debug("getParameters exec sql: {}", sql); + } try (Connection conn = getConnection(); PreparedStatement stmt = conn.prepareStatement(sql); @@ -378,7 +404,9 @@ public class PostgreSQLJdbcHMSCachedClient extends JdbcHMSCachedClient { + " join \"SDS\" on \"SDS\".\"SD_ID\" = \"TBLS\".\"SD_ID\"" + " join \"COLUMNS_V2\" on \"COLUMNS_V2\".\"CD_ID\" = \"SDS\".\"CD_ID\"" + " WHERE \"DBS\".\"NAME\" = '" + dbName + "' AND \"TBLS\".\"TBL_NAME\"='" + tblName + "';"; - LOG.debug("getSchema exec sql: {}", sql); + if (LOG.isDebugEnabled()) { + LOG.debug("getSchema exec sql: {}", sql); + } Builder builder = ImmutableList.builder(); int tableId = -1; @@ -405,7 +433,9 @@ public class PostgreSQLJdbcHMSCachedClient extends JdbcHMSCachedClient { String sql = "SELECT \"COLUMN_NAME\", \"TYPE_NAME\", \"COMMENT\"" + " FROM \"SDS\" join \"COLUMNS_V2\" on \"COLUMNS_V2\".\"CD_ID\" = \"SDS\".\"CD_ID\"" + " WHERE \"SDS\".\"SD_ID\" = " + sdId; - LOG.debug("getSchema exec sql: 
{}", sql); + if (LOG.isDebugEnabled()) { + LOG.debug("getSchema exec sql: {}", sql); + } Builder colsExcludePartitionKeys = ImmutableList.builder(); try (Connection conn = getConnection(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/ThriftHMSCachedClient.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/ThriftHMSCachedClient.java index 1e5e2f1287..17e0bbcc74 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/ThriftHMSCachedClient.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/ThriftHMSCachedClient.java @@ -480,4 +480,3 @@ public class ThriftHMSCachedClient implements HMSCachedClient { return HiveMetaStoreClientHelper.ugiDoAs(hiveConf, action); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/MetastoreEvent.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/MetastoreEvent.java index a9d165b4d0..67348494be 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/MetastoreEvent.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/MetastoreEvent.java @@ -206,7 +206,9 @@ public abstract class MetastoreEvent { } String formatString = LOG_FORMAT_EVENT_ID_TYPE + logFormattedStr; Object[] formatArgs = getLogFormatArgs(args); - LOG.debug(formatString, formatArgs); + if (LOG.isDebugEnabled()) { + LOG.debug(formatString, formatArgs); + } } protected String getPartitionName(Map part, List partitionColNames) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/MetastoreEventsProcessor.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/MetastoreEventsProcessor.java index 28793aecf2..a94c5bbb65 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/MetastoreEventsProcessor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/MetastoreEventsProcessor.java @@ -136,7 +136,9 @@ public class MetastoreEventsProcessor extends MasterDaemon { * {@link Config#hms_events_batch_size_per_rpc} */ private List getNextHMSEvents(HMSExternalCatalog hmsExternalCatalog) throws Exception { - LOG.debug("Start to pull events on catalog [{}]", hmsExternalCatalog.getName()); + if (LOG.isDebugEnabled()) { + LOG.debug("Start to pull events on catalog [{}]", hmsExternalCatalog.getName()); + } NotificationEventResponse response; if (Env.getCurrentEnv().isMaster()) { response = getNextEventResponseForMaster(hmsExternalCatalog); @@ -195,8 +197,10 @@ public class MetastoreEventsProcessor extends MasterDaemon { return null; } - LOG.debug("Catalog [{}] getNextEventResponse, currentEventId is {}, lastSyncedEventId is {}", - hmsExternalCatalog.getName(), currentEventId, lastSyncedEventId); + if (LOG.isDebugEnabled()) { + LOG.debug("Catalog [{}] getNextEventResponse, currentEventId is {}, lastSyncedEventId is {}", + hmsExternalCatalog.getName(), currentEventId, lastSyncedEventId); + } if (currentEventId == lastSyncedEventId) { LOG.info("Event id not updated when pulling events on catalog [{}]", hmsExternalCatalog.getName()); return null; @@ -246,8 +250,10 @@ public class MetastoreEventsProcessor extends MasterDaemon { return null; } - LOG.debug("Catalog [{}] getNextEventResponse, masterLastSyncedEventId is {}, lastSyncedEventId is {}", - hmsExternalCatalog.getName(), masterLastSyncedEventId, lastSyncedEventId); + if (LOG.isDebugEnabled()) { + LOG.debug("Catalog [{}] getNextEventResponse, masterLastSyncedEventId is {}, lastSyncedEventId is {}", + hmsExternalCatalog.getName(), 
masterLastSyncedEventId, lastSyncedEventId); + } // For slave FE nodes, only fetch events which id is lower than masterLastSyncedEventId int maxEventSize = Math.min((int) (masterLastSyncedEventId - lastSyncedEventId), @@ -310,7 +316,9 @@ public class MetastoreEventsProcessor extends MasterDaemon { OriginStatement originStmt = new OriginStatement(sql, 0); MasterOpExecutor masterOpExecutor = new MasterOpExecutor(originStmt, new ConnectContext(), RedirectStatus.FORWARD_WITH_SYNC, false); - LOG.debug("Transfer to master to refresh catalog, stmt: {}", sql); + if (LOG.isDebugEnabled()) { + LOG.debug("Transfer to master to refresh catalog, stmt: {}", sql); + } masterOpExecutor.execute(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/HiveCompatibleCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/HiveCompatibleCatalog.java index 4cdba3523d..6431b02308 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/HiveCompatibleCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/HiveCompatibleCatalog.java @@ -179,4 +179,3 @@ public abstract class HiveCompatibleCatalog extends BaseMetastoreCatalog impleme return conf; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java index 85b24fdba0..e2f5658ee0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java @@ -115,8 +115,10 @@ public abstract class JdbcClient { // set parent ClassLoader to null, we can achieve class loading isolation. ClassLoader parent = getClass().getClassLoader(); ClassLoader classLoader = URLClassLoader.newInstance(urls, parent); - LOG.debug("parent ClassLoader: {}, old ClassLoader: {}, class Loader: {}.", - parent, oldClassLoader, classLoader); + if (LOG.isDebugEnabled()) { + LOG.debug("parent ClassLoader: {}, old ClassLoader: {}, class Loader: {}.", + parent, oldClassLoader, classLoader); + } Thread.currentThread().setContextClassLoader(classLoader); dataSource = new DruidDataSource(); dataSource.setDriverClassLoader(classLoader); @@ -195,7 +197,9 @@ public abstract class JdbcClient { try { stmt = conn.createStatement(); int effectedRows = stmt.executeUpdate(origStmt); - LOG.debug("finished to execute dml stmt: {}, effected rows: {}", origStmt, effectedRows); + if (LOG.isDebugEnabled()) { + LOG.debug("finished to execute dml stmt: {}, effected rows: {}", origStmt, effectedRows); + } } catch (SQLException e) { throw new JdbcClientException("Failed to execute stmt. 
error: " + e.getMessage(), e); } finally { @@ -448,4 +452,3 @@ public abstract class JdbcClient { return ScalarType.createStringType(); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/kafka/KafkaUtil.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/kafka/KafkaUtil.java index efcf56d358..aee2aadac2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/kafka/KafkaUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/kafka/KafkaUtil.java @@ -93,7 +93,9 @@ public class KafkaUtil { Map convertedCustomProperties, List> timestampOffsets) throws LoadException { TNetworkAddress address = null; - LOG.debug("begin to get offsets for times of topic: {}, {}", topic, timestampOffsets); + if (LOG.isDebugEnabled()) { + LOG.debug("begin to get offsets for times of topic: {}, {}", topic, timestampOffsets); + } try { List backendIds = Env.getCurrentSystemInfo().getAllBackendIds(true); if (backendIds.isEmpty()) { @@ -138,7 +140,9 @@ public class KafkaUtil { for (InternalService.PIntegerPair pair : pairs) { partitionOffsets.add(Pair.of(pair.getKey(), pair.getVal())); } - LOG.debug("finish to get offsets for times of topic: {}, {}", topic, partitionOffsets); + if (LOG.isDebugEnabled()) { + LOG.debug("finish to get offsets for times of topic: {}, {}", topic, partitionOffsets); + } return partitionOffsets; } } catch (Exception e) { @@ -152,8 +156,10 @@ public class KafkaUtil { Map convertedCustomProperties, List partitionIds) throws LoadException { TNetworkAddress address = null; - LOG.debug("begin to get latest offsets for partitions {} in topic: {}, task {}, job {}", - partitionIds, topic, taskId, jobId); + if (LOG.isDebugEnabled()) { + LOG.debug("begin to get latest offsets for partitions {} in topic: {}, task {}, job {}", + partitionIds, topic, taskId, jobId); + } try { List backendIds = Env.getCurrentSystemInfo().getAllBackendIds(true); if (backendIds.isEmpty()) { @@ -196,8 +202,10 @@ public class KafkaUtil { for (InternalService.PIntegerPair pair : pairs) { partitionOffsets.add(Pair.of(pair.getKey(), pair.getVal())); } - LOG.debug("finish to get latest offsets for partitions {} in topic: {}, task {}, job {}", - partitionOffsets, topic, taskId, jobId); + if (LOG.isDebugEnabled()) { + LOG.debug("finish to get latest offsets for partitions {} in topic: {}, task {}, job {}", + partitionOffsets, topic, taskId, jobId); + } return partitionOffsets; } } catch (Exception e) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/PropertyConverter.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/PropertyConverter.java index 66396e2c33..f9b315cc5c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/PropertyConverter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/PropertyConverter.java @@ -570,4 +570,3 @@ public class PropertyConverter { return props; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/test/TestExternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/test/TestExternalCatalog.java index f680c79d02..fddc21a294 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/test/TestExternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/test/TestExternalCatalog.java @@ -109,4 +109,3 @@ public class TestExternalCatalog extends ExternalCatalog { Map>> getMetadata(); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/deploy/DeployManager.java 
b/fe/fe-core/src/main/java/org/apache/doris/deploy/DeployManager.java index 79b8e9392d..11230daf97 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/deploy/DeployManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/deploy/DeployManager.java @@ -368,7 +368,9 @@ public class DeployManager extends MasterDaemon { continue; } List remoteHosts = getGroupHostInfos(nodeType); - LOG.debug("get serviceName: {},remoteHosts: {}", nodeTypeAttr.getServiceName(), remoteHosts); + if (LOG.isDebugEnabled()) { + LOG.debug("get serviceName: {},remoteHosts: {}", nodeTypeAttr.getServiceName(), remoteHosts); + } process(nodeType, remoteHosts); } } @@ -468,11 +470,15 @@ public class DeployManager extends MasterDaemon { if (LOG.isDebugEnabled()) { for (HostInfo hostInfo : remoteHostInfos) { - LOG.debug("inspectNodeChange: remote host info: {}", hostInfo); + if (LOG.isDebugEnabled()) { + LOG.debug("inspectNodeChange: remote host info: {}", hostInfo); + } } for (HostInfo hostInfo : localHostInfos) { - LOG.debug("inspectNodeChange: local host info: {}", hostInfo); + if (LOG.isDebugEnabled()) { + LOG.debug("inspectNodeChange: local host info: {}", hostInfo); + } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/deploy/impl/K8sDeployManager.java b/fe/fe-core/src/main/java/org/apache/doris/deploy/impl/K8sDeployManager.java index ee10a90073..772582f933 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/deploy/impl/K8sDeployManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/deploy/impl/K8sDeployManager.java @@ -299,7 +299,9 @@ public class K8sDeployManager extends DeployManager { for (int i = 0; i < num; i++) { String domainName = getDomainName(nodeType, i); hostInfos.add(new HostInfo(domainName, servicePort)); - LOG.debug("get hostInfo from domainName: {}, hostInfo: {}", domainName, hostInfos.get(i).toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("get hostInfo from domainName: {}, hostInfo: {}", domainName, hostInfos.get(i).toString()); + } } return hostInfos; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/deploy/impl/LocalFileDeployManager.java b/fe/fe-core/src/main/java/org/apache/doris/deploy/impl/LocalFileDeployManager.java index efe8cdf7a6..72a3fd773d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/deploy/impl/LocalFileDeployManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/deploy/impl/LocalFileDeployManager.java @@ -97,7 +97,9 @@ public class LocalFileDeployManager extends DeployManager { if (!str.startsWith(groupName)) { continue; } - LOG.debug("read line: {}", str); + if (LOG.isDebugEnabled()) { + LOG.debug("read line: {}", str); + } String[] parts = str.split("="); if (parts.length != 2 || Strings.isNullOrEmpty(parts[1])) { return result; diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/remote/BrokerFileSystem.java b/fe/fe-core/src/main/java/org/apache/doris/fs/remote/BrokerFileSystem.java index ef8d484bda..6e3d20a2df 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/fs/remote/BrokerFileSystem.java +++ b/fe/fe-core/src/main/java/org/apache/doris/fs/remote/BrokerFileSystem.java @@ -156,7 +156,9 @@ public class BrokerFileSystem extends RemoteFileSystem { @Override public Status downloadWithFileSize(String remoteFilePath, String localFilePath, long fileSize) { - LOG.debug("download from {} to {}, file size: {}.", remoteFilePath, localFilePath, fileSize); + if (LOG.isDebugEnabled()) { + LOG.debug("download from {} to {}, file size: {}.", remoteFilePath, localFilePath, fileSize); + } long start = System.currentTimeMillis(); @@ 
-228,10 +230,14 @@ public class BrokerFileSystem extends RemoteFileSystem { status = new Status(Status.ErrCode.COMMON_ERROR, lastErrMsg); } if (rep.opStatus.statusCode != TBrokerOperationStatusCode.END_OF_FILE) { - LOG.debug("download. readLen: {}, read data len: {}, left size:{}. total size: {}", - readLen, rep.getData().length, leftSize, fileSize); + if (LOG.isDebugEnabled()) { + LOG.debug("download. readLen: {}, read data len: {}, left size:{}. total size: {}", + readLen, rep.getData().length, leftSize, fileSize); + } } else { - LOG.debug("read eof: " + remoteFilePath); + if (LOG.isDebugEnabled()) { + LOG.debug("read eof: " + remoteFilePath); + } } break; } catch (TTransportException e) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/remote/S3FileSystem.java b/fe/fe-core/src/main/java/org/apache/doris/fs/remote/S3FileSystem.java index f91c50d709..db0062cf98 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/fs/remote/S3FileSystem.java +++ b/fe/fe-core/src/main/java/org/apache/doris/fs/remote/S3FileSystem.java @@ -112,4 +112,3 @@ public class S3FileSystem extends ObjFileSystem { return ((S3ObjStorage) objStorage).deleteObjects(absolutePath); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/remote/dfs/DFSFileSystem.java b/fe/fe-core/src/main/java/org/apache/doris/fs/remote/dfs/DFSFileSystem.java index 4d21ebbdf9..f28e2eb66a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/fs/remote/dfs/DFSFileSystem.java +++ b/fe/fe-core/src/main/java/org/apache/doris/fs/remote/dfs/DFSFileSystem.java @@ -102,7 +102,9 @@ public class DFSFileSystem extends RemoteFileSystem { @Override public Status downloadWithFileSize(String remoteFilePath, String localFilePath, long fileSize) { - LOG.debug("download from {} to {}, file size: {}.", remoteFilePath, localFilePath, fileSize); + if (LOG.isDebugEnabled()) { + LOG.debug("download from {} to {}, file size: {}.", remoteFilePath, localFilePath, fileSize); + } final long start = System.currentTimeMillis(); HDFSOpParams hdfsOpParams = OpParams.of(remoteFilePath); Status st = operations.openReader(hdfsOpParams); @@ -202,8 +204,10 @@ public class DFSFileSystem extends RemoteFileSystem { } if (currentStreamOffset != readOffset) { // it's ok, when reading some format like parquet, it is not a sequential read - LOG.debug("invalid offset, current read offset is " + currentStreamOffset - + " is not equal to request offset " + readOffset + " seek to it"); + if (LOG.isDebugEnabled()) { + LOG.debug("invalid offset, current read offset is " + currentStreamOffset + + " is not equal to request offset " + readOffset + " seek to it"); + } try { fsDataInputStream.seek(readOffset); } catch (IOException e) { @@ -301,7 +305,9 @@ public class DFSFileSystem extends RemoteFileSystem { @Override public Status upload(String localPath, String remotePath) { long start = System.currentTimeMillis(); - LOG.debug("local path {}, remote path {}", localPath, remotePath); + if (LOG.isDebugEnabled()) { + LOG.debug("local path {}, remote path {}", localPath, remotePath); + } HDFSOpParams hdfsOpParams = OpParams.of(remotePath); Status wst = operations.openWriter(hdfsOpParams); if (wst != Status.OK) { @@ -446,4 +452,3 @@ public class DFSFileSystem extends RemoteFileSystem { return new Status(Status.ErrCode.COMMON_ERROR, "mkdir is not implemented."); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/HttpAuthManager.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/HttpAuthManager.java index dec2419a28..9ecd82a134 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/httpv2/HttpAuthManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/HttpAuthManager.java @@ -61,8 +61,10 @@ public final class HttpAuthManager { for (String sessionId : sessionIds) { SessionValue sv = authSessions.getIfPresent(sessionId); if (sv != null) { - LOG.debug("get session value {} by session id: {}, left size: {}", - sv == null ? null : sv.currentUser, sessionId, authSessions.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("get session value {} by session id: {}, left size: {}", + sv == null ? null : sv.currentUser, sessionId, authSessions.size()); + } return sv; } } @@ -72,7 +74,9 @@ public final class HttpAuthManager { public void removeSession(String sessionId) { if (!Strings.isNullOrEmpty(sessionId)) { authSessions.invalidate(sessionId); - LOG.debug("remove session id: {}, left size: {}", sessionId, authSessions.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("remove session id: {}, left size: {}", sessionId, authSessions.size()); + } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/BaseController.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/BaseController.java index 0027b6dd64..e3d6d90ead 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/BaseController.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/BaseController.java @@ -82,8 +82,10 @@ public class BaseController { ctx.setCurrentUserIdentity(currentUser); ctx.setEnv(Env.getCurrentEnv()); ctx.setThreadLocalInfo(); - LOG.debug("check auth without cookie success for user: {}, thread: {}", - currentUser, Thread.currentThread().getId()); + if (LOG.isDebugEnabled()) { + LOG.debug("check auth without cookie success for user: {}, thread: {}", + currentUser, Thread.currentThread().getId()); + } return authInfo; } @@ -107,7 +109,9 @@ public class BaseController { cookie.setPath("/"); cookie.setHttpOnly(true); response.addCookie(cookie); - LOG.debug("add session cookie: {} {}", PALO_SESSION_ID, key); + if (LOG.isDebugEnabled()) { + LOG.debug("add session cookie: {} {}", PALO_SESSION_ID, key); + } HttpAuthManager.getInstance().addSessionValue(key, value); } @@ -138,8 +142,10 @@ public class BaseController { ctx.setCurrentUserIdentity(sessionValue.currentUser); ctx.setEnv(Env.getCurrentEnv()); ctx.setThreadLocalInfo(); - LOG.debug("check cookie success for user: {}, thread: {}", - sessionValue.currentUser, Thread.currentThread().getId()); + if (LOG.isDebugEnabled()) { + LOG.debug("check cookie success for user: {}, thread: {}", + sessionValue.currentUser, Thread.currentThread().getId()); + } ActionAuthorizationInfo authInfo = new ActionAuthorizationInfo(); authInfo.fullUserName = sessionValue.currentUser.getQualifiedUser(); authInfo.remoteIp = request.getRemoteHost(); @@ -238,7 +244,9 @@ public class BaseController { request.getHeader("Authorization"), request.getRequestURI()); throw new UnauthorizedException("Need auth information."); } - LOG.debug("get auth info: {}", authInfo); + if (LOG.isDebugEnabled()) { + LOG.debug("get auth info: {}", authInfo); + } return authInfo; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/SystemController.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/SystemController.java index 35f682b020..533eaa25d5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/SystemController.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/SystemController.java @@ -64,7 +64,9 @@ 
public class SystemController extends BaseController { if (Strings.isNullOrEmpty(currentPath)) { currentPath = "/"; } - LOG.debug("get /system request, thread id: {}", Thread.currentThread().getId()); + if (LOG.isDebugEnabled()) { + LOG.debug("get /system request, thread id: {}", Thread.currentThread().getId()); + } ResponseEntity entity = appendSystemInfo(currentPath, currentPath, request); return entity; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/interceptor/AuthInterceptor.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/interceptor/AuthInterceptor.java index a857834f60..cbaea7b0c8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/interceptor/AuthInterceptor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/interceptor/AuthInterceptor.java @@ -35,7 +35,9 @@ public class AuthInterceptor extends BaseController implements HandlerIntercepto @Override public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) throws Exception { - LOG.debug("get prehandle. thread: {}", Thread.currentThread().getId()); + if (LOG.isDebugEnabled()) { + LOG.debug("get prehandle. thread: {}", Thread.currentThread().getId()); + } // String sessionId = getCookieValue(request, BaseController.PALO_SESSION_ID, response); // SessionValue sessionValue = HttpAuthManager.getInstance().getSessionValue(sessionId); String method = request.getMethod(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/SetConfigAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/SetConfigAction.java index d9aa39ebff..d6b539269a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/SetConfigAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/SetConfigAction.java @@ -83,7 +83,9 @@ public class SetConfigAction extends RestBaseController { Map setConfigs = Maps.newHashMap(); List errConfigs = Lists.newArrayList(); - LOG.debug("get config from url: {}, need persist: {}", configs, needPersist); + if (LOG.isDebugEnabled()) { + LOG.debug("get config from url: {}, need persist: {}", configs, needPersist); + } for (Map.Entry config : configs.entrySet()) { String confKey = config.getKey(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/job/common/JobStatus.java b/fe/fe-core/src/main/java/org/apache/doris/job/common/JobStatus.java index 22b799225d..94cb06db62 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/job/common/JobStatus.java +++ b/fe/fe-core/src/main/java/org/apache/doris/job/common/JobStatus.java @@ -41,4 +41,3 @@ public enum JobStatus { */ FINISHED } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/job/extensions/mtmv/MTMVTask.java b/fe/fe-core/src/main/java/org/apache/doris/job/extensions/mtmv/MTMVTask.java index 6a861200e6..172f6d0bff 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/job/extensions/mtmv/MTMVTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/job/extensions/mtmv/MTMVTask.java @@ -157,7 +157,10 @@ public class MTMVTask extends AbstractTask { ConnectContext ctx = MTMVPlanUtil.createMTMVContext(mtmv); if (LOG.isDebugEnabled()) { String taskSessionContext = ctx.getSessionVariable().toJson().toJSONString(); - LOG.debug("mtmv task session variable, taskId: {}, session: {}", super.getTaskId(), taskSessionContext); + if (LOG.isDebugEnabled()) { + LOG.debug("mtmv task session variable, taskId: {}, session: {}", + super.getTaskId(), taskSessionContext); + } } // Every time a task is run, the relation is regenerated because baseTables and baseViews may 
change, // such as deleting a table and creating a view with the same name diff --git a/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java b/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java index e356bbaa65..2c3e37ed02 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java +++ b/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java @@ -187,7 +187,9 @@ public class JournalEntity implements Writable { // set it to true after the entity is truly read, // to avoid someone forget to call read method. boolean isRead = false; - LOG.debug("get opcode: {}", opCode); + if (LOG.isDebugEnabled()) { + LOG.debug("get opcode: {}", opCode); + } switch (opCode) { case OperationType.OP_LOCAL_EOF: { data = null; diff --git a/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBEnvironment.java b/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBEnvironment.java index 1f604f96a4..f973ed8259 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBEnvironment.java +++ b/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBEnvironment.java @@ -365,7 +365,9 @@ public class BDBEnvironment { if (StringUtils.isNumeric(name)) { ret.add(Long.parseLong(name)); } else { - // LOG.debug("get database names, skipped {}", name); + if (LOG.isDebugEnabled()) { + // LOG.debug("get database names, skipped {}", name); + } } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBJEJournal.java b/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBJEJournal.java index 9f90a3e2a1..e00288eac5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBJEJournal.java +++ b/fe/fe-core/src/main/java/org/apache/doris/journal/bdbje/BDBJEJournal.java @@ -146,7 +146,9 @@ public class BDBJEJournal implements Journal { // CHECKSTYLE IGNORE THIS LINE: B currentJournalDB.put(txn, theKey, theData); // Put with overwrite, it always success dataSize += theData.getSize(); if (i == 0) { - LOG.debug("opCode = {}, journal size = {}", entity.getOpCode(), theData.getSize()); + if (LOG.isDebugEnabled()) { + LOG.debug("opCode = {}, journal size = {}", entity.getOpCode(), theData.getSize()); + } } } @@ -239,7 +241,9 @@ public class BDBJEJournal implements Journal { // CHECKSTYLE IGNORE THIS LINE: B MetricRepo.COUNTER_EDIT_LOG_SIZE_BYTES.increase((long) theData.getSize()); MetricRepo.COUNTER_CURRENT_EDIT_LOG_SIZE_BYTES.increase((long) theData.getSize()); } - LOG.debug("opCode = {}, journal size = {}", op, theData.getSize()); + if (LOG.isDebugEnabled()) { + LOG.debug("opCode = {}, journal size = {}", op, theData.getSize()); + } // Write the key value pair to bdb. boolean writeSucceed = false; for (int i = 0; i < RETRY_TIME; i++) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapAuthenticate.java b/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapAuthenticate.java index e3b9309756..231b10b1e1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapAuthenticate.java +++ b/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapAuthenticate.java @@ -48,7 +48,9 @@ public class LdapAuthenticate { public static boolean authenticate(ConnectContext context, String password, String qualifiedUser) { String usePasswd = (Strings.isNullOrEmpty(password)) ? "NO" : "YES"; String userName = ClusterNamespace.getNameFromFullName(qualifiedUser); - LOG.debug("user:{}", userName); + if (LOG.isDebugEnabled()) { + LOG.debug("user:{}", userName); + } // check user password by ldap server. 
try { @@ -70,7 +72,9 @@ public class LdapAuthenticate { UserIdentity userIdentity; if (userIdentities.isEmpty()) { userIdentity = tempUserIdentity; - LOG.debug("User:{} does not exists in doris, login as temporary users.", userName); + if (LOG.isDebugEnabled()) { + LOG.debug("User:{} does not exists in doris, login as temporary users.", userName); + } context.setIsTempUser(true); } else { userIdentity = userIdentities.get(0); @@ -78,7 +82,9 @@ public class LdapAuthenticate { context.setCurrentUserIdentity(userIdentity); context.setRemoteIP(remoteIp); - LOG.debug("ldap authentication success: identity:{}", context.getCurrentUserIdentity()); + if (LOG.isDebugEnabled()) { + LOG.debug("ldap authentication success: identity:{}", context.getCurrentUserIdentity()); + } return true; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapClient.java b/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapClient.java index 02894eaffd..20b10635ed 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapClient.java +++ b/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapClient.java @@ -129,7 +129,9 @@ public class LdapClient { boolean doesUserExist(String userName) { String user = getUserDn(userName); if (user == null) { - LOG.debug("User:{} does not exist in LDAP.", userName); + if (LOG.isDebugEnabled()) { + LOG.debug("User:{} does not exist in LDAP.", userName); + } return false; } return true; diff --git a/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapManager.java b/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapManager.java index bb0d1bd868..df538c8122 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/ldap/LdapManager.java @@ -216,7 +216,9 @@ public class LdapManager { roles.add(Env.getCurrentEnv().getAuth().getRoleByName(qualifiedRole)); } } - LOG.debug("get user:{} ldap groups:{} and doris roles:{}", userName, ldapGroups, roles); + if (LOG.isDebugEnabled()) { + LOG.debug("get user:{} ldap groups:{} and doris roles:{}", userName, ldapGroups, roles); + } Role ldapGroupsPrivs = new Role(LDAP_DEFAULT_ROLE); grantDefaultPrivToTempUser(ldapGroupsPrivs); diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroup.java b/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroup.java index 332c82e361..7c2f72dd16 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroup.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/BrokerFileGroup.java @@ -542,4 +542,3 @@ public class BrokerFileGroup implements Writable { return fileGroup; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/DeleteHandler.java b/fe/fe-core/src/main/java/org/apache/doris/load/DeleteHandler.java index 815bf91f90..ac51854f9f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/DeleteHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/DeleteHandler.java @@ -346,6 +346,8 @@ public class DeleteHandler implements Writable { iter1.remove(); } } - LOG.debug("remove expired delete job info num: {}", counter); + if (LOG.isDebugEnabled()) { + LOG.debug("remove expired delete job info num: {}", counter); + } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/DeleteJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/DeleteJob.java index a764b42773..89d116253a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/DeleteJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/DeleteJob.java @@ -380,8 +380,10 @@ public class DeleteJob extends 
AbstractTxnStateChangeCallback implements DeleteJ checkAndUpdateQuorum(); Thread.sleep(1000); nowQuorumTimeMs = System.currentTimeMillis(); - LOG.debug("wait for quorum finished delete job: {}, txn id: {}", - id, signature); + if (LOG.isDebugEnabled()) { + LOG.debug("wait for quorum finished delete job: {}, txn id: {}", + id, signature); + } } break; default: diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/ExportJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/ExportJob.java index f6da9a6a97..9fb827f6d6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/ExportJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/ExportJob.java @@ -273,9 +273,13 @@ public class ExportJob implements Writable { // debug LOG output if (LOG.isDebugEnabled()) { for (int i = 0; i < selectStmtListPerParallel.size(); ++i) { - LOG.debug("ExportTaskExecutor {} is responsible for outfile:", i); + if (LOG.isDebugEnabled()) { + LOG.debug("ExportTaskExecutor {} is responsible for outfile:", i); + } for (StatementBase outfile : selectStmtListPerParallel.get(i)) { - LOG.debug("outfile sql: [{}]", outfile.toSql()); + if (LOG.isDebugEnabled()) { + LOG.debug("outfile sql: [{}]", outfile.toSql()); + } } } } @@ -328,7 +332,9 @@ public class ExportJob implements Writable { // Because there is no division of tablets in view and external table // we set parallelism = 1; this.parallelism = 1; - LOG.debug("Because there is no division of tablets in view and external table, we set parallelism = 1"); + if (LOG.isDebugEnabled()) { + LOG.debug("Because there is no division of tablets in view and external table, we set parallelism = 1"); + } // build source columns List selectLists = Lists.newArrayList(); @@ -421,9 +427,13 @@ public class ExportJob implements Writable { // debug LOG output if (LOG.isDebugEnabled()) { for (int i = 0; i < tableRefListPerParallel.size(); i++) { - LOG.debug("ExportTaskExecutor {} is responsible for tablets:", i); + if (LOG.isDebugEnabled()) { + LOG.debug("ExportTaskExecutor {} is responsible for tablets:", i); + } for (TableRef tableRef : tableRefListPerParallel.get(i)) { - LOG.debug("Tablet id: [{}]", tableRef.getSampleTabletIds()); + if (LOG.isDebugEnabled()) { + LOG.debug("Tablet id: [{}]", tableRef.getSampleTabletIds()); + } } } } @@ -448,9 +458,13 @@ public class ExportJob implements Writable { // debug LOG output if (LOG.isDebugEnabled()) { for (int i = 0; i < selectStmtListPerParallel.size(); ++i) { - LOG.debug("ExportTaskExecutor {} is responsible for outfile:", i); + if (LOG.isDebugEnabled()) { + LOG.debug("ExportTaskExecutor {} is responsible for outfile:", i); + } for (StatementBase outfile : selectStmtListPerParallel.get(i)) { - LOG.debug("outfile sql: [{}]", outfile.toSql()); + if (LOG.isDebugEnabled()) { + LOG.debug("outfile sql: [{}]", outfile.toSql()); + } } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/Load.java b/fe/fe-core/src/main/java/org/apache/doris/load/Load.java index 3f9685739a..aaace9b37d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/Load.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/Load.java @@ -324,7 +324,9 @@ public class Load { } else { columnDesc = new ImportColumnDesc(column.getName().toLowerCase()); } - LOG.debug("add base column {} to stream load task", column.getName()); + if (LOG.isDebugEnabled()) { + LOG.debug("add base column {} to stream load task", column.getName()); + } copiedColumnExprs.add(columnDesc); } if (hiddenColumns != null) { @@ -332,7 +334,9 @@ public class Load { Column 
column = tbl.getColumn(columnName); if (column != null && !column.isVisible()) { ImportColumnDesc columnDesc = new ImportColumnDesc(column.getName()); - LOG.debug("add hidden column {} to stream load task", column.getName()); + if (LOG.isDebugEnabled()) { + LOG.debug("add hidden column {} to stream load task", column.getName()); + } copiedColumnExprs.add(columnDesc); } } @@ -449,7 +453,9 @@ public class Load { } } - LOG.debug("plan srcTupleDesc {}", srcTupleDesc.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("plan srcTupleDesc {}", srcTupleDesc.toString()); + } /* * The extension column of the materialized view is added to the expression evaluation of load @@ -467,12 +473,17 @@ public class Load { } } - LOG.debug("slotDescByName: {}, exprsByName: {}, mvDefineExpr: {}", slotDescByName, exprsByName, mvDefineExpr); + if (LOG.isDebugEnabled()) { + LOG.debug("slotDescByName: {}, exprsByName: {}, mvDefineExpr: {}", + slotDescByName, exprsByName, mvDefineExpr); + } // in vectorized load, reanalyze exprs with castExpr type // otherwise analyze exprs with varchar type analyzeAllExprs(tbl, analyzer, exprsByName, mvDefineExpr, slotDescByName); - LOG.debug("after init column, exprMap: {}", exprsByName); + if (LOG.isDebugEnabled()) { + LOG.debug("after init column, exprMap: {}", exprsByName); + } } private static SlotRef getSlotFromDesc(SlotDescriptor slotDesc) { @@ -713,7 +724,9 @@ public class Load { exprs.add(NullLiteral.create(Type.VARCHAR)); } - LOG.debug("replace_value expr: {}", exprs); + if (LOG.isDebugEnabled()) { + LOG.debug("replace_value expr: {}", exprs); + } FunctionCallExpr newFn = new FunctionCallExpr("if", exprs); return newFn; } else if (funcName.equalsIgnoreCase("strftime")) { @@ -1041,7 +1054,9 @@ public class Load { } long start = System.currentTimeMillis(); - LOG.debug("begin to get load job info, size: {}", loadJobs.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("begin to get load job info, size: {}", loadJobs.size()); + } for (LoadJob loadJob : loadJobs) { // filter first @@ -1063,7 +1078,9 @@ public class Load { loadJobInfos.add(composeJobInfoByLoadJob(loadJob)); } // end for loadJobs - LOG.debug("finished to get load job info, cost: {}", (System.currentTimeMillis() - start)); + if (LOG.isDebugEnabled()) { + LOG.debug("finished to get load job info, cost: {}", (System.currentTimeMillis() - start)); + } } finally { readUnlock(); } @@ -1178,7 +1195,9 @@ public class Load { } long start = System.currentTimeMillis(); - LOG.debug("begin to get load job info, size: {}", loadJobs.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("begin to get load job info, size: {}", loadJobs.size()); + } PatternMatcher matcher = null; if (labelValue != null && !accurateMatch) { matcher = PatternMatcherWrapper.createMysqlPattern(labelValue, @@ -1233,7 +1252,9 @@ public class Load { loadJobInfos.add(composeJobInfoByLoadJob(loadJob)); } // end for loadJobs - LOG.debug("finished to get load job info, cost: {}", (System.currentTimeMillis() - start)); + if (LOG.isDebugEnabled()) { + LOG.debug("finished to get load job info, cost: {}", (System.currentTimeMillis() - start)); + } } finally { readUnlock(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJob.java index 073f47a66c..c83ce68338 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJob.java @@ -577,10 +577,12 @@ public abstract class LoadJob extends 
AbstractTxnStateChangeCallback implements if (abortTxn) { // abort txn try { - LOG.debug(new LogBuilder(LogKey.LOAD_JOB, id) - .add("transaction_id", transactionId) - .add("msg", "begin to abort txn") - .build()); + if (LOG.isDebugEnabled()) { + LOG.debug(new LogBuilder(LogKey.LOAD_JOB, id) + .add("transaction_id", transactionId) + .add("msg", "begin to abort txn") + .build()); + } Env.getCurrentGlobalTransactionMgr().abortTransaction(dbId, transactionId, failMsg.getMsg()); } catch (UserException e) { LOG.warn(new LogBuilder(LogKey.LOAD_JOB, id) diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/MysqlLoadManager.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/MysqlLoadManager.java index 7f9ff29c69..5c46ed5d86 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/MysqlLoadManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/MysqlLoadManager.java @@ -211,7 +211,9 @@ public class MysqlLoadManager { while (buffer != null && buffer.limit() != 0) { buffer = context.getMysqlChannel().fetchOnePacket(); } - LOG.debug("Finished reading the left bytes."); + if (LOG.isDebugEnabled()) { + LOG.debug("Finished reading the left bytes."); + } } // make cancel message to user if (loadContextMap.containsKey(loadId) && loadContextMap.get(loadId).isCancelled()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/ProgressManager.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/ProgressManager.java index 25829d7c24..c333f88ad9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/ProgressManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/ProgressManager.java @@ -36,7 +36,9 @@ public class ProgressManager { private Map idToProgress = Maps.newConcurrentMap(); public void registerProgress(String id, int scannerNum) { - LOG.debug("create {} with initial scannerNum {}", id, scannerNum); + if (LOG.isDebugEnabled()) { + LOG.debug("create {} with initial scannerNum {}", id, scannerNum); + } idToProgress.remove(id); idToProgress.put(id, new Progress(scannerNum)); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkEtlJobHandler.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkEtlJobHandler.java index ec3a0bbede..75e3a6e171 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkEtlJobHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkEtlJobHandler.java @@ -348,7 +348,9 @@ public class SparkEtlJobHandler { } filePathToSize.put(fstatus.getPath(), fstatus.getSize()); } - LOG.debug("get spark etl file paths. files map: {}", filePathToSize); + if (LOG.isDebugEnabled()) { + LOG.debug("get spark etl file paths. 
files map: {}", filePathToSize); + } return filePathToSize; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLauncherMonitor.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLauncherMonitor.java index 304ac0218e..4b919cd993 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLauncherMonitor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLauncherMonitor.java @@ -154,8 +154,10 @@ public class SparkLauncherMonitor { } } - LOG.debug("spark appId that handle get is {}, state: {}", - handle.getAppId(), handle.getState().toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("spark appId that handle get is {}, state: {}", + handle.getAppId(), handle.getState().toString()); + } switch (newState) { case UNKNOWN: case CONNECTED: diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadJob.java index 69e1420ff3..b8ff96394d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadJob.java @@ -515,11 +515,13 @@ public class SparkLoadJob extends BulkLoadJob { tBrokerScanRange.getBrokerAddresses().add( new TNetworkAddress(fsBroker.host, fsBroker.port)); - LOG.debug("push task for replica {}, broker {}:{}," - + " backendId {}, filePath {}, fileSize {}", - replicaId, fsBroker.host, - fsBroker.port, backendId, tBrokerRangeDesc.path, - tBrokerRangeDesc.file_size); + if (LOG.isDebugEnabled()) { + LOG.debug("push task for replica {}, broker {}:{}," + + " backendId {}, filePath {}, fileSize {}", + replicaId, fsBroker.host, + fsBroker.port, backendId, tBrokerRangeDesc.path, + tBrokerRangeDesc.file_size); + } PushTask pushTask = new PushTask(backendId, dbId, olapTable.getId(), partitionId, indexId, tabletId, replicaId, schemaHash, 0, id, @@ -667,7 +669,9 @@ public class SparkLoadJob extends BulkLoadJob { private void clearJob() { Preconditions.checkState(state == JobState.FINISHED || state == JobState.CANCELLED); - LOG.debug("kill etl job and delete etl files. id: {}, state: {}", id, state); + if (LOG.isDebugEnabled()) { + LOG.debug("kill etl job and delete etl files. id: {}, state: {}", id, state); + } SparkEtlJobHandler handler = new SparkEtlJobHandler(); if (state == JobState.CANCELLED) { if ((!Strings.isNullOrEmpty(appId) && sparkResource.isYarnMaster()) || sparkLoadAppHandle != null) { @@ -688,7 +692,9 @@ public class SparkLoadJob extends BulkLoadJob { } } - LOG.debug("clear push tasks and infos that not persist. id: {}, state: {}", id, state); + if (LOG.isDebugEnabled()) { + LOG.debug("clear push tasks and infos that not persist. 
id: {}, state: {}", id, state); + } writeLock(); try { // clear push task first diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkRepository.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkRepository.java index bfdad97aa0..4efd2d1727 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkRepository.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkRepository.java @@ -256,7 +256,9 @@ public class SparkRepository { try (FileInputStream fis = new FileInputStream(file)) { md5sum = DigestUtils.md5Hex(fis); Preconditions.checkNotNull(md5sum); - LOG.debug("get md5sum from file {}, md5sum={}", filePath, md5sum); + if (LOG.isDebugEnabled()) { + LOG.debug("get md5sum from file {}, md5sum={}", filePath, md5sum); + } return md5sum; } catch (FileNotFoundException e) { throw new LoadException("file " + filePath + " does not exist"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/TokenManager.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/TokenManager.java index fd06fa9813..6443e6b232 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/TokenManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/TokenManager.java @@ -86,7 +86,9 @@ public class TokenManager { FrontendService.Client client = getClient(thriftAddress); - LOG.debug("Send acquire token to Master {}", thriftAddress); + if (LOG.isDebugEnabled()) { + LOG.debug("Send acquire token to Master {}", thriftAddress); + } boolean isReturnToPool = false; try { diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaProgress.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaProgress.java index bb31d3ce54..49542cd140 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaProgress.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaProgress.java @@ -191,8 +191,10 @@ public class KafkaProgress extends RoutineLoadProgress { // + 1 to point to the next msg offset to be consumed newProgress.partitionIdToOffset.entrySet().stream() .forEach(entity -> this.partitionIdToOffset.put(entity.getKey(), entity.getValue() + 1)); - LOG.debug("update kafka progress: {}, task: {}, job: {}", - newProgress.toJsonString(), DebugUtil.printId(attachment.getTaskId()), attachment.getJobId()); + if (LOG.isDebugEnabled()) { + LOG.debug("update kafka progress: {}, task: {}, job: {}", + newProgress.toJsonString(), DebugUtil.printId(attachment.getTaskId()), attachment.getJobId()); + } } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaRoutineLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaRoutineLoadJob.java index 7d41194d93..24929520ec 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaRoutineLoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaRoutineLoadJob.java @@ -236,7 +236,9 @@ public class KafkaRoutineLoadJob extends RoutineLoadJob { unprotectUpdateState(JobState.RUNNING, null, false); } } else { - LOG.debug("Ignore to divide routine load job while job state {}", state); + if (LOG.isDebugEnabled()) { + LOG.debug("Ignore to divide routine load job while job state {}", state); + } } // save task into queue of needScheduleTasks Env.getCurrentEnv().getRoutineLoadTaskScheduler().addTasksInQueue(result); @@ -252,9 +254,11 @@ public class KafkaRoutineLoadJob extends RoutineLoadJob { desireTaskConcurrentNum = 
Config.max_routine_load_task_concurrent_num; } - LOG.debug("current concurrent task number is min" - + "(partition num: {}, desire task concurrent num: {} config: {})", - partitionNum, desireTaskConcurrentNum, Config.max_routine_load_task_concurrent_num); + if (LOG.isDebugEnabled()) { + LOG.debug("current concurrent task number is min" + + "(partition num: {}, desire task concurrent num: {} config: {})", + partitionNum, desireTaskConcurrentNum, Config.max_routine_load_task_concurrent_num); + } currentTaskConcurrentNum = Math.min(partitionNum, Math.min(desireTaskConcurrentNum, Config.max_routine_load_task_concurrent_num)); return currentTaskConcurrentNum; @@ -272,10 +276,12 @@ public class KafkaRoutineLoadJob extends RoutineLoadJob { // Running here, the status of the transaction should be ABORTED, // and it is caused by other errors. In this case, we should not update the offset. - LOG.debug("no need to update the progress of kafka routine load. txn status: {}, " - + "txnStatusChangeReason: {}, task: {}, job: {}", - txnState.getTransactionStatus(), txnStatusChangeReason, - DebugUtil.printId(rlTaskTxnCommitAttachment.getTaskId()), id); + if (LOG.isDebugEnabled()) { + LOG.debug("no need to update the progress of kafka routine load. txn status: {}, " + + "txnStatusChangeReason: {}, task: {}, job: {}", + txnState.getTransactionStatus(), txnStatusChangeReason, + DebugUtil.printId(rlTaskTxnCommitAttachment.getTaskId()), id); + } return false; } @@ -717,8 +723,11 @@ public class KafkaRoutineLoadJob extends RoutineLoadJob { // (because librdkafa's query_watermark_offsets() will return the next offset. // For example, there 4 msg in partition with offset 0,1,2,3, // query_watermark_offsets() will return 4.) - LOG.debug("has more data to consume. offsets to be consumed: {}, latest offsets: {}, task {}, job {}", - partitionIdToOffset, cachedPartitionWithLatestOffsets, taskId, id); + if (LOG.isDebugEnabled()) { + LOG.debug("has more data to consume. offsets to be consumed: {}, " + + "latest offsets: {}, task {}, job {}", + partitionIdToOffset, cachedPartitionWithLatestOffsets, taskId, id); + } } else { needUpdateCache = true; break; @@ -748,9 +757,11 @@ public class KafkaRoutineLoadJob extends RoutineLoadJob { long partitionLatestOffset = cachedPartitionWithLatestOffsets.get(partitionId); long recordPartitionOffset = entry.getValue(); if (recordPartitionOffset < partitionLatestOffset) { - LOG.debug("has more data to consume. offsets to be consumed: {}," - + " latest offsets: {}, task {}, job {}", - partitionIdToOffset, cachedPartitionWithLatestOffsets, taskId, id); + if (LOG.isDebugEnabled()) { + LOG.debug("has more data to consume. offsets to be consumed: {}," + + " latest offsets: {}, task {}, job {}", + partitionIdToOffset, cachedPartitionWithLatestOffsets, taskId, id); + } return true; } else if (recordPartitionOffset > partitionLatestOffset) { String msg = "offset set in job: " + recordPartitionOffset @@ -762,8 +773,10 @@ public class KafkaRoutineLoadJob extends RoutineLoadJob { } } - LOG.debug("no more data to consume. offsets to be consumed: {}, latest offsets: {}, task {}, job {}", - partitionIdToOffset, cachedPartitionWithLatestOffsets, taskId, id); + if (LOG.isDebugEnabled()) { + LOG.debug("no more data to consume. 
offsets to be consumed: {}, latest offsets: {}, task {}, job {}", + partitionIdToOffset, cachedPartitionWithLatestOffsets, taskId, id); + } return false; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java index 9ce8bb2e5a..1a87328b60 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java @@ -999,7 +999,9 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl } finally { if (!passCheck) { writeUnlock(); - LOG.debug("unlock write lock of routine load job before check: {}", id); + if (LOG.isDebugEnabled()) { + LOG.debug("unlock write lock of routine load job before check: {}", id); + } } } } @@ -1021,7 +1023,9 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl taskBeId = routineLoadTaskInfo.getBeId(); executeTaskOnTxnStatusChanged(routineLoadTaskInfo, txnState, TransactionStatus.COMMITTED, null); ++this.jobStatistic.committedTaskNum; - LOG.debug("routine load task committed. task id: {}, job id: {}", txnState.getLabel(), id); + if (LOG.isDebugEnabled()) { + LOG.debug("routine load task committed. task id: {}, job id: {}", txnState.getLabel(), id); + } } } catch (Throwable e) { LOG.warn("after committed failed", e); @@ -1032,7 +1036,9 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl new ErrorReason(InternalErrorCode.INTERNAL_ERR, errmsg), false /* not replay */); } finally { writeUnlock(); - LOG.debug("unlock write lock of routine load job after committed: {}", id); + if (LOG.isDebugEnabled()) { + LOG.debug("unlock write lock of routine load job after committed: {}", id); + } } } @@ -1041,7 +1047,9 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl Preconditions.checkNotNull(txnState.getTxnCommitAttachment(), txnState); replayUpdateProgress((RLTaskTxnCommitAttachment) txnState.getTxnCommitAttachment()); this.jobStatistic.committedTaskNum++; - LOG.debug("replay on committed: {}", txnState); + if (LOG.isDebugEnabled()) { + LOG.debug("replay on committed: {}", txnState); + } } /* @@ -1197,7 +1205,9 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl .build(), e); } finally { writeUnlock(); - LOG.debug("unlock write lock of routine load job after aborted: {}", id); + if (LOG.isDebugEnabled()) { + LOG.debug("unlock write lock of routine load job after aborted: {}", id); + } } } @@ -1208,7 +1218,9 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl replayUpdateProgress((RLTaskTxnCommitAttachment) txnState.getTxnCommitAttachment()); } this.jobStatistic.abortedTaskNum++; - LOG.debug("replay on aborted: {}, has attachment: {}", txnState, txnState.getTxnCommitAttachment() == null); + if (LOG.isDebugEnabled()) { + LOG.debug("replay on aborted: {}, has attachment: {}", txnState, txnState.getTxnCommitAttachment() == null); + } } // check task exists or not before call method diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadManager.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadManager.java index ef96b9a42e..13a86f5d17 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadManager.java @@ -712,7 
+712,9 @@ public class RoutineLoadManager implements Writable { // This function is called periodically. // Cancelled and stopped job will be removed after Configure.label_keep_max_second seconds public void cleanOldRoutineLoadJobs() { - LOG.debug("begin to clean old routine load jobs "); + if (LOG.isDebugEnabled()) { + LOG.debug("begin to clean old routine load jobs "); + } clearRoutineLoadJobIf(RoutineLoadJob::isExpired); } @@ -728,7 +730,9 @@ public class RoutineLoadManager implements Writable { } writeLock(); try { - LOG.debug("begin to clean routine load jobs"); + if (LOG.isDebugEnabled()) { + LOG.debug("begin to clean routine load jobs"); + } Deque finishedJobs = idToRoutineLoadJob .values() .stream() diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskInfo.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskInfo.java index 9c28dbfc6a..93ec573717 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskInfo.java @@ -178,7 +178,9 @@ public abstract class RoutineLoadTaskInfo { // this should not happen for a routine load task, throw it out throw e; } catch (AnalysisException | BeginTransactionException e) { - LOG.debug("begin txn failed for routine load task: {}, {}", DebugUtil.printId(id), e.getMessage()); + if (LOG.isDebugEnabled()) { + LOG.debug("begin txn failed for routine load task: {}, {}", DebugUtil.printId(id), e.getMessage()); + } return false; } catch (MetaNotFoundException | QuotaExceedException e) { LOG.warn("failed to begin txn for routine load task: {}, job id: {}", diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskScheduler.java index a6cc796027..b4661ba32b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskScheduler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskScheduler.java @@ -116,8 +116,10 @@ public class RoutineLoadTaskScheduler extends MasterDaemon { private void scheduleOneTask(RoutineLoadTaskInfo routineLoadTaskInfo) throws Exception { routineLoadTaskInfo.setLastScheduledTime(System.currentTimeMillis()); - LOG.debug("schedule routine load task info {} for job {}", - routineLoadTaskInfo.id, routineLoadTaskInfo.getJobId()); + if (LOG.isDebugEnabled()) { + LOG.debug("schedule routine load task info {} for job {}", + routineLoadTaskInfo.id, routineLoadTaskInfo.getJobId()); + } // check if task has been abandoned if (!routineLoadManager.checkTaskInJob(routineLoadTaskInfo)) { // task has been abandoned while renew task has been added in queue @@ -182,8 +184,10 @@ public class RoutineLoadTaskScheduler extends MasterDaemon { try { long startTime = System.currentTimeMillis(); tRoutineLoadTask = routineLoadTaskInfo.createRoutineLoadTask(); - LOG.debug("create routine load task cost(ms): {}, job id: {}", - (System.currentTimeMillis() - startTime), routineLoadTaskInfo.getJobId()); + if (LOG.isDebugEnabled()) { + LOG.debug("create routine load task cost(ms): {}, job id: {}", + (System.currentTimeMillis() - startTime), routineLoadTaskInfo.getJobId()); + } } catch (MetaNotFoundException e) { // this means database or table has been dropped, just stop this routine load job. 
// set BE id to -1 to release the BE slot @@ -206,12 +210,16 @@ public class RoutineLoadTaskScheduler extends MasterDaemon { try { long startTime = System.currentTimeMillis(); submitTask(routineLoadTaskInfo.getBeId(), tRoutineLoadTask); - LOG.debug("send routine load task cost(ms): {}, job id: {}", - (System.currentTimeMillis() - startTime), routineLoadTaskInfo.getJobId()); + if (LOG.isDebugEnabled()) { + LOG.debug("send routine load task cost(ms): {}, job id: {}", + (System.currentTimeMillis() - startTime), routineLoadTaskInfo.getJobId()); + } if (tRoutineLoadTask.isSetKafkaLoadInfo()) { - LOG.debug("send kafka routine load task {} with partition offset: {}, job: {}", - tRoutineLoadTask.label, tRoutineLoadTask.kafka_load_info.partition_begin_offset, - tRoutineLoadTask.getJobId()); + if (LOG.isDebugEnabled()) { + LOG.debug("send kafka routine load task {} with partition offset: {}, job: {}", + tRoutineLoadTask.label, tRoutineLoadTask.kafka_load_info.partition_begin_offset, + tRoutineLoadTask.getJobId()); + } } } catch (LoadException e) { // submit task failed (such as TOO_MANY_TASKS error), but txn has already begun. @@ -237,19 +245,25 @@ public class RoutineLoadTaskScheduler extends MasterDaemon { || (currentTime - lastBackendSlotUpdateTime > BACKEND_SLOT_UPDATE_INTERVAL_MS)) { routineLoadManager.updateBeIdToMaxConcurrentTasks(); lastBackendSlotUpdateTime = currentTime; - LOG.debug("update backend max slot for routine load task scheduling. current task num per BE: {}", - Config.max_routine_load_task_num_per_be); + if (LOG.isDebugEnabled()) { + LOG.debug("update backend max slot for routine load task scheduling. current task num per BE: {}", + Config.max_routine_load_task_num_per_be); + } } } public void addTaskInQueue(RoutineLoadTaskInfo routineLoadTaskInfo) { needScheduleTasksQueue.add(routineLoadTaskInfo); - LOG.debug("total tasks num in routine load task queue: {}", needScheduleTasksQueue.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("total tasks num in routine load task queue: {}", needScheduleTasksQueue.size()); + } } public void addTasksInQueue(List routineLoadTaskInfoList) { needScheduleTasksQueue.addAll(routineLoadTaskInfoList); - LOG.debug("total tasks num in routine load task queue: {}", needScheduleTasksQueue.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("total tasks num in routine load task queue: {}", needScheduleTasksQueue.size()); + } } private void submitTask(long beId, TRoutineLoadTask tTask) throws LoadException { @@ -271,7 +285,9 @@ public class RoutineLoadTaskScheduler extends MasterDaemon { throw new LoadException("failed to submit task. error code: " + tStatus.getStatusCode() + ", msg: " + (tStatus.getErrorMsgsSize() > 0 ? 
tStatus.getErrorMsgs().get(0) : "NaN")); } - LOG.debug("send routine load task {} to BE: {}", DebugUtil.printId(tTask.id), beId); + if (LOG.isDebugEnabled()) { + LOG.debug("send routine load task {} to BE: {}", DebugUtil.printId(tTask.id), beId); + } } catch (Exception e) { throw new LoadException("failed to send task: " + e.getMessage(), e); } finally { diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/ScheduleRule.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/ScheduleRule.java index 3499d3f8f6..e2eaf4d825 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/ScheduleRule.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/ScheduleRule.java @@ -48,23 +48,29 @@ public class ScheduleRule { return false; } if (jobRoutine.autoResumeLock) { //only manual resume for unlock - LOG.debug("routine load job {}'s autoResumeLock is true, skip", jobRoutine.id); + if (LOG.isDebugEnabled()) { + LOG.debug("routine load job {}'s autoResumeLock is true, skip", jobRoutine.id); + } return false; } /* * Handle all backends are down. */ - LOG.debug("try to auto reschedule routine load {}, firstResumeTimestamp: {}, autoResumeCount: {}, " - + "pause reason: {}", - jobRoutine.id, jobRoutine.firstResumeTimestamp, jobRoutine.autoResumeCount, - jobRoutine.pauseReason == null ? "null" : jobRoutine.pauseReason.getCode().name()); + if (LOG.isDebugEnabled()) { + LOG.debug("try to auto reschedule routine load {}, firstResumeTimestamp: {}, autoResumeCount: {}, " + + "pause reason: {}", + jobRoutine.id, jobRoutine.firstResumeTimestamp, jobRoutine.autoResumeCount, + jobRoutine.pauseReason == null ? "null" : jobRoutine.pauseReason.getCode().name()); + } if (jobRoutine.pauseReason != null && jobRoutine.pauseReason.getCode() == InternalErrorCode.REPLICA_FEW_ERR) { int dead = deadBeCount(); if (dead > Config.max_tolerable_backend_down_num) { - LOG.debug("dead backend num {} is larger than config {}, " - + "routine load job {} can not be auto rescheduled", - dead, Config.max_tolerable_backend_down_num, jobRoutine.id); + if (LOG.isDebugEnabled()) { + LOG.debug("dead backend num {} is larger than config {}, " + + "routine load job {} can not be auto rescheduled", + dead, Config.max_tolerable_backend_down_num, jobRoutine.id); + } return false; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncChecker.java b/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncChecker.java index 319589c9aa..532c0e8e49 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncChecker.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncChecker.java @@ -41,7 +41,9 @@ public class SyncChecker extends MasterDaemon { @Override protected void runAfterCatalogReady() { - LOG.debug("start check sync jobs."); + if (LOG.isDebugEnabled()) { + LOG.debug("start check sync jobs."); + } try { process(); cleanOldSyncJobs(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncJobManager.java b/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncJobManager.java index 5be922d6e2..9455d2c9ea 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncJobManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/sync/SyncJobManager.java @@ -288,7 +288,9 @@ public class SyncJobManager implements Writable { // Remove old sync jobs. Called periodically. // Stopped jobs will be removed after Config.label_keep_max_second. 
public void cleanOldSyncJobs() { - LOG.debug("begin to clean old sync jobs "); + if (LOG.isDebugEnabled()) { + LOG.debug("begin to clean old sync jobs "); + } cleanFinishedSyncJobsIf(job -> job.isExpired(System.currentTimeMillis())); } @@ -301,7 +303,9 @@ public class SyncJobManager implements Writable { } writeLock(); try { - LOG.debug("begin to clean finished sync jobs "); + if (LOG.isDebugEnabled()) { + LOG.debug("begin to clean finished sync jobs "); + } Deque finishedJobs = idToSyncJob .values() .stream() diff --git a/fe/fe-core/src/main/java/org/apache/doris/master/MasterImpl.java b/fe/fe-core/src/main/java/org/apache/doris/master/MasterImpl.java index 64b771663b..485463d8da 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/master/MasterImpl.java +++ b/fe/fe-core/src/main/java/org/apache/doris/master/MasterImpl.java @@ -216,7 +216,9 @@ public class MasterImpl { } if (tStatus.getStatusCode() == TStatusCode.OK) { - LOG.debug("report task success. {}", request.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("report task success. {}", request.toString()); + } } return result; @@ -262,8 +264,10 @@ public class MasterImpl { request.getReportVersion(), task.getDbId(), task.getTableId()); createReplicaTask.countDownLatch(task.getBackendId(), task.getSignature()); - LOG.debug("finish create replica. tablet id: {}, be: {}, report version: {}", - tabletId, task.getBackendId(), request.getReportVersion()); + if (LOG.isDebugEnabled()) { + LOG.debug("finish create replica. tablet id: {}, be: {}, report version: {}", + tabletId, task.getBackendId(), request.getReportVersion()); + } } } finally { AgentTaskQueue.removeTask(task.getBackendId(), TTaskType.CREATE, task.getSignature()); @@ -281,8 +285,10 @@ public class MasterImpl { + request.getTaskStatus().getErrorMsgs().toString()); } else { tabletTask.countDownLatch(task.getBackendId(), tabletTask.getTablets()); - LOG.debug("finish update tablet meta. tablet id: {}, be: {}", - tabletTask.getTablets(), task.getBackendId()); + if (LOG.isDebugEnabled()) { + LOG.debug("finish update tablet meta. tablet id: {}, be: {}", + tabletTask.getTablets(), task.getBackendId()); + } } } finally { AgentTaskQueue.removeTask(task.getBackendId(), TTaskType.UPDATE_TABLET_META_INFO, task.getSignature()); @@ -329,7 +335,9 @@ public class MasterImpl { LOG.warn("invalid push report infos. finishTabletInfos' size: " + finishTabletInfos.size()); return; } - LOG.debug("push report state: {}", pushState.name()); + if (LOG.isDebugEnabled()) { + LOG.debug("push report state: {}", pushState.name()); + } OlapTable olapTable = (OlapTable) db.getTableNullable(tableId); if (olapTable == null || !olapTable.writeLockIfExist()) { @@ -395,7 +403,9 @@ public class MasterImpl { } AgentTaskQueue.removeTask(backendId, TTaskType.REALTIME_PUSH, signature); - LOG.debug("finish push replica. tabletId: {}, backendId: {}", pushTabletId, backendId); + if (LOG.isDebugEnabled()) { + LOG.debug("finish push replica. 
tabletId: {}, backendId: {}", pushTabletId, backendId); + } } catch (MetaNotFoundException e) { AgentTaskQueue.removeTask(backendId, TTaskType.REALTIME_PUSH, signature); LOG.warn("finish push replica error", e); diff --git a/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java b/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java index 7d9d9ac825..6950dc83e9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java @@ -511,7 +511,9 @@ public class ReportHandler extends Daemon { } private static void taskReport(long backendId, Map> runningTasks) { - LOG.debug("begin to handle task report from backend {}", backendId); + if (LOG.isDebugEnabled()) { + LOG.debug("begin to handle task report from backend {}", backendId); + } long start = System.currentTimeMillis(); if (LOG.isDebugEnabled()) { @@ -519,7 +521,9 @@ public class ReportHandler extends Daemon { Set taskSet = runningTasks.get(type); if (!taskSet.isEmpty()) { String signatures = StringUtils.join(taskSet, ", "); - LOG.debug("backend task[{}]: {}", type.name(), signatures); + if (LOG.isDebugEnabled()) { + LOG.debug("backend task[{}]: {}", type.name(), signatures); + } } } } @@ -547,7 +551,9 @@ public class ReportHandler extends Daemon { } - LOG.debug("get {} diff task(s) to resend", batchTask.getTaskNum()); + if (LOG.isDebugEnabled()) { + LOG.debug("get {} diff task(s) to resend", batchTask.getTaskNum()); + } if (batchTask.getTaskNum() > 0) { AgentTaskExecutor.submit(batchTask); } @@ -716,13 +722,17 @@ public class ReportHandler extends Daemon { } ++syncCounter; - LOG.debug("sync replica {} of tablet {} in backend {} in db {}. report version: {}", - replica.getId(), tabletId, backendId, dbId, backendReportVersion); + if (LOG.isDebugEnabled()) { + LOG.debug("sync replica {} of tablet {} in backend {} in db {}. report version: {}", + replica.getId(), tabletId, backendId, dbId, backendReportVersion); + } } else { - LOG.debug("replica {} of tablet {} in backend {} version is changed" - + " between check and real sync. meta[{}]. backend[{}]", - replica.getId(), tabletId, backendId, metaVersion, - backendVersion); + if (LOG.isDebugEnabled()) { + LOG.debug("replica {} of tablet {} in backend {} version is changed" + + " between check and real sync. meta[{}]. backend[{}]", + replica.getId(), tabletId, backendId, metaVersion, + backendVersion); + } } } } finally { @@ -912,7 +922,9 @@ public class ReportHandler extends Daemon { TTabletInfo backendTabletInfo = backendTablet.getTabletInfos().get(0); boolean needDelete = false; TabletMeta tabletMeta = invertedIndex.getTabletMeta(tabletId); - LOG.debug("process tablet [{}], backend[{}]", tabletId, backendId); + if (LOG.isDebugEnabled()) { + LOG.debug("process tablet [{}], backend[{}]", tabletId, backendId); + } if (!tabletFoundInMeta.contains(tabletId)) { if (isBackendReplicaHealthy(backendTabletInfo)) { // if this tablet meta is still in invertedIndex. try to add it. @@ -920,7 +932,9 @@ public class ReportHandler extends Daemon { if (tabletMeta != null && addReplica(tabletId, tabletMeta, backendTabletInfo, backendId)) { // update counter ++addToMetaCounter; - LOG.debug("add to meta. tablet[{}], backend[{}]", tabletId, backendId); + if (LOG.isDebugEnabled()) { + LOG.debug("add to meta. tablet[{}], backend[{}]", tabletId, backendId); + } } else { LOG.info("failed add to meta. 
tablet[{}], backend[{}]", tabletId, backendId); needDelete = true; diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/AcceptListener.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/AcceptListener.java index e88d91316c..bee00d9e69 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/AcceptListener.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/AcceptListener.java @@ -52,7 +52,9 @@ public class AcceptListener implements ChannelListener {}", roleName, tblPattern, privs); + if (LOG.isDebugEnabled()) { + LOG.debug("rectify privs {}: {} -> {}", roleName, tblPattern, privs); + } PrivBitSet copiedPrivs = privs.copy(); copiedPrivs.and(PrivBitSet.of(Privilege.ADMIN_PRIV, Privilege.NODE_PRIV, Privilege.USAGE_PRIV)); modifiedGlobalPrivs.or(copiedPrivs); @@ -796,8 +808,10 @@ public class Role implements Writable, GsonPostProcessable { privs.unset(Privilege.USAGE_PRIV.getIdx()); privs.unset(Privilege.NODE_PRIV.getIdx()); privs.unset(Privilege.ADMIN_PRIV.getIdx()); - LOG.debug("alter rectify privs {}: {} -> {}, modified global priv: {}", - roleName, tblPattern, privs, modifiedGlobalPrivs); + if (LOG.isDebugEnabled()) { + LOG.debug("alter rectify privs {}: {} -> {}, modified global priv: {}", + roleName, tblPattern, privs, modifiedGlobalPrivs); + } } } if (!modifiedGlobalPrivs.isEmpty()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserPropertyMgr.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserPropertyMgr.java index 8f634bccbe..aaeaa992ab 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserPropertyMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserPropertyMgr.java @@ -239,7 +239,9 @@ public class UserPropertyMgr implements Writable { for (int i = 0; i < size; ++i) { UserProperty userProperty = UserProperty.read(in); propertyMap.put(userProperty.getQualifiedUser(), userProperty); - LOG.debug("read user property: {}: {}", userProperty.getQualifiedUser(), userProperty); + if (LOG.isDebugEnabled()) { + LOG.debug("read user property: {}: {}", userProperty.getQualifiedUser(), userProperty); + } } // Read resource resourceVersion = new AtomicLong(in.readLong()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/NereidsPlanner.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/NereidsPlanner.java index 3b2d726647..156f9abc5a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/NereidsPlanner.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/NereidsPlanner.java @@ -268,29 +268,41 @@ public class NereidsPlanner extends Planner { } private void analyze() { - LOG.debug("Start analyze plan"); + if (LOG.isDebugEnabled()) { + LOG.debug("Start analyze plan"); + } cascadesContext.newAnalyzer().analyze(); getHooks().forEach(hook -> hook.afterAnalyze(this)); NereidsTracer.logImportantTime("EndAnalyzePlan"); - LOG.debug("End analyze plan"); + if (LOG.isDebugEnabled()) { + LOG.debug("End analyze plan"); + } } /** * Logical plan rewrite based on a series of heuristic rules. 
*/ private void rewrite() { - LOG.debug("Start rewrite plan"); + if (LOG.isDebugEnabled()) { + LOG.debug("Start rewrite plan"); + } Rewriter.getWholeTreeRewriter(cascadesContext).execute(); NereidsTracer.logImportantTime("EndRewritePlan"); - LOG.debug("End rewrite plan"); + if (LOG.isDebugEnabled()) { + LOG.debug("End rewrite plan"); + } } // DependsRules: EnsureProjectOnTopJoin.class private void optimize() { - LOG.debug("Start optimize plan"); + if (LOG.isDebugEnabled()) { + LOG.debug("Start optimize plan"); + } new Optimizer(cascadesContext).execute(); NereidsTracer.logImportantTime("EndOptimizePlan"); - LOG.debug("End optimize plan"); + if (LOG.isDebugEnabled()) { + LOG.debug("End optimize plan"); + } } private PhysicalPlan postProcess(PhysicalPlan physicalPlan) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/cost/Cost.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/cost/Cost.java index 7b7c6776aa..fb90b914d9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/cost/Cost.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/cost/Cost.java @@ -45,4 +45,3 @@ public interface Cost { return CostV1.zero(); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/cost/CostV1.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/cost/CostV1.java index ea47e1dedb..75624068bc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/cost/CostV1.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/cost/CostV1.java @@ -96,4 +96,3 @@ class CostV1 implements Cost { return sb.toString(); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/cost/CostV2.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/cost/CostV2.java index 6f8974268e..cdd1661fb0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/cost/CostV2.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/cost/CostV2.java @@ -132,4 +132,3 @@ class CostV2 implements Cost { return new CostV2(0, Double.MAX_VALUE, Double.MAX_VALUE); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/exceptions/NotSupportedException.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/exceptions/NotSupportedException.java index bb707b6562..24b4ebf486 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/exceptions/NotSupportedException.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/exceptions/NotSupportedException.java @@ -25,4 +25,3 @@ public class NotSupportedException extends RuntimeException { super(String.format("Not Supported: %s", msg)); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java index 94b2f78cf1..274ab4e712 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java @@ -2474,7 +2474,9 @@ public class PhysicalPlanTranslator extends DefaultPlanVisitor { requestPropertyToChildren.add(physicalProperties); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/AvgDistinctToSumDivCount.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/AvgDistinctToSumDivCount.java index 3745ec51c2..4ed79e7384 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/AvgDistinctToSumDivCount.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/AvgDistinctToSumDivCount.java @@ -69,4 +69,3 @@ public class AvgDistinctToSumDivCount extends OneRewriteRuleFactory { ); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindSlotWithPaths.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindSlotWithPaths.java index a904bcf919..04eb8af073 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindSlotWithPaths.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindSlotWithPaths.java @@ -84,4 +84,3 @@ public class BindSlotWithPaths implements AnalysisRuleFactory { ); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/ExpressionNormalization.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/ExpressionNormalization.java index 6cff3553b4..23fca6b77b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/ExpressionNormalization.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/ExpressionNormalization.java @@ -60,4 +60,3 @@ public class ExpressionNormalization extends ExpressionRewrite { super(new ExpressionRuleExecutor(NORMALIZE_REWRITE_RULES)); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/DistinctPredicatesRule.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/DistinctPredicatesRule.java index 8f21f7d348..a3466d395d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/DistinctPredicatesRule.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/DistinctPredicatesRule.java @@ -49,4 +49,3 @@ public class DistinctPredicatesRule extends AbstractExpressionRewriteRule { return expr; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRule.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRule.java index 10b1e39da3..c801f749ee 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRule.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRule.java @@ -38,4 +38,3 @@ public class FoldConstantRule extends AbstractExpressionRewriteRule { return FoldConstantRuleOnFE.INSTANCE.rewrite(expr, ctx); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnBE.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnBE.java index a254f9b4d4..27e1b34f9d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnBE.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnBE.java @@ -230,7 +230,9 @@ public class FoldConstantRuleOnBE extends AbstractExpressionRewriteRule { } else { ret = constMap.get(e1.getKey()); } - LOG.debug("Be constant folding convert {} to {}", e1.getKey(), ret); + if (LOG.isDebugEnabled()) { + LOG.debug("Be constant folding convert {} to {}", e1.getKey(), ret); + } resultMap.put(e1.getKey(), ret); } } @@ -246,4 +248,3 @@ public class FoldConstantRuleOnBE extends AbstractExpressionRewriteRule { return resultMap; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnFE.java 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnFE.java index 8d56dde938..05165f6c31 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnFE.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnFE.java @@ -562,4 +562,3 @@ public class FoldConstantRuleOnFE extends AbstractExpressionRewriteRule { return Optional.empty(); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/InferPredicates.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/InferPredicates.java index a9153c1ef5..04db263c36 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/InferPredicates.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/InferPredicates.java @@ -127,4 +127,3 @@ public class InferPredicates extends DefaultPlanRewriter implements return PlanUtils.filterOrSelf(predicates, plan); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PruneFileScanPartition.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PruneFileScanPartition.java index ade445f4d8..fa7cecdd5a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PruneFileScanPartition.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PruneFileScanPartition.java @@ -109,4 +109,3 @@ public class PruneFileScanPartition extends OneRewriteRuleFactory { return new SelectedPartitions(idToPartitionItem.size(), selectedPartitionItems, true); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PruneOlapScanTablet.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PruneOlapScanTablet.java index 0b079d7816..e298163560 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PruneOlapScanTablet.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PruneOlapScanTablet.java @@ -93,4 +93,3 @@ public class PruneOlapScanTablet extends OneRewriteRuleFactory { ).prune(); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/SplitLimit.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/SplitLimit.java index aefb2ee8c8..06f7760c43 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/SplitLimit.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/SplitLimit.java @@ -44,4 +44,3 @@ public class SplitLimit extends OneRewriteRuleFactory { }).toRule(RuleType.SPLIT_LIMIT); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/ExpressionEstimation.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/ExpressionEstimation.java index 874f47a50a..14d5ae8b63 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/ExpressionEstimation.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/ExpressionEstimation.java @@ -891,4 +891,3 @@ public class ExpressionEstimation extends ExpressionVisitor { // Use -1 for catalog id and db id when failed to get them from metadata. // This is OK because catalog id and db id is not in the hashcode function of ColumnStatistics cache // and the table id is globally unique. 
- LOG.debug(String.format("Fail to get catalog id and db id for table %s", table.getName())); + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("Fail to get catalog id and db id for table %s", table.getName())); + } catalogId = -1; dbId = -1; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/CompoundPredicate.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/CompoundPredicate.java index 725d92068d..ccc5512212 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/CompoundPredicate.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/CompoundPredicate.java @@ -67,4 +67,3 @@ public abstract class CompoundPredicate extends BinaryOperator { public abstract Class flipType(); } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/EqualPredicate.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/EqualPredicate.java index 5691f7de94..d1b7442ab5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/EqualPredicate.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/EqualPredicate.java @@ -37,4 +37,3 @@ public abstract class EqualPredicate extends ComparisonPredicate { return null; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/BitShiftLeft.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/BitShiftLeft.java index aa78d01e32..eaad8d9dfa 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/BitShiftLeft.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/BitShiftLeft.java @@ -58,4 +58,3 @@ public class BitShiftLeft extends ScalarFunction return new BitShiftLeft(children.get(0), children.get(1)); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/BitShiftRight.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/BitShiftRight.java index ab2bce00a0..f24ed47c94 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/BitShiftRight.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/BitShiftRight.java @@ -60,4 +60,3 @@ public class BitShiftRight extends ScalarFunction return new BitShiftRight(children.get(0), children.get(1)); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonExtract.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonExtract.java index f7d1bd612c..6b4ec4e30c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonExtract.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonExtract.java @@ -67,4 +67,3 @@ public class JsonExtract extends ScalarFunction return visitor.visitJsonExtract(this, context); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonInsert.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonInsert.java index 5e7805e7fb..8f9c9900ae 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonInsert.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonInsert.java @@ -67,4 +67,3 @@ public class JsonInsert extends ScalarFunction return visitor.visitJsonInsert(this, context); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonReplace.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonReplace.java index f3e735136e..c5ced84ac8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonReplace.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonReplace.java @@ -67,4 +67,3 @@ public class JsonReplace extends ScalarFunction return visitor.visitJsonReplace(this, context); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonSet.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonSet.java index 394d27fb9e..1076e7ff4e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonSet.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonSet.java @@ -67,4 +67,3 @@ public class JsonSet extends ScalarFunction return visitor.visitJsonSet(this, context); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonbValid.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonbValid.java index 220f3eeeb3..35f6c234d4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonbValid.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonbValid.java @@ -67,4 +67,3 @@ public class JsonbValid extends ScalarFunction return visitor.visitJsonbValid(this, context); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/StAngleSphere.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/StAngleSphere.java index 00dc3153b7..b00f6ef336 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/StAngleSphere.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/StAngleSphere.java @@ -63,4 +63,3 @@ public class StAngleSphere extends ScalarFunction return visitor.visitStAngleSphere(this, context); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/User.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/User.java index efbb24340f..3a53c291f9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/User.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/User.java @@ -52,4 +52,3 @@ public class User extends ScalarFunction return visitor.visitUser(this, context); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/literal/Interval.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/literal/Interval.java index 8b823220e2..5cffc0c7ff 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/literal/Interval.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/literal/Interval.java @@ -88,5 +88,3 
@@ public class Interval extends Expression implements LeafExpression, AlwaysNotNul } } } - - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/algebra/Project.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/algebra/Project.java index 8005f07da8..b734bba576 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/algebra/Project.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/algebra/Project.java @@ -109,5 +109,3 @@ public interface Project { }); } } - - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CreatePolicyCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CreatePolicyCommand.java index 9c878fc6e2..fa78f399d7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CreatePolicyCommand.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CreatePolicyCommand.java @@ -82,4 +82,3 @@ public class CreatePolicyCommand extends Command implements ForwardWithSync { throw new AnalysisException("Not support create policy command in Nereids now"); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CreateTableCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CreateTableCommand.java index df7d5ffc6a..e821512113 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CreateTableCommand.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CreateTableCommand.java @@ -90,8 +90,10 @@ public class CreateTableCommand extends Command implements ForwardWithSync { if (!ctasQuery.isPresent()) { createTableInfo.validate(ctx); CreateTableStmt createTableStmt = createTableInfo.translateToLegacyStmt(); - LOG.debug("Nereids start to execute the create table command, query id: {}, tableName: {}", - ctx.queryId(), createTableInfo.getTableName()); + if (LOG.isDebugEnabled()) { + LOG.debug("Nereids start to execute the create table command, query id: {}, tableName: {}", + ctx.queryId(), createTableInfo.getTableName()); + } try { Env.getCurrentEnv().createTable(createTableStmt); } catch (Exception e) { @@ -140,8 +142,10 @@ public class CreateTableCommand extends Command implements ForwardWithSync { } createTableInfo.validateCreateTableAsSelect(columnsOfQuery.build(), ctx); CreateTableStmt createTableStmt = createTableInfo.translateToLegacyStmt(); - LOG.debug("Nereids start to execute the ctas command, query id: {}, tableName: {}", - ctx.queryId(), createTableInfo.getTableName()); + if (LOG.isDebugEnabled()) { + LOG.debug("Nereids start to execute the ctas command, query id: {}, tableName: {}", + ctx.queryId(), createTableInfo.getTableName()); + } try { Env.getCurrentEnv().createTable(createTableStmt); } catch (Exception e) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/InsertExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/InsertExecutor.java index 5bbb27be05..45471429bb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/InsertExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/InsertExecutor.java @@ -203,7 +203,9 @@ public class InsertExecutor { QeProcessorImpl.INSTANCE.registerQuery(ctx.queryId(), coordinator); coordinator.exec(); int execTimeout = ctx.getExecTimeout(); - LOG.debug("insert [{}] with query id {} execution timeout 
is {}", labelName, queryId, execTimeout); + if (LOG.isDebugEnabled()) { + LOG.debug("insert [{}] with query id {} execution timeout is {}", labelName, queryId, execTimeout); + } boolean notTimeout = coordinator.join(execTimeout); if (!coordinator.isDone()) { coordinator.cancel(); @@ -220,8 +222,10 @@ public class InsertExecutor { LOG.warn("insert [{}] with query id {} failed, {}", labelName, queryId, errMsg); ErrorReport.reportDdlException(errMsg, ErrorCode.ERR_FAILED_WHEN_INSERT); } - LOG.debug("insert [{}] with query id {} delta files is {}", - labelName, queryId, coordinator.getDeltaUrls()); + if (LOG.isDebugEnabled()) { + LOG.debug("insert [{}] with query id {} delta files is {}", + labelName, queryId, coordinator.getDeltaUrls()); + } if (coordinator.getLoadCounters().get(LoadEtlTask.DPP_NORMAL_ALL) != null) { loadedRows = Long.parseLong(coordinator.getLoadCounters().get(LoadEtlTask.DPP_NORMAL_ALL)); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/BulkLoadDataDesc.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/BulkLoadDataDesc.java index e0365ca098..7d87d878fb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/BulkLoadDataDesc.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/BulkLoadDataDesc.java @@ -330,4 +330,3 @@ public class BulkLoadDataDesc { return false; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/types/DateType.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/types/DateType.java index 854c50d7f1..d127ab1606 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/types/DateType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/types/DateType.java @@ -80,4 +80,3 @@ public class DateType extends DateLikeType { } } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/types/DateV2Type.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/types/DateV2Type.java index 0437fb0365..2acac33430 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/types/DateV2Type.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/types/DateV2Type.java @@ -63,4 +63,3 @@ public class DateV2Type extends DateLikeType { } } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/types/DecimalV2Type.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/types/DecimalV2Type.java index 523f113d99..8a5a199a8d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/types/DecimalV2Type.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/types/DecimalV2Type.java @@ -231,4 +231,3 @@ public class DecimalV2Type extends FractionalType { } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/util/TypeCoercionUtils.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/util/TypeCoercionUtils.java index 4aa2d8ae09..a3d4a84d3c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/util/TypeCoercionUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/util/TypeCoercionUtils.java @@ -579,7 +579,9 @@ public class TypeCoercionUtils { } } } catch (Exception e) { - LOG.debug("convert '{}' to type {} failed", value, dataType); + if (LOG.isDebugEnabled()) { + LOG.debug("convert '{}' to type {} failed", value, dataType); + } } return Optional.ofNullable(ret); diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java b/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java index 28b8aa31c0..7f343f9469 
100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java @@ -164,7 +164,9 @@ public class EditLog { public static void loadJournal(Env env, Long logId, JournalEntity journal) { short opCode = journal.getOpCode(); if (opCode != OperationType.OP_SAVE_NEXTID && opCode != OperationType.OP_TIMESTAMP) { - LOG.debug("replay journal op code: {}", opCode); + if (LOG.isDebugEnabled()) { + LOG.debug("replay journal op code: {}", opCode); + } } try { switch (opCode) { @@ -534,7 +536,9 @@ public class EditLog { case OperationType.OP_UPSERT_TRANSACTION_STATE: { final TransactionState state = (TransactionState) journal.getData(); Env.getCurrentGlobalTransactionMgr().replayUpsertTransactionState(state); - LOG.debug("logid: {}, opcode: {}, tid: {}", logId, opCode, state.getTransactionId()); + if (LOG.isDebugEnabled()) { + LOG.debug("logid: {}, opcode: {}, tid: {}", logId, opCode, state.getTransactionId()); + } // state.loadedTableIndexIds is updated after replay if (state.getTransactionStatus() == TransactionStatus.VISIBLE) { @@ -546,7 +550,9 @@ public class EditLog { case OperationType.OP_DELETE_TRANSACTION_STATE: { final TransactionState state = (TransactionState) journal.getData(); Env.getCurrentGlobalTransactionMgr().replayDeleteTransactionState(state); - LOG.debug("opcode: {}, tid: {}", opCode, state.getTransactionId()); + if (LOG.isDebugEnabled()) { + LOG.debug("opcode: {}, tid: {}", opCode, state.getTransactionId()); + } break; } case OperationType.OP_BATCH_REMOVE_TXNS: { diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/gson/GsonUtils.java b/fe/fe-core/src/main/java/org/apache/doris/persist/gson/GsonUtils.java index cb7d82b22e..02330e07c9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/gson/GsonUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/gson/GsonUtils.java @@ -591,4 +591,3 @@ public class GsonUtils { } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/AggregationNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/AggregationNode.java index 23522625bb..00b5fd0412 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/AggregationNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/AggregationNode.java @@ -205,8 +205,10 @@ public class AggregationNode extends PlanNode { for (Expr groupingExpr : groupingExprs) { long numDistinct = groupingExpr.getNumDistinctValues(); // TODO: remove these before 1.0 - LOG.debug("grouping expr: " + groupingExpr.toSql() + " #distinct=" + Long.toString( - numDistinct)); + if (LOG.isDebugEnabled()) { + LOG.debug("grouping expr: " + groupingExpr.toSql() + " #distinct=" + Long.toString( + numDistinct)); + } if (numDistinct == -1) { cardinality = -1; break; @@ -214,16 +216,22 @@ public class AggregationNode extends PlanNode { cardinality *= numDistinct; } // take HAVING predicate into account - LOG.debug("Agg: cardinality=" + Long.toString(cardinality)); + if (LOG.isDebugEnabled()) { + LOG.debug("Agg: cardinality=" + Long.toString(cardinality)); + } if (cardinality > 0) { cardinality = Math.round((double) cardinality * computeOldSelectivity()); - LOG.debug("sel=" + Double.toString(computeOldSelectivity())); + if (LOG.isDebugEnabled()) { + LOG.debug("sel=" + Double.toString(computeOldSelectivity())); + } } // if we ended up with an overflow, the estimate is certain to be wrong if (cardinality < 0) { cardinality = -1; } - LOG.debug("stats Agg: cardinality=" + Long.toString(cardinality)); + if 
(LOG.isDebugEnabled()) { + LOG.debug("stats Agg: cardinality=" + Long.toString(cardinality)); + } } private void updateplanNodeName() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/DistributedPlanner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/DistributedPlanner.java index 2586e1147c..e1a8d36424 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/DistributedPlanner.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/DistributedPlanner.java @@ -945,8 +945,10 @@ public class DistributedPlanner { private boolean canColocateAgg(AggregateInfo aggregateInfo, DataPartition childFragmentDataPartition) { // Condition1 if (ConnectContext.get().getSessionVariable().isDisableColocatePlan()) { - LOG.debug("Agg node is not colocate in:" + ConnectContext.get().queryId() - + ", reason:" + DistributedPlanColocateRule.SESSION_DISABLED); + if (LOG.isDebugEnabled()) { + LOG.debug("Agg node is not colocate in:" + ConnectContext.get().queryId() + + ", reason:" + DistributedPlanColocateRule.SESSION_DISABLED); + } return false; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/EsScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/EsScanNode.java index 720c6b68df..054f2f41e1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/EsScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/EsScanNode.java @@ -257,7 +257,9 @@ public class EsScanNode extends ExternalScanNode { scratchBuilder.append(scanRangeLocations.toString()); scratchBuilder.append(" "); } - LOG.debug("ES table {} scan ranges {}", table.getName(), scratchBuilder.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("ES table {} scan ranges {}", table.getName(), scratchBuilder.toString()); + } } return result; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/FileLoadScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/FileLoadScanNode.java index 4be169b940..d29f969a9a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/FileLoadScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/FileLoadScanNode.java @@ -351,4 +351,3 @@ public class FileLoadScanNode extends FileScanNode { } } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/GroupCommitPlanner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/GroupCommitPlanner.java index b69ece3b9a..c8e7ef6c4e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/GroupCommitPlanner.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/GroupCommitPlanner.java @@ -144,7 +144,9 @@ public class GroupCommitPlanner { if (!backend.isDecommissioned()) { ctx.setInsertGroupCommit(this.table.getId(), backend); find = true; - LOG.debug("choose new be {}", backend.getId()); + if (LOG.isDebugEnabled()) { + LOG.debug("choose new be {}", backend.getId()); + } break; } } @@ -211,8 +213,10 @@ public class GroupCommitPlanner { if (selectStmt.getValueList() != null) { for (List row : selectStmt.getValueList().getRows()) { InternalService.PDataRow data = StmtExecutor.getRowStringValue(row); - LOG.debug("add row: [{}]", data.getColList().stream().map(c -> c.getValue()) - .collect(Collectors.joining(","))); + if (LOG.isDebugEnabled()) { + LOG.debug("add row: [{}]", data.getColList().stream().map(c -> c.getValue()) + .collect(Collectors.joining(","))); + } rows.add(data); } } else { @@ -225,11 +229,12 @@ public class GroupCommitPlanner { } } InternalService.PDataRow data = StmtExecutor.getRowStringValue(exprList); - LOG.debug("add row: [{}]", 
data.getColList().stream().map(c -> c.getValue()) - .collect(Collectors.joining(","))); + if (LOG.isDebugEnabled()) { + LOG.debug("add row: [{}]", data.getColList().stream().map(c -> c.getValue()) + .collect(Collectors.joining(","))); + } rows.add(data); } return rows; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/HashJoinNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/HashJoinNode.java index d96d79ebe7..f710a3c0a2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/HashJoinNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/HashJoinNode.java @@ -610,7 +610,9 @@ public class HashJoinNode extends JoinNodeBase { // numDistinct = Math.min(numDistinct, rhsTbl.getNumRows()); // } maxNumDistinct = Math.max(maxNumDistinct, numDistinct); - LOG.debug("min slotref: {}, #distinct: {}", rhsSlotRef.toSql(), numDistinct); + if (LOG.isDebugEnabled()) { + LOG.debug("min slotref: {}, #distinct: {}", rhsSlotRef.toSql(), numDistinct); + } } if (maxNumDistinct == 0) { @@ -621,9 +623,13 @@ public class HashJoinNode extends JoinNodeBase { } else { cardinality = Math.round( (double) getChild(0).cardinality * (double) getChild(1).cardinality / (double) maxNumDistinct); - LOG.debug("lhs card: {}, rhs card: {}", getChild(0).cardinality, getChild(1).cardinality); + if (LOG.isDebugEnabled()) { + LOG.debug("lhs card: {}, rhs card: {}", getChild(0).cardinality, getChild(1).cardinality); + } + } + if (LOG.isDebugEnabled()) { + LOG.debug("stats HashJoin: cardinality {}", cardinality); } - LOG.debug("stats HashJoin: cardinality {}", cardinality); } /** diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/JoinCostEvaluation.java b/fe/fe-core/src/main/java/org/apache/doris/planner/JoinCostEvaluation.java index c79c3d416f..28daa450c5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/JoinCostEvaluation.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/JoinCostEvaluation.java @@ -75,10 +75,10 @@ public class JoinCostEvaluation { } if (LOG.isDebugEnabled()) { LOG.debug(nodeOverview); - LOG.debug("broadcast: cost=" + Long.toString(broadcastCost)); - LOG.debug("rhs card=" + Long.toString(rhsTreeCardinality) - + " rhs row_size=" + Float.toString(rhsTreeAvgRowSize) - + " lhs nodes=" + Integer.toString(lhsTreeNumNodes)); + LOG.debug("broadcast: cost=" + broadcastCost); + LOG.debug("rhs card=" + rhsTreeCardinality + + " rhs row_size=" + rhsTreeAvgRowSize + + " lhs nodes=" + lhsTreeNumNodes); } } @@ -92,12 +92,10 @@ public class JoinCostEvaluation { (double) lhsTreeCardinality * lhsTreeAvgRowSize + (double) rhsTreeCardinality * rhsTreeAvgRowSize); } if (LOG.isDebugEnabled()) { - LOG.debug(nodeOverview); - LOG.debug("partition: cost=" + Long.toString(partitionCost)); - LOG.debug("lhs card=" + Long.toString(lhsTreeCardinality) + " row_size=" - + Float.toString(lhsTreeAvgRowSize)); - LOG.debug("rhs card=" + Long.toString(rhsTreeCardinality) + " row_size=" - + Float.toString(rhsTreeAvgRowSize)); + LOG.debug("nodeOverview: {}", nodeOverview); + LOG.debug("partition: cost={} ", partitionCost); + LOG.debug("lhs card={} row_size={}", lhsTreeCardinality, lhsTreeAvgRowSize); + LOG.debug("rhs card={} row_size={}", rhsTreeCardinality, rhsTreeAvgRowSize); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/MaterializedViewSelector.java b/fe/fe-core/src/main/java/org/apache/doris/planner/MaterializedViewSelector.java index 4375105e46..e61e09112e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/MaterializedViewSelector.java 
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/MaterializedViewSelector.java @@ -135,10 +135,12 @@ public class MaterializedViewSelector { return null; } long bestIndexId = priorities(olapScanNode, candidateIndexIdToSchema); - LOG.debug("The best materialized view is {} for scan node {} in query {}, " - + "isPreAggregation: {}, reasonOfDisable: {}, cost {}", - bestIndexId, scanNode.getId(), selectStmt.toSql(), isPreAggregation, reasonOfDisable, - (System.currentTimeMillis() - start)); + if (LOG.isDebugEnabled()) { + LOG.debug("The best materialized view is {} for scan node {} in query {}, " + + "isPreAggregation: {}, reasonOfDisable: {}, cost {}", + bestIndexId, scanNode.getId(), selectStmt.toSql(), isPreAggregation, reasonOfDisable, + (System.currentTimeMillis() - start)); + } return new BestIndexInfo(bestIndexId, isPreAggregation, reasonOfDisable); } @@ -205,9 +207,11 @@ public class MaterializedViewSelector { // For query like `select v:a from tbl` when column v is variant type but v:a is not expicity // in index, so the above check will filter all index. But we should at least choose the base // index at present.TODO we should better handle it. - LOG.debug("result {}, has variant col {}, tuple {}", result, - analyzer.getTupleDesc(scanNode.getTupleId()).hasVariantCol(), - analyzer.getTupleDesc(scanNode.getTupleId()).toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("result {}, has variant col {}, tuple {}", result, + analyzer.getTupleDesc(scanNode.getTupleId()).hasVariantCol(), + analyzer.getTupleDesc(scanNode.getTupleId()).toString()); + } if (result.keySet().size() == 0 && scanNode.getOlapTable() .getBaseSchema().stream().anyMatch(column -> column.getType().isVariantType())) { LOG.info("Using base schema"); @@ -274,16 +278,22 @@ public class MaterializedViewSelector { } if (prefixMatchCount == maxPrefixMatchCount) { - LOG.debug("find a equal prefix match index {}. match count: {}", indexId, prefixMatchCount); + if (LOG.isDebugEnabled()) { + LOG.debug("find a equal prefix match index {}. match count: {}", indexId, prefixMatchCount); + } indexesMatchingBestPrefixIndex.add(indexId); } else if (prefixMatchCount > maxPrefixMatchCount) { - LOG.debug("find a better prefix match index {}. match count: {}", indexId, prefixMatchCount); + if (LOG.isDebugEnabled()) { + LOG.debug("find a better prefix match index {}. 
match count: {}", indexId, prefixMatchCount); + } maxPrefixMatchCount = prefixMatchCount; indexesMatchingBestPrefixIndex.clear(); indexesMatchingBestPrefixIndex.add(indexId); } } - LOG.debug("Those mv match the best prefix index:" + Joiner.on(",").join(indexesMatchingBestPrefixIndex)); + if (LOG.isDebugEnabled()) { + LOG.debug("Those mv match the best prefix index:" + Joiner.on(",").join(indexesMatchingBestPrefixIndex)); + } return indexesMatchingBestPrefixIndex; } @@ -296,7 +306,9 @@ public class MaterializedViewSelector { for (Long partitionId : partitionIds) { rowCount += olapTable.getPartition(partitionId).getIndex(indexId).getRowCount(); } - LOG.debug("rowCount={} for table={}", rowCount, indexId); + if (LOG.isDebugEnabled()) { + LOG.debug("rowCount={} for table={}", rowCount, indexId); + } if (rowCount < minRowCount) { minRowCount = rowCount; selectedIndexId = indexId; @@ -372,8 +384,10 @@ public class MaterializedViewSelector { iterator.remove(); } } - LOG.debug("Those mv pass the test of compensating predicates:" - + Joiner.on(",").join(candidateIndexIdToMeta.keySet())); + if (LOG.isDebugEnabled()) { + LOG.debug("Those mv pass the test of compensating predicates:" + + Joiner.on(",").join(candidateIndexIdToMeta.keySet())); + } } /** @@ -465,8 +479,10 @@ public class MaterializedViewSelector { iterator.remove(); } } - LOG.debug("Those mv pass the test of grouping:" - + Joiner.on(",").join(candidateIndexIdToMeta.keySet())); + if (LOG.isDebugEnabled()) { + LOG.debug("Those mv pass the test of grouping:" + + Joiner.on(",").join(candidateIndexIdToMeta.keySet())); + } } // Step4: aggregation functions are available in the view output @@ -524,8 +540,10 @@ public class MaterializedViewSelector { iterator.remove(); } } - LOG.debug("Those mv pass the test of aggregation function:" - + Joiner.on(",").join(candidateIndexIdToMeta.keySet())); + if (LOG.isDebugEnabled()) { + LOG.debug("Those mv pass the test of aggregation function:" + + Joiner.on(",").join(candidateIndexIdToMeta.keySet())); + } } private boolean matchAllExpr(List exprs, List indexExprs, TupleId tid) @@ -588,8 +606,10 @@ public class MaterializedViewSelector { candidateIndexSchema .forEach(column -> indexColumnNames.add(CreateMaterializedViewStmt .mvColumnBreaker(MaterializedIndexMeta.normalizeName(column.getName())))); - LOG.debug("candidateIndexSchema {}, indexColumnNames {}, queryColumnNames {}", - candidateIndexSchema, indexColumnNames, queryColumnNames); + if (LOG.isDebugEnabled()) { + LOG.debug("candidateIndexSchema {}, indexColumnNames {}, queryColumnNames {}", + candidateIndexSchema, indexColumnNames, queryColumnNames); + } // Rollup index have no define expr. 
if (entry.getValue().getWhereClause() == null && indexExprs.isEmpty() && !indexColumnNames.containsAll(queryColumnNames)) { @@ -609,8 +629,10 @@ public class MaterializedViewSelector { iterator.remove(); } } - LOG.debug("Those mv pass the test of output columns:" - + Joiner.on(",").join(candidateIndexIdToMeta.keySet())); + if (LOG.isDebugEnabled()) { + LOG.debug("Those mv pass the test of output columns:" + + Joiner.on(",").join(candidateIndexIdToMeta.keySet())); + } } private void compensateCandidateIndex(Map candidateIndexIdToMeta, @@ -624,8 +646,10 @@ public class MaterializedViewSelector { candidateIndexIdToMeta.put(mvIndexId, index.getValue()); } } - LOG.debug("Those mv pass the test of output columns:" - + Joiner.on(",").join(candidateIndexIdToMeta.keySet())); + if (LOG.isDebugEnabled()) { + LOG.debug("Those mv pass the test of output columns:" + + Joiner.on(",").join(candidateIndexIdToMeta.keySet())); + } } private void init() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/NestedLoopJoinNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/NestedLoopJoinNode.java index 63cc724001..9feac7e79f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/NestedLoopJoinNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/NestedLoopJoinNode.java @@ -157,7 +157,9 @@ public class NestedLoopJoinNode extends JoinNodeBase { cardinality = Math.round(((double) cardinality) * computeOldSelectivity()); } } - LOG.debug("stats NestedLoopJoin: cardinality={}", Long.toString(cardinality)); + if (LOG.isDebugEnabled()) { + LOG.debug("stats NestedLoopJoin: cardinality={}", Long.toString(cardinality)); + } } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java index 5e779324d5..9f975a6b0a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java @@ -526,7 +526,9 @@ public class OlapScanNode extends ScanNode { slotDescriptor.setColumn(mvColumn); } } - LOG.debug("updateSlotUniqueId() slots: {}", desc.getSlots()); + if (LOG.isDebugEnabled()) { + LOG.debug("updateSlotUniqueId() slots: {}", desc.getSlots()); + } } public OlapTable getOlapTable() { @@ -604,7 +606,9 @@ public class OlapScanNode extends ScanNode { @Override public void finalize(Analyzer analyzer) throws UserException { - LOG.debug("OlapScanNode get scan range locations. Tuple: {}", desc); + if (LOG.isDebugEnabled()) { + LOG.debug("OlapScanNode get scan range locations. Tuple: {}", desc); + } /** * If JoinReorder is turned on, it will be calculated init(), and this value is * not accurate. @@ -794,7 +798,9 @@ public class OlapScanNode extends ScanNode { Collections.shuffle(replicas); } } else { - LOG.debug("use fix replica, value: {}, replica count: {}", useFixReplica, replicas.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("use fix replica, value: {}, replica count: {}", useFixReplica, replicas.size()); + } // sort by replica id replicas.sort(Replica.ID_COMPARATOR); Replica replica = replicas.get(useFixReplica >= replicas.size() ? 
replicas.size() - 1 : useFixReplica); @@ -802,8 +808,10 @@ public class OlapScanNode extends ScanNode { Backend backend = Env.getCurrentSystemInfo().getBackend(replica.getBackendId()); // If the fixed replica is bad, then not clear the replicas using random replica if (backend == null || !backend.isAlive()) { - LOG.debug("backend {} not exists or is not alive for replica {}", replica.getBackendId(), - replica.getId()); + if (LOG.isDebugEnabled()) { + LOG.debug("backend {} not exists or is not alive for replica {}", replica.getBackendId(), + replica.getId()); + } Collections.shuffle(replicas); } else { replicas.clear(); @@ -843,8 +851,10 @@ public class OlapScanNode extends ScanNode { for (Replica replica : replicas) { Backend backend = Env.getCurrentSystemInfo().getBackend(replica.getBackendId()); if (backend == null || !backend.isAlive()) { - LOG.debug("backend {} not exists or is not alive for replica {}", replica.getBackendId(), - replica.getId()); + if (LOG.isDebugEnabled()) { + LOG.debug("backend {} not exists or is not alive for replica {}", replica.getBackendId(), + replica.getId()); + } errs.add("replica " + replica.getId() + "'s backend " + replica.getBackendId() + " does not exist or not alive"); continue; @@ -923,8 +933,10 @@ public class OlapScanNode extends ScanNode { partition.getName(), "RESTORING"); } } - LOG.debug("partition prune cost: {} ms, partitions: {}", - (System.currentTimeMillis() - start), selectedPartitionIds); + if (LOG.isDebugEnabled()) { + LOG.debug("partition prune cost: {} ms, partitions: {}", + (System.currentTimeMillis() - start), selectedPartitionIds); + } } public void selectBestRollupByRollupSelector(Analyzer analyzer) throws UserException { @@ -937,14 +949,18 @@ public class OlapScanNode extends ScanNode { // and the selection logic of materialized view is selected in // "MaterializedViewSelector" selectedIndexId = olapTable.getBaseIndexId(); - LOG.debug("The best index will be selected later in mv selector"); + if (LOG.isDebugEnabled()) { + LOG.debug("The best index will be selected later in mv selector"); + } return; } final RollupSelector rollupSelector = new RollupSelector(analyzer, desc, olapTable); selectedIndexId = rollupSelector.selectBestRollup(selectedPartitionIds, conjuncts, isPreAggregation); updateSlotUniqueId(); - LOG.debug("select best roll up cost: {} ms, best index id: {}", (System.currentTimeMillis() - start), - selectedIndexId); + if (LOG.isDebugEnabled()) { + LOG.debug("select best roll up cost: {} ms, best index id: {}", (System.currentTimeMillis() - start), + selectedIndexId); + } } @Override @@ -959,7 +975,9 @@ public class OlapScanNode extends ScanNode { long start = System.currentTimeMillis(); computeSampleTabletIds(); computeTabletInfo(); - LOG.debug("distribution prune cost: {} ms", (System.currentTimeMillis() - start)); + if (LOG.isDebugEnabled()) { + LOG.debug("distribution prune cost: {} ms", (System.currentTimeMillis() - start)); + } } public void setOutputColumnUniqueIds(Set outputColumnUniqueIds) { @@ -1064,12 +1082,16 @@ public class OlapScanNode extends ScanNode { } if (sampleTabletIds.size() != 0) { sampleTabletIds.retainAll(hitTabletIds); - LOG.debug("after computeSampleTabletIds, hitRows {}, totalRows {}, selectedTablets {}, sampleRows {}", - hitRows, selectedRows, sampleTabletIds.size(), totalSampleRows); + if (LOG.isDebugEnabled()) { + LOG.debug("after computeSampleTabletIds, hitRows {}, totalRows {}, selectedTablets {}, sampleRows {}", + hitRows, selectedRows, sampleTabletIds.size(), totalSampleRows); + } } 
else { sampleTabletIds = hitTabletIds; - LOG.debug("after computeSampleTabletIds, hitRows {}, selectedRows {}, sampleRows {}", hitRows, selectedRows, - totalSampleRows); + if (LOG.isDebugEnabled()) { + LOG.debug("after computeSampleTabletIds, hitRows {}, selectedRows {}, sampleRows {}", + hitRows, selectedRows, totalSampleRows); + } } } @@ -1101,14 +1123,18 @@ public class OlapScanNode extends ScanNode { final MaterializedIndex selectedTable = partition.getIndex(selectedIndexId); final List tablets = Lists.newArrayList(); Collection tabletIds = distributionPrune(selectedTable, partition.getDistributionInfo()); - LOG.debug("distribution prune tablets: {}", tabletIds); + if (LOG.isDebugEnabled()) { + LOG.debug("distribution prune tablets: {}", tabletIds); + } if (sampleTabletIds.size() != 0) { if (tabletIds != null) { tabletIds.retainAll(sampleTabletIds); } else { tabletIds = sampleTabletIds; } - LOG.debug("after sample tablets: {}", tabletIds); + if (LOG.isDebugEnabled()) { + LOG.debug("after sample tablets: {}", tabletIds); + } } List allTabletIds = selectedTable.getTabletIdsInOrder(); @@ -1718,5 +1744,3 @@ public class OlapScanNode extends ScanNode { return getScanTabletIds().size(); } } - - diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/OriginalPlanner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/OriginalPlanner.java index 3f79ae6cba..36087176f4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/OriginalPlanner.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/OriginalPlanner.java @@ -253,12 +253,16 @@ public class OriginalPlanner extends Planner { } else { List resExprs = Expr.substituteList(queryStmt.getResultExprs(), rootFragment.getPlanRoot().getOutputSmap(), analyzer, false); - LOG.debug("result Exprs {}", queryStmt.getResultExprs()); - LOG.debug("substitute result Exprs {}", resExprs); + if (LOG.isDebugEnabled()) { + LOG.debug("result Exprs {}", queryStmt.getResultExprs()); + LOG.debug("substitute result Exprs {}", resExprs); + } rootFragment.setOutputExprs(resExprs); } rootFragment.setResultSinkType(ConnectContext.get().getResultSinkType()); - LOG.debug("finalize plan fragments"); + if (LOG.isDebugEnabled()) { + LOG.debug("finalize plan fragments"); + } for (PlanFragment fragment : fragments) { fragment.finalize(queryStmt); } @@ -273,16 +277,22 @@ public class OriginalPlanner extends Planner { SelectStmt selectStmt = (SelectStmt) queryStmt; if (queryStmt.getSortInfo() != null || selectStmt.getAggInfo() != null) { isBlockQuery = true; - LOG.debug("this is block query"); + if (LOG.isDebugEnabled()) { + LOG.debug("this is block query"); + } } else { isBlockQuery = false; - LOG.debug("this isn't block query"); + if (LOG.isDebugEnabled()) { + LOG.debug("this isn't block query"); + } } // Check SelectStatement if optimization condition satisfied if (selectStmt.isPointQueryShortCircuit()) { // Optimize for point query like: SELECT * FROM t1 WHERE pk1 = 1 and pk2 = 2 // such query will use direct RPC to do point query - LOG.debug("it's a point query"); + if (LOG.isDebugEnabled()) { + LOG.debug("it's a point query"); + } Map eqConjuncts = ((SelectStmt) selectStmt).getPointQueryEQPredicates(); OlapScanNode olapScanNode = (OlapScanNode) singleNodePlan; olapScanNode.setDescTable(analyzer.getDescTbl()); @@ -388,7 +398,9 @@ public class OriginalPlanner extends Planner { private SlotDescriptor injectRowIdColumnSlot(Analyzer analyzer, TupleDescriptor tupleDesc) { SlotDescriptor slotDesc = analyzer.getDescTbl().addSlotDescriptor(tupleDesc); - 
LOG.debug("inject slot {}", slotDesc); + if (LOG.isDebugEnabled()) { + LOG.debug("inject slot {}", slotDesc); + } String name = Column.ROWID_COL; Column col = new Column(name, Type.STRING, false, null, false, "", "rowid column"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionPruneV2ForShortCircuitPlan.java b/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionPruneV2ForShortCircuitPlan.java index 8d39ed4d4f..0b98cac972 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionPruneV2ForShortCircuitPlan.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/PartitionPruneV2ForShortCircuitPlan.java @@ -56,7 +56,9 @@ public class PartitionPruneV2ForShortCircuitPlan extends PartitionPrunerV2Base { Range partitionRange = Range.closedOpen(partitionLowerBound, partitionUpperBound); partitionRangeMapByLiteral.put(partitionRange, entry.getKey()); } - LOG.debug("update partitionRangeMapByLiteral"); + if (LOG.isDebugEnabled()) { + LOG.debug("update partitionRangeMapByLiteral"); + } this.lastPartitionRangeMapUpdateTimestampMs = System.currentTimeMillis(); return true; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/PredicatePushDown.java b/fe/fe-core/src/main/java/org/apache/doris/planner/PredicatePushDown.java index 9534fe6886..c342a816ea 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/PredicatePushDown.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/PredicatePushDown.java @@ -108,7 +108,9 @@ public class PredicatePushDown { if (otherSlot.isBound(leftSlot.getSlotId()) && rightSlot.isBound(rightSideTuple)) { Expr pushDownConjunct = rewritePredicate(analyzer, conjunct, rightSlot); - LOG.debug("pushDownConjunct: {}", pushDownConjunct); + if (LOG.isDebugEnabled()) { + LOG.debug("pushDownConjunct: {}", pushDownConjunct); + } if (!analyzer.getGlobalInDeDuplication().contains(pushDownConjunct) && !analyzer.getGlobalSlotToLiteralDeDuplication() .contains(Pair.of(pushDownConjunct.getChild(0), pushDownConjunct.getChild(1)))) { @@ -117,7 +119,9 @@ public class PredicatePushDown { } else if (otherSlot.isBound(rightSlot.getSlotId()) && leftSlot.isBound(rightSideTuple)) { Expr pushDownConjunct = rewritePredicate(analyzer, conjunct, leftSlot); - LOG.debug("pushDownConjunct: {}", pushDownConjunct); + if (LOG.isDebugEnabled()) { + LOG.debug("pushDownConjunct: {}", pushDownConjunct); + } if (!analyzer.getGlobalInDeDuplication().contains(pushDownConjunct) && !analyzer.getGlobalSlotToLiteralDeDuplication() .contains(Pair.of(pushDownConjunct.getChild(0), pushDownConjunct.getChild(1)))) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/ProjectPlanner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/ProjectPlanner.java index 643d9ae863..06caaf8f7f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/ProjectPlanner.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/ProjectPlanner.java @@ -49,7 +49,9 @@ public class ProjectPlanner { planNode.initOutputSlotIds(outputSlotIds, analyzer); planNode.projectOutputTuple(); } catch (NotImplementedException e) { - LOG.debug(e); + if (LOG.isDebugEnabled()) { + LOG.debug(e); + } } if (planNode.getChildren().size() == 0) { return; @@ -58,7 +60,9 @@ public class ProjectPlanner { try { inputSlotIds = planNode.computeInputSlotIds(analyzer); } catch (NotImplementedException e) { - LOG.debug(e); + if (LOG.isDebugEnabled()) { + LOG.debug(e); + } } for (PlanNode child : planNode.getChildren()) { projectPlanNode(inputSlotIds, child); diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/planner/RollupSelector.java b/fe/fe-core/src/main/java/org/apache/doris/planner/RollupSelector.java index 916a339e55..8fed65ad83 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/RollupSelector.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/RollupSelector.java @@ -88,7 +88,9 @@ public final class RollupSelector { for (Long partitionId : partitionIds) { rowCount += table.getPartition(partitionId).getIndex(indexId).getRowCount(); } - LOG.debug("rowCount={} for table={}", rowCount, indexId); + if (LOG.isDebugEnabled()) { + LOG.debug("rowCount={} for table={}", rowCount, indexId); + } if (rowCount < minRowCount) { minRowCount = rowCount; selectedIndexId = indexId; @@ -123,7 +125,9 @@ public final class RollupSelector { } final List rollups = table.getVisibleIndex(); - LOG.debug("num of rollup(base included): {}, pre aggr: {}", rollups.size(), isPreAggregation); + if (LOG.isDebugEnabled()) { + LOG.debug("num of rollup(base included): {}, pre aggr: {}", rollups.size(), isPreAggregation); + } // 1. find all rollup indexes which contains all tuple columns final List rollupsContainsOutput = Lists.newArrayList(); @@ -138,16 +142,22 @@ public final class RollupSelector { // or those rollup tables which key columns is the same with base table // (often in different order) if (isPreAggregation) { - LOG.debug("preAggregation is on. add index {} which contains all output columns", - rollup.getId()); + if (LOG.isDebugEnabled()) { + LOG.debug("preAggregation is on. add index {} which contains all output columns", + rollup.getId()); + } rollupsContainsOutput.add(rollup); } else if (table.getKeyColumnsByIndexId(rollup.getId()).size() == baseTableColumns.size()) { - LOG.debug("preAggregation is off, but index {} have same key columns with base index.", - rollup.getId()); + if (LOG.isDebugEnabled()) { + LOG.debug("preAggregation is off, but index {} have same key columns with base index.", + rollup.getId()); + } rollupsContainsOutput.add(rollup); } } else { - LOG.debug("exclude index {} because it does not contain all output columns", rollup.getId()); + if (LOG.isDebugEnabled()) { + LOG.debug("exclude index {} because it does not contain all output columns", rollup.getId()); + } } } @@ -203,10 +213,16 @@ public final class RollupSelector { } if (prefixMatchCount == maxPrefixMatchCount) { - LOG.debug("s3: find a equal prefix match index {}. match count: {}", index.getId(), prefixMatchCount); + if (LOG.isDebugEnabled()) { + LOG.debug("s3: find a equal prefix match index {}. match count: {}", + index.getId(), prefixMatchCount); + } rollupsMatchingBestPrefixIndex.add(index.getId()); } else if (prefixMatchCount > maxPrefixMatchCount) { - LOG.debug("s3: find a better prefix match index {}. match count: {}", index.getId(), prefixMatchCount); + if (LOG.isDebugEnabled()) { + LOG.debug("s3: find a better prefix match index {}. 
match count: {}", + index.getId(), prefixMatchCount); + } maxPrefixMatchCount = prefixMatchCount; rollupsMatchingBestPrefixIndex.clear(); rollupsMatchingBestPrefixIndex.add(index.getId()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/ScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/ScanNode.java index 3633f2d1fd..199538241b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/ScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/ScanNode.java @@ -454,7 +454,9 @@ public abstract class ScanNode extends PlanNode { } } - LOG.debug("partitionColumnFilter: {}", partitionColumnFilter); + if (LOG.isDebugEnabled()) { + LOG.debug("partitionColumnFilter: {}", partitionColumnFilter); + } return partitionColumnFilter; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/SelectNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/SelectNode.java index a60648bcda..6c6b665b00 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/SelectNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/SelectNode.java @@ -93,7 +93,9 @@ public class SelectNode extends PlanNode { } else { this.cardinality = Math.round(cardinality * selectivity); } - LOG.debug("stats Select: cardinality={}", this.cardinality); + if (LOG.isDebugEnabled()) { + LOG.debug("stats Select: cardinality={}", this.cardinality); + } } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java index 06d234c191..c42d91d690 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java @@ -582,7 +582,9 @@ public class SingleNodePlanner { TableRef olapTableRef = selectStmt.getTableRefs().get(0); if (Expr.isBound(Lists.newArrayList(aggExpr), Lists.newArrayList(olapTableRef.getId()))) { // do nothing - LOG.debug("All agg exprs is bound to olapTable: {}" + olapTableRef.getTable().getName()); + if (LOG.isDebugEnabled()) { + LOG.debug("All agg exprs is bound to olapTable: {}" + olapTableRef.getTable().getName()); + } } else { List tupleIds = Lists.newArrayList(); List slotIds = Lists.newArrayList(); @@ -603,10 +605,12 @@ public class SingleNodePlanner { + selectStmt.getTableRefs().get(0).toSql() + "]"; aggTableValidate = false; } else { - LOG.debug("The table which agg expr [{}] is bound to, is not OLAP table [{}]", - aggExpr.debugString(), - analyzer.getTupleDesc(tupleId).getTable() == null ? "inline view" : - analyzer.getTupleDesc(tupleId).getTable().getName()); + if (LOG.isDebugEnabled()) { + LOG.debug("The table which agg expr [{}] is bound to, is not OLAP table [{}]", + aggExpr.debugString(), + analyzer.getTupleDesc(tupleId).getTable() == null ? "inline view" : + analyzer.getTupleDesc(tupleId).getTable().getName()); + } } } @@ -932,14 +936,18 @@ public class SingleNodePlanner { // use 0 for the size to avoid it becoming the leftmost input // TODO: Consider raw size of scanned partitions in the absence of stats. candidates.add(Pair.of(ref, new Long(0))); - LOG.debug("The candidate of " + ref.getUniqueAlias() + ": -1. " - + "Using 0 instead of -1 to avoid error"); + if (LOG.isDebugEnabled()) { + LOG.debug("The candidate of " + ref.getUniqueAlias() + ": -1. 
" + + "Using 0 instead of -1 to avoid error"); + } continue; } Preconditions.checkState(ref.isAnalyzed()); long materializedSize = plan.getCardinality(); candidates.add(Pair.of(ref, new Long(materializedSize))); - LOG.debug("The candidate of " + ref.getUniqueAlias() + ": " + materializedSize); + if (LOG.isDebugEnabled()) { + LOG.debug("The candidate of " + ref.getUniqueAlias() + ": " + materializedSize); + } } if (candidates.isEmpty()) { // This branch should not be reached, because the first one should be inner join. @@ -991,7 +999,9 @@ public class SingleNodePlanner { // (ML): change the function name private PlanNode createJoinPlan(Analyzer analyzer, TableRef leftmostRef, List> refPlans) throws UserException { - LOG.debug("Try to create a query plan starting with " + leftmostRef.getUniqueAlias()); + if (LOG.isDebugEnabled()) { + LOG.debug("Try to create a query plan starting with " + leftmostRef.getUniqueAlias()); + } // the refs that have yet to be joined List> remainingRefs = new ArrayList<>(); @@ -1089,7 +1099,9 @@ public class SingleNodePlanner { stringBuilder.append("The " + tblRefOfCandidate.getUniqueAlias() + " is right child of join node."); stringBuilder.append("The join cardinality is " + candidate.getCardinality() + "."); stringBuilder.append("In round " + successfulSelectionTimes); - LOG.debug(stringBuilder.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug(stringBuilder.toString()); + } } // Use 'candidate' as the new root; don't consider any other table refs at this @@ -1153,8 +1165,10 @@ public class SingleNodePlanner { ++successfulSelectionTimes; } - LOG.debug("The final join sequence is " - + joinedRefs.stream().map(TableRef::getUniqueAlias).collect(Collectors.joining(","))); + if (LOG.isDebugEnabled()) { + LOG.debug("The final join sequence is " + + joinedRefs.stream().map(TableRef::getUniqueAlias).collect(Collectors.joining(","))); + } return root; } @@ -1170,7 +1184,9 @@ public class SingleNodePlanner { } if (analyzer.enableStarJoinReorder()) { - LOG.debug("use old reorder logical in select stmt"); + if (LOG.isDebugEnabled()) { + LOG.debug("use old reorder logical in select stmt"); + } selectStmt.reorderTable(analyzer); } @@ -1210,7 +1226,9 @@ public class SingleNodePlanner { AggregateInfo aggInfo = selectStmt.getAggInfo(); if (analyzer.safeIsEnableJoinReorderBasedCost()) { - LOG.debug("Using new join reorder strategy when enable_join_reorder_based_cost is true"); + if (LOG.isDebugEnabled()) { + LOG.debug("Using new join reorder strategy when enable_join_reorder_based_cost is true"); + } // create plans for our table refs; use a list here instead of a map to // maintain a deterministic order of traversing the TableRefs during join // plan generation (helps with tests) @@ -2016,14 +2034,18 @@ public class SingleNodePlanner { errMsg.setRef("non-equal " + op.toString() + " is not supported"); LOG.warn(errMsg); } - LOG.debug("no candidates for join."); + if (LOG.isDebugEnabled()) { + LOG.debug("no candidates for join."); + } return; } for (Expr e : candidates) { // Ignore predicate if one of its children is a constant. 
if (e.getChild(0).isLiteral() || e.getChild(1).isLiteral()) { - LOG.debug("double is constant."); + if (LOG.isDebugEnabled()) { + LOG.debug("double is constant."); + } continue; } @@ -2049,7 +2071,9 @@ public class SingleNodePlanner { } else if (e.getChild(0).isBoundByTupleIds(lhsIds)) { lhsExpr = e.getChild(0); } else { - LOG.debug("not an equi-join condition between lhsIds and rhsId"); + if (LOG.isDebugEnabled()) { + LOG.debug("not an equi-join condition between lhsIds and rhsId"); + } continue; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/SortNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/SortNode.java index 27d4238536..33a04c5dfa 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/SortNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/SortNode.java @@ -225,7 +225,9 @@ public class SortNode extends PlanNode { cardinality = Math.min(cardinality, limit); } } - LOG.debug("stats Sort: cardinality=" + Double.toString(cardinality)); + if (LOG.isDebugEnabled()) { + LOG.debug("stats Sort: cardinality=" + Double.toString(cardinality)); + } } public void init(Analyzer analyzer) throws UserException { diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java index 346a752860..5364c81868 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java @@ -333,7 +333,6 @@ public class StreamLoadPlanner { params.setQueryGlobals(queryGlobals); params.setTableName(destTable.getName()); - // LOG.debug("stream load txn id: {}, plan: {}", streamLoadTask.getTxnId(), params); return params; } @@ -618,4 +617,3 @@ public class StreamLoadPlanner { return descTable; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/ExternalScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/ExternalScanNode.java index cbfe318881..ad6699d223 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/ExternalScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/ExternalScanNode.java @@ -80,7 +80,9 @@ public abstract class ExternalScanNode extends ScanNode { @Override public List getScanRangeLocations(long maxScanRangeLength) { - LOG.debug("There is {} scanRangeLocations for execution.", scanRangeLocations.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("There is {} scanRangeLocations for execution.", scanRangeLocations.size()); + } return scanRangeLocations; } @@ -94,4 +96,3 @@ public abstract class ExternalScanNode extends ScanNode { return scanRangeLocations.size(); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/FederationBackendPolicy.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/FederationBackendPolicy.java index df0e955ea8..785d76478c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/FederationBackendPolicy.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/FederationBackendPolicy.java @@ -166,7 +166,9 @@ public class FederationBackendPolicy { } } } else { - LOG.debug("user info in ExternalFileScanNode should not be null, add log to observer"); + if (LOG.isDebugEnabled()) { + LOG.debug("user info in ExternalFileScanNode should not be null, add log to observer"); + } } // scan node is used for query @@ -292,8 +294,10 @@ public class FederationBackendPolicy { } } if 
(candidateNodes.isEmpty()) { - LOG.debug("No nodes available to schedule {}. Available nodes {}", split, - backends); + if (LOG.isDebugEnabled()) { + LOG.debug("No nodes available to schedule {}. Available nodes {}", split, + backends); + } throw new UserException(SystemInfoService.NO_SCAN_NODE_BACKEND_AVAILABLE_MSG); } @@ -504,4 +508,3 @@ public class FederationBackendPolicy { } } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/FileGroupInfo.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/FileGroupInfo.java index 2b412da825..1293f295b9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/FileGroupInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/FileGroupInfo.java @@ -333,5 +333,3 @@ public class FileGroupInfo { return rangeDesc; } } - - diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/FileQueryScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/FileQueryScanNode.java index a374df5e60..d719bce817 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/FileQueryScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/FileQueryScanNode.java @@ -374,9 +374,12 @@ public abstract class FileQueryScanNode extends FileScanNode { location.setBackendId(backend.getId()); location.setServer(new TNetworkAddress(backend.getHost(), backend.getBePort())); curLocations.addToLocations(location); - LOG.debug("assign to backend {} with table split: {} ({}, {}), location: {}", - curLocations.getLocations().get(0).getBackendId(), fileSplit.getPath(), fileSplit.getStart(), - fileSplit.getLength(), Joiner.on("|").join(fileSplit.getHosts())); + if (LOG.isDebugEnabled()) { + LOG.debug("assign to backend {} with table split: {} ({}, {}), location: {}", + curLocations.getLocations().get(0).getBackendId(), fileSplit.getPath(), + fileSplit.getStart(), fileSplit.getLength(), + Joiner.on("|").join(fileSplit.getHosts())); + } scanRangeLocations.add(curLocations); this.totalFileSize += fileSplit.getLength(); } @@ -385,8 +388,10 @@ public abstract class FileQueryScanNode extends FileScanNode { if (ConnectContext.get().getExecutor() != null) { ConnectContext.get().getExecutor().getSummaryProfile().setCreateScanRangeFinishTime(); } - LOG.debug("create #{} ScanRangeLocations cost: {} ms", - scanRangeLocations.size(), (System.currentTimeMillis() - start)); + if (LOG.isDebugEnabled()) { + LOG.debug("create #{} ScanRangeLocations cost: {} ms", + scanRangeLocations.size(), (System.currentTimeMillis() - start)); + } } private void setLocationPropertiesIfNecessary(Backend selectedBackend, TFileType locationType, @@ -405,9 +410,11 @@ public abstract class FileQueryScanNode extends FileScanNode { FsBroker broker; if (brokerName != null) { broker = Env.getCurrentEnv().getBrokerMgr().getBroker(brokerName, selectedBackend.getHost()); - LOG.debug(String.format( - "Set location for broker [%s], selected BE host: [%s] selected broker host: [%s]", - brokerName, selectedBackend.getHost(), broker.host)); + if (LOG.isDebugEnabled()) { + LOG.debug(String.format( + "Set location for broker [%s], selected BE host: [%s] selected broker host: [%s]", + brokerName, selectedBackend.getHost(), broker.host)); + } } else { broker = Env.getCurrentEnv().getBrokerMgr().getAnyAliveBroker(); } @@ -516,4 +523,3 @@ public abstract class FileQueryScanNode extends FileScanNode { protected abstract Map getLocationProperties() throws UserException; } - diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/planner/external/FileScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/FileScanNode.java index fb214df5a6..f18642292d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/FileScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/FileScanNode.java @@ -226,7 +226,9 @@ public abstract class FileScanNode extends ExternalScanNode { List result = Lists.newArrayList(); TFileCompressType compressType = Util.inferFileCompressTypeByPath(path.toString()); if (!splittable || compressType != TFileCompressType.PLAIN) { - LOG.debug("Path {} is not splittable.", path); + if (LOG.isDebugEnabled()) { + LOG.debug("Path {} is not splittable.", path); + } String[] hosts = blockLocations.length == 0 ? null : blockLocations[0].getHosts(); result.add(splitCreator.create(path, 0, length, length, modificationTime, hosts, partitionValues)); return result; @@ -252,7 +254,9 @@ public abstract class FileScanNode extends ExternalScanNode { length, modificationTime, hosts, partitionValues)); } - LOG.debug("Path {} includes {} splits.", path, result.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("Path {} includes {} splits.", path, result.size()); + } return result; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/HiveScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/HiveScanNode.java index 2dd9331ef2..ac8a4ebd67 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/HiveScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/HiveScanNode.java @@ -148,8 +148,10 @@ public class HiveScanNode extends FileQueryScanNode { hivePartitionValues.getSingleColumnRangeMap(), true); Collection filteredPartitionIds = pruner.prune(); - LOG.debug("hive partition fetch and prune for table {}.{} cost: {} ms", - hmsTable.getDbName(), hmsTable.getName(), (System.currentTimeMillis() - start)); + if (LOG.isDebugEnabled()) { + LOG.debug("hive partition fetch and prune for table {}.{} cost: {} ms", + hmsTable.getDbName(), hmsTable.getName(), (System.currentTimeMillis() - start)); + } partitionItems = Lists.newArrayListWithCapacity(filteredPartitionIds.size()); for (Long id : filteredPartitionIds) { partitionItems.add(idToPartitionItem.get(id)); @@ -199,8 +201,11 @@ public class HiveScanNode extends FileQueryScanNode { String bindBrokerName = hmsTable.getCatalog().bindBrokerName(); List allFiles = Lists.newArrayList(); getFileSplitByPartitions(cache, getPartitions(), allFiles, bindBrokerName); - LOG.debug("get #{} files for table: {}.{}, cost: {} ms", - allFiles.size(), hmsTable.getDbName(), hmsTable.getName(), (System.currentTimeMillis() - start)); + if (LOG.isDebugEnabled()) { + LOG.debug("get #{} files for table: {}.{}, cost: {} ms", + allFiles.size(), hmsTable.getDbName(), hmsTable.getName(), + (System.currentTimeMillis() - start)); + } return allFiles; } catch (Throwable t) { LOG.warn("get file split failed for table: {}", hmsTable.getName(), t); diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/LoadScanProvider.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/LoadScanProvider.java index 872739313a..c51c0373e4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/LoadScanProvider.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/LoadScanProvider.java @@ -266,4 +266,3 @@ public class LoadScanProvider { return fileGroupInfo.getTargetTable(); } } - diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/planner/external/NodeSelectionStrategy.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/NodeSelectionStrategy.java index 4b4d10a424..cda56b27cd 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/NodeSelectionStrategy.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/NodeSelectionStrategy.java @@ -22,4 +22,3 @@ public enum NodeSelectionStrategy { RANDOM, CONSISTENT_HASHING } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/SplitWeight.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/SplitWeight.java index d8724017b6..31f123de17 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/SplitWeight.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/SplitWeight.java @@ -127,4 +127,3 @@ public final class SplitWeight { return BigDecimal.valueOf(value, -UNIT_SCALE).stripTrailingZeros().toPlainString(); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/hudi/HudiScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/hudi/HudiScanNode.java index 64e11024af..d197635cef 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/hudi/HudiScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/hudi/HudiScanNode.java @@ -94,9 +94,13 @@ public class HudiScanNode extends HiveScanNode { isCowOrRoTable = hmsTable.isHoodieCowTable() || "skip_merge".equals( hmsTable.getCatalogProperties().get("hoodie.datasource.merge.type")); if (isCowOrRoTable) { - LOG.debug("Hudi table {} can read as cow/read optimize table", hmsTable.getName()); + if (LOG.isDebugEnabled()) { + LOG.debug("Hudi table {} can read as cow/read optimize table", hmsTable.getName()); + } } else { - LOG.debug("Hudi table {} is a mor table, and will use JNI to read data in BE", hmsTable.getName()); + if (LOG.isDebugEnabled()) { + LOG.debug("Hudi table {} is a mor table, and will use JNI to read data in BE", hmsTable.getName()); + } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/hudi/HudiSplit.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/hudi/HudiSplit.java index 202d24895a..6e7a877a1a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/hudi/HudiSplit.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/hudi/HudiSplit.java @@ -43,5 +43,3 @@ public class HudiSplit extends FileSplit { } - - diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/iceberg/IcebergSplit.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/iceberg/IcebergSplit.java index b58514dcf3..67e7e9ad4e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/iceberg/IcebergSplit.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/iceberg/IcebergSplit.java @@ -43,5 +43,3 @@ public class IcebergSplit extends FileSplit { } - - diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/paimon/PaimonValueConverter.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/paimon/PaimonValueConverter.java index 56b6dcdbdb..7c294d64c1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/paimon/PaimonValueConverter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/paimon/PaimonValueConverter.java @@ -160,4 +160,3 @@ public class PaimonValueConverter extends DataTypeDefaultVisitor { return null; } } - diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/plsql/Column.java b/fe/fe-core/src/main/java/org/apache/doris/plsql/Column.java index 6e49e45598..39bb44eecf 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/plsql/Column.java +++ b/fe/fe-core/src/main/java/org/apache/doris/plsql/Column.java @@ -66,5 +66,3 @@ public class Column { } } - - diff --git a/fe/fe-core/src/main/java/org/apache/doris/plsql/Row.java b/fe/fe-core/src/main/java/org/apache/doris/plsql/Row.java index a5de30fbf1..d05e2fe863 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/plsql/Row.java +++ b/fe/fe-core/src/main/java/org/apache/doris/plsql/Row.java @@ -100,5 +100,3 @@ public class Row { } } - - diff --git a/fe/fe-core/src/main/java/org/apache/doris/plugin/audit/AuditLoaderPlugin.java b/fe/fe-core/src/main/java/org/apache/doris/plugin/audit/AuditLoaderPlugin.java index 148fc460d5..d8941605c3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/plugin/audit/AuditLoaderPlugin.java +++ b/fe/fe-core/src/main/java/org/apache/doris/plugin/audit/AuditLoaderPlugin.java @@ -109,7 +109,9 @@ public class AuditLoaderPlugin extends Plugin implements AuditPlugin { try { loadThread.join(); } catch (InterruptedException e) { - LOG.debug("encounter exception when closing the audit loader", e); + if (LOG.isDebugEnabled()) { + LOG.debug("encounter exception when closing the audit loader", e); + } } } } @@ -120,7 +122,9 @@ public class AuditLoaderPlugin extends Plugin implements AuditPlugin { public void exec(AuditEvent event) { if (!GlobalVariable.enableAuditLoader) { - LOG.debug("builtin audit loader is disabled, discard current audit event"); + if (LOG.isDebugEnabled()) { + LOG.debug("builtin audit loader is disabled, discard current audit event"); + } return; } try { @@ -130,8 +134,10 @@ public class AuditLoaderPlugin extends Plugin implements AuditPlugin { // discard the current audit_event. If this problem occurs frequently, // improvement can be considered. ++discardLogNum; - LOG.debug("encounter exception when putting current audit batch, discard current audit event." - + " total discard num: {}", discardLogNum, e); + if (LOG.isDebugEnabled()) { + LOG.debug("encounter exception when putting current audit batch, discard current audit event." + + " total discard num: {}", discardLogNum, e); + } } } @@ -167,7 +173,9 @@ public class AuditLoaderPlugin extends Plugin implements AuditPlugin { String stmt = truncateByBytes(event.stmt).replace("\n", " ") .replace("\t", " ") .replace("\r", " "); - LOG.debug("receive audit event with stmt: {}", stmt); + if (LOG.isDebugEnabled()) { + LOG.debug("receive audit event with stmt: {}", stmt); + } logBuffer.append(stmt).append("\n"); } @@ -204,9 +212,13 @@ public class AuditLoaderPlugin extends Plugin implements AuditPlugin { return; } AuditStreamLoader.LoadResponse response = loader.loadBatch(auditLogBuffer, token); - LOG.debug("audit loader response: {}", response); + if (LOG.isDebugEnabled()) { + LOG.debug("audit loader response: {}", response); + } } catch (Exception e) { - LOG.debug("encounter exception when putting current audit batch, discard current batch", e); + if (LOG.isDebugEnabled()) { + LOG.debug("encounter exception when putting current audit batch, discard current batch", e); + } discardLogNum += auditLogNum; } finally { // make a new string builder to receive following events. 
@@ -243,7 +255,9 @@ public class AuditLoaderPlugin extends Plugin implements AuditPlugin { loadIfNecessary(loader); } } catch (InterruptedException ie) { - LOG.debug("encounter exception when loading current audit batch", ie); + if (LOG.isDebugEnabled()) { + LOG.debug("encounter exception when loading current audit batch", ie); + } } catch (Exception e) { LOG.error("run audit logger error:", e); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/plugin/audit/AuditLogBuilder.java b/fe/fe-core/src/main/java/org/apache/doris/plugin/audit/AuditLogBuilder.java index f06dfb6eef..210081b101 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/plugin/audit/AuditLogBuilder.java +++ b/fe/fe-core/src/main/java/org/apache/doris/plugin/audit/AuditLogBuilder.java @@ -90,7 +90,9 @@ public class AuditLogBuilder extends Plugin implements AuditPlugin { break; } } catch (Exception e) { - LOG.debug("failed to process audit event", e); + if (LOG.isDebugEnabled()) { + LOG.debug("failed to process audit event", e); + } } } @@ -160,4 +162,3 @@ public class AuditLogBuilder extends Plugin implements AuditPlugin { AuditLog.getStreamLoadAudit().log(auditLog); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/plugin/dialect/HttpDialectUtils.java b/fe/fe-core/src/main/java/org/apache/doris/plugin/dialect/HttpDialectUtils.java index 39c6417988..3cb6244d51 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/plugin/dialect/HttpDialectUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/plugin/dialect/HttpDialectUtils.java @@ -56,7 +56,9 @@ public class HttpDialectUtils { } int responseCode = connection.getResponseCode(); - LOG.debug("POST Response Code: {}, post data: {}", responseCode, requestStr); + if (LOG.isDebugEnabled()) { + LOG.debug("POST Response Code: {}, post data: {}", responseCode, requestStr); + } if (responseCode == HttpURLConnection.HTTP_OK) { try (InputStreamReader inputStreamReader @@ -72,7 +74,9 @@ public class HttpDialectUtils { Type type = new TypeToken() { }.getType(); ConvertResponse result = new Gson().fromJson(response.toString(), type); - LOG.debug("convert response: {}", result); + if (LOG.isDebugEnabled()) { + LOG.debug("convert response: {}", result); + } if (result.code == 0) { if (!"v1".equals(result.version)) { LOG.warn("failed to convert sql, response version is not v1: {}", result.version); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/AuditEventProcessor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/AuditEventProcessor.java index f9ab35f9c2..ed0d028315 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/AuditEventProcessor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/AuditEventProcessor.java @@ -88,7 +88,9 @@ public class AuditEventProcessor { if (auditPlugins == null || System.currentTimeMillis() - lastUpdateTime > UPDATE_PLUGIN_INTERVAL_MS) { auditPlugins = pluginMgr.getActivePluginList(PluginType.AUDIT); lastUpdateTime = System.currentTimeMillis(); - LOG.debug("update audit plugins. num: {}", auditPlugins.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("update audit plugins. 
num: {}", auditPlugins.size()); + } } try { @@ -97,7 +99,9 @@ public class AuditEventProcessor { continue; } } catch (InterruptedException e) { - LOG.debug("encounter exception when getting audit event from queue, ignore", e); + if (LOG.isDebugEnabled()) { + LOG.debug("encounter exception when getting audit event from queue, ignore", e); + } continue; } @@ -108,7 +112,9 @@ public class AuditEventProcessor { } } } catch (Exception e) { - LOG.debug("encounter exception when processing audit event.", e); + if (LOG.isDebugEnabled()) { + LOG.debug("encounter exception when processing audit event.", e); + } } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java index 09db53e473..770e18a0fa 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java @@ -146,7 +146,8 @@ public class ConnectContext { protected volatile boolean isTempUser = false; // username@host combination for the Doris account // that the server used to authenticate the current client. - // In other word, currentUserIdentity is the entry that matched in Doris auth table. + // In other word, currentUserIdentity is the entry that matched in Doris auth + // table. // This account determines user's access privileges. protected volatile UserIdentity currentUserIdentity; // Variables belong to this session. @@ -179,14 +180,19 @@ public class ConnectContext { // So in the query planning stage, do not use any value in this attribute. protected QueryDetail queryDetail = null; - // If set to true, the nondeterministic function will not be rewrote to constant. + // If set to true, the nondeterministic function will not be rewrote to + // constant. private boolean notEvalNondeterministicFunction = false; - // The resource tag is used to limit the node resources that the user can use for query. + // The resource tag is used to limit the node resources that the user can use + // for query. // The default is empty, that is, unlimited. - // This property is obtained from UserProperty when the client connection is created. - // Only when the connection is created again, the new resource tags will be retrieved from the UserProperty + // This property is obtained from UserProperty when the client connection is + // created. + // Only when the connection is created again, the new resource tags will be + // retrieved from the UserProperty private Set resourceTags = Sets.newHashSet(); - // If set to true, the resource tags set in resourceTags will be used to limit the query resources. + // If set to true, the resource tags set in resourceTags will be used to limit + // the query resources. // If set to false, the system will not restrict query resources. private boolean isResourceTagsSet = false; @@ -203,7 +209,6 @@ public class ConnectContext { private SessionContext sessionContext; - // This context is used for SSL connection between server and mysql client. private final MysqlSslContext mysqlSslContext = new MysqlSslContext(SSL_PROTOCOL); @@ -219,7 +224,8 @@ public class ConnectContext { // internal call like `insert overwrite` need skipAuth // For example, `insert overwrite` only requires load permission, // but the internal implementation will call the logic of `AlterTable`. 
- // In this case, `skipAuth` needs to be set to `true` to skip the permission check of `AlterTable` + // In this case, `skipAuth` needs to be set to `true` to skip the permission + // check of `AlterTable` private boolean skipAuth = false; private Exec exec; private boolean runProcedure = false; @@ -516,7 +522,8 @@ public class ConnectContext { } } - // Get variable value through variable name, used to satisfy statement like `SELECT @@comment_version` + // Get variable value through variable name, used to satisfy statement like + // `SELECT @@comment_version` public void fillValueForUserDefinedVar(VariableExpr desc) { String varName = desc.getName().toLowerCase(); if (userVars.containsKey(varName)) { @@ -737,7 +744,8 @@ public class ConnectContext { } public CatalogIf getCurrentCatalog() { - // defaultCatalog is switched by SwitchStmt, so we don't need to check to exist of catalog. + // defaultCatalog is switched by SwitchStmt, so we don't need to check to exist + // of catalog. return getCatalog(defaultCatalog); } @@ -957,7 +965,8 @@ */ public int getExecTimeout() { if (executor != null && executor.isSyncLoadKindStmt()) { - // particular for insert stmt, we can expand other type of timeout in the same way + // particular for insert stmt, we can expand other type of timeout in the same + // way return Math.max(sessionVariable.getInsertTimeoutS(), sessionVariable.getQueryTimeoutS()); } else if (executor != null && executor.isAnalyzeStmt()) { return sessionVariable.getAnalyzeTimeoutS(); @@ -1015,7 +1024,6 @@ public class ConnectContext { } } - public void startAcceptQuery(ConnectProcessor connectProcessor) { mysqlChannel.startAcceptQuery(this, connectProcessor); } @@ -1092,4 +1100,3 @@ public class ConnectContext { return this.sessionVariable.getNetWriteTimeout(); } } -
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java index 2df6033e1f..6fd2abbf5f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java @@ -156,7 +156,9 @@ public abstract class ConnectProcessor { } protected void handleStmtClose(int stmtId) { - LOG.debug("close stmt id: {}", stmtId); + if (LOG.isDebugEnabled()) { + LOG.debug("close stmt id: {}", stmtId); + } ConnectContext.get().removePrepareStmt(String.valueOf(stmtId)); // No response packet is sent back to the client, see // https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_com_stmt_close.html @@ -205,8 +207,10 @@ public abstract class ConnectProcessor { return; } catch (Exception e) { // TODO: We should catch all exception here until we support all query syntax. - LOG.debug("Nereids parse sql failed. Reason: {}. Statement: \"{}\".", - e.getMessage(), convertedStmt); + if (LOG.isDebugEnabled()) { + LOG.debug("Nereids parse sql failed. Reason: {}. Statement: \"{}\".", + e.getMessage(), convertedStmt); + } nereidsParseException = e; } }
@@ -349,7 +353,9 @@ public abstract class ConnectProcessor { // analyze the origin stmt and return multi-statements protected List parse(String originStmt) throws AnalysisException, DdlException { - LOG.debug("the originStmts are: {}", originStmt); + if (LOG.isDebugEnabled()) { + LOG.debug("the originStmts are: {}", originStmt); + } // Parse statement with parser generated by CUP&FLEX SqlScanner input = new SqlScanner(new StringReader(originStmt), ctx.getSessionVariable().getSqlMode()); SqlParser parser = new SqlParser(input); @@ -359,7 +365,9 @@ public abstract class ConnectProcessor { throw new AnalysisException("Please check your sql, we meet an error when parsing.", e); } catch (AnalysisException | DdlException e) { String errorMessage = parser.getErrorMsg(originStmt); - LOG.debug("origin stmt: {}; Analyze error message: {}", originStmt, parser.getErrorMsg(originStmt), e); + if (LOG.isDebugEnabled()) { + LOG.debug("origin stmt: {}; Analyze error message: {}", originStmt, parser.getErrorMsg(originStmt), e); + } if (errorMessage == null) { throw e; } else { @@ -454,14 +462,18 @@ public abstract class ConnectProcessor { executor.sendResultSet(resultSet); packet = getResultPacket(); if (packet == null) { - LOG.debug("packet == null"); + if (LOG.isDebugEnabled()) { + LOG.debug("packet == null"); + } return; } } } else { packet = getResultPacket(); if (packet == null) { - LOG.debug("packet == null"); + if (LOG.isDebugEnabled()) { + LOG.debug("packet == null"); + } return; } } @@ -613,5 +625,3 @@ public abstract class ConnectProcessor { throw new NotImplementedException("Not Impl processOnce"); } } - -
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/CoordInterface.java b/fe/fe-core/src/main/java/org/apache/doris/qe/CoordInterface.java index e57d0d261e..50a6bd6495 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/CoordInterface.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/CoordInterface.java @@ -33,4 +33,3 @@ public interface CoordInterface { // some resource.
public default void close() {} } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java b/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java index 15165bad15..b4d6b9220c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java @@ -532,8 +532,10 @@ public class Coordinator implements CoordInterface { .append(backend.getBePort()).append("-") .append(backend.getProcessEpoch()); } - LOG.debug("query {}, backend size: {}, {}", - DebugUtil.printId(queryId), backendNum, backendInfos.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("query {}, backend size: {}, {}", + DebugUtil.printId(queryId), backendNum, backendInfos.toString()); + } } } @@ -560,7 +562,9 @@ public class Coordinator implements CoordInterface { entry.getValue().appendTo(sb); } sb.append("]"); - LOG.debug(sb.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug(sb.toString()); + } } } @@ -974,8 +978,10 @@ public class Coordinator implements CoordInterface { for (PipelineExecContext pec : ctxs.ctxs) { infos += pec.fragmentId + " "; } - LOG.debug("query {}, sending pipeline fragments: {} to be {} bprc address {}", - DebugUtil.printId(queryId), infos, ctxs.beId, ctxs.brpcAddr.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("query {}, sending pipeline fragments: {} to be {} bprc address {}", + DebugUtil.printId(queryId), infos, ctxs.beId, ctxs.brpcAddr.toString()); + } } ctxs.unsetFields(); @@ -1363,7 +1369,9 @@ public class Coordinator implements CoordInterface { Long numLimitRows = fragments.get(0).getPlanRoot().getLimit(); boolean hasLimit = numLimitRows > 0; if (!isBlockQuery && instanceIds.size() > 1 && hasLimit && numReceivedRows >= numLimitRows) { - LOG.debug("no block query, return num >= limit rows, need cancel"); + if (LOG.isDebugEnabled()) { + LOG.debug("no block query, return num >= limit rows, need cancel"); + } cancelInternal(Types.PPlanFragmentCancelReason.LIMIT_REACH); } if (ConnectContext.get() != null && ConnectContext.get().getSessionVariable().dryRunQuery) { @@ -2082,7 +2090,9 @@ public class Coordinator implements CoordInterface { sharedScan = false; } - LOG.debug("scan range number per instance is: {}", perInstanceScanRanges.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("scan range number per instance is: {}", perInstanceScanRanges.size()); + } for (int j = 0; j < perInstanceScanRanges.size(); j++) { List scanRangeParams = perInstanceScanRanges.get(j); @@ -2460,10 +2470,12 @@ public class Coordinator implements CoordInterface { if (LOG.isDebugEnabled()) { StringBuilder builder = new StringBuilder(); ctx.printProfile(builder); - LOG.debug("profile for query_id={} fragment_id={}\n{}", - DebugUtil.printId(queryId), - params.getFragmentId(), - builder.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("profile for query_id={} fragment_id={}\n{}", + DebugUtil.printId(queryId), + params.getFragmentId(), + builder.toString()); + } } Status status = new Status(params.status); @@ -2497,8 +2509,10 @@ public class Coordinator implements CoordInterface { Preconditions.checkArgument(params.isSetDetailedReport()); if (ctx.done) { - LOG.debug("Query {} fragment {} is marked done", - DebugUtil.printId(queryId), ctx.profileFragmentId); + if (LOG.isDebugEnabled()) { + LOG.debug("Query {} fragment {} is marked done", + DebugUtil.printId(queryId), ctx.profileFragmentId); + } executionProfile.markOneFragmentDone(ctx.profileFragmentId); } } else if (enablePipelineEngine) { @@ -2511,10 +2525,12 @@ 
public class Coordinator implements CoordInterface { if (LOG.isDebugEnabled()) { StringBuilder builder = new StringBuilder(); ctx.printProfile(builder); - LOG.debug("profile for query_id={} instance_id={}\n{}", - DebugUtil.printId(queryId), - DebugUtil.printId(params.getFragmentInstanceId()), - builder.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("profile for query_id={} instance_id={}\n{}", + DebugUtil.printId(queryId), + DebugUtil.printId(params.getFragmentInstanceId()), + builder.toString()); + } } Status status = new Status(params.status); @@ -2555,12 +2571,16 @@ public class Coordinator implements CoordInterface { if (params.isSetErrorTabletInfos()) { updateErrorTabletInfos(params.getErrorTabletInfos()); } - LOG.debug("Query {} instance {} is marked done", - DebugUtil.printId(queryId), DebugUtil.printId(params.getFragmentInstanceId())); + if (LOG.isDebugEnabled()) { + LOG.debug("Query {} instance {} is marked done", + DebugUtil.printId(queryId), DebugUtil.printId(params.getFragmentInstanceId())); + } executionProfile.markOneInstanceDone(params.getFragmentInstanceId()); } else { - LOG.debug("Query {} instance {} is not marked done", - DebugUtil.printId(queryId), DebugUtil.printId(params.getFragmentInstanceId())); + if (LOG.isDebugEnabled()) { + LOG.debug("Query {} instance {} is not marked done", + DebugUtil.printId(queryId), DebugUtil.printId(params.getFragmentInstanceId())); + } } } else { if (params.backend_num >= backendExecStates.size()) { @@ -2578,10 +2598,12 @@ public class Coordinator implements CoordInterface { if (LOG.isDebugEnabled()) { StringBuilder builder = new StringBuilder(); execState.printProfile(builder); - LOG.debug("profile for query_id={} instance_id={}\n{}", - DebugUtil.printId(queryId), - DebugUtil.printId(params.getFragmentInstanceId()), - builder.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("profile for query_id={} instance_id={}\n{}", + DebugUtil.printId(queryId), + DebugUtil.printId(params.getFragmentInstanceId()), + builder.toString()); + } } Status status = new Status(params.status); @@ -4021,5 +4043,3 @@ public class Coordinator implements CoordInterface { } } - - diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/JournalObservable.java b/fe/fe-core/src/main/java/org/apache/doris/qe/JournalObservable.java index d9b19f7533..286169b036 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/JournalObservable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/JournalObservable.java @@ -38,12 +38,16 @@ public class JournalObservable { } obs.add(o); - LOG.debug("JournalObservable addObserver=[{}], the size is {}", o, obs.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("JournalObservable addObserver=[{}], the size is {}", o, obs.size()); + } } private synchronized void deleteObserver(JournalObserver o) { obs.remove(o); - LOG.debug("JournalObservable deleteObserver=[{}], the size is {}", o, obs.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("JournalObservable deleteObserver=[{}], the size is {}", o, obs.size()); + } } public void waitOn(Long expectedJournalVersion, int timeoutMs) throws DdlException { @@ -87,7 +91,9 @@ public class JournalObservable { } int pos = upperBound(arrLocal, size, journalId); - LOG.debug("notify observers: journal: {}, pos: {}, size: {}, obs: {}", journalId, pos, size, obs); + if (LOG.isDebugEnabled()) { + LOG.debug("notify observers: journal: {}, pos: {}, size: {}, obs: {}", journalId, pos, size, obs); + } for (int i = 0; i < pos; i++) { JournalObserver observer = ((JournalObserver) 
arrLocal[i]); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/JournalObserver.java b/fe/fe-core/src/main/java/org/apache/doris/qe/JournalObserver.java index b3e2b1d61c..6519d6cbc8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/JournalObserver.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/JournalObserver.java @@ -69,8 +69,10 @@ public class JournalObserver implements Comparable { // check if the replayed journal version is already larger than the expected version long replayedJournalId = Env.getCurrentEnv().getReplayedJournalId(); if (replayedJournalId >= targetJournalVersion || timeoutMs <= 0) { - LOG.debug("the replayed journal version {} already large than expected version: {}", - replayedJournalId, targetJournalVersion); + if (LOG.isDebugEnabled()) { + LOG.debug("the replayed journal version {} already large than expected version: {}", + replayedJournalId, targetJournalVersion); + } return; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/MysqlConnectProcessor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/MysqlConnectProcessor.java index 448f1b344c..7e9ec55186 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/MysqlConnectProcessor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/MysqlConnectProcessor.java @@ -76,7 +76,9 @@ public class MysqlConnectProcessor extends ConnectProcessor { } printB.append(" "); } - LOG.debug("debug packet {}", printB.toString().substring(0, 200)); + if (LOG.isDebugEnabled()) { + LOG.debug("debug packet {}", printB.toString().substring(0, 200)); + } } // process COM_EXECUTE, parse binary row data @@ -90,10 +92,14 @@ public class MysqlConnectProcessor extends ConnectProcessor { packetBuf.get(); // iteration_count always 1, packetBuf.getInt(); - LOG.debug("execute prepared statement {}", stmtId); + if (LOG.isDebugEnabled()) { + LOG.debug("execute prepared statement {}", stmtId); + } PrepareStmtContext prepareCtx = ctx.getPreparedStmt(String.valueOf(stmtId)); if (prepareCtx == null) { - LOG.debug("No such statement in context, stmtId:{}", stmtId); + if (LOG.isDebugEnabled()) { + LOG.debug("No such statement in context, stmtId:{}", stmtId); + } ctx.getState().setError(ErrorCode.ERR_UNKNOWN_COM_ERROR, "msg: Not supported such prepared statement"); return; @@ -114,7 +120,9 @@ public class MysqlConnectProcessor extends ConnectProcessor { // parse params's types for (int i = 0; i < paramCount; ++i) { int typeCode = packetBuf.getChar(); - LOG.debug("code {}", typeCode); + if (LOG.isDebugEnabled()) { + LOG.debug("code {}", typeCode); + } prepareCtx.stmt.placeholders().get(i).setTypeCode(typeCode); } } @@ -133,7 +141,9 @@ public class MysqlConnectProcessor extends ConnectProcessor { // TODO set real origin statement executeStmt.setOrigStmt(new OriginStatement("null", 0)); executeStmt.setUserInfo(ctx.getCurrentUserIdentity()); - LOG.debug("executeStmt {}", executeStmt); + if (LOG.isDebugEnabled()) { + LOG.debug("executeStmt {}", executeStmt); + } executor = new StmtExecutor(ctx, executeStmt); ctx.setExecutor(executor); executor.execute(); @@ -175,7 +185,9 @@ public class MysqlConnectProcessor extends ConnectProcessor { LOG.warn("Unknown command(" + code + ")"); return; } - LOG.debug("handle command {}", command); + if (LOG.isDebugEnabled()) { + LOG.debug("handle command {}", command); + } ctx.setCommand(command); ctx.setStartTime(); @@ -263,5 +275,3 @@ public class MysqlConnectProcessor extends ConnectProcessor { } } } - - diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/PointQueryExec.java 
b/fe/fe-core/src/main/java/org/apache/doris/qe/PointQueryExec.java index 06308fd634..40d244184b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/PointQueryExec.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/PointQueryExec.java @@ -88,7 +88,9 @@ public class PointQueryExec implements CoordInterface { private OlapScanNode getPlanRoot() { List fragments = planner.getFragments(); PlanFragment fragment = fragments.get(0); - LOG.debug("execPointGet fragment {}", fragment); + if (LOG.isDebugEnabled()) { + LOG.debug("execPointGet fragment {}", fragment); + } OlapScanNode planRoot = (OlapScanNode) fragment.getPlanRoot(); Preconditions.checkNotNull(planRoot); return planRoot; @@ -137,7 +139,9 @@ public class PointQueryExec implements CoordInterface { } // Random read replicas Collections.shuffle(this.candidateBackends); - LOG.debug("set scan locations, backend ids {}, tablet id {}", candidateBackends, tabletID); + if (LOG.isDebugEnabled()) { + LOG.debug("set scan locations, backend ids {}, tablet id {}", candidateBackends, tabletID); + } } public void setTimeout(long timeoutMs) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/QeProcessorImpl.java b/fe/fe-core/src/main/java/org/apache/doris/qe/QeProcessorImpl.java index 97a0a95d24..55536639bf 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/QeProcessorImpl.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/QeProcessorImpl.java @@ -199,7 +199,9 @@ public final class QeProcessorImpl implements QeProcessor { LOG.info("ReportExecStatus(): fragment_instance_id={}, query id={}, backend num: {}, ip: {}", DebugUtil.printId(params.fragment_instance_id), DebugUtil.printId(params.query_id), params.backend_num, beAddr); - LOG.debug("params: {}", params); + if (LOG.isDebugEnabled()) { + LOG.debug("params: {}", params); + } } final TReportExecStatusResult result = new TReportExecStatusResult(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java index 3e99b22c39..4c8a500682 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java @@ -3053,7 +3053,9 @@ public class SessionVariable implements Serializable, Writable { continue; } - LOG.debug("set forward variable: {} = {}", varAttr.name(), val); + if (LOG.isDebugEnabled()) { + LOG.debug("set forward variable: {} = {}", varAttr.name(), val); + } // set config field switch (f.getType().getSimpleName()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java index 84657e0d9a..aa1f8d3acc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java @@ -1377,7 +1377,9 @@ public class ShowExecutor { throw new AnalysisException("job is not exist."); } } else { - LOG.debug("load_job_id={}", jobId); + if (LOG.isDebugEnabled()) { + LOG.debug("load_job_id={}", jobId); + } jobId = showWarningsStmt.getJobId(); job = load.getLoadJob(jobId); if (job == null) { @@ -3040,4 +3042,3 @@ public class ShowExecutor { } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/SimpleScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/qe/SimpleScheduler.java index 9898f15aa6..0cafcae58c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/SimpleScheduler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/SimpleScheduler.java 
@@ -65,7 +65,9 @@ public class SimpleScheduler { if (CollectionUtils.isEmpty(locations) || backends == null || backends.isEmpty()) { throw new UserException(SystemInfoService.NO_SCAN_NODE_BACKEND_AVAILABLE_MSG); } - LOG.debug("getHost backendID={}, backendSize={}", backendId, backends.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("getHost backendID={}, backendSize={}", backendId, backends.size()); + } Backend backend = backends.get(backendId); if (isAvailable(backend)) { @@ -198,7 +200,9 @@ public class SimpleScheduler { @Override public void run() { - LOG.debug("UpdateBlacklistThread is start to run"); + if (LOG.isDebugEnabled()) { + LOG.debug("UpdateBlacklistThread is start to run"); + } while (true) { try { Thread.sleep(1000L); @@ -220,8 +224,10 @@ public class SimpleScheduler { iterator.remove(); LOG.warn("remove backend {} from black list. reach max try time", backendId); } else { - LOG.debug("blacklistBackends backendID={} retryTimes={}", - backendId, entry.getValue().first); + if (LOG.isDebugEnabled()) { + LOG.debug("blacklistBackends backendID={} retryTimes={}", + backendId, entry.getValue().first); + } } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java index 5c89a5c059..f8d8a1fda2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java @@ -494,8 +494,10 @@ public class StmtExecutor { MinidumpUtils.saveMinidumpString(context.getMinidump(), DebugUtil.printId(context.queryId())); } // try to fall back to legacy planner - LOG.debug("nereids cannot process statement\n" + originStmt.originStmt - + "\n because of " + e.getMessage(), e); + if (LOG.isDebugEnabled()) { + LOG.debug("nereids cannot process statement\n" + originStmt.originStmt + + "\n because of " + e.getMessage(), e); + } if (notAllowFallback()) { LOG.warn("Analyze failed. {}", context.getQueryIdentifier(), e); throw ((NereidsException) e).getException(); @@ -507,7 +509,9 @@ public class StmtExecutor { LOG.warn("Analyze failed. {}", context.getQueryIdentifier(), e); throw ((NereidsException) e).getException(); } - LOG.debug("fall back to legacy planner on statement:\n{}", originStmt.originStmt); + if (LOG.isDebugEnabled()) { + LOG.debug("fall back to legacy planner on statement:\n{}", originStmt.originStmt); + } parsedStmt = null; planner = null; // Attention: currently exception from nereids does not mean an Exception to user terminal @@ -566,7 +570,9 @@ public class StmtExecutor { } private void executeByNereids(TUniqueId queryId) throws Exception { - LOG.debug("Nereids start to execute query:\n {}", originStmt.originStmt); + if (LOG.isDebugEnabled()) { + LOG.debug("Nereids start to execute query:\n {}", originStmt.originStmt); + } context.setQueryId(queryId); context.setStartTime(); profile.getSummaryProfile().setQueryBeginTime(); @@ -609,19 +615,25 @@ public class StmtExecutor { try { ((Command) logicalPlan).run(context, this); } catch (QueryStateException e) { - LOG.debug("Command(" + originStmt.originStmt + ") process failed.", e); + if (LOG.isDebugEnabled()) { + LOG.debug("Command(" + originStmt.originStmt + ") process failed.", e); + } context.setState(e.getQueryState()); throw new NereidsException("Command(" + originStmt.originStmt + ") process failed", new AnalysisException(e.getMessage(), e)); } catch (UserException e) { // Return message to info client what happened. 
- LOG.debug("Command(" + originStmt.originStmt + ") process failed.", e); + if (LOG.isDebugEnabled()) { + LOG.debug("Command(" + originStmt.originStmt + ") process failed.", e); + } context.getState().setError(e.getMysqlErrorCode(), e.getMessage()); throw new NereidsException("Command (" + originStmt.originStmt + ") process failed", new AnalysisException(e.getMessage(), e)); } catch (Exception e) { // Maybe our bug - LOG.debug("Command (" + originStmt.originStmt + ") process failed.", e); + if (LOG.isDebugEnabled()) { + LOG.debug("Command (" + originStmt.originStmt + ") process failed.", e); + } context.getState().setError(ErrorCode.ERR_UNKNOWN_ERROR, e.getMessage()); throw new NereidsException("Command (" + originStmt.originStmt + ") process failed.", new AnalysisException(e.getMessage(), e)); @@ -637,7 +649,9 @@ public class StmtExecutor { planner.plan(parsedStmt, context.getSessionVariable().toThrift()); checkBlockRules(); } catch (Exception e) { - LOG.debug("Nereids plan query failed:\n{}", originStmt.originStmt); + if (LOG.isDebugEnabled()) { + LOG.debug("Nereids plan query failed:\n{}", originStmt.originStmt); + } throw new NereidsException(new AnalysisException(e.getMessage(), e)); } profile.getSummaryProfile().setQueryPlanFinishTime(); @@ -758,7 +772,9 @@ public class StmtExecutor { } return; } else { - LOG.debug("no need to transfer to Master. stmt: {}", context.getStmtId()); + if (LOG.isDebugEnabled()) { + LOG.debug("no need to transfer to Master. stmt: {}", context.getStmtId()); + } } } else { analyzer = new Analyzer(context.getEnv(), context); @@ -928,7 +944,9 @@ public class StmtExecutor { private void forwardToMaster() throws Exception { masterOpExecutor = new MasterOpExecutor(originStmt, context, redirectStatus, isQuery()); - LOG.debug("need to transfer to Master. stmt: {}", context.getStmtId()); + if (LOG.isDebugEnabled()) { + LOG.debug("need to transfer to Master. 
stmt: {}", context.getStmtId()); + } masterOpExecutor.execute(); if (parsedStmt instanceof SetStmt) { SetStmt setStmt = (SetStmt) parsedStmt; @@ -978,7 +996,9 @@ public class StmtExecutor { planner = preparedStmtCtx.planner; analyzer = preparedStmtCtx.analyzer; prepareStmt = preparedStmtCtx.stmt; - LOG.debug("already prepared stmt: {}", preparedStmtCtx.stmtString); + if (LOG.isDebugEnabled()) { + LOG.debug("already prepared stmt: {}", preparedStmtCtx.stmtString); + } isExecuteStmt = true; if (!preparedStmtCtx.stmt.needReAnalyze()) { // Return directly to bypass analyze and plan @@ -1100,7 +1120,9 @@ public class StmtExecutor { } } if (preparedStmtReanalyzed) { - LOG.debug("update planner and analyzer after prepared statement reanalyzed"); + if (LOG.isDebugEnabled()) { + LOG.debug("update planner and analyzer after prepared statement reanalyzed"); + } preparedStmtCtx.planner = planner; preparedStmtCtx.analyzer = analyzer; Preconditions.checkNotNull(preparedStmtCtx.stmt); @@ -1164,7 +1186,9 @@ public class StmtExecutor { parsedStmt.analyze(analyzer); if (parsedStmt instanceof QueryStmt || parsedStmt instanceof InsertStmt) { if (parsedStmt instanceof NativeInsertStmt && ((NativeInsertStmt) parsedStmt).isGroupCommit()) { - LOG.debug("skip generate query plan for group commit insert"); + if (LOG.isDebugEnabled()) { + LOG.debug("skip generate query plan for group commit insert"); + } return; } ExprRewriter rewriter = analyzer.getExprRewriter(); @@ -1545,10 +1569,12 @@ public class StmtExecutor { updateProfile(false); if (coordBase.getInstanceTotalNum() > 1 && LOG.isDebugEnabled()) { try { - LOG.debug("Start to execute fragment. user: {}, db: {}, sql: {}, fragment instance num: {}", - context.getQualifiedUser(), context.getDatabase(), - parsedStmt.getOrigStmt().originStmt.replace("\n", " "), - coordBase.getInstanceTotalNum()); + if (LOG.isDebugEnabled()) { + LOG.debug("Start to execute fragment. user: {}, db: {}, sql: {}, fragment instance num: {}", + context.getQualifiedUser(), context.getDatabase(), + parsedStmt.getOrigStmt().originStmt.replace("\n", " "), + coordBase.getInstanceTotalNum()); + } } catch (Exception e) { LOG.warn("Fail to print fragment concurrency for Query.", e); } @@ -1559,10 +1585,13 @@ public class StmtExecutor { profile.getSummaryProfile().setTempStartTime(); if (coordBase.getInstanceTotalNum() > 1 && LOG.isDebugEnabled()) { try { - LOG.debug("Finish to execute fragment. user: {}, db: {}, sql: {}, fragment instance num: {}", - context.getQualifiedUser(), context.getDatabase(), - parsedStmt.getOrigStmt().originStmt.replace("\n", " "), - coordBase.getInstanceTotalNum()); + if (LOG.isDebugEnabled()) { + LOG.debug("Finish to execute fragment. user: {}, db: {}, sql: {}, " + + "fragment instance num: {}", + context.getQualifiedUser(), context.getDatabase(), + parsedStmt.getOrigStmt().originStmt.replace("\n", " "), + coordBase.getInstanceTotalNum()); + } } catch (Exception e) { LOG.warn("Fail to print fragment concurrency for Query.", e); } @@ -1653,10 +1682,12 @@ public class StmtExecutor { coordBase.close(); if (coordBase.getInstanceTotalNum() > 1 && LOG.isDebugEnabled()) { try { - LOG.debug("Finish to execute fragment. user: {}, db: {}, sql: {}, fragment instance num: {}", - context.getQualifiedUser(), context.getDatabase(), - parsedStmt.getOrigStmt().originStmt.replace("\n", " "), - coordBase.getInstanceTotalNum()); + if (LOG.isDebugEnabled()) { + LOG.debug("Finish to execute fragment. 
user: {}, db: {}, sql: {}, fragment instance num: {}", + context.getQualifiedUser(), context.getDatabase(), + parsedStmt.getOrigStmt().originStmt.replace("\n", " "), + coordBase.getInstanceTotalNum()); + } } catch (Exception e) { LOG.warn("Fail to print fragment concurrency for Query.", e); } @@ -2018,7 +2049,9 @@ public class StmtExecutor { } coord.exec(); int execTimeout = context.getExecTimeout(); - LOG.debug("Insert {} execution timeout:{}", DebugUtil.printId(context.queryId()), execTimeout); + if (LOG.isDebugEnabled()) { + LOG.debug("Insert {} execution timeout:{}", DebugUtil.printId(context.queryId()), execTimeout); + } boolean notTimeout = coord.join(execTimeout); if (!coord.isDone()) { coord.cancel(Types.PPlanFragmentCancelReason.TIMEOUT); @@ -2037,7 +2070,9 @@ public class StmtExecutor { ErrorReport.reportDdlException(errMsg, ErrorCode.ERR_FAILED_WHEN_INSERT); } - LOG.debug("delta files is {}", coord.getDeltaUrls()); + if (LOG.isDebugEnabled()) { + LOG.debug("delta files is {}", coord.getDeltaUrls()); + } if (coord.getLoadCounters().get(LoadEtlTask.DPP_NORMAL_ALL) != null) { loadedRows = Long.parseLong(coord.getLoadCounters().get(LoadEtlTask.DPP_NORMAL_ALL)); @@ -2170,7 +2205,9 @@ public class StmtExecutor { } } catch (UserException e) { // Return message to info client what happened. - LOG.debug("DDL statement({}) process failed.", originStmt.originStmt, e); + if (LOG.isDebugEnabled()) { + LOG.debug("DDL statement({}) process failed.", originStmt.originStmt, e); + } context.getState().setError(e.getMysqlErrorCode(), e.getMessage()); } catch (Exception e) { // Maybe our bug @@ -2206,8 +2243,10 @@ public class StmtExecutor { private void handlePrepareStmt() throws Exception { // register prepareStmt - LOG.debug("add prepared statement {}, isBinaryProtocol {}", - prepareStmt.getName(), prepareStmt.isBinaryProtocol()); + if (LOG.isDebugEnabled()) { + LOG.debug("add prepared statement {}, isBinaryProtocol {}", + prepareStmt.getName(), prepareStmt.isBinaryProtocol()); + } context.addPreparedStmt(prepareStmt.getName(), new PrepareStmtContext(prepareStmt, context, planner, analyzer, prepareStmt.getName())); @@ -2280,7 +2319,9 @@ public class StmtExecutor { // but for other client, type should be correct List types = exprToStringType(prepareStmt.getPlaceHolderExprList()); List colNames = prepareStmt.getColLabelsOfPlaceHolders(); - LOG.debug("sendFields {}, {}", colNames, types); + if (LOG.isDebugEnabled()) { + LOG.debug("sendFields {}, {}", colNames, types); + } for (int i = 0; i < colNames.size(); ++i) { serializer.reset(); serializer.writeField(colNames.get(i), Type.fromPrimitiveType(types.get(i))); @@ -2300,7 +2341,9 @@ public class StmtExecutor { // sends how many columns serializer.reset(); serializer.writeVInt(colNames.size()); - LOG.debug("sendFields {}", colNames); + if (LOG.isDebugEnabled()) { + LOG.debug("sendFields {}", colNames); + } context.getMysqlChannel().sendOnePacket(serializer.toByteBuffer()); // send field one by one for (int i = 0; i < colNames.size(); ++i) { @@ -2441,7 +2484,9 @@ public class StmtExecutor { } } catch (UserException e) { // Return message to info client what happened. 
- LOG.debug("DDL statement({}) process failed.", originStmt.originStmt, e); + if (LOG.isDebugEnabled()) { + LOG.debug("DDL statement({}) process failed.", originStmt.originStmt, e); + } context.getState().setError(e.getMysqlErrorCode(), e.getMessage()); } catch (Exception e) { // Maybe our bug @@ -2755,7 +2800,9 @@ public class StmtExecutor { } public List executeInternalQuery() { - LOG.debug("INTERNAL QUERY: " + originStmt.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("INTERNAL QUERY: " + originStmt.toString()); + } UUID uuid = UUID.randomUUID(); TUniqueId queryId = new TUniqueId(uuid.getMostSignificantBits(), uuid.getLeastSignificantBits()); context.setQueryId(queryId); @@ -2910,5 +2957,3 @@ public class StmtExecutor { return true; } } - - diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/VariableMgr.java b/fe/fe-core/src/main/java/org/apache/doris/qe/VariableMgr.java index 84dfcef543..13fe89acbb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/VariableMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/VariableMgr.java @@ -303,7 +303,9 @@ public class VariableMgr { try { checkUpdate(setVar, varCtx.getFlag()); } catch (DdlException e) { - LOG.debug("no need to set var for non master fe: {}", setVar.getVariable(), e); + if (LOG.isDebugEnabled()) { + LOG.debug("no need to set var for non master fe: {}", setVar.getVariable(), e); + } return; } setVarInternal(sessionVariable, setVar, varCtx); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/Cache.java b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/Cache.java index 815ca6b0d8..73cafad3ee 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/Cache.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/Cache.java @@ -86,14 +86,18 @@ public abstract class Cache { return false; } if (rowBatchBuilder.getRowSize() > Config.cache_result_max_row_count) { - LOG.debug("can not be cached. rowbatch size {} is more than {}", rowBatchBuilder.getRowSize(), - Config.cache_result_max_row_count); + if (LOG.isDebugEnabled()) { + LOG.debug("can not be cached. rowbatch size {} is more than {}", rowBatchBuilder.getRowSize(), + Config.cache_result_max_row_count); + } rowBatchBuilder.clear(); disableCache = true; return false; } else if (rowBatchBuilder.getDataSize() > Config.cache_result_max_data_size) { - LOG.debug("can not be cached. rowbatch data size {} is more than {}", rowBatchBuilder.getDataSize(), - Config.cache_result_max_data_size); + if (LOG.isDebugEnabled()) { + LOG.debug("can not be cached. 
rowbatch data size {} is more than {}", rowBatchBuilder.getDataSize(), + Config.cache_result_max_data_size); + } rowBatchBuilder.clear(); disableCache = true; return false; diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java index 1527cdbeac..30c90d0ae3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java @@ -161,8 +161,10 @@ public class CacheAnalyzer { } public void debug() { - LOG.debug("table {}, partition id {}, ver {}, time {}, partition num {}, sumOfPartitionNum: {}", - table.getName(), latestPartitionId, latestVersion, latestTime, partitionNum, sumOfPartitionNum); + if (LOG.isDebugEnabled()) { + LOG.debug("table {}, partition id {}, ver {}, time {}, partition num {}, sumOfPartitionNum: {}", + table.getName(), latestPartitionId, latestVersion, latestTime, partitionNum, sumOfPartitionNum); + } } } @@ -202,11 +204,15 @@ public class CacheAnalyzer { private CacheMode innerCheckCacheMode(long now) { if (!enableCache()) { - LOG.debug("cache is disabled. queryid {}", DebugUtil.printId(queryId)); + if (LOG.isDebugEnabled()) { + LOG.debug("cache is disabled. queryid {}", DebugUtil.printId(queryId)); + } return CacheMode.NoNeed; } if (!(parsedStmt instanceof SelectStmt) || scanNodes.size() == 0) { - LOG.debug("not a select stmt or no scan node. queryid {}", DebugUtil.printId(queryId)); + if (LOG.isDebugEnabled()) { + LOG.debug("not a select stmt or no scan node. queryid {}", DebugUtil.printId(queryId)); + } return CacheMode.NoNeed; } this.selectStmt = (SelectStmt) parsedStmt; @@ -239,11 +245,15 @@ public class CacheAnalyzer { // TODO:wxy support partition cache for hive table later if (!(latestTable.table instanceof OlapTable)) { - LOG.debug("only support partition cache for olap table now. queryid {}", DebugUtil.printId(queryId)); + if (LOG.isDebugEnabled()) { + LOG.debug("only support partition cache for olap table now. queryid {}", DebugUtil.printId(queryId)); + } return CacheMode.None; } if (!enablePartitionCache()) { - LOG.debug("partition query cache is disabled. queryid {}", DebugUtil.printId(queryId)); + if (LOG.isDebugEnabled()) { + LOG.debug("partition query cache is disabled. 
queryid {}", DebugUtil.printId(queryId)); + } return CacheMode.None; } @@ -251,37 +261,47 @@ public class CacheAnalyzer { //Only one table can be updated in Config.cache_last_version_interval_second range for (int i = 1; i < tblTimeList.size(); i++) { if ((now - tblTimeList.get(i).latestTime) < Config.cache_last_version_interval_second * 1000L) { - LOG.debug("the time of other tables is newer than {} s, queryid {}", - Config.cache_last_version_interval_second, DebugUtil.printId(queryId)); + if (LOG.isDebugEnabled()) { + LOG.debug("the time of other tables is newer than {} s, queryid {}", + Config.cache_last_version_interval_second, DebugUtil.printId(queryId)); + } return CacheMode.None; } } OlapTable olapTable = (OlapTable) latestTable.table; if (olapTable.getPartitionInfo().getType() != PartitionType.RANGE) { - LOG.debug("the partition of OlapTable not RANGE type, queryid {}", DebugUtil.printId(queryId)); + if (LOG.isDebugEnabled()) { + LOG.debug("the partition of OlapTable not RANGE type, queryid {}", DebugUtil.printId(queryId)); + } return CacheMode.None; } partitionInfo = (RangePartitionInfo) olapTable.getPartitionInfo(); List columns = partitionInfo.getPartitionColumns(); //Partition key has only one column if (columns.size() != 1) { - LOG.debug("more than one partition column {}, queryid {}", columns.size(), - DebugUtil.printId(queryId)); + if (LOG.isDebugEnabled()) { + LOG.debug("more than one partition column {}, queryid {}", columns.size(), + DebugUtil.printId(queryId)); + } return CacheMode.None; } partColumn = columns.get(0); //Check if group expr contain partition column if (!checkGroupByPartitionKey(this.selectStmt, partColumn)) { - LOG.debug("group by columns does not contains all partition column, queryid {}", - DebugUtil.printId(queryId)); + if (LOG.isDebugEnabled()) { + LOG.debug("group by columns does not contains all partition column, queryid {}", + DebugUtil.printId(queryId)); + } return CacheMode.None; } //Check if whereClause have one CompoundPredicate of partition column List compoundPredicates = Lists.newArrayList(); getPartitionKeyFromSelectStmt(this.selectStmt, partColumn, compoundPredicates); if (compoundPredicates.size() != 1) { - LOG.debug("empty or more than one predicates contain partition column, queryid {}", - DebugUtil.printId(queryId)); + if (LOG.isDebugEnabled()) { + LOG.debug("empty or more than one predicates contain partition column, queryid {}", + DebugUtil.printId(queryId)); + } return CacheMode.None; } partitionPredicate = compoundPredicates.get(0); @@ -399,8 +419,10 @@ public class CacheAnalyzer { } if (!(olapScanNodeSize == scanNodes.size() || hiveScanNodeSize == scanNodes.size())) { - LOG.debug("only support olap/hive table with non-federated query, other types are not supported now, " - + "queryId {}", DebugUtil.printId(queryId)); + if (LOG.isDebugEnabled()) { + LOG.debug("only support olap/hive table with non-federated query, other types are not supported now, " + + "queryId {}", DebugUtil.printId(queryId)); + } return Collections.emptyList(); } @@ -412,8 +434,11 @@ public class CacheAnalyzer { && ((OlapScanNode) node).getSelectedPartitionNum() > 1 && selectStmt != null && selectStmt.hasGroupByClause()) { - LOG.debug("more than one partition scanned when qeury has agg, partition cache cannot use, queryid {}", - DebugUtil.printId(queryId)); + if (LOG.isDebugEnabled()) { + LOG.debug("more than one partition scanned when qeury has agg, " + + "partition cache cannot use, queryid {}", + DebugUtil.printId(queryId)); + } return 
Collections.emptyList(); } CacheTable cTable = node instanceof OlapScanNode @@ -451,13 +476,17 @@ public class CacheAnalyzer { rowCount += value.getRowsCount(); dataSize += value.getDataSize(); } - LOG.debug("hit cache, mode {}, queryid {}, all count {}, value count {}, row count {}, data size {}", - cacheMode, DebugUtil.printId(queryId), - cacheResult.getAllCount(), cacheResult.getValuesCount(), - rowCount, dataSize); + if (LOG.isDebugEnabled()) { + LOG.debug("hit cache, mode {}, queryid {}, all count {}, value count {}, row count {}, data size {}", + cacheMode, DebugUtil.printId(queryId), + cacheResult.getAllCount(), cacheResult.getValuesCount(), + rowCount, dataSize); + } } else { - LOG.debug("miss cache, mode {}, queryid {}, code {}, msg {}", cacheMode, - DebugUtil.printId(queryId), status.getErrorCode(), status.getErrorMsg()); + if (LOG.isDebugEnabled()) { + LOG.debug("miss cache, mode {}, queryid {}, code {}, msg {}", cacheMode, + DebugUtil.printId(queryId), status.getErrorCode(), status.getErrorMsg()); + } cacheResult = null; } return cacheResult; @@ -667,4 +696,3 @@ public class CacheAnalyzer { cache.updateCache(); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheCoordinator.java b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheCoordinator.java index 0d77b4d073..232a9400e6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheCoordinator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheCoordinator.java @@ -86,7 +86,9 @@ public class CacheCoordinator { if (SimpleScheduler.isAvailable(virtualNode)) { break; } else { - LOG.debug("backend {} not alive, key {}, retry {}", virtualNode.getId(), key, retryTimes); + if (LOG.isDebugEnabled()) { + LOG.debug("backend {} not alive, key {}, retry {}", virtualNode.getId(), key, retryTimes); + } virtualNode = null; } tailMap = tailMap.tailMap(key + 1); @@ -133,7 +135,10 @@ public class CacheCoordinator { String nodeName = String.valueOf(bid) + "::" + String.valueOf(i); Types.PUniqueId nodeId = CacheBeProxy.getMd5(nodeName); virtualNodes.remove(nodeId.getHi()); - LOG.debug("remove backend id {}, virtual node name {} hashcode {}", bid, nodeName, nodeId.getHi()); + if (LOG.isDebugEnabled()) { + LOG.debug("remove backend id {}, virtual node name {} hashcode {}", + bid, nodeName, nodeId.getHi()); + } } itr.remove(); } @@ -149,7 +154,10 @@ public class CacheCoordinator { String nodeName = String.valueOf(backend.getId()) + "::" + String.valueOf(i); Types.PUniqueId nodeId = CacheBeProxy.getMd5(nodeName); virtualNodes.put(nodeId.getHi(), backend); - LOG.debug("add backend id {}, virtual node name {} hashcode {}", backend.getId(), nodeName, nodeId.getHi()); + if (LOG.isDebugEnabled()) { + LOG.debug("add backend id {}, virtual node name {} hashcode {}", + backend.getId(), nodeName, nodeId.getHi()); + } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/PartitionRange.java b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/PartitionRange.java index 9a0f87167d..446caab2a1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/PartitionRange.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/PartitionRange.java @@ -427,7 +427,9 @@ public class PartitionRange { public boolean rewritePredicate(CompoundPredicate predicate, List rangeList) { if (predicate.getOp() != CompoundPredicate.Operator.AND) { - LOG.debug("predicate op {}", predicate.getOp().toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("predicate op {}", predicate.getOp().toString()); + } 
return false; } for (Expr expr : predicate.getChildren()) { @@ -570,7 +572,9 @@ public class PartitionRange { private PartitionColumnFilter createPartitionFilter(CompoundPredicate partitionKeyPredicate, Column partitionColumn) { if (partitionKeyPredicate.getOp() != CompoundPredicate.Operator.AND) { - LOG.debug("not and op"); + if (LOG.isDebugEnabled()) { + LOG.debug("not and op"); + } return null; } PartitionColumnFilter partitionColumnFilter = new PartitionColumnFilter(); @@ -583,7 +587,9 @@ public class PartitionRange { continue; } if (binPredicate.getOp() == BinaryPredicate.Operator.NE) { - LOG.debug("not support NE operator"); + if (LOG.isDebugEnabled()) { + LOG.debug("not support NE operator"); + } continue; } Expr slotBinding; @@ -592,7 +598,9 @@ public class PartitionRange { } else if (binPredicate.getChild(0) instanceof LiteralExpr) { slotBinding = binPredicate.getChild(0); } else { - LOG.debug("not find LiteralExpr"); + if (LOG.isDebugEnabled()) { + LOG.debug("not find LiteralExpr"); + } continue; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/SqlCache.java b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/SqlCache.java index 803e80da0a..52736ab221 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/SqlCache.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/SqlCache.java @@ -51,7 +51,9 @@ public class SqlCache extends Cache { public String getSqlWithViewStmt() { String originSql = selectStmt != null ? selectStmt.toSql() : this.originSql; String cacheKey = originSql + "|" + allViewExpandStmtListStr; - LOG.debug("Cache key: {}", cacheKey); + if (LOG.isDebugEnabled()) { + LOG.debug("Cache key: {}", cacheKey); + } return cacheKey; } @@ -121,4 +123,3 @@ public class SqlCache extends Cache { } } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroup.java b/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroup.java index 91a516b9c5..b998eb877f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroup.java +++ b/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroup.java @@ -196,7 +196,9 @@ public class WorkloadGroup implements Writable, GsonPostProcessable { throw new DdlException(memLimitErr); } } catch (NumberFormatException e) { - LOG.debug(memLimitErr, e); + if (LOG.isDebugEnabled()) { + LOG.debug(memLimitErr, e); + } throw new DdlException(memLimitErr); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroupMgr.java b/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroupMgr.java index 44c5de4194..333ba6d98f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroupMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroupMgr.java @@ -127,7 +127,9 @@ public class WorkloadGroupMgr implements Writable, GsonPostProcessable { currentQueryQueue.resetQueueProperty(newPropQq.getMaxConcurrency(), newPropQq.getMaxQueueSize(), newPropQq.getQueueTimeout(), newPropQq.getPropVersion()); } - LOG.debug(currentQueryQueue.debugString()); // for test debug + if (LOG.isDebugEnabled()) { + LOG.debug(currentQueryQueue.debugString()); // for test debug + } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExtractCommonFactorsRule.java b/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExtractCommonFactorsRule.java index dcc6cd4a18..eafccae493 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExtractCommonFactorsRule.java +++ b/fe/fe-core/src/main/java/org/apache/doris/rewrite/ExtractCommonFactorsRule.java @@ -243,7 +243,9 @@ public class ExtractCommonFactorsRule implements ExprRewriteRule { range = range.intersection(predicateRange); } catch (IllegalArgumentException | ClassCastException e) { // (a >1 and a < 0) ignore this OR clause - LOG.debug("The range without intersection", e); + if (LOG.isDebugEnabled()) { + LOG.debug("The range without intersection", e); + } continue OUT_CONJUNCTS; } } @@ -353,7 +355,9 @@ public class ExtractCommonFactorsRule implements ExprRewriteRule { clause1Entry.getValue().add(clause2Value); } catch (ClassCastException e) { // ignore a >1.0 or a = arrayMaxSize || column >= arrayMaxSize) { - LOG.debug("Error row {} or column {}, but max size is {}.", row, column, arrayMaxSize); + if (LOG.isDebugEnabled()) { + LOG.debug("Error row {} or column {}, but max size is {}.", row, column, arrayMaxSize); + } needGenWarshallArray = false; break; } else { diff --git a/fe/fe-core/src/main/java/org/apache/doris/scheduler/executor/TransientTaskExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/scheduler/executor/TransientTaskExecutor.java index f4205a6f4b..5eeb24007d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/scheduler/executor/TransientTaskExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/scheduler/executor/TransientTaskExecutor.java @@ -37,4 +37,3 @@ public interface TransientTaskExecutor { Long getId(); } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendOptions.java b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendOptions.java index f94e090709..35b6d5e01e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendOptions.java +++ b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendOptions.java @@ -68,7 +68,9 @@ public class FrontendOptions { boolean hasMatchedIp = false; if (!priorityCidrs.isEmpty()) { for (InetAddress addr : hosts) { - LOG.debug("check ip address: {}", addr); + if (LOG.isDebugEnabled()) { + LOG.debug("check ip address: {}", addr); + } if (isInPriorNetwork(addr.getHostAddress())) { localAddr = addr; hasMatchedIp = true; @@ -107,7 +109,9 @@ public class FrontendOptions { try { fqdnString = InetAddress.getLocalHost().getCanonicalHostName(); String ip = InetAddress.getLocalHost().getHostAddress(); - LOG.debug("ip is {}", ip); + if (LOG.isDebugEnabled()) { + LOG.debug("ip is {}", ip); + } } catch (UnknownHostException e) { LOG.error("Got a UnknownHostException when try to get FQDN"); System.exit(-1); @@ -141,10 +145,14 @@ public class FrontendOptions { // Check the InetAddress obtained via FQDN boolean hasInetAddr = false; - LOG.debug("fqdnString is {}", fqdnString); + if (LOG.isDebugEnabled()) { + LOG.debug("fqdnString is {}", fqdnString); + } for (InetAddress addr : hosts) { - LOG.debug("Try to match addr, ip: {}, FQDN: {}", - addr.getHostAddress(), addr.getCanonicalHostName()); + if (LOG.isDebugEnabled()) { + LOG.debug("Try to match addr, ip: {}, FQDN: {}", + addr.getHostAddress(), addr.getCanonicalHostName()); + } if (addr.getCanonicalHostName().equals(uncheckedInetAddress.getCanonicalHostName())) { hasInetAddr = true; break; diff --git a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java index 53a32dd8ad..9f33035a24 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java @@ -379,7 +379,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TGetDbsResult getDbNames(TGetDbsParams params) throws TException { - LOG.debug("get db request: {}", params); + if (LOG.isDebugEnabled()) { + LOG.debug("get db request: {}", params); + } TGetDbsResult result = new TGetDbsResult(); List dbNames = Lists.newArrayList(); @@ -416,7 +418,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { LOG.warn("failed to get database names for catalog {}", catalog.getName(), e); // Some external catalog may fail to get databases due to wrong connection info. } - LOG.debug("get db size: {}, in catalog: {}", dbs.size(), catalog.getName()); + if (LOG.isDebugEnabled()) { + LOG.debug("get db size: {}, in catalog: {}", dbs.size(), catalog.getName()); + } if (dbs.isEmpty() && params.isSetGetNullCatalog() && params.get_null_catalog) { catalogNames.add(catalog.getName()); dbNames.add("NULL"); @@ -481,7 +485,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @LogException @Override public TGetTablesResult getTableNames(TGetTablesParams params) throws TException { - LOG.debug("get table name request: {}", params); + if (LOG.isDebugEnabled()) { + LOG.debug("get table name request: {}", params); + } TGetTablesResult result = new TGetTablesResult(); List tablesResult = Lists.newArrayList(); result.setTables(tablesResult); @@ -514,7 +520,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { try { tableNames = db.getTableNamesOrEmptyWithLock(); for (String tableName : tableNames) { - LOG.debug("get table: {}, wait to check", tableName); + if (LOG.isDebugEnabled()) { + LOG.debug("get table: {}, wait to check", tableName); + } if (!Env.getCurrentEnv().getAccessManager() .checkTblPriv(currentUser, dbName, tableName, PrivPredicate.SHOW)) { continue; @@ -533,7 +541,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TListTableStatusResult listTableStatus(TGetTablesParams params) throws TException { - LOG.debug("get list table request: {}", params); + if (LOG.isDebugEnabled()) { + LOG.debug("get list table request: {}", params); + } TListTableStatusResult result = new TListTableStatusResult(); List tablesResult = Lists.newArrayList(); result.setTables(tablesResult); @@ -615,7 +625,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { } public TListTableMetadataNameIdsResult listTableMetadataNameIds(TGetTablesParams params) throws TException { - LOG.debug("get list simple table request: {}", params); + if (LOG.isDebugEnabled()) { + LOG.debug("get list simple table request: {}", params); + } TListTableMetadataNameIdsResult result = new TListTableMetadataNameIdsResult(); List tablesResult = Lists.newArrayList(); @@ -697,7 +709,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TListPrivilegesResult listTablePrivilegeStatus(TGetTablesParams params) throws TException { - LOG.debug("get list table privileges request: {}", params); + if (LOG.isDebugEnabled()) { + LOG.debug("get list table privileges request: {}", params); + } TListPrivilegesResult result = new TListPrivilegesResult(); List tblPrivResult = Lists.newArrayList(); result.setPrivileges(tblPrivResult); @@ -714,7 +728,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TListPrivilegesResult listSchemaPrivilegeStatus(TGetTablesParams params) throws 
TException { - LOG.debug("get list schema privileges request: {}", params); + if (LOG.isDebugEnabled()) { + LOG.debug("get list schema privileges request: {}", params); + } TListPrivilegesResult result = new TListPrivilegesResult(); List tblPrivResult = Lists.newArrayList(); result.setPrivileges(tblPrivResult); @@ -731,7 +747,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TListPrivilegesResult listUserPrivilegeStatus(TGetTablesParams params) throws TException { - LOG.debug("get list user privileges request: {}", params); + if (LOG.isDebugEnabled()) { + LOG.debug("get list user privileges request: {}", params); + } TListPrivilegesResult result = new TListPrivilegesResult(); List userPrivResult = Lists.newArrayList(); result.setPrivileges(userPrivResult); @@ -755,7 +773,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TDescribeTableResult describeTable(TDescribeTableParams params) throws TException { - LOG.debug("get desc table request: {}", params); + if (LOG.isDebugEnabled()) { + LOG.debug("get desc table request: {}", params); + } TDescribeTableResult result = new TDescribeTableResult(); List columns = Lists.newArrayList(); result.setColumns(columns); @@ -821,7 +841,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TDescribeTablesResult describeTables(TDescribeTablesParams params) throws TException { - LOG.debug("get desc tables request: {}", params); + if (LOG.isDebugEnabled()) { + LOG.debug("get desc tables request: {}", params); + } TDescribeTablesResult result = new TDescribeTablesResult(); List columns = Lists.newArrayList(); List tablesOffset = Lists.newArrayList(); @@ -960,7 +982,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { } // add this log so that we can track this stmt - LOG.debug("receive forwarded stmt {} from FE: {}", params.getStmtId(), params.getClientNodeHost()); + if (LOG.isDebugEnabled()) { + LOG.debug("receive forwarded stmt {} from FE: {}", params.getStmtId(), params.getClientNodeHost()); + } ConnectContext context = new ConnectContext(null, true); // Set current connected FE to the client address, so that we can know where this request come from. 
context.setCurrentConnectedFEIp(params.getClientNodeHost()); @@ -1056,7 +1080,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TLoadTxnBeginResult loadTxnBegin(TLoadTxnBeginRequest request) throws TException { String clientAddr = getClientAddrAsString(); - LOG.debug("receive txn begin request: {}, backend: {}", request, clientAddr); + if (LOG.isDebugEnabled()) { + LOG.debug("receive txn begin request: {}, backend: {}", request, clientAddr); + } TLoadTxnBeginResult result = new TLoadTxnBeginResult(); TStatus status = new TStatus(TStatusCode.OK); @@ -1141,7 +1167,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TBeginTxnResult beginTxn(TBeginTxnRequest request) throws TException { String clientAddr = getClientAddrAsString(); - LOG.debug("receive txn begin request: {}, client: {}", request, clientAddr); + if (LOG.isDebugEnabled()) { + LOG.debug("receive txn begin request: {}, client: {}", request, clientAddr); + } TBeginTxnResult result = new TBeginTxnResult(); TStatus status = new TStatus(TStatusCode.OK); @@ -1246,7 +1274,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TLoadTxnCommitResult loadTxnPreCommit(TLoadTxnCommitRequest request) throws TException { String clientAddr = getClientAddrAsString(); - LOG.debug("receive txn pre-commit request: {}, backend: {}", request, clientAddr); + if (LOG.isDebugEnabled()) { + LOG.debug("receive txn pre-commit request: {}, backend: {}", request, clientAddr); + } TLoadTxnCommitResult result = new TLoadTxnCommitResult(); TStatus status = new TStatus(TStatusCode.OK); @@ -1296,7 +1326,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { List multiTableIds = tables.stream().map(Table::getId).collect(Collectors.toList()); Env.getCurrentGlobalTransactionMgr() .updateMultiTableRunningTransactionTableIds(db.getId(), request.getTxnId(), multiTableIds); - LOG.debug("txn {} has multi table {}", request.getTxnId(), request.getTbls()); + if (LOG.isDebugEnabled()) { + LOG.debug("txn {} has multi table {}", request.getTxnId(), request.getTbls()); + } } return tables; } @@ -1349,7 +1381,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TLoadTxn2PCResult loadTxn2PC(TLoadTxn2PCRequest request) throws TException { String clientAddr = getClientAddrAsString(); - LOG.debug("receive txn 2PC request: {}, backend: {}", request, clientAddr); + if (LOG.isDebugEnabled()) { + LOG.debug("receive txn 2PC request: {}, backend: {}", request, clientAddr); + } TLoadTxn2PCResult result = new TLoadTxn2PCResult(); TStatus status = new TStatus(TStatusCode.OK); @@ -1407,7 +1441,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { if (transactionState == null) { throw new UserException("transaction [" + request.getTxnId() + "] not found"); } - LOG.debug("txn {} has multi table {}", request.getTxnId(), transactionState.getTableIdList()); + if (LOG.isDebugEnabled()) { + LOG.debug("txn {} has multi table {}", request.getTxnId(), transactionState.getTableIdList()); + } List tableIdList = transactionState.getTableIdList(); List
tableList = new ArrayList<>(); // if table was dropped, stream load must can abort. @@ -1439,7 +1475,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { multiTableFragmentInstanceIdIndexMap.remove(request.getTxnId()); deleteMultiTableStreamLoadJobIndex(request.getTxnId()); String clientAddr = getClientAddrAsString(); - LOG.debug("receive txn commit request: {}, backend: {}", request, clientAddr); + if (LOG.isDebugEnabled()) { + LOG.debug("receive txn commit request: {}, backend: {}", request, clientAddr); + } TLoadTxnCommitResult result = new TLoadTxnCommitResult(); TStatus status = new TStatus(TStatusCode.OK); @@ -1514,7 +1552,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TCommitTxnResult commitTxn(TCommitTxnRequest request) throws TException { String clientAddr = getClientAddrAsString(); - LOG.debug("receive txn commit request: {}, client: {}", request, clientAddr); + if (LOG.isDebugEnabled()) { + LOG.debug("receive txn commit request: {}, client: {}", request, clientAddr); + } TCommitTxnResult result = new TCommitTxnResult(); TStatus status = new TStatus(TStatusCode.OK); @@ -1624,7 +1664,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TLoadTxnRollbackResult loadTxnRollback(TLoadTxnRollbackRequest request) throws TException { String clientAddr = getClientAddrAsString(); - LOG.debug("receive txn rollback request: {}, backend: {}", request, clientAddr); + if (LOG.isDebugEnabled()) { + LOG.debug("receive txn rollback request: {}, backend: {}", request, clientAddr); + } TLoadTxnRollbackResult result = new TLoadTxnRollbackResult(); TStatus status = new TStatus(TStatusCode.OK); result.setStatus(status); @@ -1703,7 +1745,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TRollbackTxnResult rollbackTxn(TRollbackTxnRequest request) throws TException { String clientAddr = getClientAddrAsString(); - LOG.debug("receive txn rollback request: {}, client: {}", request, clientAddr); + if (LOG.isDebugEnabled()) { + LOG.debug("receive txn rollback request: {}, client: {}", request, clientAddr); + } TRollbackTxnResult result = new TRollbackTxnResult(); TStatus status = new TStatus(TStatusCode.OK); result.setStatus(status); @@ -1796,7 +1840,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TStreamLoadPutResult streamLoadPut(TStreamLoadPutRequest request) { String clientAddr = getClientAddrAsString(); - LOG.debug("receive stream load put request: {}, backend: {}", request, clientAddr); + if (LOG.isDebugEnabled()) { + LOG.debug("receive stream load put request: {}, backend: {}", request, clientAddr); + } TStreamLoadPutResult result = new TStreamLoadPutResult(); TStatus status = new TStatus(TStatusCode.OK); @@ -1949,7 +1995,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { } multiTableFragmentInstanceIdIndexMap.put(request.getTxnId(), ++index); } - LOG.debug("receive stream load multi table put request result: {}", result); + if (LOG.isDebugEnabled()) { + LOG.debug("receive stream load multi table put request result: {}", result); + } } catch (Throwable e) { LOG.warn("catch unknown result.", e); status.setStatusCode(TStatusCode.INTERNAL_ERROR); @@ -2316,7 +2364,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TMySqlLoadAcquireTokenResult acquireToken() throws TException { String clientAddr = getClientAddrAsString(); - LOG.debug("receive acquire token request from 
client: {}", clientAddr); + if (LOG.isDebugEnabled()) { + LOG.debug("receive acquire token request from client: {}", clientAddr); + } TMySqlLoadAcquireTokenResult result = new TMySqlLoadAcquireTokenResult(); TStatus status = new TStatus(TStatusCode.OK); result.setStatus(status); @@ -2336,7 +2386,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TCheckAuthResult checkAuth(TCheckAuthRequest request) throws TException { String clientAddr = getClientAddrAsString(); - LOG.debug("receive auth request: {}, backend: {}", request, clientAddr); + if (LOG.isDebugEnabled()) { + LOG.debug("receive auth request: {}, backend: {}", request, clientAddr); + } TCheckAuthResult result = new TCheckAuthResult(); TStatus status = new TStatus(TStatusCode.OK); @@ -2569,7 +2621,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { public TGetTabletReplicaInfosResult getTabletReplicaInfos(TGetTabletReplicaInfosRequest request) { String clientAddr = getClientAddrAsString(); - LOG.debug("receive get replicas request: {}, backend: {}", request, clientAddr); + if (LOG.isDebugEnabled()) { + LOG.debug("receive get replicas request: {}, backend: {}", request, clientAddr); + } TGetTabletReplicaInfosResult result = new TGetTabletReplicaInfosResult(); List tabletIds = request.getTabletIds(); Map> tabletReplicaInfos = Maps.newHashMap(); @@ -2604,7 +2658,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TAutoIncrementRangeResult getAutoIncrementRange(TAutoIncrementRangeRequest request) { String clientAddr = getClientAddrAsString(); - LOG.debug("receive get auto-increement range request: {}, backend: {}", request, clientAddr); + if (LOG.isDebugEnabled()) { + LOG.debug("receive get auto-increement range request: {}, backend: {}", request, clientAddr); + } TAutoIncrementRangeResult result = new TAutoIncrementRangeResult(); TStatus status = new TStatus(TStatusCode.OK); @@ -2638,7 +2694,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { public TGetBinlogResult getBinlog(TGetBinlogRequest request) throws TException { String clientAddr = getClientAddrAsString(); - LOG.debug("receive get binlog request: {}", request); + if (LOG.isDebugEnabled()) { + LOG.debug("receive get binlog request: {}", request); + } TGetBinlogResult result = new TGetBinlogResult(); TStatus status = new TStatus(TStatusCode.OK); @@ -3043,7 +3101,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { public TGetMasterTokenResult getMasterToken(TGetMasterTokenRequest request) throws TException { String clientAddr = getClientAddrAsString(); - LOG.debug("receive get master token request: {}", request); + if (LOG.isDebugEnabled()) { + LOG.debug("receive get master token request: {}", request); + } TGetMasterTokenResult result = new TGetMasterTokenResult(); TStatus status = new TStatus(TStatusCode.OK); @@ -3075,7 +3135,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { // getBinlogLag public TGetBinlogLagResult getBinlogLag(TGetBinlogRequest request) throws TException { String clientAddr = getClientAddrAsString(); - LOG.debug("receive get binlog request: {}", request); + if (LOG.isDebugEnabled()) { + LOG.debug("receive get binlog request: {}", request); + } TGetBinlogLagResult result = new TGetBinlogLagResult(); TStatus status = new TStatus(TStatusCode.OK); @@ -3335,13 +3397,17 @@ public class FrontendServiceImpl implements FrontendService.Iface { } result.setNodes(nodeInfos); result.setStatus(new 
TStatus(TStatusCode.OK)); - LOG.debug("send create partition result: {}", result); + if (LOG.isDebugEnabled()) { + LOG.debug("send create partition result: {}", result); + } return result; } public TGetMetaResult getMeta(TGetMetaRequest request) throws TException { String clientAddr = getClientAddrAsString(); - LOG.debug("receive get meta request: {}", request); + if (LOG.isDebugEnabled()) { + LOG.debug("receive get meta request: {}", request); + } TGetMetaResult result = new TGetMetaResult(); TStatus status = new TStatus(TStatusCode.OK); @@ -3485,7 +3551,9 @@ public class FrontendServiceImpl implements FrontendService.Iface { public TGetBackendMetaResult getBackendMeta(TGetBackendMetaRequest request) { String clientAddr = getClientAddrAsString(); - LOG.debug("receive get backend meta request: {}", request); + if (LOG.isDebugEnabled()) { + LOG.debug("receive get backend meta request: {}", request); + } TGetBackendMetaResult result = new TGetBackendMetaResult(); TStatus status = new TStatus(TStatusCode.OK); diff --git a/fe/fe-core/src/main/java/org/apache/doris/service/arrowflight/FlightSqlConnectProcessor.java b/fe/fe-core/src/main/java/org/apache/doris/service/arrowflight/FlightSqlConnectProcessor.java index 1655d69c80..818bd1929c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/service/arrowflight/FlightSqlConnectProcessor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/service/arrowflight/FlightSqlConnectProcessor.java @@ -74,7 +74,9 @@ public class FlightSqlConnectProcessor extends ConnectProcessor implements AutoC LOG.warn("Unknown command(" + command + ")"); return; } - LOG.debug("arrow flight sql handle command {}", command); + if (LOG.isDebugEnabled()) { + LOG.debug("arrow flight sql handle command {}", command); + } ctx.setCommand(command); ctx.setStartTime(); } @@ -178,5 +180,3 @@ public class FlightSqlConnectProcessor extends ConnectProcessor implements AutoC ConnectContext.remove(); } } - - diff --git a/fe/fe-core/src/main/java/org/apache/doris/spi/Split.java b/fe/fe-core/src/main/java/org/apache/doris/spi/Split.java index d42defe5c3..f6e6dc0e36 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/spi/Split.java +++ b/fe/fe-core/src/main/java/org/apache/doris/spi/Split.java @@ -49,4 +49,3 @@ public interface Split { void setAlternativeHosts(List alternativeHosts); } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/AggStatsDerive.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/AggStatsDerive.java index 9efac7aff9..9f27860acf 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/AggStatsDerive.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/AggStatsDerive.java @@ -48,8 +48,10 @@ public class AggStatsDerive extends BaseStatsDerive { // rowCount: product of # of distinct values produced by grouping exprs for (Expr groupingExpr : groupingExprs) { long numDistinct = groupingExpr.getNumDistinctValues(); - LOG.debug("grouping expr: " + groupingExpr.toSql() + " #distinct=" + Long.toString( - numDistinct)); + if (LOG.isDebugEnabled()) { + LOG.debug("grouping expr: " + groupingExpr.toSql() + " #distinct=" + Long.toString( + numDistinct)); + } if (numDistinct == -1) { rowCount = -1; break; @@ -66,7 +68,9 @@ public class AggStatsDerive extends BaseStatsDerive { rowCount *= numDistinct; } if (rowCount > 0) { - LOG.debug("sel=" + Double.toString(computeSelectivity())); + if (LOG.isDebugEnabled()) { + LOG.debug("sel=" + Double.toString(computeSelectivity())); + } applyConjunctsSelectivity(); } // if we ended up with an 
overflow, the estimate is certain to be wrong diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisJob.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisJob.java index c747b0da14..22eab37f92 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisJob.java @@ -144,7 +144,9 @@ public class AnalysisJob { if (killed) { return; } - LOG.debug("execute internal sql: {}", stmtExecutor.getOriginStmt()); + if (LOG.isDebugEnabled()) { + LOG.debug("execute internal sql: {}", stmtExecutor.getOriginStmt()); + } try { stmtExecutor.execute(); QueryState queryState = stmtExecutor.getContext().getState(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/BaseAnalysisTask.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/BaseAnalysisTask.java index 5b8f64a5db..789ae0b563 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/BaseAnalysisTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/BaseAnalysisTask.java @@ -313,8 +313,10 @@ public abstract class BaseAnalysisTask { queryId = DebugUtil.printId(stmtExecutor.getContext().queryId()); job.appendBuf(this, Collections.singletonList(colStatsData)); } finally { - LOG.debug("End cost time in millisec: " + (System.currentTimeMillis() - startTime) - + " Analyze SQL: " + sql + " QueryId: " + queryId); + if (LOG.isDebugEnabled()) { + LOG.debug("End cost time in millisec: " + (System.currentTimeMillis() - startTime) + + " Analyze SQL: " + sql + " QueryId: " + queryId); + } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/ColumnStatistic.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/ColumnStatistic.java index b01e47ef34..e38ba86c82 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/ColumnStatistic.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/ColumnStatistic.java @@ -122,7 +122,9 @@ public class ColumnStatistic { } } } catch (Throwable t) { - LOG.debug("Failed to deserialize column stats", t); + if (LOG.isDebugEnabled()) { + LOG.debug("Failed to deserialize column stats", t); + } return ColumnStatistic.UNKNOWN; } if (columnStatistic == null) { @@ -153,9 +155,11 @@ public class ColumnStatistic { String colName = row.get(5); Column col = StatisticsUtil.findColumn(catalogId, dbID, tblId, idxId, colName); if (col == null) { - LOG.debug("Failed to deserialize column statistics, ctlId: {} dbId: {}" - + "tblId: {} column: {} not exists", - catalogId, dbID, tblId, colName); + if (LOG.isDebugEnabled()) { + LOG.debug("Failed to deserialize column statistics, ctlId: {} dbId: {}" + + "tblId: {} column: {} not exists", + catalogId, dbID, tblId, colName); + } return ColumnStatistic.UNKNOWN; } String min = row.get(10); diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/ColumnStatisticsCacheLoader.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/ColumnStatisticsCacheLoader.java index eda3645fd0..24c08c8b75 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/ColumnStatisticsCacheLoader.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/ColumnStatisticsCacheLoader.java @@ -54,13 +54,18 @@ public class ColumnStatisticsCacheLoader extends StatisticsCacheLoader, Long> pair = calcActualSampleTablets(tbl.isPartitionColumn(col.getName())); LOG.info("Number of tablets selected {}, rows in tablets {}", pair.first.size(), pair.second); List tabletIds = pair.first; @@ -197,7 +199,9 @@ public 
class OlapAnalysisTask extends BaseAnalysisTask { * 3. calculate column stats based on partition stats */ protected void doFull() throws Exception { - LOG.debug("Will do full collection for column {}", col.getName()); + if (LOG.isDebugEnabled()) { + LOG.debug("Will do full collection for column {}", col.getName()); + } Map<String, String> params = new HashMap<>(); params.put("internalDB", FeConstants.INTERNAL_DB_NAME); params.put("columnStatTbl", StatisticConstants.STATISTIC_TBL_NAME); diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/util/InternalSqlTemplate.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/util/InternalSqlTemplate.java index 207b560bb3..e1ab0274db 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/util/InternalSqlTemplate.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/util/InternalSqlTemplate.java @@ -125,7 +125,9 @@ public class InternalSqlTemplate { } matcher.appendTail(sb); - LOG.debug("Template:{}, params: {}, SQL: {}", template, params, sb.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("Template:{}, params: {}, SQL: {}", template, params, sb.toString()); + } return sb.toString(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java b/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java index 47d9619b07..1194204c94 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java @@ -553,7 +553,9 @@ public class Backend implements Writable { isChanged = true; } } - LOG.debug("update disk info. backendId: {}, diskInfo: {}", id, diskInfo.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("update disk info. backendId: {}, diskInfo: {}", id, diskInfo.toString()); + } } // remove not exist rootPath in backend diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java b/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java index feb3ac8147..1a132b5449 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java @@ -569,7 +569,9 @@ public class SystemInfoService { Preconditions.checkArgument(number >= -1); List<Backend> candidates = policy.getCandidateBackends(idToBackendRef.values()); if (candidates.size() < number || candidates.isEmpty()) { - LOG.debug("Not match policy: {}. candidates num: {}, expected: {}", policy, candidates.size(), number); + if (LOG.isDebugEnabled()) { + LOG.debug("Not match policy: {}. candidates num: {}, expected: {}", policy, candidates.size(), number); + } return Lists.newArrayList(); } @@ -604,7 +606,9 @@ public class SystemInfoService { } if (candidates.size() < number) { - LOG.debug("Not match policy: {}. candidates num: {}, expected: {}", policy, candidates.size(), number); + if (LOG.isDebugEnabled()) { + LOG.debug("Not match policy: {}. 
candidates num: {}, expected: {}", policy, candidates.size(), number); + } return Lists.newArrayList(); } @@ -668,8 +672,10 @@ public class SystemInfoService { return; } atomicLong.set(newReportVersion); - LOG.debug("update backend {} report version: {}, db: {}, table: {}", - backendId, newReportVersion, dbId, tableId); + if (LOG.isDebugEnabled()) { + LOG.debug("update backend {} report version: {}, db: {}, table: {}", + backendId, newReportVersion, dbId, tableId); + } } } @@ -753,7 +759,9 @@ public class SystemInfoService { } public void replayDropBackend(Backend backend) { - LOG.debug("replayDropBackend: {}", backend); + if (LOG.isDebugEnabled()) { + LOG.debug("replayDropBackend: {}", backend); + } // update idToBackend Map copiedBackends = Maps.newHashMap(idToBackendRef); copiedBackends.remove(backend.getId()); @@ -841,7 +849,9 @@ public class SystemInfoService { * return Status.OK if not reach the limit */ public Status checkExceedDiskCapacityLimit(Multimap bePathsMap, boolean floodStage) { - LOG.debug("pathBeMap: {}", bePathsMap); + if (LOG.isDebugEnabled()) { + LOG.debug("pathBeMap: {}", bePathsMap); + } ImmutableMap pathHashToDiskInfo = pathHashToDiskInfoRef; for (Long beId : bePathsMap.keySet()) { for (Long pathHash : bePathsMap.get(beId)) { @@ -868,7 +878,9 @@ public class SystemInfoService { } ImmutableMap newPathInfos = ImmutableMap.copyOf(copiedPathInfos); pathHashToDiskInfoRef = newPathInfos; - LOG.debug("update path infos: {}", newPathInfos); + if (LOG.isDebugEnabled()) { + LOG.debug("update path infos: {}", newPathInfos); + } } public void modifyBackendHost(ModifyBackendHostNameClause clause) throws UserException { @@ -945,7 +957,9 @@ public class SystemInfoService { memBe.setQueryDisabled(backend.isQueryDisabled()); memBe.setLoadDisabled(backend.isLoadDisabled()); memBe.setHost(backend.getHost()); - LOG.debug("replay modify backend: {}", backend); + if (LOG.isDebugEnabled()) { + LOG.debug("replay modify backend: {}", backend); + } } // Check if there is enough suitable BE for replica allocation diff --git a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/BackendsTableValuedFunction.java b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/BackendsTableValuedFunction.java index 4e6fe27398..04ea7d01ea 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/BackendsTableValuedFunction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/BackendsTableValuedFunction.java @@ -110,4 +110,3 @@ public class BackendsTableValuedFunction extends MetadataTableValuedFunction { return SCHEMA; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/ExternalFileTableValuedFunction.java b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/ExternalFileTableValuedFunction.java index 09de78a5e6..cfa444ba4b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/ExternalFileTableValuedFunction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/ExternalFileTableValuedFunction.java @@ -255,7 +255,9 @@ public abstract class ExternalFileTableValuedFunction extends TableValuedFunctio if (FileFormatUtils.isCsv(formatString)) { FileFormatUtils.parseCsvSchema(csvSchema, getOrDefaultAndRemove(copiedProps, FileFormatConstants.PROP_CSV_SCHEMA, "")); - LOG.debug("get csv schema: {}", csvSchema); + if (LOG.isDebugEnabled()) { + LOG.debug("get csv schema: {}", csvSchema); + } } pathPartitionKeys = Optional.ofNullable( @@ -514,5 +516,3 @@ public abstract class ExternalFileTableValuedFunction extends TableValuedFunctio 
} } - - diff --git a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/FrontendsDisksTableValuedFunction.java b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/FrontendsDisksTableValuedFunction.java index 4c16407e33..cc7ff82b8f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/FrontendsDisksTableValuedFunction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/FrontendsDisksTableValuedFunction.java @@ -94,4 +94,3 @@ public class FrontendsDisksTableValuedFunction extends MetadataTableValuedFuncti return SCHEMA; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/FrontendsTableValuedFunction.java b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/FrontendsTableValuedFunction.java index c59927a93c..aded1076a8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/FrontendsTableValuedFunction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/FrontendsTableValuedFunction.java @@ -103,4 +103,3 @@ public class FrontendsTableValuedFunction extends MetadataTableValuedFunction { return SCHEMA; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/JobsTableValuedFunction.java b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/JobsTableValuedFunction.java index 260a483d18..81823d17d4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/JobsTableValuedFunction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/JobsTableValuedFunction.java @@ -124,4 +124,3 @@ public class JobsTableValuedFunction extends MetadataTableValuedFunction { } } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/LocalTableValuedFunction.java b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/LocalTableValuedFunction.java index 1f98b055bf..35eca40305 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/LocalTableValuedFunction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/LocalTableValuedFunction.java @@ -128,4 +128,3 @@ public class LocalTableValuedFunction extends ExternalFileTableValuedFunction { return Env.getCurrentSystemInfo().getBackend(backendId); } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MetadataGenerator.java b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MetadataGenerator.java index 2e0de09d29..ed7611c4f9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MetadataGenerator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MetadataGenerator.java @@ -275,8 +275,10 @@ public class MetadataGenerator { } // backends proc node get result too slow, add log to observer. 
- LOG.debug("backends proc get tablet num cost: {}, total cost: {}", - watch.elapsed(TimeUnit.MILLISECONDS), (System.currentTimeMillis() - start)); + if (LOG.isDebugEnabled()) { + LOG.debug("backends proc get tablet num cost: {}, total cost: {}", + watch.elapsed(TimeUnit.MILLISECONDS), (System.currentTimeMillis() - start)); + } result.setDataBatch(dataBatch); result.setStatus(new TStatus(TStatusCode.OK)); @@ -707,4 +709,3 @@ public class MetadataGenerator { return result; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MvInfosTableValuedFunction.java b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MvInfosTableValuedFunction.java index 44e3ed5840..0b415ccae8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MvInfosTableValuedFunction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MvInfosTableValuedFunction.java @@ -116,4 +116,3 @@ public class MvInfosTableValuedFunction extends MetadataTableValuedFunction { return SCHEMA; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/S3TableValuedFunction.java b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/S3TableValuedFunction.java index bf87b0c8ba..6141222246 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/S3TableValuedFunction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/S3TableValuedFunction.java @@ -178,4 +178,3 @@ public class S3TableValuedFunction extends ExternalFileTableValuedFunction { return "S3TableValuedFunction"; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/TasksTableValuedFunction.java b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/TasksTableValuedFunction.java index c7015f57ff..1895fd53e1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/TasksTableValuedFunction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/TasksTableValuedFunction.java @@ -124,4 +124,3 @@ public class TasksTableValuedFunction extends MetadataTableValuedFunction { } } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/AgentBatchTask.java b/fe/fe-core/src/main/java/org/apache/doris/task/AgentBatchTask.java index aa654fcd21..1f2e662c75 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/AgentBatchTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/task/AgentBatchTask.java @@ -175,8 +175,10 @@ public class AgentBatchTask implements Runnable { client.submitTasks(agentTaskRequests); if (LOG.isDebugEnabled()) { for (AgentTask task : tasks) { - LOG.debug("send task: type[{}], backend[{}], signature[{}]", - task.getTaskType(), backendId, task.getSignature()); + if (LOG.isDebugEnabled()) { + LOG.debug("send task: type[{}], backend[{}], signature[{}]", + task.getTaskType(), backendId, task.getSignature()); + } } } ok = true; @@ -391,7 +393,9 @@ public class AgentBatchTask implements Runnable { return tAgentTaskRequest; } default: - LOG.debug("could not find task type for task [{}]", task); + if (LOG.isDebugEnabled()) { + LOG.debug("could not find task type for task [{}]", task); + } return null; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/AgentClient.java b/fe/fe-core/src/main/java/org/apache/doris/task/AgentClient.java index f90aa67358..d364b9fcbe 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/AgentClient.java +++ b/fe/fe-core/src/main/java/org/apache/doris/task/AgentClient.java @@ -49,7 +49,9 @@ public class AgentClient { public TAgentResult makeSnapshot(TSnapshotRequest request) { 
TAgentResult result = null; - LOG.debug("submit make snapshot task. request: {}", request); + if (LOG.isDebugEnabled()) { + LOG.debug("submit make snapshot task. request: {}", request); + } try { borrowClient(); // submit make snapshot task @@ -65,7 +67,9 @@ public class AgentClient { public TAgentResult releaseSnapshot(String snapshotPath) { TAgentResult result = null; - LOG.debug("submit release snapshot task. snapshotPath: {}", snapshotPath); + if (LOG.isDebugEnabled()) { + LOG.debug("submit release snapshot task. snapshotPath: {}", snapshotPath); + } try { borrowClient(); // submit release snapshot task @@ -81,7 +85,9 @@ public class AgentClient { public Status submitExportTask(TExportTaskRequest request) { Status result = Status.CANCELLED; - LOG.debug("submit export task. request: {}", request); + if (LOG.isDebugEnabled()) { + LOG.debug("submit export task. request: {}", request); + } try { borrowClient(); // submit export task @@ -98,7 +104,9 @@ public class AgentClient { public TExportStatusResult getExportStatus(long jobId, long taskId) { TExportStatusResult result = null; TUniqueId request = new TUniqueId(jobId, taskId); - LOG.debug("get export task status. request: {}", request); + if (LOG.isDebugEnabled()) { + LOG.debug("get export task status. request: {}", request); + } try { borrowClient(); // get export status @@ -115,7 +123,9 @@ public class AgentClient { public Status eraseExportTask(long jobId, long taskId) { Status result = Status.CANCELLED; TUniqueId request = new TUniqueId(jobId, taskId); - LOG.debug("erase export task. request: {}", request); + if (LOG.isDebugEnabled()) { + LOG.debug("erase export task. request: {}", request); + } try { borrowClient(); // erase export task @@ -131,7 +141,9 @@ public class AgentClient { public TCheckStorageFormatResult checkStorageFormat() { TCheckStorageFormatResult result = null; - LOG.debug("submit make snapshot task."); + if (LOG.isDebugEnabled()) { + LOG.debug("submit make snapshot task."); + } try { borrowClient(); result = client.checkStorageFormat(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/AgentTaskQueue.java b/fe/fe-core/src/main/java/org/apache/doris/task/AgentTaskQueue.java index c22f0b2ee3..6ea0934854 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/AgentTaskQueue.java +++ b/fe/fe-core/src/main/java/org/apache/doris/task/AgentTaskQueue.java @@ -66,7 +66,9 @@ public class AgentTaskQueue { } signatureMap.put(signature, task); ++taskNum; - LOG.debug("add task: type[{}], backend[{}], signature[{}]", type, backendId, signature); + if (LOG.isDebugEnabled()) { + LOG.debug("add task: type[{}], backend[{}], signature[{}]", type, backendId, signature); + } return true; } @@ -88,7 +90,9 @@ public class AgentTaskQueue { return; } signatureMap.remove(signature); - LOG.debug("remove task: type[{}], backend[{}], signature[{}]", type, backendId, signature); + if (LOG.isDebugEnabled()) { + LOG.debug("remove task: type[{}], backend[{}], signature[{}]", type, backendId, signature); + } --taskNum; } @@ -114,7 +118,9 @@ public class AgentTaskQueue { } signatureMap.remove(signature); - LOG.debug("remove task: type[{}], backend[{}], signature[{}]", taskType, backendId, signature); + if (LOG.isDebugEnabled()) { + LOG.debug("remove task: type[{}], backend[{}], signature[{}]", taskType, backendId, signature); + } --taskNum; } @@ -209,7 +215,9 @@ public class AgentTaskQueue { } else { if (typeTasks.containsKey(tabletId)) { typeTasks.remove(tabletId); - LOG.debug("remove task: type[{}], backend[{}], signature[{}]", type, 
backendId, tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("remove task: type[{}], backend[{}], signature[{}]", type, backendId, tabletId); + } --taskNum; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/CreateReplicaTask.java b/fe/fe-core/src/main/java/org/apache/doris/task/CreateReplicaTask.java index 818636fe38..476de574b6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/CreateReplicaTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/task/CreateReplicaTask.java @@ -194,8 +194,10 @@ public class CreateReplicaTask extends AgentTask { public void countDownLatch(long backendId, long tabletId) { if (this.latch != null) { if (latch.markedCountDown(backendId, tabletId)) { - LOG.debug("CreateReplicaTask current latch count: {}, backend: {}, tablet:{}", - latch.getCount(), backendId, tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("CreateReplicaTask current latch count: {}, backend: {}, tablet:{}", + latch.getCount(), backendId, tabletId); + } } } } @@ -204,7 +206,9 @@ public class CreateReplicaTask extends AgentTask { public void countDownToZero(String errMsg) { if (this.latch != null) { latch.countDownToZero(new Status(TStatusCode.CANCELLED, errMsg)); - LOG.debug("CreateReplicaTask download to zero. error msg: {}", errMsg); + if (LOG.isDebugEnabled()) { + LOG.debug("CreateReplicaTask download to zero. error msg: {}", errMsg); + } } } @@ -276,7 +280,9 @@ public class CreateReplicaTask extends AgentTask { tSchema.setVersionColIdx(versionCol); if (!CollectionUtils.isEmpty(clusterKeyIndexes)) { tSchema.setClusterKeyIdxes(clusterKeyIndexes); - LOG.debug("cluster key index={}, table_id={}, tablet_id={}", clusterKeyIndexes, tableId, tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("cluster key index={}, table_id={}, tablet_id={}", clusterKeyIndexes, tableId, tabletId); + } } if (CollectionUtils.isNotEmpty(indexes)) { List tIndexes = new ArrayList<>(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/LoadTaskInfo.java b/fe/fe-core/src/main/java/org/apache/doris/task/LoadTaskInfo.java index 610e243cd9..8671933f75 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/LoadTaskInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/task/LoadTaskInfo.java @@ -154,4 +154,3 @@ public interface LoadTaskInfo { } } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/PushTask.java b/fe/fe-core/src/main/java/org/apache/doris/task/PushTask.java index ab30e1fce5..ca29cc78e6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/PushTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/task/PushTask.java @@ -203,8 +203,10 @@ public class PushTask extends AgentTask { public void countDownLatch(long backendId, long tabletId) { if (this.latch != null) { if (latch.markedCountDown(backendId, tabletId)) { - LOG.debug("pushTask current latch count: {}. backend: {}, tablet:{}", latch.getCount(), backendId, - tabletId); + if (LOG.isDebugEnabled()) { + LOG.debug("pushTask current latch count: {}. 
backend: {}, tablet:{}", latch.getCount(), backendId, + tabletId); + } } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/StreamLoadTask.java b/fe/fe-core/src/main/java/org/apache/doris/task/StreamLoadTask.java index 68123b497c..f663eb6591 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/StreamLoadTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/task/StreamLoadTask.java @@ -546,4 +546,3 @@ public class StreamLoadTask implements LoadTaskInfo { return groupCommit; } } - diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/UpdateTabletMetaInfoTask.java b/fe/fe-core/src/main/java/org/apache/doris/task/UpdateTabletMetaInfoTask.java index 65158cd31a..ad20b7b918 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/UpdateTabletMetaInfoTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/task/UpdateTabletMetaInfoTask.java @@ -101,8 +101,10 @@ public class UpdateTabletMetaInfoTask extends AgentTask { public void countDownLatch(long backendId, Set> tablets) { if (this.latch != null) { if (latch.markedCountDown(backendId, tablets)) { - LOG.debug("UpdateTabletMetaInfoTask current latch count: {}, backend: {}, tablets:{}", - latch.getCount(), backendId, tablets); + if (LOG.isDebugEnabled()) { + LOG.debug("UpdateTabletMetaInfoTask current latch count: {}, backend: {}, tablets:{}", + latch.getCount(), backendId, tablets); + } } } } @@ -111,7 +113,9 @@ public class UpdateTabletMetaInfoTask extends AgentTask { public void countDownToZero(String errMsg) { if (this.latch != null) { latch.countDownToZero(new Status(TStatusCode.CANCELLED, errMsg)); - LOG.debug("UpdateTabletMetaInfoTask count down to zero. error msg: {}", errMsg); + if (LOG.isDebugEnabled()) { + LOG.debug("UpdateTabletMetaInfoTask count down to zero. error msg: {}", errMsg); + } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java index fd48b97454..0967d4e776 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java @@ -442,17 +442,23 @@ public class DatabaseTransactionMgr { } if (transactionState.getTransactionStatus() == TransactionStatus.VISIBLE) { - LOG.debug("transaction is already visible: {}", transactionId); + if (LOG.isDebugEnabled()) { + LOG.debug("transaction is already visible: {}", transactionId); + } throw new TransactionCommitFailedException("transaction is already visible"); } if (transactionState.getTransactionStatus() == TransactionStatus.COMMITTED) { - LOG.debug("transaction is already committed: {}", transactionId); + if (LOG.isDebugEnabled()) { + LOG.debug("transaction is already committed: {}", transactionId); + } throw new TransactionCommitFailedException("transaction is already committed"); } if (transactionState.getTransactionStatus() == TransactionStatus.PRECOMMITTED) { - LOG.debug("transaction is already pre-committed: {}", transactionId); + if (LOG.isDebugEnabled()) { + LOG.debug("transaction is already pre-committed: {}", transactionId); + } return; } @@ -683,18 +689,24 @@ public class DatabaseTransactionMgr { } if (transactionState == null) { - LOG.debug("transaction not found: {}", transactionId); + if (LOG.isDebugEnabled()) { + LOG.debug("transaction not found: {}", transactionId); + } throw new TransactionCommitFailedException("transaction [" + transactionId + "] not found."); } if 
(transactionState.getTransactionStatus() == TransactionStatus.ABORTED) { - LOG.debug("transaction is already aborted: {}", transactionId); + if (LOG.isDebugEnabled()) { + LOG.debug("transaction is already aborted: {}", transactionId); + } throw new TransactionCommitFailedException("transaction [" + transactionId + "] is already aborted. abort reason: " + transactionState.getReason()); } if (transactionState.getTransactionStatus() == TransactionStatus.VISIBLE) { - LOG.debug("transaction is already visible: {}", transactionId); + if (LOG.isDebugEnabled()) { + LOG.debug("transaction is already visible: {}", transactionId); + } if (is2PC) { throw new TransactionCommitFailedException("transaction [" + transactionId + "] is already visible, not pre-committed."); @@ -702,7 +714,9 @@ public class DatabaseTransactionMgr { return; } if (transactionState.getTransactionStatus() == TransactionStatus.COMMITTED) { - LOG.debug("transaction is already committed: {}", transactionId); + if (LOG.isDebugEnabled()) { + LOG.debug("transaction is already committed: {}", transactionId); + } if (is2PC) { throw new TransactionCommitFailedException("transaction [" + transactionId + "] is already committed, not pre-committed."); @@ -711,7 +725,9 @@ public class DatabaseTransactionMgr { } if (is2PC && transactionState.getTransactionStatus() == TransactionStatus.PREPARE) { - LOG.debug("transaction is prepare, not pre-committed: {}", transactionId); + if (LOG.isDebugEnabled()) { + LOG.debug("transaction is prepare, not pre-committed: {}", transactionId); + } throw new TransactionCommitFailedException("transaction [" + transactionId + "] is prepare, not pre-committed."); } @@ -996,7 +1012,9 @@ public class DatabaseTransactionMgr { // a blocking function, the returned result would be the existed table list which hold write lock Database db = env.getInternalCatalog().getDbOrMetaException(transactionState.getDbId()); List tableIdList = transactionState.getTableIdList(); - LOG.debug("finish transaction {} with tables {}", transactionId, tableIdList); + if (LOG.isDebugEnabled()) { + LOG.debug("finish transaction {} with tables {}", transactionId, tableIdList); + } List tableList = db.getTablesOnIdOrderIfExist(tableIdList); tableList = MetaLockUtils.writeLockTablesIfExist(tableList); PublishResult publishResult; @@ -1021,7 +1039,9 @@ public class DatabaseTransactionMgr { // subsequent `updateCatalogAfterVisible()` is called, but it does not seem to be executed here // (because the relevant editlog does not see the log of visible transactions). // So I add a log here for observation. 
- LOG.debug("after set transaction {} to visible", transactionState); + if (LOG.isDebugEnabled()) { + LOG.debug("after set transaction {} to visible", transactionState); + } } finally { writeUnlock(); try { @@ -1074,10 +1094,12 @@ public class DatabaseTransactionMgr { continue; } if (partition.getVisibleVersion() != partitionCommitInfo.getVersion() - 1) { - LOG.debug("for table {} partition {}, transactionId {} partition commitInfo version {} is not" - + " equal with partition visible version {} plus one, need wait", - table.getId(), partition.getId(), transactionState.getTransactionId(), - partitionCommitInfo.getVersion(), partition.getVisibleVersion()); + if (LOG.isDebugEnabled()) { + LOG.debug("for table {} partition {}, transactionId {} partition commitInfo version {} is not" + + " equal with partition visible version {} plus one, need wait", + table.getId(), partition.getId(), transactionState.getTransactionId(), + partitionCommitInfo.getVersion(), partition.getVisibleVersion()); + } String errMsg = String.format("wait for publishing partition %d version %d." + " self version: %d. table %d", partitionId, partition.getVisibleVersion() + 1, partitionCommitInfo.getVersion(), tableId); @@ -1986,7 +2008,9 @@ public class DatabaseTransactionMgr { tableIdToNumDeltaRows.put(tableId, numRows / replicaNum); } }); - LOG.debug("table id to loaded rows:{}", tableIdToNumDeltaRows); + if (LOG.isDebugEnabled()) { + LOG.debug("table id to loaded rows:{}", tableIdToNumDeltaRows); + } tableIdToNumDeltaRows.forEach(analysisManager::updateUpdatedRows); return true; } @@ -2000,8 +2024,10 @@ public class DatabaseTransactionMgr { continue; } if (entry.getKey() <= endTransactionId) { - LOG.debug("find a running txn with txn_id={} on db: {}, less than watermark txn_id {}", - entry.getKey(), dbId, endTransactionId); + if (LOG.isDebugEnabled()) { + LOG.debug("find a running txn with txn_id={} on db: {}, less than watermark txn_id {}", + entry.getKey(), dbId, endTransactionId); + } return false; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java index 57a8629876..ae331d2b75 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java @@ -101,13 +101,17 @@ public class GlobalTransactionMgr implements Writable { public void addDatabaseTransactionMgr(Long dbId) { if (dbIdToDatabaseTransactionMgrs.putIfAbsent(dbId, new DatabaseTransactionMgr(dbId, env, idGenerator)) == null) { - LOG.debug("add database transaction manager for db {}", dbId); + if (LOG.isDebugEnabled()) { + LOG.debug("add database transaction manager for db {}", dbId); + } } } public void removeDatabaseTransactionMgr(Long dbId) { if (dbIdToDatabaseTransactionMgrs.remove(dbId) != null) { - LOG.debug("remove database transaction manager for db {}", dbId); + if (LOG.isDebugEnabled()) { + LOG.debug("remove database transaction manager for db {}", dbId); + } } } @@ -214,7 +218,9 @@ public class GlobalTransactionMgr implements Writable { throw new TransactionCommitFailedException("disable_load_job is set to true, all load jobs are prevented"); } - LOG.debug("try to pre-commit transaction: {}", transactionId); + if (LOG.isDebugEnabled()) { + LOG.debug("try to pre-commit transaction: {}", transactionId); + } DatabaseTransactionMgr dbTransactionMgr = getDatabaseTransactionMgr(dbId); 
dbTransactionMgr.preCommitTransaction2PC(tableList, transactionId, tabletCommitInfos, txnCommitAttachment); } @@ -241,7 +247,9 @@ public class GlobalTransactionMgr implements Writable { throw new TransactionCommitFailedException("disable_load_job is set to true, all load jobs are prevented"); } - LOG.debug("try to commit transaction: {}", transactionId); + if (LOG.isDebugEnabled()) { + LOG.debug("try to commit transaction: {}", transactionId); + } DatabaseTransactionMgr dbTransactionMgr = getDatabaseTransactionMgr(dbId); dbTransactionMgr.commitTransaction(tableList, transactionId, tabletCommitInfos, txnCommitAttachment, false); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/PublishVersionDaemon.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/PublishVersionDaemon.java index 250f2c5666..0d4a890a68 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/PublishVersionDaemon.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/PublishVersionDaemon.java @@ -165,7 +165,9 @@ public class PublishVersionDaemon extends MasterDaemon { // if finish transaction state failed, then update publish version time, should check // to finish after some interval transactionState.updateSendTaskTime(); - LOG.debug("publish version for transaction {} failed", transactionState); + if (LOG.isDebugEnabled()) { + LOG.debug("publish version for transaction {} failed", transactionState); + } } } diff --git a/fe/fe-core/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/fe/fe-core/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 1de756ce80..37bf73b2fa 100644 --- a/fe/fe-core/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/fe/fe-core/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -801,7 +801,9 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable { client.shutdown(); } } catch (TException e) { - LOG.debug("Unable to shutdown metastore client. Will try closing transport directly.", e); + if (LOG.isDebugEnabled()) { + LOG.debug("Unable to shutdown metastore client. Will try closing transport directly.", e); + } } // Transport would have got closed via client.shutdown(), so we dont need this, but // just in case, we make this call. 
@@ -2966,7 +2968,9 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable { NotificationEventRequest rqst = new NotificationEventRequest(lastEventId); rqst.setMaxEvents(maxEvents); NotificationEventResponse rsp = client.get_next_notification(rqst); - LOG.debug("Got back " + rsp.getEventsSize() + " events"); + if (LOG.isDebugEnabled()) { + LOG.debug("Got back " + rsp.getEventsSize() + " events"); + } NotificationEventResponse filtered = new NotificationEventResponse(); if (rsp != null && rsp.getEvents() != null) { long nextEventId = lastEventId + 1; @@ -3154,7 +3158,9 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable { public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, List colNames, List partNames) throws TException { if (colNames.isEmpty() || partNames.isEmpty()) { - LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side."); + if (LOG.isDebugEnabled()) { + LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side."); + } return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate } PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames); @@ -3582,4 +3588,3 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable { return prependCatalogToDbName(catalogName, dbName, conf); } } - diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/CancelExportStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/CancelExportStmtTest.java index f2cf353760..a5cff4fca1 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/CancelExportStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/CancelExportStmtTest.java @@ -351,4 +351,3 @@ public class CancelExportStmtTest extends TestWithFeService { Assert.assertTrue(job8.getState() == ExportJobState.CANCELLED); } } - diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertArrayStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertArrayStmtTest.java index f74fcd3432..0d8759aa08 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertArrayStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertArrayStmtTest.java @@ -149,4 +149,3 @@ public class InsertArrayStmtTest { Assert.assertEquals(state.getErrorMessage(), MysqlStateType.OK, state.getStateType()); } } - diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/JdbcResourceTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/JdbcResourceTest.java index ee0d1949be..87dcecc79d 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/JdbcResourceTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/JdbcResourceTest.java @@ -202,4 +202,3 @@ public class JdbcResourceTest { Assert.assertTrue(resultUrl.contains(";")); } } - diff --git a/fe/fe-core/src/test/java/org/apache/doris/clone/BalanceStatistic.java b/fe/fe-core/src/test/java/org/apache/doris/clone/BalanceStatistic.java index 596a06f668..174001a1bc 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/clone/BalanceStatistic.java +++ b/fe/fe-core/src/test/java/org/apache/doris/clone/BalanceStatistic.java @@ -99,4 +99,3 @@ public class BalanceStatistic { System.out.println("=== max / min : " + (maxTotalDataSize / (double) minTotalDataSize)); } } - diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/jdbc/JdbcExternalCatalogTest.java 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/jdbc/JdbcExternalCatalogTest.java index 49c0e07732..35ef5fd56f 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/datasource/jdbc/JdbcExternalCatalogTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/jdbc/JdbcExternalCatalogTest.java @@ -101,4 +101,3 @@ public class JdbcExternalCatalogTest { } } - diff --git a/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/BDBDebuggerTest.java b/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/BDBDebuggerTest.java index 27c7efc430..3d54affbce 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/BDBDebuggerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/BDBDebuggerTest.java @@ -64,7 +64,9 @@ public class BDBDebuggerTest { } Preconditions.checkArgument(!Strings.isNullOrEmpty(dorisHome)); File dir = Files.createTempDirectory(Paths.get(dorisHome, "fe", "mocked"), "BDBJEJournalTest").toFile(); - LOG.debug("createTmpDir path {}", dir.getAbsolutePath()); + if (LOG.isDebugEnabled()) { + LOG.debug("createTmpDir path {}", dir.getAbsolutePath()); + } tmpDirs.add(dir); return dir; } @@ -72,7 +74,9 @@ public class BDBDebuggerTest { @AfterAll public static void cleanUp() throws Exception { for (File dir : tmpDirs) { - LOG.debug("deleteTmpDir path {}", dir.getAbsolutePath()); + if (LOG.isDebugEnabled()) { + LOG.debug("deleteTmpDir path {}", dir.getAbsolutePath()); + } FileUtils.deleteDirectory(dir); } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/BDBEnvironmentTest.java b/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/BDBEnvironmentTest.java index f61fbc6bf9..c3a942cf07 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/BDBEnvironmentTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/BDBEnvironmentTest.java @@ -77,7 +77,9 @@ public class BDBEnvironmentTest { } UUID uuid = UUID.randomUUID(); File dir = Files.createDirectories(Paths.get(dorisHome, "fe", "mocked", "BDBEnvironmentTest-" + uuid.toString())).toFile(); - LOG.debug("createTmpDir path {}", dir.getAbsolutePath()); + if (LOG.isDebugEnabled()) { + LOG.debug("createTmpDir path {}", dir.getAbsolutePath()); + } tmpDirs.add(dir.getAbsolutePath()); return dir.getAbsolutePath(); } @@ -90,7 +92,9 @@ public class BDBEnvironmentTest { @AfterAll public static void cleanUp() throws Exception { for (String dir : tmpDirs) { - LOG.debug("deleteTmpDir path {}", dir); + if (LOG.isDebugEnabled()) { + LOG.debug("deleteTmpDir path {}", dir); + } FileUtils.deleteDirectory(new File(dir)); } } @@ -127,7 +131,9 @@ public class BDBEnvironmentTest { int port = findValidPort(); String selfNodeName = Env.genFeNodeName("127.0.0.1", port, false); String selfNodeHostPort = "127.0.0.1:" + port; - LOG.debug("selfNodeName:{}, selfNodeHostPort:{}", selfNodeName, selfNodeHostPort); + if (LOG.isDebugEnabled()) { + LOG.debug("selfNodeName:{}, selfNodeHostPort:{}", selfNodeName, selfNodeHostPort); + } BDBEnvironment bdbEnvironment = new BDBEnvironment(true, false); bdbEnvironment.setup(new File(createTmpDir()), selfNodeName, selfNodeHostPort, selfNodeHostPort); @@ -153,7 +159,9 @@ public class BDBEnvironmentTest { String expectedMessage = "Database was closed."; String actualMessage = exception.getMessage(); - LOG.debug("exception:", exception); + if (LOG.isDebugEnabled()) { + LOG.debug("exception:", exception); + } Assertions.assertTrue(actualMessage.contains(expectedMessage)); Database epochDb = bdbEnvironment.getEpochDB(); @@ -190,7 +198,9 @@ 
public class BDBEnvironmentTest {
         });
         expectedMessage = "Environment is closed.";
         actualMessage = exception.getMessage();
-        LOG.debug("exception:", exception);
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("exception:", exception);
+        }
         Assertions.assertTrue(actualMessage.contains(expectedMessage));
     }
@@ -284,7 +294,9 @@ public class BDBEnvironmentTest {
         int masterPort = findValidPort();
         String masterNodeName = Env.genFeNodeName("127.0.0.1", masterPort, false);
         String masterNodeHostPort = "127.0.0.1:" + masterPort;
-        LOG.debug("masterNodeName:{}, masterNodeHostPort:{}", masterNodeName, masterNodeHostPort);
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("masterNodeName:{}, masterNodeHostPort:{}", masterNodeName, masterNodeHostPort);
+        }
         BDBEnvironment masterEnvironment = new BDBEnvironment(true, false);
         File masterDir = new File(createTmpDir());
@@ -296,7 +308,9 @@ public class BDBEnvironmentTest {
             int followerPort = findValidPort();
             String followerNodeName = Env.genFeNodeName("127.0.0.1", followerPort, false);
             String followerNodeHostPort = "127.0.0.1:" + followerPort;
-            LOG.debug("followerNodeName{}:{}, followerNodeHostPort{}:{}", i, i, followerNodeName, followerNodeHostPort);
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("followerNodeName{}:{}, followerNodeHostPort{}:{}", i, i, followerNodeName, followerNodeHostPort);
+            }
             BDBEnvironment followerEnvironment = new BDBEnvironment(true, false);
             File followerDir = new File(createTmpDir());
@@ -308,7 +322,9 @@ public class BDBEnvironmentTest {
         int observerPort = findValidPort();
         String observerNodeName = Env.genFeNodeName("127.0.0.1", observerPort, false);
         String observerNodeHostPort = "127.0.0.1:" + observerPort;
-        LOG.debug("observerNodeName:{}, observerNodeHostPort:{}", observerNodeName, observerNodeHostPort);
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("observerNodeName:{}, observerNodeHostPort:{}", observerNodeName, observerNodeHostPort);
+        }
         BDBEnvironment observerEnvironment = new BDBEnvironment(false, false);
         File observerDir = new File(createTmpDir());
diff --git a/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/BDBJEJournalTest.java b/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/BDBJEJournalTest.java
index edcbf9c033..718afb6741 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/BDBJEJournalTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/BDBJEJournalTest.java
@@ -66,7 +66,9 @@ public class BDBJEJournalTest { // CHECKSTYLE IGNORE THIS LINE: BDBJE should use
         }
         Preconditions.checkArgument(!Strings.isNullOrEmpty(dorisHome));
         File dir = Files.createTempDirectory(Paths.get(dorisHome, "fe", "mocked"), "BDBJEJournalTest").toFile();
-        LOG.debug("createTmpDir path {}", dir.getAbsolutePath());
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("createTmpDir path {}", dir.getAbsolutePath());
+        }
         tmpDirs.add(dir);
         return dir;
     }
@@ -74,7 +76,9 @@ public class BDBJEJournalTest { // CHECKSTYLE IGNORE THIS LINE: BDBJE should use
     @AfterAll
     public static void cleanUp() throws Exception {
         for (File dir : tmpDirs) {
-            LOG.debug("deleteTmpDir path {}", dir.getAbsolutePath());
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("deleteTmpDir path {}", dir.getAbsolutePath());
+            }
             FileUtils.deleteDirectory(dir);
         }
     }
@@ -166,7 +170,9 @@ public class BDBJEJournalTest { // CHECKSTYLE IGNORE THIS LINE: BDBJE should use
         Assertions.assertEquals(1, journal.getMinJournalId());
         Assertions.assertEquals(0, journal.getFinalizedJournalId());
-        LOG.debug("journal.getDatabaseNames(): {}", journal.getDatabaseNames());
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("journal.getDatabaseNames(): {}", journal.getDatabaseNames());
+        }
         Assertions.assertEquals(1, journal.getDatabaseNames().size());
         Assertions.assertEquals(1, journal.getDatabaseNames().get(0));
@@ -192,7 +198,9 @@ public class BDBJEJournalTest { // CHECKSTYLE IGNORE THIS LINE: BDBJE should use
         Assertions.assertEquals(1, journal.getMinJournalId());
         Assertions.assertEquals(40, journal.getFinalizedJournalId());
-        LOG.debug("journal.getDatabaseNames(): {}", journal.getDatabaseNames());
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("journal.getDatabaseNames(): {}", journal.getDatabaseNames());
+        }
         Assertions.assertEquals(5, journal.getDatabaseNames().size());
         Assertions.assertEquals(41, journal.getDatabaseNames().get(4));
@@ -224,7 +232,9 @@ public class BDBJEJournalTest { // CHECKSTYLE IGNORE THIS LINE: BDBJE should use
         Assertions.assertEquals(ReplicatedEnvironment.State.MASTER,
                 journal.getBDBEnvironment().getReplicatedEnvironment().getState());
         journal.deleteJournals(21);
-        LOG.debug("journal.getDatabaseNames(): {}", journal.getDatabaseNames());
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("journal.getDatabaseNames(): {}", journal.getDatabaseNames());
+        }
         Assertions.assertEquals(3, journal.getDatabaseNames().size());
         Assertions.assertEquals(21, journal.getDatabaseNames().get(0));
         journal.close();
@@ -302,7 +312,9 @@ public class BDBJEJournalTest { // CHECKSTYLE IGNORE THIS LINE: BDBJE should use
         Assertions.assertEquals(1, journal.getMinJournalId());
         Assertions.assertEquals(0, journal.getFinalizedJournalId());
-        LOG.debug("journal.getDatabaseNames(): {}", journal.getDatabaseNames());
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("journal.getDatabaseNames(): {}", journal.getDatabaseNames());
+        }
         Assertions.assertEquals(1, journal.getDatabaseNames().size());
         Assertions.assertEquals(1, journal.getDatabaseNames().get(0));
diff --git a/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/BDBJournalCursorTest.java b/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/BDBJournalCursorTest.java
index d68d616574..fc9e24a822 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/BDBJournalCursorTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/BDBJournalCursorTest.java
@@ -57,7 +57,9 @@ public class BDBJournalCursorTest {
         }
         UUID uuid = UUID.randomUUID();
         File dir = Files.createDirectories(Paths.get(dorisHome, "fe", "mocked", "BDBEnvironmentTest-" + uuid.toString())).toFile();
-        LOG.debug("createTmpDir path {}", dir.getAbsolutePath());
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("createTmpDir path {}", dir.getAbsolutePath());
+        }
         tmpDirs.add(dir.getAbsolutePath());
         return dir.getAbsolutePath();
     }
@@ -65,7 +67,9 @@ public class BDBJournalCursorTest {
     @AfterAll
     public static void cleanUp() throws Exception {
         for (String dir : tmpDirs) {
-            LOG.debug("deleteTmpDir path {}", dir);
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("deleteTmpDir path {}", dir);
+            }
             FileUtils.deleteDirectory(new File(dir));
         }
     }
@@ -98,7 +102,9 @@ public class BDBJournalCursorTest {
         int port = findValidPort();
         String selfNodeName = Env.genFeNodeName("127.0.0.1", port, false);
         String selfNodeHostPort = "127.0.0.1:" + port;
-        LOG.debug("selfNodeName:{}, selfNodeHostPort:{}", selfNodeName, selfNodeHostPort);
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("selfNodeName:{}, selfNodeHostPort:{}", selfNodeName, selfNodeHostPort);
+        }
         BDBEnvironment bdbEnvironment = new BDBEnvironment(true, false);
         bdbEnvironment.setup(new File(createTmpDir()), selfNodeName, selfNodeHostPort, selfNodeHostPort);
diff --git a/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/TimestampTest.java b/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/TimestampTest.java
index 36c4bcacb5..73120774c6 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/TimestampTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/journal/bdbje/TimestampTest.java
@@ -53,7 +53,9 @@ public class TimestampTest {
         }
         UUID uuid = UUID.randomUUID();
         File testFile = Files.createFile(Paths.get(dorisHome, "fe", "mocked", "TimestampTest-" + uuid.toString())).toFile();
-        LOG.debug("createTmpFile path {}", testFile.getAbsolutePath());
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("createTmpFile path {}", testFile.getAbsolutePath());
+        }
         testFiles.add(testFile.getAbsolutePath());
         return testFile.getAbsolutePath();
     }
diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/datasets/ssb/SSBUtils.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/datasets/ssb/SSBUtils.java
index a728d01dce..3cd3f9d1ec 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/nereids/datasets/ssb/SSBUtils.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/datasets/ssb/SSBUtils.java
@@ -349,4 +349,3 @@ public class SSBUtils {
                 + ")");
     }
 }
-
diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/analysis/AnalyzeSubQueryTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/analysis/AnalyzeSubQueryTest.java
index 1b69659833..db56b16e6f 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/analysis/AnalyzeSubQueryTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/analysis/AnalyzeSubQueryTest.java
@@ -186,4 +186,3 @@ public class AnalyzeSubQueryTest extends TestWithFeService implements MemoPatter
         );
     }
 }
-
diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/join/JoinExchangeBothProjectTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/join/JoinExchangeBothProjectTest.java
index d48bbff1c1..b69738d70a 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/join/JoinExchangeBothProjectTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/join/JoinExchangeBothProjectTest.java
@@ -73,4 +73,3 @@ class JoinExchangeBothProjectTest implements MemoPatternMatchSupported {
         );
     }
 }
-
diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/join/JoinExchangeLeftProjectTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/join/JoinExchangeLeftProjectTest.java
index 95d9286647..0712000bec 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/join/JoinExchangeLeftProjectTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/join/JoinExchangeLeftProjectTest.java
@@ -71,4 +71,3 @@ class JoinExchangeLeftProjectTest implements MemoPatternMatchSupported {
         );
     }
 }
-
diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/join/JoinExchangeRightProjectTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/join/JoinExchangeRightProjectTest.java
index 113facaa5d..cd0d8b30f3 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/join/JoinExchangeRightProjectTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/join/JoinExchangeRightProjectTest.java
@@ -73,4 +73,3 @@ class JoinExchangeRightProjectTest implements MemoPatternMatchSupported {
         );
     }
 }
-
diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/join/JoinExchangeTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/join/JoinExchangeTest.java
index 8faa5d0a85..e5e82f4b69 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/join/JoinExchangeTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/join/JoinExchangeTest.java
@@ -65,4 +65,3 @@ class JoinExchangeTest implements MemoPatternMatchSupported {
         );
     }
 }
-
diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/expression/SimplifyArithmeticRuleTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/expression/SimplifyArithmeticRuleTest.java
index 6ecbdf8c72..4ea50bf1f8 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/expression/SimplifyArithmeticRuleTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/expression/SimplifyArithmeticRuleTest.java
@@ -158,4 +158,3 @@ class SimplifyArithmeticRuleTest extends ExpressionRewriteTestHelper {
     }
 }
-
diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/PushDowFilterThroughProjectTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/PushDowFilterThroughProjectTest.java
index ad922a8f1c..a737acbf01 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/PushDowFilterThroughProjectTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/PushDowFilterThroughProjectTest.java
@@ -109,4 +109,3 @@ class PushDowFilterThroughProjectTest implements MemoPatternMatchSupported {
                         f.getPredicate().toSql().equals("id IS NULL"))));
     }
 }
-
diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/PushDownFilterThroughWindowTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/PushDownFilterThroughWindowTest.java
index e62de6cdf9..6edcd01a8e 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/PushDownFilterThroughWindowTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/PushDownFilterThroughWindowTest.java
@@ -90,4 +90,3 @@ class PushDownFilterThroughWindowTest implements MemoPatternMatchSupported {
         );
     }
 }
-
diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/trees/expressions/literal/DateTimeLiteralTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/trees/expressions/literal/DateTimeLiteralTest.java
index 8c636e9eca..ecae8f85d5 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/nereids/trees/expressions/literal/DateTimeLiteralTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/trees/expressions/literal/DateTimeLiteralTest.java
@@ -444,4 +444,3 @@ class DateTimeLiteralTest {
                 new DateTimeV2Literal("2017-01-01 00:00:00.0"));
     }
 }
-
diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/FederationBackendPolicyTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/FederationBackendPolicyTest.java
index 1d213c6a09..e7c5d8acfd 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/planner/FederationBackendPolicyTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/planner/FederationBackendPolicyTest.java
@@ -724,4 +724,3 @@ public class FederationBackendPolicyTest {
         return entries1.containsAll(entries2) && entries2.containsAll(entries1);
     }
 }
-
diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/SingleNodePlannerTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/SingleNodePlannerTest.java
index ce8248de35..38ad63dbdf 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/planner/SingleNodePlannerTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/planner/SingleNodePlannerTest.java
@@ -147,10 +147,6 @@ public class SingleNodePlannerTest {
                 result = null;
                 scanNode2.getOutputSmap();
                 result = null;
-                tableRef1.getUniqueAlias();
-                result = "t1";
-                tableRef2.getUniqueAlias();
-                result = "t2";
             }
         };
         new MockUp() {
@@ -247,10 +243,6 @@ public class SingleNodePlannerTest {
                 result = null;
                 scanNode2.getOutputSmap();
                 result = null;
-                tableRef1.getUniqueAlias();
-                result = "t1";
-                tableRef2.getUniqueAlias();
-                result = "t2";
                 tableRef1.getJoinOp();
                 result = JoinOperator.INNER_JOIN;
                 tableRef2.getJoinOp();
@@ -345,10 +337,6 @@ public class SingleNodePlannerTest {
                 result = null;
                 scanNode2.getOutputSmap();
                 result = null;
-                tableRef1.getUniqueAlias();
-                result = "t1";
-                tableRef2.getUniqueAlias();
-                result = "t2";
                 tableRef1.getJoinOp();
                 result = JoinOperator.INNER_JOIN;
                 tableRef2.getJoinOp();
@@ -497,12 +485,6 @@ public class SingleNodePlannerTest {
                 result = null;
                 scanNode2.getOutputSmap();
                 result = null;
-                tableRef1.getUniqueAlias();
-                result = "t1";
-                tableRef2.getUniqueAlias();
-                result = "t2";
-                tableRef3.getUniqueAlias();
-                result = "t3";
                 tableRef1.getJoinOp();
                 result = JoinOperator.INNER_JOIN;
                 tableRef2.getJoinOp();
@@ -659,12 +641,6 @@ public class SingleNodePlannerTest {
                 result = null;
                 scanNode2.getOutputSmap();
                 result = null;
-                tableRef1.getUniqueAlias();
-                result = "t1";
-                tableRef2.getUniqueAlias();
-                result = "t2";
-                tableRef3.getUniqueAlias();
-                result = "t3";
                 tableRef1.getJoinOp();
                 result = JoinOperator.INNER_JOIN;
                 tableRef2.getJoinOp();
@@ -846,14 +822,6 @@ public class SingleNodePlannerTest {
                 result = null;
                 scanNode4.getOutputSmap();
                 result = null;
-                tableRef1.getUniqueAlias();
-                result = "t1";
-                tableRef2.getUniqueAlias();
-                result = "t2";
-                tableRef3.getUniqueAlias();
-                result = "t3";
-                tableRef4.getUniqueAlias();
-                result = "t4";
                 tableRef1.getJoinOp();
                 result = JoinOperator.INNER_JOIN;
                 tableRef2.getJoinOp();
@@ -1053,14 +1021,6 @@ public class SingleNodePlannerTest {
                 result = null;
                 scanNode4.getOutputSmap();
                 result = null;
-                tableRef1.getUniqueAlias();
-                result = "t1";
-                tableRef2.getUniqueAlias();
-                result = "t2";
-                tableRef3.getUniqueAlias();
-                result = "t3";
-                tableRef4.getUniqueAlias();
-                result = "t4";
                 tableRef1.getJoinOp();
                 result = JoinOperator.INNER_JOIN;
                 tableRef2.getJoinOp();
diff --git a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/etl/SparkEtlJob.java b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/etl/SparkEtlJob.java
index 484ff4eca2..a359612e99 100644
--- a/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/etl/SparkEtlJob.java
+++ b/fe/spark-dpp/src/main/java/org/apache/doris/load/loadv2/etl/SparkEtlJob.java
@@ -100,16 +100,22 @@ public class SparkEtlJob {
     }
 
     private void initConfig() throws IOException {
-        LOG.debug("job config file path: " + jobConfigFilePath);
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("job config file path: " + jobConfigFilePath);
+        }
         Configuration hadoopConf = SparkHadoopUtil.get().newConfiguration(this.conf);
         String jsonConfig;
         Path path = new Path(jobConfigFilePath);
         try (FileSystem fs = path.getFileSystem(hadoopConf); DataInputStream in = fs.open(path)) {
             jsonConfig = CharStreams.toString(new InputStreamReader(in));
         }
-        LOG.debug("rdd read json config: " + jsonConfig);
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("rdd read json config: " + jsonConfig);
+        }
         etlJobConfig = EtlJobConfig.configFromJson(jsonConfig);
-        LOG.debug("etl job config: " + etlJobConfig);
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("etl job config: " + etlJobConfig);
+        }
     }
 
     /*