diff --git a/fe/fe-core/src/main/cup/sql_parser.cup b/fe/fe-core/src/main/cup/sql_parser.cup index e25605f3e0..ae659ec6ab 100644 --- a/fe/fe-core/src/main/cup/sql_parser.cup +++ b/fe/fe-core/src/main/cup/sql_parser.cup @@ -781,8 +781,10 @@ nonterminal List partition_value_list; nonterminal List partition_key_item_list; nonterminal List> list_partition_values_list; nonterminal SinglePartitionDesc single_partition_desc; -nonterminal List opt_single_partition_desc_list; -nonterminal List single_partition_desc_list; +nonterminal List opt_all_partition_desc_list; +nonterminal List all_partition_desc_list; +nonterminal PartitionKeyDesc fixed_multi_partition_key_desc; +nonterminal MultiPartitionDesc multi_partition_desc; nonterminal List privilege_list; nonterminal List string_list; @@ -2812,13 +2814,13 @@ opt_partition ::= :} /* Range partition */ | KW_PARTITION KW_BY KW_RANGE LPAREN ident_list:columns RPAREN - LPAREN opt_single_partition_desc_list:list RPAREN + LPAREN opt_all_partition_desc_list:list RPAREN {: RESULT = new RangePartitionDesc(columns, list); :} /* List partition */ | KW_PARTITION KW_BY KW_LIST LPAREN ident_list:columns RPAREN - LPAREN opt_single_partition_desc_list:list RPAREN + LPAREN opt_all_partition_desc_list:list RPAREN {: RESULT = new ListPartitionDesc(columns, list); :} @@ -2886,19 +2888,19 @@ opt_keys ::= :} ; -opt_single_partition_desc_list ::= +opt_all_partition_desc_list ::= /* Empty */ {: RESULT = null; :} - | single_partition_desc_list:list + | all_partition_desc_list:list {: RESULT = list; :} ; -single_partition_desc_list ::= - single_partition_desc_list:list COMMA single_partition_desc:desc +all_partition_desc_list ::= + all_partition_desc_list:list COMMA single_partition_desc:desc {: list.add(desc); RESULT = list; @@ -2907,6 +2909,15 @@ single_partition_desc_list ::= {: RESULT = Lists.newArrayList(desc); :} + | all_partition_desc_list:list COMMA multi_partition_desc:desc + {: + list.add(desc); + RESULT = list; + :} + | multi_partition_desc:desc + {: + RESULT = Lists.newArrayList(desc); + :} ; single_partition_desc ::= @@ -2928,6 +2939,22 @@ single_partition_desc ::= :} ; +multi_partition_desc ::= + fixed_multi_partition_key_desc:desc + opt_key_value_map:properties + {: + RESULT = new MultiPartitionDesc(desc, properties); + :} + ; + +fixed_multi_partition_key_desc ::= + // FROM (lower) TO (upper) INTERVAL time_interval time_type + KW_FROM LPAREN partition_key_list:lower RPAREN KW_TO LPAREN partition_key_list:upper RPAREN KW_INTERVAL INTEGER_LITERAL:time_interval ident:time_unit + {: + RESULT = PartitionKeyDesc.createMultiFixed(lower, upper, time_interval, time_unit); + :} + ; + partition_key_desc ::= KW_MAX_VALUE {: diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AllPartitionDesc.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AllPartitionDesc.java new file mode 100644 index 0000000000..025c8b4f7f --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AllPartitionDesc.java @@ -0,0 +1,21 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.analysis; + +public interface AllPartitionDesc { +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ListPartitionDesc.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ListPartitionDesc.java index 31aec5fb21..d0b6bebf05 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ListPartitionDesc.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ListPartitionDesc.java @@ -33,8 +33,8 @@ import java.util.Map; public class ListPartitionDesc extends PartitionDesc { public ListPartitionDesc(List partitionColNames, - List singlePartitionDescs) { - super(partitionColNames, singlePartitionDescs); + List allPartitionDescs) throws AnalysisException { + super(partitionColNames, allPartitionDescs); type = PartitionType.LIST; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/MultiPartitionDesc.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/MultiPartitionDesc.java new file mode 100644 index 0000000000..f3a04cbd6c --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/MultiPartitionDesc.java @@ -0,0 +1,315 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.doris.analysis; + +import org.apache.doris.analysis.TimestampArithmeticExpr.TimeUnit; +import org.apache.doris.catalog.DynamicPartitionProperty; +import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.Config; +import org.apache.doris.common.DdlException; +import org.apache.doris.common.util.DynamicPartitionUtil; +import org.apache.doris.common.util.TimeUtils; + +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; + +import java.time.DayOfWeek; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.format.DateTimeFormatter; +import java.time.temporal.ChronoField; +import java.time.temporal.WeekFields; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +// to describe the key list partition's information in create table stmt +public class MultiPartitionDesc implements AllPartitionDesc { + public static final String HOURS_FORMAT = "yyyyMMddHH"; + public static final String HOUR_FORMAT = "yyyy-MM-dd HH"; + public static final String DATES_FORMAT = "yyyyMMdd"; + public static final String DATE_FORMAT = "yyyy-MM-dd"; + public static final String MONTHS_FORMAT = "yyyyMM"; + public static final String MONTH_FORMAT = "yyyy-MM"; + public static final String YEAR_FORMAT = "yyyy"; + public static final String DATETIME_FORMAT = "yyyy-MM-dd HH:mm:ss"; + + + + private final String partitionPrefix = "p_"; + private LocalDateTime startTime; + private LocalDateTime endTime; + + private DateTimeFormatter startDateTimeFormat; + private DateTimeFormatter endDateTimeFormat; + + + private Long timeInterval; + private final PartitionKeyDesc partitionKeyDesc; + private TimestampArithmeticExpr.TimeUnit timeUnitType; + private final Map properties; + private final List singlePartitionDescList = Lists.newArrayList(); + + private final ImmutableSet timeUnitTypeMultiPartition = ImmutableSet.of( + TimestampArithmeticExpr.TimeUnit.HOUR, + TimestampArithmeticExpr.TimeUnit.DAY, + TimestampArithmeticExpr.TimeUnit.WEEK, + TimestampArithmeticExpr.TimeUnit.MONTH, + TimestampArithmeticExpr.TimeUnit.YEAR + ); + + private final Integer maxAllowedLimit = Config.max_multi_partition_num; + + public MultiPartitionDesc(PartitionKeyDesc partitionKeyDesc, + Map properties) throws AnalysisException { + this.partitionKeyDesc = partitionKeyDesc; + this.properties = properties; + this.timeIntervalTrans(); + this.timeTrans(); + } + + public List getSinglePartitionDescList() throws AnalysisException { + if (singlePartitionDescList.size() == 0) { + buildMultiPartitionToSinglePartitionDescs(); + } + return singlePartitionDescList; + } + + private List buildMultiPartitionToSinglePartitionDescs() throws AnalysisException { + String partitionName; + long countNum = 0; + int startDayOfWeek = 1; + int startDayOfMonth = 1; + String partitionPrefix = this.partitionPrefix; + LocalDateTime startTime = this.startTime; + if (properties != null) { + if (properties.containsKey(DynamicPartitionProperty.START_DAY_OF_WEEK)) { + String dayOfWeekStr = properties.get(DynamicPartitionProperty.START_DAY_OF_WEEK); + try { + DynamicPartitionUtil.checkStartDayOfWeek(dayOfWeekStr); + } catch (DdlException e) { + throw new AnalysisException(e.getMessage()); + } + startDayOfWeek = Integer.parseInt(dayOfWeekStr); + } + if (properties.containsKey(DynamicPartitionProperty.START_DAY_OF_MONTH)) { + String dayOfMonthStr = properties.get(DynamicPartitionProperty.START_DAY_OF_MONTH); + try { + 
DynamicPartitionUtil.checkStartDayOfMonth(dayOfMonthStr); + } catch (DdlException e) { + throw new AnalysisException(e.getMessage()); + } + startDayOfMonth = Integer.parseInt(dayOfMonthStr); + } + + if (properties.containsKey(DynamicPartitionProperty.CREATE_HISTORY_PARTITION)) { + properties.put(DynamicPartitionProperty.CREATE_HISTORY_PARTITION, "false"); + } + if (properties.containsKey(DynamicPartitionProperty.PREFIX)) { + partitionPrefix = properties.get(DynamicPartitionProperty.PREFIX); + try { + DynamicPartitionUtil.checkPrefix(partitionPrefix); + } catch (DdlException e) { + throw new AnalysisException(e.getMessage()); + } + } + } + WeekFields weekFields = WeekFields.of(DayOfWeek.of(startDayOfWeek), 1); + while (startTime.isBefore(this.endTime)) { + PartitionValue lowerPartitionValue = new PartitionValue(startTime.format(dateTypeFormat())); + switch (this.timeUnitType) { + case HOUR: + partitionName = partitionPrefix + startTime.format(DateTimeFormatter.ofPattern(HOURS_FORMAT)); + startTime = startTime.plusHours(timeInterval); + break; + case DAY: + partitionName = partitionPrefix + startTime.format(DateTimeFormatter.ofPattern(DATES_FORMAT)); + startTime = startTime.plusDays(timeInterval); + break; + case WEEK: + LocalDate localDate = LocalDate.of(startTime.getYear(), startTime.getMonthValue(), + startTime.getDayOfMonth()); + int weekOfYear = localDate.get(weekFields.weekOfYear()); + partitionName = String.format("%s%s_%02d", partitionPrefix, + startTime.format(DateTimeFormatter.ofPattern(YEAR_FORMAT)), weekOfYear); + startTime = startTime.with(ChronoField.DAY_OF_WEEK, startDayOfMonth); + startTime = startTime.plusWeeks(timeInterval); + break; + case MONTH: + partitionName = partitionPrefix + startTime.format(DateTimeFormatter.ofPattern(MONTHS_FORMAT)); + startTime = startTime.withDayOfMonth(startDayOfMonth); + startTime = startTime.plusMonths(timeInterval); + break; + case YEAR: + partitionName = partitionPrefix + startTime.format(DateTimeFormatter.ofPattern(YEAR_FORMAT)); + startTime = startTime.withDayOfYear(1); + startTime = startTime.plusYears(timeInterval); + break; + default: + throw new AnalysisException("Multi partition does not support time interval type: " + + this.timeUnitType); + } + if (this.timeUnitType != TimestampArithmeticExpr.TimeUnit.DAY && startTime.isAfter(this.endTime)) { + startTime = this.endTime; + } + PartitionValue upperPartitionValue = new PartitionValue(startTime.format(dateTypeFormat())); + PartitionKeyDesc partitionKeyDesc = PartitionKeyDesc.createFixed( + Lists.newArrayList(lowerPartitionValue), + Lists.newArrayList(upperPartitionValue) + ); + singlePartitionDescList.add( + new SinglePartitionDesc( + false, + partitionName, + partitionKeyDesc, + properties) + ); + + countNum++; + if (countNum > maxAllowedLimit) { + throw new AnalysisException("The number of multi partitions is too large; it should not exceed " + + maxAllowedLimit); + } + } + return singlePartitionDescList; + } + + private void timeTrans() throws AnalysisException { + + if (partitionKeyDesc.getLowerValues().size() != 1 || partitionKeyDesc.getUpperValues().size() != 1) { + throw new AnalysisException("partition column number in multi partition clause must be one " + + "but START column size is " + partitionKeyDesc.getLowerValues().size() + + ", END column size is " + partitionKeyDesc.getUpperValues().size() + "."); + } + + String startString = partitionKeyDesc.getLowerValues().get(0).getStringValue(); + String endString = partitionKeyDesc.getUpperValues().get(0).getStringValue(); + + try 
{ + this.startDateTimeFormat = dateFormat(this.timeUnitType, startString); + this.endDateTimeFormat = dateFormat(this.timeUnitType, endString); + this.startTime = TimeUtils.formatDateTimeAndFullZero(startString, startDateTimeFormat); + this.endTime = TimeUtils.formatDateTimeAndFullZero(endString, endDateTimeFormat); + } catch (Exception e) { + throw new AnalysisException("Multi partition START or END time format is illegal."); + } + + if (!this.startTime.isBefore(this.endTime)) { + throw new AnalysisException("Multi partition start time should be less than end time."); + } + } + + + private void timeIntervalTrans() throws AnalysisException { + this.timeInterval = partitionKeyDesc.getTimeInterval(); + String timeType = partitionKeyDesc.getTimeType(); + if (timeType == null) { + throw new AnalysisException("Unknown time interval type for multi partition."); + } + if (this.timeInterval <= 0) { + throw new AnalysisException("Multi partition time interval must be larger than zero."); + } + try { + this.timeUnitType = TimestampArithmeticExpr.TimeUnit.valueOf(timeType); + } catch (Exception e) { + throw new AnalysisException("Multi partition got an unknown time interval type: " + + timeType); + } + if (!timeUnitTypeMultiPartition.contains(this.timeUnitType)) { + throw new AnalysisException("Multi partition does not support time interval type: " + + this.timeUnitType); + } + } + + private static DateTimeFormatter dateFormat(TimestampArithmeticExpr.TimeUnit timeUnitType, + String dateTimeStr) throws AnalysisException { + DateTimeFormatter res; + switch (timeUnitType) { + case HOUR: + if (dateTimeStr.length() == 10) { + res = DateTimeFormatter.ofPattern(HOURS_FORMAT); + } else if (dateTimeStr.length() == 13) { + res = DateTimeFormatter.ofPattern(HOUR_FORMAT); + } else if (dateTimeStr.length() == 19) { + res = DateTimeFormatter.ofPattern(DATETIME_FORMAT); + } else { + throw new AnalysisException("cannot probe datetime(hour) format: " + dateTimeStr); + } + break; + case DAY: case WEEK: + if (dateTimeStr.length() == 8) { + res = DateTimeFormatter.ofPattern(DATES_FORMAT); + } else if (dateTimeStr.length() == 10) { + res = DateTimeFormatter.ofPattern(DATE_FORMAT); + } else if (dateTimeStr.length() == 19) { + res = DateTimeFormatter.ofPattern(DATETIME_FORMAT); + } else { + throw new AnalysisException("cannot probe datetime(day or week) format: " + dateTimeStr); + } + break; + case MONTH: + if (dateTimeStr.length() == 6) { + res = DateTimeFormatter.ofPattern(MONTHS_FORMAT); + } else if (dateTimeStr.length() == 7) { + res = DateTimeFormatter.ofPattern(MONTH_FORMAT); + } else if (dateTimeStr.length() == 10) { + res = DateTimeFormatter.ofPattern(DATE_FORMAT); + } else if (dateTimeStr.length() == 19) { + res = DateTimeFormatter.ofPattern(DATETIME_FORMAT); + } else { + throw new AnalysisException("cannot probe datetime(month) format: " + dateTimeStr); + } + break; + case YEAR: + if (dateTimeStr.length() == 4) { + res = DateTimeFormatter.ofPattern(YEAR_FORMAT); + } else if (dateTimeStr.length() == 8) { + res = DateTimeFormatter.ofPattern(DATES_FORMAT); + } else if (dateTimeStr.length() == 10) { + res = DateTimeFormatter.ofPattern(DATE_FORMAT); + } else if (dateTimeStr.length() == 19) { + res = DateTimeFormatter.ofPattern(DATETIME_FORMAT); + } else { + throw new AnalysisException("cannot probe datetime(year) format: " + dateTimeStr); + } + break; + default: + throw new AnalysisException("Multi partition does not support time interval type: " + + timeUnitType); + } + return res; } 
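+ // Illustrative examples of the length-based probing in dateFormat above (values as written in DDL): HOUR accepts "2022120102" (yyyyMMddHH), "2022-12-01 02" (yyyy-MM-dd HH) or a full datetime; DAY/WEEK accept "20221201" or "2022-12-01"; MONTH accepts "202212" or "2022-12"; YEAR accepts "2022".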
+ + private DateTimeFormatter dateTypeFormat() { + return DateTimeFormatter.ofPattern(this.timeUnitType.equals(TimeUnit.HOUR) ? DATETIME_FORMAT : DATE_FORMAT); + } + +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionDesc.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionDesc.java index 37130c41eb..8bb42657ae 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionDesc.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionDesc.java @@ -43,12 +43,27 @@ public class PartitionDesc { protected PartitionType type; public PartitionDesc(List<String> partitionColNames, - List<SinglePartitionDesc> singlePartitionDescs) { + List<AllPartitionDesc> allPartitionDescs) throws AnalysisException { this.partitionColNames = partitionColNames; - this.singlePartitionDescs = singlePartitionDescs; - if (this.singlePartitionDescs == null) { - this.singlePartitionDescs = Lists.newArrayList(); + boolean isMultiPartition = false; + List<SinglePartitionDesc> tmpList = Lists.newArrayList(); + if (allPartitionDescs != null) { + for (AllPartitionDesc allPartitionDesc : allPartitionDescs) { + if (allPartitionDesc instanceof SinglePartitionDesc) { + tmpList.add((SinglePartitionDesc) allPartitionDesc); + } else if (allPartitionDesc instanceof MultiPartitionDesc) { + isMultiPartition = true; + List<SinglePartitionDesc> singlePartitionDescList + = ((MultiPartitionDesc) allPartitionDesc).getSinglePartitionDescList(); + tmpList.addAll(singlePartitionDescList); + } + } } + if (isMultiPartition && partitionColNames.size() != 1) { + throw new AnalysisException("multi partition expects exactly 1 partition column but provided " + + partitionColNames.size() + "."); + } + this.singlePartitionDescs = tmpList; } public List<SinglePartitionDesc> getSinglePartitionDescs() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionKeyDesc.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionKeyDesc.java index 767e3626b4..28265d2a12 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionKeyDesc.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/PartitionKeyDesc.java @@ -41,6 +41,9 @@ public class PartitionKeyDesc { private List<List<PartitionValue>> inValues; private PartitionKeyValueType partitionKeyValueType; + private Long timeInterval; + private String timeType; + public static PartitionKeyDesc createMaxKeyDesc() { return MAX_VALUE; } @@ -71,6 +74,28 @@ public class PartitionKeyDesc { return desc; } + public static PartitionKeyDesc createMultiFixed( + List<PartitionValue> lowerValues, + List<PartitionValue> upperValues, + Long timeInterval, + String timeType) { + PartitionKeyDesc desc = new PartitionKeyDesc(); + desc.lowerValues = lowerValues; + desc.upperValues = upperValues; + desc.timeInterval = timeInterval; + desc.timeType = timeType; + desc.partitionKeyValueType = PartitionKeyValueType.FIXED; + return desc; + } + + public Long getTimeInterval() { + return timeInterval; + } + + public String getTimeType() { + return timeType; + } + public List<PartitionValue> getLowerValues() { return lowerValues; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RangePartitionDesc.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RangePartitionDesc.java index ebcee3c319..3d9cae60e5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/RangePartitionDesc.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/analysis/RangePartitionDesc.java @@ -32,8 +32,8 @@ import java.util.Map; public class RangePartitionDesc extends PartitionDesc { public RangePartitionDesc(List partitionColNames, - List singlePartitionDescs) { - super(partitionColNames, singlePartitionDescs); + List allPartitionDescs) throws AnalysisException { + super(partitionColNames, allPartitionDescs); type = org.apache.doris.catalog.PartitionType.RANGE; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SinglePartitionDesc.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SinglePartitionDesc.java index f58b8048da..06b4bcfa71 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SinglePartitionDesc.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SinglePartitionDesc.java @@ -32,7 +32,7 @@ import com.google.common.base.Preconditions; import java.util.Map; -public class SinglePartitionDesc { +public class SinglePartitionDesc implements AllPartitionDesc { private boolean isAnalyzed; private boolean ifNotExists; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/Config.java b/fe/fe-core/src/main/java/org/apache/doris/common/Config.java index a9c95fe85b..e22913fe52 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/Config.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/Config.java @@ -1474,6 +1474,14 @@ public class Config extends ConfigBase { @ConfField(mutable = true, masterOnly = true) public static int max_dynamic_partition_num = 500; + /** + * Used to limit the maximum number of partitions that can be created when creating multi partition, + * to avoid creating too many partitions at one time. + * The number is determined by "start" and "end" in the multi partition parameters. 
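+ * For example, FROM ("2022-12-01") TO ("2022-12-31") INTERVAL 1 DAY expands to 30 partitions in a single statement.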
+ */ + @ConfField(mutable = true, masterOnly = true) + public static int max_multi_partition_num = 4096; + /** * Control the max num of backup/restore job per db */ diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/DynamicPartitionUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/DynamicPartitionUtil.java index ffafe67ac7..e87b48d1a0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/DynamicPartitionUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/DynamicPartitionUtil.java @@ -99,7 +99,7 @@ public class DynamicPartitionUtil { } } - private static void checkPrefix(String prefix) throws DdlException { + public static void checkPrefix(String prefix) throws DdlException { try { FeNameFormat.checkPartitionName(prefix); } catch (AnalysisException e) { @@ -157,7 +157,7 @@ public class DynamicPartitionUtil { } } - private static boolean checkCreateHistoryPartition(String create) throws DdlException { + public static boolean checkCreateHistoryPartition(String create) throws DdlException { if (Strings.isNullOrEmpty(create) || (!Boolean.TRUE.toString().equalsIgnoreCase(create) && !Boolean.FALSE.toString().equalsIgnoreCase(create))) { @@ -181,7 +181,7 @@ public class DynamicPartitionUtil { } } - private static void checkStartDayOfMonth(String val) throws DdlException { + public static void checkStartDayOfMonth(String val) throws DdlException { if (Strings.isNullOrEmpty(val)) { throw new DdlException("Invalid properties: " + DynamicPartitionProperty.START_DAY_OF_MONTH); } @@ -197,7 +197,7 @@ public class DynamicPartitionUtil { } } - private static void checkStartDayOfWeek(String val) throws DdlException { + public static void checkStartDayOfWeek(String val) throws DdlException { if (Strings.isNullOrEmpty(val)) { throw new DdlException("Invalid properties: " + DynamicPartitionProperty.START_DAY_OF_WEEK); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/TimeUtils.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/TimeUtils.java index 4317ca4019..b5dd620f25 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/TimeUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/TimeUtils.java @@ -36,7 +36,13 @@ import java.text.ParseException; import java.text.ParsePosition; import java.text.SimpleDateFormat; import java.time.DateTimeException; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.time.ZoneId; +import java.time.format.DateTimeFormatter; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; import java.util.Date; import java.util.SimpleTimeZone; import java.util.TimeZone; @@ -277,4 +283,26 @@ public class TimeUtils { } throw new DdlException("Parse time zone " + value + " error"); } + + // format string DateTime And Full Zero for hour,minute,second + public static LocalDateTime formatDateTimeAndFullZero(String datetime, DateTimeFormatter formatter) { + TemporalAccessor temporal = formatter.parse(datetime); + int year = temporal.isSupported(ChronoField.YEAR) + ? temporal.get(ChronoField.YEAR) : 0; + int month = temporal.isSupported(ChronoField.MONTH_OF_YEAR) + ? temporal.get(ChronoField.MONTH_OF_YEAR) : 1; + int day = temporal.isSupported(ChronoField.DAY_OF_MONTH) + ? temporal.get(ChronoField.DAY_OF_MONTH) : 1; + int hour = temporal.isSupported(ChronoField.HOUR_OF_DAY) + ? temporal.get(ChronoField.HOUR_OF_DAY) : 0; + int minute = temporal.isSupported(ChronoField.MINUTE_OF_HOUR) + ? 
temporal.get(ChronoField.MINUTE_OF_HOUR) : 0; + int second = temporal.isSupported(ChronoField.SECOND_OF_MINUTE) + ? temporal.get(ChronoField.SECOND_OF_MINUTE) : 0; + int milliSecond = temporal.isSupported(ChronoField.MILLI_OF_SECOND) + ? temporal.get(ChronoField.MILLI_OF_SECOND) : 0; + return LocalDateTime.of(LocalDate.of(year, month, day), + LocalTime.of(hour, minute, second, milliSecond * 1000000)); + } + } diff --git a/regression-test/suites/partition_p0/multi_partition/test_multi_partition.groovy b/regression-test/suites/partition_p0/multi_partition/test_multi_partition.groovy new file mode 100644 index 0000000000..3f775143de --- /dev/null +++ b/regression-test/suites/partition_p0/multi_partition/test_multi_partition.groovy @@ -0,0 +1,222 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_multi_partition") { + // todo: test multi partitions : create table partition ... + sql "drop table if exists multi_par" + sql """ + CREATE TABLE IF NOT EXISTS multi_par ( + k1 tinyint NOT NULL, + k2 smallint NOT NULL, + k3 int NOT NULL, + k4 bigint NOT NULL, + k5 decimal(9, 3) NOT NULL, + k6 char(5) NOT NULL, + k10 date NOT NULL, + k11 datetime NOT NULL, + k12 datev2 NOT NULL, + k13 datetimev2 NOT NULL, + k14 datetimev2(3) NOT NULL, + k15 datetimev2(6) NOT NULL, + k7 varchar(20) NOT NULL, + k8 double max NOT NULL, + k9 float sum NOT NULL ) + AGGREGATE KEY(k1,k2,k3,k4,k5,k6,k10,k11,k12,k13,k14,k15,k7) + PARTITION BY RANGE(k10) ( + FROM ("2022-12-01") TO ("2022-12-31") INTERVAL 1 DAY + ) + DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = "1") + """ + List> result1 = sql "show tables like 'multi_par'" + logger.info("${result1}") + assertEquals(result1.size(), 1) + List> result2 = sql "show partitions from multi_par" + logger.info("${result2}") + assertEquals(result2.size(), 30) + sql "drop table multi_par" + + + sql "drop table if exists multi_par1" + sql """ + CREATE TABLE IF NOT EXISTS multi_par1 ( + k1 tinyint NOT NULL, + k2 smallint NOT NULL, + k3 int NOT NULL, + k4 bigint NOT NULL, + k5 decimal(9, 3) NOT NULL, + k6 char(5) NOT NULL, + k10 date NOT NULL, + k11 datetime NOT NULL, + k12 datev2 NOT NULL, + k13 datetimev2 NOT NULL, + k14 datetimev2(3) NOT NULL, + k15 datetimev2(6) NOT NULL, + k7 varchar(20) NOT NULL, + k8 double max NOT NULL, + k9 float sum NOT NULL ) + AGGREGATE KEY(k1,k2,k3,k4,k5,k6,k10,k11,k12,k13,k14,k15,k7) + PARTITION BY RANGE(k10) ( + FROM ("2000-11-14") TO ("2021-11-14") INTERVAL 1 YEAR, + FROM ("2021-11-14") TO ("2022-11-14") INTERVAL 1 MONTH, + FROM ("2022-11-14") TO ("2023-01-03") INTERVAL 1 WEEK, + FROM ("2023-01-03") TO ("2023-01-14") INTERVAL 1 DAY, + PARTITION p_20230114 VALUES [('2023-01-14'), ('2023-01-15')) + ) + DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = "1") + """ + 
result1 = sql "show tables like 'multi_par1'" + logger.info("${result1}") + assertEquals(result1.size(), 1) + result2 = sql "show partitions from multi_par1" + logger.info("${result2}") + assertEquals(result2.size(), 55) + sql "drop table multi_par1" + + + sql "drop table if exists multi_par2" + sql """ + CREATE TABLE IF NOT EXISTS multi_par2 ( + k1 tinyint NOT NULL, + k2 smallint NOT NULL, + k3 int NOT NULL, + k4 bigint NOT NULL, + k5 decimal(9, 3) NOT NULL, + k6 char(5) NOT NULL, + k10 date NOT NULL, + k11 datetime NOT NULL, + k12 datev2 NOT NULL, + k13 datetimev2 NOT NULL, + k14 datetimev2(3) NOT NULL, + k15 datetimev2(6) NOT NULL, + k7 varchar(20) NOT NULL, + k8 double max NOT NULL, + k9 float sum NOT NULL ) + AGGREGATE KEY(k1,k2,k3,k4,k5,k6,k10,k11,k12,k13,k14,k15,k7) + PARTITION BY RANGE(k11) ( + FROM ("2022-12-01 02") TO ("2022-12-02 02") INTERVAL 1 HOUR, + FROM ("2022-12-02 02:00:00") TO ("2022-12-03 02:00:00") INTERVAL 1 HOUR + ) + DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = "1") + """ + result1 = sql "show tables like 'multi_par2'" + logger.info("${result1}") + assertEquals(result1.size(), 1) + result2 = sql "show partitions from multi_par2" + logger.info("${result2}") + assertEquals(result2.size(), 48) + sql "drop table multi_par2" + + + sql "drop table if exists multi_par3" + sql """ + CREATE TABLE IF NOT EXISTS multi_par3 ( + k1 tinyint NOT NULL, + k2 smallint NOT NULL, + k3 int NOT NULL, + k4 bigint NOT NULL, + k5 decimal(9, 3) NOT NULL, + k6 char(5) NOT NULL, + k10 date NOT NULL, + k11 datetime NOT NULL, + k12 datev2 NOT NULL, + k13 datetimev2 NOT NULL, + k14 datetimev2(3) NOT NULL, + k15 datetimev2(6) NOT NULL, + k7 varchar(20) NOT NULL, + k8 double max NOT NULL, + k9 float sum NOT NULL ) + AGGREGATE KEY(k1,k2,k3,k4,k5,k6,k10,k11,k12,k13,k14,k15,k7) + PARTITION BY RANGE(k11) ( + FROM ("2022-12-01 02") TO ("2022-12-02 02") INTERVAL 1 HOUR, + FROM ("2022-12-02 02:00:00") TO ("2022-12-03 02:00:00") INTERVAL 1 HOUR + ) + DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = "1") + """ + result1 = sql "show tables like 'multi_par3'" + logger.info("${result1}") + assertEquals(result1.size(), 1) + result2 = sql "show partitions from multi_par3" + logger.info("${result2}") + assertEquals(result2.size(), 48) + sql "drop table multi_par3" + + + sql "drop table if exists multi_par4" + sql """ + CREATE TABLE IF NOT EXISTS multi_par4 ( + k1 tinyint NOT NULL, + k2 smallint NOT NULL, + k3 int NOT NULL, + k4 bigint NOT NULL, + k5 decimal(9, 3) NOT NULL, + k6 char(5) NOT NULL, + k10 date NOT NULL, + k11 datetime NOT NULL, + k12 datev2 NOT NULL, + k13 datetimev2 NOT NULL, + k14 datetimev2(3) NOT NULL, + k15 datetimev2(6) NOT NULL, + k7 varchar(20) NOT NULL, + k8 double max NOT NULL, + k9 float sum NOT NULL ) + AGGREGATE KEY(k1,k2,k3,k4,k5,k6,k10,k11,k12,k13,k14,k15,k7) + PARTITION BY RANGE(k12) ( + FROM ("2022-12-01") TO ("2022-12-31") INTERVAL 1 DAY + ) + DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = "1") + """ + result1 = sql "show tables like 'multi_par4'" + logger.info("${result1}") + assertEquals(result1.size(), 1) + result2 = sql "show partitions from multi_par4" + logger.info("${result2}") + assertEquals(result2.size(), 30) + sql "drop table multi_par4" + + sql "drop table if exists multi_par5" + sql """ + CREATE TABLE IF NOT EXISTS multi_par5 ( + k1 tinyint NOT NULL, + k2 smallint NOT NULL, + k3 int NOT NULL, + k4 bigint NOT NULL, + k5 decimal(9, 3) NOT NULL, + k6 char(5) NOT NULL, + k10 date NOT NULL, + k11 datetime NOT NULL, + k12 
datev2 NOT NULL, + k13 datetimev2 NOT NULL, + k14 datetimev2(3) NOT NULL, + k15 datetimev2(6) NOT NULL, + k7 varchar(20) NOT NULL, + k8 double max NOT NULL, + k9 float sum NOT NULL ) + AGGREGATE KEY(k1,k2,k3,k4,k5,k6,k10,k11,k12,k13,k14,k15,k7) + PARTITION BY RANGE(k13) ( + FROM ("2022-12-01 02") TO ("2022-12-02 02") INTERVAL 1 HOUR, + FROM ("2022-12-02 02:00:00") TO ("2022-12-03 02:00:00") INTERVAL 1 HOUR + ) + DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = "1") + """ + result1 = sql "show tables like 'multi_par5'" + logger.info("${result1}") + assertEquals(result1.size(), 1) + result2 = sql "show partitions from multi_par5" + logger.info("${result2}") + assertEquals(result2.size(), 48) + sql "drop table multi_par5" +}
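For reference, a minimal sketch (illustrative only, not part of the patch) of how the FE-side pieces added above compose. It assumes Guava's Lists/Maps are on the classpath, uses a hypothetical wrapper class name, and relies on SinglePartitionDesc's existing getPartitionName() accessor:

import org.apache.doris.analysis.MultiPartitionDesc;
import org.apache.doris.analysis.PartitionKeyDesc;
import org.apache.doris.analysis.PartitionValue;
import org.apache.doris.analysis.SinglePartitionDesc;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;

public class MultiPartitionSketch {
    public static void main(String[] args) throws Exception {
        // FROM ("2022-12-01") TO ("2022-12-04") INTERVAL 1 DAY, as built by the new
        // fixed_multi_partition_key_desc grammar rule.
        PartitionKeyDesc keyDesc = PartitionKeyDesc.createMultiFixed(
                Lists.newArrayList(new PartitionValue("2022-12-01")),
                Lists.newArrayList(new PartitionValue("2022-12-04")),
                1L, "DAY");
        // MultiPartitionDesc validates the bounds and interval, then expands them into
        // per-day SinglePartitionDescs named with the default "p_" prefix.
        MultiPartitionDesc multiDesc = new MultiPartitionDesc(keyDesc, Maps.newHashMap());
        for (SinglePartitionDesc single : multiDesc.getSinglePartitionDescList()) {
            System.out.println(single.getPartitionName()); // p_20221201, p_20221202, p_20221203
        }
    }
}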