[feature](partition) support new create partition syntax (#13772)

Create partitions using the following syntax:
```
PARTITION BY RANGE(event_day)(
        FROM ("2000-11-14") TO ("2021-11-14") INTERVAL 1 YEAR,
        FROM ("2021-11-14") TO ("2022-11-14") INTERVAL 1 MONTH,
        FROM ("2022-11-14") TO ("2023-01-03") INTERVAL 1 WEEK,
        FROM ("2023-01-03") TO ("2023-01-14") INTERVAL 1 DAY,
        PARTITION p_20230114 VALUES [('2023-01-14'), ('2023-01-15'))
)

PARTITION BY RANGE(event_time)(
        FROM ("2023-01-03 12") TO ("2023-01-14 22") INTERVAL 1 HOUR
)
```
This syntax can create year/month/week/day/hour date partitions in a batch,
and it remains compatible with the existing single-partition syntax.
This commit is contained in:
catpineapple
2022-11-12 20:52:37 +08:00
committed by GitHub
parent 376b4fda9f
commit beaf2fcaf6
12 changed files with 682 additions and 21 deletions

View File

@ -781,8 +781,10 @@ nonterminal List<PartitionValue> partition_value_list;
nonterminal List<PartitionValue> partition_key_item_list;
nonterminal List<List<PartitionValue>> list_partition_values_list;
nonterminal SinglePartitionDesc single_partition_desc;
nonterminal List<SinglePartitionDesc> opt_single_partition_desc_list;
nonterminal List<SinglePartitionDesc> single_partition_desc_list;
nonterminal List<AllPartitionDesc> opt_all_partition_desc_list;
nonterminal List<AllPartitionDesc> all_partition_desc_list;
nonterminal PartitionKeyDesc fixed_multi_partition_key_desc;
nonterminal MultiPartitionDesc multi_partition_desc;
nonterminal List<AccessPrivilege> privilege_list;
nonterminal List<String> string_list;
@ -2812,13 +2814,13 @@ opt_partition ::=
:}
/* Range partition */
| KW_PARTITION KW_BY KW_RANGE LPAREN ident_list:columns RPAREN
LPAREN opt_single_partition_desc_list:list RPAREN
LPAREN opt_all_partition_desc_list:list RPAREN
{:
RESULT = new RangePartitionDesc(columns, list);
:}
/* List partition */
| KW_PARTITION KW_BY KW_LIST LPAREN ident_list:columns RPAREN
LPAREN opt_single_partition_desc_list:list RPAREN
LPAREN opt_all_partition_desc_list:list RPAREN
{:
RESULT = new ListPartitionDesc(columns, list);
:}
@ -2886,19 +2888,19 @@ opt_keys ::=
:}
;
opt_single_partition_desc_list ::=
opt_all_partition_desc_list ::=
/* Empty */
{:
RESULT = null;
:}
| single_partition_desc_list:list
| all_partition_desc_list:list
{:
RESULT = list;
:}
;
single_partition_desc_list ::=
single_partition_desc_list:list COMMA single_partition_desc:desc
all_partition_desc_list ::=
all_partition_desc_list:list COMMA single_partition_desc:desc
{:
list.add(desc);
RESULT = list;
@ -2907,6 +2909,15 @@ single_partition_desc_list ::=
{:
RESULT = Lists.newArrayList(desc);
:}
| all_partition_desc_list:list COMMA multi_partition_desc:desc
{:
list.add(desc);
RESULT = list;
:}
| multi_partition_desc:desc
{:
RESULT = Lists.newArrayList(desc);
:}
;
single_partition_desc ::=
@ -2928,6 +2939,22 @@ single_partition_desc ::=
:}
;
multi_partition_desc ::=
fixed_multi_partition_key_desc:desc
opt_key_value_map:properties
{:
RESULT = new MultiPartitionDesc(desc, properties);
:}
;
fixed_multi_partition_key_desc ::=
// FROM (lower) TO (upper) INTERVAL time_interval time_type
KW_FROM LPAREN partition_key_list:lower RPAREN KW_TO LPAREN partition_key_list:upper RPAREN KW_INTERVAL INTEGER_LITERAL:time_interval ident:time_unit
{:
RESULT = PartitionKeyDesc.createMultiFixed(lower, upper, time_interval, time_unit);
:}
;
partition_key_desc ::=
KW_MAX_VALUE
{:

View File

@ -0,0 +1,21 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.doris.analysis;
/**
 * Marker interface for one partition-definition item inside a PARTITION BY clause.
 * Implemented by SinglePartitionDesc (a single "PARTITION p ..." item) and by
 * MultiPartitionDesc (a batch "FROM ... TO ... INTERVAL ..." item), so the parser
 * can collect both kinds into one List&lt;AllPartitionDesc&gt;.
 */
public interface AllPartitionDesc {
}

View File

@ -33,8 +33,8 @@ import java.util.Map;
public class ListPartitionDesc extends PartitionDesc {
public ListPartitionDesc(List<String> partitionColNames,
List<SinglePartitionDesc> singlePartitionDescs) {
super(partitionColNames, singlePartitionDescs);
List<AllPartitionDesc> allPartitionDescs) throws AnalysisException {
super(partitionColNames, allPartitionDescs);
type = PartitionType.LIST;
}

View File

@ -0,0 +1,315 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.doris.analysis;
import org.apache.doris.analysis.TimestampArithmeticExpr.TimeUnit;
import org.apache.doris.catalog.DynamicPartitionProperty;
import org.apache.doris.common.AnalysisException;
import org.apache.doris.common.Config;
import org.apache.doris.common.DdlException;
import org.apache.doris.common.util.DynamicPartitionUtil;
import org.apache.doris.common.util.TimeUtils;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import java.time.DayOfWeek;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.time.temporal.ChronoField;
import java.time.temporal.WeekFields;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
 * Describes a batch of range partitions declared with the multi-partition syntax
 * "FROM (lower) TO (upper) INTERVAL n unit" in a CREATE TABLE statement.
 * It expands lazily into an equivalent list of {@link SinglePartitionDesc},
 * one per interval step, which the rest of partition handling already understands.
 */
public class MultiPartitionDesc implements AllPartitionDesc {
    // Patterns used both to probe the user-supplied bound strings and to
    // render generated partition names / partition key values.
    public static final String HOURS_FORMAT = "yyyyMMddHH";
    public static final String HOUR_FORMAT = "yyyy-MM-dd HH";
    public static final String DATES_FORMAT = "yyyyMMdd";
    public static final String DATE_FORMAT = "yyyy-MM-dd";
    public static final String MONTHS_FORMAT = "yyyyMM";
    public static final String MONTH_FORMAT = "yyyy-MM";
    public static final String YEAR_FORMAT = "yyyy";
    public static final String DATETIME_FORMAT = "yyyy-MM-dd HH:mm:ss";

    // Default prefix for generated partition names; can be overridden by the
    // dynamic-partition "prefix" property.
    private final String partitionPrefix = "p_";
    private LocalDateTime startTime;
    private LocalDateTime endTime;
    private DateTimeFormatter startDateTimeFormat;
    private DateTimeFormatter endDateTimeFormat;
    private Long timeInterval;
    private final PartitionKeyDesc partitionKeyDesc;
    private TimestampArithmeticExpr.TimeUnit timeUnitType;
    private final Map<String, String> properties;
    // Lazily built expansion of this multi-partition clause.
    private final List<SinglePartitionDesc> singlePartitionDescList = Lists.newArrayList();

    // Interval units supported by the multi-partition syntax.
    private final ImmutableSet<TimestampArithmeticExpr.TimeUnit> timeUnitTypeMultiPartition = ImmutableSet.of(
            TimestampArithmeticExpr.TimeUnit.HOUR,
            TimestampArithmeticExpr.TimeUnit.DAY,
            TimestampArithmeticExpr.TimeUnit.WEEK,
            TimestampArithmeticExpr.TimeUnit.MONTH,
            TimestampArithmeticExpr.TimeUnit.YEAR
    );
    private final Integer maxAllowedLimit = Config.max_multi_partition_num;

    /**
     * @param partitionKeyDesc FROM/TO bounds plus INTERVAL step, built by the parser
     * @param properties       partition properties; may carry dynamic-partition style
     *                         overrides (prefix, start_day_of_week, start_day_of_month)
     * @throws AnalysisException if the interval or the bounds are invalid
     */
    public MultiPartitionDesc(PartitionKeyDesc partitionKeyDesc,
            Map<String, String> properties) throws AnalysisException {
        this.partitionKeyDesc = partitionKeyDesc;
        this.properties = properties;
        // Resolve the interval first: timeTrans() needs timeUnitType in order
        // to probe the datetime format of the FROM/TO bounds.
        this.timeIntervalTrans();
        this.timeTrans();
    }

    /** Returns the expanded partition list, building it on first access. */
    public List<SinglePartitionDesc> getSinglePartitionDescList() throws AnalysisException {
        if (singlePartitionDescList.size() == 0) {
            buildMultiPartitionToSinglePartitionDescs();
        }
        return singlePartitionDescList;
    }

    /**
     * Expands the FROM/TO/INTERVAL clause into concrete SinglePartitionDescs.
     * Partition names are prefix + a timestamp rendered at the interval's
     * granularity (e.g. p_20221201, p_2022_47, p_202212, p_2022).
     */
    private List<SinglePartitionDesc> buildMultiPartitionToSinglePartitionDescs() throws AnalysisException {
        String partitionName;
        long countNum = 0;
        int startDayOfWeek = 1;
        int startDayOfMonth = 1;
        String partitionPrefix = this.partitionPrefix;
        LocalDateTime startTime = this.startTime;
        if (properties != null) {
            if (properties.containsKey(DynamicPartitionProperty.START_DAY_OF_WEEK)) {
                String dayOfWeekStr = properties.get(DynamicPartitionProperty.START_DAY_OF_WEEK);
                try {
                    DynamicPartitionUtil.checkStartDayOfWeek(dayOfWeekStr);
                } catch (DdlException e) {
                    throw new AnalysisException(e.getMessage());
                }
                startDayOfWeek = Integer.parseInt(dayOfWeekStr);
            }
            if (properties.containsKey(DynamicPartitionProperty.START_DAY_OF_MONTH)) {
                String dayOfMonthStr = properties.get(DynamicPartitionProperty.START_DAY_OF_MONTH);
                try {
                    DynamicPartitionUtil.checkStartDayOfMonth(dayOfMonthStr);
                } catch (DdlException e) {
                    throw new AnalysisException(e.getMessage());
                }
                startDayOfMonth = Integer.parseInt(dayOfMonthStr);
            }
            if (properties.containsKey(DynamicPartitionProperty.CREATE_HISTORY_PARTITION)) {
                // Multi partition already creates the historical range itself;
                // force the dynamic-partition flag off so it is not re-applied.
                properties.put(DynamicPartitionProperty.CREATE_HISTORY_PARTITION, "false");
            }
            if (properties.containsKey(DynamicPartitionProperty.PREFIX)) {
                partitionPrefix = properties.get(DynamicPartitionProperty.PREFIX);
                try {
                    DynamicPartitionUtil.checkPrefix(partitionPrefix);
                } catch (DdlException e) {
                    throw new AnalysisException(e.getMessage());
                }
            }
        }
        WeekFields weekFields = WeekFields.of(DayOfWeek.of(startDayOfWeek), 1);
        while (startTime.isBefore(this.endTime)) {
            PartitionValue lowerPartitionValue = new PartitionValue(startTime.format(dateTypeFormat()));
            switch (this.timeUnitType) {
                case HOUR:
                    partitionName = partitionPrefix + startTime.format(DateTimeFormatter.ofPattern(HOURS_FORMAT));
                    startTime = startTime.plusHours(timeInterval);
                    break;
                case DAY:
                    partitionName = partitionPrefix + startTime.format(DateTimeFormatter.ofPattern(DATES_FORMAT));
                    startTime = startTime.plusDays(timeInterval);
                    break;
                case WEEK:
                    LocalDate localDate = LocalDate.of(startTime.getYear(), startTime.getMonthValue(),
                            startTime.getDayOfMonth());
                    int weekOfYear = localDate.get(weekFields.weekOfYear());
                    partitionName = String.format("%s%s_%02d", partitionPrefix,
                            startTime.format(DateTimeFormatter.ofPattern(YEAR_FORMAT)), weekOfYear);
                    // Align to the configured first day of week before stepping.
                    // BUGFIX: was startDayOfMonth, which is out of the 1..7 range
                    // DAY_OF_WEEK accepts whenever start_day_of_month > 7.
                    startTime = startTime.with(ChronoField.DAY_OF_WEEK, startDayOfWeek);
                    startTime = startTime.plusWeeks(timeInterval);
                    break;
                case MONTH:
                    partitionName = partitionPrefix + startTime.format(DateTimeFormatter.ofPattern(MONTHS_FORMAT));
                    startTime = startTime.withDayOfMonth(startDayOfMonth);
                    startTime = startTime.plusMonths(timeInterval);
                    break;
                case YEAR:
                    partitionName = partitionPrefix + startTime.format(DateTimeFormatter.ofPattern(YEAR_FORMAT));
                    startTime = startTime.withDayOfYear(1);
                    startTime = startTime.plusYears(timeInterval);
                    break;
                default:
                    throw new AnalysisException("Multi build partition does not support time interval type: "
                            + this.timeUnitType);
            }
            // For aligned units the last step may overshoot; clamp it to the
            // declared upper bound (DAY steps end exactly, so no clamp needed).
            if (this.timeUnitType != TimestampArithmeticExpr.TimeUnit.DAY && startTime.isAfter(this.endTime)) {
                startTime = this.endTime;
            }
            PartitionValue upperPartitionValue = new PartitionValue(startTime.format(dateTypeFormat()));
            PartitionKeyDesc partitionKeyDesc = PartitionKeyDesc.createFixed(
                    Lists.newArrayList(lowerPartitionValue),
                    Lists.newArrayList(upperPartitionValue)
            );
            singlePartitionDescList.add(
                    new SinglePartitionDesc(
                            false,
                            partitionName,
                            partitionKeyDesc,
                            properties)
            );
            countNum++;
            if (countNum > maxAllowedLimit) {
                throw new AnalysisException("The number of Multi partitions too much, should not exceed:"
                        + maxAllowedLimit);
            }
        }
        return singlePartitionDescList;
    }

    /**
     * Validates the FROM/TO bounds (exactly one column each), probes their
     * datetime format and parses them into startTime/endTime.
     */
    private void timeTrans() throws AnalysisException {
        if (partitionKeyDesc.getLowerValues().size() != 1 || partitionKeyDesc.getUpperValues().size() != 1) {
            throw new AnalysisException("partition column number in multi partition clause must be one "
                    + "but START column size is " + partitionKeyDesc.getLowerValues().size()
                    + ", END column size is " + partitionKeyDesc.getUpperValues().size() + ".");
        }
        String startString = partitionKeyDesc.getLowerValues().get(0).getStringValue();
        String endString = partitionKeyDesc.getUpperValues().get(0).getStringValue();
        try {
            this.startDateTimeFormat = dateFormat(this.timeUnitType, startString);
            this.endDateTimeFormat = dateFormat(this.timeUnitType, endString);
            this.startTime = TimeUtils.formatDateTimeAndFullZero(startString, startDateTimeFormat);
            this.endTime = TimeUtils.formatDateTimeAndFullZero(endString, endDateTimeFormat);
        } catch (Exception e) {
            throw new AnalysisException("Multi build partition START or END time style is illegal.");
        }
        if (!this.startTime.isBefore(this.endTime)) {
            throw new AnalysisException("Multi build partition start time should less than end time.");
        }
    }

    /** Validates the INTERVAL step and resolves its unit to a supported TimeUnit. */
    private void timeIntervalTrans() throws AnalysisException {
        this.timeInterval = partitionKeyDesc.getTimeInterval();
        String timeType = partitionKeyDesc.getTimeType();
        if (timeType == null) {
            throw new AnalysisException("Unknown time interval type for Multi build partition.");
        }
        if (this.timeInterval <= 0) {
            throw new AnalysisException("Multi partition time interval must be larger than zero.");
        }
        try {
            this.timeUnitType = TimestampArithmeticExpr.TimeUnit.valueOf(timeType);
        } catch (Exception e) {
            throw new AnalysisException("Multi build partition got an unknown time interval type: "
                    + timeType);
        }
        if (!timeUnitTypeMultiPartition.contains(this.timeUnitType)) {
            throw new AnalysisException("Multi build partition does not support time interval type: "
                    + this.timeUnitType);
        }
    }

    /**
     * Probes the datetime pattern of a FROM/TO bound from its string length,
     * restricted to the shapes that make sense for the given interval unit.
     *
     * @throws AnalysisException when the string matches none of the accepted shapes
     */
    private static DateTimeFormatter dateFormat(TimestampArithmeticExpr.TimeUnit timeUnitType,
            String dateTimeStr) throws AnalysisException {
        DateTimeFormatter res;
        switch (timeUnitType) {
            case HOUR:
                if (dateTimeStr.length() == 10) {
                    res = DateTimeFormatter.ofPattern(HOURS_FORMAT);
                } else if (dateTimeStr.length() == 13) {
                    res = DateTimeFormatter.ofPattern(HOUR_FORMAT);
                } else if (dateTimeStr.length() == 19) {
                    res = DateTimeFormatter.ofPattern(DATETIME_FORMAT);
                } else {
                    throw new AnalysisException("can not probe datetime(hour) format:" + dateTimeStr);
                }
                break;
            case DAY: case WEEK:
                if (dateTimeStr.length() == 8) {
                    res = DateTimeFormatter.ofPattern(DATES_FORMAT);
                } else if (dateTimeStr.length() == 10) {
                    res = DateTimeFormatter.ofPattern(DATE_FORMAT);
                } else if (dateTimeStr.length() == 19) {
                    res = DateTimeFormatter.ofPattern(DATETIME_FORMAT);
                } else {
                    throw new AnalysisException("can not probe datetime(day or week) format:" + dateTimeStr);
                }
                break;
            case MONTH:
                if (dateTimeStr.length() == 6) {
                    res = DateTimeFormatter.ofPattern(MONTHS_FORMAT);
                } else if (dateTimeStr.length() == 7) {
                    res = DateTimeFormatter.ofPattern(MONTH_FORMAT);
                } else if (dateTimeStr.length() == 10) {
                    res = DateTimeFormatter.ofPattern(DATE_FORMAT);
                } else if (dateTimeStr.length() == 19) {
                    res = DateTimeFormatter.ofPattern(DATETIME_FORMAT);
                } else {
                    throw new AnalysisException("can not probe datetime(month) format:" + dateTimeStr);
                }
                break;
            case YEAR:
                if (dateTimeStr.length() == 4) {
                    res = DateTimeFormatter.ofPattern(YEAR_FORMAT);
                } else if (dateTimeStr.length() == 8) {
                    res = DateTimeFormatter.ofPattern(DATES_FORMAT);
                } else if (dateTimeStr.length() == 10) {
                    res = DateTimeFormatter.ofPattern(DATE_FORMAT);
                } else if (dateTimeStr.length() == 19) {
                    res = DateTimeFormatter.ofPattern(DATETIME_FORMAT);
                } else {
                    throw new AnalysisException("can not probe datetime(year) format:" + dateTimeStr);
                }
                break;
            default:
                throw new AnalysisException("Multi build partition does not support time interval type: "
                        + timeUnitType);
        }
        return res;
    }

    /**
     * Pattern used to render generated partition key values: full datetime for
     * HOUR granularity, plain date for everything else.
     */
    private DateTimeFormatter dateTypeFormat() {
        return DateTimeFormatter.ofPattern(this.timeUnitType.equals(TimeUnit.HOUR) ? DATETIME_FORMAT : DATE_FORMAT);
    }
}

View File

@ -43,12 +43,27 @@ public class PartitionDesc {
protected PartitionType type;
public PartitionDesc(List<String> partitionColNames,
List<SinglePartitionDesc> singlePartitionDescs) {
List<AllPartitionDesc> allPartitionDescs) throws AnalysisException {
this.partitionColNames = partitionColNames;
this.singlePartitionDescs = singlePartitionDescs;
if (this.singlePartitionDescs == null) {
this.singlePartitionDescs = Lists.newArrayList();
boolean isMultiPartition = false;
List<SinglePartitionDesc> tmpList = Lists.newArrayList();
if (allPartitionDescs != null) {
for (AllPartitionDesc allPartitionDesc : allPartitionDescs) {
if (allPartitionDesc instanceof SinglePartitionDesc) {
tmpList.add((SinglePartitionDesc) allPartitionDesc);
} else if (allPartitionDesc instanceof MultiPartitionDesc) {
isMultiPartition = true;
List<SinglePartitionDesc> singlePartitionDescList
= ((MultiPartitionDesc) allPartitionDesc).getSinglePartitionDescList();
tmpList.addAll(singlePartitionDescList);
}
}
}
if (isMultiPartition && partitionColNames.size() != 1) {
throw new AnalysisException("multi partition column size except 1 but provided "
+ partitionColNames.size() + ".");
}
this.singlePartitionDescs = tmpList;
}
public List<SinglePartitionDesc> getSinglePartitionDescs() {

View File

@ -41,6 +41,9 @@ public class PartitionKeyDesc {
private List<List<PartitionValue>> inValues;
private PartitionKeyValueType partitionKeyValueType;
private Long timeInterval;
private String timeType;
// Returns the shared sentinel desc representing the open MAXVALUE upper bound.
public static PartitionKeyDesc createMaxKeyDesc() {
return MAX_VALUE;
}
@ -71,6 +74,28 @@ public class PartitionKeyDesc {
return desc;
}
/**
 * Builds a FIXED-type key desc carrying the FROM/TO bounds plus the INTERVAL
 * step and unit of a multi-partition clause.
 *
 * @param lowerValues  the FROM bound (one value per partition column)
 * @param upperValues  the TO bound (one value per partition column)
 * @param timeInterval INTERVAL step count
 * @param timeType     INTERVAL unit name (e.g. DAY, WEEK)
 */
public static PartitionKeyDesc createMultiFixed(
        List<PartitionValue> lowerValues,
        List<PartitionValue> upperValues,
        Long timeInterval,
        String timeType) {
    PartitionKeyDesc result = new PartitionKeyDesc();
    result.partitionKeyValueType = PartitionKeyValueType.FIXED;
    result.lowerValues = lowerValues;
    result.upperValues = upperValues;
    result.timeType = timeType;
    result.timeInterval = timeInterval;
    return result;
}
// INTERVAL step count; only set by createMultiFixed (null for other key descs).
public Long getTimeInterval() {
return timeInterval;
}
// INTERVAL unit name; only set by createMultiFixed (null for other key descs).
public String getTimeType() {
return timeType;
}
// Lower (FROM) bound values, one per partition column.
public List<PartitionValue> getLowerValues() {
return lowerValues;
}

View File

@ -32,8 +32,8 @@ import java.util.Map;
public class RangePartitionDesc extends PartitionDesc {
public RangePartitionDesc(List<String> partitionColNames,
List<SinglePartitionDesc> singlePartitionDescs) {
super(partitionColNames, singlePartitionDescs);
List<AllPartitionDesc> allPartitionDescs) throws AnalysisException {
super(partitionColNames, allPartitionDescs);
type = org.apache.doris.catalog.PartitionType.RANGE;
}

View File

@ -32,7 +32,7 @@ import com.google.common.base.Preconditions;
import java.util.Map;
public class SinglePartitionDesc {
public class SinglePartitionDesc implements AllPartitionDesc {
private boolean isAnalyzed;
private boolean ifNotExists;

View File

@ -1474,6 +1474,14 @@ public class Config extends ConfigBase {
@ConfField(mutable = true, masterOnly = true)
public static int max_dynamic_partition_num = 500;
/**
* Used to limit the maximum number of partitions that can be created by one
* multi-partition clause, to avoid creating too many partitions at one time.
* The partition count is determined by the "FROM" and "TO" bounds and the "INTERVAL" step.
*/
@ConfField(mutable = true, masterOnly = true)
public static int max_multi_partition_num = 4096;
/**
* Control the max num of backup/restore job per db
*/

View File

@ -99,7 +99,7 @@ public class DynamicPartitionUtil {
}
}
private static void checkPrefix(String prefix) throws DdlException {
public static void checkPrefix(String prefix) throws DdlException {
try {
FeNameFormat.checkPartitionName(prefix);
} catch (AnalysisException e) {
@ -157,7 +157,7 @@ public class DynamicPartitionUtil {
}
}
private static boolean checkCreateHistoryPartition(String create) throws DdlException {
public static boolean checkCreateHistoryPartition(String create) throws DdlException {
if (Strings.isNullOrEmpty(create)
|| (!Boolean.TRUE.toString().equalsIgnoreCase(create)
&& !Boolean.FALSE.toString().equalsIgnoreCase(create))) {
@ -181,7 +181,7 @@ public class DynamicPartitionUtil {
}
}
private static void checkStartDayOfMonth(String val) throws DdlException {
public static void checkStartDayOfMonth(String val) throws DdlException {
if (Strings.isNullOrEmpty(val)) {
throw new DdlException("Invalid properties: " + DynamicPartitionProperty.START_DAY_OF_MONTH);
}
@ -197,7 +197,7 @@ public class DynamicPartitionUtil {
}
}
private static void checkStartDayOfWeek(String val) throws DdlException {
public static void checkStartDayOfWeek(String val) throws DdlException {
if (Strings.isNullOrEmpty(val)) {
throw new DdlException("Invalid properties: " + DynamicPartitionProperty.START_DAY_OF_WEEK);
}

View File

@ -36,7 +36,13 @@ import java.text.ParseException;
import java.text.ParsePosition;
import java.text.SimpleDateFormat;
import java.time.DateTimeException;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.time.temporal.ChronoField;
import java.time.temporal.TemporalAccessor;
import java.util.Date;
import java.util.SimpleTimeZone;
import java.util.TimeZone;
@ -277,4 +283,26 @@ public class TimeUtils {
}
throw new DdlException("Parse time zone " + value + " error");
}
/**
 * Parses {@code datetime} with {@code formatter} and builds a LocalDateTime,
 * filling in defaults for every field the pattern did not supply: month and
 * day default to 1, all time-of-day fields (hour/minute/second/milli) to 0.
 */
public static LocalDateTime formatDateTimeAndFullZero(String datetime, DateTimeFormatter formatter) {
    TemporalAccessor parsed = formatter.parse(datetime);
    int year = fieldOrDefault(parsed, ChronoField.YEAR, 0);
    int month = fieldOrDefault(parsed, ChronoField.MONTH_OF_YEAR, 1);
    int day = fieldOrDefault(parsed, ChronoField.DAY_OF_MONTH, 1);
    int hour = fieldOrDefault(parsed, ChronoField.HOUR_OF_DAY, 0);
    int minute = fieldOrDefault(parsed, ChronoField.MINUTE_OF_HOUR, 0);
    int second = fieldOrDefault(parsed, ChronoField.SECOND_OF_MINUTE, 0);
    int milliSecond = fieldOrDefault(parsed, ChronoField.MILLI_OF_SECOND, 0);
    LocalDate datePart = LocalDate.of(year, month, day);
    // LocalTime.of takes nanoseconds; convert millis accordingly.
    LocalTime timePart = LocalTime.of(hour, minute, second, milliSecond * 1000000);
    return LocalDateTime.of(datePart, timePart);
}

// Reads one field from the parse result, or the given default when the
// pattern did not populate it.
private static int fieldOrDefault(TemporalAccessor parsed, ChronoField field, int defaultValue) {
    return parsed.isSupported(field) ? parsed.get(field) : defaultValue;
}
}

View File

@ -0,0 +1,222 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Regression test for the batch partition-creation syntax:
// FROM (...) TO (...) INTERVAL n unit inside PARTITION BY RANGE.
suite("test_multi_partition") {
// todo: test multi partitions : create table partition ...
// Case 1: daily partitions over a DATE column (k10).
// [2022-12-01, 2022-12-31) stepped by 1 DAY -> 30 partitions expected.
sql "drop table if exists multi_par"
sql """
CREATE TABLE IF NOT EXISTS multi_par (
k1 tinyint NOT NULL,
k2 smallint NOT NULL,
k3 int NOT NULL,
k4 bigint NOT NULL,
k5 decimal(9, 3) NOT NULL,
k6 char(5) NOT NULL,
k10 date NOT NULL,
k11 datetime NOT NULL,
k12 datev2 NOT NULL,
k13 datetimev2 NOT NULL,
k14 datetimev2(3) NOT NULL,
k15 datetimev2(6) NOT NULL,
k7 varchar(20) NOT NULL,
k8 double max NOT NULL,
k9 float sum NOT NULL )
AGGREGATE KEY(k1,k2,k3,k4,k5,k6,k10,k11,k12,k13,k14,k15,k7)
PARTITION BY RANGE(k10) (
FROM ("2022-12-01") TO ("2022-12-31") INTERVAL 1 DAY
)
DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = "1")
"""
List<List<Object>> result1 = sql "show tables like 'multi_par'"
logger.info("${result1}")
assertEquals(result1.size(), 1)
List<List<Object>> result2 = sql "show partitions from multi_par"
logger.info("${result2}")
assertEquals(result2.size(), 30)
sql "drop table multi_par"
// Case 2: mixed granularities (YEAR, MONTH, WEEK, DAY) plus one explicit
// single partition in the same clause -> 55 partitions expected in total.
sql "drop table if exists multi_par1"
sql """
CREATE TABLE IF NOT EXISTS multi_par1 (
k1 tinyint NOT NULL,
k2 smallint NOT NULL,
k3 int NOT NULL,
k4 bigint NOT NULL,
k5 decimal(9, 3) NOT NULL,
k6 char(5) NOT NULL,
k10 date NOT NULL,
k11 datetime NOT NULL,
k12 datev2 NOT NULL,
k13 datetimev2 NOT NULL,
k14 datetimev2(3) NOT NULL,
k15 datetimev2(6) NOT NULL,
k7 varchar(20) NOT NULL,
k8 double max NOT NULL,
k9 float sum NOT NULL )
AGGREGATE KEY(k1,k2,k3,k4,k5,k6,k10,k11,k12,k13,k14,k15,k7)
PARTITION BY RANGE(k10) (
FROM ("2000-11-14") TO ("2021-11-14") INTERVAL 1 YEAR,
FROM ("2021-11-14") TO ("2022-11-14") INTERVAL 1 MONTH,
FROM ("2022-11-14") TO ("2023-01-03") INTERVAL 1 WEEK,
FROM ("2023-01-03") TO ("2023-01-14") INTERVAL 1 DAY,
PARTITION p_20230114 VALUES [('2023-01-14'), ('2023-01-15'))
)
DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = "1")
"""
result1 = sql "show tables like 'multi_par1'"
logger.info("${result1}")
assertEquals(result1.size(), 1)
result2 = sql "show partitions from multi_par1"
logger.info("${result2}")
assertEquals(result2.size(), 55)
sql "drop table multi_par1"
// Case 3: hourly partitions over a DATETIME column (k11), exercising both
// "yyyy-MM-dd HH" and full "yyyy-MM-dd HH:mm:ss" bound formats.
// Two 24-hour ranges stepped by 1 HOUR -> 48 partitions expected.
sql "drop table if exists multi_par2"
sql """
CREATE TABLE IF NOT EXISTS multi_par2 (
k1 tinyint NOT NULL,
k2 smallint NOT NULL,
k3 int NOT NULL,
k4 bigint NOT NULL,
k5 decimal(9, 3) NOT NULL,
k6 char(5) NOT NULL,
k10 date NOT NULL,
k11 datetime NOT NULL,
k12 datev2 NOT NULL,
k13 datetimev2 NOT NULL,
k14 datetimev2(3) NOT NULL,
k15 datetimev2(6) NOT NULL,
k7 varchar(20) NOT NULL,
k8 double max NOT NULL,
k9 float sum NOT NULL )
AGGREGATE KEY(k1,k2,k3,k4,k5,k6,k10,k11,k12,k13,k14,k15,k7)
PARTITION BY RANGE(k11) (
FROM ("2022-12-01 02") TO ("2022-12-02 02") INTERVAL 1 HOUR,
FROM ("2022-12-02 02:00:00") TO ("2022-12-03 02:00:00") INTERVAL 1 HOUR
)
DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = "1")
"""
result1 = sql "show tables like 'multi_par2'"
logger.info("${result1}")
assertEquals(result1.size(), 1)
result2 = sql "show partitions from multi_par2"
logger.info("${result2}")
assertEquals(result2.size(), 48)
sql "drop table multi_par2"
// NOTE(review): multi_par3 uses DDL identical to multi_par2 (same k11
// partition column and same bounds); presumably it was meant to cover a
// different column/format -- confirm against the original test intent.
sql "drop table if exists multi_par3"
sql """
CREATE TABLE IF NOT EXISTS multi_par3 (
k1 tinyint NOT NULL,
k2 smallint NOT NULL,
k3 int NOT NULL,
k4 bigint NOT NULL,
k5 decimal(9, 3) NOT NULL,
k6 char(5) NOT NULL,
k10 date NOT NULL,
k11 datetime NOT NULL,
k12 datev2 NOT NULL,
k13 datetimev2 NOT NULL,
k14 datetimev2(3) NOT NULL,
k15 datetimev2(6) NOT NULL,
k7 varchar(20) NOT NULL,
k8 double max NOT NULL,
k9 float sum NOT NULL )
AGGREGATE KEY(k1,k2,k3,k4,k5,k6,k10,k11,k12,k13,k14,k15,k7)
PARTITION BY RANGE(k11) (
FROM ("2022-12-01 02") TO ("2022-12-02 02") INTERVAL 1 HOUR,
FROM ("2022-12-02 02:00:00") TO ("2022-12-03 02:00:00") INTERVAL 1 HOUR
)
DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = "1")
"""
result1 = sql "show tables like 'multi_par3'"
logger.info("${result1}")
assertEquals(result1.size(), 1)
result2 = sql "show partitions from multi_par3"
logger.info("${result2}")
assertEquals(result2.size(), 48)
sql "drop table multi_par3"
// Case 4: daily partitions over a DATEV2 column (k12) -> 30 partitions expected.
sql "drop table if exists multi_par4"
sql """
CREATE TABLE IF NOT EXISTS multi_par4 (
k1 tinyint NOT NULL,
k2 smallint NOT NULL,
k3 int NOT NULL,
k4 bigint NOT NULL,
k5 decimal(9, 3) NOT NULL,
k6 char(5) NOT NULL,
k10 date NOT NULL,
k11 datetime NOT NULL,
k12 datev2 NOT NULL,
k13 datetimev2 NOT NULL,
k14 datetimev2(3) NOT NULL,
k15 datetimev2(6) NOT NULL,
k7 varchar(20) NOT NULL,
k8 double max NOT NULL,
k9 float sum NOT NULL )
AGGREGATE KEY(k1,k2,k3,k4,k5,k6,k10,k11,k12,k13,k14,k15,k7)
PARTITION BY RANGE(k12) (
FROM ("2022-12-01") TO ("2022-12-31") INTERVAL 1 DAY
)
DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = "1")
"""
result1 = sql "show tables like 'multi_par4'"
logger.info("${result1}")
assertEquals(result1.size(), 1)
result2 = sql "show partitions from multi_par4"
logger.info("${result2}")
assertEquals(result2.size(), 30)
sql "drop table multi_par4"
// Case 5: hourly partitions over a DATETIMEV2 column (k13) -> 48 partitions expected.
sql "drop table if exists multi_par5"
sql """
CREATE TABLE IF NOT EXISTS multi_par5 (
k1 tinyint NOT NULL,
k2 smallint NOT NULL,
k3 int NOT NULL,
k4 bigint NOT NULL,
k5 decimal(9, 3) NOT NULL,
k6 char(5) NOT NULL,
k10 date NOT NULL,
k11 datetime NOT NULL,
k12 datev2 NOT NULL,
k13 datetimev2 NOT NULL,
k14 datetimev2(3) NOT NULL,
k15 datetimev2(6) NOT NULL,
k7 varchar(20) NOT NULL,
k8 double max NOT NULL,
k9 float sum NOT NULL )
AGGREGATE KEY(k1,k2,k3,k4,k5,k6,k10,k11,k12,k13,k14,k15,k7)
PARTITION BY RANGE(k13) (
FROM ("2022-12-01 02") TO ("2022-12-02 02") INTERVAL 1 HOUR,
FROM ("2022-12-02 02:00:00") TO ("2022-12-03 02:00:00") INTERVAL 1 HOUR
)
DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = "1")
"""
result1 = sql "show tables like 'multi_par5'"
logger.info("${result1}")
assertEquals(result1.size(), 1)
result2 = sql "show partitions from multi_par5"
logger.info("${result2}")
assertEquals(result2.size(), 48)
sql "drop table multi_par5"
}