diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md
index b458dd6e9d..ea8fbdb7b9 100644
--- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md
+++ b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md
@@ -88,68 +88,9 @@ TO example_repo
EXCLUDE (example_tbl);
```
-4. Create a warehouse named hdfs_repo, rely on Baidu hdfs broker "hdfs_broker", the data root directory is: hdfs://hadoop-name-node:54310/path/to/repo/
-
-```
-CREATE REPOSITORY `hdfs_repo`
-WITH BROKER `hdfs_broker`
-ON LOCATION "hdfs://hadoop-name-node:54310/path/to/repo/"
-PROPERTIES
-(
- "username" = "user",
- "password" = "password"
-);
-```
-
-5. Create a repository named s3_repo to link cloud storage directly without going through the broker.
-
-```
-CREATE REPOSITORY `s3_repo`
-WITH S3
-ON LOCATION "s3://s3-repo"
-PROPERTIES
-(
- "AWS_ENDPOINT" = "http://s3-REGION.amazonaws.com",
- "AWS_ACCESS_KEY" = "AWS_ACCESS_KEY",
- "AWS_SECRET_KEY"="AWS_SECRET_KEY",
- "AWS_REGION" = "REGION"
-);
-```
-
-6. Create a repository named hdfs_repo to link HDFS directly without going through the broker.
-
-```
-CREATE REPOSITORY `hdfs_repo`
-WITH hdfs
-ON LOCATION "hdfs://hadoop-name-node:54310/path/to/repo/"
-PROPERTIES
-(
- "fs.defaultFS"="hdfs://hadoop-name-node:54310",
- "hadoop.username"="user"
-);
-```
-
-7. Create a repository named minio_repo to link minio storage directly through the s3 protocol.
-
-```
-CREATE REPOSITORY `minio_repo`
-WITH S3
-ON LOCATION "s3://minio_repo"
-PROPERTIES
-(
- "AWS_ENDPOINT" = "http://minio.com",
- "AWS_ACCESS_KEY" = "MINIO_USER",
- "AWS_SECRET_KEY"="MINIO_PASSWORD",
- "AWS_REGION" = "REGION",
- "use_path_style" = "true"
-);
-```
-
### Keywords
```text
BACKUP
```
### Best Practice
diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md
index d94693edca..fc74734e91 100644
--- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md
+++ b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md
@@ -139,10 +139,11 @@ PROPERTIES
);
```

7. Create a repository named minio_repo via temporary security credentials.
+
```
CREATE REPOSITORY `minio_repo`
WITH S3
@@ -157,7 +158,20 @@ PROPERTIES
)
```

+8. Create a repository named cos_repo using Tencent COS.
+
+```
+CREATE REPOSITORY `cos_repo`
+WITH S3
+ON LOCATION "s3://bucket1/"
+PROPERTIES
+(
+ "AWS_ACCESS_KEY" = "ak",
+ "AWS_SECRET_KEY" = "sk",
+ "AWS_ENDPOINT" = "http://cos.ap-beijing.myqcloud.com",
+ "AWS_REGION" = "ap-beijing"
+);
+```
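+
+Once created, the repository can be used as the target of a backup. A minimal
+sketch, assuming a database `example_db` and a table `example_tbl` already exist:
+
+```
+BACKUP SNAPSHOT example_db.snapshot_label1
+TO cos_repo
+ON (example_tbl);
+```
+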
### Keywords
diff --git a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md
index de766279dd..dc652e935d 100644
--- a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md
+++ b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md
@@ -90,65 +90,6 @@ TO example_repo
EXCLUDE (example_tbl);
```
-4. 创建名为 hdfs_repo 的仓库,依赖 Baidu hdfs broker "hdfs_broker",数据根目录为:hdfs://hadoop-name-node:54310/path/to/repo/
-
-```
-CREATE REPOSITORY `hdfs_repo`
-WITH BROKER `hdfs_broker`
-ON LOCATION "hdfs://hadoop-name-node:54310/path/to/repo/"
-PROPERTIES
-(
- "username" = "user",
- "password" = "password"
-);
-```
-
-5. 创建名为 s3_repo 的仓库,直接链接云存储,而不通过broker.
-
-```
-CREATE REPOSITORY `s3_repo`
-WITH S3
-ON LOCATION "s3://s3-repo"
-PROPERTIES
-(
- "AWS_ENDPOINT" = "http://s3-REGION.amazonaws.com",
- "AWS_ACCESS_KEY" = "AWS_ACCESS_KEY",
- "AWS_SECRET_KEY"="AWS_SECRET_KEY",
- "AWS_REGION" = "REGION"
-);
-```
-
-6. 创建名为 hdfs_repo 的仓库,直接链接HDFS,而不通过broker.
-
-```
-CREATE REPOSITORY `hdfs_repo`
-WITH hdfs
-ON LOCATION "hdfs://hadoop-name-node:54310/path/to/repo/"
-PROPERTIES
-(
- "fs.defaultFS"="hdfs://hadoop-name-node:54310",
- "hadoop.username"="user"
-);
-```
-
-7. 创建名为 minio_repo 的仓库,直接通过 s3 协议链接 minio.
-
-```
-CREATE REPOSITORY `minio_repo`
-WITH S3
-ON LOCATION "s3://minio_repo"
-PROPERTIES
-(
- "AWS_ENDPOINT" = "http://minio.com",
- "AWS_ACCESS_KEY" = "MINIO_USER",
- "AWS_SECRET_KEY"="MINIO_PASSWORD",
- "AWS_REGION" = "REGION",
- "use_path_style" = "true"
-);
-```
-
-
-
### Keywords
```text
diff --git a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md
index a9de18f84c..9a5977d996 100644
--- a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md
+++ b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md
@@ -136,10 +136,11 @@ PROPERTIES
"use_path_style" = "true"
);
```

7. 使用临时密钥创建名为 minio_repo 的仓库
+
```
CREATE REPOSITORY `minio_repo`
WITH S3
@@ -154,7 +155,20 @@ PROPERTIES
)
```

+8. 使用腾讯云 COS 创建名为 cos_repo 的仓库
+
+```
+CREATE REPOSITORY `cos_repo`
+WITH S3
+ON LOCATION "s3://bucket1/"
+PROPERTIES
+(
+ "AWS_ACCESS_KEY" = "ak",
+ "AWS_SECRET_KEY" = "sk",
+ "AWS_ENDPOINT" = "http://cos.ap-beijing.myqcloud.com",
+ "AWS_REGION" = "ap-beijing"
+);
+```
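+
+仓库创建完成后,即可作为备份的目标。下面是一个简单示例(假设数据库 example_db 和表 example_tbl 已存在):
+
+```
+BACKUP SNAPSHOT example_db.snapshot_label1
+TO cos_repo
+ON (example_tbl);
+```
+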
### Keywords
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
index 7d7351b71c..093dbcdd63 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
@@ -1320,8 +1320,13 @@ public class OlapTable extends Table {
}
// remove shadow index from copied table
- List<MaterializedIndex> shadowIndex = copied.getPartitions().stream().findFirst()
- .get().getMaterializedIndices(IndexExtState.SHADOW);
+ // NOTICE that there may be no partition in the table: findFirst() then
+ // returns an empty Optional, and calling get() on it would throw
+ // NoSuchElementException.
+ List<MaterializedIndex> shadowIndex = Lists.newArrayList();
+ Optional<Partition> firstPartition = copied.getPartitions().stream().findFirst();
+ if (firstPartition.isPresent()) {
+ shadowIndex = firstPartition.get().getMaterializedIndices(IndexExtState.SHADOW);
+ }
+
for (MaterializedIndex deleteIndex : shadowIndex) {
LOG.debug("copied table delete shadow index : {}", deleteIndex.getId());
copied.deleteIndexInfo(copied.getIndexNameById(deleteIndex.getId()));
diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/DynamicPartitionTableTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/DynamicPartitionTableTest.java
index ef60733e73..3a522ad3a6 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/catalog/DynamicPartitionTableTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/DynamicPartitionTableTest.java
@@ -20,6 +20,7 @@ package org.apache.doris.catalog;
import org.apache.doris.analysis.AlterTableStmt;
import org.apache.doris.analysis.CreateDbStmt;
import org.apache.doris.analysis.CreateTableStmt;
+import org.apache.doris.catalog.MaterializedIndex.IndexExtState;
import org.apache.doris.clone.DynamicPartitionScheduler;
import org.apache.doris.common.AnalysisException;
import org.apache.doris.common.Config;
@@ -40,6 +41,7 @@ import org.junit.rules.ExpectedException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
+import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.GregorianCalendar;
@@ -1537,10 +1539,37 @@ public class DynamicPartitionTableTest {
+ ");";
ExceptionChecker.expectThrowsWithMsg(DdlException.class,
"errCode = 2, detailMessage = Invalid \" dynamic_partition.reserved_history_periods \""
- + " value [2020-01-01,2020-03-01]. "
- + "It must be like "
- + "\"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\" while time_unit is DAY/WEEK/MONTH "
- + "or \"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd HH:mm:ss],[...,...]\" while time_unit is HOUR.",
+ + " value [2020-01-01,2020-03-01]. "
+ + "It must be like "
+ + "\"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\" while time_unit is DAY/WEEK/MONTH "
+ + "or \"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd HH:mm:ss],[...,...]\" while time_unit is HOUR.",
() -> createTable(createOlapTblStmt4));
}
+
+ @Test
+ public void testNoPartition() throws AnalysisException {
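+ // A RANGE-partitioned table created with an empty partition spec has no
+ // partitions; selectiveCopy() used to call get() on an empty Optional in
+ // that case. Verify that the copy now succeeds and is itself empty.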
+ String createOlapTblStmt = "CREATE TABLE test.`no_partition` (\n"
+ + " `k1` datetime NULL COMMENT \"\",\n"
+ + " `k2` int NULL COMMENT \"\",\n"
+ + " `k3` smallint NULL COMMENT \"\",\n"
+ + " `v1` varchar(2048) NULL COMMENT \"\",\n"
+ + " `v2` datetime NULL COMMENT \"\"\n"
+ + ") ENGINE=OLAP\n"
+ + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n"
+ + "COMMENT \"OLAP\"\n"
+ + "PARTITION BY RANGE (k1)()\n"
+ + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n"
+ + "PROPERTIES (\n"
+ + "\"replication_num\" = \"1\"\n"
+ + ");";
+ ExceptionChecker.expectThrowsNoException(() -> createTable(createOlapTblStmt));
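+ // The table was created without any partitions, so getPartitions() must be empty.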
+ OlapTable table = (OlapTable) Env.getCurrentInternalCatalog()
+ .getDbOrAnalysisException("default_cluster:test")
+ .getTableOrAnalysisException("no_partition");
+ Collection<Partition> partitions = table.getPartitions();
+ Assert.assertTrue(partitions.isEmpty());
+ OlapTable copiedTable = table.selectiveCopy(Collections.emptyList(), IndexExtState.VISIBLE, true);
+ partitions = copiedTable.getPartitions();
+ Assert.assertTrue(partitions.isEmpty());
+ }
}