Revert "[feature](merge-on-write) enable merge-on-write by default (#27188)" (#28096)

This reverts commit 00c8bab84de8154052f9d323800b436cd0ad36e5.
yiguolei
2023-12-07 11:31:36 +08:00
committed by GitHub
parent 3a7a8bb107
commit 8c79b86f5b
31 changed files with 40 additions and 122 deletions

View File

@@ -493,14 +493,12 @@ That is to say, the Merge on Read implementation of the Unique Model is equivale
The Merge on Write implementation of the Unique Model is completely different from that of the Aggregate Model. It can deliver better performance in aggregation queries with primary key constraints.
In Doris 1.2.0, as a new feature, Merge on Write is disabled by default (before version 2.1), and users can enable it by adding the following property:
In Doris 1.2.0, as a new feature, Merge on Write is disabled by default, and users can enable it by adding the following property:
```
"enable_unique_key_merge_on_write" = "true"
```
In Doris 2.1, Merge on Write is enabled by default.
> NOTE:
> 1. It is recommended to use version 1.2.4 or above, as this version has fixed some bugs and stability issues.
> 2. Add the configuration item "disable_storage_page_cache=false" to the be.conf file. Failure to add this configuration item may have a significant impact on data load performance.
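As a hedged illustration of the property above, a minimal table creation that opts in to merge on write might look like this (table name and schema are hypothetical, following the syntax used in these docs):

```sql
-- Opt in explicitly; with this revert the default is "false" again.
CREATE TABLE example_uniq_tbl (
    k1 INT,
    v1 INT
)
UNIQUE KEY(k1)
DISTRIBUTED BY HASH(k1) BUCKETS 3
PROPERTIES (
    "replication_num" = "1",
    "enable_unique_key_merge_on_write" = "true"
);
```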
@@ -545,8 +543,8 @@ On a Unique table with the Merge on Write option enabled, during the import stag
[NOTE]
1. The Merge on Write implementation can only be enabled by specifying a property when creating a new table. Before version 2.1, it is disabled by default; since version 2.1, it is enabled by default.
2. The old Merge on Read implementation cannot be seamlessly upgraded to the Merge on Write implementation (since they have completely different data organization). If you want to switch to the Merge on Write implementation, you need to manually execute `insert into unique-mow-table select * from source table` to load data into the new table.
1. The new Merge on Write implementation is disabled by default, and can only be enabled by specifying a property when creating a new table.
2. The old Merge on Read implementation cannot be seamlessly upgraded to the new implementation (since they have completely different data organization). If you want to switch to the Merge on Write implementation, you need to manually execute `insert into unique-mow-table select * from source table` to load data into the new table.
3. The two unique features `delete sign` and `sequence col` of the Unique Model can be used as normal in the new implementation, and their usage remains unchanged.
</version>
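The migration in note 2 above, sketched minimally under the assumption of an existing merge-on-read table `uniq_mor_tbl` and a new merge-on-write table `uniq_mow_tbl` (both names hypothetical):

```sql
-- Create the new table with merge on write enabled explicitly.
CREATE TABLE uniq_mow_tbl (
    user_id BIGINT,
    city VARCHAR(20)
)
UNIQUE KEY(user_id)
DISTRIBUTED BY HASH(user_id) BUCKETS 3
PROPERTIES (
    "replication_num" = "1",
    "enable_unique_key_merge_on_write" = "true"
);

-- Reload the data; the old table cannot be upgraded in place.
INSERT INTO uniq_mow_tbl SELECT * FROM uniq_mor_tbl;
```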

View File

@@ -383,12 +383,6 @@ Set table properties. The following attributes are currently supported:
`"compression"="zstd"`
* `enable_unique_key_merge_on_write`
<version since="1.2" type="inline"> Wheather the unique table use merge-on-write implementation. </version>
The property is disabled by default before version 2.1 and is enabled by default since version 2.1.
* `light_schema_change`
Whether to use the Light Schema Change optimization.

View File

@@ -416,7 +416,7 @@ mysql> select sum_merge(k2) , group_concat_merge(k3)from aggstate where k1 != 2;
In some multi-dimensional analysis scenarios, users care more about how to guarantee the uniqueness of keys, that is, how to obtain a primary key uniqueness constraint.
Therefore, we introduced the Unique data model. Before version 1.2, this model was essentially a special case of the Aggregate Model and a simplified way of representing table schemas.
Since the Aggregate Model is implemented as merge on read, it performs poorly on some aggregation queries (see the later section [Limitations of the Aggregate Model](#聚合模型的局限性)),
so in version 1.2 we introduced merge on write, a new implementation for the Unique Model, which achieves optimal query performance by doing some extra work at write time.
Merge on write will replace merge on read as the default implementation of the Unique Model in the future; the two will coexist for a short period. Both implementations are illustrated with examples below.
@@ -496,14 +496,11 @@ PROPERTIES (
The merge-on-write implementation of the Unique Model is a completely different model from the Aggregate Model, with query performance closer to the Duplicate Model. In scenarios that need a primary key constraint it offers a large query performance advantage over the Aggregate Model, especially in aggregation queries and in queries that use indexes to filter out large amounts of data.
In version 1.2.0, as a new feature, merge on write is disabled by default (before version 2.1), and users can enable it by adding the following property:
In version 1.2.0, as a new feature, merge on write is disabled by default, and users can enable it by adding the following property:
```
"enable_unique_key_merge_on_write" = "true"
```
Since version 2.1, merge on write is enabled by default.
> NOTE:
> 1. It is recommended to use version 1.2.4 or above, as it fixes some bugs and stability issues.
> 2. Add the configuration item "disable_storage_page_cache=false" to the be.conf file. Failing to add it may significantly impact data load performance.
@@ -547,8 +544,8 @@ PROPERTIES (
All data marked for deletion is filtered out at the file level, so the data read is always the latest. This eliminates the data aggregation step of merge on read and can, in many cases, support pushdown of multiple predicates, bringing large performance gains in many scenarios, especially for aggregation queries.
[NOTE]
1. A unique table with the merge-on-write implementation can only be enabled by specifying the property when creating the table. Before version 2.1 the property is disabled by default; since version 2.1 it is enabled by default.
2. The old merge-on-read implementation cannot be seamlessly upgraded to the merge-on-write implementation (their data organization is completely different). To switch to the merge-on-write implementation, you need to manually execute `insert into unique-mow-table select * from source table`.
1. The new merge-on-write implementation is disabled by default and can only be enabled by specifying the property when creating a table.
2. The old merge-on-read implementation cannot be seamlessly upgraded to the new implementation (their data organization is completely different). To switch to the merge-on-write implementation, you need to manually execute `insert into unique-mow-table select * from source table`.
3. The `delete sign` and `sequence col` features unique to the Unique Model still work normally in the new merge-on-write implementation, and their usage is unchanged.
</version>

View File

@@ -366,12 +366,6 @@ UNIQUE KEY(k1, k2)
`"function_column.sequence_type" = 'Date'`
* `enable_unique_key_merge_on_write`
<version since="1.2" type="inline"> unique表是否使用merge on write实现。</version>
该属性在 2.1 版本之前默认关闭,从 2.1 版本开始默认开启。
* `light_schema_change`
<version since="1.2" type="inline"> 是否使用light schema change优化。</version>

View File

@@ -423,7 +423,6 @@ public class CreateTableStmt extends DdlStmt {
if (keysDesc.getKeysType() == KeysType.UNIQUE_KEYS) {
enableUniqueKeyMergeOnWrite = false;
if (properties != null) {
properties = PropertyAnalyzer.enableUniqueKeyMergeOnWriteIfNotExists(properties);
// `analyzeXXX` would modify `properties`, which will be used later,
// so we just clone a properties map here.
enableUniqueKeyMergeOnWrite = PropertyAnalyzer.analyzeUniqueKeyMergeOnWrite(

View File

@@ -3215,8 +3215,8 @@ public class Env {
sb.append(olapTable.getEstimatePartitionSize()).append("\"");
}
// unique key table with merge on write, always print this property for unique table
if (olapTable.getKeysType() == KeysType.UNIQUE_KEYS) {
// unique key table with merge on write
if (olapTable.getKeysType() == KeysType.UNIQUE_KEYS && olapTable.getEnableUniqueKeyMergeOnWrite()) {
sb.append(",\n\"").append(PropertyAnalyzer.ENABLE_UNIQUE_KEY_MERGE_ON_WRITE).append("\" = \"");
sb.append(olapTable.getEnableUniqueKeyMergeOnWrite()).append("\"");
}
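A quick way to see which implementation a given table ended up with: after this change, `SHOW CREATE TABLE` prints the property only when merge on write is enabled. A sketch (database and table names are hypothetical):

```sql
-- If PROPERTIES contains "enable_unique_key_merge_on_write" = "true",
-- the table uses merge on write; otherwise it uses merge on read.
SHOW CREATE TABLE example_db.example_uniq_tbl;
```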

View File

@@ -461,9 +461,6 @@ public class TableProperty implements Writable {
properties.put(PropertyAnalyzer.ENABLE_UNIQUE_KEY_MERGE_ON_WRITE, Boolean.toString(enable));
}
// In order to ensure that unique tables without the `enable_unique_key_merge_on_write` property specified
// before version 2.1 still maintain the merge-on-read implementation after the upgrade, we will keep
// the default value here as false.
public boolean getEnableUniqueKeyMergeOnWrite() {
return Boolean.parseBoolean(properties.getOrDefault(
PropertyAnalyzer.ENABLE_UNIQUE_KEY_MERGE_ON_WRITE, "false"));

View File

@@ -1280,13 +1280,4 @@ public class PropertyAnalyzer {
return properties;
}
// Since we can't change the default value of the property `enable_unique_key_merge_on_write`
// due to backward compatibility, we just explicitly set the value of this property to `true` if
// the user doesn't specify the property in `CreateTableStmt`/`CreateTableInfo`
public static Map<String, String> enableUniqueKeyMergeOnWriteIfNotExists(Map<String, String> properties) {
if (properties != null && properties.get(PropertyAnalyzer.ENABLE_UNIQUE_KEY_MERGE_ON_WRITE) == null) {
properties.put(PropertyAnalyzer.ENABLE_UNIQUE_KEY_MERGE_ON_WRITE, "true");
}
return properties;
}
}

View File

@@ -264,10 +264,6 @@ public class CreateTableInfo {
+ " set 'true' when create olap table by default.");
}
if (keysType.equals(KeysType.UNIQUE_KEYS)) {
properties = PropertyAnalyzer.enableUniqueKeyMergeOnWriteIfNotExists(properties);
}
if (properties != null && properties.containsKey(PropertyAnalyzer.ENABLE_UNIQUE_KEY_MERGE_ON_WRITE)) {
if (!keysType.equals(KeysType.UNIQUE_KEYS)) {
throw new AnalysisException(PropertyAnalyzer.ENABLE_UNIQUE_KEY_MERGE_ON_WRITE

View File

@@ -536,7 +536,6 @@ public class CreateTableAsSelectStmtTest extends TestWithFeService {
+ "\"min_load_replica_num\" = \"-1\",\n"
+ "\"is_being_synced\" = \"false\",\n"
+ "\"storage_format\" = \"V2\",\n"
+ "\"enable_unique_key_merge_on_write\" = \"true\",\n"
+ "\"light_schema_change\" = \"true\",\n"
+ "\"disable_auto_compaction\" = \"false\",\n"
+ "\"enable_single_replica_compaction\" = \"false\",\n"

View File

@@ -140,8 +140,8 @@ public class CreateTableStmtTest {
new KeysDesc(KeysType.UNIQUE_KEYS, colsName), null,
new HashDistributionDesc(10, Lists.newArrayList("col1")), properties, null, "");
stmt.analyze(analyzer);
Assert.assertEquals(col3.getAggregateType(), AggregateType.NONE);
Assert.assertEquals(col4.getAggregateType(), AggregateType.NONE);
Assert.assertEquals(col3.getAggregateType(), AggregateType.REPLACE);
Assert.assertEquals(col4.getAggregateType(), AggregateType.REPLACE);
// clear
cols.remove(col3);
cols.remove(col4);
@@ -199,37 +199,6 @@ public class CreateTableStmtTest {
cols.remove(col4);
}
@Test
public void testCreateTableUniqueKeyMoR() throws UserException {
// setup
Map<String, String> properties = new HashMap<>();
properties.put(PropertyAnalyzer.ENABLE_UNIQUE_KEY_MERGE_ON_WRITE, "false");
ColumnDef col3 = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.BIGINT)));
col3.setIsKey(false);
cols.add(col3);
ColumnDef col4 = new ColumnDef("col4", new TypeDef(ScalarType.createType(PrimitiveType.STRING)));
col4.setIsKey(false);
cols.add(col4);
// test merge-on-read (the property above disables merge-on-write)
CreateTableStmt stmt1 = new CreateTableStmt(false, false, tblName, cols, "olap",
new KeysDesc(KeysType.UNIQUE_KEYS, colsName), null,
new HashDistributionDesc(10, Lists.newArrayList("col3")), properties, null, "");
expectedEx.expect(AnalysisException.class);
expectedEx.expectMessage("Distribution column[col3] is not key column");
stmt1.analyze(analyzer);
CreateTableStmt stmt2 = new CreateTableStmt(false, false, tblName, cols, "olap",
new KeysDesc(KeysType.UNIQUE_KEYS, colsName), null,
new HashDistributionDesc(10, Lists.newArrayList("col3")), properties, null, "");
stmt2.analyze(analyzer);
Assert.assertEquals(col3.getAggregateType(), AggregateType.REPLACE);
Assert.assertEquals(col4.getAggregateType(), AggregateType.REPLACE);
// clear
cols.remove(col3);
cols.remove(col4);
}
@Test
public void testCreateTableDuplicateWithoutKeys() throws UserException {
// setup

View File

@@ -213,10 +213,10 @@ public class CreateTableTest extends TestWithFeService {
Assert.assertTrue(tbl8.getColumn("k1").isKey());
Assert.assertTrue(tbl8.getColumn("k2").isKey());
Assert.assertFalse(tbl8.getColumn("v1").isKey());
Assert.assertTrue(tbl8.getColumn(Column.SEQUENCE_COL).getAggregationType() == AggregateType.NONE);
Assert.assertTrue(tbl8.getColumn(Column.SEQUENCE_COL).getAggregationType() == AggregateType.REPLACE);
OlapTable tbl13 = (OlapTable) db.getTableOrDdlException("tbl13");
Assert.assertTrue(tbl13.getColumn(Column.SEQUENCE_COL).getAggregationType() == AggregateType.NONE);
Assert.assertTrue(tbl13.getColumn(Column.SEQUENCE_COL).getAggregationType() == AggregateType.REPLACE);
Assert.assertTrue(tbl13.getColumn(Column.SEQUENCE_COL).getType() == Type.INT);
Assert.assertEquals(tbl13.getSequenceMapCol(), "v1");
}

View File

@@ -224,10 +224,10 @@ public class CreateTableCommandTest extends TestWithFeService {
Assertions.assertTrue(tbl8.getColumn("k1").isKey());
Assertions.assertTrue(tbl8.getColumn("k2").isKey());
Assertions.assertFalse(tbl8.getColumn("v1").isKey());
Assertions.assertSame(tbl8.getColumn(Column.SEQUENCE_COL).getAggregationType(), AggregateType.NONE);
Assertions.assertSame(tbl8.getColumn(Column.SEQUENCE_COL).getAggregationType(), AggregateType.REPLACE);
OlapTable tbl13 = (OlapTable) db.getTableOrDdlException("tbl13");
Assertions.assertSame(tbl13.getColumn(Column.SEQUENCE_COL).getAggregationType(), AggregateType.NONE);
Assertions.assertSame(tbl13.getColumn(Column.SEQUENCE_COL).getAggregationType(), AggregateType.REPLACE);
Assertions.assertSame(tbl13.getColumn(Column.SEQUENCE_COL).getType(), Type.INT);
Assertions.assertEquals(tbl13.getSequenceMapCol(), "v1");
}

View File

@@ -2,7 +2,7 @@
-- !desc_tb --
user_id LARGEINT No true \N
city VARCHAR(20) Yes true \N
value1 BIGINT Yes false \N NONE
value1 BIGINT Yes false \N REPLACE
-- !select_tb --
1 Beijing 21474836478
@@ -10,10 +10,9 @@ value1 BIGINT Yes false \N NONE
-- !desc_tb --
user_id LARGEINT No true \N
city VARCHAR(20) Yes true \N
value1 BIGINT Yes false \N NONE
value2 BIGINT Yes false \N NONE
value1 BIGINT Yes false \N REPLACE
value2 BIGINT Yes false \N REPLACE
-- !select_tb --
1 Beijing 21474836478 \N
2 Beijing 21474836478 21474836478
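These expected results are `DESC` output: with the default back to merge on read, value columns of a unique table report the REPLACE aggregation type again (NONE is the merge-on-write behavior). A sketch of the check (table name hypothetical):

```sql
-- The aggregation column of each value row reads REPLACE under merge on read
-- and NONE under merge on write.
DESC example_uniq_tbl;
```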

View File

@@ -26,8 +26,7 @@ suite("add_table_policy_by_alter_table") {
UNIQUE KEY(k1)
DISTRIBUTED BY HASH (k1) BUCKETS 3
PROPERTIES(
"replication_num" = "1",
"enable_unique_key_merge_on_write" = "false"
"replication_num" = "1"
);
"""
assertEquals(create_table_not_have_policy_result.size(), 1);

View File

@@ -29,8 +29,7 @@ suite("create_table_use_policy") {
DISTRIBUTED BY HASH (k1) BUCKETS 3
PROPERTIES(
"storage_policy" = "not_exist_policy",
"replication_num" = "1",
"enable_unique_key_merge_on_write" = "false"
"replication_num" = "1"
);
"""
@@ -86,8 +85,7 @@ suite("create_table_use_policy") {
DISTRIBUTED BY HASH (k1) BUCKETS 3
PROPERTIES(
"storage_policy" = "test_create_table_use_policy",
"replication_num" = "1",
"enable_unique_key_merge_on_write" = "false"
"replication_num" = "1"
);
"""

View File

@@ -122,8 +122,7 @@ suite("drop_policy") {
DISTRIBUTED BY HASH (k1) BUCKETS 3
PROPERTIES(
"storage_policy" = "drop_policy_test_has_table_binded",
"replication_num" = "1",
"enable_unique_key_merge_on_write" = "false"
"replication_num" = "1"
);
"""

View File

@@ -34,7 +34,7 @@ suite("test_unique_table") {
date_value date
)
UNIQUE KEY(k)
DISTRIBUTED BY HASH(k) BUCKETS 5 properties("replication_num" = "1", "enable_unique_key_merge_on_write" = "false");
DISTRIBUTED BY HASH(k) BUCKETS 5 properties("replication_num" = "1");
"""
sql "insert into ${tbName} values(0, 1, 'test char', '2000-01-01')"
sql "insert into ${tbName} values(0, 2, 'test int', '2000-02-02')"
@@ -55,8 +55,7 @@ suite("test_unique_table") {
UNIQUE KEY(k1)
DISTRIBUTED BY HASH(`k1`) BUCKETS 3
PROPERTIES (
"replication_allocation" = "tag.location.default: 1",
"enable_unique_key_merge_on_write" = "false"
"replication_allocation" = "tag.location.default: 1"
);
"""
sql "SET show_hidden_columns=true"

View File

@@ -34,8 +34,7 @@ suite("test_unique_table_debug_data") {
distributed by hash(a) buckets 16
properties(
"replication_allocation" = "tag.location.default:1",
"disable_auto_compaction" = "true",
"enable_unique_key_merge_on_write" = "false"
"disable_auto_compaction" = "true"
);
"""

View File

@@ -37,7 +37,7 @@ suite("test_unique_table_like") {
ENGINE=OLAP
UNIQUE KEY(k)
DISTRIBUTED BY HASH(k) BUCKETS 5 properties("replication_num" = "1",
"function_column.sequence_type" = "int", "enable_unique_key_merge_on_write" = "false");
"function_column.sequence_type" = "int");
"""
qt_desc_uniq_table "desc ${tbNameA}"
sql """

View File

@@ -31,8 +31,7 @@ suite("test_unique_table_new_sequence") {
PROPERTIES (
"function_column.sequence_col" = "v2",
"replication_allocation" = "tag.location.default: 1",
"light_schema_change" = "true",
"enable_unique_key_merge_on_write" = "false"
"light_schema_change" = "true"
);
"""
// test streamload with seq col
@@ -146,8 +145,7 @@ suite("test_unique_table_new_sequence") {
PROPERTIES (
"function_column.sequence_col" = "v4",
"replication_allocation" = "tag.location.default: 1",
"light_schema_change" = "true",
"enable_unique_key_merge_on_write" = "false"
"light_schema_change" = "true"
);
"""

View File

@@ -127,7 +127,6 @@ suite("test_delete_on_value") {
DISTRIBUTED BY HASH(`x`) BUCKETS 4
PROPERTIES (
"replication_num" = "1",
"enable_unique_key_merge_on_write" = "false",
"function_column.sequence_col" = "z"
);"""
sql "insert into ${tableName4} values(1,1,10);"

View File

@@ -209,7 +209,7 @@ suite("test_bitmap_index") {
v1 INT
)
UNIQUE KEY(k1,k2,k3,k4,k5,k6,k7,k8,k9,k10,k11)
DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = "1", "enable_unique_key_merge_on_write" = "false");
DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = "1");
"""
sql """
@@ -313,7 +313,6 @@ suite("test_bitmap_index") {
"dynamic_partition.reserved_history_periods" = "NULL",
"dynamic_partition.storage_policy" = "",
"storage_format" = "V2",
"enable_unique_key_merge_on_write" = "false",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false"

View File

@@ -189,7 +189,7 @@ suite("test_bitmap_index", "inverted_index") {
v1 INT
)
UNIQUE KEY(k1,k2,k3,k4,k5,k6,k7,k8,k9,k10,k11)
DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = "1", "enable_unique_key_merge_on_write" = "false");
DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = "1");
"""
sql """

View File

@@ -30,7 +30,7 @@ suite ("unique") {
)
unique key (k1,k2,k3)
distributed BY hash(k1) buckets 3
properties("replication_num" = "1", "enable_unique_key_merge_on_write" = "false");
properties("replication_num" = "1");
"""
sql "insert into u_table select 1,1,1,'a';"

View File

@@ -31,8 +31,7 @@ suite ("varchar_length") {
DISTRIBUTED BY HASH(vid) BUCKETS AUTO
PROPERTIES
(
"replication_num" = "1",
"enable_unique_key_merge_on_write" = "false"
"replication_num" = "1"
);
"""

View File

@@ -33,8 +33,7 @@ suite("test_alter_table_drop_column") {
DISTRIBUTED BY HASH(siteid) BUCKETS 1
PROPERTIES (
"replication_num" = "1",
"bloom_filter_columns" = "pv",
"enable_unique_key_merge_on_write" = "false"
"bloom_filter_columns" = "pv"
);
"""

View File

@@ -41,7 +41,7 @@ suite ("test_rename_column") {
`min_dwell_time` INT DEFAULT "99999" COMMENT "user minimum dwell time")
UNIQUE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
BUCKETS 8
PROPERTIES ( "replication_num" = "1" , "light_schema_change" = "false", "enable_unique_key_merge_on_write" = "false")
PROPERTIES ( "replication_num" = "1" , "light_schema_change" = "false")
"""
qt_desc """ desc ${tableName} """
@@ -122,7 +122,7 @@ suite ("test_rename_column") {
`min_dwell_time` INT DEFAULT "99999" COMMENT "user minimum dwell time")
UNIQUE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
BUCKETS 8
PROPERTIES ( "replication_num" = "1" , "light_schema_change" = "false", "enable_unique_key_merge_on_write" = "false")
PROPERTIES ( "replication_num" = "1" , "light_schema_change" = "false")
"""
test {
sql """ ALTER table ${tableName} RENAME COLUMN date new_date """

View File

@@ -41,7 +41,7 @@ suite("test_schema_change") {
PARTITION p201707 VALUES LESS THAN ('2021-12-01')
)
DISTRIBUTED BY HASH(siteid) BUCKETS 5
PROPERTIES("replication_num" = "1", "light_schema_change" = "true", "enable_unique_key_merge_on_write" = "false");
PROPERTIES("replication_num" = "1", "light_schema_change" = "true");
"""
sql """ insert into ${tbName} values('2021-11-01',1,1,'用户A',1),('2021-11-01',1,1,'用户B',1),('2021-11-01',1,1,'用户A',3),('2021-11-02',1,1,'用户A',1),('2021-11-02',1,1,'用户B',1),('2021-11-02',101,112332121,'用户B',112312),('2021-11-02',103,112332211,'用户B',112312); """
sql """ alter table ${tbName} modify column citycode string """

View File

@@ -27,8 +27,7 @@ suite("test_uniq_delete_sign_schema_change", "schema_change") {
value3 INT
)
UNIQUE KEY (k1)
DISTRIBUTED BY HASH(k1) BUCKETS 1
properties("replication_num" = "1", "light_schema_change" = "false", "enable_unique_key_merge_on_write" = "false");
DISTRIBUTED BY HASH(k1) BUCKETS 1 properties("replication_num" = "1", "light_schema_change" = "false");
"""
sql "insert into ${tbName1} values(1,1,1,1);"
sql "insert into ${tbName1} values(1,1,1,2);"

View File

@@ -29,7 +29,7 @@ suite("test_update_unique", "p0") {
date_value date
)
UNIQUE KEY(k)
DISTRIBUTED BY HASH(k) BUCKETS 5 properties("replication_num" = "1", "enable_unique_key_merge_on_write" = "false");
DISTRIBUTED BY HASH(k) BUCKETS 5 properties("replication_num" = "1");
"""
sql "insert into ${tbName1} values(1, 1, 1, '2000-01-01');"
sql "insert into ${tbName1} values(2, 1, 1, '2000-01-01');"
@@ -49,12 +49,10 @@ suite("test_update_unique", "p0") {
// test complex update syntax
sql """
create table ${tbName1} (id int, c1 bigint, c2 string, c3 double, c4 date) unique key (id) distributed by hash(id)
properties('replication_num'='1', "enable_unique_key_merge_on_write" = "false");
create table ${tbName1} (id int, c1 bigint, c2 string, c3 double, c4 date) unique key (id) distributed by hash(id) properties('replication_num'='1');
"""
sql """
create table ${tbName2} (id int, c1 bigint, c2 string, c3 double, c4 date) unique key (id) distributed by hash(id)
properties('replication_num'='1', "enable_unique_key_merge_on_write" = "false");
create table ${tbName2} (id int, c1 bigint, c2 string, c3 double, c4 date) unique key (id) distributed by hash(id) properties('replication_num'='1');
"""
sql """
create table ${tbName3} (id int) distributed by hash (id) properties('replication_num'='1');