diff --git a/contrib/mppdb_decoding/mppdb_decoding.cpp b/contrib/mppdb_decoding/mppdb_decoding.cpp
index ab75860da..b5c05f2cb 100644
--- a/contrib/mppdb_decoding/mppdb_decoding.cpp
+++ b/contrib/mppdb_decoding/mppdb_decoding.cpp
@@ -226,8 +226,8 @@ static bool pg_decode_filter(LogicalDecodingContext* ctx, RepOriginId origin_id)
}
/* print the tuple 'tuple' into the StringInfo s */
-static void TupleToJsoninfo(
- cJSON* cols_name, cJSON* cols_type, cJSON* cols_val, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls)
+static void TupleToJsoninfo(Relation relation, cJSON* cols_name, cJSON* cols_type, cJSON* cols_val, TupleDesc tupdesc,
+ HeapTuple tuple, bool isOld)
{
if ((tuple->tupTableType == HEAP_TUPLE) && (HEAP_TUPLE_IS_COMPRESSED(tuple->t_data) ||
(int)HeapTupleHeaderGetNatts(tuple->t_data, tupdesc) > tupdesc->natts)) {
@@ -254,7 +254,7 @@ static void TupleToJsoninfo(
* Don't print system columns, oid will already have been printed if
* present.
*/
- if (attr->attnum < 0)
+ if (attr->attnum < 0 || (isOld && !IsRelationReplidentKey(relation, attr->attnum)))
continue;
Oid typid = attr->atttypid; /* type of current attribute */
@@ -265,8 +265,6 @@ static void TupleToJsoninfo(
} else {
origval = uheap_getattr((UHeapTuple)tuple, natt + 1, tupdesc, &isnull);
}
- if (isnull && skip_nulls)
- continue;
/* print attribute name */
@@ -355,53 +353,53 @@ static void pg_decode_change(
case REORDER_BUFFER_CHANGE_INSERT:
op_type = cJSON_CreateString("INSERT");
if (change->data.tp.newtuple != NULL) {
- TupleToJsoninfo(
- columns_name, columns_type, columns_val, tupdesc, &change->data.tp.newtuple->tuple, false);
+ TupleToJsoninfo(relation, columns_name, columns_type, columns_val, tupdesc,
+ &change->data.tp.newtuple->tuple, false);
}
break;
case REORDER_BUFFER_CHANGE_UPDATE:
op_type = cJSON_CreateString("UPDATE");
if (change->data.tp.oldtuple != NULL) {
- TupleToJsoninfo(
- old_keys_name, old_keys_type, old_keys_val, tupdesc, &change->data.tp.oldtuple->tuple, true);
+ TupleToJsoninfo(relation, old_keys_name, old_keys_type, old_keys_val, tupdesc,
+ &change->data.tp.oldtuple->tuple, true);
}
if (change->data.tp.newtuple != NULL) {
- TupleToJsoninfo(
- columns_name, columns_type, columns_val, tupdesc, &change->data.tp.newtuple->tuple, false);
+ TupleToJsoninfo(relation, columns_name, columns_type, columns_val, tupdesc,
+ &change->data.tp.newtuple->tuple, false);
}
break;
case REORDER_BUFFER_CHANGE_DELETE:
op_type = cJSON_CreateString("DELETE");
if (change->data.tp.oldtuple != NULL) {
- TupleToJsoninfo(
- old_keys_name, old_keys_type, old_keys_val, tupdesc, &change->data.tp.oldtuple->tuple, true);
+ TupleToJsoninfo(relation, old_keys_name, old_keys_type, old_keys_val, tupdesc,
+ &change->data.tp.oldtuple->tuple, true);
}
/* if there was no PK, we only know that a delete happened */
break;
case REORDER_BUFFER_CHANGE_UINSERT:
op_type = cJSON_CreateString("INSERT");
if (change->data.utp.newtuple != NULL) {
- TupleToJsoninfo(columns_name, columns_type, columns_val, tupdesc,
+ TupleToJsoninfo(relation, columns_name, columns_type, columns_val, tupdesc,
(HeapTuple)(&change->data.utp.newtuple->tuple), false);
}
break;
case REORDER_BUFFER_CHANGE_UDELETE:
op_type = cJSON_CreateString("DELETE");
if (change->data.utp.oldtuple != NULL) {
- TupleToJsoninfo(old_keys_name, old_keys_type, old_keys_val, tupdesc,
+ TupleToJsoninfo(relation, old_keys_name, old_keys_type, old_keys_val, tupdesc,
(HeapTuple)(&change->data.utp.oldtuple->tuple), true);
}
break;
case REORDER_BUFFER_CHANGE_UUPDATE:
op_type = cJSON_CreateString("UPDATE");
if (change->data.utp.oldtuple != NULL) {
- TupleToJsoninfo(old_keys_name, old_keys_type, old_keys_val, tupdesc,
+ TupleToJsoninfo(relation, old_keys_name, old_keys_type, old_keys_val, tupdesc,
(HeapTuple)(&change->data.utp.oldtuple->tuple), true);
}
if (change->data.utp.newtuple != NULL) {
- TupleToJsoninfo(columns_name, columns_type, columns_val, tupdesc,
+ TupleToJsoninfo(relation, columns_name, columns_type, columns_val, tupdesc,
(HeapTuple)&change->data.utp.newtuple->tuple, false);
}
break;
diff --git a/contrib/pagehack/pagehack.cpp b/contrib/pagehack/pagehack.cpp
index f9b32e6a7..0ffc38ac8 100644
--- a/contrib/pagehack/pagehack.cpp
+++ b/contrib/pagehack/pagehack.cpp
@@ -3160,8 +3160,9 @@ static int parse_page_file(const char *filename, SegmentType type, const uint32
char decompressed[BLCKSZ];
while (start < number) {
size_t compressedSize = pageCompression->ReadCompressedBuffer(start, compressed, BLCKSZ);
- if (compressedSize == 0) {
- fprintf(stderr, "read block %u failed, filename: %s: %s\n", start, filename, strerror(errno));
+ if (compressedSize > MIN_COMPRESS_ERROR_RT) {
+ fprintf(stderr, "read block %u failed, filename: %s: code: %zu %s\n",
+ start, filename, compressedSize, strerror(errno));
delete pageCompression;
return false;
}
@@ -5435,6 +5436,7 @@ static void fill_filenode_map(char** class_map)
{"pg_cast_source_target_index", 2661},
{"pg_class_oid_index", 2662},
{"pg_class_relname_nsp_index", 2663},
+ {"pg_collation_enc_def_index", 3147},
{"pg_collation_name_enc_nsp_index", 3164},
{"pg_collation_oid_index", 3085},
{"pg_constraint_conname_nsp_index", 2664},
diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index 7b5d4b2e8..730578aae 100755
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -1394,18 +1394,18 @@ SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2 order by t1.c2 limit 1;
-- case when expr
explain (verbose, costs off) select * from ft1 where case c1 when 1 then 0 end = 0;
- QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------
Foreign Scan on public.ft1
Output: c1, c2, c3, c4, c5, c6, c7, c8
Node ID: 1
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (((CASE "C 1" WHEN 1 THEN 0::numeric ELSE NULL::numeric END) = 0::numeric))
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (((CASE "C 1" WHEN 1 THEN 0 ELSE NULL::integer END) = 0))
FDW remote plans:
- Node 1: EXPLAIN (VERBOSE ON, COSTS OFF) SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (((CASE "C 1" WHEN 1 THEN 0::numeric ELSE NULL::numeric END) = 0::numeric))
+ Node 1: EXPLAIN (VERBOSE ON, COSTS OFF) SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (((CASE "C 1" WHEN 1 THEN 0 ELSE NULL::integer END) = 0))
Seq Scan on "S 1"."T 1"
Output: "C 1", c2, c3, c4, c5, c6, c7, c8
- Filter: (CASE "T 1"."C 1" WHEN 1 THEN 0::numeric ELSE NULL::numeric END = 0::numeric)
+ Filter: (CASE "T 1"."C 1" WHEN 1 THEN 0 ELSE NULL::integer END = 0)
(11 rows)
diff --git a/contrib/postgres_fdw/expected/postgres_fdw_cstore.out b/contrib/postgres_fdw/expected/postgres_fdw_cstore.out
index 11fe1b520..af155fac4 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw_cstore.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw_cstore.out
@@ -1365,20 +1365,20 @@ SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2 order by t1.c2 limit 1;
-- case when expr
explain (verbose, costs off) select * from ft1 where case c1 when 1 then 0 end = 0;
- QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------
Foreign Scan on public.ft1
Output: c1, c2, c3, c4, c5, c6, c7, c8
Node ID: 1
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (((CASE "C 1" WHEN 1 THEN 0::numeric ELSE NULL::numeric END) = 0::numeric))
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (((CASE "C 1" WHEN 1 THEN 0 ELSE NULL::integer END) = 0))
FDW remote plans:
- Node 1: EXPLAIN (VERBOSE ON, COSTS OFF) SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (((CASE "C 1" WHEN 1 THEN 0::numeric ELSE NULL::numeric END) = 0::numeric))
+ Node 1: EXPLAIN (VERBOSE ON, COSTS OFF) SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (((CASE "C 1" WHEN 1 THEN 0 ELSE NULL::integer END) = 0))
Row Adapter
Output: "C 1", c2, c3, c4, c5, c6, c7, c8
-> CStore Scan on "S 1"."T 1"
Output: "C 1", c2, c3, c4, c5, c6, c7, c8
- Filter: (CASE "T 1"."C 1" WHEN 1 THEN 0::numeric ELSE NULL::numeric END = 0::numeric)
+ Filter: (CASE "T 1"."C 1" WHEN 1 THEN 0 ELSE NULL::integer END = 0)
(13 rows)
diff --git a/contrib/postgres_fdw/expected/postgres_fdw_partition.out b/contrib/postgres_fdw/expected/postgres_fdw_partition.out
index 5925bd6a2..f0476e43c 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw_partition.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw_partition.out
@@ -252,12 +252,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM ft6; --err
SELECT * FROM ft6;
c1 | c2 | c3
----+-----+--------
- 12 | 13 | AAA012
- 18 | 19 | AAA018
- 27 | 28 | AAA027
- 39 | 40 | AAA039
- 45 | 46 | AAA045
- 48 | 49 | AAA048
3 | 4 | AAA003
6 | 7 | AAA006
9 | 10 | AAA009
@@ -268,12 +262,12 @@ SELECT * FROM ft6;
33 | 34 | AAA033
36 | 37 | AAA036
42 | 43 | AAA042
- 51 | 52 | AAA051
- 63 | 64 | AAA063
- 66 | 67 | AAA066
- 69 | 70 | AAA069
- 75 | 76 | AAA075
- 84 | 85 | AAA084
+ 12 | 13 | AAA012
+ 18 | 19 | AAA018
+ 27 | 28 | AAA027
+ 39 | 40 | AAA039
+ 45 | 46 | AAA045
+ 48 | 49 | AAA048
54 | 55 | AAA054
57 | 58 | AAA057
60 | 61 | AAA060
@@ -285,6 +279,12 @@ SELECT * FROM ft6;
93 | 94 | AAA093
96 | 97 | AAA096
99 | 100 | AAA099
+ 51 | 52 | AAA051
+ 63 | 64 | AAA063
+ 66 | 67 | AAA066
+ 69 | 70 | AAA069
+ 75 | 76 | AAA075
+ 84 | 85 | AAA084
(33 rows)
SHOW sql_beta_feature;
@@ -313,12 +313,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM ft6; --suc
SELECT * FROM ft6;
c1 | c2 | c3
----+-----+--------
- 12 | 13 | AAA012
- 18 | 19 | AAA018
- 27 | 28 | AAA027
- 39 | 40 | AAA039
- 45 | 46 | AAA045
- 48 | 49 | AAA048
3 | 4 | AAA003
6 | 7 | AAA006
9 | 10 | AAA009
@@ -329,12 +323,12 @@ SELECT * FROM ft6;
33 | 34 | AAA033
36 | 37 | AAA036
42 | 43 | AAA042
- 51 | 52 | AAA051
- 63 | 64 | AAA063
- 66 | 67 | AAA066
- 69 | 70 | AAA069
- 75 | 76 | AAA075
- 84 | 85 | AAA084
+ 12 | 13 | AAA012
+ 18 | 19 | AAA018
+ 27 | 28 | AAA027
+ 39 | 40 | AAA039
+ 45 | 46 | AAA045
+ 48 | 49 | AAA048
54 | 55 | AAA054
57 | 58 | AAA057
60 | 61 | AAA060
@@ -346,6 +340,12 @@ SELECT * FROM ft6;
93 | 94 | AAA093
96 | 97 | AAA096
99 | 100 | AAA099
+ 51 | 52 | AAA051
+ 63 | 64 | AAA063
+ 66 | 67 | AAA066
+ 69 | 70 | AAA069
+ 75 | 76 | AAA075
+ 84 | 85 | AAA084
(33 rows)
-- ======================================================================================================================================
@@ -1931,10 +1931,10 @@ SELECT t1.c1, t1.c2, t2.c1, t2.c2 FROM ft4 t1 LEFT JOIN (SELECT * FROM ft5 WHERE
SELECT t1.c1, t1.c2, t2.c1, t2.c2 FROM ft4 t1 LEFT JOIN (SELECT * FROM ft5 WHERE c1 < 10) t2 ON (t1.c1 = t2.c1) WHERE t1.c1 < 10;
c1 | c2 | c1 | c2
----+----+----+----
+ 4 | 5 | |
2 | 3 | |
6 | 7 | 6 | 7
8 | 9 | |
- 4 | 5 | |
(4 rows)
-- clauses within the nullable side are not pulled up, but the top level clause
@@ -1980,10 +1980,10 @@ SELECT t1.c1, t1.c2, t2.c1, t2.c2 FROM ft4 t1 LEFT JOIN (SELECT * FROM ft5 WHERE
WHERE (t2.c1 < 10 OR t2.c1 IS NULL) AND t1.c1 < 10;
c1 | c2 | c1 | c2
----+----+----+----
+ 4 | 5 | |
2 | 3 | |
6 | 7 | 6 | 7
8 | 9 | |
- 4 | 5 | |
(4 rows)
-- right outer join
@@ -2492,7 +2492,7 @@ SELECT t1.c1, ss.a, ss.b FROM (SELECT c1 FROM "S 1"."T 3" WHERE c1 = 50) t1 INNE
-> Partitioned Index Scan using t3_pkey on "S 1"."T 3"
Output: "T 3".c1, "T 3".ctid, "T 3".tableoid
Index Cond: ("T 3".c1 = 50)
- Selected Partitions: 1
+ Selected Partitions: 2
-> Hash Full Join
Output: ft4.c1, ft4.*, ft5.c1, ft5.*
Hash Cond: (ft4.c1 = ft5.c1)
@@ -4058,9 +4058,9 @@ SELECT ft4.c1, q.* FROM ft4 LEFT JOIN (SELECT 13, ft1.c1, ft2.c1 FROM ft1 RIGHT
SELECT ft4.c1, q.* FROM ft4 LEFT JOIN (SELECT 13, ft1.c1, ft2.c1 FROM ft1 RIGHT JOIN ft2 ON (ft1.c1 = ft2.c1) WHERE ft1.c1 = 12) q(a, b, c) ON (ft4.c1 = q.b) WHERE ft4.c1 BETWEEN 10 AND 15;
c1 | a | b | c
----+----+----+----
- 12 | 13 | 12 | 12
10 | | |
14 | | |
+ 12 | 13 | 12 | 12
(3 rows)
-- join with nullable side with some columns with null values
@@ -9636,12 +9636,6 @@ EXPLAIN (VERBOSE, COSTS OFF)select * from ftx;
select * from ftx;
c1 | c2 | c3
----+-----+--------
- 12 | 13 | AAA012
- 39 | 40 | AAA039
- 48 | 49 | AAA048
- 18 | 19 |
- 27 | 28 |
- 45 | 46 |
3 | 4 | AAA003
6 | 7 | AAA006
15 | 16 | AAA015
@@ -9652,12 +9646,12 @@ select * from ftx;
42 | 43 | AAA042
9 | 10 |
36 | 37 |
- 51 | 52 | AAA051
- 66 | 67 | AAA066
- 69 | 70 | AAA069
- 75 | 76 | AAA075
- 84 | 85 | AAA084
- 63 | 64 |
+ 12 | 13 | AAA012
+ 39 | 40 | AAA039
+ 48 | 49 | AAA048
+ 18 | 19 |
+ 27 | 28 |
+ 45 | 46 |
57 | 58 | AAA057
60 | 61 | AAA060
78 | 79 | AAA078
@@ -9669,6 +9663,12 @@ select * from ftx;
81 | 82 |
90 | 91 |
99 | 100 |
+ 51 | 52 | AAA051
+ 66 | 67 | AAA066
+ 69 | 70 | AAA069
+ 75 | 76 | AAA075
+ 84 | 85 | AAA084
+ 63 | 64 |
(33 rows)
EXPLAIN (VERBOSE, COSTS OFF)select * from ftx partition(ptb1); --ERR
diff --git a/contrib/security_plugin/gs_policy_plugin.cpp b/contrib/security_plugin/gs_policy_plugin.cpp
index 53d51c0cc..f06a1963e 100644
--- a/contrib/security_plugin/gs_policy_plugin.cpp
+++ b/contrib/security_plugin/gs_policy_plugin.cpp
@@ -515,30 +515,6 @@ void set_result_set_function(const PolicyLabelItem &func)
}
}
-/*
- * check exchange partition list contains masked table.
- * For given AlterTableCmd list, check whether ordinary
- * tables have bound masking policies.
- */
-static bool exchange_partition_with_masked_table(List *cmds)
-{
- if (cmds == NIL) {
- return false;
- }
- ListCell *lc = NULL;
- foreach (lc, cmds) {
- AlterTableCmd *cmd = (AlterTableCmd*)lfirst(lc);
- if (cmd->subtype == AT_ExchangePartition && cmd->exchange_with_rel != NULL) {
- Oid relid = RangeVarGetRelid(cmd->exchange_with_rel, NoLock, true);
- if (is_masked_relation(relid)) {
- return true;
- }
- }
- }
-
- return false;
-}
-
static void gsaudit_next_PostParseAnalyze_hook(ParseState *pstate, Query *query)
{
/* do nothing when enable_security_policy is off */
@@ -652,9 +628,24 @@ static void gsaudit_next_PostParseAnalyze_hook(ParseState *pstate, Query *query)
/* For ALTER TABLE EXCHANGE, will not allowed if the ordinary table(it's columns) has masking policy.*/
if (query->utilityStmt != NULL && nodeTag(query->utilityStmt) == T_AlterTableStmt) {
AlterTableStmt *alter_table = (AlterTableStmt *)(query->utilityStmt);
- if (exchange_partition_with_masked_table(alter_table->cmds)) {
- ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("ALTER TABLE EXCHANGE can not execute with masked ordinary table.")));
+ Oid relid = RangeVarGetRelid(alter_table->relation, NoLock, true);
+
+ ListCell *lc = NULL;
+ foreach (lc, alter_table->cmds) {
+ AlterTableCmd *cmd = (AlterTableCmd *)lfirst(lc);
+ if (cmd->subtype == AT_ExchangePartition) {
+ Assert(PointerIsValid(cmd->exchange_with_rel));
+ if (is_masked_relation(relid)) {
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("ALTER TABLE EXCHANGE can not execute with masked partition table.")));
+ }
+
+ Oid ordTableOid = RangeVarGetRelid(cmd->exchange_with_rel, NoLock, true);
+ if (is_masked_relation(ordTableOid)) {
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("ALTER TABLE EXCHANGE can not execute with masked ordinary table.")));
+ }
+ }
}
}
break;
diff --git a/contrib/sql_decoding/sql_decoding.cpp b/contrib/sql_decoding/sql_decoding.cpp
index 890a67a3d..065d8466f 100644
--- a/contrib/sql_decoding/sql_decoding.cpp
+++ b/contrib/sql_decoding/sql_decoding.cpp
@@ -286,7 +286,7 @@ static void print_literal(StringInfo s, Oid typid, char* outputstr)
/*
* Decode tuple into stringinfo.
*/
-static void TupleToStringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls)
+static void TupleToStringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple)
{
Assert(tuple != NULL);
if ((tuple->tupTableType == HEAP_TUPLE) && (HEAP_TUPLE_IS_COMPRESSED(tuple->t_data) ||
@@ -315,10 +315,6 @@ static void TupleToStringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple,
origval = uheap_getattr((UHeapTuple)tuple, natt + 1, tupdesc, &isnull);
}
- if (skip_nulls && isnull) {
- continue;
- }
-
/* query output function */
Oid typid = attr->atttypid; /* type of current attribute */
getTypeOutputInfo(typid, &typoutput, &typisvarlena);
@@ -343,7 +339,7 @@ static void TupleToStringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple,
* Decode tuple into stringinfo.
* This function is used for UPDATE or DELETE statements.
*/
-static void TupleToStringinfoUpd(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls)
+static void TupleToStringinfoUpd(Relation relation, StringInfo s, TupleDesc tupdesc, HeapTuple tuple)
{
if ((tuple->tupTableType == HEAP_TUPLE) && (HEAP_TUPLE_IS_COMPRESSED(tuple->t_data) ||
(int)HeapTupleHeaderGetNatts(tuple->t_data, tupdesc) > tupdesc->natts)) {
@@ -360,7 +356,7 @@ static void TupleToStringinfoUpd(StringInfo s, TupleDesc tupdesc, HeapTuple tupl
Form_pg_attribute attr = &tupdesc->attrs[natt]; /* the attribute itself */
- if (attr->attisdropped || attr->attnum < 0) {
+ if (attr->attisdropped || attr->attnum < 0 || !IsRelationReplidentKey(relation, attr->attnum)) {
continue;
}
@@ -371,10 +367,6 @@ static void TupleToStringinfoUpd(StringInfo s, TupleDesc tupdesc, HeapTuple tupl
origval = uheap_getattr((UHeapTuple)tuple, natt + 1, tupdesc, &isnull);
}
- if (isnull && skip_nulls) {
- continue;
- }
-
if (!isFirstAtt) {
appendStringInfoString(s, " and ");
} else {
@@ -403,31 +395,32 @@ static void TupleToStringinfoUpd(StringInfo s, TupleDesc tupdesc, HeapTuple tupl
* Callback for handle decoded tuple.
* Additional info will be added if the tuple is found null.
*/
-static void TupleHandler(StringInfo s, TupleDesc tupdesc, ReorderBufferChange* change, bool isHeap, bool isNewTuple)
+static void TupleHandler(Relation relation, StringInfo s, TupleDesc tupdesc, ReorderBufferChange* change,
+ bool isHeap, bool isNewTuple)
{
if (isHeap && isNewTuple) {
if (change->data.tp.newtuple == NULL) {
appendStringInfoString(s, " (no-tuple-data)");
} else {
- TupleToStringinfo(s, tupdesc, &change->data.tp.newtuple->tuple, false);
+ TupleToStringinfo(s, tupdesc, &change->data.tp.newtuple->tuple);
}
} else if (isHeap && !isNewTuple) {
if (change->data.tp.oldtuple == NULL) {
appendStringInfoString(s, " (no-tuple-data)");
} else {
- TupleToStringinfoUpd(s, tupdesc, &change->data.tp.oldtuple->tuple, true);
+ TupleToStringinfoUpd(relation, s, tupdesc, &change->data.tp.oldtuple->tuple);
}
} else if (!isHeap && isNewTuple) {
if (change->data.utp.newtuple == NULL) {
appendStringInfoString(s, " (no-tuple-data)");
} else {
- TupleToStringinfo(s, tupdesc, (HeapTuple)(&change->data.utp.newtuple->tuple), false);
+ TupleToStringinfo(s, tupdesc, (HeapTuple)(&change->data.utp.newtuple->tuple));
}
} else {
if (change->data.utp.oldtuple == NULL) {
appendStringInfoString(s, " (no-tuple-data)");
} else {
- TupleToStringinfoUpd(s, tupdesc, (HeapTuple)(&change->data.utp.oldtuple->tuple), true);
+ TupleToStringinfoUpd(relation, s, tupdesc, (HeapTuple)(&change->data.utp.oldtuple->tuple));
}
}
}
@@ -472,7 +465,7 @@ static void pg_decode_change(
isHeap = false;
}
appendStringInfoString(ctx->out, " values ");
- TupleHandler(ctx->out, tupdesc, change, isHeap, true);
+ TupleHandler(relation, ctx->out, tupdesc, change, isHeap, true);
break;
case REORDER_BUFFER_CHANGE_UPDATE:
case REORDER_BUFFER_CHANGE_UUPDATE:
@@ -483,12 +476,12 @@ static void pg_decode_change(
isHeap = false;
}
appendStringInfoString(ctx->out, " where ");
- TupleHandler(ctx->out, tupdesc, change, isHeap, false);
+ TupleHandler(relation, ctx->out, tupdesc, change, isHeap, false);
appendStringInfoChar(ctx->out, ';');
appendStringInfoString(ctx->out, "insert into ");
appendStringInfoString(ctx->out, quote_qualified_identifier(schema, table));
appendStringInfoString(ctx->out, " values ");
- TupleHandler(ctx->out, tupdesc, change, isHeap, true);
+ TupleHandler(relation, ctx->out, tupdesc, change, isHeap, true);
break;
@@ -502,7 +495,7 @@ static void pg_decode_change(
}
appendStringInfoString(ctx->out, " where ");
- TupleHandler(ctx->out, tupdesc, change, isHeap, false);
+ TupleHandler(relation, ctx->out, tupdesc, change, isHeap, false);
break;
default:
Assert(false);
diff --git a/contrib/test_decoding/test_decoding.cpp b/contrib/test_decoding/test_decoding.cpp
index 4a3107c74..8f5b377b7 100644
--- a/contrib/test_decoding/test_decoding.cpp
+++ b/contrib/test_decoding/test_decoding.cpp
@@ -199,15 +199,13 @@ static bool pg_decode_filter(LogicalDecodingContext* ctx, RepOriginId origin_id)
return true;
return false;
}
-static void tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls)
+static void tuple_to_stringinfo(Relation relation, StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool isOld)
{
- if (HEAP_TUPLE_IS_COMPRESSED(tuple->t_data))
+ if ((tuple->tupTableType == HEAP_TUPLE) && (HEAP_TUPLE_IS_COMPRESSED(tuple->t_data) ||
+ (int)HeapTupleHeaderGetNatts(tuple->t_data, tupdesc) > tupdesc->natts)) {
return;
+ }
- if ((int)HeapTupleHeaderGetNatts(tuple->t_data, tupdesc) != tupdesc->natts)
- return;
-
- int natt;
Oid oid;
/* print oid of tuple, it's not included in the TupleDesc */
@@ -216,9 +214,8 @@ static void tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple
}
/* print all columns individually */
- for (natt = 0; natt < tupdesc->natts; natt++) {
+ for (int natt = 0; natt < tupdesc->natts; natt++) {
Form_pg_attribute attr; /* the attribute itself */
- Oid typid; /* type of current attribute */
Oid typoutput; /* output function */
bool typisvarlena = false;
Datum origval; /* possibly toasted Datum */
@@ -237,16 +234,17 @@ static void tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple
* Don't print system columns, oid will already have been printed if
* present.
*/
- if (attr->attnum < 0)
+ if (attr->attnum < 0 || (isOld && !IsRelationReplidentKey(relation, attr->attnum)))
continue;
- typid = attr->atttypid;
+ Oid typid = attr->atttypid; /* type of current attribute */
/* get Datum from tuple */
- origval = heap_getattr(tuple, natt + 1, tupdesc, &isnull);
-
- if (isnull && skip_nulls)
- continue;
+ if (tuple->tupTableType == HEAP_TUPLE) {
+ origval = heap_getattr(tuple, natt + 1, tupdesc, &isnull);
+ } else {
+ origval = uheap_getattr((UHeapTuple)tuple, natt + 1, tupdesc, &isnull);
+ }
/* print attribute name */
appendStringInfoChar(s, ' ');
@@ -327,20 +325,40 @@ static void pg_decode_change(
if (change->data.tp.newtuple == NULL)
appendStringInfoString(ctx->out, " (no-tuple-data)");
else
- tuple_to_stringinfo(ctx->out, tupdesc, &change->data.tp.newtuple->tuple, false);
+ tuple_to_stringinfo(relation, ctx->out, tupdesc, &change->data.tp.newtuple->tuple, false);
+ break;
+ case REORDER_BUFFER_CHANGE_UINSERT:
+ appendStringInfoString(ctx->out, " INSERT:");
+ if (change->data.utp.newtuple == NULL)
+ appendStringInfoString(ctx->out, " (no-tuple-data)");
+ else
+ tuple_to_stringinfo(relation, ctx->out, tupdesc, (HeapTuple)(&change->data.utp.newtuple->tuple), false);
break;
case REORDER_BUFFER_CHANGE_UPDATE:
appendStringInfoString(ctx->out, " UPDATE:");
if (change->data.tp.oldtuple != NULL) {
appendStringInfoString(ctx->out, " old-key:");
- tuple_to_stringinfo(ctx->out, tupdesc, &change->data.tp.oldtuple->tuple, true);
+ tuple_to_stringinfo(relation, ctx->out, tupdesc, &change->data.tp.oldtuple->tuple, true);
appendStringInfoString(ctx->out, " new-tuple:");
}
if (change->data.tp.newtuple == NULL)
appendStringInfoString(ctx->out, " (no-tuple-data)");
else
- tuple_to_stringinfo(ctx->out, tupdesc, &change->data.tp.newtuple->tuple, false);
+ tuple_to_stringinfo(relation, ctx->out, tupdesc, &change->data.tp.newtuple->tuple, false);
+ break;
+ case REORDER_BUFFER_CHANGE_UUPDATE:
+ appendStringInfoString(ctx->out, " UPDATE:");
+ if (change->data.utp.oldtuple != NULL) {
+ appendStringInfoString(ctx->out, " old-key:");
+ tuple_to_stringinfo(relation, ctx->out, tupdesc, (HeapTuple)(&change->data.utp.oldtuple->tuple), true);
+ appendStringInfoString(ctx->out, " new-tuple:");
+ }
+
+ if (change->data.utp.newtuple == NULL)
+ appendStringInfoString(ctx->out, " (no-tuple-data)");
+ else
+ tuple_to_stringinfo(relation, ctx->out, tupdesc, (HeapTuple)(&change->data.utp.newtuple->tuple), false);
break;
case REORDER_BUFFER_CHANGE_DELETE:
appendStringInfoString(ctx->out, " DELETE:");
@@ -350,7 +368,15 @@ static void pg_decode_change(
appendStringInfoString(ctx->out, " (no-tuple-data)");
/* In DELETE, only the replica identity is present; display that */
else
- tuple_to_stringinfo(ctx->out, tupdesc, &change->data.tp.oldtuple->tuple, true);
+ tuple_to_stringinfo(relation, ctx->out, tupdesc, &change->data.tp.oldtuple->tuple, true);
+ break;
+ case REORDER_BUFFER_CHANGE_UDELETE:
+ appendStringInfoString(ctx->out, " DELETE:");
+
+ if (change->data.utp.oldtuple == NULL)
+ appendStringInfoString(ctx->out, " (no-tuple-data)");
+ else
+ tuple_to_stringinfo(relation, ctx->out, tupdesc, (HeapTuple)(&change->data.utp.oldtuple->tuple), true);
break;
default:
Assert(false);
diff --git a/doc/src/sgml/ref/alter_schema.sgmlin b/doc/src/sgml/ref/alter_schema.sgmlin
index c6f58cc53..b2937f349 100644
--- a/doc/src/sgml/ref/alter_schema.sgmlin
+++ b/doc/src/sgml/ref/alter_schema.sgmlin
@@ -15,6 +15,9 @@ ALTER SCHEMA schema_name
ALTER SCHEMA schema_name
OWNER TO new_owner;
ALTER SCHEMA schema_name {WITH | WITHOUT} BLOCKCHAIN;
+ALTER SCHEMA schema_name
+ [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ];
+NOTICE: '[ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ]' is only available in CENTRALIZED mode and B-format database!
\ No newline at end of file
diff --git a/doc/src/sgml/ref/alter_table.sgmlin b/doc/src/sgml/ref/alter_table.sgmlin
index aae4fa582..a6ea942da 100755
--- a/doc/src/sgml/ref/alter_table.sgmlin
+++ b/doc/src/sgml/ref/alter_table.sgmlin
@@ -53,11 +53,17 @@ column_clause
| ENCRYPTION KEY ROTATION
| AUTO_INCREMENT [ = ] value
| ALTER INDEX index_name [ VISBLE | INVISIBLE ]
+ | [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ]
+ | CONVERT TO CHARACTER SET | CHARSET charset [ COLLATE collation ]
+NOTICE: '[ CHARACTER SET | CHARSET ]' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ COLLATE ]' is only available in CENTRALIZED mode and B-format database!
where column_clause can be:
-ADD [ COLUMN ] column_name data_type [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ]
+ADD [ COLUMN ] column_name data_type [ CHARACTER SET | CHARSET charset ] [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ] [ FIRST | AFTER column_name ]
| MODIFY column_name data_type
| MODIFY column_name [ CONSTRAINT constraint_name ] NOT NULL [ ENABLE ]
| MODIFY column_name [ CONSTRAINT constraint_name ] NULL
+ | MODIFY [ COLUMN ] column_name data_type [ CHARACTER SET | CHARSET charset ] [ COLLATE collation ] [ column_constraint [ ... ] ] [FIRST | AFTER column_name]
+ | CHANGE [ COLUMN ] column_name new_column_name data_type [ CHARACTER SET | CHARSET charset ] [ COLLATE collation ] [ column_constraint [ ... ] ] [FIRST | AFTER column_name]
| DROP [ COLUMN ] [ IF EXISTS ] column_name [ RESTRICT | CASCADE ]
| ALTER [ COLUMN ] column_name [ SET DATA ] TYPE data_type [ COLLATE collation ] [ USING expression ]
| ALTER [ COLUMN ] column_name { SET DEFAULT expression | DROP DEFAULT }
@@ -68,20 +74,23 @@ ADD [ COLUMN ] column_name data_type [ compress_mode ] [ COLLATE collation ] [ c
| ALTER [ COLUMN ] column_name SET ( {attribute_option = value} [, ... ] )
| ALTER [ COLUMN ] column_name RESET ( attribute_option [, ... ] )
| ALTER [ COLUMN ] column_name SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN }
+NOTICE: 'MODIFY [ COLUMN ] ...' action is only available in CENTRALIZED mode and B-format database!
+NOTICE: 'CHANGE [ COLUMN ] ...' action is only available in CENTRALIZED mode and B-format database!
where column_constraint can be:
[ CONSTRAINT constraint_name ]
{ NOT NULL |
NULL |
CHECK ( expression ) |
DEFAULT default_expr |
- GENERATED ALWAYS AS ( generation_expr ) STORED |
+ GENERATED ALWAYS AS ( generation_expr ) [STORED] |
AUTO_INCREMENT |
- UNIQUE index_parameters |
+ UNIQUE [KEY] index_parameters |
PRIMARY KEY index_parameters |
ENCRYPTED WITH ( COLUMN_ENCRYPTION_KEY = column_encryption_key, ENCRYPTION_TYPE = encryption_type_value ) |
REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
[ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
+NOTICE: 'UNIQUE KEY' in table_constraint is only available in CENTRALIZED mode and B-format database!
where compress_mode can be:
{ DELTA | PREFIX | DICTIONARY | NUMSTR | NOCOMPRESS }
where table_constraint can be:
@@ -102,11 +111,14 @@ where table_constraint_using_index can be:
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
NOTICE: '[ constraint_name ]' in table_constraint is optional in CENTRALIZED mode and B-format database, it is mandatory in other scenarios.
-NOTICE: '[ index_name ]' in table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: '[ USING method ]' in table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: '[ ASC | DESC ]' in table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: '( expression )' in 'UNIQUE' clause of table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: 'AUTO_INCREMENT' is only avaliable in CENTRALIZED mode and B-format database!
+NOTICE: '[ index_name ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ USING method ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ ASC | DESC ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '( expression )' in 'UNIQUE' clause of table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: 'AUTO_INCREMENT' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ FIRST | AFTER column_name ]' clause is only available in CENTRALIZED mode!
+NOTICE: '[ FIRST | AFTER column_name ]' in 'MODIFY | CHANGE [ COLUMN ] ...' clause is only available in B-format database!
+NOTICE: '[ CHARACTER SET | CHARSET charset ]' is only available in CENTRALIZED mode and B-format database!
NOTICE: '[ VISIBLE | INVISIBLE ]' is only avaliable in CENTRALIZED mode and B-format database!
diff --git a/doc/src/sgml/ref/alter_table_partition.sgmlin b/doc/src/sgml/ref/alter_table_partition.sgmlin
index 88892afe6..1d34bf04c 100644
--- a/doc/src/sgml/ref/alter_table_partition.sgmlin
+++ b/doc/src/sgml/ref/alter_table_partition.sgmlin
@@ -11,9 +11,7 @@
ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
- action [, ... ];
-ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
- RENAME PARTITION { partion_name | FOR ( partition_value [, ...] ) } TO partition_new_name;
+ { action [, ... ] | rename_clause | reset_clause };
where action can be:
move_clause |
@@ -30,16 +28,16 @@ MOVE PARTITION { partion_name | FOR ( partition_value [, ...] ) } TABLESPACE tab
where exchange_clause can be:
EXCHANGE PARTITION { ( partition_name ) | FOR ( partition_value [, ...] ) }
WITH TABLE {[ ONLY ] ordinary_table_name | ordinary_table_name * | ONLY ( ordinary_table_name )}
- [ { WITH | WITHOUT } VALIDATION ] [ VERBOSE ]
+ [ { WITH | WITHOUT } VALIDATION ] [ VERBOSE ] [ UPDATE GLOBAL INDEX ]
where row_clause can be:
{ ENABLE | DISABLE } ROW MOVEMENT
where merge_clause can be:
MERGE PARTITIONS { partition_name } [, ...] INTO PARTITION partition_name
- [ TABLESPACE tablespacename ]
+ [ TABLESPACE tablespacename ] [ UPDATE GLOBAL INDEX ]
where modify_clause can be:
MODIFY PARTITION partition_name { UNUSABLE LOCAL INDEXES | REBUILD UNUSABLE LOCAL INDEXES }
where split_clause can be:
-SPLIT PARTITION { partition_name | FOR ( partition_value [, ...] ) } { split_point_clause | no_split_point_clause }
+SPLIT PARTITION { partition_name | FOR ( partition_value [, ...] ) } { split_point_clause | no_split_point_clause } [ UPDATE GLOBAL INDEX ]
where split_point_clause can be:
AT ( partition_value ) INTO ( PARTITION partition_name [ TABLESPACE tablespacename ] , PARTITION partition_name [ TABLESPACE tablespacename ] )
where no_split_point_clause can be:
@@ -60,10 +58,13 @@ PARTITION partition_name {
{END({partition_value | MAXVALUE})}
} [TABLESPACE tablespace_name]
where drop_clause can be:
-DROP PARTITION { partition_name | FOR ( partition_value [, ...] ) }
+DROP PARTITION { partition_name | FOR ( partition_value [, ...] ) } [ UPDATE GLOBAL INDEX ]
where truncate_clause can be:
TRUNCATE PARTITION { partition_name | FOR ( partition_value [, ...] ) } [ UPDATE GLOBAL INDEX ]
-NOTICE: 'truncate_clause' is only avaliable in CENTRALIZED mode!
+where rename_clause can be:
+RENAME PARTITION { partition_name | FOR ( partition_value [, ...] ) } TO partition_new_name;
+where reset_clause can be:
+RESET PARTITION;
\ No newline at end of file
diff --git a/doc/src/sgml/ref/alter_table_subpartition.sgmlin b/doc/src/sgml/ref/alter_table_subpartition.sgmlin
index 8c290a27a..efa2a16af 100644
--- a/doc/src/sgml/ref/alter_table_subpartition.sgmlin
+++ b/doc/src/sgml/ref/alter_table_subpartition.sgmlin
@@ -11,13 +11,16 @@
ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
- action [, ... ];
+ { action [, ... ] | reset_clause };
where action can be:
+ row_clause |
add_clause |
drop_clause |
split_clause |
truncate_clause
+where row_clause can be:
+{ ENABLE | DISABLE } ROW MOVEMENT
where add_clause can be:
ADD { partition_less_than_item | partition_list_item } [ ( subpartition_definition_list ) ]
MODIFY PARTITION partition_name ADD subpartition_definition
@@ -37,7 +40,8 @@ AT ( subpartition_value ) INTO ( SUBPARTITION subpartition_name [ TABLESPACE tab
VALUES ( subpartition_value ) INTO ( SUBPARTITION subpartition_name [ TABLESPACE tablespacename ] , SUBPARTITION subpartition_name [ TABLESPACE tablespacename ] )
where truncate_clause can be:
TRUNCATE SUBPARTITION { subpartition_name } [ UPDATE GLOBAL INDEX ]
-NOTICE: 'ALTER TABLE SUBPARTITION' is only avaliable in CENTRALIZED mode!
+where reset_clause can be:
+RESET PARTITION;
\ No newline at end of file
diff --git a/doc/src/sgml/ref/analyse.sgmlin b/doc/src/sgml/ref/analyse.sgmlin
index 6b3a6f9b8..f364c30f2 100755
--- a/doc/src/sgml/ref/analyse.sgmlin
+++ b/doc/src/sgml/ref/analyse.sgmlin
@@ -14,7 +14,7 @@
[ table_name [ ( column_name [, ...] ) ] ];
{ANALYZE | ANALYSE} [ VERBOSE ]
[ table_name [ ( column_name [, ...] ) ] ]
- PARTITION patrition_name;
+ PARTITION (partition_name);
{ANALYZE | ANALYSE} [ VERBOSE ]
{ foreign_table_name | FOREIGN TABLES };
{ANALYZE | ANALYSE} [ VERBOSE ]
@@ -23,7 +23,7 @@
{ANALYZE | ANALYSE} VERIFY {FAST|COMPLETE}
table_name|index_name [CASCADE];
{ANALYZE | ANALYSE} VERIFY {FAST|COMPLETE}
- table_name PARTITION (patrition_name) [CASCADE];
+ table_name PARTITION (partition_name) [CASCADE];
diff --git a/doc/src/sgml/ref/analyze.sgmlin b/doc/src/sgml/ref/analyze.sgmlin
index e6c0e3593..be4e5ca56 100755
--- a/doc/src/sgml/ref/analyze.sgmlin
+++ b/doc/src/sgml/ref/analyze.sgmlin
@@ -14,7 +14,7 @@
[ table_name [ ( column_name [, ...] ) ] ];
{ ANALYZE | ANALYSE } [ VERBOSE ]
[ table_name [ ( column_name [, ...] ) ] ]
- PARTITION patrition_name;
+ PARTITION (partition_name);
{ ANALYZE | ANALYSE } [ VERBOSE ]
{ foreign_table_name | FOREIGN TABLES };
{ ANALYZE | ANALYSE } [ VERBOSE ]
@@ -22,7 +22,7 @@
{ ANALYZE | ANALYSE } VERIFY { FAST | COMPLETE };
{ ANALYZE | ANALYSE } VERIFY { FAST | COMPLETE } table_name | index_name [ CASCADE ];
{ ANALYZE | ANALYSE } VERIFY { FAST | COMPLETE }
- table_name PARTITION (patrition_name) [ CASCADE ];
+ table_name PARTITION (partition_name) [ CASCADE ];
diff --git a/doc/src/sgml/ref/create_schema.sgmlin b/doc/src/sgml/ref/create_schema.sgmlin
index b12c06acc..fc1a9833c 100644
--- a/doc/src/sgml/ref/create_schema.sgmlin
+++ b/doc/src/sgml/ref/create_schema.sgmlin
@@ -12,6 +12,9 @@
CREATE SCHEMA [ IF NOT EXISTS ] schema_name
[ AUTHORIZATION user_name ] [WITH BLOCKCHAIN] [ schema_element [ ... ] ];
+CREATE SCHEMA schema_name
+ [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ];
+NOTICE: '[ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ]' is only available in CENTRALIZED mode and B-format database!
\ No newline at end of file
diff --git a/doc/src/sgml/ref/create_table.sgmlin b/doc/src/sgml/ref/create_table.sgmlin
index dd6b271f4..ed166b65c 100644
--- a/doc/src/sgml/ref/create_table.sgmlin
+++ b/doc/src/sgml/ref/create_table.sgmlin
@@ -11,11 +11,13 @@
CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXISTS ] table_name
- ( { column_name data_type [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ]
+ ( { column_name data_type [ CHARACTER SET | CHARSET charset ]
+ [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ]
| table_constraint
| LIKE source_table [ like_option [...] ] }
[, ... ])
[ AUTO_INCREMENT [ = ] value ]
+ [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ]
[ WITH ( {storage_parameter = value} [, ... ] ) ]
[ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ]
[ COMPRESS | NOCOMPRESS ]
@@ -27,14 +29,15 @@ where column_constraint can be:
NULL |
CHECK ( expression ) |
DEFAULT default_expr |
- GENERATED ALWAYS AS ( generation_expr ) STORED |
+ GENERATED ALWAYS AS ( generation_expr ) [STORED] |
AUTO_INCREMENT |
- UNIQUE index_parameters |
+ UNIQUE [KEY] index_parameters |
PRIMARY KEY index_parameters |
ENCRYPTED WITH ( COLUMN_ENCRYPTION_KEY = column_encryption_key, ENCRYPTION_TYPE = encryption_type_value ) |
REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
[ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
+NOTICE: 'UNIQUE KEY' in table_constraint is only available in CENTRALIZED mode and B-format database!
where table_constraint can be:
[ CONSTRAINT [ constraint_name ] ]
{ CHECK ( expression ) |
@@ -66,11 +69,13 @@ where list_distribution_rules can be:
]
NOTICE: '[ constraint_name ]' in table_constraint is optional in CENTRALIZED mode and B-format database, it is mandatory in other scenarios.
-NOTICE: '[ index_name ]' in table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: '[ USING method ]' in table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: '[ ASC | DESC ]' in table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: '( expression )' in 'UNIQUE' clause of table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: 'AUTO_INCREMENT' is only avaliable in CENTRALIZED mode and B-format database!
+NOTICE: '[ index_name ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ USING method ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ ASC | DESC ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '( expression )' in 'UNIQUE' clause of table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: 'AUTO_INCREMENT' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ CHARACTER SET | CHARSET ]' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ COLLATE [ = ] collation ]' is only available in CENTRALIZED mode and B-format database!
NOTICE: '[ VISIBLE | INVISIBLE ]' is only avaliable in CENTRALIZED mode and B-format database!
diff --git a/doc/src/sgml/ref/create_table_partition.sgmlin b/doc/src/sgml/ref/create_table_partition.sgmlin
index bac6bc345..57ce3dea1 100644
--- a/doc/src/sgml/ref/create_table_partition.sgmlin
+++ b/doc/src/sgml/ref/create_table_partition.sgmlin
@@ -12,12 +12,14 @@
CREATE TABLE [ IF NOT EXISTS ] partition_table_name
( [
- { column_name data_type [ COLLATE collation ] [ column_constraint [ ... ] ]
+ { column_name data_type [ CHARACTER SET | CHARSET charset ]
+ [ COLLATE collation ] [ column_constraint [ ... ] ]
| table_constraint
| LIKE source_table [ like_option [...] ] }
[, ... ]
] )
[ AUTO_INCREMENT [ = ] value ]
+ [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ][ [ DEFAULT ] COLLATE [ = ] collation ]
[ WITH ( {storage_parameter = value} [, ... ] ) ]
[ COMPRESS | NOCOMPRESS ]
[ TABLESPACE tablespace_name ]
@@ -26,25 +28,30 @@ CREATE TABLE [ IF NOT EXISTS ] partition_table_name
[ TO { GROUP groupname | NODE ( nodename [, ... ] ) } ]
PARTITION BY {
{VALUES (partition_key)} |
- {RANGE (partition_key) [ INTERVAL ('interval_expr') [ STORE IN ( tablespace_name [, ...] ) ] ] ( partition_less_than_item [, ... ] )} |
- {RANGE (partition_key) [ INTERVAL ('interval_expr') [ STORE IN ( tablespace_name [, ...] ) ] ] ( partition_start_end_item [, ... ] )} |
- {LIST | HASH (partition_key) (PARTITION partition_name [ VALUES (list_values_clause) ] opt_table_space ) }
- NOTICE: LIST/HASH partition is only avaliable in CENTRALIZED mode!
+ {RANGE [ COLUMNS ] (partition_key) [ INTERVAL ('interval_expr') [ STORE IN ( tablespace_name [, ...] ) ] ] [ PARTITIONS integer ] ( partition_less_than_item [, ... ] )} |
+ {RANGE [ COLUMNS ] (partition_key) [ INTERVAL ('interval_expr') [ STORE IN ( tablespace_name [, ...] ) ] ] [ PARTITIONS integer ] ( partition_start_end_item [, ... ] )} |
+ {{{LIST [ COLUMNS ]} | HASH | KEY} (partition_key) [ PARTITIONS integer ] (PARTITION partition_name [ VALUES [ IN ] (list_values_clause) ] opt_table_space ) }
} [ { ENABLE | DISABLE } ROW MOVEMENT ];
+NOTICE: [ COLUMNS ] is only available in B-format database!
+NOTICE: [ PARTITIONS integer ] in RANGE/LIST partition is only available in B-format database!
+NOTICE: [ IN ] is only available in B-format database!
+NOTICE: KEY is only available in B-format database!
+
where column_constraint can be:
[ CONSTRAINT constraint_name ]
{ NOT NULL |
NULL |
CHECK ( expression ) |
DEFAULT default_expr |
- GENERATED ALWAYS AS ( generation_expr ) STORED |
+ GENERATED ALWAYS AS ( generation_expr ) [STORED] |
AUTO_INCREMENT |
- UNIQUE index_parameters |
+ UNIQUE [KEY] index_parameters |
PRIMARY KEY index_parameters |
REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
[ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
+NOTICE: 'UNIQUE KEY' in table_constraint is only available in CENTRALIZED mode and B-format database!
where table_constraint can be:
[ CONSTRAINT [ constraint_name ] ]
{ CHECK ( expression ) |
@@ -59,7 +66,8 @@ where index_parameters can be:
where like_option can be:
{ INCLUDING | EXCLUDING } { DEFAULTS | GENERATED | CONSTRAINTS | INDEXES | STORAGE | COMMENTS | RELOPTIONS | DISTRIBUTION | ALL }
where partition_less_than_item can be:
-PARTITION partition_name VALUES LESS THAN ( { partition_value | MAXVALUE } ) [TABLESPACE tablespace_name]
+PARTITION partition_name VALUES LESS THAN { ( { partition_value | MAXVALUE } [, ... ] ) | MAXVALUE } [TABLESPACE [=] tablespace_name]
+NOTICE: MAXVALUE without parentheses is only available in B-format database!
where partition_start_end_item can be:
PARTITION partition_name {
{START(partition_value) END (partition_value) EVERY (interval_value)} |
@@ -69,10 +77,12 @@ PARTITION partition_name {
} [TABLESPACE tablespace_name]
NOTICE: '[ constraint_name ]' in table_constraint is optional in CENTRALIZED mode and B-format database, it is mandatory in other scenarios.
-NOTICE: '[ index_name ]' in table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: '[ USING method ]' in table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: '[ ASC | DESC ]' in table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: 'AUTO_INCREMENT' is only avaliable in CENTRALIZED mode and B-format database!
+NOTICE: '[ index_name ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ USING method ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ ASC | DESC ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: 'AUTO_INCREMENT' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ CHARACTER SET | CHARSET ]' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ COLLATE [ = ] collation ]' is only available in CENTRALIZED mode and B-format database!
diff --git a/doc/src/sgml/ref/create_table_subpartition.sgmlin b/doc/src/sgml/ref/create_table_subpartition.sgmlin
index 1e558e68c..509a328d4 100644
--- a/doc/src/sgml/ref/create_table_subpartition.sgmlin
+++ b/doc/src/sgml/ref/create_table_subpartition.sgmlin
@@ -11,23 +11,30 @@
CREATE TABLE [ IF NOT EXISTS ] subpartition_table_name
-( { column_name data_type [ COLLATE collation ] [ column_constraint [ ... ] ]
+( { column_name data_type [ CHARACTER SET | CHARSET charset ]
+ [ COLLATE collation ] [ column_constraint [ ... ] ]
| table_constraint
| LIKE source_table [ like_option [...] ] }
[, ... ]
)
[ AUTO_INCREMENT [ = ] value ]
+ [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ][ [ DEFAULT ] COLLATE [ = ] collation ]
[ WITH ( {storage_parameter = value} [, ... ] ) ]
[ COMPRESS | NOCOMPRESS ]
[ TABLESPACE tablespace_name ]
- PARTITION BY {RANGE | LIST | HASH} (partition_key) SUBPARTITION BY {RANGE | LIST | HASH} (subpartition_key)
+ PARTITION BY {RANGE [ COLUMNS ] | LIST [ COLUMNS ] | HASH | KEY} (partition_key) [ PARTITIONS integer ] SUBPARTITION BY {RANGE | LIST | HASH | KEY} (subpartition_key) [ SUBPARTITIONS integer ]
(
- PARTITION partition_name1 [ VALUES LESS THAN (val1) | VALUES (val1[, ...]) ] [ TABLESPACE tablespace ]
+ PARTITION partition_name1 [ VALUES LESS THAN { (val1 | MAXVALUE) | MAXVALUE } | VALUES [ IN ] (val1[, ...]) ] [ TABLESPACE [=] tablespace ]
(
- { SUBPARTITION subpartition_name1 [ VALUES LESS THAN (val1_1) | VALUES (val1_1[, ...])] [ TABLESPACE tablespace ] } [, ...]
+ { SUBPARTITION subpartition_name1 [ VALUES LESS THAN (val1_1) | VALUES (val1_1[, ...])] [ TABLESPACE [=] tablespace ] } [, ...]
)
[, ...]
) [ { ENABLE | DISABLE } ROW MOVEMENT ];
+NOTICE: [ COLUMNS ] is only available in B-format database!
+NOTICE: [ PARTITIONS integer ] in RANGE/LIST partition is only available in B-format database!
+NOTICE: [ IN ] is only available in B-format database!
+NOTICE: KEY is only available in B-format database!
+NOTICE: MAXVALUE without parentheses is only available in B-format database!
where column_constraint can be:
[ CONSTRAINT constraint_name ]
@@ -35,13 +42,14 @@ where column_constraint can be:
NULL |
CHECK ( expression ) |
DEFAULT default_expr |
- GENERATED ALWAYS AS ( generation_expr ) STORED |
+ GENERATED ALWAYS AS ( generation_expr ) [STORED] |
AUTO_INCREMENT |
- UNIQUE index_parameters |
+ UNIQUE [KEY] index_parameters |
PRIMARY KEY index_parameters |
REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
[ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
+NOTICE: 'UNIQUE KEY' in table_constraint is only available in CENTRALIZED mode and B-format database!
where table_constraint can be:
[ CONSTRAINT [ constraint_name ] ]
{ CHECK ( expression ) |
@@ -56,12 +64,14 @@ where index_parameters can be:
[ WITH ( {storage_parameter = value} [, ... ] ) ]
[ USING INDEX TABLESPACE tablespace_name ]
-NOTICE: 'CREATE TABLE SUBPARTITION' is only avaliable in CENTRALIZED mode!
+NOTICE: 'CREATE TABLE SUBPARTITION' is only available in CENTRALIZED mode!
NOTICE: '[ constraint_name ]' in table_constraint is optional in CENTRALIZED mode and B-format database, it is mandatory in other scenarios.
-NOTICE: '[ index_name ]' in table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: '[ USING method ]' in table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: '[ ASC | DESC ]' in table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: 'AUTO_INCREMENT' is only avaliable in CENTRALIZED mode and B-format database!
+NOTICE: '[ index_name ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ USING method ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ ASC | DESC ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: 'AUTO_INCREMENT' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ CHARACTER SET | CHARSET ]' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ COLLATE [ = ] collation ]' is only available in CENTRALIZED mode and B-format database!
diff --git a/doc/src/sgml/ref/select.sgmlin b/doc/src/sgml/ref/select.sgmlin
index 85dc60eb2..344839a8b 100644
--- a/doc/src/sgml/ref/select.sgmlin
+++ b/doc/src/sgml/ref/select.sgmlin
@@ -13,6 +13,7 @@
[ WITH [ RECURSIVE ] with_query [, ...] ]
SELECT [/*+ plan_hint */] [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]
{ * | {expression [ [ AS ] output_name ]} [, ...] }
+ [into_option]
[ FROM from_item [, ...] ]
[ WHERE condition ]
[ [ START WITH condition ] CONNECT BY condition [ ORDER SIBLINGS BY expression ] ]
@@ -24,9 +25,27 @@ SELECT [/*+ plan_hint */] [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]
[ LIMIT { [offset,] count | ALL } ]
[ OFFSET start [ ROW | ROWS ] ]
[ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]
+ [into_option]
[ {FOR { UPDATE | NO KEY UPDATE | SHARE | KEY SHARE } [ OF table_name [, ...] ] [ NOWAIT | WAIT N | SKIP LOCKED]} [...] ]
+ [into_option]
TABLE { ONLY { (table_name) | table_name } | table_name [ * ]};
+where into_option can be:
+INTO var_name [, var_name] ...
+| INTO OUTFILE 'file_name'
+ [CHARACTER SET charset_name]
+ export_options
+| INTO DUMPFILE 'file_name'
+where export_options can be:
+[FIELDS
+ [TERMINATED BY 'string']
+ [[OPTIONALLY] ENCLOSED BY 'char']
+ [ESCAPED BY 'char' ]
+]
+[LINES
+ [STARTING BY 'string']
+ [TERMINATED BY 'string']
+]
where from_item can be:
[ ONLY ] table_name [ * ] [ partition_clause ] [ [ AS ] alias [ ( column_alias [, ...] ) ] ]
[ TABLESAMPLE sampling_method ( argument [, ...] ) [ REPEATABLE ( seed ) ] ]
@@ -50,6 +69,7 @@ PARTITION { ( partition_name ) | FOR ( partition_value [, ...] ) }
SUBPARTITION { ( subpartition_name ) | FOR ( subpartition_value [, ...] )}
where nlssort_expression_clause can be:
NLSSORT ( column_name, ' NLS_SORT = { SCHINESE_PINYIN_M | generic_m_ci } ' )
+NOTICE: [into_option] is only available in CENTRALIZED mode and B-format database.
NOTICE: '[ [ START WITH condition ] CONNECT BY condition [ ORDER SIBLINGS BY expression ] ]' is only avaliable in CENTRALIZED mode!
NOTICE: 'SUBPARTITION { ( subpartition_name ) | FOR ( subpartition_value [, ...] )}' is only avaliable in CENTRALIZED mode!
diff --git a/doc/src/sgml/ref/set.sgmlin b/doc/src/sgml/ref/set.sgmlin
index f105913a9..d5bbd7376 100644
--- a/doc/src/sgml/ref/set.sgmlin
+++ b/doc/src/sgml/ref/set.sgmlin
@@ -24,6 +24,8 @@ SET [ SESSION | @@SESSION. | @@]
{ {config_parameter = { expr | DEFAULT }}};
SET @var_name := expr [, @var_name := expr] ...
SET @var_name = expr [, @var_name = expr] ...
-NOTICE: '@var_name','{ GLOBAL | @@GLOBAL.}', '[ SESSION | @@SESSION. | @@]' are only avaiable in CENTRALIZED mode and B-format database, and enable_set_variable_b_format = on.
+NOTICE: '@var_name' is only available in CENTRALIZED mode and B-format database.
+NOTICE: '{ GLOBAL | @@GLOBAL.}' is only available in CENTRALIZED mode and B-format database.
+NOTICE: '[ @@SESSION. | @@]' is only available in CENTRALIZED mode and B-format database.
\ No newline at end of file
diff --git a/doc/src/sgml/ref/set_transaction.sgmlin b/doc/src/sgml/ref/set_transaction.sgmlin
index b1aa95195..a6abe414a 100644
--- a/doc/src/sgml/ref/set_transaction.sgmlin
+++ b/doc/src/sgml/ref/set_transaction.sgmlin
@@ -10,11 +10,12 @@
-{SET [ LOCAL ] TRANSACTION|SET SESSION CHARACTERISTICS AS TRANSACTION}
+{SET [ LOCAL | SESSION | GLOBAL ] TRANSACTION|SET SESSION CHARACTERISTICS AS TRANSACTION}
{ ISOLATION LEVEL { READ COMMITTED | READ UNCOMMITTED }
| { READ WRITE | READ ONLY | SERIALIZABLE | REPEATABLE READ }
} [, ...]
SET TRANSACTION SNAPSHOT snapshot_id;
+NOTICE: SET GLOBAL TRANSACTION is only available in CENTRALIZED mode and B-format database!
\ No newline at end of file
diff --git a/src/bin/gs_guc/cluster_guc.conf b/src/bin/gs_guc/cluster_guc.conf
index 494675464..515a90c78 100755
--- a/src/bin/gs_guc/cluster_guc.conf
+++ b/src/bin/gs_guc/cluster_guc.conf
@@ -58,10 +58,13 @@ audit_resource_policy|bool|0,0|NULL|NULL|
audit_rotation_interval|int|1,35791394|min|NULL|
audit_rotation_size|int|1024,1048576|kB|NULL|
audit_space_limit|int|1024,1073741824|kB|NULL|
-audit_system_object|int|0,134217727|NULL|NULL|
+audit_system_object|int|0,268435455|NULL|NULL|
audit_user_locked|int|0,1|NULL|NULL|
audit_user_violation|int|0,1|NULL|NULL|
audit_set_parameter|int|0,1|NULL|NULL|
+no_audit_client|string|0,0|NULL|NULL|
+full_audit_users|string|0,0|NULL|NULL|
+audit_system_function_exec|int|0,1|NULL|NULL|
authentication_timeout|int|1,600|s|NULL|
autoanalyze|bool|0,0|NULL|NULL|
autovacuum|bool|0,0|NULL|Even if this parameter is set to off, when a transaction ID wraparound imminent, the database will automatically start the cleanup process automatically.|
@@ -82,6 +85,7 @@ backslash_quote|enum|safe_encoding,on,off,true,false,yes,no,1,0|NULL|NULL|
backtrace_min_messages|enum|debug,debug5,debug4,debug3,debug2,debug1,log,info,notice,warning,error,fatal,panic|NULL|It will increase the cost of the system, when print the function stack information frequently. Therefore, when analyzing the problem, avoid setting the value of backtrace_min_messages for fatal following levels.|
bbox_dump_count|int|1,20|NULL|NULL|
bbox_dump_path|string|0,0|NULL|NULL|
+b_format_behavior_compat_options|string|0,0|NULL|NULL|
behavior_compat_options|string|0,0|NULL|NULL|
plsql_compile_check_options|string|0,0|NULL|NULL|
bgwriter_delay|int|10,10000|ms|NULL|
@@ -153,6 +157,7 @@ instr_unique_sql_count|int|0,2147483647|NULL|NULL|
track_stmt_session_slot|int|0,2147483647|NULL|NULL|
track_stmt_details_size|int64|0,100000000|NULL|NULL|
track_stmt_stat_level|string|0,0|NULL|NULL|
+enable_availablezone|bool|0,0|NULL|NULL|
enable_instr_cpu_timer|bool|0,0|NULL|NULL|
enable_instr_rt_percentile|bool|0,0|NULL|NULL|
enable_instr_track_wait|bool|0,0|NULL|NULL|
diff --git a/src/bin/gs_guc/cluster_guc.cpp b/src/bin/gs_guc/cluster_guc.cpp
index 22d7355b4..fbb909cb1 100644
--- a/src/bin/gs_guc/cluster_guc.cpp
+++ b/src/bin/gs_guc/cluster_guc.cpp
@@ -1093,6 +1093,63 @@ int validate_node_instance_name(char* nodename, int type, char* instance_name)
return 0;
}
+
+#ifndef ENABLE_MULTIPLE_NODES
+/*
+ ******************************************************************************
+ Function : gsguc_precheck_forbid_parameters
+ Description : Forbid reload/set/check GUC parameters in list for ALL node type
+ Input : nodename(indicates node name)
+ Output : none
+ Return : bool
+ ******************************************************************************
+ */
+
+/* Forbid parameters for method "reload" with "all" nodes */
+char *gsguc_forbid_list_reload[] = {
+ "listen_addresses",
+ NULL
+};
+
+bool gsguc_precheck_forbid_parameters(char *nodename)
+{
+ /* If set pg_hba, just pass */
+ if (is_hba_conf) {
+ return true;
+ }
+
+ switch (ctl_command) {
+ case SET_CONF_COMMAND:
+ /* process checking parameters for SET mode */
+ break;
+ case RELOAD_CONF_COMMAND:
+ /* process checking parameters for RELOAD mode */
+ if (strncmp(nodename, "all", strlen("all")) != 0) {
+ break;
+ }
+ /* parameters not support '-N all' */
+ for (int i = 0; i < config_param_number && config_param[i] != NULL; i++) {
+ for (int j = 0; gsguc_forbid_list_reload[j] != NULL; j++) {
+ if (strcmp(config_param[i], gsguc_forbid_list_reload[j]) == 0) {
+ write_stderr(_("ERROR: \"%s\" can not \"%s\" with \"%s\" method.\n"),
+ gsguc_forbid_list_reload[j],
+ get_ctl_command_type(),
+ nodename);
+ return false;
+ }
+ }
+ }
+ break;
+ case CHECK_CONF_COMMAND:
+ /* process checking parameters for CHECK mode */
+ break;
+ default:
+ break;
+ }
+ return true;
+}
+#endif
+
/*
******************************************************************************
Function : validate_cluster_guc_options
@@ -1371,10 +1428,16 @@ void process_cluster_guc_option(char* nodename, int type, char* instance_name, c
}
else
{
- if (0 == strncmp(nodename, "all", sizeof("all")))
+ if (0 == strncmp(nodename, "all", strlen("all"))) {
+#ifndef ENABLE_MULTIPLE_NODES
+ if (!gsguc_precheck_forbid_parameters(nodename)) {
+ exit(1);
+ }
+#endif
do_all_nodes_instance(instance_name, indatadir);
- else
+ } else {
do_remote_instance(nodename, instance_name, indatadir);
+ }
}
}
diff --git a/src/bin/gs_guc/pg_guc.cpp b/src/bin/gs_guc/pg_guc.cpp
index 190b56ec4..22cb4fdfe 100644
--- a/src/bin/gs_guc/pg_guc.cpp
+++ b/src/bin/gs_guc/pg_guc.cpp
@@ -2634,6 +2634,8 @@ int main(int argc, char** argv)
char* pgdata_C = NULL;
char* nodename = NULL;
char* instance_name = NULL;
+ int nodeNum = 0;
+ int instanceNum = 0;
int nRet = 0;
errno_t rc = 0;
progname = PROG_NAME;
@@ -2753,11 +2755,13 @@ int main(int argc, char** argv)
case 'N': {
GS_FREE(nodename);
nodename = xstrdup(optarg);
+ nodeNum++;
break;
}
case 'I': {
GS_FREE(instance_name);
instance_name = xstrdup(optarg);
+ instanceNum++;
break;
}
case 'M': {
@@ -2909,6 +2913,11 @@ int main(int argc, char** argv)
return 0;
}
+ if (nodeNum > 1 || instanceNum > 1) {
+ write_stderr(_("ERROR: The number of -I or -N must less than 2.\n"));
+ exit(1);
+ }
+
if (false == allocate_memory_list()) {
write_stderr(_("ERROR: Failed to allocate memory to list.\n"));
exit(1);
diff --git a/src/bin/pg_dump/common.cpp b/src/bin/pg_dump/common.cpp
index 2121cac1e..2c4cfba7a 100644
--- a/src/bin/pg_dump/common.cpp
+++ b/src/bin/pg_dump/common.cpp
@@ -97,7 +97,7 @@ TableInfo* getSchemaData(Archive* fout, int* numTablesPtr)
int numForeignDataWrappers;
int numForeignServers;
int numDefaultACLs;
-
+ int numEvents;
if (g_verbose)
write_msg(NULL, "reading schemas\n");
nspinfo = getNamespaces(fout, &numNamespaces);
@@ -241,6 +241,11 @@ TableInfo* getSchemaData(Archive* fout, int* numTablesPtr)
write_msg(NULL, "reading triggers\n");
getTriggers(fout, tblinfo, numTables);
+ if (g_verbose) {
+ write_msg(NULL, "reading events\n");
+ }
+ getEvents(fout, &numEvents);
+
/*Open-source-fix: Fix ordering of obj id for Rules and EventTriggers*/
if (g_verbose)
write_msg(NULL, "reading rewrite rules\n");
diff --git a/src/bin/pg_dump/pg_backup_archiver.cpp b/src/bin/pg_dump/pg_backup_archiver.cpp
index f08ca1e8d..2a619e3f8 100644
--- a/src/bin/pg_dump/pg_backup_archiver.cpp
+++ b/src/bin/pg_dump/pg_backup_archiver.cpp
@@ -2679,6 +2679,9 @@ static void _doSetFixedOutputState(ArchiveHandle* AH)
/* Make sure function checking is disabled */
(void)ahprintf(AH, "SET check_function_bodies = false;\n");
+ /* Make sure trigger checking is disabled */
+ (void)ahprintf(AH, "SET session_replication_role = replica;\n");
+
/* Avoid annoying notices etc */
(void)ahprintf(AH, "SET client_min_messages = warning;\n");
if (!AH->publicArc.std_strings)
diff --git a/src/bin/pg_dump/pg_dump.cpp b/src/bin/pg_dump/pg_dump.cpp
index 25350f9f7..4620dc449 100644
--- a/src/bin/pg_dump/pg_dump.cpp
+++ b/src/bin/pg_dump/pg_dump.cpp
@@ -147,6 +147,7 @@ typedef unsigned short uint16_t;
#define TARGET_V5 (strcasecmp("v5", optarg) == 0 ? true : false)
#define if_exists (targetV1 || targetV5) ? "IF EXISTS " : ""
#define if_cascade (targetV1 || targetV5) ? " CASCADE" : ""
+#define INTERVAL_UNITE_OFFSET 3
#define USING_STR_OFFSET 7
#define UNIQUE_OFFSET 7
@@ -235,11 +236,13 @@ static char connected_node_type = 'N'; /* 'N' -- NONE
'D' -- Datanode
*/
char* all_data_nodename_list = NULL;
+const uint32 CHARACTER_SET_VERSION_NUM = 92844;
const uint32 USTORE_UPGRADE_VERSION = 92368;
const uint32 PACKAGE_ENHANCEMENT = 92444;
const uint32 SUBSCRIPTION_VERSION = 92580;
const uint32 SUBSCRIPTION_BINARY_VERSION_NUM = 92656;
const uint32 B_DUMP_TRIGGER_VERSION_NUM = 92843;
+const uint32 EVENT_VERSION = 92844;
#ifdef DUMPSYSLOG
char* syslogpath = NULL;
@@ -385,6 +388,7 @@ static void dumpOpfamily(Archive* fout, OpfamilyInfo* opfinfo);
static void dumpCollation(Archive* fout, CollInfo* convinfo);
static void dumpConversion(Archive* fout, ConvInfo* convinfo);
static void dumpRule(Archive* fout, RuleInfo* rinfo);
+static void dumpEvent(Archive *fout, EventInfo *einfo);
static void dumpRlsPolicy(Archive* fout, RlsPolicyInfo* policyinfo);
static void dumpAgg(Archive* fout, AggInfo* agginfo);
static void dumpTrigger(Archive* fout, TriggerInfo* tginfo);
@@ -493,6 +497,7 @@ static void dumpUniquePrimaryDef(PQExpBuffer buf, ConstraintInfo* coninfo, IndxI
static bool findDBCompatibility(Archive* fout, const char* databasename);
static void dumpTableAutoIncrement(Archive* fout, PQExpBuffer sqlbuf, TableInfo* tbinfo);
static bool needIgnoreSequence(TableInfo* tbinfo);
+inline bool isDB4AIschema(const NamespaceInfo *nspinfo);
#ifdef DUMPSYSLOG
static void ReceiveSyslog(PGconn* conn, const char* current_path);
#endif
@@ -813,18 +818,6 @@ int main(int argc, char** argv)
((dbname != NULL) ? dbname : ""), errorMessages);
}
- if (!isExecUserSuperRole(fout)) {
- if (use_role != NULL)
- write_msg(NULL,
- "Notice: options --role is not super or sysadmin role, can only back up objects belonging to user "
- "%s.\n",
- use_role);
- else
- write_msg(NULL,
- "Notice: options -U is not super or sysadmin role, can only back up objects belonging to user %s.\n",
- PQuser(((ArchiveHandle*)fout)->connection));
- }
-
if (CheckIfStandby(fout)) {
(void)remove(filename);
exit_horribly(NULL, "%s is not supported on standby or cascade standby\n", progname);
@@ -855,6 +848,19 @@ int main(int argc, char** argv)
setup_connection(fout);
+ if (!isExecUserSuperRole(fout)) {
+ if (use_role != NULL) {
+ write_msg(NULL,
+ "Notice: options --role is not super or sysadmin role, can only back up objects belonging to user "
+ "%s.\n",
+ use_role);
+ } else {
+ write_msg(NULL,
+ "Notice: options -U is not super or sysadmin role, can only back up objects belonging to user %s.\n",
+ PQuser(((ArchiveHandle*)fout)->connection));
+ }
+ }
+
/*
* Disable security label support if server version < v9.1.x (prevents
* access to nonexistent pg_seclabel catalog)
@@ -2215,8 +2221,11 @@ static void selectDumpableNamespace(NamespaceInfo* nsinfo)
strcmp(nsinfo->dobj.name, "pkg_util") == 0 || strcmp(nsinfo->dobj.name, "sys") == 0 ||
strcmp(nsinfo->dobj.name, "cstore") == 0 || strcmp(nsinfo->dobj.name, "snapshot") == 0 ||
strcmp(nsinfo->dobj.name, "information_schema") == 0 || strcmp(nsinfo->dobj.name, "pkg_service") == 0 ||
- strcmp(nsinfo->dobj.name, "blockchain") == 0 || strcmp(nsinfo->dobj.name, "db4ai") == 0 ||
- strcmp(nsinfo->dobj.name, "sqladvisor") == 0)
+ strcmp(nsinfo->dobj.name, "blockchain") == 0 || strcmp(nsinfo->dobj.name, "sqladvisor") == 0
+#ifdef ENABLE_MULTIPLE_NODES
+ || strcmp(nsinfo->dobj.name, "db4ai") == 0
+#endif
+ )
nsinfo->dobj.dump = false;
else
nsinfo->dobj.dump = true;
@@ -2369,7 +2378,8 @@ static void selectDumpableType(Archive* fout, TypeInfo* tyinfo)
/* skip undefined placeholder types */
else if (!tyinfo->isDefined)
tyinfo->dobj.dump = false;
-
+ else if (isDB4AIschema(tyinfo->dobj.nmspace))
+ tyinfo->dobj.dump = false;
else
tyinfo->dobj.dump = true;
}
@@ -2384,9 +2394,12 @@ static void selectDumpableType(Archive* fout, TypeInfo* tyinfo)
*/
static void selectDumpableDefaultACL(DefaultACLInfo* dinfo)
{
- if (dinfo->dobj.nmspace != NULL)
- dinfo->dobj.dump = dinfo->dobj.nmspace->dobj.dump;
- else
+ if (dinfo->dobj.nmspace != NULL) {
+ if (isDB4AIschema(dinfo->dobj.nmspace))
+ dinfo->dobj.dump = false;
+ else
+ dinfo->dobj.dump = dinfo->dobj.nmspace->dobj.dump;
+ } else
dinfo->dobj.dump = include_everything;
}
@@ -2444,9 +2457,25 @@ static void selectDumpableObject(DumpableObject* dobj, Archive* fout = NULL)
}
/*
- * Dump a table's contents for loading using the COPY command
- * - this routine is called by the Archiver when it wants the table
- * to be dumped.
+ * selectDumpableFuncs: policy-setting subroutine
+ * Mark a function as to be dumped or not
+ *
+ * Normally, we dump all extensions, or none of them if dump_include_everything
+ * is false. However, DB4AI functions is created in gs_init, no need dump
+ */
+static void selectDumpableFuncs(FuncInfo *fcinfo, Archive *fout = NULL)
+{
+    /* DB4AI functions are created by gs_init; never dump them. */
+    if (isDB4AIschema(fcinfo->dobj.nmspace)) {
+        fcinfo->dobj.dump = false;
+        return;
+    }
+    /* Everything else follows the normal dumpability policy. */
+    selectDumpableObject(&(fcinfo->dobj), fout);
+}
+
+/*
+ * Dump a table's contents for loading using the COPY command
+ * - this routine is called by the Archiver when it wants the table
+ * to be dumped.
*/
static int dumpTableData_copy(Archive* fout, void* dcontext)
@@ -2468,6 +2497,12 @@ static int dumpTableData_copy(Archive* fout, void* dcontext)
"dumping contents of table \"%s\"\n",
fmtQualifiedId(fout, tbinfo->dobj.nmspace->dobj.name, tbinfo->dobj.name));
+ if (isDB4AIschema(tbinfo->dobj.nmspace) && !isExecUserSuperRole(fout)) {
+ write_msg(NULL, "WARNING: schema db4ai not dumped because current user is not a superuser\n");
+ destroyPQExpBuffer(q);
+ return 1;
+ }
+
/*
* Make sure we are in proper schema. We will qualify the table name
* below anyway (in case its name conflicts with a pg_catalog table); but
@@ -2634,6 +2669,11 @@ static int dumpTableData_insert(Archive* fout, void* dcontext)
int nfields;
int field;
+ if (isDB4AIschema(tbinfo->dobj.nmspace) && !isExecUserSuperRole(fout)) {
+ write_msg(NULL, "WARNING: schema db4ai not dumped because current user is not a superuser\n");
+ destroyPQExpBuffer(q);
+ return 1;
+ }
/*
* Make sure we are in proper schema. We will qualify the table name
* below anyway (in case its name conflicts with a pg_catalog table); but
@@ -4915,6 +4955,7 @@ NamespaceInfo* getNamespaces(Archive* fout, int* numNamespaces)
int i_rolname;
int i_nspacl;
int i_nspblockchain;
+ int i_collation;
/*
* Before 7.3, there are no real namespaces; create two dummy entries, one
@@ -4931,6 +4972,7 @@ NamespaceInfo* getNamespaces(Archive* fout, int* numNamespaces)
nsinfo[0].rolname = gs_strdup("");
nsinfo[0].nspacl = gs_strdup("");
nsinfo[0].hasBlockchain = false;
+ nsinfo[0].collate = 0;
selectDumpableNamespace(&nsinfo[0]);
@@ -4942,6 +4984,7 @@ NamespaceInfo* getNamespaces(Archive* fout, int* numNamespaces)
nsinfo[1].rolname = gs_strdup("");
nsinfo[1].nspacl = gs_strdup("");
nsinfo[1].hasBlockchain = false;
+ nsinfo[1].collate = 0;
selectDumpableNamespace(&nsinfo[1]);
@@ -4962,8 +5005,12 @@ NamespaceInfo* getNamespaces(Archive* fout, int* numNamespaces)
appendPQExpBuffer(query,
"SELECT tableoid, oid, nspname, "
"(%s nspowner) AS rolname, "
- "nspacl, nspblockchain FROM pg_namespace",
- username_subquery);
+ "nspacl, nspblockchain, ", username_subquery);
+ if (GetVersionNum(fout) >= CHARACTER_SET_VERSION_NUM) {
+ appendPQExpBuffer(query, "nspcollation FROM pg_namespace");
+ } else {
+ appendPQExpBuffer(query, "NULL AS nspcollation FROM pg_namespace");
+ }
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
@@ -4977,6 +5024,7 @@ NamespaceInfo* getNamespaces(Archive* fout, int* numNamespaces)
i_rolname = PQfnumber(res, "rolname");
i_nspacl = PQfnumber(res, "nspacl");
i_nspblockchain = PQfnumber(res, "nspblockchain");
+ i_collation = PQfnumber(res, "nspcollation");
for (i = 0; i < ntups; i++) {
nsinfo[i].dobj.objType = DO_NAMESPACE;
@@ -4987,9 +5035,27 @@ NamespaceInfo* getNamespaces(Archive* fout, int* numNamespaces)
nsinfo[i].rolname = gs_strdup(PQgetvalue(res, i, i_rolname));
nsinfo[i].nspacl = gs_strdup(PQgetvalue(res, i, i_nspacl));
nsinfo[i].hasBlockchain = (strcmp(PQgetvalue(res, i, i_nspblockchain), "t") == 0) ? true : false;
+ if (PQgetisnull(res, i, i_collation)) {
+ nsinfo[i].collate = 0;
+ } else {
+ nsinfo[i].collate = atooid(PQgetvalue(res, i, i_collation));
+ }
/* Decide whether to dump this nmspace */
selectDumpableNamespace(&nsinfo[i]);
+#ifndef ENABLE_MULTIPLE_NODES
+ if (unlikely(isDB4AIschema(&nsinfo[i]) && nsinfo[i].dobj.dump)) {
+ selectSourceSchema(fout, "db4ai");
+ resetPQExpBuffer(query);
+ appendPQExpBuffer(query, "SELECT id FROM snapshot");
+ PGresult *res_ = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
+ if (PQntuples(res_) != 0 and outputClean == 0) {
+ exit_horribly(NULL, "Using options -c/--clean to dump db4ai schema. Or clean this schema.\n");
+ }
+ nsinfo[i].dobj.dump = (PQntuples(res_) != 0 and outputClean == 1);
+ PQclear(res_);
+ }
+#endif
}
PQclear(res);
@@ -6154,7 +6220,7 @@ FuncInfo* getFuncs(Archive* fout, int* numFuncs)
}
/* Decide whether we want to dump it */
- selectDumpableObject(&(finfo[i].dobj), fout);
+ selectDumpableFuncs(&(finfo[i]), fout);
}
PQclear(res);
@@ -8745,6 +8811,116 @@ void getTriggers(Archive* fout, TableInfo tblinfo[], int numTables)
destroyPQExpBuffer(query);
}
+/*
+ * getEvents
+ *    Collect scheduler EVENT objects from pg_job/gs_job_attribute/pg_job_proc
+ *    so they can be re-created at restore time.  Only B-compatibility
+ *    databases are scanned.  Returns NULL (with *numEvents = 0) when the
+ *    server is too old, the database is not B-compatible, or no events exist.
+ */
+EventInfo* getEvents(Archive *fout, int *numEvents)
+{
+    PGresult *res = NULL;
+    PGresult *attres = NULL;
+    PGresult *procres = NULL;
+    int ntups;
+    int attntups;
+    int i;
+    int j;
+    PQExpBuffer query;
+    EventInfo *evinfo = NULL;
+    int i_oid;
+    int i_jobid;
+    int i_definer;
+    int i_evname;
+    int i_starttime;
+    int i_endtime;
+    int i_intervaltime;
+    int i_evstatus;
+    int i_evbody;
+    int i_nspname;
+    int i_attributename;
+    int i_attributevalue;
+    char* database_name = PQdb(GetConnection(fout));
+    bool is_bcompatibility = findDBCompatibility(fout, PQdb(GetConnection(fout)));
+
+    /* Report a definite count even on the early-return paths below. */
+    *numEvents = 0;
+
+    if (GetVersionNum(fout) < EVENT_VERSION) {
+        return NULL;
+    }
+    /* Make sure we are in proper schema */
+    selectSourceSchema(fout, "pg_catalog");
+    query = createPQExpBuffer();
+    if (is_bcompatibility) {
+        appendPQExpBuffer(
+            query,
+            "SELECT pg_job.oid, job_id, log_user, job_name, pg_job.nspname, pg_namespace.oid, dbname, start_date, "
+            "end_date, interval, enable "
+            "FROM pg_job LEFT join pg_namespace on pg_namespace.nspname = pg_job.nspname where dbname=\'%s\'",
+            database_name);
+        res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
+
+        ntups = PQntuples(res);
+        if (ntups == 0) {
+            PQclear(res);
+            destroyPQExpBuffer(query);
+            return NULL;
+        }
+        *numEvents = ntups;
+        evinfo = (EventInfo *)pg_malloc(ntups * sizeof(EventInfo));
+        i_definer = PQfnumber(res, "log_user");
+        i_evname = PQfnumber(res, "job_name");
+        i_starttime = PQfnumber(res, "start_date");
+        i_endtime = PQfnumber(res, "end_date");
+        i_intervaltime = PQfnumber(res, "interval");
+        i_evstatus = PQfnumber(res, "enable");
+        i_oid = PQfnumber(res, "oid");
+        i_nspname = PQfnumber(res, "nspname");
+        i_jobid = PQfnumber(res, "job_id");
+        for (i = 0; i < ntups; i++) {
+            evinfo[i].dobj.objType = DO_EVENT;
+            evinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
+            AssignDumpId(&evinfo[i].dobj);
+            evinfo[i].dobj.name = gs_strdup(PQgetvalue(res, i, i_evname));
+            evinfo[i].dobj.nmspace =
+                findNamespace(fout, atooid(PQgetvalue(res, i, 5)), evinfo[i].dobj.catId.oid);
+            evinfo[i].evdefiner = gs_strdup(PQgetvalue(res, i, i_definer));
+            evinfo[i].evname = gs_strdup(PQgetvalue(res, i, i_evname));
+            evinfo[i].starttime = gs_strdup(PQgetvalue(res, i, i_starttime));
+            evinfo[i].endtime = gs_strdup(PQgetvalue(res, i, i_endtime));
+            evinfo[i].intervaltime = gs_strdup(PQgetvalue(res, i, i_intervaltime));
+            evinfo[i].evstatus = (PQgetvalue(res, i, i_evstatus)[0] == 't');
+            evinfo[i].nspname = gs_strdup(PQgetvalue(res, i, i_nspname));
+            /*
+             * pg_malloc does not zero memory: give every optional field a
+             * definite value before the catalog scans below, so that events
+             * without an "auto_drop" attribute or a pg_job_proc row do not
+             * expose uninitialized data to dumpEvent().
+             */
+            evinfo[i].comment = NULL;
+            evinfo[i].autodrop = false;
+            evinfo[i].evbody = NULL;
+            Oid ev_oid = atooid(PQgetvalue(res, i, i_jobid));
+            /*
+             * NOTE(review): job_name is interpolated into the query without
+             * escaping; confirm event names cannot contain single quotes.
+             */
+            PQExpBuffer attquery = createPQExpBuffer();
+            appendPQExpBuffer(attquery, "SELECT "
+                "* "
+                "FROM gs_job_attribute where job_name='%s'", evinfo[i].evname);
+            attres = ExecuteSqlQuery(fout, attquery->data, PGRES_TUPLES_OK);
+            attntups = PQntuples(attres);
+            /* Column positions are constant across rows: look them up once. */
+            i_attributename = PQfnumber(attres, "attribute_name");
+            i_attributevalue = PQfnumber(attres, "attribute_value");
+            for (j = 0; j < attntups; j++) {
+                if (strcmp(PQgetvalue(attres, j, i_attributename), "auto_drop") == 0) {
+                    evinfo[i].autodrop = (PQgetvalue(attres, j, i_attributevalue)[0] == 't');
+                } else if (strcmp(PQgetvalue(attres, j, i_attributename), "comments") == 0) {
+                    evinfo[i].comment = gs_strdup(PQgetvalue(attres, j, i_attributevalue));
+                }
+            }
+            PQExpBuffer procquery = createPQExpBuffer();
+            appendPQExpBuffer(procquery, "SELECT "
+                "what "
+                "FROM pg_job_proc where job_id=%u", ev_oid);
+            procres = ExecuteSqlQuery(fout, procquery->data, PGRES_TUPLES_OK);
+            i_evbody = PQfnumber(procres, "what");
+            if (PQntuples(procres) > 0) {
+                evinfo[i].evbody = gs_strdup(PQgetvalue(procres, 0, i_evbody));
+            } else {
+                /* No pg_job_proc row: keep a valid (empty) body. */
+                evinfo[i].evbody = gs_strdup("");
+            }
+            PQclear(attres);
+            PQclear(procres);
+            destroyPQExpBuffer(attquery);
+            destroyPQExpBuffer(procquery);
+            selectDumpableObject(&(evinfo[i].dobj), fout);
+            if (evinfo[i].evbody[0] == '\0') {
+                /* Without a body there is nothing usable to dump. */
+                evinfo[i].dobj.dump = false;
+            }
+        }
+    }
+
+    PQclear(res);
+
+    destroyPQExpBuffer(query);
+    return evinfo;
+}
+
/*
* getProcLangs
* get basic information about every procedural language in the system
@@ -10670,6 +10846,10 @@ static void dumpDumpableObject(Archive* fout, DumpableObject* dobj)
dumpTrigger(fout, (TriggerInfo*)dobj);
break;
}
+ case DO_EVENT: {
+ dumpEvent(fout, (EventInfo *)dobj);
+ break;
+ }
case DO_CONSTRAINT:
dumpConstraint(fout, (ConstraintInfo*)dobj);
break;
@@ -10820,6 +11000,9 @@ static void dumpNamespace(Archive* fout, NamespaceInfo* nspinfo)
if (strlen(nspinfo->dobj.name) == 0)
return;
+ if (isDB4AIschema(nspinfo)) {
+ return;
+ }
if (isExecUserNotObjectOwner(fout, nspinfo->rolname))
return;
@@ -10828,7 +11011,7 @@ static void dumpNamespace(Archive* fout, NamespaceInfo* nspinfo)
labelq = createPQExpBuffer();
qnspname = gs_strdup(fmtId(nspinfo->dobj.name));
- if (!targetV1 || strcasecmp(qnspname, "public") != 0) {
+ if (strcasecmp(qnspname, "public") != 0) {
appendPQExpBuffer(delq, "DROP SCHEMA IF EXISTS %s%s;\n", qnspname, if_cascade);
if (nspinfo->hasBlockchain) {
@@ -10836,6 +11019,13 @@ static void dumpNamespace(Archive* fout, NamespaceInfo* nspinfo)
} else {
appendPQExpBuffer(q, "CREATE SCHEMA %s;\n", qnspname);
}
+ if (OidIsValid(nspinfo->collate)) {
+ CollInfo *coll = NULL;
+ coll = findCollationByOid(nspinfo->collate);
+ if (coll != NULL) {
+ appendPQExpBuffer(q, "ALTER SCHEMA %s COLLATE = %s;\n", qnspname, fmtId(coll->dobj.name));
+ }
+ }
}
appendPQExpBuffer(labelq, "SCHEMA %s", qnspname);
@@ -10862,7 +11052,7 @@ static void dumpNamespace(Archive* fout, NamespaceInfo* nspinfo)
NULL);
/* Dump Schema Comments and Security Labels */
- if (!targetV1 || strcasecmp(qnspname, "public") != 0) {
+ if (strcasecmp(qnspname, "public") != 0) {
dumpComment(fout, labelq->data, NULL, nspinfo->rolname, nspinfo->dobj.catId, 0, nspinfo->dobj.dumpId);
}
dumpSecLabel(fout, labelq->data, NULL, nspinfo->rolname, nspinfo->dobj.catId, 0, nspinfo->dobj.dumpId);
@@ -17893,6 +18083,7 @@ static PQExpBuffer createTablePartition(Archive* fout, TableInfo* tbinfo)
int ntups;
int i;
int j;
+ int cnt;
bool partkeyexprIsNull = PartkeyexprIsNull(fout, tbinfo, false);
/* get partitioned table info */
@@ -17996,32 +18187,86 @@ static PQExpBuffer createTablePartition(Archive* fout, TableInfo* tbinfo)
appendPQExpBuffer(result, "(");
i_partboundary = (int*)pg_malloc(partkeynum * sizeof(int));
- /* get table partitions info */
- appendPQExpBuffer(partitionq,
- "SELECT p.oid as oid, "
- "p.relname AS partName, "
- "pg_catalog.array_length(partkey, 1) AS subpartkeynum, "
- "partkey AS subpartkey, ");
+ if (partStrategy != PART_STRATEGY_LIST || partkeynum == 1) {
+ /* get table partitions info */
+ appendPQExpBuffer(partitionq,
+ "SELECT p.oid as oid, "
+ "p.relname AS partName, "
+ "pg_catalog.array_length(partkey, 1) AS subpartkeynum, "
+ "partkey AS subpartkey, ");
- for (i = 1; i <= partkeynum; i++)
- appendPQExpBuffer(partitionq, "p.boundaries[%d] AS partBoundary_%d, ", i, i);
- appendPQExpBuffer(partitionq,
- "pg_catalog.array_to_string(p.boundaries, ',') as bound, "
- "pg_catalog.array_to_string(p.boundaries, ''',''') as boundstr, "
- "t.spcname AS reltblspc "
- "FROM pg_partition p LEFT JOIN pg_tablespace t "
- "ON p.reltablespace = t.oid "
- "WHERE p.parentid = '%u' AND p.parttype = '%c' "
- "AND p.partstrategy = '%c' ORDER BY ",
- tbinfo->dobj.catId.oid,
- PART_OBJ_TYPE_TABLE_PARTITION,
- newStrategy);
- for (i = 1; i <= partkeynum; i++) {
- if (i == partkeynum)
- appendPQExpBuffer(
- partitionq, "p.boundaries[%d]::%s ASC", i, tbinfo->atttypnames[partkeycols[i - 1] - 1]);
- else
- appendPQExpBuffer(partitionq, "p.boundaries[%d]::%s, ", i, tbinfo->atttypnames[partkeycols[i - 1] - 1]);
+ for (i = 1; i <= partkeynum; i++)
+ appendPQExpBuffer(partitionq, "p.boundaries[%d] AS partBoundary_%d, ", i, i);
+ appendPQExpBuffer(partitionq,
+ "pg_catalog.array_to_string(p.boundaries, ',') as bound, "
+ "pg_catalog.array_to_string(p.boundaries, ''',''') as boundstr, "
+ "t.spcname AS reltblspc "
+ "FROM pg_partition p LEFT JOIN pg_tablespace t "
+ "ON p.reltablespace = t.oid "
+ "WHERE p.parentid = '%u' AND p.parttype = '%c' "
+ "AND p.partstrategy = '%c' ORDER BY ",
+ tbinfo->dobj.catId.oid,
+ PART_OBJ_TYPE_TABLE_PARTITION,
+ newStrategy);
+ for (i = 1; i <= partkeynum; i++) {
+ if (i == partkeynum)
+ appendPQExpBuffer(
+ partitionq, "p.boundaries[%d]::%s ASC", i, tbinfo->atttypnames[partkeycols[i - 1] - 1]);
+ else
+ appendPQExpBuffer(
+ partitionq, "p.boundaries[%d]::%s, ", i, tbinfo->atttypnames[partkeycols[i - 1] - 1]);
+ }
+ } else {
+ appendPQExpBuffer(partitionq,
+ "SELECT /*+ hashjoin(p t) */ p.oid AS oid, "
+ "p.relname AS partName, "
+ "pg_catalog.array_length(partkey, 1) AS subpartkeynum, "
+ "partkey AS subpartkey, ");
+ for (i = 1; i <= partkeynum; i++) {
+ appendPQExpBuffer(partitionq, "NULL AS partBoundary_%d, ", i);
+ }
+ appendPQExpBuffer(partitionq,
+ "p.bound_def AS bound, "
+ "p.bound_def AS boundstr, "
+ "t.spcname AS reltblspc FROM ( "
+ "SELECT oid, relname, reltablespace, partkey, "
+ "pg_catalog.string_agg(bound,',' ORDER BY bound_id) AS bound_def FROM( "
+ "SELECT oid, relname, reltablespace, partkey, bound_id, '('||"
+ "pg_catalog.array_to_string(pg_catalog.array_agg(key_value ORDER BY key_id),',','NULL')||')' AS bound "
+ "FROM ( SELECT oid, relname, reltablespace, partkey, bound_id, key_id, ");
+ cnt = 0;
+ for (i = 0; i < partkeynum; i++) {
+ if (!isTypeString(tbinfo, partkeycols[i])) {
+ continue;
+ }
+ if (cnt > 0) {
+ appendPQExpBuffer(partitionq, ",");
+ } else {
+ appendPQExpBuffer(partitionq, "CASE WHEN key_id in (");
+ }
+ appendPQExpBuffer(partitionq, "%d", i + 1);
+ cnt++;
+ }
+ if (cnt > 0) {
+ appendPQExpBuffer(partitionq, ") THEN pg_catalog.quote_literal(key_value) ELSE key_value END AS ");
+ }
+ appendPQExpBuffer(partitionq,
+ "key_value FROM ( "
+ "SELECT oid, relname, reltablespace, partkey, bound_id, "
+ "pg_catalog.generate_subscripts(keys_array, 1) AS key_id, "
+ "pg_catalog.unnest(keys_array)::text AS key_value FROM ( "
+ "SELECT oid, relname, reltablespace, partkey, bound_id,key_bounds::cstring[] AS keys_array FROM ( "
+ "SELECT oid, relname, reltablespace, partkey, pg_catalog.unnest(boundaries) AS key_bounds, "
+ "pg_catalog.generate_subscripts(boundaries, 1) AS bound_id FROM pg_partition "
+ "WHERE parentid = %u AND parttype = '%c' AND partstrategy = '%c')))) "
+ "GROUP BY oid, relname, reltablespace, partkey, bound_id) "
+ "GROUP BY oid, relname, reltablespace, partkey "
+ "UNION ALL SELECT oid, relname, reltablespace, partkey, 'DEFAULT' AS bound_def FROM pg_partition "
+ "WHERE parentid = %u AND parttype = '%c' AND partstrategy = '%c' AND boundaries[1] IS NULL) p "
+ "LEFT JOIN pg_tablespace t ON p.reltablespace = t.oid "
+ "ORDER BY p.bound_def ASC",
+ tbinfo->dobj.catId.oid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_LIST,
+ tbinfo->dobj.catId.oid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_LIST);
}
res = ExecuteSqlQuery(fout, partitionq->data, PGRES_TUPLES_OK);
@@ -18093,7 +18338,7 @@ static PQExpBuffer createTablePartition(Archive* fout, TableInfo* tbinfo)
}
if (boundaryValue == NULL || strlen(boundaryValue) == 0) {
appendPQExpBuffer(result, "DEFAULT");
- } else if (isTypeString(tbinfo, partkeycols[0])) {
+ } else if (partkeynum == 1 && isTypeString(tbinfo, partkeycols[0])) {
char *boundStr = gs_strdup(PQgetvalue(res, i, iBoundStr));
appendPQExpBuffer(result, "'%s'", boundStr);
free(boundStr);
@@ -18293,6 +18538,11 @@ bool isTsStoreTable(const TableInfo *tbinfo)
}
}
+inline bool isDB4AIschema(const NamespaceInfo *nspinfo)
+{
+    /* The built-in DB4AI schema is identified purely by its name. */
+    return strcmp(nspinfo->dobj.name, "db4ai") == 0;
+}
+
static void dumpExtensionLinkedGrammer(Archive *fout, const TableInfo *tbinfo, PQExpBuffer q)
{
/* Add the grammar extension linked to PGXC depending on data got from pgxc_class */
@@ -21199,6 +21449,9 @@ static void dumpSequenceData(Archive* fout, TableDataInfo* tdinfo, bool large)
#ifndef ENABLE_MULTIPLE_NODES
last = PQgetvalue(res, 0, 5);
+ if (isDB4AIschema(tbinfo->dobj.nmspace)) {
+ last = atoi(last) > atoi(min) ? last : min;
+ }
#else
/*
* In Postgres-XC it is possible that the current value of a
@@ -21661,6 +21914,93 @@ static void dumpRule(Archive* fout, RuleInfo* rinfo)
destroyPQExpBuffer(labelq);
}
+/*
+ * dumpEvent
+ *    Emit CREATE EVENT / DROP EVENT statements for one scheduler event
+ *    collected by getEvents().  Skipped for data-only dumps and events
+ *    marked not-to-dump.
+ */
+static void dumpEvent(Archive *fout, EventInfo *einfo)
+{
+    PQExpBuffer query;
+    PQExpBuffer delqry;
+
+    if (dataOnly || !einfo->dobj.dump) {
+        return;
+    }
+
+    /* Set proper schema search path so type references list correctly */
+    selectSourceSchema(fout, einfo->dobj.nmspace->dobj.name);
+
+    query = createPQExpBuffer();
+    delqry = createPQExpBuffer();
+
+    appendPQExpBuffer(delqry, "DROP EVENT %s ", fmtId(einfo->evname));
+    appendPQExpBuffer(query, "CREATE ");
+    if (einfo->evdefiner) {
+        appendPQExpBuffer(query, "definer=%s ", einfo->evdefiner);
+    }
+    appendPQExpBuffer(query, "EVENT %s.%s ON SCHEDULE ", fmtId(einfo->nspname), fmtId(einfo->evname));
+    if (strcmp(einfo->intervaltime, "null") != 0) {
+        /*
+         * intervaltime is assumed to look like "interval '<num>' <unit>";
+         * skip the fixed 10-character prefix to reach the numeric part.
+         * TODO(review): confirm the prefix length against the server output.
+         */
+        int interval_len = 10;
+        char* begin_pos = einfo->intervaltime + interval_len;
+        char* end_pos = strstr(begin_pos, "\'");
+        uint32 pos_offset;
+        char* interval_unit;
+        if (end_pos == NULL) {
+            /* Malformed interval: bail out without leaking the buffers. */
+            destroyPQExpBuffer(query);
+            destroyPQExpBuffer(delqry);
+            return;
+        }
+        interval_unit = end_pos + INTERVAL_UNITE_OFFSET;
+        pos_offset = (uint32)(end_pos - begin_pos);
+        /*
+         * Copy the numeric part.  Allocate explicitly instead of using an
+         * initialized VLA ("char buf[n] = {0}"), which is not portable:
+         * variable-length arrays may not carry initializers.
+         */
+        char* interval_num = (char*)pg_malloc(pos_offset + 1);
+        errno_t rc = memcpy_s(interval_num, pos_offset + 1, begin_pos, pos_offset);
+        securec_check_c(rc, "\0", "\0");
+        interval_num[pos_offset] = '\0';
+
+        bool is_exist_to_unite = (strstr(einfo->intervaltime, " to ") != NULL);
+        if (is_exist_to_unite) {
+            appendPQExpBuffer(query, "EVERY \'%s\' ", interval_num);
+        } else {
+            appendPQExpBuffer(query, "EVERY %s ", interval_num);
+        }
+        free(interval_num);
+        appendPQExpBuffer(query, "%s ", interval_unit);
+        if (einfo->starttime) {
+            appendPQExpBuffer(query, "STARTS \'%s\' ", einfo->starttime);
+        }
+        if (einfo->endtime) {
+            appendPQExpBuffer(query, "ENDS \'%s\' ", einfo->endtime);
+        }
+    } else {
+        /* One-shot event: interval is the literal string "null". */
+        appendPQExpBuffer(query, "AT \'%s\' ", einfo->starttime);
+    }
+    if (einfo->autodrop) {
+        appendPQExpBuffer(query, "ON COMPLETION NOT PRESERVE ");
+    } else {
+        appendPQExpBuffer(query, "ON COMPLETION PRESERVE ");
+    }
+    if (einfo->evstatus) {
+        appendPQExpBuffer(query, "ENABLE ");
+    } else {
+        appendPQExpBuffer(query, "DISABLE ");
+    }
+    if (einfo->comment) {
+        appendPQExpBuffer(query, "COMMENT \'%s\' ", einfo->comment);
+    }
+    appendPQExpBuffer(query, "DO %s;", einfo->evbody);
+
+    ArchiveEntry(fout,
+        einfo->dobj.catId,
+        einfo->dobj.dumpId,
+        einfo->evname,
+        einfo->dobj.nmspace->dobj.name,
+        NULL,
+        einfo->evdefiner,
+        false,
+        "EVENT",
+        SECTION_POST_DATA,
+        query->data,
+        delqry->data,
+        NULL,
+        NULL,
+        0,
+        NULL,
+        NULL);
+    /* ArchiveEntry copies the strings, so both buffers can be freed now
+     * (the original leaked delqry on every call). */
+    destroyPQExpBuffer(query);
+    destroyPQExpBuffer(delqry);
+}
/*
* dumpRlsPolicy
* Dump row level security information for table
@@ -22213,6 +22553,7 @@ static void addBoundaryDependencies(DumpableObject** dobjs, int numObjs, Dumpabl
case DO_PUBLICATION:
case DO_PUBLICATION_REL:
case DO_SUBSCRIPTION:
+ case DO_EVENT:
/* Post-data objects: must come after the post-data boundary */
if (dobj->objType == DO_INDEX &&
((IndxInfo*)dobj)->indextable && ((IndxInfo*)dobj)->indextable->isMOT) {
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index 49e53647e..fc8db07e5 100644
--- a/src/bin/pg_dump/pg_dump.h
+++ b/src/bin/pg_dump/pg_dump.h
@@ -92,7 +92,8 @@ typedef enum {
DO_RLSPOLICY, /* dump row level security policy of table */
DO_PUBLICATION,
DO_PUBLICATION_REL,
- DO_SUBSCRIPTION
+ DO_SUBSCRIPTION,
+ DO_EVENT
} DumpableObjectType;
typedef struct _dumpableObject {
@@ -113,6 +114,7 @@ typedef struct _namespaceInfo {
char* rolname; /* name of owner, or empty string */
char* nspacl;
bool hasBlockchain;
+ int collate;
} NamespaceInfo;
typedef struct _extensionInfo {
@@ -362,6 +364,19 @@ typedef struct _triggerInfo {
char* tgdef;
bool tgdb;
} TriggerInfo;
+/* One scheduler EVENT, assembled by getEvents() from pg_job,
+ * gs_job_attribute and pg_job_proc; consumed by dumpEvent(). */
+typedef struct _eventInfo {
+    DumpableObject dobj;
+    char* evdefiner;    /* event definer (pg_job.log_user) */
+    char* evname;       /* event name (pg_job.job_name) */
+    char* nspname;      /* schema the event belongs to (pg_job.nspname) */
+    char* starttime;    /* schedule start (pg_job.start_date, text form) */
+    char* endtime;      /* schedule end (pg_job.end_date, text form) */
+    char* intervaltime; /* repeat interval; literal "null" for one-shot AT events */
+    bool autodrop;      /* "auto_drop" attribute: ON COMPLETION NOT PRESERVE */
+    bool evstatus;      /* pg_job.enable: true = ENABLE, false = DISABLE */
+    char* comment;      /* "comments" attribute, or NULL if none */
+    char* evbody;       /* DO body (pg_job_proc.what) */
+}EventInfo;
/*
* struct ConstraintInfo is used for all constraint types. However we
@@ -574,6 +589,7 @@ extern void getConstraints(Archive* fout, TableInfo tblinfo[], int numTables);
extern RuleInfo* getRules(Archive* fout, int* numRules);
extern void getRlsPolicies(Archive* fout, TableInfo tblinfo[], int numTables);
extern void getTriggers(Archive* fout, TableInfo tblinfo[], int numTables);
+extern EventInfo *getEvents(Archive *fout, int *numEvents);
extern ProcLangInfo* getProcLangs(Archive* fout, int* numProcLangs);
extern CastInfo* getCasts(Archive* fout, int* numCasts);
extern void getTableAttrs(Archive* fout, TableInfo* tbinfo, int numTables);
diff --git a/src/bin/pg_dump/pg_dump_sort.cpp b/src/bin/pg_dump/pg_dump_sort.cpp
index 002ce23e7..7714f9ca4 100644
--- a/src/bin/pg_dump/pg_dump_sort.cpp
+++ b/src/bin/pg_dump/pg_dump_sort.cpp
@@ -120,7 +120,8 @@ static const int newObjectTypePriority[] = {
33, /* DO_RLSPOLICY */
34, /* DO_PUBLICATION */
35, /* DO_PUBLICATION_REL */
- 36 /* DO_SUBSCRIPTION */
+ 36, /* DO_SUBSCRIPTION */
+ 18 /* DO_EVENT */
};
static DumpId postDataBoundId;
@@ -1100,6 +1101,11 @@ static void describeDumpableObject(DumpableObject* obj, char* buf, int bufsize)
buf, bufsize, bufsize - 1, "TRIGGER %s (ID %d OID %u)", obj->name, obj->dumpId, obj->catId.oid);
securec_check_ss_c(nRet, "\0", "\0");
return;
+ case DO_EVENT:
+ nRet = snprintf_s(
+ buf, bufsize, bufsize - 1, "EVENT %s (ID %d OID %u)", obj->name, obj->dumpId, obj->catId.oid);
+ securec_check_ss_c(nRet, "\0", "\0");
+ return;
case DO_CONSTRAINT:
nRet = snprintf_s(
buf, bufsize, bufsize - 1, "CONSTRAINT %s (ID %d OID %u)", obj->name, obj->dumpId, obj->catId.oid);
diff --git a/src/bin/pg_dump/pg_dumpall.cpp b/src/bin/pg_dump/pg_dumpall.cpp
index 3ac365a8c..c1a8a86df 100644
--- a/src/bin/pg_dump/pg_dumpall.cpp
+++ b/src/bin/pg_dump/pg_dumpall.cpp
@@ -1201,7 +1201,7 @@ static void validate_dumpall_options(char** argv)
void help(void)
{
- printf(_("%s extracts a openGauss database cluster into an SQL script file.\n\n"), progname);
+ printf(_("%s extracts an openGauss database cluster into an SQL script file.\n\n"), progname);
printf(_("Usage:\n"));
printf(_(" %s [OPTION]...\n"), progname);
diff --git a/src/bin/pg_dump/pg_restore.cpp b/src/bin/pg_dump/pg_restore.cpp
index 4f23ceebb..32e90fae9 100644
--- a/src/bin/pg_dump/pg_restore.cpp
+++ b/src/bin/pg_dump/pg_restore.cpp
@@ -743,7 +743,7 @@ static void restore_getopts(int argc, char** argv, struct option* options, Resto
void usage(const char* pchProgname)
{
- printf(_("%s restores a openGauss database from an archive created by gs_dump.\n\n"), pchProgname);
+ printf(_("%s restores an openGauss database from an archive created by gs_dump.\n\n"), pchProgname);
printf(_("Usage:\n"));
printf(_(" %s [OPTION]... FILE\n"), pchProgname);
diff --git a/src/bin/pg_probackup/data.cpp b/src/bin/pg_probackup/data.cpp
index f8738d5ad..7e0fb4ccd 100644
--- a/src/bin/pg_probackup/data.cpp
+++ b/src/bin/pg_probackup/data.cpp
@@ -498,10 +498,16 @@ prepare_page(ConnectionArgs *conn_arg,
else if (read_len < 0)
elog(ERROR, "Cannot read block %u of \"%s\": %s",
blknum, from_fullpath, strerror(errno));
- else if (read_len != BLCKSZ)
- elog(WARNING, "Cannot read block %u of \"%s\": "
- "read %i of %d, try again",
- blknum, from_fullpath, read_len, BLCKSZ);
+ else if (read_len != BLCKSZ) {
+ if (read_len > (int)MIN_COMPRESS_ERROR_RT) {
+ elog(ERROR, "Cannot read block %u of \"%s\" code: %lu : %s", blknum, from_fullpath, read_len,
+ strerror(errno));
+ }
+ elog(WARNING,
+ "Cannot read block %u of \"%s\": "
+ "read %i of %d, try again",
+ blknum, from_fullpath, read_len, BLCKSZ);
+ }
else
{
/* If it is in DSS mode, the validation is skipped */
diff --git a/src/bin/pg_probackup/file.cpp b/src/bin/pg_probackup/file.cpp
index b69483424..cc0f4d55b 100644
--- a/src/bin/pg_probackup/file.cpp
+++ b/src/bin/pg_probackup/file.cpp
@@ -1422,6 +1422,9 @@ static void fio_send_pages_impl(int out, char* buf)
{
if (pageCompression) {
read_len = pageCompression->ReadCompressedBuffer(blknum, read_buffer, BLCKSZ, true);
+ if (read_len > MIN_COMPRESS_ERROR_RT) {
+ elog(ERROR, "can not read actual block %u, error code: %lu,", blknum, read_len);
+ }
} else {
/*
* Optimize stdio buffer usage, fseek only when current position
diff --git a/src/bin/pg_rewind/filemap.cpp b/src/bin/pg_rewind/filemap.cpp
index 692ea179c..9019ff512 100755
--- a/src/bin/pg_rewind/filemap.cpp
+++ b/src/bin/pg_rewind/filemap.cpp
@@ -22,6 +22,7 @@
#include "PageCompression.h"
#include "storage/cu.h"
#include "storage/smgr/fd.h"
+#include "storage/cfs/cfs_converter.h"
#define BLOCKSIZE (8 * 1024)
#define BUILD_PATH_LEN 2560 /* (MAXPGPATH*2 + 512) */
@@ -682,7 +683,7 @@ void process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blk
file_entry_t* key_ptr = NULL;
file_entry_t* entry = NULL;
BlockNumber blkno_inseg;
- int segno;
+ BlockNumber segno;
filemap_t* map = filemap;
file_entry_t** e;
bool processed = false;
@@ -695,6 +696,8 @@ void process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blk
if (rnode.opt != 0) {
compress = true;
+ segno = blkno / CFS_LOGIC_BLOCKS_PER_FILE;
+ blkno_inseg = blkno % CFS_LOGIC_BLOCKS_PER_FILE;
}
path = datasegpath(rnode, forknum, segno, compress);
diff --git a/src/bin/psql/describe.cpp b/src/bin/psql/describe.cpp
index 50aeee93a..9560e90d5 100644
--- a/src/bin/psql/describe.cpp
+++ b/src/bin/psql/describe.cpp
@@ -1783,7 +1783,9 @@ static bool describeOneTableDetails(const char* schemaname, const char* relation
ProcessStatus process_status = ADD_TYPE;
ValuesProcessor::deprocess_value(pset.db, (unsigned char *)default_value, strlen(default_value),
original_type_id, 0, &plaintext, plainTextSize, process_status);
- default_value = (char *)plaintext;
+ if (plaintext != NULL) {
+ default_value = (char *)plaintext;
+ }
}
#endif
if (tmpbuf.len > 0) {
diff --git a/src/bin/psql/startup.cpp b/src/bin/psql/startup.cpp
index afdb26160..03c5f5f14 100644
--- a/src/bin/psql/startup.cpp
+++ b/src/bin/psql/startup.cpp
@@ -1056,7 +1056,7 @@ static void parse_psql_options(int argc, char* const argv[], struct adhoc_opts*
extern char* optarg;
extern int optind;
int c;
- bool is_action_file = false;
+ bool action_string_need_free = false;
/* Database Security: Data importing/dumping support AES128. */
char* dencrypt_key = NULL;
char* dbname = NULL;
@@ -1085,6 +1085,10 @@ static void parse_psql_options(int argc, char* const argv[], struct adhoc_opts*
if (optarg == NULL) {
break;
}
+ if (action_string_need_free) {
+ free(options->action_string);
+ action_string_need_free = false;
+ }
is_interactive = false;
options->action_string = optarg;
if (optarg[0] == '\\') {
@@ -1093,6 +1097,7 @@ static void parse_psql_options(int argc, char* const argv[], struct adhoc_opts*
} else {
options->action = ACT_SINGLE_QUERY;
options->action_string = pg_strdup(optarg); /* need to free in main() */
+ action_string_need_free = true;
/* clear action string after -c command when it inludes sensitive info */
if (SensitiveStrCheck(optarg)) {
rc = memset_s(optarg, strlen(optarg), 0, strlen(optarg));
@@ -1116,11 +1121,13 @@ static void parse_psql_options(int argc, char* const argv[], struct adhoc_opts*
break;
}
is_interactive = false;
- is_action_file = (options->action_string != NULL) && (options->action == ACT_FILE);
- if (is_action_file)
+ if (action_string_need_free) {
free(options->action_string);
+ action_string_need_free = false;
+ }
options->action_string = pg_strdup(optarg);
options->action = ACT_FILE;
+ action_string_need_free = true;
break;
case 'F':
if (pset.popt.topt.fieldSep.separator != NULL)
@@ -1259,12 +1266,16 @@ static void parse_psql_options(int argc, char* const argv[], struct adhoc_opts*
showVersion();
exit(EXIT_SUCCESS);
case 'W':
- pset.getPassword = TRI_YES;
- if (optarg != NULL) {
- options->passwd = pg_strdup(optarg);
- rc = memset_s(optarg, strlen(optarg), 0, strlen(optarg));
- check_memset_s(rc);
+ if (optarg == NULL) {
+ break;
}
+ if (options->passwd != NULL) {
+ free(options->passwd);
+ }
+ pset.getPassword = TRI_YES;
+ options->passwd = pg_strdup(optarg);
+ rc = memset_s(optarg, strlen(optarg), 0, strlen(optarg));
+ check_memset_s(rc);
break;
case 'x':
pset.popt.topt.expanded = (unsigned short int)true;
@@ -1345,6 +1356,11 @@ static void parse_psql_options(int argc, char* const argv[], struct adhoc_opts*
rc = memset_s(off_argv, strlen(off_argv), '*', strlen(off_argv));
check_memset_s(rc);
}
+ /* Disallow creating replication connections with gsql */
+ if ((temp = strstr(options->dbname, "replication=")) != NULL) {
+ (void)fprintf(stderr, _("The 'replication' parameter is not supported by gsql.\n"));
+ exit(EXIT_FAILURE);
+ }
}
}
diff --git a/src/common/backend/catalog/Makefile b/src/common/backend/catalog/Makefile
index edd16c090..619958bbf 100644
--- a/src/common/backend/catalog/Makefile
+++ b/src/common/backend/catalog/Makefile
@@ -24,7 +24,7 @@ OBJS = catalog.o dependency.o heap.o index.o indexing.o namespace.o aclchk.o \
pg_type.o pgxc_class.o storage.o storage_gtt.o toasting.o pg_job.o pg_partition.o\
pg_hashbucket.o cstore_ctlg.o pg_builtin_proc.o streaming_stream.o\
gs_matview.o pgxc_slice.o pg_job_proc.o gs_job_argument.o gs_job_attribute.o pg_uid.o gs_global_config.o\
- gs_db_privilege.o pg_publication.o pg_subscription.o
+ gs_db_privilege.o pg_publication.o pg_subscription.o gs_utf8_collation.o
BKIFILES = postgres.bki postgres.description postgres.shdescription
diff --git a/src/common/backend/catalog/builtin_funcs.ini b/src/common/backend/catalog/builtin_funcs.ini
index 35fba63d7..195292628 100755
--- a/src/common/backend/catalog/builtin_funcs.ini
+++ b/src/common/backend/catalog/builtin_funcs.ini
@@ -1935,6 +1935,10 @@
"col_description", 1,
AddBuiltinFunc(_0(1216), _1("col_description"), _2(2), _3(true), _4(false), _5(NULL), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(SQLlanguageId), _10(100), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(2, 26, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("select description from pg_catalog.pg_description where objoid = $1 and classoid = 'pg_catalog.pg_class'::pg_catalog.regclass and objsubid = $2"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("get description for table column"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
),
+ AddFuncGroup(
+ "gs_validate_ext_listen_ip", 1,
+ AddBuiltinFunc(_0(5169), _1("gs_validate_ext_listen_ip"), _2(3), _3(true), _4(true), _5(gs_validate_ext_listen_ip), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(3, 2275, 2275, 2275), _21(5, 2275, 2275, 2275, 20, 25), _22(5, 'i', 'i', 'i', 'o', 'o'), _23(5, "clear", "validate_node_name", "validate_ip", "pid", "node_name"), _24(NULL), _25("gs_validate_ext_listen_ip"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+ ),
AddFuncGroup(
"comm_client_info", 1,
AddBuiltinFunc(_0(1991), _1("comm_client_info"), _2(0), _3(true), _4(true), _5(comm_client_info), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(9, 25, 25, 20, 23, 20, 23, 25, 25, 23), _22(9, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(9, "node_name", "app", "tid", "lwtid", "query_id", "socket", "remote_ip", "remote_port", "logic_id"), _24(NULL), _25("comm_client_info"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
@@ -10881,8 +10885,8 @@ AddFuncGroup(
),
AddFuncGroup(
"standby_statement_history", 2,
- AddBuiltinFunc(_0(3118), _1("standby_statement_history"), _2(1), _3(false), _4(true), _5(standby_statement_history_1v), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(1, 16), _21(53, 16, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25), _22(53, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(53, "only_slow", "db_name", "schema_name", "origin_node", "user_name", "application_name", "client_addr", "client_port", "unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", "lwlock_wait_count", "lwlock_time", "lwlock_wait_time", "details", "is_slow_sql", "trace_id"),_24(NULL), _25("standby_statement_history_1v"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)),
- AddBuiltinFunc(_0(3119), _1("standby_statement_history"), _2(1), _3(false), _4(true), _5(standby_statement_history), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(1185), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(2, 16, 1185), _21(54, 16, 1185, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25), _22(54, 'i', 'v', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(54, "only_slow", "finish_time", "db_name", "schema_name", "origin_node", "user_name", "application_name", "client_addr", "client_port", "unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", "lwlock_wait_count", "lwlock_time", "lwlock_wait_time", "details", "is_slow_sql", "trace_id"),_24(NULL), _25("standby_statement_history"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+ AddBuiltinFunc(_0(3118), _1("standby_statement_history"), _2(1), _3(false), _4(true), _5(standby_statement_history_1v), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(1, 16), _21(54, 16, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25, 25), _22(54, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(54, "only_slow", "db_name", "schema_name", "origin_node", "user_name", "application_name", "client_addr", "client_port", "unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", "lwlock_wait_count", "lwlock_time", "lwlock_wait_time", "details", "is_slow_sql", "trace_id", "advise"),_24(NULL), _25("standby_statement_history_1v"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)),
+ AddBuiltinFunc(_0(3119), _1("standby_statement_history"), _2(1), _3(false), _4(true), _5(standby_statement_history), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(1185), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(2, 16, 1185), _21(55, 16, 1185, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25, 25), _22(55, 'i', 'v', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(55, "only_slow", "finish_time", "db_name", "schema_name", "origin_node", "user_name", "application_name", "client_addr", "client_port", "unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", "lwlock_wait_count", "lwlock_time", "lwlock_wait_time", "details", "is_slow_sql", "trace_id", "advise"),_24(NULL), _25("standby_statement_history"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
),
AddFuncGroup(
"statement_detail_decode", 1,
diff --git a/src/common/backend/catalog/catalog.cpp b/src/common/backend/catalog/catalog.cpp
index 5f5a38cba..0299f52f0 100644
--- a/src/common/backend/catalog/catalog.cpp
+++ b/src/common/backend/catalog/catalog.cpp
@@ -1193,6 +1193,12 @@ Oid GetNewRelFileNode(Oid reltablespace, Relation pg_class, char relpersistence)
/* Check for existing file of same name */
rpath = relpath(rnode, MAIN_FORKNUM);
fd = BasicOpenFile(rpath, O_RDONLY | PG_BINARY, 0);
+ if (fd < 0) {
+ char compress_rpath[MAXPGPATH];
+ int ret = snprintf_s(compress_rpath, MAXPGPATH, MAXPGPATH - 1, "%s%s", rpath, COMPRESS_STR);
+ securec_check_ss(ret, "\0", "\0");
+ fd = BasicOpenFile(compress_rpath, O_RDONLY | PG_BINARY, 0);
+ }
if (fd >= 0) {
/* definite collision */
close(fd);
diff --git a/src/common/backend/catalog/gs_job_attribute.cpp b/src/common/backend/catalog/gs_job_attribute.cpp
index 5d2204ba3..668e5f7f8 100644
--- a/src/common/backend/catalog/gs_job_attribute.cpp
+++ b/src/common/backend/catalog/gs_job_attribute.cpp
@@ -843,7 +843,9 @@ void create_program_internal(PG_FUNCTION_ARGS, bool is_inline)
Datum comments = PG_ARGISNULL(5) ? (Datum)0 : PG_GETARG_DATUM(5);
/* perform program check */
- check_program_name_valid(program_name, is_inline);
+ if (!(u_sess->attr.attr_sql.sql_compatibility == B_FORMAT)) {
+ check_program_name_valid(program_name, is_inline);
+ }
check_program_type_valid(program_type);
check_program_creation_privilege(program_type);
check_program_type_argument(program_type, PG_GETARG_INT32(3));
@@ -881,7 +883,7 @@ void create_program_internal(PG_FUNCTION_ARGS, bool is_inline)
* @param enabled Object enabled? or not
*/
static void dbe_insert_pg_job(Datum name, Datum job_id, Datum start_date, Datum interval, Datum end_date, Datum enabled,
- Datum priv_user)
+ Datum priv_user, Datum log_user, Datum schema_name)
{
errno_t rc = EOK;
Datum values[Natts_pg_job];
@@ -899,13 +901,13 @@ static void dbe_insert_pg_job(Datum name, Datum job_id, Datum start_date, Datum
values[Anum_pg_job_enable - 1] = TextToBool(enabled);
values[Anum_pg_job_start_date - 1] = DirectFunctionCall1(timestamptz_timestamp, TextToTimeStampTz(start_date));
values[Anum_pg_job_end_date - 1] = DirectFunctionCall1(timestamptz_timestamp, TextToTimeStampTz(end_date));
+ values[Anum_pg_job_log_user - 1] = log_user;
+ values[Anum_pg_job_priv_user - 1] = priv_user; /* program's owner */
+ values[Anum_pg_job_nspname - 1] = schema_name;
/* Fill other values */
const char* db_name = get_and_check_db_name(u_sess->proc_cxt.MyDatabaseId, true);
- values[Anum_pg_job_log_user - 1] = DirectFunctionCall1(namein, CStringGetDatum(GetUserNameFromId(GetUserId())));
- values[Anum_pg_job_priv_user - 1] = priv_user; /* program's owner */
values[Anum_pg_job_dbname - 1] = DirectFunctionCall1(namein, CStringGetDatum(db_name));
- values[Anum_pg_job_nspname - 1] = DirectFunctionCall1(namein, CStringGetDatum(get_real_search_schema()));
values[Anum_pg_job_node_name - 1] = get_pg_job_node_name();
values[Anum_pg_job_job_status - 1] = CharGetDatum(PGJOB_SUCC_STATUS);
values[Anum_pg_job_current_postgres_pid - 1] = Int64GetDatum(-1);
@@ -930,7 +932,7 @@ static void dbe_insert_pg_job(Datum name, Datum job_id, Datum start_date, Datum
* @param job_name
* @return char*
*/
-static char *get_inline_schedule_name(Datum job_name)
+char *get_inline_schedule_name(Datum job_name)
{
errno_t rc;
char *c_job_name = TextDatumGetCString(job_name);
@@ -947,7 +949,7 @@ static char *get_inline_schedule_name(Datum job_name)
* @brief create_inline_program
* Create a inline program object
*/
-static char *create_inline_program(Datum job_name, Datum job_type, Datum job_action, Datum num_of_args, Datum enabled)
+char *create_inline_program(Datum job_name, Datum job_type, Datum job_action, Datum num_of_args, Datum enabled)
{
errno_t rc;
char *c_job_name = TextDatumGetCString(job_name);
@@ -976,6 +978,45 @@ static char *create_inline_program(Datum job_name, Datum job_type, Datum job_act
return program_name;
}
+char *CreateEventInlineProgram(Datum job_name, Datum job_type, Datum job_action, Datum job_definer)
+{
+ errno_t rc;
+ char *c_job_name = TextDatumGetCString(job_name);
+
+ char *program_name = (char *)palloc(sizeof(char) * MAX_JOB_NAME_LEN);
+ rc = strcpy_s(program_name, MAX_JOB_NAME_LEN, INLINE_JOB_PROGRAM_PREFIX);
+ securec_check(rc, "\0", "\0");
+ rc = strcat_s(program_name, MAX_JOB_NAME_LEN, c_job_name);
+ securec_check(rc, "\0", "\0");
+
+ /* perform program check */
+ if (!(u_sess->attr.attr_sql.sql_compatibility == B_FORMAT)) {
+ check_program_name_valid(CStringGetTextDatum(program_name), true);
+ }
+ check_program_type_valid(job_type);
+ check_program_creation_privilege(job_type);
+ check_program_type_argument(job_type, 0);
+ check_program_action(job_action);
+ check_if_arguments_defined(CStringGetTextDatum(program_name), 0);
+
+ const char *object_type = "program";
+ Datum attribute_name[] = {CStringGetTextDatum("object_type"), CStringGetTextDatum("program_type"),
+ CStringGetTextDatum("program_action"), CStringGetTextDatum("number_of_arguments"),
+ CStringGetTextDatum("enabled"), CStringGetTextDatum("comments"),
+ CStringGetTextDatum("owner")};
+ int count = lengthof(attribute_name);
+ Datum attribute_value[] = {
+ CStringGetTextDatum(object_type), job_type, job_action, CStringGetTextDatum("0"), BoolToText(BoolGetDatum(true)), (Datum)0, job_definer};
+ multi_insert_attribute(CStringGetTextDatum(program_name), attribute_name, attribute_value, count);
+
+ for (int i = 0; i < count; i++) {
+ pfree(DatumGetPointer(attribute_name[i]));
+ }
+ pfree(DatumGetPointer(attribute_value[0]));
+
+ return program_name;
+}
+
/*
* @brief get_job_id
* Get the unique job id.
@@ -1094,16 +1135,26 @@ void create_job_raw(PG_FUNCTION_ARGS)
Datum end_date = PG_GETARG_DATUM(13);
Datum job_action = PG_GETARG_DATUM(14);
Datum job_type = PG_GETARG_DATUM(15);
+ Datum job_definer = (PG_ARGISNULL(16)) ? Datum(0) : PG_GETARG_DATUM(16);
+ Datum job_schemaname = (PG_ARGISNULL(17)) ? Datum(0) : PG_GETARG_DATUM(17);
+ Datum job_definer_oid = (PG_ARGISNULL(18)) ? Datum(0) : PG_GETARG_DATUM(18);
/* Various checks */
- check_job_name_valid(job_name);
+ if (!(u_sess->attr.attr_sql.sql_compatibility == B_FORMAT)) {
+ check_job_name_valid(job_name);
+ }
check_job_creation_privilege(job_type);
check_job_class_valid(job_class);
/* gs_job_attribute */
const char *object_type = "job";
- char *username = get_role_name_str();
- Datum owner = CStringGetTextDatum(username);
+ Datum owner = Datum(0);
+ if (PG_ARGISNULL(16)) {
+ char *username = get_role_name_str();
+ owner = CStringGetTextDatum(username);
+ } else {
+ owner = job_definer_oid;
+ }
const Datum attribute_name[] = {CStringGetTextDatum("object_type"), CStringGetTextDatum("program_name"),
CStringGetTextDatum("schedule_name"), CStringGetTextDatum("job_class"),
CStringGetTextDatum("auto_drop"), CStringGetTextDatum("comments"),
@@ -1116,8 +1167,23 @@ void create_job_raw(PG_FUNCTION_ARGS)
/* pg_job && pg_job_proc */
Datum job_id = Int16GetDatum(get_job_id());
- Datum priv_user = get_priv_user(program_name, job_intype);
- dbe_insert_pg_job(job_name, job_id, start_date, repeat_interval, end_date, enabled, priv_user);
+ Datum priv_user;
+ Datum log_user;
+ if (PG_ARGISNULL(16)) {
+ priv_user = get_priv_user(program_name, job_intype);
+ log_user = DirectFunctionCall1(namein, CStringGetDatum(GetUserNameFromId(GetUserId())));
+ } else {
+ priv_user = DirectFunctionCall1(namein, job_definer);
+ log_user = DirectFunctionCall1(namein, job_definer);
+ }
+ Datum schema_name;
+ if (PG_ARGISNULL(17)) {
+ schema_name = DirectFunctionCall1(namein, CStringGetDatum(get_real_search_schema()));
+ } else {
+ schema_name = DirectFunctionCall1(namein, job_schemaname);
+ }
+ dbe_insert_pg_job(job_name, job_id, start_date, repeat_interval, end_date, enabled, priv_user, log_user,
+ schema_name);
dbe_insert_pg_job_proc(job_id, job_action, job_name);
for (int i = 0; i < count; i++) {
pfree(DatumGetPointer(attribute_name[i]));
@@ -1148,7 +1214,7 @@ void create_job_1_internal(PG_FUNCTION_ARGS)
Datum program_name = CStringGetTextDatum(c_program_name);
pfree_ext(c_program_name);
- static const short nrgs_job = 16;
+ static const short nrgs_job = 17;
FunctionCallInfoData fcinfo_job;
InitFunctionCallInfoData(fcinfo_job, NULL, nrgs_job, InvalidOid, NULL, NULL);
errno_t rc = memset_s(fcinfo_job.arg, nrgs_job * sizeof(Datum), 0, nrgs_job * sizeof(Datum));
@@ -1172,6 +1238,7 @@ void create_job_1_internal(PG_FUNCTION_ARGS)
fcinfo_job.arg[13] = TimeStampTzToText(end_date); /* end_date */
fcinfo_job.arg[14] = job_action; /* job action */
fcinfo_job.arg[15] = job_type; /* job type */
+ fcinfo_job.argnull[16] = true;
create_job_raw(&fcinfo_job);
}
@@ -1212,7 +1279,7 @@ static void get_schedule_info(Datum schedule_name, Datum *start_date, Datum *rep
* @param num_of_args
* @param enabled
*/
-static void get_program_info(Datum program_name, Datum *job_type, Datum *job_action, Datum *num_of_args, Datum *enabled)
+void get_program_info(Datum program_name, Datum *job_type, Datum *job_action, Datum *num_of_args, Datum *enabled)
{
check_object_is_visible(program_name);
/* total of 4 attributes: program_type, number_of_arguments, enabled, program_action from program */
@@ -1257,7 +1324,7 @@ void create_job_2_internal(PG_FUNCTION_ARGS)
Datum enabled;
get_program_info(program_name, &job_type, &job_action, &num_of_args, &enabled);
- static const short nrgs_job = 16;
+ static const short nrgs_job = 17;
FunctionCallInfoData fcinfo_job;
InitFunctionCallInfoData(fcinfo_job, NULL, nrgs_job, InvalidOid, NULL, NULL);
errno_t rc = memset_s(fcinfo_job.arg, nrgs_job * sizeof(Datum), 0, nrgs_job * sizeof(Datum));
@@ -1283,6 +1350,7 @@ void create_job_2_internal(PG_FUNCTION_ARGS)
fcinfo_job.arg[13] = end_date; /* end_date */
fcinfo_job.arg[14] = job_action; /* job action */
fcinfo_job.arg[15] = job_type; /* job type */
+ fcinfo_job.argnull[16] = true;
create_job_raw(&fcinfo_job);
}
@@ -1308,7 +1376,7 @@ void create_job_3_internal(PG_FUNCTION_ARGS)
Datum enabled;
get_program_info(program_name, &job_type, &job_action, &num_of_args, &enabled);
- static const short nrgs_job = 16;
+ static const short nrgs_job = 17;
FunctionCallInfoData fcinfo_job;
InitFunctionCallInfoData(fcinfo_job, NULL, nrgs_job, InvalidOid, NULL, NULL);
errno_t rc = memset_s(fcinfo_job.arg, nrgs_job * sizeof(Datum), 0, nrgs_job * sizeof(Datum));
@@ -1334,6 +1402,7 @@ void create_job_3_internal(PG_FUNCTION_ARGS)
fcinfo_job.arg[13] = TimeStampTzToText(end_date); /* end_date */
fcinfo_job.arg[14] = job_action; /* job action */
fcinfo_job.arg[15] = job_type; /* job type */
+ fcinfo_job.argnull[16] = true;
create_job_raw(&fcinfo_job);
}
@@ -1361,7 +1430,7 @@ void create_job_4_internal(PG_FUNCTION_ARGS)
Datum program_name = CStringGetTextDatum(c_program_name);
pfree_ext(c_program_name);
- static const short nrgs_job = 16;
+ static const short nrgs_job = 17;
FunctionCallInfoData fcinfo_job;
InitFunctionCallInfoData(fcinfo_job, NULL, nrgs_job, InvalidOid, NULL, NULL);
@@ -1383,6 +1452,7 @@ void create_job_4_internal(PG_FUNCTION_ARGS)
fcinfo_job.arg[13] = end_date; /* end_date */
fcinfo_job.arg[14] = job_action; /* job action */
fcinfo_job.arg[15] = job_type; /* job type */
+ fcinfo_job.argnull[16] = true;
create_job_raw(&fcinfo_job);
}
@@ -1788,8 +1858,7 @@ void set_job_attribute(const Datum job_name, const Datum attribute_name, const D
* @param attribute_name
* @param attribute_value
*/
-static void set_attribute_with_related_rel(const Datum object_name, const Datum attribute_name,
- const Datum attribute_value)
+void set_attribute_with_related_rel(const Datum object_name, const Datum attribute_name, const Datum attribute_value)
{
char *object_type = get_attribute_value_str(object_name, "object_type", RowExclusiveLock, false, false);
if (pg_strcasecmp(object_type, "program") == 0) {
@@ -1853,7 +1922,7 @@ void set_attribute_1_internal(PG_FUNCTION_ARGS, Oid type)
* @param extra_name secondary attribute name for 'event_spec'
* @param extra_value secondary attribute value if exists
*/
-static void prepare_set_attribute(Datum attribute, Datum *name, Datum *value, Datum *extra_name, Datum extra_value)
+void prepare_set_attribute(Datum attribute, Datum *name, Datum *value, Datum *extra_name, Datum extra_value)
{
/* Cannot change owner. */
char *attribute_str = TextDatumGetCString(attribute);
diff --git a/src/common/backend/catalog/gs_utf8_collation.cpp b/src/common/backend/catalog/gs_utf8_collation.cpp
new file mode 100644
index 000000000..56f690557
--- /dev/null
+++ b/src/common/backend/catalog/gs_utf8_collation.cpp
@@ -0,0 +1,952 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * ---------------------------------------------------------------------------------------
+ *
+ * gs_utf8_collation.cpp
+ * Contains three UTF8 collations :
+ * utf8mb4_general_ci/utf8mb4_unicode_ci/utf8mb4_bin
+ *
+ * IDENTIFICATION
+ * src/common/backend/catalog/gs_utf8_collation.cpp
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+#include "postgres.h"
+#include "knl/knl_variable.h"
+
+#include "catalog/pg_collation.h"
+#include "utils/builtins.h"
+#include "utils/syscache.h"
+#include "parser/parse_type.h"
+#include "access/hash.h"
+#include "utils/lsyscache.h"
+#include "catalog/gs_utf8_collation.h"
+
+typedef struct GS_UNICASE_PAGES {
+ GS_UINT32 **sort_page;
+} GS_UNICASE_INFO;
+
+static GS_UINT32 sort00[] = {
+ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F,
+ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
+ 0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F,
+ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F,
+ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,
+ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004A, 0x004B, 0x004C, 0x004D, 0x004E, 0x004F,
+ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F,
+ 0x0060, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004A, 0x004B, 0x004C, 0x004D, 0x004E, 0x004F,
+ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005A, 0x007B, 0x007C, 0x007D, 0x007E, 0x007F,
+ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
+ 0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F,
+ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
+ 0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F,
+ 0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7,
+ 0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF,
+ 0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x039C, 0x00B6, 0x00B7,
+ 0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF,
+ 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x00C6, 0x0043,
+ 0x0045, 0x0045, 0x0045, 0x0045, 0x0049, 0x0049, 0x0049, 0x0049,
+ 0x00D0, 0x004E, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x00D7,
+ 0x00D8, 0x0055, 0x0055, 0x0055, 0x0055, 0x0059, 0x00DE, 0x00DF,
+ 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x00C6, 0x0043,
+ 0x0045, 0x0045, 0x0045, 0x0045, 0x0049, 0x0049, 0x0049, 0x0049,
+ 0x00D0, 0x004E, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x00F7,
+ 0x00D8, 0x0055, 0x0055, 0x0055, 0x0055, 0x0059, 0x00DE, 0x0059
+};
+
+static GS_UINT32 sort01[] = {
+ 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0043, 0x0043,
+ 0x0043, 0x0043, 0x0043, 0x0043, 0x0043, 0x0043, 0x0044, 0x0044,
+ 0x0110, 0x0110, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045,
+ 0x0045, 0x0045, 0x0045, 0x0045, 0x0047, 0x0047, 0x0047, 0x0047,
+ 0x0047, 0x0047, 0x0047, 0x0047, 0x0048, 0x0048, 0x0126, 0x0126,
+ 0x0049, 0x0049, 0x0049, 0x0049, 0x0049, 0x0049, 0x0049, 0x0049,
+ 0x0049, 0x0049, 0x0132, 0x0132, 0x004A, 0x004A, 0x004B, 0x004B,
+ 0x0138, 0x004C, 0x004C, 0x004C, 0x004C, 0x004C, 0x004C, 0x013F,
+ 0x013F, 0x0141, 0x0141, 0x004E, 0x004E, 0x004E, 0x004E, 0x004E,
+ 0x004E, 0x0149, 0x014A, 0x014A, 0x004F, 0x004F, 0x004F, 0x004F,
+ 0x004F, 0x004F, 0x0152, 0x0152, 0x0052, 0x0052, 0x0052, 0x0052,
+ 0x0052, 0x0052, 0x0053, 0x0053, 0x0053, 0x0053, 0x0053, 0x0053,
+ 0x0053, 0x0053, 0x0054, 0x0054, 0x0054, 0x0054, 0x0166, 0x0166,
+ 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055,
+ 0x0055, 0x0055, 0x0055, 0x0055, 0x0057, 0x0057, 0x0059, 0x0059,
+ 0x0059, 0x005A, 0x005A, 0x005A, 0x005A, 0x005A, 0x005A, 0x0053,
+ 0x0180, 0x0181, 0x0182, 0x0182, 0x0184, 0x0184, 0x0186, 0x0187,
+ 0x0187, 0x0189, 0x018A, 0x018B, 0x018B, 0x018D, 0x018E, 0x018F,
+ 0x0190, 0x0191, 0x0191, 0x0193, 0x0194, 0x01F6, 0x0196, 0x0197,
+ 0x0198, 0x0198, 0x019A, 0x019B, 0x019C, 0x019D, 0x019E, 0x019F,
+ 0x004F, 0x004F, 0x01A2, 0x01A2, 0x01A4, 0x01A4, 0x01A6, 0x01A7,
+ 0x01A7, 0x01A9, 0x01AA, 0x01AB, 0x01AC, 0x01AC, 0x01AE, 0x0055,
+ 0x0055, 0x01B1, 0x01B2, 0x01B3, 0x01B3, 0x01B5, 0x01B5, 0x01B7,
+ 0x01B8, 0x01B8, 0x01BA, 0x01BB, 0x01BC, 0x01BC, 0x01BE, 0x01F7,
+ 0x01C0, 0x01C1, 0x01C2, 0x01C3, 0x01C4, 0x01C4, 0x01C4, 0x01C7,
+ 0x01C7, 0x01C7, 0x01CA, 0x01CA, 0x01CA, 0x0041, 0x0041, 0x0049,
+ 0x0049, 0x004F, 0x004F, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055,
+ 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x018E, 0x0041, 0x0041,
+ 0x0041, 0x0041, 0x00C6, 0x00C6, 0x01E4, 0x01E4, 0x0047, 0x0047,
+ 0x004B, 0x004B, 0x004F, 0x004F, 0x004F, 0x004F, 0x01B7, 0x01B7,
+ 0x004A, 0x01F1, 0x01F1, 0x01F1, 0x0047, 0x0047, 0x01F6, 0x01F7,
+ 0x004E, 0x004E, 0x0041, 0x0041, 0x00C6, 0x00C6, 0x00D8, 0x00D8
+};
+
+static GS_UINT32 sort02[] = {
+ 0x0041, 0x0041, 0x0041, 0x0041, 0x0045, 0x0045, 0x0045, 0x0045,
+ 0x0049, 0x0049, 0x0049, 0x0049, 0x004F, 0x004F, 0x004F, 0x004F,
+ 0x0052, 0x0052, 0x0052, 0x0052, 0x0055, 0x0055, 0x0055, 0x0055,
+ 0x0053, 0x0053, 0x0054, 0x0054, 0x021C, 0x021C, 0x0048, 0x0048,
+ 0x0220, 0x0221, 0x0222, 0x0222, 0x0224, 0x0224, 0x0041, 0x0041,
+ 0x0045, 0x0045, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F,
+ 0x004F, 0x004F, 0x0059, 0x0059, 0x0234, 0x0235, 0x0236, 0x0237,
+ 0x0238, 0x0239, 0x023A, 0x023B, 0x023C, 0x023D, 0x023E, 0x023F,
+ 0x0240, 0x0241, 0x0242, 0x0243, 0x0244, 0x0245, 0x0246, 0x0247,
+ 0x0248, 0x0249, 0x024A, 0x024B, 0x024C, 0x024D, 0x024E, 0x024F,
+ 0x0250, 0x0251, 0x0252, 0x0181, 0x0186, 0x0255, 0x0189, 0x018A,
+ 0x0258, 0x018F, 0x025A, 0x0190, 0x025C, 0x025D, 0x025E, 0x025F,
+ 0x0193, 0x0261, 0x0262, 0x0194, 0x0264, 0x0265, 0x0266, 0x0267,
+ 0x0197, 0x0196, 0x026A, 0x026B, 0x026C, 0x026D, 0x026E, 0x019C,
+ 0x0270, 0x0271, 0x019D, 0x0273, 0x0274, 0x019F, 0x0276, 0x0277,
+ 0x0278, 0x0279, 0x027A, 0x027B, 0x027C, 0x027D, 0x027E, 0x027F,
+ 0x01A6, 0x0281, 0x0282, 0x01A9, 0x0284, 0x0285, 0x0286, 0x0287,
+ 0x01AE, 0x0289, 0x01B1, 0x01B2, 0x028C, 0x028D, 0x028E, 0x028F,
+ 0x0290, 0x0291, 0x01B7, 0x0293, 0x0294, 0x0295, 0x0296, 0x0297,
+ 0x0298, 0x0299, 0x029A, 0x029B, 0x029C, 0x029D, 0x029E, 0x029F,
+ 0x02A0, 0x02A1, 0x02A2, 0x02A3, 0x02A4, 0x02A5, 0x02A6, 0x02A7,
+ 0x02A8, 0x02A9, 0x02AA, 0x02AB, 0x02AC, 0x02AD, 0x02AE, 0x02AF,
+ 0x02B0, 0x02B1, 0x02B2, 0x02B3, 0x02B4, 0x02B5, 0x02B6, 0x02B7,
+ 0x02B8, 0x02B9, 0x02BA, 0x02BB, 0x02BC, 0x02BD, 0x02BE, 0x02BF,
+ 0x02C0, 0x02C1, 0x02C2, 0x02C3, 0x02C4, 0x02C5, 0x02C6, 0x02C7,
+ 0x02C8, 0x02C9, 0x02CA, 0x02CB, 0x02CC, 0x02CD, 0x02CE, 0x02CF,
+ 0x02D0, 0x02D1, 0x02D2, 0x02D3, 0x02D4, 0x02D5, 0x02D6, 0x02D7,
+ 0x02D8, 0x02D9, 0x02DA, 0x02DB, 0x02DC, 0x02DD, 0x02DE, 0x02DF,
+ 0x02E0, 0x02E1, 0x02E2, 0x02E3, 0x02E4, 0x02E5, 0x02E6, 0x02E7,
+ 0x02E8, 0x02E9, 0x02EA, 0x02EB, 0x02EC, 0x02ED, 0x02EE, 0x02EF,
+ 0x02F0, 0x02F1, 0x02F2, 0x02F3, 0x02F4, 0x02F5, 0x02F6, 0x02F7,
+ 0x02F8, 0x02F9, 0x02FA, 0x02FB, 0x02FC, 0x02FD, 0x02FE, 0x02FF
+};
+
+static GS_UINT32 sort03[] = {
+ 0x0300, 0x0301, 0x0302, 0x0303, 0x0304, 0x0305, 0x0306, 0x0307,
+ 0x0308, 0x0309, 0x030A, 0x030B, 0x030C, 0x030D, 0x030E, 0x030F,
+ 0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317,
+ 0x0318, 0x0319, 0x031A, 0x031B, 0x031C, 0x031D, 0x031E, 0x031F,
+ 0x0320, 0x0321, 0x0322, 0x0323, 0x0324, 0x0325, 0x0326, 0x0327,
+ 0x0328, 0x0329, 0x032A, 0x032B, 0x032C, 0x032D, 0x032E, 0x032F,
+ 0x0330, 0x0331, 0x0332, 0x0333, 0x0334, 0x0335, 0x0336, 0x0337,
+ 0x0338, 0x0339, 0x033A, 0x033B, 0x033C, 0x033D, 0x033E, 0x033F,
+ 0x0340, 0x0341, 0x0342, 0x0343, 0x0344, 0x0399, 0x0346, 0x0347,
+ 0x0348, 0x0349, 0x034A, 0x034B, 0x034C, 0x034D, 0x034E, 0x034F,
+ 0x0350, 0x0351, 0x0352, 0x0353, 0x0354, 0x0355, 0x0356, 0x0357,
+ 0x0358, 0x0359, 0x035A, 0x035B, 0x035C, 0x035D, 0x035E, 0x035F,
+ 0x0360, 0x0361, 0x0362, 0x0363, 0x0364, 0x0365, 0x0366, 0x0367,
+ 0x0368, 0x0369, 0x036A, 0x036B, 0x036C, 0x036D, 0x036E, 0x036F,
+ 0x0370, 0x0371, 0x0372, 0x0373, 0x0374, 0x0375, 0x0376, 0x0377,
+ 0x0378, 0x0379, 0x037A, 0x037B, 0x037C, 0x037D, 0x037E, 0x037F,
+ 0x0380, 0x0381, 0x0382, 0x0383, 0x0384, 0x0385, 0x0391, 0x0387,
+ 0x0395, 0x0397, 0x0399, 0x038B, 0x039F, 0x038D, 0x03A5, 0x03A9,
+ 0x0399, 0x0391, 0x0392, 0x0393, 0x0394, 0x0395, 0x0396, 0x0397,
+ 0x0398, 0x0399, 0x039A, 0x039B, 0x039C, 0x039D, 0x039E, 0x039F,
+ 0x03A0, 0x03A1, 0x03A2, 0x03A3, 0x03A4, 0x03A5, 0x03A6, 0x03A7,
+ 0x03A8, 0x03A9, 0x0399, 0x03A5, 0x0391, 0x0395, 0x0397, 0x0399,
+ 0x03A5, 0x0391, 0x0392, 0x0393, 0x0394, 0x0395, 0x0396, 0x0397,
+ 0x0398, 0x0399, 0x039A, 0x039B, 0x039C, 0x039D, 0x039E, 0x039F,
+ 0x03A0, 0x03A1, 0x03A3, 0x03A3, 0x03A4, 0x03A5, 0x03A6, 0x03A7,
+ 0x03A8, 0x03A9, 0x0399, 0x03A5, 0x039F, 0x03A5, 0x03A9, 0x03CF,
+ 0x0392, 0x0398, 0x03D2, 0x03D2, 0x03D2, 0x03A6, 0x03A0, 0x03D7,
+ 0x03D8, 0x03D9, 0x03DA, 0x03DA, 0x03DC, 0x03DC, 0x03DE, 0x03DE,
+ 0x03E0, 0x03E0, 0x03E2, 0x03E2, 0x03E4, 0x03E4, 0x03E6, 0x03E6,
+ 0x03E8, 0x03E8, 0x03EA, 0x03EA, 0x03EC, 0x03EC, 0x03EE, 0x03EE,
+ 0x039A, 0x03A1, 0x03A3, 0x03F3, 0x03F4, 0x03F5, 0x03F6, 0x03F7,
+ 0x03F8, 0x03F9, 0x03FA, 0x03FB, 0x03FC, 0x03FD, 0x03FE, 0x03FF
+};
+
+static GS_UINT32 sort04[] = {
+ 0x0415, 0x0415, 0x0402, 0x0413, 0x0404, 0x0405, 0x0406, 0x0406,
+ 0x0408, 0x0409, 0x040A, 0x040B, 0x041A, 0x0418, 0x0423, 0x040F,
+ 0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417,
+ 0x0418, 0x0419, 0x041A, 0x041B, 0x041C, 0x041D, 0x041E, 0x041F,
+ 0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427,
+ 0x0428, 0x0429, 0x042A, 0x042B, 0x042C, 0x042D, 0x042E, 0x042F,
+ 0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417,
+ 0x0418, 0x0419, 0x041A, 0x041B, 0x041C, 0x041D, 0x041E, 0x041F,
+ 0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427,
+ 0x0428, 0x0429, 0x042A, 0x042B, 0x042C, 0x042D, 0x042E, 0x042F,
+ 0x0415, 0x0415, 0x0402, 0x0413, 0x0404, 0x0405, 0x0406, 0x0406,
+ 0x0408, 0x0409, 0x040A, 0x040B, 0x041A, 0x0418, 0x0423, 0x040F,
+ 0x0460, 0x0460, 0x0462, 0x0462, 0x0464, 0x0464, 0x0466, 0x0466,
+ 0x0468, 0x0468, 0x046A, 0x046A, 0x046C, 0x046C, 0x046E, 0x046E,
+ 0x0470, 0x0470, 0x0472, 0x0472, 0x0474, 0x0474, 0x0474, 0x0474,
+ 0x0478, 0x0478, 0x047A, 0x047A, 0x047C, 0x047C, 0x047E, 0x047E,
+ 0x0480, 0x0480, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487,
+ 0x0488, 0x0489, 0x048A, 0x048B, 0x048C, 0x048C, 0x048E, 0x048E,
+ 0x0490, 0x0490, 0x0492, 0x0492, 0x0494, 0x0494, 0x0496, 0x0496,
+ 0x0498, 0x0498, 0x049A, 0x049A, 0x049C, 0x049C, 0x049E, 0x049E,
+ 0x04A0, 0x04A0, 0x04A2, 0x04A2, 0x04A4, 0x04A4, 0x04A6, 0x04A6,
+ 0x04A8, 0x04A8, 0x04AA, 0x04AA, 0x04AC, 0x04AC, 0x04AE, 0x04AE,
+ 0x04B0, 0x04B0, 0x04B2, 0x04B2, 0x04B4, 0x04B4, 0x04B6, 0x04B6,
+ 0x04B8, 0x04B8, 0x04BA, 0x04BA, 0x04BC, 0x04BC, 0x04BE, 0x04BE,
+ 0x04C0, 0x0416, 0x0416, 0x04C3, 0x04C3, 0x04C5, 0x04C6, 0x04C7,
+ 0x04C7, 0x04C9, 0x04CA, 0x04CB, 0x04CB, 0x04CD, 0x04CE, 0x04CF,
+ 0x0410, 0x0410, 0x0410, 0x0410, 0x04D4, 0x04D4, 0x0415, 0x0415,
+ 0x04D8, 0x04D8, 0x04D8, 0x04D8, 0x0416, 0x0416, 0x0417, 0x0417,
+ 0x04E0, 0x04E0, 0x0418, 0x0418, 0x0418, 0x0418, 0x041E, 0x041E,
+ 0x04E8, 0x04E8, 0x04E8, 0x04E8, 0x042D, 0x042D, 0x0423, 0x0423,
+ 0x0423, 0x0423, 0x0423, 0x0423, 0x0427, 0x0427, 0x04F6, 0x04F7,
+ 0x042B, 0x042B, 0x04FA, 0x04FB, 0x04FC, 0x04FD, 0x04FE, 0x04FF
+};
+
+static GS_UINT32 sort05[] = {
+ 0x0500, 0x0501, 0x0502, 0x0503, 0x0504, 0x0505, 0x0506, 0x0507,
+ 0x0508, 0x0509, 0x050A, 0x050B, 0x050C, 0x050D, 0x050E, 0x050F,
+ 0x0510, 0x0511, 0x0512, 0x0513, 0x0514, 0x0515, 0x0516, 0x0517,
+ 0x0518, 0x0519, 0x051A, 0x051B, 0x051C, 0x051D, 0x051E, 0x051F,
+ 0x0520, 0x0521, 0x0522, 0x0523, 0x0524, 0x0525, 0x0526, 0x0527,
+ 0x0528, 0x0529, 0x052A, 0x052B, 0x052C, 0x052D, 0x052E, 0x052F,
+ 0x0530, 0x0531, 0x0532, 0x0533, 0x0534, 0x0535, 0x0536, 0x0537,
+ 0x0538, 0x0539, 0x053A, 0x053B, 0x053C, 0x053D, 0x053E, 0x053F,
+ 0x0540, 0x0541, 0x0542, 0x0543, 0x0544, 0x0545, 0x0546, 0x0547,
+ 0x0548, 0x0549, 0x054A, 0x054B, 0x054C, 0x054D, 0x054E, 0x054F,
+ 0x0550, 0x0551, 0x0552, 0x0553, 0x0554, 0x0555, 0x0556, 0x0557,
+ 0x0558, 0x0559, 0x055A, 0x055B, 0x055C, 0x055D, 0x055E, 0x055F,
+ 0x0560, 0x0531, 0x0532, 0x0533, 0x0534, 0x0535, 0x0536, 0x0537,
+ 0x0538, 0x0539, 0x053A, 0x053B, 0x053C, 0x053D, 0x053E, 0x053F,
+ 0x0540, 0x0541, 0x0542, 0x0543, 0x0544, 0x0545, 0x0546, 0x0547,
+ 0x0548, 0x0549, 0x054A, 0x054B, 0x054C, 0x054D, 0x054E, 0x054F,
+ 0x0550, 0x0551, 0x0552, 0x0553, 0x0554, 0x0555, 0x0556, 0x0587,
+ 0x0588, 0x0589, 0x058A, 0x058B, 0x058C, 0x058D, 0x058E, 0x058F,
+ 0x0590, 0x0591, 0x0592, 0x0593, 0x0594, 0x0595, 0x0596, 0x0597,
+ 0x0598, 0x0599, 0x059A, 0x059B, 0x059C, 0x059D, 0x059E, 0x059F,
+ 0x05A0, 0x05A1, 0x05A2, 0x05A3, 0x05A4, 0x05A5, 0x05A6, 0x05A7,
+ 0x05A8, 0x05A9, 0x05AA, 0x05AB, 0x05AC, 0x05AD, 0x05AE, 0x05AF,
+ 0x05B0, 0x05B1, 0x05B2, 0x05B3, 0x05B4, 0x05B5, 0x05B6, 0x05B7,
+ 0x05B8, 0x05B9, 0x05BA, 0x05BB, 0x05BC, 0x05BD, 0x05BE, 0x05BF,
+ 0x05C0, 0x05C1, 0x05C2, 0x05C3, 0x05C4, 0x05C5, 0x05C6, 0x05C7,
+ 0x05C8, 0x05C9, 0x05CA, 0x05CB, 0x05CC, 0x05CD, 0x05CE, 0x05CF,
+ 0x05D0, 0x05D1, 0x05D2, 0x05D3, 0x05D4, 0x05D5, 0x05D6, 0x05D7,
+ 0x05D8, 0x05D9, 0x05DA, 0x05DB, 0x05DC, 0x05DD, 0x05DE, 0x05DF,
+ 0x05E0, 0x05E1, 0x05E2, 0x05E3, 0x05E4, 0x05E5, 0x05E6, 0x05E7,
+ 0x05E8, 0x05E9, 0x05EA, 0x05EB, 0x05EC, 0x05ED, 0x05EE, 0x05EF,
+ 0x05F0, 0x05F1, 0x05F2, 0x05F3, 0x05F4, 0x05F5, 0x05F6, 0x05F7,
+ 0x05F8, 0x05F9, 0x05FA, 0x05FB, 0x05FC, 0x05FD, 0x05FE, 0x05FF
+};
+
+static GS_UINT32 sort1E[] = {
+ 0x0041, 0x0041, 0x0042, 0x0042, 0x0042, 0x0042, 0x0042, 0x0042,
+ 0x0043, 0x0043, 0x0044, 0x0044, 0x0044, 0x0044, 0x0044, 0x0044,
+ 0x0044, 0x0044, 0x0044, 0x0044, 0x0045, 0x0045, 0x0045, 0x0045,
+ 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0046, 0x0046,
+ 0x0047, 0x0047, 0x0048, 0x0048, 0x0048, 0x0048, 0x0048, 0x0048,
+ 0x0048, 0x0048, 0x0048, 0x0048, 0x0049, 0x0049, 0x0049, 0x0049,
+ 0x004B, 0x004B, 0x004B, 0x004B, 0x004B, 0x004B, 0x004C, 0x004C,
+ 0x004C, 0x004C, 0x004C, 0x004C, 0x004C, 0x004C, 0x004D, 0x004D,
+ 0x004D, 0x004D, 0x004D, 0x004D, 0x004E, 0x004E, 0x004E, 0x004E,
+ 0x004E, 0x004E, 0x004E, 0x004E, 0x004F, 0x004F, 0x004F, 0x004F,
+ 0x004F, 0x004F, 0x004F, 0x004F, 0x0050, 0x0050, 0x0050, 0x0050,
+ 0x0052, 0x0052, 0x0052, 0x0052, 0x0052, 0x0052, 0x0052, 0x0052,
+ 0x0053, 0x0053, 0x0053, 0x0053, 0x0053, 0x0053, 0x0053, 0x0053,
+ 0x0053, 0x0053, 0x0054, 0x0054, 0x0054, 0x0054, 0x0054, 0x0054,
+ 0x0054, 0x0054, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055,
+ 0x0055, 0x0055, 0x0055, 0x0055, 0x0056, 0x0056, 0x0056, 0x0056,
+ 0x0057, 0x0057, 0x0057, 0x0057, 0x0057, 0x0057, 0x0057, 0x0057,
+ 0x0057, 0x0057, 0x0058, 0x0058, 0x0058, 0x0058, 0x0059, 0x0059,
+ 0x005A, 0x005A, 0x005A, 0x005A, 0x005A, 0x005A, 0x0048, 0x0054,
+ 0x0057, 0x0059, 0x1E9A, 0x0053, 0x1E9C, 0x1E9D, 0x1E9E, 0x1E9F,
+ 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041,
+ 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041,
+ 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041, 0x0041,
+ 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045,
+ 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045, 0x0045,
+ 0x0049, 0x0049, 0x0049, 0x0049, 0x004F, 0x004F, 0x004F, 0x004F,
+ 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F,
+ 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F, 0x004F,
+ 0x004F, 0x004F, 0x004F, 0x004F, 0x0055, 0x0055, 0x0055, 0x0055,
+ 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055, 0x0055,
+ 0x0055, 0x0055, 0x0059, 0x0059, 0x0059, 0x0059, 0x0059, 0x0059,
+ 0x0059, 0x0059, 0x1EFA, 0x1EFB, 0x1EFC, 0x1EFD, 0x1EFE, 0x1EFF
+};
+
+static GS_UINT32 sort1F[] = {
+ 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391,
+ 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391,
+ 0x0395, 0x0395, 0x0395, 0x0395, 0x0395, 0x0395, 0x1F16, 0x1F17,
+ 0x0395, 0x0395, 0x0395, 0x0395, 0x0395, 0x0395, 0x1F1E, 0x1F1F,
+ 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397,
+ 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397,
+ 0x0399, 0x0399, 0x0399, 0x0399, 0x0399, 0x0399, 0x0399, 0x0399,
+ 0x0399, 0x0399, 0x0399, 0x0399, 0x0399, 0x0399, 0x0399, 0x0399,
+ 0x039F, 0x039F, 0x039F, 0x039F, 0x039F, 0x039F, 0x1F46, 0x1F47,
+ 0x039F, 0x039F, 0x039F, 0x039F, 0x039F, 0x039F, 0x1F4E, 0x1F4F,
+ 0x03A5, 0x03A5, 0x03A5, 0x03A5, 0x03A5, 0x03A5, 0x03A5, 0x03A5,
+ 0x1F58, 0x03A5, 0x1F5A, 0x03A5, 0x1F5C, 0x03A5, 0x1F5E, 0x03A5,
+ 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9,
+ 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9,
+ 0x0391, 0x1FBB, 0x0395, 0x1FC9, 0x0397, 0x1FCB, 0x0399, 0x1FDB,
+ 0x039F, 0x1FF9, 0x03A5, 0x1FEB, 0x03A9, 0x1FFB, 0x1F7E, 0x1F7F,
+ 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391,
+ 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x0391,
+ 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397,
+ 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397, 0x0397,
+ 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9,
+ 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9, 0x03A9,
+ 0x0391, 0x0391, 0x0391, 0x0391, 0x0391, 0x1FB5, 0x0391, 0x0391,
+ 0x0391, 0x0391, 0x0391, 0x1FBB, 0x0391, 0x1FBD, 0x0399, 0x1FBF,
+ 0x1FC0, 0x1FC1, 0x0397, 0x0397, 0x0397, 0x1FC5, 0x0397, 0x0397,
+ 0x0395, 0x1FC9, 0x0397, 0x1FCB, 0x0397, 0x1FCD, 0x1FCE, 0x1FCF,
+ 0x0399, 0x0399, 0x0399, 0x1FD3, 0x1FD4, 0x1FD5, 0x0399, 0x0399,
+ 0x0399, 0x0399, 0x0399, 0x1FDB, 0x1FDC, 0x1FDD, 0x1FDE, 0x1FDF,
+ 0x03A5, 0x03A5, 0x03A5, 0x1FE3, 0x03A1, 0x03A1, 0x03A5, 0x03A5,
+ 0x03A5, 0x03A5, 0x03A5, 0x1FEB, 0x03A1, 0x1FED, 0x1FEE, 0x1FEF,
+ 0x1FF0, 0x1FF1, 0x03A9, 0x03A9, 0x03A9, 0x1FF5, 0x03A9, 0x03A9,
+ 0x039F, 0x1FF9, 0x03A9, 0x1FFB, 0x03A9, 0x1FFD, 0x1FFE, 0x1FFF
+};
+
+static GS_UINT32 sort21[] = {
+ 0x2100, 0x2101, 0x2102, 0x2103, 0x2104, 0x2105, 0x2106, 0x2107,
+ 0x2108, 0x2109, 0x210A, 0x210B, 0x210C, 0x210D, 0x210E, 0x210F,
+ 0x2110, 0x2111, 0x2112, 0x2113, 0x2114, 0x2115, 0x2116, 0x2117,
+ 0x2118, 0x2119, 0x211A, 0x211B, 0x211C, 0x211D, 0x211E, 0x211F,
+ 0x2120, 0x2121, 0x2122, 0x2123, 0x2124, 0x2125, 0x2126, 0x2127,
+ 0x2128, 0x2129, 0x212A, 0x212B, 0x212C, 0x212D, 0x212E, 0x212F,
+ 0x2130, 0x2131, 0x2132, 0x2133, 0x2134, 0x2135, 0x2136, 0x2137,
+ 0x2138, 0x2139, 0x213A, 0x213B, 0x213C, 0x213D, 0x213E, 0x213F,
+ 0x2140, 0x2141, 0x2142, 0x2143, 0x2144, 0x2145, 0x2146, 0x2147,
+ 0x2148, 0x2149, 0x214A, 0x214B, 0x214C, 0x214D, 0x214E, 0x214F,
+ 0x2150, 0x2151, 0x2152, 0x2153, 0x2154, 0x2155, 0x2156, 0x2157,
+ 0x2158, 0x2159, 0x215A, 0x215B, 0x215C, 0x215D, 0x215E, 0x215F,
+ 0x2160, 0x2161, 0x2162, 0x2163, 0x2164, 0x2165, 0x2166, 0x2167,
+ 0x2168, 0x2169, 0x216A, 0x216B, 0x216C, 0x216D, 0x216E, 0x216F,
+ 0x2160, 0x2161, 0x2162, 0x2163, 0x2164, 0x2165, 0x2166, 0x2167,
+ 0x2168, 0x2169, 0x216A, 0x216B, 0x216C, 0x216D, 0x216E, 0x216F,
+ 0x2180, 0x2181, 0x2182, 0x2183, 0x2184, 0x2185, 0x2186, 0x2187,
+ 0x2188, 0x2189, 0x218A, 0x218B, 0x218C, 0x218D, 0x218E, 0x218F,
+ 0x2190, 0x2191, 0x2192, 0x2193, 0x2194, 0x2195, 0x2196, 0x2197,
+ 0x2198, 0x2199, 0x219A, 0x219B, 0x219C, 0x219D, 0x219E, 0x219F,
+ 0x21A0, 0x21A1, 0x21A2, 0x21A3, 0x21A4, 0x21A5, 0x21A6, 0x21A7,
+ 0x21A8, 0x21A9, 0x21AA, 0x21AB, 0x21AC, 0x21AD, 0x21AE, 0x21AF,
+ 0x21B0, 0x21B1, 0x21B2, 0x21B3, 0x21B4, 0x21B5, 0x21B6, 0x21B7,
+ 0x21B8, 0x21B9, 0x21BA, 0x21BB, 0x21BC, 0x21BD, 0x21BE, 0x21BF,
+ 0x21C0, 0x21C1, 0x21C2, 0x21C3, 0x21C4, 0x21C5, 0x21C6, 0x21C7,
+ 0x21C8, 0x21C9, 0x21CA, 0x21CB, 0x21CC, 0x21CD, 0x21CE, 0x21CF,
+ 0x21D0, 0x21D1, 0x21D2, 0x21D3, 0x21D4, 0x21D5, 0x21D6, 0x21D7,
+ 0x21D8, 0x21D9, 0x21DA, 0x21DB, 0x21DC, 0x21DD, 0x21DE, 0x21DF,
+ 0x21E0, 0x21E1, 0x21E2, 0x21E3, 0x21E4, 0x21E5, 0x21E6, 0x21E7,
+ 0x21E8, 0x21E9, 0x21EA, 0x21EB, 0x21EC, 0x21ED, 0x21EE, 0x21EF,
+ 0x21F0, 0x21F1, 0x21F2, 0x21F3, 0x21F4, 0x21F5, 0x21F6, 0x21F7,
+ 0x21F8, 0x21F9, 0x21FA, 0x21FB, 0x21FC, 0x21FD, 0x21FE, 0x21FF
+};
+
+static GS_UINT32 sort24[] = {
+ 0x2400, 0x2401, 0x2402, 0x2403, 0x2404, 0x2405, 0x2406, 0x2407,
+ 0x2408, 0x2409, 0x240A, 0x240B, 0x240C, 0x240D, 0x240E, 0x240F,
+ 0x2410, 0x2411, 0x2412, 0x2413, 0x2414, 0x2415, 0x2416, 0x2417,
+ 0x2418, 0x2419, 0x241A, 0x241B, 0x241C, 0x241D, 0x241E, 0x241F,
+ 0x2420, 0x2421, 0x2422, 0x2423, 0x2424, 0x2425, 0x2426, 0x2427,
+ 0x2428, 0x2429, 0x242A, 0x242B, 0x242C, 0x242D, 0x242E, 0x242F,
+ 0x2430, 0x2431, 0x2432, 0x2433, 0x2434, 0x2435, 0x2436, 0x2437,
+ 0x2438, 0x2439, 0x243A, 0x243B, 0x243C, 0x243D, 0x243E, 0x243F,
+ 0x2440, 0x2441, 0x2442, 0x2443, 0x2444, 0x2445, 0x2446, 0x2447,
+ 0x2448, 0x2449, 0x244A, 0x244B, 0x244C, 0x244D, 0x244E, 0x244F,
+ 0x2450, 0x2451, 0x2452, 0x2453, 0x2454, 0x2455, 0x2456, 0x2457,
+ 0x2458, 0x2459, 0x245A, 0x245B, 0x245C, 0x245D, 0x245E, 0x245F,
+ 0x2460, 0x2461, 0x2462, 0x2463, 0x2464, 0x2465, 0x2466, 0x2467,
+ 0x2468, 0x2469, 0x246A, 0x246B, 0x246C, 0x246D, 0x246E, 0x246F,
+ 0x2470, 0x2471, 0x2472, 0x2473, 0x2474, 0x2475, 0x2476, 0x2477,
+ 0x2478, 0x2479, 0x247A, 0x247B, 0x247C, 0x247D, 0x247E, 0x247F,
+ 0x2480, 0x2481, 0x2482, 0x2483, 0x2484, 0x2485, 0x2486, 0x2487,
+ 0x2488, 0x2489, 0x248A, 0x248B, 0x248C, 0x248D, 0x248E, 0x248F,
+ 0x2490, 0x2491, 0x2492, 0x2493, 0x2494, 0x2495, 0x2496, 0x2497,
+ 0x2498, 0x2499, 0x249A, 0x249B, 0x249C, 0x249D, 0x249E, 0x249F,
+ 0x24A0, 0x24A1, 0x24A2, 0x24A3, 0x24A4, 0x24A5, 0x24A6, 0x24A7,
+ 0x24A8, 0x24A9, 0x24AA, 0x24AB, 0x24AC, 0x24AD, 0x24AE, 0x24AF,
+ 0x24B0, 0x24B1, 0x24B2, 0x24B3, 0x24B4, 0x24B5, 0x24B6, 0x24B7,
+ 0x24B8, 0x24B9, 0x24BA, 0x24BB, 0x24BC, 0x24BD, 0x24BE, 0x24BF,
+ 0x24C0, 0x24C1, 0x24C2, 0x24C3, 0x24C4, 0x24C5, 0x24C6, 0x24C7,
+ 0x24C8, 0x24C9, 0x24CA, 0x24CB, 0x24CC, 0x24CD, 0x24CE, 0x24CF,
+ 0x24B6, 0x24B7, 0x24B8, 0x24B9, 0x24BA, 0x24BB, 0x24BC, 0x24BD,
+ 0x24BE, 0x24BF, 0x24C0, 0x24C1, 0x24C2, 0x24C3, 0x24C4, 0x24C5,
+ 0x24C6, 0x24C7, 0x24C8, 0x24C9, 0x24CA, 0x24CB, 0x24CC, 0x24CD,
+ 0x24CE, 0x24CF, 0x24EA, 0x24EB, 0x24EC, 0x24ED, 0x24EE, 0x24EF,
+ 0x24F0, 0x24F1, 0x24F2, 0x24F3, 0x24F4, 0x24F5, 0x24F6, 0x24F7,
+ 0x24F8, 0x24F9, 0x24FA, 0x24FB, 0x24FC, 0x24FD, 0x24FE, 0x24FF
+};
+
+static GS_UINT32 sortFF[] = {
+ 0xFF00, 0xFF01, 0xFF02, 0xFF03, 0xFF04, 0xFF05, 0xFF06, 0xFF07,
+ 0xFF08, 0xFF09, 0xFF0A, 0xFF0B, 0xFF0C, 0xFF0D, 0xFF0E, 0xFF0F,
+ 0xFF10, 0xFF11, 0xFF12, 0xFF13, 0xFF14, 0xFF15, 0xFF16, 0xFF17,
+ 0xFF18, 0xFF19, 0xFF1A, 0xFF1B, 0xFF1C, 0xFF1D, 0xFF1E, 0xFF1F,
+ 0xFF20, 0xFF21, 0xFF22, 0xFF23, 0xFF24, 0xFF25, 0xFF26, 0xFF27,
+ 0xFF28, 0xFF29, 0xFF2A, 0xFF2B, 0xFF2C, 0xFF2D, 0xFF2E, 0xFF2F,
+ 0xFF30, 0xFF31, 0xFF32, 0xFF33, 0xFF34, 0xFF35, 0xFF36, 0xFF37,
+ 0xFF38, 0xFF39, 0xFF3A, 0xFF3B, 0xFF3C, 0xFF3D, 0xFF3E, 0xFF3F,
+ 0xFF40, 0xFF21, 0xFF22, 0xFF23, 0xFF24, 0xFF25, 0xFF26, 0xFF27,
+ 0xFF28, 0xFF29, 0xFF2A, 0xFF2B, 0xFF2C, 0xFF2D, 0xFF2E, 0xFF2F,
+ 0xFF30, 0xFF31, 0xFF32, 0xFF33, 0xFF34, 0xFF35, 0xFF36, 0xFF37,
+ 0xFF38, 0xFF39, 0xFF3A, 0xFF5B, 0xFF5C, 0xFF5D, 0xFF5E, 0xFF5F,
+ 0xFF60, 0xFF61, 0xFF62, 0xFF63, 0xFF64, 0xFF65, 0xFF66, 0xFF67,
+ 0xFF68, 0xFF69, 0xFF6A, 0xFF6B, 0xFF6C, 0xFF6D, 0xFF6E, 0xFF6F,
+ 0xFF70, 0xFF71, 0xFF72, 0xFF73, 0xFF74, 0xFF75, 0xFF76, 0xFF77,
+ 0xFF78, 0xFF79, 0xFF7A, 0xFF7B, 0xFF7C, 0xFF7D, 0xFF7E, 0xFF7F,
+ 0xFF80, 0xFF81, 0xFF82, 0xFF83, 0xFF84, 0xFF85, 0xFF86, 0xFF87,
+ 0xFF88, 0xFF89, 0xFF8A, 0xFF8B, 0xFF8C, 0xFF8D, 0xFF8E, 0xFF8F,
+ 0xFF90, 0xFF91, 0xFF92, 0xFF93, 0xFF94, 0xFF95, 0xFF96, 0xFF97,
+ 0xFF98, 0xFF99, 0xFF9A, 0xFF9B, 0xFF9C, 0xFF9D, 0xFF9E, 0xFF9F,
+ 0xFFA0, 0xFFA1, 0xFFA2, 0xFFA3, 0xFFA4, 0xFFA5, 0xFFA6, 0xFFA7,
+ 0xFFA8, 0xFFA9, 0xFFAA, 0xFFAB, 0xFFAC, 0xFFAD, 0xFFAE, 0xFFAF,
+ 0xFFB0, 0xFFB1, 0xFFB2, 0xFFB3, 0xFFB4, 0xFFB5, 0xFFB6, 0xFFB7,
+ 0xFFB8, 0xFFB9, 0xFFBA, 0xFFBB, 0xFFBC, 0xFFBD, 0xFFBE, 0xFFBF,
+ 0xFFC0, 0xFFC1, 0xFFC2, 0xFFC3, 0xFFC4, 0xFFC5, 0xFFC6, 0xFFC7,
+ 0xFFC8, 0xFFC9, 0xFFCA, 0xFFCB, 0xFFCC, 0xFFCD, 0xFFCE, 0xFFCF,
+ 0xFFD0, 0xFFD1, 0xFFD2, 0xFFD3, 0xFFD4, 0xFFD5, 0xFFD6, 0xFFD7,
+ 0xFFD8, 0xFFD9, 0xFFDA, 0xFFDB, 0xFFDC, 0xFFDD, 0xFFDE, 0xFFDF,
+ 0xFFE0, 0xFFE1, 0xFFE2, 0xFFE3, 0xFFE4, 0xFFE5, 0xFFE6, 0xFFE7,
+ 0xFFE8, 0xFFE9, 0xFFEA, 0xFFEB, 0xFFEC, 0xFFED, 0xFFEE, 0xFFEF,
+ 0xFFF0, 0xFFF1, 0xFFF2, 0xFFF3, 0xFFF4, 0xFFF5, 0xFFF6, 0xFFF7,
+ 0xFFF8, 0xFFF9, 0xFFFA, 0xFFFB, 0xFFFC, 0xFFFD, 0xFFFE, 0xFFFF
+};
+
+/*
+ * Unicode-4.0.0 case folding information.
+ * Page table indexed by the high byte of a BMP code point (wchar >> 8).
+ * A non-NULL entry points to a 256-entry table mapping the low byte to the
+ * case-folded sort weight; a NULL entry means the whole page sorts by its
+ * raw code point (identity mapping).
+ */
+static GS_UINT32 *unicase_sort_pages[256] = {
+    sort00, sort01, sort02, sort03, sort04, sort05, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, sort1E, sort1F,
+    NULL, sort21, NULL, NULL, sort24, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL, NULL, NULL, sortFF
+};
+
+/*
+* Validate a UTF-8 continuation byte: every byte after the first one in a
+* multi-byte UTF-8 sequence must have the bit pattern 10xxxxxx.
+*/
+#define IS_CONTINUATION_BYTE(c) (((c) ^ 0x80) < 0x40)
+
+#define GS_ERR_ILLEGAL_SEQUENCE (-1)
+#define GS_ERR_TOOSMALL (-101)
+#define GS_ERR_TOOSMALL2 (-102) /* Need at least two bytes */
+#define GS_ERR_TOOSMALL3 (-103) /* Need at least three bytes */
+#define GS_ERR_TOOSMALL4 (-104) /* Need at least four bytes */
+
+/*
+* If the unicode of a character is greater than 0xFFFF,
+* the character collation is set to 0xFFFD.
+*/
+#define GS_REPLACEMENT_CHARACTER 0xFFFD
+#define NEXT_WORD_POS(p, p_word_bytes) ((p) += (p_word_bytes))
+
+GS_UNICASE_INFO g_unicase_default = {
+ unicase_sort_pages
+};
+
+static int strnncoll_utf8mb4_general_pad_space(const unsigned char* arg1, size_t len1,
+ const unsigned char* arg2, size_t len2);
+static int mb_wc_utf8mb4(const unsigned char* s, const unsigned char* end, GS_UINT32* wchar);
+static int strnncoll_utf8mb4_bin_pad_space(const unsigned char* arg1, size_t len1,
+ const unsigned char* arg2, size_t len2);
+Datum hash_utf8mb4_general_pad_space(const unsigned char *key, size_t len);
+Datum hash_utf8mb4_bin_pad_space(const unsigned char *key);
+static int get_current_char_sorted_value(const unsigned char* cur_str, const unsigned char* str_end,
+ GS_UINT32* next_word, const GS_UNICASE_INFO *uni_plane);
+bool is_b_format_collation(Oid collation);
+
+/*
+* Reject invalid pairings of the binary collation and column types.
+* The binary collation is only legal in a B-format database, and only
+* binary string types (such as blob) may carry a collation other than
+* the binary one; both violations raise a feature-not-supported error.
+*/
+void check_binary_collation(Oid collation, Oid type_oid)
+{
+    /* binary collation requires a B-format compatible database */
+    if (collation == BINARY_COLLATION_OID && !DB_IS_CMPT(B_FORMAT)) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("Un-support feature"),
+            errdetail("this collation only support in B-format database")));
+    }
+
+    /* binary types accept only the binary collation */
+    if (IsBinaryType(type_oid) && collation != BINARY_COLLATION_OID) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("binary collation only support binary type in B format")));
+    }
+}
+
+/*
+* Map a type to its binary counterpart when the binary collation is in use.
+* Returns the (possibly transformed) type oid.  For the binary collation,
+* TEXTOID is rewritten to BLOBOID, BLOBOID passes through unchanged, and any
+* other type raises an error.  On success *collation is reset to InvalidOid,
+* because the binary collation is not stored at the attribute level.
+*/
+Oid binary_need_transform_typeid(Oid typeoid, Oid* collation)
+{
+    Oid new_typid = typeoid;
+    if (*collation == BINARY_COLLATION_OID) {
+        /* use switch case stmt for extension in feature */
+        switch (typeoid) {
+            /* binary type no need to transform */
+            case BLOBOID:
+                break;
+            /* string type need to transform to binary type */
+            case TEXTOID:
+                new_typid = BLOBOID;
+                break;
+            default:
+                ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                    errmsg("Un-support feature"),
+                    errdetail("type %s cannot be set to binary collation currently", get_typename(typeoid))));
+                break;
+        }
+        /* binary collation in attribute level collation no need to be set. */
+        *collation = InvalidOid;
+    }
+    return new_typid;
+}
+
+/*
+* Like is_b_format_collation(), but additionally errors out when a B-format
+* collation is used outside a B-format compatible database.  Returns true
+* whenever it returns at all (non-B-format collations are simply allowed).
+*/
+bool is_support_b_format_collation(Oid collation)
+{
+    if (is_b_format_collation(collation) && !DB_IS_CMPT(B_FORMAT)) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("Un-support feature"),
+            errdetail("this collation only support in B-format database")));
+    }
+    return true;
+}
+
+/*
+* Report whether 'collation' is one of the builtin B-format collations.
+* On multiple-node (distributed) builds these collations are rejected with
+* an error instead of being reported as supported.
+*/
+bool is_b_format_collation(Oid collation)
+{
+    if (COLLATION_IN_B_FORMAT(collation)) {
+#ifdef ENABLE_MULTIPLE_NODES
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("Un-support feature"),
+            errdetail("this collation is not currently supported ")));
+#endif
+        return true;
+    }
+    return false;
+}
+
+/*
+* Replace *wchar (a Unicode code point) with its collation sort weight.
+* test_sort is the 256-entry page table mapping code points to weights;
+* pages with a NULL entry keep the raw code point as the weight.
+* Code points beyond the BMP all collate as the replacement character.
+*/
+static inline void sort_by_unicode(GS_UINT32 **test_sort, GS_UINT32 *wchar)
+{
+    if (*wchar > 0xFFFF) {
+        *wchar = GS_REPLACEMENT_CHARACTER;
+        return;
+    }
+    const GS_UINT32 *row = test_sort[*wchar >> 8];
+    if (row != NULL) {
+        *wchar = row[*wchar & 0xFF];
+    }
+}
+
+/*
+* Compare two strings under a builtin B-format collation.
+* Returns <0, 0 or >0 in the usual strcmp() convention.  Only the
+* utf8mb4_general_ci / utf8mb4_unicode_ci / utf8mb4_bin collations are
+* handled; anything else raises a feature-not-supported error.
+*/
+int varstr_cmp_by_builtin_collations(char* arg1, int len1, char* arg2, int len2, Oid collid)
+{
+    int result = 0;
+    switch (collid) {
+        case UTF8MB4_GENERAL_CI_COLLATION_OID:
+        case UTF8MB4_UNICODE_CI_COLLATION_OID:
+            result = strnncoll_utf8mb4_general_pad_space((unsigned char*)arg1, len1, (unsigned char*)arg2, len2);
+            break;
+        case UTF8MB4_BIN_COLLATION_OID:
+            result = strnncoll_utf8mb4_bin_pad_space((unsigned char*)arg1, len1, (unsigned char*)arg2, len2);
+            break;
+        default:
+            ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                errmsg("Un-support feature"),
+                errdetail("this collation is not currently supported ")));
+            break;
+    }
+
+    return result;
+}
+
+/*
+* Hash a string under a builtin B-format collation so that strings equal
+* under the collation hash identically (needed for hash join / group by /
+* distinct).  Only the utf8mb4_general_ci / utf8mb4_unicode_ci / utf8mb4_bin
+* collations are handled; anything else raises an error.
+*/
+Datum hash_text_by_builtin_colltions(const unsigned char *key, size_t len, Oid collid)
+{
+    Datum result = 0;
+    switch (collid) {
+        case UTF8MB4_GENERAL_CI_COLLATION_OID:
+        case UTF8MB4_UNICODE_CI_COLLATION_OID:
+            /* key is already const unsigned char *; no cast needed */
+            result = hash_utf8mb4_general_pad_space(key, len);
+            break;
+        case UTF8MB4_BIN_COLLATION_OID:
+            result = hash_utf8mb4_bin_pad_space(key);
+            break;
+        default:
+            ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                errmsg("Un-support feature"),
+                errdetail("this collation is not currently supported ")));
+            break;
+    }
+
+    return result;
+}
+
+/*
+* Fallback comparison used when UTF-8 decoding fails: compare the two
+* remaining byte ranges with memcmp, breaking ties by length.
+*/
+static inline int bincmp_utf8mb4(const unsigned char *arg1, const unsigned char *arg1_end,
+    const unsigned char *arg2, const unsigned char *arg2_end)
+{
+    int len1 = (int) (arg1_end - arg1);
+    int len2 = (int) (arg2_end - arg2);
+    int common = (len1 < len2) ? len1 : len2;
+    int diff = memcmp(arg1, arg2, common);
+    if (diff != 0) {
+        return diff;
+    }
+    return len1 - len2;
+}
+
+/*
+* utf8mb4_general_ci / utf8mb4_unicode_ci compare function with PAD SPACE
+* semantics (the shorter string is compared as if padded with spaces).
+* return value is 0, arg1 = arg2
+* return < 0 , arg1 < arg2
+* return > 0 , arg1 > arg2
+*/
+static int strnncoll_utf8mb4_general_pad_space(const unsigned char* arg1, size_t len1,
+    const unsigned char* arg2, size_t len2)
+{
+    GS_UINT32 arg1_word = 0;
+    GS_UINT32 arg2_word = 0;
+
+    const unsigned char* arg1_end = arg1 + len1;
+    const unsigned char* arg2_end = arg2 + len2;
+    const GS_UNICASE_INFO *uni_plane = &g_unicase_default;
+
+    /* compare character-by-character on case-folded sort weights */
+    while (arg1 < arg1_end && arg2 < arg2_end) {
+        int arg1_bytes = get_current_char_sorted_value(arg1, arg1_end, &arg1_word, uni_plane);
+        int arg2_bytes = get_current_char_sorted_value(arg2, arg2_end, &arg2_word, uni_plane);
+        /* Incorrect string, compare bytewise */
+        if (arg1_bytes <= 0 || arg2_bytes <= 0) {
+            return bincmp_utf8mb4(arg1, arg1_end, arg2, arg2_end);
+        }
+
+        if (arg1_word != arg2_word) {
+            return arg1_word > arg2_word ? 1 : -1;
+        }
+        NEXT_WORD_POS(arg1, arg1_bytes);
+        NEXT_WORD_POS(arg2, arg2_bytes);
+    }
+
+    int res = 0;
+    /* remaining byte counts after the common prefix was consumed */
+    len1 = (size_t)(arg1_end - arg1);
+    len2 = (size_t)(arg2_end - arg2);
+
+    /*
+     * PAD SPACE: the longer tail wins/loses depending on whether its first
+     * non-space byte sorts above or below ' '.  'swap' flips the sign when
+     * the roles of arg1/arg2 were exchanged.
+     */
+    if (len1 != len2) {
+        int swap = 1;
+
+        if (len1 < len2) {
+            len1 = len2;
+            arg1 = arg2;
+            arg1_end = arg2_end;
+            swap = -1;
+        }
+        for (; arg1 < arg1_end; arg1++) {
+            if (*arg1 != ' ') {
+                return (*arg1 < ' ') ? -swap : swap;
+            }
+        }
+    }
+
+    return res;
+}
+
+/*
+* Decode one UTF-8 character (used by the utf8mb4 collations).
+* Converts the bytes at s (bounded by end) to a Unicode code point stored
+* in *wchar and returns the number of bytes consumed (1..4), or a negative
+* GS_ERR_* code when the buffer is too short or the sequence is invalid.
+* Rejects overlong encodings, surrogate-range prefixes and values > U+10FFFF.
+*/
+static int mb_wc_utf8mb4(const unsigned char* s, const unsigned char* end, GS_UINT32* wchar)
+{
+    int bytes = 0;
+    size_t len = (size_t)(end - s);
+    if (len <= 0) {
+        return GS_ERR_TOOSMALL;
+    }
+
+    if (s[0] < 0x80) {
+        /* ASCII fast path */
+        *wchar = s[0];
+        bytes = 1;
+    } else if (s[0] < 0xc2) {
+        /* stray continuation byte (0x80..0xBF) or overlong 2-byte lead (0xC0/0xC1) */
+        return GS_ERR_ILLEGAL_SEQUENCE;
+    } else if (s[0] < 0xe0) {
+        if (len < 2) {
+            return GS_ERR_TOOSMALL2;
+        }
+        if (!(IS_CONTINUATION_BYTE(s[1]))) {
+            return GS_ERR_ILLEGAL_SEQUENCE;
+        }
+        *wchar = ((GS_UINT32)(s[0] & 0x1f) << 6) | (GS_UINT32)(s[1] ^ 0x80);
+        bytes = 2;
+    } else if (s[0] < 0xF0) {
+        if (len < 3) {
+            return GS_ERR_TOOSMALL3;
+        }
+        /* the (s[0] >= 0xe1 || s[1] >= 0xa0) test rejects overlong 3-byte forms */
+        if (!(IS_CONTINUATION_BYTE(s[1]) && IS_CONTINUATION_BYTE(s[2]) && (s[0] >= 0xe1 || s[1] >= 0xa0))) {
+            return GS_ERR_ILLEGAL_SEQUENCE;
+        }
+        *wchar = ((GS_UINT32)(s[0] & 0x0f) << 12) | ((GS_UINT32)(s[1] ^ 0x80) << 6) | (GS_UINT32)(s[2] ^ 0x80);
+        bytes = 3;
+    } else if (s[0] < 0xf5) {
+        if (len < 4) {
+            return GS_ERR_TOOSMALL4;
+        }
+        /* range checks keep the result within U+10000..U+10FFFF (no overlongs) */
+        if (!(IS_CONTINUATION_BYTE(s[1]) && IS_CONTINUATION_BYTE(s[2]) && IS_CONTINUATION_BYTE(s[3]) &&
+            (s[0] >= 0xf1 || s[1] >= 0x90) && (s[0] <= 0xf3 || s[1] <= 0x8f))) {
+            return GS_ERR_ILLEGAL_SEQUENCE;
+        }
+        *wchar = ((GS_UINT32)(s[0] & 0x07) << 18) |
+            ((GS_UINT32)(s[1] ^ 0x80) << 12) |
+            ((GS_UINT32)(s[2] ^ 0x80) << 6) |
+            (GS_UINT32)(s[3] ^ 0x80);
+
+        bytes = 4;
+    } else {
+        /* lead bytes 0xF5..0xFF would encode values above U+10FFFF */
+        return GS_ERR_ILLEGAL_SEQUENCE;
+    }
+    return bytes;
+}
+
+/*
+* string compare function for collation utf8mb4_bin.
+* Bytewise comparison with PAD SPACE semantics: the shorter string is
+* treated as if padded with trailing spaces.  Returns <0 / 0 / >0 in the
+* usual strcmp() convention.
+*/
+static int strnncoll_utf8mb4_bin_pad_space(const unsigned char* arg1, size_t len1,
+    const unsigned char* arg2, size_t len2)
+{
+    size_t len = len1 < len2 ? len1 : len2;
+    const unsigned char* arg1_end = arg1 + len;
+    int res = 0;
+
+    /* compare the common prefix byte by byte */
+    while (arg1 < arg1_end) {
+        if (*arg1++ != *arg2++) {
+            return ((int)arg1[-1] - (int)arg2[-1]);
+        }
+    }
+
+    /*
+     * PAD SPACE: scan the longer tail; its first non-space byte decides the
+     * result, with 'swap' flipping the sign when arg1/arg2 were exchanged.
+     * (res is still 0 here, so no sign fix-up of res itself is needed.)
+     */
+    if (len1 != len2) {
+        int swap = 1;
+        if (len1 < len2) {
+            len1 = len2;
+            arg1 = arg2;
+            swap = -1;
+        }
+        for (arg1_end = arg1 + len1 - len; arg1 < arg1_end; arg1++) {
+            if (*arg1 != ' ') {
+                return (*arg1 < ' ') ? -swap : swap;
+            }
+        }
+    }
+    return res;
+}
+
+/*
+* while collation is utf8mb4_general_ci / utf8mb4_unicode_ci,
+* use distinct and group by with string type need to
+* convert the string to a sort value before hashing.
+* Trailing spaces are stripped first so that strings equal under PAD SPACE
+* semantics hash identically.
+*/
+Datum hash_utf8mb4_general_pad_space(const unsigned char *key, size_t len)
+{
+    const GS_UNICASE_INFO *uni_plane = &g_unicase_default;
+    GS_UINT32 key_word = 0;
+    int key_bytes = 0;
+    const unsigned char* key_end = key + len;
+    /* nr1/nr2 form a MySQL-style incremental hash state */
+    GS_UINT32 nr1 = 1;
+    GS_UINT32 nr2 = 4;
+    uint ch;
+
+    /* ignore trailing spaces (PAD SPACE) */
+    while (key_end > key && key[len - 1] == ' ') {
+        key_end--;
+        len--;
+    }
+
+    /* fold the low and high byte of each sort weight into the hash state */
+    while ((key_bytes = mb_wc_utf8mb4(key, key_end, &key_word)) > 0) {
+        sort_by_unicode(uni_plane->sort_page, &key_word);
+        ch = (key_word & 0xFF);
+        nr1 ^= (((nr1 & 63) + nr2) * ch) + (nr1 << 8);
+        nr2 += 3;
+
+        /* NOTE(review): this step shifts nr2 rather than nr1; deliberate
+         * deviation from the first step or a transcription slip -- the hash
+         * is self-consistent either way, but confirm against the reference. */
+        ch = (key_word >> 8) & 0xFF;
+        nr1 ^= (((nr1 & 63) + nr2) * ch) + (nr2 << 8);
+        nr2 += 3;
+        NEXT_WORD_POS(key, key_bytes);
+    }
+    return UInt32GetDatum(nr1);
+}
+
+/*
+* hash function for collation utf8mb4_bin: hash the key with trailing
+* spaces removed so PAD SPACE-equal strings hash identically.
+* NOTE(review): the key is treated as a NUL-terminated C string (strlen),
+* so an embedded NUL would truncate what gets hashed -- confirm callers
+* always pass terminated text here.
+*/
+Datum hash_utf8mb4_bin_pad_space(const unsigned char *key)
+{
+    unsigned char* remove_space_key = (unsigned char*)remove_trailing_spaces((const char*)key);
+
+    return hash_any(remove_space_key, strlen((const char*)remove_space_key));
+}
+
+/*
+* while collation is utf8mb4_general_ci or utf8mb4_unicode_ci, return the like syntax result
+* p point to like str (pattern), t point to compare str (text).
+* Both sides are compared on case-folded sort weights, so the match is
+* case-insensitive.  Wildcards: '%' matches any run of characters, '_'
+* matches exactly one, '\\' escapes the next pattern character.
+* Returns LIKE_TRUE / LIKE_FALSE / LIKE_ABORT (text exhausted while the
+* pattern still requires characters).
+*/
+int matchtext_utf8mb4(unsigned char* t, int tlen, unsigned char* p, int plen)
+{
+    /* fast path: a lone '%' matches everything */
+    if (plen == 1 && *p == '%') {
+        return LIKE_TRUE;
+    }
+
+    GS_UINT32 p_word = 0;
+    GS_UINT32 t_word = 0;
+    unsigned char word_escape = '\\';
+    unsigned char word_one = '_';
+    unsigned char word_many = '%';
+
+    const unsigned char* p_word_end = p + plen;
+    const unsigned char* t_word_end = t + tlen;
+    const GS_UNICASE_INFO *uni_plane = &g_unicase_default;
+
+    /* the '%' branch recurses; guard against pattern-driven deep recursion */
+    check_stack_depth();
+    while (p < p_word_end && t < t_word_end) {
+        int p_word_bytes = get_current_char_sorted_value(p, p_word_end, &p_word, uni_plane);
+        int t_word_bytes = get_current_char_sorted_value(t, t_word_end, &t_word, uni_plane);
+
+        if (p_word == word_escape) {
+            /* escaped character: compare the following pattern char literally */
+            NEXT_WORD_POS(p, p_word_bytes);
+            if (p < p_word_end) {
+                p_word_bytes = get_current_char_sorted_value(p, p_word_end, &p_word, uni_plane);
+            } else {
+                ereport(ERROR, (errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE),
+                    errmsg("LIKE pattern must not end with escape character")));
+            }
+
+            if (p_word != t_word) {
+                return LIKE_FALSE;
+            }
+        } else if (p_word == word_many) {
+            NEXT_WORD_POS(p, p_word_bytes);
+            p_word_bytes = get_current_char_sorted_value(p, p_word_end, &p_word, uni_plane);
+
+            /* collapse consecutive '%' and consume one text char per '_' */
+            while (p < p_word_end) {
+                if (p_word == word_many) {
+                    NEXT_WORD_POS(p, p_word_bytes);
+                    p_word_bytes = get_current_char_sorted_value(p, p_word_end, &p_word, uni_plane);
+                } else if (p_word == word_one) {
+                    if (t >= t_word_end) {
+                        return LIKE_ABORT;
+                    }
+                    NEXT_WORD_POS(t, t_word_bytes);
+                    t_word_bytes = get_current_char_sorted_value(t, t_word_end, &t_word, uni_plane);
+                    NEXT_WORD_POS(p, p_word_bytes);
+                    p_word_bytes = get_current_char_sorted_value(p, p_word_end, &p_word, uni_plane);
+                } else {
+                    /* Reached a non-wildcard pattern char */
+                    break;
+                }
+            }
+
+            /* If we're at end of pattern, match: we have a trailing % which matches any remaining text string. */
+            if (p >= p_word_end) {
+                return LIKE_TRUE;
+            }
+
+            if (p_word == word_escape) {
+                if (p_word_end - p < 2) {
+                    ereport(ERROR, (errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE),
+                        errmsg("LIKE pattern must not end with escape character")));
+                }
+                NEXT_WORD_POS(p, p_word_bytes);
+                p_word_bytes = get_current_char_sorted_value(p, p_word_end, &p_word, uni_plane);
+            }
+
+            /* try every text position whose char matches the one after '%' */
+            while (t_word_end - t > 0) {
+                if (t_word == p_word) {
+                    int matched = matchtext_utf8mb4(t, t_word_end - t, p, p_word_end - p);
+                    if (matched != LIKE_FALSE) {
+                        return matched;
+                    }
+                }
+                NEXT_WORD_POS(t, t_word_bytes);
+                t_word_bytes = get_current_char_sorted_value(t, t_word_end, &t_word, uni_plane);
+            }
+            return LIKE_ABORT;
+        } else if (p_word == word_one) {
+            /* '_' consumes exactly one text character */
+            NEXT_WORD_POS(p, p_word_bytes);
+            p_word_bytes = get_current_char_sorted_value(p, p_word_end, &p_word, uni_plane);
+
+            NEXT_WORD_POS(t, t_word_bytes);
+            t_word_bytes = get_current_char_sorted_value(t, t_word_end, &t_word, uni_plane);
+            continue;
+        } else if (p_word != t_word) {
+            return LIKE_FALSE;
+        }
+
+        NEXT_WORD_POS(p, p_word_bytes);
+        NEXT_WORD_POS(t, t_word_bytes);
+    }
+
+    /* text left over but pattern exhausted: no match */
+    if (t_word_end - t > 0) {
+        return LIKE_FALSE;
+    }
+    /* remaining pattern may only consist of '%' wildcards */
+    if (p_word_end - p > 0) {
+        while (p < p_word_end) {
+            int p_word_bytes = get_current_char_sorted_value(p, p_word_end, &p_word, uni_plane);
+            p += p_word_bytes;
+            if (p_word != word_many) {
+                return LIKE_FALSE;
+            }
+        }
+    }
+    if (p >= p_word_end) {
+        return LIKE_TRUE;
+    }
+
+    return LIKE_ABORT;
+}
+
+/*
+* Decode the character at cur_str (bounded by str_end) and store its
+* case-folded sort weight in *next_word.
+* Returns the byte length of the character, or a negative GS_ERR_* code on
+* a short buffer / invalid UTF-8; in the error case *next_word is left
+* untouched (callers treat a non-positive return as a failed decode).
+*/
+static int get_current_char_sorted_value(const unsigned char* cur_str, const unsigned char* str_end,
+    GS_UINT32* next_word, const GS_UNICASE_INFO *uni_plane)
+{
+    int word_bytes = mb_wc_utf8mb4(cur_str, str_end, next_word);
+    /* only map to a sort weight when decoding actually produced a code point */
+    if (word_bytes > 0) {
+        sort_by_unicode(uni_plane->sort_page, next_word);
+    }
+    return word_bytes;
+}
\ No newline at end of file
diff --git a/src/common/backend/catalog/heap.cpp b/src/common/backend/catalog/heap.cpp
index 5b888b99b..0c3e3edee 100644
--- a/src/common/backend/catalog/heap.cpp
+++ b/src/common/backend/catalog/heap.cpp
@@ -62,7 +62,7 @@
#include "catalog/pg_proc.h"
#include "catalog/pg_statistic.h"
#include "catalog/pg_statistic_ext.h"
-#include "catalog/pg_synonym.h"
+#include "catalog/pg_synonym.h"
#include "catalog/pg_tablespace.h"
#include "catalog/pg_type.h"
#include "catalog/pg_type_fn.h"
@@ -193,8 +193,6 @@ static HashPartitionDefState *MakeHashDefaultSubpartition(PartitionState *partit
char *tablespacename);
static void MakeDefaultSubpartitionName(PartitionState *partitionState, char **subPartitionName,
const char *partitionName);
-static void getSubPartitionInfo(char partitionStrategy, Node *partitionDefState, List **subPartitionDefState,
- char **partitionName, char **tablespacename);
/* ----------------------------------------------------------------
* XXX UGLY HARD CODED BADNESS FOLLOWS XXX
*
@@ -2640,15 +2638,15 @@ Oid heap_create_with_catalog(const char *relname, Oid relnamespace, Oid reltable
CheckAttributeNamesTypes(tupdesc, relkind, allow_system_table_mods);
- /*
- * Check relation name to ensure that it doesn't conflict with existing synonym.
- */
- if (!IsInitdb && GetSynonymOid(relname, relnamespace, true) != InvalidOid) {
- ereport(ERROR,
- (errmsg("relation name is already used by an existing synonym in schema \"%s\"",
- get_namespace_name(relnamespace))));
- }
-
+ /*
+ * Check relation name to ensure that it doesn't conflict with existing synonym.
+ */
+ if (!IsInitdb && GetSynonymOid(relname, relnamespace, true) != InvalidOid) {
+ ereport(ERROR,
+ (errmsg("relation name is already used by an existing synonym in schema \"%s\"",
+ get_namespace_name(relnamespace))));
+ }
+
/*
* This would fail later on anyway, if the relation already exists. But
* by catching it here we can emit a nicer error message.
@@ -3746,7 +3744,7 @@ void heap_drop_with_catalog(Oid relid)
/*
* Store a default expression for column attnum of relation rel.
*/
-void StoreAttrDefault(Relation rel, AttrNumber attnum, Node* expr, char generatedCol, Node* update_expr)
+void StoreAttrDefault(Relation rel, AttrNumber attnum, Node* expr, char generatedCol, Node* update_expr, bool skip_dep)
{
char* adbin = NULL;
char* adbin_on_update = NULL;
@@ -3817,6 +3815,9 @@ void StoreAttrDefault(Relation rel, AttrNumber attnum, Node* expr, char generate
if (t_thrd.proc->workingVersionNum >= GENERATED_COL_VERSION_NUM) {
values[Anum_pg_attrdef_adgencol - 1] = CharGetDatum(generatedCol);
+ } else {
+ /* set to default value \0 */
+ values[Anum_pg_attrdef_adgencol - 1] = CharGetDatum(0);
}
adrel = heap_open(AttrDefaultRelationId, RowExclusiveLock);
@@ -3884,11 +3885,13 @@ void StoreAttrDefault(Relation rel, AttrNumber attnum, Node* expr, char generate
recordDependencyOn(&defobject, &colobject, DEPENDENCY_AUTO);
+ if (skip_dep) {
+ return;
+ }
/*
* Record dependencies on objects used in the expression, too.
*/
- if (generatedCol == ATTRIBUTE_GENERATED_STORED)
- {
+ if (generatedCol == ATTRIBUTE_GENERATED_STORED) {
/*
* Generated column: Dropping anything that the generation expression
* refers to automatically drops the generated column.
@@ -4047,29 +4050,11 @@ static void StoreConstraints(Relation rel, List* cooked_constraints)
SetRelationNumChecks(rel, numchecks);
}
-static void CheckAutoIncrementDataType(Form_pg_attribute attr)
-{
- switch (attr->atttypid) {
- case BOOLOID:
- case INT1OID:
- case INT2OID:
- case INT4OID:
- case INT8OID:
- case FLOAT4OID:
- case FLOAT8OID:
- break;
- default:
- ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("The datatype of column '%s' does not support auto_increment", attr->attname.data)));
- break;
- };
-}
-
static Node* CookAutoIncDefault(ParseState* pstate, Relation rel, RawColumnDefault* colDef, Form_pg_attribute atp)
{
AutoIncrement *autoinc = NULL;
- CheckAutoIncrementDataType(atp);
+ CheckAutoIncrementDatatype(atp->atttypid, NameStr(atp->attname));
if (RelHasAutoInc(rel)) {
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
(errmsg("Incorrect column definition, there can be only one auto_increment column"))));
@@ -4144,7 +4129,7 @@ List* AddRelationNewConstraints(
CookedConstraint* cooked = NULL;
AttrNumber autoinc_attnum = RelAutoIncAttrNum(rel);
Node* update_expr = NULL;
-
+ Bitmapset* generated_by_attrs = NULL;
/*
* Get info about existing constraints.
*/
@@ -4179,6 +4164,9 @@ List* AddRelationNewConstraints(
} else {
expr = cookDefault(pstate, colDef->raw_default, atp->atttypid, atp->atttypmod, NameStr(atp->attname),
colDef->generatedCol);
+ if (colDef->generatedCol == ATTRIBUTE_GENERATED_STORED) {
+ pull_varattnos(expr, 1, &generated_by_attrs);
+ }
}
}
@@ -4217,6 +4205,13 @@ List* AddRelationNewConstraints(
expr = NULL;
update_expr = NULL;
}
+ if (autoinc_attnum > 0 &&
+ bms_is_member(autoinc_attnum - FirstLowInvalidHeapAttributeNumber, generated_by_attrs)) {
+ bms_free_ext(generated_by_attrs);
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ (errmsg("generated column cannot refer to auto_increment column"))));
+ }
+ bms_free_ext(generated_by_attrs);
pstate->p_rawdefaultlist = NIL;
/*
@@ -5408,8 +5403,7 @@ static Datum BuildInterval(Node* partInterval)
* Description : Insert a entry to pg_partition. The entry is for partitioned-table or partition.
* Notes :
*/
-void addNewPartitionTuple(Relation pg_part_desc, Partition new_part_desc, int2vector* pkey, oidvector* intablespace,
- Datum interval, Datum maxValues, Datum transitionPoint, Datum reloptions, bool partkeyexprIsNull, bool partkeyIsFunc)
+void addNewPartitionTuple(Relation pg_part_desc, Partition new_part_desc, PartitionTupleInfo *partTupleInfo)
{
Form_pg_partition new_part_tup = new_part_desc->pd_part;
/*
@@ -5426,27 +5420,11 @@ void addNewPartitionTuple(Relation pg_part_desc, Partition new_part_desc, int2ve
* We know that no xacts older than RecentXmin are still running, so
* that will do.
*/
- if (new_part_tup->parttype == PART_OBJ_TYPE_PARTED_TABLE) {
- new_part_tup->relfrozenxid = (ShortTransactionId)InvalidTransactionId;
- } else {
- Assert(new_part_tup->parttype == PART_OBJ_TYPE_TABLE_PARTITION ||
- new_part_tup->parttype == PART_OBJ_TYPE_TABLE_SUB_PARTITION);
- new_part_tup->relfrozenxid = (ShortTransactionId)u_sess->utils_cxt.RecentXmin;
- }
+    /* relfrozenxid is deprecated, we use relfrozenxid64 instead */
+ new_part_tup->relfrozenxid = (ShortTransactionId)InvalidTransactionId;
/* Now build and insert the tuple */
- insertPartitionEntry(pg_part_desc,
- new_part_desc,
- new_part_desc->pd_id,
- pkey,
- intablespace,
- interval,
- maxValues,
- transitionPoint,
- reloptions,
- new_part_tup->parttype,
- partkeyexprIsNull,
- partkeyIsFunc);
+ insertPartitionEntry(pg_part_desc, new_part_desc, new_part_desc->pd_id, partTupleInfo);
}
static void deletePartitionTuple(Oid part_id)
@@ -6114,10 +6092,10 @@ Oid heapAddRangePartition(Relation pgPartRel, Oid partTableOid, Oid partTablespa
if (!PointerIsValid(newPartDef->boundary)) {
ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("boundary not defined for new partition")));
}
- if (newPartDef->boundary->length > MAX_PARTITIONKEY_NUM) {
+ if (newPartDef->boundary->length > PARTITION_PARTKEYMAXNUM) {
ereport(ERROR,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
- errmsg("too many partition keys, allowed is %d", MAX_PARTITIONKEY_NUM)));
+ errmsg("too many partition keys, allowed is %d", PARTITION_PARTKEYMAXNUM)));
}
/*new partition name check*/
@@ -6174,23 +6152,24 @@ Oid heapAddRangePartition(Relation pgPartRel, Oid partTableOid, Oid partTablespa
reloptions);
Assert(newPartitionOid == PartitionGetPartid(newPartition));
+ PartitionTupleInfo partTupleInfo = PartitionTupleInfo();
if (isSubpartition) {
InitSubPartitionDef(newPartition, partTableOid, PART_STRATEGY_RANGE);
+ partTupleInfo.partitionno = INVALID_PARTITION_NO;
+ partTupleInfo.subpartitionno = newPartDef->partitionno;
} else {
InitPartitionDef(newPartition, partTableOid, PART_STRATEGY_RANGE);
+ partTupleInfo.partitionno = newPartDef->partitionno;
+ partTupleInfo.subpartitionno = -list_length(newPartDef->subPartitionDefState);
}
+ partTupleInfo.pkey = subpartition_key;
+ partTupleInfo.boundaries = boundaryValue;
+ partTupleInfo.reloptions = reloptions;
+ partTupleInfo.partkeyexprIsNull = partkeyexprIsNull;
+ partTupleInfo.partkeyIsFunc = partkeyIsFunc;
/* step 3: insert into pg_partition tuple */
- addNewPartitionTuple(pgPartRel, /* RelationData pointer for pg_partition */
- newPartition, /* PartitionData pointer for partition */
- subpartition_key, /* */
- NULL,
- (Datum)0, /* interval*/
- boundaryValue, /* max values */
- (Datum)0, /* transition point */
- reloptions,
- partkeyexprIsNull,
- partkeyIsFunc);
+ addNewPartitionTuple(pgPartRel, newPartition, &partTupleInfo);
if (isSubpartition) {
PartitionCloseSmgr(newPartition);
@@ -6283,7 +6262,7 @@ char* GenIntervalPartitionName(Relation rel)
suffix = (suffix % MAX_PARTITION_NUM == 0 ? MAX_PARTITION_NUM : suffix % MAX_PARTITION_NUM);
rc = snprintf_s(partName, NAMEDATALEN, NAMEDATALEN - 1, INTERVAL_PARTITION_NAME_PREFIX_FMT, suffix);
securec_check_ss(rc, "\0", "\0");
- existingPartOid = partitionNameGetPartitionOid(
+ existingPartOid = PartitionNameGetPartitionOid(
rel->rd_id, partName, PART_OBJ_TYPE_TABLE_PARTITION, AccessShareLock, true, false, NULL, NULL, NoLock);
if (!OidIsValid(existingPartOid)) {
return partName;
@@ -6386,17 +6365,27 @@ Oid HeapAddIntervalPartition(Relation pgPartRel, Relation rel, Oid partTableOid,
pfree(partName);
Assert(newPartitionOid == PartitionGetPartid(newPartition));
+
+ /* the partitionno on relation tuple is negative */
+ int partitionno = -GetCurrentPartitionNo(RelOidGetPartitionTupleid(partTableOid));
+ if (!PARTITIONNO_IS_VALID(partitionno)) {
+ RelationResetPartitionno(partTableOid, RowExclusiveLock);
+ partitionno = -GetCurrentPartitionNo(RelOidGetPartitionTupleid(partTableOid));
+ Assert(PARTITIONNO_IS_VALID(partitionno));
+ }
+
InitPartitionDef(newPartition, partTableOid, PART_STRATEGY_INTERVAL);
+ PartitionTupleInfo partTupleInfo = PartitionTupleInfo();
+ partTupleInfo.boundaries = boundaryValue;
+ partTupleInfo.reloptions = reloptions;
+ partTupleInfo.partitionno = ++partitionno;
+ partTupleInfo.subpartitionno = INVALID_PARTITION_NO;
+
/* step 3: insert into pg_partition tuple*/
- addNewPartitionTuple(pgPartRel, /* RelationData pointer for pg_partition */
- newPartition, /* PartitionData pointer for partition */
- NULL,
- NULL,
- (Datum)0, /* interval */
- boundaryValue, /* max values */
- (Datum)0, /* transition point */
- reloptions);
+ addNewPartitionTuple(pgPartRel, newPartition, &partTupleInfo);
+ /* inplace update on partitioned table, because we can't cover the wait_clean_gpi info, which is inplace updated */
+ UpdateCurrentPartitionNo(RelOidGetPartitionTupleid(partTableOid), -partitionno, true);
relation = relation_open(partTableOid, NoLock);
PartitionCloseSmgr(newPartition);
@@ -6475,24 +6464,24 @@ Oid HeapAddListPartition(Relation pgPartRel, Oid partTableOid, Oid partTablespac
false, reloptions);
Assert(newListPartitionOid == PartitionGetPartid(newListPartition));
-
+ PartitionTupleInfo partTupleInfo = PartitionTupleInfo();
if (isSubpartition) {
InitSubPartitionDef(newListPartition, partTableOid, PART_STRATEGY_LIST);
+ partTupleInfo.partitionno = INVALID_PARTITION_NO;
+ partTupleInfo.subpartitionno = newListPartDef->partitionno;
} else {
InitPartitionDef(newListPartition, partTableOid, PART_STRATEGY_LIST);
+ partTupleInfo.partitionno = newListPartDef->partitionno;
+ partTupleInfo.subpartitionno = -list_length(newListPartDef->subPartitionDefState);
}
+ partTupleInfo.pkey = subpartition_key;
+ partTupleInfo.boundaries = boundaryValue;
+ partTupleInfo.reloptions = reloptions;
+ partTupleInfo.partkeyexprIsNull = partkeyexprIsNull;
+ partTupleInfo.partkeyIsFunc = partkeyIsFunc;
/* step 3: insert into pg_partition tuple */
- addNewPartitionTuple(pgPartRel, /* RelationData pointer for pg_partition */
- newListPartition, /* PartitionData pointer for partition */
- subpartition_key, /* */
- NULL,
- (Datum)0, /* interval*/
- boundaryValue, /* max values */
- (Datum)0, /* transition point */
- reloptions,
- partkeyexprIsNull,
- partkeyIsFunc);
+ addNewPartitionTuple(pgPartRel, newListPartition, &partTupleInfo);
if (isSubpartition) {
PartitionCloseSmgr(newListPartition);
@@ -6593,7 +6582,7 @@ Datum GetPartBoundaryByTuple(Relation rel, HeapTuple tuple)
return Timestamp2Boundarys(rel, Align2UpBoundary(value, partMap->intervalValue, boundaryTs));
}
-Oid AddNewIntervalPartition(Relation rel, void* insertTuple, bool isDDL)
+Oid AddNewIntervalPartition(Relation rel, void* insertTuple, int *partitionno, bool isDDL)
{
Relation pgPartRel = NULL;
Oid newPartOid = InvalidOid;
@@ -6608,21 +6597,17 @@ Oid AddNewIntervalPartition(Relation rel, void* insertTuple, bool isDDL)
CacheInvalidateRelcache(rel);
}
- /*
- * to avoid dead lock, we should release AccessShareLock on ADD_PARTITION_ACTION
- * locked by the transaction before aquire AccessExclusiveLock.
- */
- UnlockRelationForAccessIntervalPartTabIfHeld(rel);
- /* it will accept invalidation messages generated by other sessions in lockRelationForAddIntervalPartition. */
- LockRelationForAddIntervalPartition(rel);
+ /* it will accept invalidation messages */
+ LockPartitionObject(rel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK);
partitionRoutingForTuple(rel, insertTuple, u_sess->catalog_cxt.route, false);
- /* if the partition exists, return partition's oid */
+    /* if the partition exists, return partition's oid. This may occur if another session does the same work. */
if (u_sess->catalog_cxt.route->fileExist) {
Assert(OidIsValid(u_sess->catalog_cxt.route->partitionId));
- /* we should take AccessShareLock again before release AccessExclusiveLock for consistency. */
- LockRelationForAccessIntervalPartitionTab(rel);
- UnlockRelationForAddIntervalPartition(rel);
+ UnlockPartitionObject(rel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK);
+ if (PointerIsValid(partitionno)) {
+ *partitionno = GetPartitionnoFromSequence(rel->partMap, u_sess->catalog_cxt.route->partSeq);
+ }
return u_sess->catalog_cxt.route->partitionId;
}
@@ -6698,6 +6683,17 @@ Oid AddNewIntervalPartition(Relation rel, void* insertTuple, bool isDDL)
UpdatePgObjectChangecsn(RelationGetRelid(rel), rel->rd_rel->relkind);
}
+ /* take ExclusiveLock to avoid PARTITION DDL COMMIT until we finish the InitPlan. Oid info will be masked here, and
+ * be locked in CommitTransaction. */
+#ifndef ENABLE_MULTIPLE_NODES
+ AddPartitionDDLInfo(RelationGetRelid(rel));
+#endif
+
+ if (PointerIsValid(partitionno)) {
+ *partitionno = GetCurrentPartitionNo(newPartOid);
+ PARTITIONNO_VALID_ASSERT(*partitionno);
+ }
+
return newPartOid;
}
@@ -6781,23 +6777,24 @@ Oid HeapAddHashPartition(Relation pgPartRel, Oid partTableOid, Oid partTablespac
reloptions);
Assert(newHashPartitionOid == PartitionGetPartid(newHashPartition));
+ PartitionTupleInfo partTupleInfo = PartitionTupleInfo();
if (isSubpartition) {
InitSubPartitionDef(newHashPartition, partTableOid, PART_STRATEGY_HASH);
+ partTupleInfo.partitionno = INVALID_PARTITION_NO;
+ partTupleInfo.subpartitionno = newHashPartDef->partitionno;
} else {
InitPartitionDef(newHashPartition, partTableOid, PART_STRATEGY_HASH);
+ partTupleInfo.partitionno = newHashPartDef->partitionno;
+ partTupleInfo.subpartitionno = -list_length(newHashPartDef->subPartitionDefState);
}
+ partTupleInfo.pkey = subpartition_key;
+ partTupleInfo.boundaries = boundaryValue;
+ partTupleInfo.reloptions = reloptions;
+ partTupleInfo.partkeyexprIsNull = partkeyexprIsNull;
+ partTupleInfo.partkeyIsFunc = partkeyIsFunc;
/* step 3: insert into pg_partition tuple */
- addNewPartitionTuple(pgPartRel, /* RelationData pointer for pg_partition */
- newHashPartition, /* PartitionData pointer for partition */
- subpartition_key, /* */
- NULL,
- (Datum)0, /* interval*/
- boundaryValue, /* max values */
- (Datum)0, /* transition point */
- reloptions,
- partkeyexprIsNull,
- partkeyIsFunc);
+ addNewPartitionTuple(pgPartRel, newHashPartition, &partTupleInfo);
if (isSubpartition) {
PartitionCloseSmgr(newHashPartition);
@@ -6925,10 +6922,10 @@ static void addNewPartitionTupleForTable(Relation pg_partition_rel, const char*
RangePartitionDefState* lastPartition = NULL;
lastPartition = (RangePartitionDefState*)lfirst(partTableState->partitionList->tail);
- if (lastPartition->boundary->length > 4) {
+ if (lastPartition->boundary->length > PARTITION_PARTKEYMAXNUM) {
ereport(ERROR,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
- errmsg("number of partition key columns MUST less or equal than 4")));
+ errmsg("number of partition key columns MUST less or equal than %d", PARTITION_PARTKEYMAXNUM)));
}
}
@@ -6977,29 +6974,26 @@ static void addNewPartitionTupleForTable(Relation pg_partition_rel, const char*
/* Update reloptions with wait_clean_gpi=n */
newOptions = SetWaitCleanGpiRelOptions(reloptions, false);
+ PartitionTupleInfo partTupleInfo = PartitionTupleInfo();
+ partTupleInfo.pkey = partition_key_attr_no; /* number array for partition key column of partitioned table */
+ partTupleInfo.intablespace = interval_talespace;
+ partTupleInfo.interval = interval;
+ partTupleInfo.boundaries = (Datum)0;
+ partTupleInfo.transitionPoint = transition_point;
+ partTupleInfo.reloptions = newOptions;
+ partTupleInfo.partkeyexprIsNull = partkeyexprIsNull;
+ partTupleInfo.partkeyIsFunc = partkeyIsFunc;
+ partTupleInfo.partitionno = -list_length(partTableState->partitionList);
+ partTupleInfo.subpartitionno = INVALID_PARTITION_NO;
+
/*step 2: insert into pg_partition tuple*/
- addNewPartitionTuple(pg_partition_rel, /* RelationData pointer for pg_partition */
- new_partition, /* Local PartitionData pointer for new partition */
- partition_key_attr_no, /* number array for partition key column of partitioned table*/
- interval_talespace,
- interval, /* interval partitioned table's interval*/
- (Datum)0, /* partitioned table's boundary value is empty in pg_partition */
- transition_point, /* interval's partitioned table's transition point*/
- newOptions,
- partkeyexprIsNull,
- partkeyIsFunc);
+ addNewPartitionTuple(pg_partition_rel, new_partition, &partTupleInfo);
relation = relation_open(reloid, NoLock);
partitionClose(relation, new_partition, NoLock);
relation_close(relation, NoLock);
- if (partition_key_attr_no != NULL) {
- pfree(partition_key_attr_no);
- }
-
- if (interval_talespace != NULL) {
- pfree(interval_talespace);
- }
-
+ pfree_ext(partition_key_attr_no);
+ pfree_ext(interval_talespace);
if (interval != 0) {
pfree(DatumGetPointer(interval));
}
@@ -7098,46 +7092,27 @@ static RangePartitionDefState *MakeRangeDefaultSubpartition(PartitionState *part
return subPartitionDefState;
}
-static void getSubPartitionInfo(char partitionStrategy, Node *partitionDefState,
- List **subPartitionDefState, char **partitionName, char **tablespacename)
-{
- if (partitionStrategy == PART_STRATEGY_LIST) {
- *subPartitionDefState = ((ListPartitionDefState *)partitionDefState)->subPartitionDefState;
- *partitionName = ((ListPartitionDefState *)partitionDefState)->partitionName;
- *tablespacename = ((ListPartitionDefState *)partitionDefState)->tablespacename;
- } else if (partitionStrategy == PART_STRATEGY_HASH) {
- *subPartitionDefState = ((HashPartitionDefState *)partitionDefState)->subPartitionDefState;
- *partitionName = ((HashPartitionDefState *)partitionDefState)->partitionName;
- *tablespacename = ((HashPartitionDefState *)partitionDefState)->tablespacename;
- } else {
- *subPartitionDefState = ((RangePartitionDefState *)partitionDefState)->subPartitionDefState;
- *partitionName = ((RangePartitionDefState *)partitionDefState)->partitionName;
- *tablespacename = ((RangePartitionDefState *)partitionDefState)->tablespacename;
- }
-}
-
-Node *MakeDefaultSubpartition(PartitionState *partitionState, Node *partitionDefState)
+Node *MakeDefaultSubpartition(PartitionState *partitionState, PartitionDefState *partitionDefState)
{
PartitionState *subPartitionState = partitionState->subPartitionState;
- List *subPartitionDefStateList = NIL;
- char *partitionName = NULL;
- char *tablespacename = NULL;
- char partitionStrategy = partitionState->partitionStrategy;
char subPartitionStrategy = subPartitionState->partitionStrategy;
+ char *partitionName = partitionDefState->partitionName;
+ char *tablespacename = partitionDefState->tablespacename;
- getSubPartitionInfo(partitionStrategy, partitionDefState, &subPartitionDefStateList, &partitionName,
- &tablespacename);
if (subPartitionStrategy == PART_STRATEGY_LIST) {
ListPartitionDefState *subPartitionDefState =
MakeListDefaultSubpartition(partitionState, partitionName, tablespacename);
+ subPartitionDefState->partitionno = 1;
return (Node *)subPartitionDefState;
} else if (subPartitionStrategy == PART_STRATEGY_HASH) {
HashPartitionDefState *subPartitionDefState =
MakeHashDefaultSubpartition(partitionState, partitionName, tablespacename);
+ subPartitionDefState->partitionno = 1;
return (Node *)subPartitionDefState;
} else {
RangePartitionDefState *subPartitionDefState =
MakeRangeDefaultSubpartition(partitionState, partitionName, tablespacename);
+ subPartitionDefState->partitionno = 1;
return (Node *)subPartitionDefState;
}
}
@@ -7154,15 +7129,11 @@ List *addNewSubPartitionTuplesForPartition(Relation pgPartRel, Oid partTableOid,
}
PartitionState *subPartitionState = partitionState->subPartitionState;
- List *subPartitionDefStateList = NIL;
- char *partitionName = NULL;
- char *tablespacename = NULL;
ListCell *lc = NULL;
Oid subpartOid = InvalidOid;
- char partitionStrategy = partitionState->partitionStrategy;
char subPartitionStrategy = subPartitionState->partitionStrategy;
- getSubPartitionInfo(partitionStrategy, partitionDefState, &subPartitionDefStateList, &partitionName,
- &tablespacename);
+ List *subPartitionDefStateList = ((PartitionDefState *)partitionDefState)->subPartitionDefState;
+
foreach (lc, subPartitionDefStateList) {
if (subPartitionStrategy == PART_STRATEGY_LIST) {
ListPartitionDefState *subPartitionDefState = (ListPartitionDefState *)lfirst(lc);
@@ -7279,7 +7250,8 @@ static void addNewPartitionTuplesForPartition(Relation pg_partition_rel, Oid rel
if (strategy == PART_STRATEGY_LIST) {
ListPartitionDefState* partitionDefState = (ListPartitionDefState*)lfirst(cell);
if (partTableState->subPartitionState != NULL && partitionDefState->subPartitionDefState == NULL) {
- Node *subPartitionDefState = MakeDefaultSubpartition(partTableState, (Node *)partitionDefState);
+ Node *subPartitionDefState =
+ MakeDefaultSubpartition(partTableState, (PartitionDefState *)partitionDefState);
partitionDefState->subPartitionDefState =
lappend(partitionDefState->subPartitionDefState, subPartitionDefState);
}
@@ -7308,7 +7280,8 @@ static void addNewPartitionTuplesForPartition(Relation pg_partition_rel, Oid rel
} else if (strategy == PART_STRATEGY_HASH) {
HashPartitionDefState* partitionDefState = (HashPartitionDefState*)lfirst(cell);
if (partTableState->subPartitionState != NULL && partitionDefState->subPartitionDefState == NULL) {
- Node *subPartitionDefState = MakeDefaultSubpartition(partTableState, (Node *)partitionDefState);
+ Node *subPartitionDefState =
+ MakeDefaultSubpartition(partTableState, (PartitionDefState *)partitionDefState);
partitionDefState->subPartitionDefState =
lappend(partitionDefState->subPartitionDefState, subPartitionDefState);
}
@@ -7337,7 +7310,8 @@ static void addNewPartitionTuplesForPartition(Relation pg_partition_rel, Oid rel
} else {
RangePartitionDefState* partitionDefState = (RangePartitionDefState*)lfirst(cell);
if (partTableState->subPartitionState != NULL && partitionDefState->subPartitionDefState == NULL) {
- Node *subPartitionDefState = MakeDefaultSubpartition(partTableState, (Node *)partitionDefState);
+ Node *subPartitionDefState =
+                    MakeDefaultSubpartition(partTableState, (PartitionDefState *)partitionDefState);
partitionDefState->subPartitionDefState =
lappend(partitionDefState->subPartitionDefState, subPartitionDefState);
}
@@ -7496,7 +7470,7 @@ int lookupHBucketid(oidvector *buckets, int low, int2 bktId)
* Description :
* Notes :
*/
-Oid heapTupleGetPartitionId(Relation rel, void *tuple, bool isDDL, bool canIgnore)
+Oid heapTupleGetPartitionId(Relation rel, void *tuple, int *partitionno, bool isDDL, bool canIgnore)
{
Oid partitionid = InvalidOid;
@@ -7507,6 +7481,9 @@ Oid heapTupleGetPartitionId(Relation rel, void *tuple, bool isDDL, bool canIgnor
if (u_sess->catalog_cxt.route->fileExist) {
Assert(OidIsValid(u_sess->catalog_cxt.route->partitionId));
partitionid = u_sess->catalog_cxt.route->partitionId;
+ if (PointerIsValid(partitionno)) {
+ *partitionno = GetPartitionnoFromSequence(rel->partMap, u_sess->catalog_cxt.route->partSeq);
+ }
return partitionid;
}
@@ -7525,7 +7502,7 @@ Oid heapTupleGetPartitionId(Relation rel, void *tuple, bool isDDL, bool canIgnor
(errcode(ERRCODE_NO_DATA_FOUND), errmsg("inserted partition key does not map to any table partition")));
} break;
case PART_AREA_INTERVAL: {
- return AddNewIntervalPartition(rel, tuple, isDDL);
+ return AddNewIntervalPartition(rel, tuple, partitionno, isDDL);
} break;
case PART_AREA_LIST: {
ereport(
@@ -7553,14 +7530,15 @@ Oid heapTupleGetSubPartitionId(Relation rel, void *tuple)
{
Oid partitionId = InvalidOid;
Oid subPartitionId = InvalidOid;
+ int partitionno = INVALID_PARTITION_NO;
Partition part = NULL;
Relation partRel = NULL;
/* get partititon oid for the record */
- partitionId = heapTupleGetPartitionId(rel, tuple);
- part = partitionOpen(rel, partitionId, RowExclusiveLock);
+ partitionId = heapTupleGetPartitionId(rel, tuple, &partitionno);
+ part = PartitionOpenWithPartitionno(rel, partitionId, partitionno, RowExclusiveLock);
partRel = partitionGetRelation(rel, part);
/* get subpartititon oid for the record */
- subPartitionId = heapTupleGetPartitionId(partRel, tuple);
+ subPartitionId = heapTupleGetPartitionId(partRel, tuple, NULL);
releaseDummyRelation(&partRel);
partitionClose(rel, part, RowExclusiveLock);
diff --git a/src/common/backend/catalog/index.cpp b/src/common/backend/catalog/index.cpp
index a9c9fa4d7..d982ae93b 100644
--- a/src/common/backend/catalog/index.cpp
+++ b/src/common/backend/catalog/index.cpp
@@ -203,7 +203,7 @@ static bool relationHasPrimaryKey(Relation rel)
* Caller had better have at least ShareLock on the table, else the not-null
* checking isn't trustworthy.
*/
-void index_check_primary_key(Relation heapRel, IndexInfo* indexInfo, bool is_alter_table)
+void index_check_primary_key(Relation heapRel, IndexInfo* indexInfo, bool is_alter_table, bool is_modify_primary)
{
List* cmds = NIL;
int i;
@@ -220,6 +220,14 @@ void index_check_primary_key(Relation heapRel, IndexInfo* indexInfo, bool is_alt
errmsg("multiple primary keys for table \"%s\" are not allowed", RelationGetRelationName(heapRel))));
}
+ /*
+ * if the primary key is modified to other location, AT_SetNotNull has been recorded.
+ * rewrite table data after modifying.
+ */
+ if (is_modify_primary) {
+ return;
+ }
+
/*
* Check that all of the attributes in a primary key are marked as not
* null, otherwise attempt to ALTER TABLE .. SET NOT NULL
@@ -1285,14 +1293,12 @@ Oid partition_index_create(const char* partIndexName, /* the name of partition i
partitionIndex->pd_part->relallvisible = 0;
partitionIndex->pd_part->relfrozenxid = (ShortTransactionId)InvalidTransactionId;
+ PartitionTupleInfo partTupleInfo = PartitionTupleInfo();
/* insert into pg_partition */
#ifndef ENABLE_MULTIPLE_NODES
- insertPartitionEntry(pg_partition_rel, partitionIndex, partitionIndex->pd_id, NULL, NULL, 0, 0, 0, indexRelOptions,
- PART_OBJ_TYPE_INDEX_PARTITION);
-#else
- insertPartitionEntry(
- pg_partition_rel, partitionIndex, partitionIndex->pd_id, NULL, NULL, 0, 0, 0, 0, PART_OBJ_TYPE_INDEX_PARTITION);
+ partTupleInfo.reloptions = indexRelOptions;
#endif
+ insertPartitionEntry(pg_partition_rel, partitionIndex, partitionIndex->pd_id, &partTupleInfo);
/* Make the above change visible */
CommandCounterIncrement();
diff --git a/src/common/backend/catalog/namespace.cpp b/src/common/backend/catalog/namespace.cpp
index 70e1bd455..166a5eba7 100644
--- a/src/common/backend/catalog/namespace.cpp
+++ b/src/common/backend/catalog/namespace.cpp
@@ -80,6 +80,7 @@
#include "c.h"
#include "pgstat.h"
#include "catalog/pg_proc_fn.h"
+#include "catalog/gs_utf8_collation.h"
#ifdef ENABLE_MULTIPLE_NODES
#include "streaming/planner.h"
@@ -3097,6 +3098,7 @@ void DeconstructQualifiedName(const List* names, char** nspname_p, char** objnam
if (OidIsValid(PackageNameGetOid(pkgname, nspoid))) {
schemaname = strVal(linitial(names));
objname = pkgname;
+ pkgname = NULL;
} else {
pkgname = NULL;
}
@@ -4064,6 +4066,29 @@ void RemoveTmpNspFromSearchPath(Oid tmpnspId)
MemoryContextSwitchTo(oldcxt);
}
+/* If the collate string is in uppercase, change to lowercase and search it again */
+Oid get_collation_oid_with_lower_name(const char* collation_name, int charset)
+{
+ Oid colloid = InvalidOid;
+ char* lower_coll_name = pstrdup(collation_name);
+ lower_coll_name = pg_strtolower(lower_coll_name);
+ if (charset == PG_INVALID_ENCODING) {
+ CatCList* list = NULL;
+ HeapTuple coll_tup;
+ list = SearchSysCacheList1(COLLNAMEENCNSP, PointerGetDatum(lower_coll_name));
+ if (list->n_members == 1) {
+ coll_tup = t_thrd.lsc_cxt.FetchTupleFromCatCList(list, 0);
+ colloid = HeapTupleGetOid(coll_tup);
+ }
+ ReleaseSysCacheList(list);
+ } else {
+ colloid = GetSysCacheOid3(COLLNAMEENCNSP, PointerGetDatum(lower_coll_name),
+ Int32GetDatum(charset), ObjectIdGetDatum(PG_CATALOG_NAMESPACE));
+ }
+
+ return colloid;
+}
+
/*
* get_collation_oid - find a collation by possibly qualified name
*/
@@ -4086,11 +4111,11 @@ Oid get_collation_oid(List* name, bool missing_ok)
/* first try for encoding-specific entry, then any-encoding */
colloid = GetSysCacheOid3(
COLLNAMEENCNSP, PointerGetDatum(collation_name), Int32GetDatum(dbencoding), ObjectIdGetDatum(namespaceId));
- if (OidIsValid(colloid))
+ if (OidIsValid(colloid) && is_support_b_format_collation(colloid))
return colloid;
colloid = GetSysCacheOid3(
COLLNAMEENCNSP, PointerGetDatum(collation_name), Int32GetDatum(-1), ObjectIdGetDatum(namespaceId));
- if (OidIsValid(colloid))
+ if (OidIsValid(colloid) && is_support_b_format_collation(colloid))
return colloid;
} else {
/* search for it in search path */
@@ -4110,14 +4135,14 @@ Oid get_collation_oid(List* name, bool missing_ok)
PointerGetDatum(collation_name),
Int32GetDatum(dbencoding),
ObjectIdGetDatum(namespaceId));
- if (OidIsValid(colloid)) {
+ if (OidIsValid(colloid) && is_support_b_format_collation(colloid)) {
list_free_ext(tempActiveSearchPath);
return colloid;
}
colloid = GetSysCacheOid3(
COLLNAMEENCNSP, PointerGetDatum(collation_name), Int32GetDatum(-1), ObjectIdGetDatum(namespaceId));
- if (OidIsValid(colloid)) {
+ if (OidIsValid(colloid) && is_support_b_format_collation(colloid)) {
list_free_ext(tempActiveSearchPath);
return colloid;
}
@@ -4126,6 +4151,12 @@ Oid get_collation_oid(List* name, bool missing_ok)
list_free_ext(tempActiveSearchPath);
}
+ if (DB_IS_CMPT(B_FORMAT)) {
+ colloid = get_collation_oid_with_lower_name(collation_name, dbencoding);
+ if (OidIsValid(colloid) && is_support_b_format_collation(colloid)) {
+ return colloid;
+ }
+ }
/* Not found in path */
if (!missing_ok)
ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT),
@@ -4538,6 +4569,7 @@ static void InitTempTableNamespace(void)
create_stmt->schemaElts = NULL;
create_stmt->schemaname = namespaceName;
create_stmt->temptype = Temp_Rel;
+ create_stmt->charset = PG_INVALID_ENCODING;
ret = snprintf_s(
str, sizeof(str), sizeof(str) - 1, "CREATE SCHEMA %s AUTHORIZATION \"%s\"", namespaceName, bootstrap_username);
securec_check_ss(ret, "\0", "\0");
@@ -4592,6 +4624,7 @@ static void InitTempTableNamespace(void)
create_stmt->schemaElts = NULL;
create_stmt->schemaname = toastNamespaceName;
create_stmt->temptype = Temp_Toast;
+ create_stmt->charset = PG_INVALID_ENCODING;
rc = memset_s(str, sizeof(str), 0, sizeof(str));
securec_check(rc, "", "");
ret = snprintf_s(str,
diff --git a/src/common/backend/catalog/performance_views.sql b/src/common/backend/catalog/performance_views.sql
index 3d92cd288..fbb0effb9 100644
--- a/src/common/backend/catalog/performance_views.sql
+++ b/src/common/backend/catalog/performance_views.sql
@@ -3993,7 +3993,8 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp
OUT lwlock_wait_time bigint,
OUT details bytea,
OUT is_slow_sql bool,
- OUT trace_id text)
+ OUT trace_id text,
+ OUT advise text)
RETURNS setof record
AS $$
DECLARE
@@ -4061,6 +4062,7 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp
details := row_data.details;
is_slow_sql := row_data.is_slow_sql;
trace_id := row_data.trace_id;
+ advise := row_data.advise;
return next;
END LOOP;
END LOOP;
@@ -4123,7 +4125,8 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp
OUT lwlock_wait_time bigint,
OUT details bytea,
OUT is_slow_sql bool,
- OUT trace_id text)
+ OUT trace_id text,
+ OUT advise text)
RETURNS setof record
AS $$
DECLARE
@@ -4191,6 +4194,7 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp
details := row_data.details;
is_slow_sql := row_data.is_slow_sql;
trace_id := row_data.trace_id;
+ advise := row_data.advise;
return next;
END LOOP;
END LOOP;
@@ -4437,7 +4441,7 @@ DECLARE
BEGIN
query_str_nodes := 'select * from dbe_perf.node_name';
FOR row_name IN EXECUTE(query_str_nodes) LOOP
- query_str := 'select * from get_node_stat_reset_time()';
+ query_str := 'select * from pg_catalog.get_node_stat_reset_time()';
FOR row_data IN EXECUTE(query_str) LOOP
node_name := row_name.node_name;
reset_time := row_data.get_node_stat_reset_time;
diff --git a/src/common/backend/catalog/pg_job_proc.cpp b/src/common/backend/catalog/pg_job_proc.cpp
index d4a2546f0..5825ed57d 100644
--- a/src/common/backend/catalog/pg_job_proc.cpp
+++ b/src/common/backend/catalog/pg_job_proc.cpp
@@ -189,7 +189,7 @@ HeapTuple search_from_pg_job_proc_no_exception(Relation rel, Datum job_name)
* @param name
* @return HeapTuple
*/
-static HeapTuple search_from_pg_job_proc(Relation rel, Datum name)
+HeapTuple search_from_pg_job_proc(Relation rel, Datum name)
{
HeapTuple tuple = search_from_pg_job_proc_no_exception(rel, name);
if (!HeapTupleIsValid(tuple)) {
diff --git a/src/common/backend/catalog/pg_namespace.cpp b/src/common/backend/catalog/pg_namespace.cpp
index d4667fe88..61ad105cc 100644
--- a/src/common/backend/catalog/pg_namespace.cpp
+++ b/src/common/backend/catalog/pg_namespace.cpp
@@ -28,6 +28,7 @@
#include "utils/rel.h"
#include "utils/rel_gs.h"
#include "utils/syscache.h"
+#include "catalog/pg_collation.h"
/* ----------------
* NamespaceCreate
@@ -41,7 +42,7 @@
* schema to become part of the extension.)
* ---------------
*/
-Oid NamespaceCreate(const char* nspName, Oid ownerId, bool isTemp, bool hasBlockChain)
+Oid NamespaceCreate(const char* nspName, Oid ownerId, bool isTemp, bool hasBlockChain, Oid colloid)
{
Relation nspdesc;
HeapTuple tup;
@@ -78,6 +79,12 @@ Oid NamespaceCreate(const char* nspName, Oid ownerId, bool isTemp, bool hasBlock
nulls[Anum_pg_namespace_nspacl - 1] = true;
values[Anum_pg_namespace_in_redistribution - 1] = 'n';
+ if (colloid != InvalidOid) {
+ values[Anum_pg_namespace_nspcollation - 1] = colloid;
+ } else {
+ nulls[Anum_pg_namespace_nspcollation - 1] = true;
+ }
+
nspdesc = heap_open(NamespaceRelationId, RowExclusiveLock);
tupDesc = nspdesc->rd_att;
@@ -138,4 +145,19 @@ bool IsLedgerNameSpace(Oid nspOid)
ereport(ERROR, (errcode(ERRCODE_UNDEFINED_SCHEMA), errmsg("schema of oid \"%u\" does not exist", nspOid)));
}
return is_nspblockchain;
+}
+
+Oid get_nsp_default_collation(Oid nsp_oid)
+{
+ Oid nsp_def_coll = InvalidOid;
+ HeapTuple tp = SearchSysCache1(NAMESPACEOID, ObjectIdGetDatum(nsp_oid));
+ if (HeapTupleIsValid(tp)) {
+ bool is_null = true;
+ Datum datum = SysCacheGetAttr(NAMESPACEOID, tp, Anum_pg_namespace_nspcollation, &is_null);
+ if (!is_null) {
+ nsp_def_coll = DatumGetObjectId(datum);
+ }
+ ReleaseSysCache(tp);
+ }
+ return nsp_def_coll;
}
\ No newline at end of file
diff --git a/src/common/backend/catalog/pg_partition.cpp b/src/common/backend/catalog/pg_partition.cpp
index b67be9629..408ec9b6a 100644
--- a/src/common/backend/catalog/pg_partition.cpp
+++ b/src/common/backend/catalog/pg_partition.cpp
@@ -24,6 +24,7 @@
* -------------------------------------------------------------------------
*/
+#include "access/sysattr.h"
#include "catalog/namespace.h"
#include "catalog/pg_partition_fn.h"
#include "catalog/pg_partition.h"
@@ -35,6 +36,7 @@
#include "tcop/utility.h"
#include "utils/acl.h"
#include "utils/inval.h"
+#include "utils/knl_relcache.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"
#include "access/genam.h"
@@ -45,9 +47,8 @@
#include "utils/snapmgr.h"
#include "utils/lsyscache.h"
-void insertPartitionEntry(Relation pg_partition_desc, Partition new_part_desc, Oid new_part_id, int2vector* pkey,
- const oidvector* tablespaces, Datum interval, Datum maxValues, Datum transitionPoint, Datum reloptions,
- char parttype, bool partkeyexprIsNull, bool partkeyIsFunc)
+void insertPartitionEntry(Relation pg_partition_desc, Partition new_part_desc, Oid new_part_id,
+ PartitionTupleInfo *partTupleInfo)
{
Datum values[Natts_pg_partition];
bool nulls[Natts_pg_partition];
@@ -56,9 +57,10 @@ void insertPartitionEntry(Relation pg_partition_desc, Partition new_part_desc, O
errno_t errorno = EOK;
pd_part = new_part_desc->pd_part;
- Assert(pd_part->parttype == PART_OBJ_TYPE_PARTED_TABLE || pd_part->parttype == PART_OBJ_TYPE_TOAST_TABLE ||
- pd_part->parttype == PART_OBJ_TYPE_TABLE_PARTITION || pd_part->parttype == PART_OBJ_TYPE_INDEX_PARTITION ||
- pd_part->parttype == PART_OBJ_TYPE_TABLE_SUB_PARTITION);
+ char parttype = pd_part->parttype;
+ Assert(parttype == PART_OBJ_TYPE_PARTED_TABLE || parttype == PART_OBJ_TYPE_TOAST_TABLE ||
+ parttype == PART_OBJ_TYPE_TABLE_PARTITION || parttype == PART_OBJ_TYPE_INDEX_PARTITION ||
+ parttype == PART_OBJ_TYPE_TABLE_SUB_PARTITION);
/* This is a tad tedious, but way cleaner than what we used to do... */
errorno = memset_s(values, sizeof(values), 0, sizeof(values));
@@ -68,7 +70,7 @@ void insertPartitionEntry(Relation pg_partition_desc, Partition new_part_desc, O
securec_check_c(errorno, "\0", "\0");
values[Anum_pg_partition_relname - 1] = NameGetDatum(&pd_part->relname);
- values[Anum_pg_partition_parttype - 1] = CharGetDatum(pd_part->parttype);
+ values[Anum_pg_partition_parttype - 1] = CharGetDatum(parttype);
values[Anum_pg_partition_parentid - 1] = ObjectIdGetDatum(pd_part->parentid);
values[Anum_pg_partition_rangenum - 1] = UInt32GetDatum(pd_part->rangenum);
values[Anum_pg_partition_intervalnum - 1] = UInt32GetDatum(pd_part->intervalnum);
@@ -89,15 +91,15 @@ void insertPartitionEntry(Relation pg_partition_desc, Partition new_part_desc, O
values[Anum_pg_partition_relfrozenxid - 1] = ShortTransactionIdGetDatum(pd_part->relfrozenxid);
/* partition key */
- if (pkey != NULL) {
- values[Anum_pg_partition_partkey - 1] = PointerGetDatum(pkey);
+ if (partTupleInfo->pkey != NULL) {
+ values[Anum_pg_partition_partkey - 1] = PointerGetDatum(partTupleInfo->pkey);
} else {
nulls[Anum_pg_partition_partkey - 1] = true;
}
/* interval tablespaces */
- if (tablespaces != NULL) {
- values[Anum_pg_partition_intablespace - 1] = PointerGetDatum(tablespaces);
+ if (partTupleInfo->intablespace != NULL) {
+ values[Anum_pg_partition_intablespace - 1] = PointerGetDatum(partTupleInfo->intablespace);
} else {
nulls[Anum_pg_partition_intablespace - 1] = true;
}
@@ -105,38 +107,42 @@ void insertPartitionEntry(Relation pg_partition_desc, Partition new_part_desc, O
nulls[Anum_pg_partition_intspnum - 1] = true;
/* interval */
- if (interval != (Datum)0) {
- values[Anum_pg_partition_interval - 1] = interval;
+ if (partTupleInfo->interval != (Datum)0) {
+ values[Anum_pg_partition_interval - 1] = partTupleInfo->interval;
} else {
nulls[Anum_pg_partition_interval - 1] = true;
}
/* maxvalue */
- if (maxValues != (Datum)0) {
- values[Anum_pg_partition_boundaries - 1] = maxValues;
+ if (partTupleInfo->boundaries != (Datum)0) {
+ values[Anum_pg_partition_boundaries - 1] = partTupleInfo->boundaries;
} else {
nulls[Anum_pg_partition_boundaries - 1] = true;
}
/* transit point */
- if (transitionPoint != (Datum)0) {
- values[Anum_pg_partition_transit - 1] = transitionPoint;
+ if (partTupleInfo->transitionPoint != (Datum)0) {
+ values[Anum_pg_partition_transit - 1] = partTupleInfo->transitionPoint;
} else {
nulls[Anum_pg_partition_transit - 1] = true;
}
- if (reloptions != (Datum)0) {
- values[Anum_pg_partition_reloptions - 1] = reloptions;
+ if (partTupleInfo->reloptions != (Datum)0) {
+ values[Anum_pg_partition_reloptions - 1] = partTupleInfo->reloptions;
nulls[Anum_pg_partition_reloptions - 1] = false;
} else {
nulls[Anum_pg_partition_reloptions - 1] = true;
}
- if (parttype == PART_OBJ_TYPE_TABLE_PARTITION) {
+ /* we only set relfrozenxid64 in a leaf partition, which owns a relfilenode */
+ bool isleafpartition =
+ ((parttype == PART_OBJ_TYPE_TABLE_PARTITION || parttype == PART_OBJ_TYPE_TABLE_SUB_PARTITION) &&
+ OidIsValid(pd_part->relfilenode));
+ if (isleafpartition) {
values[Anum_pg_partition_relfrozenxid64 - 1] = u_sess->utils_cxt.RecentXmin;
#ifndef ENABLE_MULTIPLE_NODES
- if (!is_cstore_option(RELKIND_RELATION, reloptions)) {
+ if (!is_cstore_option(RELKIND_RELATION, partTupleInfo->reloptions)) {
values[Anum_pg_partition_relminmxid - 1] = GetOldestMultiXactId();
} else {
values[Anum_pg_partition_relminmxid - 1] = InvalidMultiXactId;
@@ -148,13 +154,25 @@ void insertPartitionEntry(Relation pg_partition_desc, Partition new_part_desc, O
values[Anum_pg_partition_relminmxid - 1] = InvalidMultiXactId;
#endif
}
- if (partkeyexprIsNull) {
+ if (partTupleInfo->partkeyexprIsNull) {
nulls[Anum_pg_partition_partkeyexpr - 1] = true;
- } else if (partkeyIsFunc) {
+ } else if (partTupleInfo->partkeyIsFunc) {
values[Anum_pg_partition_partkeyexpr - 1] = CStringGetTextDatum("partkeyisfunc");
} else {
values[Anum_pg_partition_partkeyexpr - 1] = CStringGetTextDatum("");
}
+
+ if (partTupleInfo->partitionno != INVALID_PARTITION_NO) {
+ values[Anum_pg_partition_partitionno - 1] = Int32GetDatum(partTupleInfo->partitionno);
+ } else {
+ nulls[Anum_pg_partition_partitionno - 1] = true;
+ }
+ if (partTupleInfo->subpartitionno != INVALID_PARTITION_NO) {
+ values[Anum_pg_partition_subpartitionno - 1] = Int32GetDatum(partTupleInfo->subpartitionno);
+ } else {
+ nulls[Anum_pg_partition_subpartitionno - 1] = true;
+ }
+
/* form a tuple using values and null array, and insert it */
tup = heap_form_tuple(RelationGetDescr(pg_partition_desc), values, nulls);
HeapTupleSetOid(tup, new_part_id);
@@ -183,7 +201,7 @@ typedef struct {
void ExceptionHandlerForPartition(GetPartitionOidArgs *args, Oid partitionOid, bool missingOk)
{
- // For subpartition, we'll deal with it in subPartitionGetSubPartitionOid
+ // For subpartition, we'll deal with it in SubPartitionGetSubPartitionOid
if (args->objectType != PART_OBJ_TYPE_TABLE_SUB_PARTITION && !OidIsValid(partitionOid)) {
if (!missingOk) {
if (args->partitionName != NULL) {
@@ -207,7 +225,7 @@ void ExceptionHandlerForPartition(GetPartitionOidArgs *args, Oid partitionOid, b
* @Return: partition oid
* @See also:
*/
-static Oid partitionGetPartitionOid(GetPartitionOidArgs* args, LOCKMODE lockMode, bool missingOk, bool noWait)
+static Oid PartitionGetPartitionOid(GetPartitionOidArgs* args, LOCKMODE lockMode, bool missingOk, bool noWait)
{
Oid partitionOid = InvalidOid;
Oid partitionOldOid = InvalidOid;
@@ -351,23 +369,26 @@ static Oid partitionGetPartitionOid(GetPartitionOidArgs* args, LOCKMODE lockMode
return partitionOid;
}
-Oid SubPartitionGetSubPartitionOid(GetPartitionOidArgs *args, Oid partitionedRelationOid, Oid *partOidForSubPart,
- LOCKMODE lockMode, bool missingOk, bool noWait)
+static Oid SubPartitionGetSubPartitionOid(GetPartitionOidArgs *args, Oid partitionedRelationOid, Oid *partOidForSubPart,
+ LOCKMODE partlock, LOCKMODE subpartlock, bool missingOk, bool noWait)
{
- Relation rel = relation_open(partitionedRelationOid, lockMode);
+ Relation rel = relation_open(partitionedRelationOid, AccessShareLock);
List *partOidList = relationGetPartitionOidList(rel);
ListCell *cell = NULL;
Oid partitionOid = InvalidOid;
foreach (cell, partOidList) {
Oid partOid = lfirst_oid(cell);
args->partitionedRelOid = partOid;
- partitionOid = partitionGetPartitionOid(args, lockMode, missingOk, noWait);
+ partitionOid = PartitionGetPartitionOid(args, subpartlock, missingOk, noWait);
if (OidIsValid(partitionOid)) {
+ if (partlock != NoLock) {
+ LockPartitionOid(partitionedRelationOid, partOid, partlock);
+ }
*partOidForSubPart = partOid;
break;
}
}
- relation_close(rel, lockMode);
+ relation_close(rel, AccessShareLock);
if (!OidIsValid(partitionOid)) {
if (!missingOk) {
@@ -394,9 +415,9 @@ Oid SubPartitionGetSubPartitionOid(GetPartitionOidArgs *args, Oid partitionedRel
*
* If nowait = true, throw an error if we'd have to wait for a lock.
*/
-Oid partitionNameGetPartitionOid(Oid partitionedRelationOid, const char* partitionName, char objectType,
+Oid PartitionNameGetPartitionOid(Oid partitionedRelationOid, const char* partitionName, char objectType,
LOCKMODE lockMode, bool missingOk, bool noWait, PartitionNameGetPartidCallback callback, void* callback_arg,
- LOCKMODE callbackobj_lockMode, Oid *partOidForSubPart)
+ LOCKMODE callbackobj_lockMode)
{
if (!OidIsValid(partitionedRelationOid) || !PointerIsValid(partitionName)) {
return InvalidOid;
@@ -418,19 +439,45 @@ Oid partitionNameGetPartitionOid(Oid partitionedRelationOid, const char* partiti
Relation partitionRelRelation = relation_open(PartitionRelationId, AccessShareLock);
- Oid partitionOid = InvalidOid;
- if (objectType == PART_OBJ_TYPE_TABLE_SUB_PARTITION) {
- partitionOid = SubPartitionGetSubPartitionOid(&args, partitionedRelationOid, partOidForSubPart, lockMode,
- missingOk, noWait);
- } else {
- partitionOid = partitionGetPartitionOid(&args, lockMode, missingOk, noWait);
- }
+ Oid partitionOid = PartitionGetPartitionOid(&args, lockMode, missingOk, noWait);
relation_close(partitionRelRelation, AccessShareLock);
return partitionOid;
}
+Oid SubPartitionNameGetSubPartitionOid(Oid partitionedRelationOid, const char* subpartitionName, LOCKMODE partlock,
+ LOCKMODE subpartlock, bool missingOk, bool noWait, PartitionNameGetPartidCallback callback, void* callback_arg,
+ LOCKMODE callbackobj_lockMode, Oid *partOidForSubPart)
+{
+ if (!OidIsValid(partitionedRelationOid) || !PointerIsValid(subpartitionName)) {
+ return InvalidOid;
+ }
+
+ GetPartitionOidArgs args;
+ /* get partition oid from given name */
+ args.givenPartitionName = true;
+ args.partitionedRelOid = partitionedRelationOid;
+ args.partitionName = subpartitionName;
+ args.objectType = PART_OBJ_TYPE_TABLE_SUB_PARTITION;
+ args.callback = callback;
+ args.callbackArgs = callback_arg;
+ args.callbackObjLockMode = callbackobj_lockMode;
+ /* the following arguments is not used. */
+ args.partitionedRel = NULL;
+ args.partKeyValueList = NULL;
+ args.topClosed = false;
+
+ Relation partitionRelRelation = relation_open(PartitionRelationId, AccessShareLock);
+
+ Oid subpartitionOid = SubPartitionGetSubPartitionOid(&args, partitionedRelationOid, partOidForSubPart, partlock,
+ subpartlock, missingOk, noWait);
+
+ relation_close(partitionRelRelation, AccessShareLock);
+
+ return subpartitionOid;
+}
+
/*
* @Description: get partition oid from given key values list.
* @Param[IN] lockMode: lock mode for this partition
@@ -441,9 +488,9 @@ Oid partitionNameGetPartitionOid(Oid partitionedRelationOid, const char* partiti
* @Param[IN] rel: partitioned relation
* @Param[IN] topClosed:
* @Return: partition oid
- * @See also: partitionNameGetPartitionOid()
+ * @See also: PartitionNameGetPartitionOid()
*/
-Oid partitionValuesGetPartitionOid(Relation rel, List *partKeyValueList, LOCKMODE lockMode, bool topClosed,
+Oid PartitionValuesGetPartitionOid(Relation rel, List *partKeyValueList, LOCKMODE lockMode, bool topClosed,
bool missingOk, bool noWait)
{
GetPartitionOidArgs args;
@@ -461,11 +508,11 @@ Oid partitionValuesGetPartitionOid(Relation rel, List *partKeyValueList, LOCKMOD
args.callbackArgs = NULL;
args.callbackObjLockMode = NoLock;
- return partitionGetPartitionOid(&args, lockMode, missingOk, noWait);
+ return PartitionGetPartitionOid(&args, lockMode, missingOk, noWait);
}
-Oid subpartitionValuesGetSubpartitionOid(Relation rel, List *partKeyValueList, List *subpartKeyValueList,
- LOCKMODE lockMode, bool topClosed, bool missingOk, bool noWait, Oid *partOidForSubPart)
+Oid SubPartitionValuesGetSubPartitionOid(Relation rel, List *partKeyValueList, List *subpartKeyValueList,
+ LOCKMODE partlock, LOCKMODE subpartlock, bool topClosed, bool missingOk, bool noWait, Oid *partOidForSubPart)
{
GetPartitionOidArgs args;
/* get partition oid from given values */
@@ -481,7 +528,7 @@ Oid subpartitionValuesGetSubpartitionOid(Relation rel, List *partKeyValueList, L
args.callbackArgs = NULL;
args.callbackObjLockMode = NoLock;
- *partOidForSubPart = partitionGetPartitionOid(&args, lockMode, missingOk, noWait);
+ *partOidForSubPart = PartitionGetPartitionOid(&args, partlock, missingOk, noWait);
if (!OidIsValid(*partOidForSubPart)) {
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("The partition which owns the subpartition is missing"),
@@ -490,9 +537,10 @@ Oid subpartitionValuesGetSubpartitionOid(Relation rel, List *partKeyValueList, L
erraction("Check system table 'pg_partition' for more information")));
}
- Partition part = partitionOpen(rel, *partOidForSubPart, lockMode);
+ Partition part = partitionOpen(rel, *partOidForSubPart, NoLock);
Relation partrel = partitionGetRelation(rel, part);
- Oid subpartOid = partitionValuesGetPartitionOid(partrel, subpartKeyValueList, lockMode, true, true, false);
+ Oid subpartOid =
+ PartitionValuesGetPartitionOid(partrel, subpartKeyValueList, subpartlock, topClosed, missingOk, noWait);
releaseDummyRelation(&partrel);
partitionClose(rel, part, NoLock);
@@ -1021,6 +1069,31 @@ void freeSubPartList(List* plist)
}
}
+static int PartOidGetPartitionNo(PartitionMap *map, Oid partOid)
+{
+ Oid partitionId = InvalidOid;
+ int partitionno = INVALID_PARTITION_NO;
+ int sumtotal = getPartitionNumber(map);
+
+ for (int counter = 0; counter < sumtotal; ++counter) {
+ if (map->type == PART_TYPE_LIST) {
+ partitionId = ((ListPartitionMap*)map)->listElements[counter].partitionOid;
+ partitionno = ((ListPartitionMap*)map)->listElements[counter].partitionno;
+ } else if (map->type == PART_TYPE_HASH) {
+ partitionId = ((HashPartitionMap*)map)->hashElements[counter].partitionOid;
+ partitionno = ((HashPartitionMap*)map)->hashElements[counter].partitionno;
+ } else {
+ partitionId = ((RangePartitionMap*)map)->rangeElements[counter].partitionOid;
+ partitionno = ((RangePartitionMap*)map)->rangeElements[counter].partitionno;
+ }
+ if (partitionId == partOid) {
+ return partitionno;
+ }
+ }
+
+ return INVALID_PARTITION_NO;
+}
+
/* IMPORTANT: This function will case invalidation message process, the relation may
be rebuild, and the relation->partMap may be changed.
After call this founction, should not call getNumberOfRangePartitions/getNumberOfPartitions,
@@ -1030,22 +1103,35 @@ List* relationGetPartitionList(Relation relation, LOCKMODE lockmode)
{
List* partitionOidList = NIL;
List* partitionList = NIL;
+ ListCell* cell = NULL;
+ Oid partitionId = InvalidOid;
+ Partition partition = NULL;
+ incre_partmap_refcount(relation->partMap);
partitionOidList = relationGetPartitionOidList(relation);
- if (PointerIsValid(partitionOidList)) {
- ListCell* cell = NULL;
- Oid partitionId = InvalidOid;
- Partition partition = NULL;
+ if (!PointerIsValid(partitionOidList)) {
+ decre_partmap_refcount(relation->partMap);
+ return NIL;
+ }
- foreach (cell, partitionOidList) {
- partitionId = lfirst_oid(cell);
- Assert(OidIsValid(partitionId));
- partition = partitionOpen(relation, partitionId, lockmode);
- partitionList = lappend(partitionList, partition);
+ foreach (cell, partitionOidList) {
+ partitionId = lfirst_oid(cell);
+ Assert(OidIsValid(partitionId));
+ partition = tryPartitionOpen(relation, partitionId, lockmode);
+ if (!PartitionIsValid(partition)) {
+ PARTITION_LOG("could not open partition with partition oid %u, partitionno will be used to search the "
+ "new partition", partitionId);
+ int partitionno = PartOidGetPartitionNo(relation->partMap, partitionId);
+ if (!PARTITIONNO_IS_VALID(partitionno)) {
+ continue;
+ }
+ partition = PartitionOpenWithPartitionno(relation, partitionId, partitionno, lockmode);
}
- list_free_ext(partitionOidList);
+ partitionList = lappend(partitionList, partition);
}
+ list_free_ext(partitionOidList);
+ decre_partmap_refcount(relation->partMap);
return partitionList;
}
@@ -1054,27 +1140,41 @@ List* RelationGetSubPartitionList(Relation relation, LOCKMODE lockmode)
{
List* partitionOidList = NIL;
List* subPartList = NIL;
+ ListCell* cell = NULL;
+ Oid partitionId = InvalidOid;
+ Partition partition = NULL;
+ incre_partmap_refcount(relation->partMap);
partitionOidList = relationGetPartitionOidList(relation);
- if (PointerIsValid(partitionOidList)) {
- ListCell* cell = NULL;
- Oid partitionId = InvalidOid;
- Partition partition = NULL;
+ if (!PointerIsValid(partitionOidList)) {
+ decre_partmap_refcount(relation->partMap);
+ return NIL;
+ }
- foreach (cell, partitionOidList) {
- partitionId = lfirst_oid(cell);
- Assert(OidIsValid(partitionId));
- partition = partitionOpen(relation, partitionId, lockmode);
- Relation partRel = partitionGetRelation(relation, partition);
- List* subPartListTmp = relationGetPartitionList(partRel, lockmode);
- subPartList = list_concat(subPartList, subPartListTmp);
- releaseDummyRelation(&partRel);
- partitionClose(relation, partition, lockmode);
+ foreach (cell, partitionOidList) {
+ partitionId = lfirst_oid(cell);
+ Assert(OidIsValid(partitionId));
+ partition = tryPartitionOpen(relation, partitionId, lockmode);
+ if (!PartitionIsValid(partition)) {
+ PARTITION_LOG("could not open partition with partition oid %u, partitionno will be used to search the "
+ "new partition", partitionId);
+ int partitionno = PartOidGetPartitionNo(relation->partMap, partitionId);
+ if (!PARTITIONNO_IS_VALID(partitionno)) {
+ continue;
+ }
+ partition = PartitionOpenWithPartitionno(relation, partitionId, partitionno, lockmode);
}
- list_free_ext(partitionOidList);
+ Relation partRel = partitionGetRelation(relation, partition);
+ List* subPartListTmp = relationGetPartitionList(partRel, lockmode);
+ subPartList = list_concat(subPartList, subPartListTmp);
+ releaseDummyRelation(&partRel);
+ partitionClose(relation, partition, lockmode);
}
+ list_free_ext(partitionOidList);
+ decre_partmap_refcount(relation->partMap);
+
return subPartList;
}
@@ -1256,8 +1356,12 @@ Relation SubPartitionGetRelation(Relation heap, Partition subPart, LOCKMODE lock
Partition SubPartitionOidGetPartition(Relation rel, Oid subPartOid, LOCKMODE lockmode)
{
+ /* this function is used for partitionOpen(relOid, subpartOid). Do not use it as much as possible.
+ * We cannot add high lock on the partOid in case of deadlock. */
+ LOCKMODE partlock = lockmode > ShareUpdateExclusiveLock ? ShareUpdateExclusiveLock : lockmode;
+
Oid parentOid = partid_get_parentid(subPartOid);
- Partition part = partitionOpen(rel, parentOid, lockmode);
+ Partition part = partitionOpen(rel, parentOid, partlock);
Relation partRel = partitionGetRelation(rel, part);
Partition subPart = partitionOpen(partRel, subPartOid, lockmode);
releaseDummyRelation(&partRel);
@@ -1491,42 +1595,471 @@ Oid GetBaseRelOidOfParition(Relation relation)
return relation->parentId;
}
-/* NB: all operations on ADD_PARTITION_ACTION sequence lock must use TopTransactionResourceOwner. */
-void LockRelationForAddIntervalPartition(Relation rel)
+/* Set PARTITION OBJECT_LOCK */
+void LockPartitionObject(Oid relOid, PartitionObjectLock object, PartitionObjectLockType type)
{
ResourceOwner currentOwner = t_thrd.utils_cxt.CurrentResourceOwner;
t_thrd.utils_cxt.CurrentResourceOwner = t_thrd.utils_cxt.TopTransactionResourceOwner;
- LockPartition(RelationGetRelid(rel), ADD_PARTITION_ACTION,
- AccessExclusiveLock, PARTITION_SEQUENCE_LOCK);
+ LockDatabaseObject(relOid, object, 0, type);
t_thrd.utils_cxt.CurrentResourceOwner = currentOwner;
+
+ const char* objectname = (object == PARTITION_OBJECT_LOCK_SDEQUENCE) ? "partition": "interval";
+ PARTITION_LOG("Successfully obtain %s object lock: relid %u locklevel %d", objectname, relOid, type);
}
-void LockRelationForAccessIntervalPartitionTab(Relation rel)
+/* Release PARTITION OBJECT_LOCK */
+void UnlockPartitionObject(Oid relOid, PartitionObjectLock object, PartitionObjectLockType type)
{
ResourceOwner currentOwner = t_thrd.utils_cxt.CurrentResourceOwner;
t_thrd.utils_cxt.CurrentResourceOwner = t_thrd.utils_cxt.TopTransactionResourceOwner;
- LockPartition(RelationGetRelid(rel), ADD_PARTITION_ACTION,
- AccessShareLock, PARTITION_SEQUENCE_LOCK);
+ UnlockDatabaseObject(relOid, object, 0, type);
t_thrd.utils_cxt.CurrentResourceOwner = currentOwner;
+
+ const char* objectname = (object == PARTITION_OBJECT_LOCK_SDEQUENCE) ? "partition": "interval";
+ PARTITION_LOG("Successfully release %s object lock: relid %u locklevel %d", objectname, relOid, type);
}
-void UnlockRelationForAccessIntervalPartTabIfHeld(Relation rel)
+bool ConditionalLockPartitionObject(Oid relOid, PartitionObjectLock object, PartitionObjectLockType type)
{
ResourceOwner currentOwner = t_thrd.utils_cxt.CurrentResourceOwner;
t_thrd.utils_cxt.CurrentResourceOwner = t_thrd.utils_cxt.TopTransactionResourceOwner;
-
- UnlockPartitionSeqIfHeld(RelationGetRelid(rel), ADD_PARTITION_ACTION, AccessShareLock);
+ bool flag = ConditionalLockDatabaseObject(relOid, object, 0, type);
t_thrd.utils_cxt.CurrentResourceOwner = currentOwner;
+
+ if (flag) {
+ const char* objectname = (object == PARTITION_OBJECT_LOCK_SDEQUENCE) ? "partition": "interval";
+ PARTITION_LOG("Successfully obtain %s object lock: relid %u locklevel %d", objectname, relOid, type);
+ }
+ return flag;
}
-void UnlockRelationForAddIntervalPartition(Relation rel)
+#ifndef ENABLE_MULTIPLE_NODES
+/* Set DML tag for a partitioned table */
+void AddPartitionDMLInfo(Oid relOid)
{
- ResourceOwner currentOwner = t_thrd.utils_cxt.CurrentResourceOwner;
- t_thrd.utils_cxt.CurrentResourceOwner = t_thrd.utils_cxt.TopTransactionResourceOwner;
+ MemoryContext old_cxt = MemoryContextSwitchTo(u_sess->top_transaction_mem_cxt);
+ u_sess->storage_cxt.partition_dml_oids = lappend_oid(u_sess->storage_cxt.partition_dml_oids, relOid);
+ MemoryContextSwitchTo(old_cxt);
+}
- UnlockPartition(RelationGetRelid(rel), ADD_PARTITION_ACTION,
- AccessExclusiveLock, PARTITION_SEQUENCE_LOCK);
- t_thrd.utils_cxt.CurrentResourceOwner = currentOwner;
+/* Set DDL tag for a partitioned table */
+void AddPartitionDDLInfo(Oid relOid)
+{
+ MemoryContext old_cxt = MemoryContextSwitchTo(u_sess->top_transaction_mem_cxt);
+ u_sess->storage_cxt.partition_ddl_oids = lappend_oid(u_sess->storage_cxt.partition_ddl_oids, relOid);
+ MemoryContextSwitchTo(old_cxt);
+}
+
+/* Add AccessExclusiveLock for all partitioned tables added in AddPartitionDDLInfo */
+void LockPartitionDDLOperation()
+{
+ ListCell *cell;
+ foreach(cell, u_sess->storage_cxt.partition_ddl_oids) {
+ Oid relid = lfirst_oid(cell);
+ LockPartitionObject(relid, PARTITION_OBJECT_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK);
+ }
+}
+#endif
+
+HeapTuple ScanPgPartition(Oid targetPartId, bool indexOK, Snapshot snapshot)
+{
+ HeapTuple pg_partition_tuple;
+ Relation pg_partition_desc;
+ SysScanDesc pg_partition_scan;
+ ScanKeyData key[1];
+
+ /*
+ * If something goes wrong during backend startup, we might find ourselves
+ * trying to read pg_partition before we've selected a database. That ain't
+ * gonna work, so bail out with a useful error message. If this happens,
+ * it probably means a partcache entry that needs to be nailed isn't.
+ */
+ if (!OidIsValid(u_sess->proc_cxt.MyDatabaseId)) {
+ ereport(FATAL,
+ (errcode(ERRCODE_UNDEFINED_DATABASE), errmsg("cannot read pg_partition without having selected a database")));
+ }
+
+ if (snapshot == NULL) {
+ snapshot = GetCatalogSnapshot();
+ }
+
+ /*
+ * form a scan key
+ */
+ ScanKeyInit(&key[0], ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(targetPartId));
+
+ /*
+ * Open pg_partition and fetch a tuple. Force heap scan if we haven't yet
+ * built the critical partcache entries (this includes initdb and startup
+ * without a pg_internal.init file). The caller can also force a heap
+ * scan by setting indexOK == false.
+ */
+ pg_partition_desc = heap_open(PartitionRelationId, AccessShareLock);
+ pg_partition_scan = systable_beginscan(pg_partition_desc,
+ PartitionOidIndexId,
+ indexOK && LocalRelCacheCriticalRelcachesBuilt(),
+ snapshot,
+ 1,
+ key);
+
+ pg_partition_tuple = systable_getnext(pg_partition_scan);
+
+ /*
+ * Must copy tuple before releasing buffer.
+ */
+ if (HeapTupleIsValid(pg_partition_tuple)) {
+ pg_partition_tuple = heap_copytuple(pg_partition_tuple);
+ }
+
+ /* all done */
+ systable_endscan(pg_partition_scan);
+ heap_close(pg_partition_desc, AccessShareLock);
+
+ return pg_partition_tuple;
+}
+
+/* get the oid of pg_partition entry from a partitioned table's oid */
+Oid RelOidGetPartitionTupleid(Oid relOid)
+{
+ Relation pg_partition;
+ HeapTuple tup;
+ ScanKeyData key[2];
+ SysScanDesc scan;
+ Oid partOid;
+
+ pg_partition = heap_open(PartitionRelationId, AccessShareLock);
+ ScanKeyInit(&key[0], Anum_pg_partition_parttype, BTEqualStrategyNumber, F_CHAREQ,
+ CharGetDatum(PART_OBJ_TYPE_PARTED_TABLE));
+ ScanKeyInit(&key[1], Anum_pg_partition_parentid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relOid));
+
+ scan = systable_beginscan(pg_partition, PartitionParentOidIndexId, true, NULL, 2, key);
+ if (!HeapTupleIsValid((tup = systable_getnext(scan)))) {
+ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("cache lookup failed for relation %u", relOid)));
+ }
+
+ partOid = HeapTupleGetOid(tup);
+ systable_endscan(scan);
+ heap_close(pg_partition, AccessShareLock);
+
+ return partOid;
+}
+
+/* get the partitionno of a pg_partition entry */
+int GetCurrentPartitionNo(Oid partOid)
+{
+ HeapTuple tup;
+ Datum datum;
+ bool isNull;
+ int partitionno;
+
+ tup = SearchSysCache1(PARTRELID, ObjectIdGetDatum(partOid));
+ if (!HeapTupleIsValid(tup)) {
+ return INVALID_PARTITION_NO;
+ }
+ datum = SysCacheGetAttr(PARTRELID, tup, Anum_pg_partition_partitionno, &isNull);
+ if (isNull) {
+ partitionno = INVALID_PARTITION_NO;
+ } else {
+ partitionno = DatumGetInt32(datum);
+ Assert(partitionno != INVALID_PARTITION_NO);
+ }
+ ReleaseSysCache(tup);
+
+ return partitionno;
+}
+
+/* get the subpartitionno of a pg_partition entry */
+int GetCurrentSubPartitionNo(Oid partOid)
+{
+ HeapTuple tup;
+ Datum datum;
+ bool isNull;
+ int subpartitionno;
+
+ tup = SearchSysCache1(PARTRELID, ObjectIdGetDatum(partOid));
+ if (!HeapTupleIsValid(tup)) {
+ return INVALID_PARTITION_NO;
+ }
+ datum = SysCacheGetAttr(PARTRELID, tup, Anum_pg_partition_subpartitionno, &isNull);
+ if (isNull) {
+ subpartitionno = INVALID_PARTITION_NO;
+ } else {
+ subpartitionno = DatumGetInt32(datum);
+ Assert(subpartitionno != INVALID_PARTITION_NO);
+ }
+ ReleaseSysCache(tup);
+
+ return subpartitionno;
+}
+
+/* update the partitionno to new value */
+/*
+ * Set the partitionno attribute of partOid's pg_partition tuple to
+ * 'partitionno'. No-op if the stored value already matches. Passing
+ * INVALID_PARTITION_NO stores SQL NULL. |partitionno| must not exceed
+ * MAX_PARTITION_NO, otherwise an ERROR is raised.
+ *
+ * 'inplace' selects heap_inplace_update instead of a regular MVCC update;
+ * per the comment below this is only legal for the partitioned table's own
+ * tuple. Ends with CommandCounterIncrement so the change is visible to the
+ * current command chain.
+ *
+ * NOTE(review): unlike UpdateCurrentSubPartitionNo, this function does not
+ * call CacheInvalidatePartcacheByPartid — presumably callers invalidate the
+ * relcache themselves (see RelationResetPartitionno); confirm.
+ */
+void UpdateCurrentPartitionNo(Oid partOid, int partitionno, bool inplace)
+{
+    Relation pg_partition;
+    HeapTuple tup;
+    HeapTuple ntup;
+    Datum values[Natts_pg_partition] = {0};
+    bool nulls[Natts_pg_partition] = {false};
+    bool replaces[Natts_pg_partition] = {false};
+
+    if (GetCurrentPartitionNo(partOid) == partitionno) {
+        return;
+    }
+
+    /* range-check the absolute value; negative values are allowed */
+    int absno = DatumGetInt32(DirectFunctionCall1(int4abs, Int32GetDatum(partitionno)));
+    if (absno > MAX_PARTITION_NO) {
+        ereport(ERROR,
+            (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("current partitionno %d is out of range", partitionno)));
+    }
+
+    pg_partition = heap_open(PartitionRelationId, RowExclusiveLock);
+    tup = SearchSysCache1(PARTRELID, ObjectIdGetDatum(partOid));
+    if (!HeapTupleIsValid(tup)) {
+        ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("cache lookup failed for partition %u", partOid)));
+    }
+
+    replaces[Anum_pg_partition_partitionno - 1] = true;
+    if (partitionno == INVALID_PARTITION_NO) {
+        /* the invalid sentinel is represented as NULL on disk */
+        nulls[Anum_pg_partition_partitionno - 1] = true;
+    } else {
+        values[Anum_pg_partition_partitionno - 1] = Int32GetDatum(partitionno);
+    }
+    ntup = heap_modify_tuple(tup, RelationGetDescr(pg_partition), values, nulls, replaces);
+
+    /* Only the tuple of partitioned table can use inplace-update! Since wait_clean_cpi is set inplace-update, we keep
+    consistent. Because the partitionno of partitioned table is only used to mask the max value, rollback will not
+    violate it. */
+    if (inplace) {
+        heap_inplace_update(pg_partition, ntup);
+    } else {
+        simple_heap_update(pg_partition, &ntup->t_self, ntup);
+        CatalogUpdateIndexes(pg_partition, ntup);
+    }
+
+    heap_freetuple_ext(ntup);
+    ReleaseSysCache(tup);
+    heap_close(pg_partition, RowExclusiveLock);
+
+    CommandCounterIncrement();
+}
+
+/* update the subpartitionno to new value */
+/*
+ * Set the subpartitionno attribute of partOid's pg_partition tuple to
+ * 'subpartitionno'. No-op if the stored value already matches. Passing
+ * INVALID_PARTITION_NO stores SQL NULL; |subpartitionno| must not exceed
+ * MAX_PARTITION_NO or an ERROR is raised.
+ *
+ * Always uses a regular MVCC catalog update (no inplace variant), then
+ * bumps the command counter and invalidates the partcache entry for this
+ * partition so stale numbers are not reused.
+ */
+void UpdateCurrentSubPartitionNo(Oid partOid, int subpartitionno)
+{
+    Relation pg_partition;
+    HeapTuple tup;
+    HeapTuple ntup;
+    Datum values[Natts_pg_partition] = {0};
+    bool nulls[Natts_pg_partition] = {false};
+    bool replaces[Natts_pg_partition] = {false};
+
+    if (GetCurrentSubPartitionNo(partOid) == subpartitionno) {
+        return;
+    }
+
+    /* range-check the absolute value; negative values are allowed */
+    int absno = DatumGetInt32(DirectFunctionCall1(int4abs, Int32GetDatum(subpartitionno)));
+    if (absno > MAX_PARTITION_NO) {
+        ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
+            errmsg("current subpartitionno %d is out of range", subpartitionno)));
+    }
+
+    pg_partition = heap_open(PartitionRelationId, RowExclusiveLock);
+    tup = SearchSysCache1(PARTRELID, ObjectIdGetDatum(partOid));
+    if (!HeapTupleIsValid(tup)) {
+        ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("cache lookup failed for subpartition %u", partOid)));
+    }
+
+    replaces[Anum_pg_partition_subpartitionno - 1] = true;
+    if (subpartitionno == INVALID_PARTITION_NO) {
+        /* the invalid sentinel is represented as NULL on disk */
+        nulls[Anum_pg_partition_subpartitionno - 1] = true;
+    } else {
+        values[Anum_pg_partition_subpartitionno - 1] = Int32GetDatum(subpartitionno);
+    }
+    ntup = heap_modify_tuple(tup, RelationGetDescr(pg_partition), values, nulls, replaces);
+
+    simple_heap_update(pg_partition, &ntup->t_self, ntup);
+    CatalogUpdateIndexes(pg_partition, ntup);
+
+    heap_freetuple_ext(ntup);
+    ReleaseSysCache(tup);
+    heap_close(pg_partition, RowExclusiveLock);
+
+    CommandCounterIncrement();
+    CacheInvalidatePartcacheByPartid(partOid);
+}
+
+/*
+ * Find the OID of the (sub)partition of 'parentid' whose stored
+ * partitionno/subpartitionno equals 'partitionno'.
+ *
+ * parttype selects which attribute to compare: PART_OBJ_TYPE_TABLE_PARTITION
+ * reads partitionno, PART_OBJ_TYPE_TABLE_SUB_PARTITION reads subpartitionno.
+ * Scans pg_partition via the parent-OID index and returns the first match,
+ * or InvalidOid if none is found. NULL attributes are treated as
+ * INVALID_PARTITION_NO and thus can never match (partitionno is asserted
+ * valid on entry).
+ */
+Oid GetPartOidWithPartitionno(Oid parentid, int partitionno, char parttype)
+{
+    SysScanDesc scan;
+    ScanKeyData key[2];
+    Relation partition = NULL;
+    HeapTuple tuple = NULL;
+    Oid newpartOid = InvalidOid;
+    int newpartno;
+    Datum datum;
+    bool isNull = false;
+
+    Assert(parttype == PART_OBJ_TYPE_TABLE_PARTITION || parttype == PART_OBJ_TYPE_TABLE_SUB_PARTITION);
+    bool issubpartition = (parttype == PART_OBJ_TYPE_TABLE_SUB_PARTITION);
+
+    Assert(PARTITIONNO_IS_VALID(partitionno));
+
+    ScanKeyInit(&key[0], Anum_pg_partition_parttype, BTEqualStrategyNumber, F_CHAREQ, CharGetDatum(parttype));
+    ScanKeyInit(&key[1], Anum_pg_partition_parentid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(parentid));
+    partition = heap_open(PartitionRelationId, AccessShareLock);
+    scan = systable_beginscan(partition, PartitionParentOidIndexId, true, NULL, 2, key);
+    while (HeapTupleIsValid((tuple = systable_getnext(scan)))) {
+        if (issubpartition) {
+            datum = heap_getattr(tuple, Anum_pg_partition_subpartitionno, RelationGetDescr(partition), &isNull);
+        } else {
+            datum = heap_getattr(tuple, Anum_pg_partition_partitionno, RelationGetDescr(partition), &isNull);
+        }
+        newpartno = isNull ? INVALID_PARTITION_NO : DatumGetInt32(datum);
+        if (newpartno == partitionno) {
+            newpartOid = HeapTupleGetOid(tuple);
+            break;
+        }
+    }
+    systable_endscan(scan);
+    heap_close(partition, AccessShareLock);
+
+    return newpartOid;
+}
+
+/*
+ * Map a possibly-dropped (MVCC-invisible) partition OID to the OID of the
+ * partition that currently occupies the same partitionno slot.
+ *
+ * The old tuple is fetched with SnapshotAny so that even a deleted
+ * pg_partition row can still be read; its parentid and
+ * partitionno/subpartitionno are then used to look up the live replacement
+ * via GetPartOidWithPartitionno.
+ *
+ * Returns InvalidOid when the old tuple cannot be found, is neither a
+ * partition nor a subpartition, or carries no valid partitionno.
+ */
+Oid InvisiblePartidGetNewPartid(Oid partOid)
+{
+    HeapTuple partTuple = NULL;
+    Form_pg_partition partForm = NULL;
+    Relation partition = NULL;
+    char parttype;
+    Oid parentid = InvalidOid;
+    Datum datum;
+    bool isNull = false;
+    int partitionno;
+
+    partTuple = ScanPgPartition(partOid, false, SnapshotAny);
+    if (!HeapTupleIsValid(partTuple)) {
+        return InvalidOid;
+    }
+
+    partForm = (Form_pg_partition)GETSTRUCT(partTuple);
+    parttype = partForm->parttype;
+    parentid = partForm->parentid;
+    /* opened only to obtain the tuple descriptor for heap_getattr */
+    partition = heap_open(PartitionRelationId, AccessShareLock);
+    if (partForm->parttype == PART_OBJ_TYPE_TABLE_PARTITION) {
+        datum = heap_getattr(partTuple, Anum_pg_partition_partitionno, RelationGetDescr(partition), &isNull);
+    } else if (partForm->parttype == PART_OBJ_TYPE_TABLE_SUB_PARTITION) {
+        datum = heap_getattr(partTuple, Anum_pg_partition_subpartitionno, RelationGetDescr(partition), &isNull);
+    } else {
+        /* not a table partition/subpartition entry: nothing to map */
+        pfree_ext(partTuple);
+        heap_close(partition, AccessShareLock);
+        return InvalidOid;
+    }
+    partitionno = isNull ? INVALID_PARTITION_NO : DatumGetInt32(datum);
+    pfree_ext(partTuple);
+    heap_close(partition, AccessShareLock);
+
+    if (!PARTITIONNO_IS_VALID(partitionno)) {
+        return InvalidOid;
+    }
+    return GetPartOidWithPartitionno(parentid, partitionno, parttype);
+}
+
+/* init all partitionno/subpartitionno for a partition definition */
+/*
+ * Assign sequential 1-based partitionno values to every PartitionDefState in
+ * the definition, and likewise 1-based subpartition numbers within each
+ * partition. Numbers follow list order; nothing is persisted here — only the
+ * in-memory parse state is updated.
+ */
+void SetPartitionnoForPartitionState(PartitionState *partTableState)
+{
+    ListCell* cell = NULL;
+    ListCell* subcell = NULL;
+    int partitionno = 0;
+    foreach (cell, partTableState->partitionList) {
+        partitionno++;
+        PartitionDefState* partitionDefState = (PartitionDefState*)lfirst(cell);
+        partitionDefState->partitionno = partitionno;
+        int subpartitionno = 0;
+        /* subpartition numbering restarts at 1 for every partition */
+        foreach(subcell, partitionDefState->subPartitionDefState) {
+            subpartitionno++;
+            PartitionDefState* subpartitionDefState = (PartitionDefState*)lfirst(subcell);
+            subpartitionDefState->partitionno = subpartitionno;
+        }
+    }
+}
+
+/* reset all partitionno/subpartitionno to an ordered format */
+/*
+ * Renumber all partitions of relOid sequentially (1..n) in catalog order,
+ * and, for subpartitioned tables, renumber each partition's subpartitions
+ * (1..m) the same way.
+ *
+ * Each parent entry additionally receives the NEGATED count of its children
+ * (-m on a partition's subpartitionno, -n on the table's own pg_partition
+ * tuple) — apparently this records the current maximum number on the parent;
+ * see the "mask the max value" comment in UpdateCurrentPartitionNo.
+ *
+ * No-op for non-partitioned relations. The relation is closed with NoLock,
+ * so 'relationlock' is retained until end of transaction.
+ */
+void RelationResetPartitionno(Oid relOid, LOCKMODE relationlock)
+{
+    Relation rel = heap_open(relOid, relationlock);
+    if (!RELATION_IS_PARTITIONED(rel)) {
+        heap_close(rel, relationlock);
+        return;
+    }
+
+    bool issubpartition = RelationIsSubPartitioned(rel);
+    List *partidList = relationGetPartitionOidList(rel);
+    ListCell *cell = NULL;
+    int partitionno = 0;
+    foreach (cell, partidList) {
+        Oid partid = lfirst_oid(cell);
+        partitionno++;
+        UpdateCurrentPartitionNo(partid, partitionno, false);
+
+        if (issubpartition) {
+            Partition part = partitionOpen(rel, partid, relationlock);
+            Relation partrel = partitionGetRelation(rel, part);
+            List *subpartidList = relationGetPartitionOidList(partrel);
+            ListCell *subcell = NULL;
+            int subpartitionno = 0;
+            foreach (subcell, subpartidList) {
+                Oid subpartid = lfirst_oid(subcell);
+                subpartitionno++;
+                UpdateCurrentSubPartitionNo(subpartid, subpartitionno);
+            }
+            releasePartitionOidList(&subpartidList);
+            releaseDummyRelation(&partrel);
+            partitionClose(rel, part, NoLock);
+
+            /* store the negated subpartition count on the parent partition */
+            UpdateCurrentSubPartitionNo(partid, -subpartitionno);
+        }
+    }
+    releasePartitionOidList(&partidList);
+
+    /* store the negated partition count on the table's own pg_partition tuple */
+    UpdateCurrentPartitionNo(RelOidGetPartitionTupleid(relOid), -partitionno, false);
+
+    CacheInvalidateRelcache(rel);
+    CommandCounterIncrement();
+    heap_close(rel, NoLock);
+}
+
+/* get partitionno from partseq */
+/*
+ * Translate a 0-based partition sequence index into the stable partitionno
+ * stored in the partition map. Supports range/interval, list, and hash maps;
+ * returns INVALID_PARTITION_NO for a negative partseq, an out-of-range
+ * index, or an unknown map type.
+ *
+ * The map's refcount is held across the element access to keep it alive.
+ * After the PARTITION_ENHANCE upgrade version, every mapped slot is expected
+ * to carry a positive partitionno (asserted).
+ */
+int GetPartitionnoFromSequence(PartitionMap *partmap, int partseq)
+{
+    if (partseq < 0) {
+        return INVALID_PARTITION_NO;
+    }
+
+    incre_partmap_refcount(partmap);
+    int partitionno = INVALID_PARTITION_NO;
+    if (partmap->type == PART_TYPE_RANGE || partmap->type == PART_TYPE_INTERVAL) {
+        RangePartitionMap *partitionmap = (RangePartitionMap *)partmap;
+        if (partseq < partitionmap->rangeElementsNum) {
+            partitionno = partitionmap->rangeElements[partseq].partitionno;
+        }
+    } else if (partmap->type == PART_TYPE_LIST) {
+        ListPartitionMap *partitionmap = (ListPartitionMap *)partmap;
+        if (partseq < partitionmap->listElementsNum) {
+            partitionno = partitionmap->listElements[partseq].partitionno;
+        }
+    } else if (partmap->type == PART_TYPE_HASH) {
+        HashPartitionMap *partitionmap = (HashPartitionMap *)partmap;
+        if (partseq < partitionmap->hashElementsNum) {
+            partitionno = partitionmap->hashElements[partseq].partitionno;
+        }
+    }
+    decre_partmap_refcount(partmap);
+
+    if (t_thrd.proc->workingVersionNum >= PARTITION_ENHANCE_VERSION_NUM) {
+        Assert(partitionno > 0);
+    }
+    return partitionno;
 }
bool PartExprKeyIsNull(Relation rel, Relation partitionRel)
diff --git a/src/common/backend/catalog/pg_proc.cpp b/src/common/backend/catalog/pg_proc.cpp
index 5a2a325db..c0db94009 100644
--- a/src/common/backend/catalog/pg_proc.cpp
+++ b/src/common/backend/catalog/pg_proc.cpp
@@ -30,9 +30,10 @@
#include "catalog/pg_proc.h"
#include "catalog/gs_encrypted_proc.h"
#include "catalog/pg_proc_fn.h"
-#include "catalog/pg_synonym.h"
+#include "catalog/pg_synonym.h"
#include "catalog/pg_type.h"
#include "client_logic/client_logic_proc.h"
+#include "client_logic/client_logic.h"
#include "commands/defrem.h"
#include "commands/user.h"
#include "commands/trigger.h"
@@ -1054,6 +1055,8 @@ Oid ProcedureCreate(const char* procedureName, Oid procNamespace, Oid propackage
bool anyrangeOutParam = false;
bool internalInParam = false;
bool internalOutParam = false;
+ bool fullEncryptedInParam = false;
+ bool fullEncryptedOutParam = false;
Oid variadicType = InvalidOid;
Acl* proacl = NULL;
Relation rel;
@@ -1079,15 +1082,15 @@ Oid ProcedureCreate(const char* procedureName, Oid procNamespace, Oid propackage
/* sanity checks */
Assert(PointerIsValid(prosrc));
- /*
- * Check function name to ensure that it doesn't conflict with existing synonym.
- */
- if (!IsInitdb && GetSynonymOid(procedureName, procNamespace, true) != InvalidOid) {
- ereport(ERROR,
- (errmsg("function name is already used by an existing synonym in schema \"%s\"",
- get_namespace_name(procNamespace))));
- }
-
+ /*
+ * Check function name to ensure that it doesn't conflict with existing synonym.
+ */
+ if (!IsInitdb && GetSynonymOid(procedureName, procNamespace, true) != InvalidOid) {
+ ereport(ERROR,
+ (errmsg("function name is already used by an existing synonym in schema \"%s\"",
+ get_namespace_name(procNamespace))));
+ }
+
parameterCount = parameterTypes->dim1;
if (parameterCount < 0 || parameterCount > FUNC_MAX_ARGS)
ereport(ERROR,
@@ -1151,6 +1154,12 @@ Oid ProcedureCreate(const char* procedureName, Oid procNamespace, Oid propackage
case INTERNALOID:
internalInParam = true;
break;
+ case BYTEAWITHOUTORDERWITHEQUALCOLOID:
+ case BYTEAWITHOUTORDERCOLOID:
+ case BYTEAWITHOUTORDERWITHEQUALCOLARRAYOID:
+ case BYTEAWITHOUTORDERCOLARRAYOID:
+ fullEncryptedInParam = true;
+ break;
default:
break;
}
@@ -1179,6 +1188,12 @@ Oid ProcedureCreate(const char* procedureName, Oid procNamespace, Oid propackage
case INTERNALOID:
internalOutParam = true;
break;
+ case BYTEAWITHOUTORDERWITHEQUALCOLOID:
+ case BYTEAWITHOUTORDERCOLOID:
+ case BYTEAWITHOUTORDERWITHEQUALCOLARRAYOID:
+ case BYTEAWITHOUTORDERCOLARRAYOID:
+ fullEncryptedOutParam = true;
+ break;
default:
break;
}
@@ -1194,6 +1209,11 @@ Oid ProcedureCreate(const char* procedureName, Oid procNamespace, Oid propackage
*
* But when we are in inplace-upgrade, we can create function with polymorphic return type
*/
+ if (!u_sess->attr.attr_common.enable_full_encryption && !u_sess->attr.attr_common.IsInplaceUpgrade &&
+ (fullEncryptedInParam || fullEncryptedOutParam || is_enc_type(returnType))) {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("cannot create function"),
+ errdetail("function does not support full encrypted type parameter when client encryption is disabled.")));
+ }
if ((IsPolymorphicType(returnType) || genericOutParam) && !u_sess->attr.attr_common.IsInplaceUpgrade &&
!genericInParam)
ereport(ERROR,
diff --git a/src/common/backend/catalog/pg_synonym.cpp b/src/common/backend/catalog/pg_synonym.cpp
index 551a76a20..acfdf0bbf 100644
--- a/src/common/backend/catalog/pg_synonym.cpp
+++ b/src/common/backend/catalog/pg_synonym.cpp
@@ -46,6 +46,7 @@
#include "utils/rel.h"
#include "access/heapam.h"
#include "miscadmin.h"
+#include "client_logic/client_logic.h"
static Oid SynonymCreate(
Oid synNamespace, const char* synName, Oid synOwner, const char* objSchema, const char* objName, bool replace);
@@ -116,15 +117,21 @@ void CreateSynonym(CreateSynonymStmt* stmt)
objSchema = get_namespace_name(GetOidBySchemaName());
}
- /*
- * Check synonym name to ensure that it doesn't conflict with existing view, table, function, and procedure.
- */
- if (get_relname_relid(synName, synNamespace) != InvalidOid || get_func_oid(synName, synNamespace, NULL) != InvalidOid) {
- ereport(ERROR, (errmsg("synonym name is already used by an existing object")));
- }
-
- /* Main entry to create a synonym */
- SynonymCreate(synNamespace, synName, GetUserId(), objSchema, objName, stmt->replace);
+ /*
+ * Check synonym name to ensure that it doesn't conflict with existing view, table, function, and procedure.
+ */
+ if (get_relname_relid(synName, synNamespace) != InvalidOid || get_func_oid(synName, synNamespace, NULL) != InvalidOid) {
+ ereport(ERROR, (errmsg("synonym name is already used by an existing object")));
+ }
+
+ if (IsFullEncryptedRel(objSchema, objName)) {
+ ereport(ERROR, (errmsg("Unsupport to CREATE SYNONYM for encryption table.")));
+ } else if (IsFuncProcOnEncryptedRel(objSchema, objName)) {
+ ereport(ERROR, (errmsg("Unsupport to CREATE SYNONYM for encryption procedure or function.")));
+ } else {
+ /* Main entry to create a synonym */
+ SynonymCreate(synNamespace, synName, GetUserId(), objSchema, objName, stmt->replace);
+ }
}
/*
diff --git a/src/common/backend/catalog/storage.cpp b/src/common/backend/catalog/storage.cpp
index 8b02109df..67e231c5f 100644
--- a/src/common/backend/catalog/storage.cpp
+++ b/src/common/backend/catalog/storage.cpp
@@ -965,6 +965,7 @@ void push_del_rel_to_hashtbl(bool isCommit)
entry->rnode.bucketNode = pending->relnode.bucketNode;
entry->rnode.opt = pending->relnode.opt;
entry->maxSegNo = -1;
+ entry->fileUnlink = false;
}
BatchClearBadBlock(pending->relnode, pending->forknum, 0);
}
diff --git a/src/common/backend/catalog/system_views.sql b/src/common/backend/catalog/system_views.sql
index a09aa266f..c397c07d7 100644
--- a/src/common/backend/catalog/system_views.sql
+++ b/src/common/backend/catalog/system_views.sql
@@ -3473,7 +3473,8 @@ CREATE unlogged table statement_history(
lwlock_wait_time bigint,
details bytea,
is_slow_sql bool,
- trace_id text
+ trace_id text,
+ advise text
);
REVOKE ALL on table pg_catalog.statement_history FROM public;
create index statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql);
diff --git a/src/common/backend/catalog/toasting.cpp b/src/common/backend/catalog/toasting.cpp
index aeda86583..04d5c6c93 100644
--- a/src/common/backend/catalog/toasting.cpp
+++ b/src/common/backend/catalog/toasting.cpp
@@ -441,7 +441,7 @@ bool CreateToastTableForSubPartition(Relation partRel, Oid subPartOid, Datum rel
create_toast_table(subPartRel, InvalidOid, InvalidOid, reloptions, true, (partLockMode == AccessShareLock));
releaseDummyRelation(&subPartRel);
- partitionClose(partRel, partition, partLockMode);
+ partitionClose(partRel, partition, NoLock);
return result;
}
@@ -450,7 +450,8 @@ bool CreateToastTableForPartitioneOfSubpartTable(Relation rel, Oid partOid, Datu
{
bool result = false;
ListCell *cell = NULL;
- Partition part = partitionOpen(rel, partOid, partLockMode);
+ LOCKMODE partlock = partLockMode > ShareUpdateExclusiveLock ? ShareUpdateExclusiveLock : partLockMode;
+ Partition part = partitionOpen(rel, partOid, partlock);
Relation partRel = partitionGetRelation(rel, part);
List *partitionList = relationGetPartitionOidList(partRel);
@@ -460,7 +461,7 @@ bool CreateToastTableForPartitioneOfSubpartTable(Relation rel, Oid partOid, Datu
}
releaseDummyRelation(&partRel);
- partitionClose(rel, part, partLockMode);
+ partitionClose(rel, part, NoLock);
return result;
}
@@ -736,6 +737,7 @@ static void InitLobTempToastNamespace(void)
create_stmt->schemaElts = NULL;
create_stmt->schemaname = toastNamespaceName;
create_stmt->temptype = Temp_Lob_Toast;
+ create_stmt->charset = PG_INVALID_ENCODING;
rc = memset_s(str, sizeof(str), 0, sizeof(str));
securec_check(rc, "", "");
ret = snprintf_s(str,
diff --git a/src/common/backend/client_logic/client_logic.cpp b/src/common/backend/client_logic/client_logic.cpp
old mode 100644
new mode 100755
index 5d04034c8..db6ee6d55
--- a/src/common/backend/client_logic/client_logic.cpp
+++ b/src/common/backend/client_logic/client_logic.cpp
@@ -51,6 +51,8 @@
#include "pgxc/pgxc.h"
#include "utils/fmgroids.h"
#include "funcapi.h"
+#include "postgres_ext.h"
+#include "catalog/pg_proc.h"
const size_t ENCRYPTED_VALUE_MIN_LENGTH = 170;
const size_t ENCRYPTED_VALUE_MAX_LENGTH = 1024;
@@ -1283,6 +1285,65 @@ bool is_enc_type(const char *type_name)
return false;
}
+/*
+ * Return true if relation objSchema.objName has at least one encrypted
+ * column, i.e. the CERELIDCOUMNNAME syscache list for its OID is non-empty.
+ *
+ * NOTE(review): get_relname_relid may return InvalidOid for a nonexistent
+ * relation, and the list is released even when 'catlist' could be NULL —
+ * presumably SearchSysCacheList1 never returns NULL here; confirm.
+ */
+bool IsFullEncryptedRel(char* objSchema, char* objName)
+{
+    bool is_encrypted = false;
+    Oid namespaceId = get_namespace_oid((const char*)objSchema, false);
+    Oid relnameId = get_relname_relid((const char*)objName, namespaceId);
+    CatCList *catlist = SearchSysCacheList1(CERELIDCOUMNNAME, ObjectIdGetDatum(relnameId));
+    if (catlist != NULL && catlist->n_members > 0) {
+        is_encrypted = true;
+    }
+    ReleaseSysCacheList(catlist);
+    return is_encrypted;
+}
+
+/*
+ * Return true if any function/procedure named objName in schema objSchema
+ * has an entry in gs_encrypted_proc (GSCLPROCID syscache), i.e. works on
+ * encrypted data.
+ *
+ * Iterates all pg_proc candidates with this name, filters by namespace,
+ * then probes gs_encrypted_proc by the proc's OID.
+ *
+ * NOTE(review): 't_len > 0' always holds for a valid heap tuple, so the
+ * mere existence of the gs_encrypted_proc entry appears to be the real
+ * signal — confirm the extra length check is intentional.
+ */
+bool IsFuncProcOnEncryptedRel(char* objSchema, char* objName)
+{
+    bool is_encrypted = false;
+    CatCList* catlist_funcs = NULL;
+    catlist_funcs = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(objName));
+    Oid namespaceId = get_namespace_oid((const char*)objSchema, false);
+    for (int i = 0; i < catlist_funcs->n_members; i++) {
+        if (is_encrypted) {
+            break;
+        }
+        HeapTuple proctup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist_funcs, i);
+        if (HeapTupleIsValid(proctup)) {
+            Form_pg_proc pform = (Form_pg_proc)GETSTRUCT(proctup);
+            Oid oldTupleOid = HeapTupleGetOid(proctup);
+            /* compare function's namespace */
+            if (pform->pronamespace != namespaceId) {
+                continue;
+            }
+            HeapTuple gs_oldtup = SearchSysCache1(GSCLPROCID, ObjectIdGetDatum(oldTupleOid));
+            if (!HeapTupleIsValid(gs_oldtup)) {
+                /* no gs_encrypted_proc entry: not an encrypted proc */
+                continue;
+            }
+            if (gs_oldtup->t_len > 0) {
+                is_encrypted = true;
+            }
+            ReleaseSysCache(gs_oldtup);
+        }
+    }
+    ReleaseSysCacheList(catlist_funcs);
+    return is_encrypted;
+}
+
+/*
+ * Return true if an already-open ordinary relation has any column whose
+ * attribute type is a full-encryption type. Non-plain relations (views,
+ * indexes, ...) and NULL/descriptor-less inputs return false.
+ *
+ * NOTE(review): passes an Oid to is_enc_type — assumes an is_enc_type(Oid)
+ * overload exists alongside the const char* variant visible above; confirm.
+ */
+bool is_full_encrypted_rel(Relation rel)
+{
+    if (rel == NULL || rel->rd_att == NULL || rel->rd_rel->relkind != RELKIND_RELATION) {
+        return false;
+    }
+    TupleDesc tup_desc = rel->rd_att;
+    for (int i = 0; i < tup_desc->natts; i++) {
+        if (is_enc_type(tup_desc->attrs[i].atttypid)) {
+            return true;
+        }
+    }
+    return false;
+}
+
/*
* if a column is encrypted, we will rewrite its type and
* (1) store its source col_type in catalog 'gs_encrypted_columns'
@@ -1349,4 +1410,24 @@ Datum get_client_info(PG_FUNCTION_ARGS)
tuplestore_donestoring(rsinfo->setResult);
return (Datum)0;
+}
+
+/*
+ * Map a full-encryption bytea type OID to its SQL type name.
+ * Returns NULL for any other OID; the returned string is a static literal
+ * and must not be freed.
+ */
+const char *get_typename_by_id(Oid typeOid)
+{
+    if (typeOid == BYTEAWITHOUTORDERWITHEQUALCOLOID) {
+        return "byteawithoutorderwithequal";
+    } else if (typeOid == BYTEAWITHOUTORDERCOLOID) {
+        return "byteawithoutorder";
+    }
+    return NULL;
+}
+
+/*
+ * Map an EncryptionType enum value to its display name.
+ * Returns NULL for any other value; the returned string is a static literal
+ * and must not be freed.
+ */
+const char *get_encryption_type_name(EncryptionType algorithm_type)
+{
+    if (algorithm_type == EncryptionType::DETERMINISTIC_TYPE) {
+        return "DETERMINISTIC";
+    } else if (algorithm_type == EncryptionType::RANDOMIZED_TYPE) {
+        return "RANDOMIZED";
+    }
+    return NULL;
+}
\ No newline at end of file
diff --git a/src/common/backend/libpq/pqcomm.cpp b/src/common/backend/libpq/pqcomm.cpp
index 38af34e27..eb2a9f3ce 100644
--- a/src/common/backend/libpq/pqcomm.cpp
+++ b/src/common/backend/libpq/pqcomm.cpp
@@ -564,7 +564,7 @@ static void StreamDoUnlink(int code, Datum arg)
*/
int StreamServerPort(int family, char* hostName, unsigned short portNumber, const char* unixSocketName,
pgsocket ListenSocket[], int MaxListen, bool add_localaddr_flag,
- bool is_create_psql_sock, bool is_create_libcomm_sock,
+ bool is_create_psql_sock, bool is_create_libcomm_sock, ListenChanelType listen_channel,
ProtocolExtensionConfig* protocol_config) {
#define RETRY_SLEEP_TIME 1000000L
pgsocket fd = PGINVALID_SOCKET;
@@ -816,19 +816,23 @@ int StreamServerPort(int family, char* hostName, unsigned short portNumber, cons
result = inet_net_ntop(AF_INET6,
&((struct sockaddr_in6*)sinp)->sin6_addr,
128,
- g_instance.listen_cxt.LocalAddrList[g_instance.listen_cxt.LocalIpNum],
+ t_thrd.postmaster_cxt.LocalAddrList[t_thrd.postmaster_cxt.LocalIpNum],
IP_LEN);
} else if (addr->ai_family == AF_INET) {
result = inet_net_ntop(AF_INET,
&((struct sockaddr_in*)sinp)->sin_addr,
32,
- g_instance.listen_cxt.LocalAddrList[g_instance.listen_cxt.LocalIpNum],
+ t_thrd.postmaster_cxt.LocalAddrList[t_thrd.postmaster_cxt.LocalIpNum],
IP_LEN);
}
if (result == NULL) {
ereport(WARNING, (errmsg("inet_net_ntop failed, error: %d", EAFNOSUPPORT)));
} else {
- g_instance.listen_cxt.LocalIpNum++;
+ ereport(DEBUG5, (errmodule(MOD_COMM_FRAMEWORK),
+ errmsg("[reload listen IP]set LocalIpNum[%d] %s",
+ t_thrd.postmaster_cxt.LocalIpNum,
+ t_thrd.postmaster_cxt.LocalAddrList[t_thrd.postmaster_cxt.LocalIpNum])));
+ t_thrd.postmaster_cxt.LocalIpNum++;
}
}
if (is_create_psql_sock) {
@@ -837,6 +841,20 @@ int StreamServerPort(int family, char* hostName, unsigned short portNumber, cons
g_instance.listen_cxt.listen_sock_type[listen_index] = HA_LISTEN_SOCKET;
}
+ /*
+ * note:
+ * NORMAL_LISTEN_CHANEL include : listen_address or libcomm_bind_addr.
+ * REPL_LISTEN_CHANEL include : replication_info
+ * EXT_LISTEN_CHANEL include : listen_address_ext
+ */
+ g_instance.listen_cxt.listen_chanel_type[listen_index] = listen_channel;
+
+ /* for debug info */
+ rc = strcpy_s(g_instance.listen_cxt.all_listen_addr_list[listen_index], IP_LEN,
+ (hostName == NULL) ? ((addr->ai_family == AF_UNIX) ? "unix domain" : "*") : hostName);
+ securec_check(rc, "", "");
+ g_instance.listen_cxt.all_listen_port_list[listen_index] = portNumber;
+
continue;
errhandle:
diff --git a/src/common/backend/nodes/copyfuncs.cpp b/src/common/backend/nodes/copyfuncs.cpp
index 6c7c375e7..82012ad8e 100644
--- a/src/common/backend/nodes/copyfuncs.cpp
+++ b/src/common/backend/nodes/copyfuncs.cpp
@@ -181,6 +181,7 @@ static PlannedStmt* _copyPlannedStmt(const PlannedStmt* from)
COPY_SCALAR_FIELD(is_stream_plan);
COPY_SCALAR_FIELD(multi_node_hint);
COPY_SCALAR_FIELD(uniqueSQLId);
+ COPY_SCALAR_FIELD(cause_type);
/*
* Not copy ng_queryMem to avoid memory leak in CachedPlan context,
@@ -1274,6 +1275,9 @@ static HashJoin* _copyHashJoin(const HashJoin* from)
COPY_SCALAR_FIELD(isSonicHash);
CopyMemInfoFields(&from->mem_info, &newnode->mem_info);
COPY_SCALAR_FIELD(joinRows);
+#ifndef ENABLE_MULTIPLE_NODES
+ COPY_NODE_FIELD(hash_collations);
+#endif
return newnode;
}
@@ -1336,6 +1340,11 @@ static Group* _copyGroup(const Group* from)
if (from->numCols > 0) {
COPY_POINTER_FIELD(grpColIdx, from->numCols * sizeof(AttrNumber));
COPY_POINTER_FIELD(grpOperators, from->numCols * sizeof(Oid));
+#ifndef ENABLE_MULTIPLE_NODES
+ if (from->grp_collations) {
+ COPY_POINTER_FIELD(grp_collations, from->numCols * sizeof(Oid));
+ }
+#endif
}
return newnode;
@@ -1355,6 +1364,11 @@ static Agg* _copyAgg(const Agg* from)
if (from->numCols > 0) {
COPY_POINTER_FIELD(grpColIdx, from->numCols * sizeof(AttrNumber));
COPY_POINTER_FIELD(grpOperators, from->numCols * sizeof(Oid));
+#ifndef ENABLE_MULTIPLE_NODES
+ if (from->grp_collations) {
+ COPY_POINTER_FIELD(grp_collations, from->numCols * sizeof(Oid));
+ }
+#endif
}
COPY_SCALAR_FIELD(numGroups);
COPY_NODE_FIELD(groupingSets);
@@ -1389,11 +1403,17 @@ static WindowAgg* _copyWindowAgg(const WindowAgg* from)
if (from->partNumCols > 0) {
COPY_POINTER_FIELD(partColIdx, from->partNumCols * sizeof(AttrNumber));
COPY_POINTER_FIELD(partOperators, from->partNumCols * sizeof(Oid));
+#ifndef ENABLE_MULTIPLE_NODES
+ COPY_POINTER_FIELD(part_collations, from->partNumCols * sizeof(Oid));
+#endif
}
COPY_SCALAR_FIELD(ordNumCols);
if (from->ordNumCols > 0) {
COPY_POINTER_FIELD(ordColIdx, from->ordNumCols * sizeof(AttrNumber));
COPY_POINTER_FIELD(ordOperators, from->ordNumCols * sizeof(Oid));
+#ifndef ENABLE_MULTIPLE_NODES
+ COPY_POINTER_FIELD(ord_collations, from->ordNumCols * sizeof(Oid));
+#endif
}
COPY_SCALAR_FIELD(frameOptions);
COPY_NODE_FIELD(startOffset);
@@ -1422,6 +1442,9 @@ static Unique* _copyUnique(const Unique* from)
if (from->numCols > 0) {
COPY_POINTER_FIELD(uniqColIdx, from->numCols * sizeof(AttrNumber));
COPY_POINTER_FIELD(uniqOperators, from->numCols * sizeof(Oid));
+#ifndef ENABLE_MULTIPLE_NODES
+ COPY_POINTER_FIELD(uniq_collations, from->numCols * sizeof(Oid));
+#endif
}
return newnode;
@@ -1472,6 +1495,9 @@ static SetOp* _copySetOp(const SetOp* from)
if (from->numCols > 0) {
COPY_POINTER_FIELD(dupColIdx, from->numCols * sizeof(AttrNumber));
COPY_POINTER_FIELD(dupOperators, from->numCols * sizeof(Oid));
+#ifndef ENABLE_MULTIPLE_NODES
+ COPY_POINTER_FIELD(dup_collations, from->numCols * sizeof(Oid));
+#endif
}
COPY_SCALAR_FIELD(flagColIdx);
COPY_SCALAR_FIELD(firstFlag);
@@ -2329,6 +2355,9 @@ static IntoClause* _copyIntoClause(const IntoClause* from)
COPY_NODE_FIELD(distributeby);
COPY_NODE_FIELD(subcluster);
#endif
+ COPY_NODE_FIELD(copyOption);
+ COPY_STRING_FIELD(filename);
+ COPY_SCALAR_FIELD(is_outfile);
return newnode;
}
@@ -2853,6 +2882,7 @@ static CaseExpr* _copyCaseExpr(const CaseExpr* from)
COPY_NODE_FIELD(args);
COPY_NODE_FIELD(defresult);
COPY_LOCATION_FIELD(location);
+ COPY_SCALAR_FIELD(fromDecode);
return newnode;
}
@@ -3214,6 +3244,7 @@ static PartitionState* _copyPartitionState(const PartitionState* from)
COPY_SCALAR_FIELD(rowMovement);
COPY_NODE_FIELD(subPartitionState);
COPY_NODE_FIELD(partitionNameList);
+ COPY_SCALAR_FIELD(partitionsNum);
return newnode;
}
@@ -3228,6 +3259,7 @@ static RangePartitionDefState* _copyRangePartitionDefState(const RangePartitionD
COPY_SCALAR_FIELD(curStartVal);
COPY_STRING_FIELD(partitionInitName);
COPY_NODE_FIELD(subPartitionDefState);
+ COPY_SCALAR_FIELD(partitionno);
return newnode;
}
@@ -3240,6 +3272,7 @@ static HashPartitionDefState* _copyHashPartitionDefState(const HashPartitionDefS
COPY_NODE_FIELD(boundary);
COPY_STRING_FIELD(tablespacename);
COPY_NODE_FIELD(subPartitionDefState);
+ COPY_SCALAR_FIELD(partitionno);
return newnode;
}
@@ -3252,6 +3285,7 @@ static ListPartitionDefState* _copyListPartitionDefState(const ListPartitionDefS
COPY_NODE_FIELD(boundary);
COPY_STRING_FIELD(tablespacename);
COPY_NODE_FIELD(subPartitionDefState);
+ COPY_SCALAR_FIELD(partitionno);
return newnode;
}
@@ -3923,6 +3957,7 @@ static TypeName* _copyTypeName(const TypeName* from)
COPY_LOCATION_FIELD(location);
COPY_LOCATION_FIELD(end_location);
COPY_SCALAR_FIELD(pct_rowtype);
+ COPY_SCALAR_FIELD(charset);
return newnode;
}
@@ -4858,6 +4893,8 @@ static AlterTableCmd* _copyAlterTableCmd(const AlterTableCmd* from)
COPY_SCALAR_FIELD(additional_property);
COPY_NODE_FIELD(bucket_list);
COPY_SCALAR_FIELD(alterGPI);
+ COPY_SCALAR_FIELD(is_first);
+ COPY_STRING_FIELD(after_name);
return newnode;
}
@@ -5087,6 +5124,8 @@ static void CopyCreateStmtFields(const CreateStmt* from, CreateStmt* newnode)
COPY_NODE_FIELD(uuids);
COPY_SCALAR_FIELD(relkind);
COPY_NODE_FIELD(autoIncStart);
+ COPY_SCALAR_FIELD(charset);
+ COPY_STRING_FIELD(collate);
}
static CreateStmt* _copyCreateStmt(const CreateStmt* from)
@@ -5959,6 +5998,8 @@ static AlterSchemaStmt* _copyAlterSchemaStmt(const AlterSchemaStmt* from)
COPY_STRING_FIELD(schemaname);
COPY_STRING_FIELD(authid);
COPY_SCALAR_FIELD(hasBlockChain);
+ COPY_SCALAR_FIELD(charset);
+ COPY_STRING_FIELD(collate);
return newnode;
}
@@ -6273,6 +6314,8 @@ static CreateSchemaStmt* _copyCreateSchemaStmt(const CreateSchemaStmt* from)
COPY_SCALAR_FIELD(hasBlockChain);
COPY_NODE_FIELD(schemaElts);
COPY_NODE_FIELD(uuids);
+ COPY_SCALAR_FIELD(charset);
+ COPY_STRING_FIELD(collate);
return newnode;
}
@@ -6910,6 +6953,8 @@ static SubPartitionPruningResult *_copySubPartitionPruningResult(const SubPartit
COPY_SCALAR_FIELD(partSeq);
COPY_BITMAPSET_FIELD(bm_selectedSubPartitions);
COPY_NODE_FIELD(ls_selectedSubPartitions);
+ COPY_SCALAR_FIELD(partitionno);
+ COPY_NODE_FIELD(ls_selectedSubPartitionnos);
return newnode;
}
@@ -6938,15 +6983,15 @@ static IndexOptInfo *_copyPartialIndexOptInfo(const IndexOptInfo *from)
COPY_SCALAR_FIELD(pages);
COPY_SCALAR_FIELD(tuples);
+
COPY_SCALAR_FIELD(ncolumns);
COPY_SCALAR_FIELD(nkeycolumns);
- COPY_SCALAR_FIELD(relam);
- newnode->opfamily = (Oid *)palloc0(sizeof(Oid) * from->nkeycolumns);
- rc = memcpy_s(newnode->opfamily,
- sizeof(Oid) * from->nkeycolumns,
- from->opfamily,
- sizeof(int) * from->nkeycolumns);
+ newnode->indexkeys = (int *)palloc0(sizeof(int) * from->ncolumns);
+ rc = memcpy_s(newnode->indexkeys,
+ sizeof(int) * from->ncolumns,
+ from->indexkeys,
+ sizeof(int) * from->ncolumns);
securec_check(rc, "", "");
newnode->indexcollations = (Oid *)palloc0(sizeof(Oid) * from->nkeycolumns);
@@ -6956,16 +7001,63 @@ static IndexOptInfo *_copyPartialIndexOptInfo(const IndexOptInfo *from)
sizeof(int) * from->nkeycolumns);
securec_check(rc, "", "");
- newnode->indexkeys = (int *)palloc0(sizeof(int) * from->ncolumns);
- rc = memcpy_s(newnode->indexkeys,
- sizeof(int) * from->ncolumns,
- from->indexkeys,
- sizeof(int) * from->ncolumns);
+ newnode->opfamily = (Oid *)palloc0(sizeof(Oid) * from->nkeycolumns);
+ rc = memcpy_s(newnode->opfamily,
+ sizeof(Oid) * from->nkeycolumns,
+ from->opfamily,
+ sizeof(int) * from->nkeycolumns);
securec_check(rc, "", "");
+ newnode->opcintype = (Oid *)palloc0(sizeof(Oid) * from->nkeycolumns);
+ rc = memcpy_s(newnode->opcintype,
+ sizeof(Oid) * from->nkeycolumns,
+ from->opcintype,
+ sizeof(Oid) * from->nkeycolumns);
+ securec_check(rc, "", "");
+
+ newnode->sortopfamily = (Oid *)palloc0(sizeof(Oid) * from->nkeycolumns);
+ rc = memcpy_s(newnode->sortopfamily,
+ sizeof(Oid) * from->nkeycolumns,
+ from->sortopfamily,
+ sizeof(Oid) * from->nkeycolumns);
+ securec_check(rc, "", "");
+
+ newnode->reverse_sort = (bool *)palloc0(sizeof(bool) * from->nkeycolumns);
+ rc = memcpy_s(newnode->reverse_sort,
+ sizeof(bool) * from->nkeycolumns,
+ from->reverse_sort,
+ sizeof(bool) * from->nkeycolumns);
+ securec_check(rc, "", "");
+
+ newnode->nulls_first = (bool *)palloc0(sizeof(bool) * from->nkeycolumns);
+ rc = memcpy_s(newnode->nulls_first,
+ sizeof(bool) * from->nkeycolumns,
+ from->nulls_first,
+ sizeof(bool) * from->nkeycolumns);
+ securec_check(rc, "", "");
+
+ COPY_SCALAR_FIELD(relam);
+ COPY_SCALAR_FIELD(amcostestimate);
+
+ COPY_NODE_FIELD(indexprs);
COPY_NODE_FIELD(indpred);
+
+ COPY_NODE_FIELD(indextlist);
+
+ COPY_SCALAR_FIELD(isGlobal);
+ COPY_SCALAR_FIELD(crossbucket);
COPY_SCALAR_FIELD(predOK);
COPY_SCALAR_FIELD(unique);
+ COPY_SCALAR_FIELD(immediate);
+ COPY_SCALAR_FIELD(hypothetical);
+ COPY_SCALAR_FIELD(canreturn);
+ COPY_SCALAR_FIELD(amcanorderbyop);
+ COPY_SCALAR_FIELD(amoptionalkey);
+ COPY_SCALAR_FIELD(amsearcharray);
+ COPY_SCALAR_FIELD(amsearchnulls);
+ COPY_SCALAR_FIELD(amhasgettuple);
+ COPY_SCALAR_FIELD(amhasgetbitmap);
+
return newnode;
}
@@ -7115,6 +7207,15 @@ static AutoIncrement *_copyAutoIncrement(const AutoIncrement *from)
return newnode;
}
+/* deep-copy a CharsetCollateOptions parse node (cctype, charset, collate) */
+static CharsetCollateOptions *_copyCharsetcollateOptions(const CharsetCollateOptions* from)
+{
+    CharsetCollateOptions* newnode = makeNode(CharsetCollateOptions);
+    COPY_SCALAR_FIELD(cctype);
+    COPY_SCALAR_FIELD(charset);
+    COPY_STRING_FIELD(collate);
+    return newnode;
+}
+
static PrefixKey* _copyPrefixKey(const PrefixKey* from)
{
PrefixKey* newnode = makeNode(PrefixKey);
@@ -7125,6 +7226,54 @@ static PrefixKey* _copyPrefixKey(const PrefixKey* from)
return newnode;
}
+static CreateEventStmt *node_copy_create_event_info(const CreateEventStmt *from)
+{
+ CreateEventStmt* newnode = makeNode(CreateEventStmt);
+ COPY_NODE_FIELD(event_name);
+ COPY_NODE_FIELD(start_time_expr);
+ COPY_NODE_FIELD(end_time_expr);
+ COPY_NODE_FIELD(interval_time);
+ COPY_STRING_FIELD(def_name);
+ COPY_STRING_FIELD(event_comment_str);
+ COPY_STRING_FIELD(event_query_str);
+ COPY_SCALAR_FIELD(complete_preserve);
+ COPY_SCALAR_FIELD(if_not_exists);
+ COPY_SCALAR_FIELD(event_status);
+ return newnode;
+}
+
+static AlterEventStmt *node_copy_alter_event_info(const AlterEventStmt *from)
+{
+ AlterEventStmt* newnode = makeNode(AlterEventStmt);
+ COPY_NODE_FIELD(def_name);
+ COPY_NODE_FIELD(event_name);
+ COPY_NODE_FIELD(start_time_expr);
+ COPY_NODE_FIELD(end_time_expr);
+ COPY_NODE_FIELD(interval_time);
+ COPY_NODE_FIELD(complete_preserve);
+ COPY_NODE_FIELD(event_status);
+ COPY_NODE_FIELD(event_comment_str);
+ COPY_NODE_FIELD(event_query_str);
+ COPY_NODE_FIELD(new_name);
+ return newnode;
+}
+
+static DropEventStmt *node_copy_drop_event_info(const DropEventStmt *from)
+{
+ DropEventStmt* newnode = makeNode(DropEventStmt);
+ COPY_NODE_FIELD(event_name);
+ COPY_SCALAR_FIELD(missing_ok);
+ return newnode;
+}
+
+static ShowEventStmt *node_copy_show_event_info(const ShowEventStmt *from)
+{
+ ShowEventStmt* newnode = makeNode(ShowEventStmt);
+ COPY_NODE_FIELD(from_clause);
+ COPY_STRING_FIELD(where_clause);
+ return newnode;
+}
+
/*
* copyObject
*
@@ -8415,9 +8564,24 @@ void* copyObject(const void* from)
case T_PlannerGlobal:
retval = _copyPlannerGlobal((PlannerGlobal*)from);
break;
+ case T_CharsetCollateOptions:
+ retval = _copyCharsetcollateOptions((CharsetCollateOptions*)from);
+ break;
case T_IndexOptInfo:
retval = _copyPartialIndexOptInfo((IndexOptInfo *)from);
break;
+        case T_CreateEventStmt:
+            retval = node_copy_create_event_info((CreateEventStmt *)from);
+            break;
+        case T_AlterEventStmt:
+            retval = node_copy_alter_event_info((AlterEventStmt *)from);
+            break;
+        case T_DropEventStmt:
+            retval = node_copy_drop_event_info((DropEventStmt *)from);
+            break;
+        case T_ShowEventStmt:
+            retval = node_copy_show_event_info((ShowEventStmt *)from);
+            break;
default:
ereport(ERROR,
(errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("copyObject: unrecognized node type: %d", (int)nodeTag(from))));
diff --git a/src/common/backend/nodes/equalfuncs.cpp b/src/common/backend/nodes/equalfuncs.cpp
index e33289161..59f127436 100644
--- a/src/common/backend/nodes/equalfuncs.cpp
+++ b/src/common/backend/nodes/equalfuncs.cpp
@@ -145,6 +145,9 @@ static bool _equalIntoClause(const IntoClause* a, const IntoClause* b)
COMPARE_SCALAR_FIELD(ivm);
COMPARE_SCALAR_FIELD(relkind);
COMPARE_NODE_FIELD(userVarList);
+ COMPARE_NODE_FIELD(copyOption);
+ COMPARE_STRING_FIELD(filename);
+ COMPARE_SCALAR_FIELD(is_outfile);
return true;
}
@@ -490,6 +493,7 @@ static bool _equalCaseExpr(const CaseExpr* a, const CaseExpr* b)
COMPARE_NODE_FIELD(args);
COMPARE_NODE_FIELD(defresult);
COMPARE_LOCATION_FIELD(location);
+ COMPARE_SCALAR_FIELD(fromDecode);
return true;
}
@@ -1100,6 +1104,8 @@ static bool _equalAlterTableCmd(const AlterTableCmd* a, const AlterTableCmd* b)
COMPARE_STRING_FIELD(target_partition_tablespace);
COMPARE_NODE_FIELD(bucket_list);
COMPARE_SCALAR_FIELD(alterGPI);
+ COMPARE_SCALAR_FIELD(is_first);
+ COMPARE_STRING_FIELD(after_name);
return true;
}
@@ -1261,6 +1267,8 @@ static bool _equalRangePartitionDefState(const RangePartitionDefState* a, const
COMPARE_STRING_FIELD(tablespacename);
COMPARE_SCALAR_FIELD(curStartVal);
COMPARE_STRING_FIELD(partitionInitName);
+ COMPARE_NODE_FIELD(subPartitionDefState);
+ COMPARE_SCALAR_FIELD(partitionno);
return true;
}
@@ -1270,6 +1278,8 @@ static bool _equalListPartitionDefState(const ListPartitionDefState* a, const Li
COMPARE_STRING_FIELD(partitionName);
COMPARE_NODE_FIELD(boundary);
COMPARE_STRING_FIELD(tablespacename);
+ COMPARE_NODE_FIELD(subPartitionDefState);
+ COMPARE_SCALAR_FIELD(partitionno);
return true;
}
@@ -1279,6 +1289,8 @@ static bool _equalHashPartitionDefState(const HashPartitionDefState* a, const Ha
COMPARE_STRING_FIELD(partitionName);
COMPARE_NODE_FIELD(boundary);
COMPARE_STRING_FIELD(tablespacename);
+ COMPARE_NODE_FIELD(subPartitionDefState);
+ COMPARE_SCALAR_FIELD(partitionno);
return true;
}
@@ -1338,6 +1350,7 @@ static bool _equalPartitionState(const PartitionState* a, const PartitionState*
COMPARE_SCALAR_FIELD(rowMovement);
COMPARE_NODE_FIELD(subPartitionState);
COMPARE_NODE_FIELD(partitionNameList);
+ COMPARE_SCALAR_FIELD(partitionsNum);
return true;
}
@@ -2066,6 +2079,8 @@ static bool _equalAlterSchemaStmt(const AlterSchemaStmt* a, const AlterSchemaStm
COMPARE_STRING_FIELD(schemaname);
COMPARE_STRING_FIELD(authid);
COMPARE_SCALAR_FIELD(hasBlockChain);
+ COMPARE_SCALAR_FIELD(charset);
+ COMPARE_STRING_FIELD(collate);
return true;
}
@@ -2325,6 +2340,9 @@ static bool _equalCreateSchemaStmt(const CreateSchemaStmt* a, const CreateSchema
COMPARE_STRING_FIELD(authid);
COMPARE_SCALAR_FIELD(hasBlockChain);
COMPARE_NODE_FIELD(schemaElts);
+ COMPARE_NODE_FIELD(uuids);
+ COMPARE_SCALAR_FIELD(charset);
+ COMPARE_STRING_FIELD(collate);
return true;
}
@@ -2517,6 +2535,8 @@ static bool _equalTypeName(const TypeName* a, const TypeName* b)
COMPARE_NODE_FIELD(arrayBounds);
COMPARE_LOCATION_FIELD(location);
COMPARE_LOCATION_FIELD(end_location);
+ COMPARE_SCALAR_FIELD(pct_rowtype);
+ COMPARE_SCALAR_FIELD(charset);
return true;
}
@@ -3392,6 +3412,14 @@ static bool _equalAutoIncrement(const AutoIncrement* a, const AutoIncrement* b)
return true;
}
+static bool _equalCharsetcollateOptions(const CharsetCollateOptions* a, const CharsetCollateOptions* b)
+{
+ COMPARE_SCALAR_FIELD(cctype);
+ COMPARE_SCALAR_FIELD(charset);
+ COMPARE_STRING_FIELD(collate);
+ return true;
+}
+
static bool _equalPrefixKey(const PrefixKey* a, const PrefixKey* b)
{
COMPARE_NODE_FIELD(arg);
@@ -3399,6 +3427,50 @@ static bool _equalPrefixKey(const PrefixKey* a, const PrefixKey* b)
return true;
}
+static bool node_equal_create_event_info(const CreateEventStmt* a, const CreateEventStmt* b)
+{
+ COMPARE_NODE_FIELD(event_name);
+ COMPARE_NODE_FIELD(start_time_expr);
+ COMPARE_NODE_FIELD(end_time_expr);
+ COMPARE_NODE_FIELD(interval_time);
+ COMPARE_STRING_FIELD(def_name);
+ COMPARE_STRING_FIELD(event_comment_str);
+ COMPARE_STRING_FIELD(event_query_str);
+ COMPARE_SCALAR_FIELD(complete_preserve);
+ COMPARE_SCALAR_FIELD(if_not_exists);
+ COMPARE_SCALAR_FIELD(event_status);
+ return true;
+}
+
+static bool node_equal_alter_event_info(const AlterEventStmt* a, const AlterEventStmt* b)
+{
+ COMPARE_NODE_FIELD(def_name);
+ COMPARE_NODE_FIELD(event_name);
+ COMPARE_NODE_FIELD(start_time_expr);
+ COMPARE_NODE_FIELD(end_time_expr);
+ COMPARE_NODE_FIELD(interval_time);
+ COMPARE_NODE_FIELD(complete_preserve);
+ COMPARE_NODE_FIELD(event_status);
+ COMPARE_NODE_FIELD(event_comment_str);
+ COMPARE_NODE_FIELD(event_query_str);
+ COMPARE_NODE_FIELD(new_name);
+ return true;
+}
+
+static bool node_equal_drop_event_info(const DropEventStmt* a, const DropEventStmt* b)
+{
+ COMPARE_NODE_FIELD(event_name);
+ COMPARE_SCALAR_FIELD(missing_ok);
+ return true;
+}
+
+static bool node_equal_show_event_info(const ShowEventStmt* a, const ShowEventStmt* b)
+{
+ COMPARE_NODE_FIELD(from_clause);
+ COMPARE_STRING_FIELD(where_clause);
+ return true;
+}
+
/*
* equal
* returns whether two nodes are equal
@@ -4287,9 +4359,25 @@ bool equal(const void* a, const void* b)
case T_AutoIncrement:
retval = _equalAutoIncrement((const AutoIncrement *)a, (const AutoIncrement *)b);
break;
+ case T_CharsetCollateOptions:
+ retval = _equalCharsetcollateOptions((const CharsetCollateOptions *)a,
+ (const CharsetCollateOptions *)b);
+ break;
case T_PrefixKey:
retval = _equalPrefixKey((PrefixKey *)a, (PrefixKey *)b);
break;
+ case T_CreateEventStmt:
+ retval = node_equal_create_event_info((const CreateEventStmt *)a, (const CreateEventStmt *)b);
+ break;
+ case T_AlterEventStmt:
+ retval = node_equal_alter_event_info((const AlterEventStmt *)a, (const AlterEventStmt *)b);
+ break;
+ case T_DropEventStmt:
+ retval = node_equal_drop_event_info((const DropEventStmt *)a, (const DropEventStmt *)b);
+ break;
+ case T_ShowEventStmt:
+ retval = node_equal_show_event_info((const ShowEventStmt *)a, (const ShowEventStmt *)b);
+ break;
default:
ereport(ERROR,
diff --git a/src/common/backend/nodes/makefuncs.cpp b/src/common/backend/nodes/makefuncs.cpp
index 7077cd226..30396d51b 100644
--- a/src/common/backend/nodes/makefuncs.cpp
+++ b/src/common/backend/nodes/makefuncs.cpp
@@ -26,6 +26,7 @@
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "storage/item/itemptr.h"
+#include "mb/pg_wchar.h"
#ifndef FRONTEND_PARSER
#include "utils/lsyscache.h"
@@ -489,6 +490,7 @@ TypeName* makeTypeNameFromNameList(List* names)
n->typemod = -1;
n->location = -1;
n->pct_rowtype = false;
+ n->charset = PG_INVALID_ENCODING;
return n;
}
@@ -503,6 +505,7 @@ TypeName* makeTypeNameFromOid(Oid typeOid, int32 typmod)
n->typeOid = typeOid;
n->typemod = typmod;
n->location = -1;
+ n->charset = PG_INVALID_ENCODING;
return n;
}
diff --git a/src/common/backend/nodes/nodeFuncs.cpp b/src/common/backend/nodes/nodeFuncs.cpp
index 852011e44..0b081b1d9 100644
--- a/src/common/backend/nodes/nodeFuncs.cpp
+++ b/src/common/backend/nodes/nodeFuncs.cpp
@@ -33,8 +33,10 @@
#include "parser/parse_expr.h"
#endif /* FRONTEND_PARSER */
#include "storage/tcap.h"
+#include "parser/parse_utilcmd.h"
static bool expression_returns_set_walker(Node* node, void* context);
+static bool expression_rownum_walker(Node* node, void* context);
static int leftmostLoc(int loc1, int loc2);
/*
@@ -70,7 +72,7 @@ Oid exprType(const Node* expr)
type = ((const Const*)expr)->consttype;
break;
case T_UserVar:
- type = ((const Const*)(((UserVar*)expr)->value))->consttype;
+ type = exprType((const Node*)(((UserVar*)expr)->value));
break;
case T_Param:
type = ((const Param*)expr)->paramtype;
@@ -675,6 +677,32 @@ static bool expression_returns_set_walker(Node* node, void* context)
return expression_tree_walker(node, (bool (*)())expression_returns_set_walker, context);
}
+/*
+ * expression_contains_rownum
+ * Test whether an expression contains rownum.
+ *
+ * Because we use expression_tree_walker(), this can also be applied to
+ * whole targetlists; it'll produce TRUE if any one of the tlist items
+ * contain rownum.
+ */
+bool expression_contains_rownum(Node* node)
+{
+ return expression_rownum_walker(node, NULL);
+}
+
+static bool expression_rownum_walker(Node* node, void* context)
+{
+ if (node == NULL) {
+ return false;
+ }
+
+ if (IsA(node, Rownum)) {
+ return true;
+ }
+
+ return expression_tree_walker(node, (bool (*)())expression_rownum_walker, context);
+}
+
/*
* exprCollation -
* returns the Oid of the collation of the expression's result.
@@ -884,6 +912,15 @@ Oid exprCollation(const Node* expr)
return coll;
}
+/*
+ * exprCharset -
+ * returns the character set of the expression's result.
+ */
+int exprCharset(const Node* expr)
+{
+ return get_charset_by_collation(exprCollation(expr));
+}
+
/*
* exprInputCollation -
* returns the Oid of the collation a function should use, if available.
@@ -3319,6 +3356,8 @@ bool raw_expression_tree_walker(Node* node, bool (*walker)(), void* context)
return p2walker(((UpsertClause*)node)->targetList, context);
case T_CommonTableExpr:
return p2walker(((CommonTableExpr*)node)->ctequery, context);
+ case T_AutoIncrement:
+ return p2walker(((AutoIncrement*)node)->expr, context);
default:
ereport(ERROR,
(errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unrecognized node type: %d", (int)nodeTag(node))));
diff --git a/src/common/backend/nodes/nodes.cpp b/src/common/backend/nodes/nodes.cpp
index b24b14b32..556e94540 100755
--- a/src/common/backend/nodes/nodes.cpp
+++ b/src/common/backend/nodes/nodes.cpp
@@ -317,6 +317,10 @@ static const TagStr g_tagStrArr[] = {{T_Invalid, "Invalid"},
{T_FetchStmt, "FetchStmt"},
{T_IndexStmt, "IndexStmt"},
{T_CreateFunctionStmt, "CreateFunctionStmt"},
+ {T_CreateEventStmt, "CreateEventStmt"},
+ {T_AlterEventStmt, "AlterEventStmt"},
+ {T_DropEventStmt, "DropEventStmt"},
+ {T_ShowEventStmt, "ShowEventStmt"},
{T_CreatePackageStmt, "CreatePackageStmt"},
{T_CreatePackageBodyStmt, "CreatePackageBodyStmt"},
{T_AddTableIntoCBIState, "AddTableIntoCBIState"},
diff --git a/src/common/backend/nodes/outfuncs.cpp b/src/common/backend/nodes/outfuncs.cpp
index c0794e857..ff90ad0f3 100755
--- a/src/common/backend/nodes/outfuncs.cpp
+++ b/src/common/backend/nodes/outfuncs.cpp
@@ -635,6 +635,9 @@ static void _outPlannedStmt(StringInfo str, PlannedStmt* node)
}
WRITE_BOOL_FIELD(isRowTriggerShippable);
WRITE_BOOL_FIELD(is_stream_plan);
+ if (t_thrd.proc->workingVersionNum >= SLOW_SQL_VERSION_NUM) {
+ WRITE_UINT_FIELD(cause_type);
+ }
}
/*
@@ -671,9 +674,11 @@ static void _outPlanInfo(StringInfo str, Plan* node)
WRITE_BOOL_FIELD(vec_output);
WRITE_BOOL_FIELD(hasUniqueResults);
WRITE_BOOL_FIELD(isDeltaTable);
- WRITE_INT_FIELD(operatorMemKB[0]);
- WRITE_INT_FIELD(operatorMemKB[1]);
- WRITE_INT_FIELD(operatorMaxMem);
+ if (u_sess->opt_cxt.out_plan_stat) {
+ WRITE_INT_FIELD(operatorMemKB[0]);
+ WRITE_INT_FIELD(operatorMemKB[1]);
+ WRITE_INT_FIELD(operatorMaxMem);
+ }
WRITE_BOOL_FIELD(parallel_enabled);
WRITE_BOOL_FIELD(hasHashFilter);
@@ -686,10 +691,12 @@ static void _outPlanInfo(StringInfo str, Plan* node)
WRITE_BOOL_FIELD(is_sync_plannode);
if (t_thrd.proc->workingVersionNum >= ML_OPT_MODEL_VERSION_NUM) {
- WRITE_FLOAT_FIELD(pred_rows, "%.0f");
- WRITE_FLOAT_FIELD(pred_startup_time, "%.0f");
- WRITE_FLOAT_FIELD(pred_total_time, "%.0f");
- WRITE_FLOAT_FIELD(pred_max_memory, "%ld");
+ if (u_sess->opt_cxt.out_plan_stat) {
+ WRITE_FLOAT_FIELD(pred_rows, "%.0f");
+ WRITE_FLOAT_FIELD(pred_startup_time, "%.0f");
+ WRITE_FLOAT_FIELD(pred_total_time, "%.0f");
+ WRITE_FLOAT_FIELD(pred_max_memory, "%ld");
+ }
}
}
@@ -713,7 +720,9 @@ static void _outPruningResult(StringInfo str, PruningResult* node)
if (t_thrd.proc->workingVersionNum >= PBESINGLEPARTITION_VERSION_NUM) {
WRITE_BOOL_FIELD(isPbeSinlePartition);
}
- /* skip PartitionMap */
+ if (t_thrd.proc->workingVersionNum >= PARTITION_ENHANCE_VERSION_NUM) {
+ WRITE_NODE_FIELD(ls_selectedPartitionnos);
+ }
}
static void _outSubPartitionPruningResult(StringInfo str, SubPartitionPruningResult* node)
@@ -723,6 +732,10 @@ static void _outSubPartitionPruningResult(StringInfo str, SubPartitionPruningRes
WRITE_INT_FIELD(partSeq);
WRITE_BITMAPSET_FIELD(bm_selectedSubPartitions);
WRITE_NODE_FIELD(ls_selectedSubPartitions);
+ if (t_thrd.proc->workingVersionNum >= PARTITION_ENHANCE_VERSION_NUM) {
+ WRITE_INT_FIELD(partitionno);
+ WRITE_NODE_FIELD(ls_selectedSubPartitionnos);
+ }
}
/*
@@ -1078,7 +1091,9 @@ static void _outIndexScan(StringInfo str, IndexScan* node)
WRITE_BOOL_FIELD(is_ustore);
}
if (t_thrd.proc->workingVersionNum >= PLAN_SELECT_VERSION_NUM) {
- WRITE_FLOAT_FIELD(selectivity, "%.4f");
+ if (u_sess->opt_cxt.out_plan_stat) {
+ WRITE_FLOAT_FIELD(selectivity, "%.4f");
+ }
WRITE_BOOL_FIELD(is_partial);
}
}
@@ -1324,7 +1339,9 @@ static void _outIndexOnlyScan(StringInfo str, IndexOnlyScan* node)
WRITE_NODE_FIELD(indextlist);
WRITE_ENUM_FIELD(indexorderdir, ScanDirection);
if (t_thrd.proc->workingVersionNum >= PLAN_SELECT_VERSION_NUM) {
- WRITE_FLOAT_FIELD(selectivity, "%.4f");
+ if (u_sess->opt_cxt.out_plan_stat) {
+ WRITE_FLOAT_FIELD(selectivity, "%.4f");
+ }
WRITE_BOOL_FIELD(is_partial);
}
}
@@ -1350,7 +1367,9 @@ static void _outBitmapIndexScan(StringInfo str, BitmapIndexScan* node)
WRITE_BOOL_FIELD(is_ustore);
}
if (t_thrd.proc->workingVersionNum >= PLAN_SELECT_VERSION_NUM) {
- WRITE_FLOAT_FIELD(selectivity, "%.4f");
+ if (u_sess->opt_cxt.out_plan_stat) {
+ WRITE_FLOAT_FIELD(selectivity, "%.4f");
+ }
WRITE_BOOL_FIELD(is_partial);
}
}
@@ -1637,6 +1656,11 @@ static void _outHashJoin(StringInfo str, HashJoin* node)
WRITE_BOOL_FIELD(rebuildHashTable);
WRITE_BOOL_FIELD(isSonicHash);
out_mem_info(str, &node->mem_info);
+#ifndef ENABLE_MULTIPLE_NODES
+ if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) {
+ WRITE_NODE_FIELD(hash_collations);
+ }
+#endif
}
static void _outVecHashJoin(StringInfo str, VecHashJoin* node)
@@ -1704,6 +1728,11 @@ static void _outAgg(StringInfo str, Agg* node)
}
WRITE_GRPOP_FIELD(grpOperators, numCols);
+#ifndef ENABLE_MULTIPLE_NODES
+ if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) {
+ WRITE_GRPOP_FIELD(grp_collations, numCols);
+ }
+#endif
WRITE_LONG_FIELD(numGroups);
WRITE_NODE_FIELD(groupingSets);
@@ -1737,6 +1766,11 @@ static void _outWindowAgg(StringInfo str, WindowAgg* node)
}
WRITE_GRPOP_FIELD(partOperators, partNumCols);
+#ifndef ENABLE_MULTIPLE_NODES
+ if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) {
+ WRITE_GRPOP_FIELD(part_collations, partNumCols);
+ }
+#endif
WRITE_INT_FIELD(ordNumCols);
@@ -1746,6 +1780,11 @@ static void _outWindowAgg(StringInfo str, WindowAgg* node)
}
WRITE_GRPOP_FIELD(ordOperators, ordNumCols);
+#ifndef ENABLE_MULTIPLE_NODES
+ if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) {
+ WRITE_GRPOP_FIELD(ord_collations, ordNumCols);
+ }
+#endif
WRITE_INT_FIELD(frameOptions);
WRITE_NODE_FIELD(startOffset);
WRITE_NODE_FIELD(endOffset);
@@ -1768,6 +1807,11 @@ static void _outGroup(StringInfo str, Group* node)
}
WRITE_GRPOP_FIELD(grpOperators, numCols);
+#ifndef ENABLE_MULTIPLE_NODES
+ if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) {
+ WRITE_GRPOP_FIELD(grp_collations, numCols);
+ }
+#endif
}
static void _outVecGroup(StringInfo str, VecGroup* node)
@@ -1899,6 +1943,11 @@ static void _outUnique(StringInfo str, Unique* node)
}
WRITE_GRPOP_FIELD(uniqOperators, numCols);
+#ifndef ENABLE_MULTIPLE_NODES
+ if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) {
+ WRITE_GRPOP_FIELD(uniq_collations, numCols);
+ }
+#endif
}
static void _outVecUnique(StringInfo str, VecUnique* node)
@@ -1953,6 +2002,11 @@ static void _outSetOp(StringInfo str, SetOp* node)
}
WRITE_GRPOP_FIELD(dupOperators, numCols);
+#ifndef ENABLE_MULTIPLE_NODES
+ if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) {
+ WRITE_GRPOP_FIELD(dup_collations, numCols);
+ }
+#endif
WRITE_INT_FIELD(flagColIdx);
WRITE_INT_FIELD(firstFlag);
@@ -2112,6 +2166,11 @@ static void _outIntoClause(StringInfo str, IntoClause* node)
if (t_thrd.proc->workingVersionNum >= SELECT_INTO_VAR_VERSION_NUM) {
WRITE_NODE_FIELD(userVarList);
}
+ if (t_thrd.proc->workingVersionNum >= SELECT_INTO_FILE_VERSION_NUM) {
+ WRITE_NODE_FIELD(copyOption);
+ WRITE_STRING_FIELD(filename);
+ WRITE_BOOL_FIELD(is_outfile);
+ }
}
static void _outVar(StringInfo str, Var* node)
@@ -2557,9 +2616,10 @@ static void _outSubPlan(StringInfo str, SubPlan* node)
WRITE_NODE_FIELD(setParam);
WRITE_NODE_FIELD(parParam);
WRITE_NODE_FIELD(args);
- WRITE_FLOAT_FIELD(startup_cost, "%.2f");
- WRITE_FLOAT_FIELD(per_call_cost, "%.2f");
-
+ if (u_sess->opt_cxt.out_plan_stat) {
+ WRITE_FLOAT_FIELD(startup_cost, "%.2f");
+ WRITE_FLOAT_FIELD(per_call_cost, "%.2f");
+ }
WRITE_TYPEINFO_FIELD(firstColType);
}
@@ -3572,6 +3632,12 @@ static void _outRangePartitionDefState(StringInfo str, RangePartitionDefState* n
WRITE_STRING_FIELD(partitionName);
WRITE_NODE_FIELD(boundary);
WRITE_STRING_FIELD(tablespacename);
+ if (t_thrd.proc->workingVersionNum >= PARTITION_ENHANCE_VERSION_NUM) {
+ WRITE_NODE_FIELD(subPartitionDefState);
+ WRITE_INT_FIELD(partitionno);
+ WRITE_NODE_FIELD(curStartVal);
+ WRITE_STRING_FIELD(partitionInitName);
+ }
}
static void _outListPartitionDefState(StringInfo str, ListPartitionDefState* node)
@@ -3581,6 +3647,10 @@ static void _outListPartitionDefState(StringInfo str, ListPartitionDefState* nod
WRITE_STRING_FIELD(partitionName);
WRITE_NODE_FIELD(boundary);
WRITE_STRING_FIELD(tablespacename);
+ if (t_thrd.proc->workingVersionNum >= PARTITION_ENHANCE_VERSION_NUM) {
+ WRITE_NODE_FIELD(subPartitionDefState);
+ WRITE_INT_FIELD(partitionno);
+ }
}
static void _outHashPartitionDefState(StringInfo str, HashPartitionDefState* node)
@@ -3590,6 +3660,10 @@ static void _outHashPartitionDefState(StringInfo str, HashPartitionDefState* nod
WRITE_STRING_FIELD(partitionName);
WRITE_NODE_FIELD(boundary);
WRITE_STRING_FIELD(tablespacename);
+ if (t_thrd.proc->workingVersionNum >= PARTITION_ENHANCE_VERSION_NUM) {
+ WRITE_NODE_FIELD(subPartitionDefState);
+ WRITE_INT_FIELD(partitionno);
+ }
}
static void _outIntervalPartitionDefState(StringInfo str, IntervalPartitionDefState* node)
@@ -3616,6 +3690,7 @@ static void _outPartitionState(StringInfo str, PartitionState* node)
WRITE_ENUM_FIELD(rowMovement, RowMovementValue);
WRITE_NODE_FIELD(subPartitionState);
WRITE_NODE_FIELD(partitionNameList);
+ WRITE_INT_FIELD(partitionsNum);
}
static void _outRangePartitionindexDefState(StringInfo str, RangePartitionindexDefState* node)
@@ -3941,7 +4016,10 @@ static void _outTypeName(StringInfo str, TypeName* node)
{
WRITE_LOCATION_FIELD(end_location);
}
-
+ if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM)
+ {
+ WRITE_INT_FIELD(charset);
+ }
WRITE_TYPEINFO_FIELD(typeOid);
}
@@ -5819,6 +5897,14 @@ static void _outAutoIncrement(StringInfo str, AutoIncrement* node)
WRITE_OID_FIELD(autoincout_funcid);
}
+static void _outCharsetcollateOptions(StringInfo str, CharsetCollateOptions* node)
+{
+ WRITE_NODE_TYPE("CHARSETCOLLATE");
+ WRITE_ENUM_FIELD(cctype, CharsetCollateType);
+ WRITE_INT_FIELD(charset);
+ WRITE_STRING_FIELD(collate);
+}
+
static void _outPrefixKey(StringInfo str, PrefixKey* node)
{
WRITE_NODE_TYPE("PREFIXKEY");
@@ -6715,6 +6801,9 @@ static void _outNode(StringInfo str, const void* obj)
case T_PLDebug_frame:
_outPLDebug_frame(str, (PLDebug_frame*) obj);
break;
+ case T_CharsetCollateOptions:
+ _outCharsetcollateOptions(str, (CharsetCollateOptions*)obj);
+ break;
case T_AutoIncrement:
_outAutoIncrement(str, (AutoIncrement*)obj);
break;
diff --git a/src/common/backend/nodes/readfuncs.cpp b/src/common/backend/nodes/readfuncs.cpp
index 792c9e661..498f7a548 100755
--- a/src/common/backend/nodes/readfuncs.cpp
+++ b/src/common/backend/nodes/readfuncs.cpp
@@ -1962,6 +1962,16 @@ static IntoClause* _readIntoClause(void)
IF_EXIST(userVarList) {
READ_NODE_FIELD(userVarList);
}
+ IF_EXIST(copyOption) {
+ READ_NODE_FIELD(copyOption);
+ }
+ IF_EXIST(filename) {
+ READ_STRING_FIELD(filename);
+ }
+ IF_EXIST(is_outfile) {
+ READ_BOOL_FIELD(is_outfile);
+ }
+
READ_DONE();
}
@@ -3408,6 +3418,11 @@ static Agg* _readAgg(Agg* local_node)
READ_INT_FIELD(numCols);
READ_ATTR_ARRAY(grpColIdx, numCols);
READ_OPERATOROID_ARRAY(grpOperators, numCols);
+#ifndef ENABLE_MULTIPLE_NODES
+ if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) {
+ READ_OPERATOROID_ARRAY(grp_collations, numCols);
+ }
+#endif
READ_LONG_FIELD(numGroups);
READ_NODE_FIELD(groupingSets);
@@ -3438,9 +3453,19 @@ static WindowAgg* _readWindowAgg(WindowAgg* local_node)
READ_INT_FIELD(partNumCols);
READ_ATTR_ARRAY(partColIdx, partNumCols);
READ_OPERATOROID_ARRAY(partOperators, partNumCols);
+#ifndef ENABLE_MULTIPLE_NODES
+ if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) {
+ READ_OPERATOROID_ARRAY(part_collations, partNumCols);
+ }
+#endif
READ_INT_FIELD(ordNumCols);
READ_ATTR_ARRAY(ordColIdx, ordNumCols);
READ_OPERATOROID_ARRAY(ordOperators, ordNumCols);
+#ifndef ENABLE_MULTIPLE_NODES
+ if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) {
+ READ_OPERATOROID_ARRAY(ord_collations, ordNumCols);
+ }
+#endif
READ_INT_FIELD(frameOptions);
READ_NODE_FIELD(startOffset);
@@ -3528,7 +3553,9 @@ static PruningResult* _readPruningResult(PruningResult* local_node)
IF_EXIST(isPbeSinlePartition) {
READ_BOOL_FIELD(isPbeSinlePartition);
}
- /* skip PartitionMap */
+ IF_EXIST(ls_selectedPartitionnos) {
+ READ_NODE_FIELD(ls_selectedPartitionnos);
+ }
READ_DONE();
}
@@ -3542,6 +3569,12 @@ static SubPartitionPruningResult* _readSubPartitionPruningResult(SubPartitionPru
READ_INT_FIELD(partSeq);
READ_BITMAPSET_FIELD(bm_selectedSubPartitions);
READ_NODE_FIELD(ls_selectedSubPartitions);
+ IF_EXIST(partitionno) {
+        READ_INT_FIELD(partitionno);
+ }
+ IF_EXIST(ls_selectedSubPartitionnos) {
+ READ_NODE_FIELD(ls_selectedSubPartitionnos);
+ }
READ_DONE();
}
@@ -3952,6 +3985,11 @@ static Unique* _readUnique(Unique* local_node)
READ_INT_FIELD(numCols);
READ_ATTR_ARRAY(uniqColIdx, numCols);
READ_OPERATOROID_ARRAY(uniqOperators, numCols);
+#ifndef ENABLE_MULTIPLE_NODES
+ if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) {
+ READ_OPERATOROID_ARRAY(uniq_collations, numCols);
+ }
+#endif
READ_DONE();
}
@@ -4311,6 +4349,11 @@ static Group* _readGroup(Group* local_node)
READ_INT_FIELD(numCols);
READ_ATTR_ARRAY(grpColIdx, numCols);
READ_OPERATOROID_ARRAY(grpOperators, numCols);
+#ifndef ENABLE_MULTIPLE_NODES
+ if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) {
+ READ_OPERATOROID_ARRAY(grp_collations, numCols);
+ }
+#endif
READ_DONE();
}
@@ -4355,7 +4398,22 @@ static Hash* _readHash(Hash* local_node)
static HashJoin* _readHashJoin(HashJoin* local_node)
{
READ_LOCALS_NULL(HashJoin);
- READ_HASHJOIN_FIELD();
+
+ READ_TEMP_LOCALS();
+ _readJoin(&local_node->join);
+ READ_NODE_FIELD(hashclauses);
+ READ_BOOL_FIELD(streamBothSides);
+ READ_BOOL_FIELD(transferFilterFlag);
+ READ_BOOL_FIELD(rebuildHashTable);
+ READ_BOOL_FIELD(isSonicHash);
+ read_mem_info(&local_node->mem_info);
+
+#ifndef ENABLE_MULTIPLE_NODES
+ if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) {
+ READ_NODE_FIELD(hash_collations);
+ }
+#endif
+ READ_DONE();
}
static MergeJoin* _readMergeJoin(MergeJoin* local_node)
@@ -4504,6 +4562,9 @@ static PlannedStmt* _readPlannedStmt(void)
}
READ_BOOL_FIELD(isRowTriggerShippable);
READ_BOOL_FIELD(is_stream_plan);
+ IF_EXIST(cause_type) {
+ READ_UINT_FIELD(cause_type);
+ }
READ_DONE();
}
@@ -4565,6 +4626,11 @@ static SetOp* _readSetOp(SetOp* local_node)
READ_INT_FIELD(numCols);
READ_ATTR_ARRAY(dupColIdx, numCols);
READ_OPERATOROID_ARRAY(dupOperators, numCols);
+#ifndef ENABLE_MULTIPLE_NODES
+ if (t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM) {
+ READ_OPERATOROID_ARRAY(dup_collations, numCols);
+ }
+#endif
READ_INT_FIELD(flagColIdx);
READ_INT_FIELD(firstFlag);
READ_LONG_FIELD(numGroups);
@@ -5510,6 +5576,10 @@ static TypeName* _readTypeName()
{
READ_LOCATION_FIELD(end_location);
}
+ IF_EXIST(charset)
+ {
+ READ_INT_FIELD(charset);
+ }
READ_TYPEINFO_FIELD(typeOid);
READ_DONE();
@@ -5741,6 +5811,18 @@ static RangePartitionDefState* _readRangePartitionDefState()
READ_STRING_FIELD(partitionName);
READ_NODE_FIELD(boundary);
READ_STRING_FIELD(tablespacename);
+ IF_EXIST(subPartitionDefState) {
+ READ_NODE_FIELD(subPartitionDefState);
+ }
+ IF_EXIST(partitionno) {
+        READ_INT_FIELD(partitionno);
+ }
+ IF_EXIST(curStartVal) {
+ READ_NODE_FIELD(curStartVal);
+ }
+ IF_EXIST(partitionInitName) {
+        READ_STRING_FIELD(partitionInitName);
+ }
READ_DONE();
}
@@ -5752,6 +5834,12 @@ static ListPartitionDefState* _readListPartitionDefState()
READ_STRING_FIELD(partitionName);
READ_NODE_FIELD(boundary);
READ_STRING_FIELD(tablespacename);
+ IF_EXIST(subPartitionDefState) {
+ READ_NODE_FIELD(subPartitionDefState);
+ }
+ IF_EXIST(partitionno) {
+        READ_INT_FIELD(partitionno);
+ }
READ_DONE();
}
@@ -5763,6 +5851,12 @@ static HashPartitionDefState* _readHashPartitionDefState()
READ_STRING_FIELD(partitionName);
READ_NODE_FIELD(boundary);
READ_STRING_FIELD(tablespacename);
+ IF_EXIST(subPartitionDefState) {
+ READ_NODE_FIELD(subPartitionDefState);
+ }
+ IF_EXIST(partitionno) {
+        READ_INT_FIELD(partitionno);
+ }
READ_DONE();
}
@@ -5798,6 +5892,9 @@ static PartitionState* _readPartitionState()
READ_ENUM_FIELD(rowMovement, RowMovementValue);
READ_NODE_FIELD(subPartitionState);
READ_NODE_FIELD(partitionNameList);
+ IF_EXIST(partitionsNum) {
+ READ_INT_FIELD(partitionsNum);
+ }
if (local_node->partitionStrategy == '0') {
local_node->partitionStrategy = 0;
@@ -5941,6 +6038,17 @@ static AutoIncrement* _readAutoIncrement()
READ_DONE();
}
+static CharsetCollateOptions* _readCharsetcollateOptions()
+{
+ READ_LOCALS(CharsetCollateOptions);
+
+ READ_ENUM_FIELD(cctype, CharsetCollateType);
+ READ_INT_FIELD(charset);
+ READ_STRING_FIELD(collate);
+
+ READ_DONE();
+}
+
static PrefixKey* _readPrefixKey()
{
READ_LOCALS(PrefixKey);
@@ -6438,6 +6546,8 @@ Node* parseNodeString(void)
return_value = _readUserSetElem();
} else if (MATCH("USERVAR", 7)) {
return_value = _readUserVar();
+ } else if (MATCH("CHARSETCOLLATE", 14)) {
+ return_value = _readCharsetcollateOptions();
} else {
ereport(ERROR,
(errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE),
diff --git a/src/common/backend/parser/analyze.cpp b/src/common/backend/parser/analyze.cpp
index e0eb53373..3c307463b 100644
--- a/src/common/backend/parser/analyze.cpp
+++ b/src/common/backend/parser/analyze.cpp
@@ -268,6 +268,56 @@ Query* parse_sub_analyze(Node* parseTree, ParseState* parentParseState, CommonTa
return query;
}
+Node* parse_into_claues(Node* parseTree, IntoClause* intoClause)
+{
+ if (intoClause->userVarList) {
+ UserSetElem* uset = makeNode(UserSetElem);
+ uset->name = intoClause->userVarList;
+
+ SubLink* sl = makeNode(SubLink);
+ sl->subLinkType = EXPR_SUBLINK;
+ sl->testexpr = NULL;
+ sl->operName = NIL;
+ sl->subselect = parseTree;
+ sl->location = -1;
+
+ SelectIntoVarList *sis = makeNode(SelectIntoVarList);
+ sis->sublink = sl;
+ sis->userVarList = uset->name;
+
+ uset->val = (Expr *)sis;
+
+ VariableSetStmt* vss = makeNode(VariableSetStmt);
+ vss->kind = VAR_SET_DEFINED;
+ vss->name = "SELECT INTO VARLIST";
+ vss->defined_args = list_make1((Node *)uset);
+ vss->is_local = false;
+ vss->is_multiset = true;
+
+ VariableMultiSetStmt* vmss = makeNode(VariableMultiSetStmt);
+ vmss->args = list_make1((Node *)vss);
+ return (Node *)vmss;
+ } else if (intoClause->filename) {
+ CopyStmt* cn = makeNode(CopyStmt);
+ cn->relation = NULL;
+ cn->attlist = NIL;
+ cn->is_from = false;
+ cn->options = intoClause->copyOption;
+ cn->filename = intoClause->filename;
+ cn->filetype = intoClause->is_outfile ? S_OUTFILE : S_DUMPFILE;
+
+ cn->query = parseTree;
+ return (Node*)cn;
+ } else {
+ CreateTableAsStmt* ctas = makeNode(CreateTableAsStmt);
+ ctas->query = parseTree;
+ ctas->into = intoClause;
+ ctas->relkind = OBJECT_TABLE;
+ ctas->is_select_into = true;
+ return (Node*)ctas;
+ }
+}
+
/*
* transformTopLevelStmt -
* transform a Parse tree into a Query tree.
@@ -289,53 +339,14 @@ Query* transformTopLevelStmt(ParseState* pstate, Node* parseTree, bool isFirstNo
AssertEreport(stmt && IsA(stmt, SelectStmt) && stmt->larg == NULL, MOD_OPT, "failure to check parseTree");
if (stmt->intoClause) {
- if (stmt->intoClause->userVarList) {
- UserSetElem* uset = makeNode(UserSetElem);
- uset->name = stmt->intoClause->userVarList;
- stmt->intoClause = NULL;
-
- SubLink* sl = makeNode(SubLink);
- sl->subLinkType = EXPR_SUBLINK;
- sl->testexpr = NULL;
- sl->operName = NIL;
- sl->subselect = (Node *)stmt;
- sl->location = -1;
-
- SelectIntoVarList *sis = makeNode(SelectIntoVarList);
- sis->sublink = sl;
- sis->userVarList = uset->name;
-
- uset->val = (Expr *)sis;
-
- VariableSetStmt* vss = makeNode(VariableSetStmt);
- vss->kind = VAR_SET_DEFINED;
- vss->name = "SELECT INTO VARLIST";
- vss->defined_args = list_make1((Node *)uset);
- vss->is_local = false;
- vss->is_multiset = true;
-
- VariableMultiSetStmt* vmss = makeNode(VariableMultiSetStmt);
- vmss->args = list_make1((Node *)vss);
- parseTree = (Node *)vmss;
- } else {
- CreateTableAsStmt* ctas = makeNode(CreateTableAsStmt);
-
- ctas->query = parseTree;
- ctas->into = stmt->intoClause;
- ctas->relkind = OBJECT_TABLE;
- ctas->is_select_into = true;
-
- /*
- * Remove the intoClause from the SelectStmt. This makes it safe
- * for transformSelectStmt to complain if it finds intoClause set
- * (implying that the INTO appeared in a disallowed place).
- */
- stmt->intoClause = NULL;
-
- parseTree = (Node*)ctas;
- }
+ parseTree = parse_into_claues(parseTree, stmt->intoClause);
+ /*
+ * Remove the intoClause from the SelectStmt. This makes it safe
+ * for lexical_select_stmt to complain if it finds intoClause set
+ * (implying that the INTO appeared in a disallowed place).
+ */
+ stmt->intoClause = NULL;
}
-
}
if (u_sess->hook_cxt.transformStmtHook != NULL) {
@@ -416,6 +427,58 @@ Query* transformCreateModelStmt(ParseState* pstate, CreateModelStmt* stmt)
return result;
}
+Query* transformVariableCreateEventStmt(ParseState* pstate, CreateEventStmt* stmt)
+{
+ Query* result = makeNode(Query);
+ result->commandType = CMD_UTILITY;
+ Node* old_time_expr = NULL;
+ Node* new_time_expr = NULL;
+ if (stmt->start_time_expr) {
+ old_time_expr = stmt->start_time_expr;
+ new_time_expr = transformExpr(pstate, old_time_expr);
+ stmt->start_time_expr = new_time_expr;
+ }
+ if (stmt->end_time_expr) {
+ old_time_expr = stmt->end_time_expr;
+ new_time_expr = transformExpr(pstate, old_time_expr);
+ stmt->end_time_expr = new_time_expr;
+ }
+ result->utilityStmt = (Node*)stmt;
+ return result;
+}
+
+Query* transformVariableAlterEventStmt(ParseState* pstate, AlterEventStmt* stmt)
+{
+ Query* result = makeNode(Query);
+ result->commandType = CMD_UTILITY;
+ Node* old_time_expr = NULL;
+ Node* new_time_node = NULL;
+ DefElem* new_time_expr = NULL;
+ if (stmt->start_time_expr) {
+ old_time_expr = stmt->start_time_expr->arg;
+ new_time_node = transformExpr(pstate, old_time_expr);
+ if (new_time_node) {
+ new_time_expr = makeDefElem("start_date", new_time_node);
+ } else {
+ new_time_expr = NULL;
+ }
+ stmt->start_time_expr = new_time_expr;
+ }
+ if (stmt->end_time_expr) {
+ old_time_expr = stmt->end_time_expr->arg;
+ new_time_node = transformExpr(pstate, old_time_expr);
+ if (new_time_node) {
+            new_time_expr = makeDefElem("end_date", new_time_node);
+ } else {
+ new_time_expr = NULL;
+ }
+ stmt->end_time_expr = new_time_expr;
+ }
+ result->utilityStmt = (Node*)stmt;
+ return result;
+}
+
+
/*
* transformStmt -
* recursively transform a Parse tree into a Query tree.
@@ -497,10 +560,10 @@ Query* transformStmt(ParseState* pstate, Node* parseTree, bool isFirstNode, bool
} break;
case T_VariableSetStmt: {
- VariableSetStmt* stmt;
- stmt = (VariableSetStmt*)parseTree;
+ VariableSetStmt* stmt = (VariableSetStmt*)parseTree;
- if (DB_IS_CMPT(B_FORMAT) && u_sess->attr.attr_common.enable_set_variable_b_format && stmt->kind == VAR_SET_VALUE) {
+ if (DB_IS_CMPT(B_FORMAT) && stmt->kind == VAR_SET_VALUE &&
+ (u_sess->attr.attr_common.enable_set_variable_b_format || ENABLE_SET_VARIABLES)) {
transformVariableSetValueStmt(pstate, stmt);
}
result = makeNode(Query);
@@ -512,6 +575,14 @@ Query* transformStmt(ParseState* pstate, Node* parseTree, bool isFirstNode, bool
result = transformVariableMutiSetStmt(pstate, (VariableMultiSetStmt*)parseTree);
break;
+ case T_CreateEventStmt:
+ result = transformVariableCreateEventStmt(pstate, (CreateEventStmt*) parseTree);
+ break;
+
+ case T_AlterEventStmt:
+ result = transformVariableAlterEventStmt(pstate, (AlterEventStmt*) parseTree);
+ break;
+
default:
/*
@@ -2554,6 +2625,7 @@ static Query* transformVariableMutiSetStmt(ParseState* pstate, VariableMultiSetS
List* stmts = muti_stmt->args;
ListCell* cell = NULL;
VariableSetStmt* set_stmt;
+ List *usersetlist = NIL;
foreach(cell, stmts) {
Node* stmt = (Node*)lfirst(cell);
@@ -2565,8 +2637,13 @@ static Query* transformVariableMutiSetStmt(ParseState* pstate, VariableMultiSetS
set_stmt = (VariableSetStmt*)stmt;
}
- if (set_stmt->kind == VAR_SET_DEFINED)
+ if (set_stmt->kind == VAR_SET_DEFINED) {
transformVariableSetStmt(pstate, set_stmt);
+ if (strcmp(set_stmt->name, "USER DEFINED VARIABLE") == 0) {
+ usersetlist = list_concat(usersetlist, set_stmt->defined_args);
+ }
+ }
+
if (set_stmt->kind == VAR_SET_VALUE)
transformVariableSetValueStmt(pstate, set_stmt);
@@ -2574,11 +2651,24 @@ static Query* transformVariableMutiSetStmt(ParseState* pstate, VariableMultiSetS
AlterSystemStmt *newnode = makeNode(AlterSystemStmt);
newnode->setstmt = set_stmt;
resultList = lappend(resultList, newnode);
+ } else if (set_stmt->kind == VAR_SET_DEFINED) {
+ if (strcmp(set_stmt->name, "USER DEFINED VARIABLE") != 0) {
+ resultList = lappend(resultList, set_stmt);
+ }
} else {
resultList = lappend(resultList, set_stmt);
}
}
+ if (list_length(usersetlist) != 0) {
+ VariableSetStmt* new_set_stmt = makeNode(VariableSetStmt);
+ new_set_stmt->kind = VAR_SET_DEFINED;
+ new_set_stmt->name = "USER DEFINED VARIABLE";
+ new_set_stmt->is_local = false;
+ new_set_stmt->defined_args = usersetlist;
+ resultList = lappend(resultList, new_set_stmt);
+ }
+
list_free(muti_stmt->args);
muti_stmt->args = resultList;
@@ -3857,6 +3947,23 @@ static void CheckUpdateRelation(Relation targetrel)
}
}
+void UpdateParseCheck(ParseState *pstate, Node *qry)
+{
+ /*
+ * Top-level aggregates are simply disallowed in UPDATE, per spec. (From
+ * an implementation point of view, this is forced because the implicit
+ * ctid reference would otherwise be an ungrouped variable.)
+ */
+ if (pstate->p_hasAggs) {
+ ereport(ERROR, (errcode(ERRCODE_GROUPING_ERROR), errmsg("cannot use aggregate function in UPDATE"),
+ parser_errposition(pstate, locate_agg_of_level(qry, 0))));
+ }
+ if (pstate->p_hasWindowFuncs) {
+ ereport(ERROR, (errcode(ERRCODE_WINDOWING_ERROR), errmsg("cannot use window function in UPDATE"),
+ parser_errposition(pstate, locate_windowfunc(qry))));
+ }
+}
+
/*
* transformUpdateStmt -
* transforms an update statement
@@ -3950,23 +4057,7 @@ static Query* transformUpdateStmt(ParseState* pstate, UpdateStmt* stmt)
qry->hasSubLinks = pstate->p_hasSubLinks;
- /*
- * Top-level aggregates are simply disallowed in UPDATE, per spec. (From
- * an implementation point of view, this is forced because the implicit
- * ctid reference would otherwise be an ungrouped variable.)
- */
- if (pstate->p_hasAggs) {
- ereport(ERROR,
- (errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in UPDATE"),
- parser_errposition(pstate, locate_agg_of_level((Node*)qry, 0))));
- }
- if (pstate->p_hasWindowFuncs) {
- ereport(ERROR,
- (errcode(ERRCODE_WINDOWING_ERROR),
- errmsg("cannot use window function in UPDATE"),
- parser_errposition(pstate, locate_windowfunc((Node*)qry))));
- }
+ UpdateParseCheck(pstate, (Node *)qry);
assign_query_collations(pstate, qry);
qry->hintState = stmt->hintState;
diff --git a/src/common/backend/parser/gram.xml b/src/common/backend/parser/gram.xml
index 2849dd005..61cc0a2f7 100755
--- a/src/common/backend/parser/gram.xml
+++ b/src/common/backend/parser/gram.xml
@@ -645,7 +645,7 @@ ALTER WORKLOAD GROUP wg_name
stmt schema_stmt
- AlterDatabaseStmt AlterDatabaseSetStmt AlterDataSourceStmt AlterDomainStmt AlterEnumStmt
+ AlterDatabaseStmt AlterDatabaseSetStmt AlterDataSourceStmt AlterDomainStmt AlterEnumStmt AlterEventStmt
AlterFdwStmt AlterForeignServerStmt AlterGroupStmt AlterSchemaStmt
AlterObjectSchemaStmt AlterOwnerStmt AlterSeqStmt AlterTableStmt
AlterExtensionStmt AlterExtensionContentsStmt AlterForeignTableStmt
@@ -351,13 +355,13 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
CreateAssertStmt CreateTrigStmt
CreateUserStmt CreateUserMappingStmt CreateRoleStmt CreateRlsPolicyStmt CreateSynonymStmt
CreatedbStmt DeclareCursorStmt DefineStmt DeleteStmt DiscardStmt DoStmt
- DropGroupStmt DropOpClassStmt DropOpFamilyStmt DropPLangStmt DropStmt
+ DropGroupStmt DropOpClassStmt DropOpFamilyStmt DropPLangStmt DropStmt DropEventStmt ShowEventStmt
DropAssertStmt DropSynonymStmt DropTrigStmt DropRuleStmt DropCastStmt DropRoleStmt DropRlsPolicyStmt
DropUserStmt DropdbStmt DropTableSpaceStmt DropDataSourceStmt DropDirectoryStmt DropFdwStmt
DropForeignServerStmt DropUserMappingStmt ExplainStmt ExecDirectStmt FetchStmt
GrantStmt GrantRoleStmt GrantDbStmt IndexStmt InsertStmt ListenStmt LoadStmt
LockStmt NotifyStmt ExplainableStmt PreparableStmt
- CreateFunctionStmt CreateProcedureStmt CreatePackageStmt CreatePackageBodyStmt AlterFunctionStmt AlterProcedureStmt ReindexStmt RemoveAggrStmt
+ CreateFunctionStmt CreateEventStmt CreateProcedureStmt CreatePackageStmt CreatePackageBodyStmt AlterFunctionStmt AlterProcedureStmt ReindexStmt RemoveAggrStmt
RemoveFuncStmt RemoveOperStmt RemovePackageStmt RenameStmt RevokeStmt RevokeRoleStmt RevokeDbStmt
RuleActionStmt RuleActionStmtOrEmpty RuleStmt
SecLabelStmt SelectStmt TimeCapsuleStmt TransactionStmt TruncateStmt CallFuncStmt
@@ -404,8 +408,8 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
%type OptNoLog
%type alter_table_cmd alter_partition_cmd alter_type_cmd opt_collate_clause exchange_partition_cmd move_partition_cmd
- modify_column_cmd
- replica_identity
+ modify_column_cmd reset_partition_cmd
+ replica_identity add_column_first_after event_from_clause
%type alter_table_cmds alter_partition_cmds alter_table_or_partition alter_type_cmds add_column_cmds modify_column_cmds
%type opt_drop_behavior
@@ -413,11 +417,13 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
%type createdb_opt_list alterdb_opt_list copy_opt_list
transaction_mode_list weak_password_string_list
create_extension_opt_list alter_extension_opt_list
- pgxcnode_list pgxcnodes bucket_maps bucket_list
- opt_pgxcnodes
-%type createdb_opt_item alterdb_opt_item copy_opt_item
- transaction_mode_item
+ pgxcnode_list pgxcnodes bucket_maps bucket_list lines_options_fin
+ opt_pgxcnodes fields_options_list fields_options_fin lines_options_list
+%type createdb_opt_item alterdb_opt_item copy_opt_item characterset_option
+ transaction_mode_item lines_option_item fields_options_item
create_extension_opt_item alter_extension_opt_item
+ start_opt preserve_opt rename_opt status_opt comments_opt action_opt
+ end_opt definer_name_opt
%type opt_lock lock_type cast_context opt_wait
%type vacuum_option_list vacuum_option_elem opt_verify_options
@@ -445,7 +451,7 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
%type TriggerFuncArg
%type TriggerWhen
-%type copy_file_name
+%type copy_file_name definer_opt user ev_body ev_where_body event_where_clause
database_name access_method_clause access_method access_method_clause_without_keyword attr_name
name namedata_string fdwName cursor_name file_name
index_name cluster_index_specification
@@ -453,7 +459,7 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
application_name password_string hint_string
%type func_name func_name_opt_arg pkg_name handler_name qual_Op qual_all_Op subquery_Op
opt_class opt_inline_handler opt_validator validator_clause
- opt_collate
+ opt_collation
%type qualified_name insert_target OptConstrFromTable opt_index_name insert_partition_clause update_delete_partition_clause
@@ -492,7 +498,7 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
opt_include opt_c_include index_including_params
sort_clause opt_sort_clause sortby_list index_params constraint_params
name_list from_clause from_list opt_array_bounds
- qualified_name_list any_name any_name_list
+ qualified_name_list any_name any_name_list collate_name
any_operator expr_list attrs callfunc_args
target_list insert_column_list set_target_list rename_clause_list rename_clause
set_clause_list set_clause multiple_set_clause
@@ -614,7 +620,7 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
%type relation_expr
%type relation_expr_opt_alias delete_relation_expr_opt_alias
%type target_el single_set_clause set_target insert_column_item connect_by_root_expr
-%type tablesample_clause timecapsule_clause opt_timecapsule_clause opt_repeatable_clause
+%type tablesample_clause timecapsule_clause opt_timecapsule_clause opt_repeatable_clause end_expr start_expr
%type generic_option_name
%type generic_option_arg
@@ -639,14 +645,19 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
%type character
%type extract_arg
%type timestamp_units
-%type opt_charset
+
+%type character_set
+%type charset opt_charset convert_charset default_charset
+%type collate opt_collate default_collate
+%type CharsetCollate charset_collate optCharsetCollate
+
%type opt_varying opt_timezone opt_no_inherit
-%type Iconst SignedIconst
+%type Iconst SignedIconst opt_partitions_num opt_subpartitions_num
%type Sconst comment_text notify_payload
%type RoleId TypeOwner opt_granted_by opt_boolean_or_string ColId_or_Sconst definer_user definer_expression
%type var_list guc_value_extension_list
-%type ColId ColLabel var_name type_function_name param_name
+%type ColId ColLabel var_name type_function_name param_name charset_collate_name
%type var_value zone_value
%type unreserved_keyword type_func_name_keyword
@@ -676,7 +687,7 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
%type document_or_content
%type xml_whitespace_option
-%type func_application func_with_separator func_expr_common_subexpr index_functional_expr_key func_application_special
+%type func_application func_with_separator func_expr_common_subexpr index_functional_expr_key func_application_special functime_app
%type func_expr func_expr_windowless
%type common_table_expr
%type with_clause opt_with_clause
@@ -688,9 +699,9 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
%type window_clause window_definition_list opt_partition_clause
%type window_definition over_clause window_specification
opt_frame_clause frame_extent frame_bound
-%type opt_existing_window_name
+%type opt_existing_window_name opt_unique_key
%type opt_if_not_exists
-%type OptCompress
+%type OptCompress generated_column_option
%type KVType
%type ColCmprsMode
%type subprogram_body
@@ -703,11 +714,12 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
subpartitioning_clause range_subpartitioning_clause hash_subpartitioning_clause
list_subpartitioning_clause subpartition_item opt_subpartition_index_def
range_subpartition_index_list range_subpartition_index_item
-%type range_partition_definition_list list_partition_definition_list hash_partition_definition_list maxValueList
- column_item_list tablespaceList opt_interval_tablespaceList
+%type range_partition_definition_list list_partition_definition_list hash_partition_definition_list maxValueList listValueList
+ column_item_list tablespaceList opt_interval_tablespaceList opt_hash_partition_definition_list
split_dest_partition_define_list split_dest_listsubpartition_define_list split_dest_rangesubpartition_define_list
- range_start_end_list range_less_than_list opt_range_every_list subpartition_definition_list
+ range_start_end_list range_less_than_list opt_range_every_list subpartition_definition_list range_partition_boundary
%type partition_name
+%type opt_columns opt_in_p
%type opt_row_movement_clause
/* PGXC_BEGIN */
@@ -717,7 +729,8 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
%type range_slice_definition_list range_slice_less_than_list range_slice_start_end_list
list_distribution_rules_list list_distribution_rule_row list_distribution_rule_single
%type range_slice_less_than_item range_slice_start_end_item
- list_dist_state OptListDistribution list_dist_value
+ list_dist_state OptListDistribution list_dist_value interval_intexpr functime_expr initime
+ interval_list every_interval interval_cell ev_timeexpr
%type OptSubCluster OptSubClusterInternal
/* PGXC_END */
@@ -765,7 +778,7 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
%type alter_policy_filter_list alter_policy_privileges_list alter_policy_access_list
%type policy_name policy_privilege_type policy_access_type policy_filter_type policy_filter_name policy_target_type
%type policy_target_name
-%type policy_status_opt
+%type policy_status_opt opt_ev_on_completion
%type policy_privilege_elem policy_access_elem policy_target_elem_opt
%type policy_filter_elem pp_policy_filter_elem filter_term pp_filter_term filter_expr pp_filter_expr filter_paren filter_expr_list filter_set policy_filter_value
%type policy_filters_list policy_filter_opt policy_privileges_list policy_access_list policy_targets_list
@@ -786,7 +799,7 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
/* LOAD DATA */
%type load_options_list load_table_options_list opt_load_data_options_list load_when_option_list
-%type load_type_set load_oper_table_type
+%type load_type_set load_oper_table_type opt_ev_status
%type load_table_options_item opt_load_data_options_item load_when_option load_options_item
%type load_quote_str load_col_nullif_spec
@@ -795,6 +808,7 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
%type load_column_expr_list copy_column_sequence_list copy_column_filler_list copy_column_constant_list
%type load_col_data_type
%type load_col_sequence_item_sart column_sequence_item_step column_sequence_item_sart
+%type comment_opt
%type trigger_order
%type delimiter_str_name delimiter_str_names
/*
@@ -826,11 +840,11 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
BACKWARD BARRIER BEFORE BEGIN_NON_ANOYBLOCK BEGIN_P BETWEEN BIGINT BINARY BINARY_DOUBLE BINARY_INTEGER BIT BLANKS
BLOB_P BLOCKCHAIN BODY_P BOGUS BOOLEAN_P BOTH BUCKETCNT BUCKETS BY BYTEAWITHOUTORDER BYTEAWITHOUTORDERWITHEQUAL
- CACHE CALL CALLED CANCELABLE CASCADE CASCADED CASE CAST CATALOG_P CHAIN CHAR_P
- CHARACTER CHARACTERISTICS CHARACTERSET CHECK CHECKPOINT CLASS CLEAN CLIENT CLIENT_MASTER_KEY CLIENT_MASTER_KEYS CLOB CLOSE
- CLUSTER COALESCE COLLATE COLLATION COLUMN COLUMN_ENCRYPTION_KEY COLUMN_ENCRYPTION_KEYS COMMENT COMMENTS COMMIT
- COMMITTED COMPACT COMPATIBLE_ILLEGAL_CHARS COMPLETE COMPRESS CONCURRENTLY CONDITION CONFIGURATION CONNECTION CONSTANT CONSTRAINT CONSTRAINTS
- CONTENT_P CONTINUE_P CONTVIEW CONVERSION_P CONNECT COORDINATOR COORDINATORS COPY COST CREATE
+ CACHE CALL CALLED CANCELABLE CASCADE CASCADED CASE CAST CATALOG_P CHAIN CHANGE CHAR_P
+ CHARACTER CHARACTERISTICS CHARACTERSET CHARSET CHECK CHECKPOINT CLASS CLEAN CLIENT CLIENT_MASTER_KEY CLIENT_MASTER_KEYS CLOB CLOSE
+ CLUSTER COALESCE COLLATE COLLATION COLUMN COLUMN_ENCRYPTION_KEY COLUMN_ENCRYPTION_KEYS COLUMNS COMMENT COMMENTS COMMIT
+ COMMITTED COMPACT COMPATIBLE_ILLEGAL_CHARS COMPLETE COMPLETION COMPRESS CONCURRENTLY CONDITION CONFIGURATION CONNECTION CONSTANT CONSTRAINT CONSTRAINTS
+ CONTENT_P CONTINUE_P CONTVIEW CONVERSION_P CONVERT_P CONNECT COORDINATOR COORDINATORS COPY COST CREATE
CROSS CSN CSV CUBE CURRENT_P
CURRENT_CATALOG CURRENT_DATE CURRENT_ROLE CURRENT_SCHEMA
CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER CURSOR CYCLE
@@ -841,11 +855,11 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
/* PGXC_BEGIN */
DICTIONARY DIRECT DIRECTORY DISABLE_P DISCARD DISTINCT DISTRIBUTE DISTRIBUTION DO DOCUMENT_P DOMAIN_P DOUBLE_P
/* PGXC_END */
- DROP DUPLICATE DISCONNECT
+ DROP DUPLICATE DISCONNECT DUMPFILE
- EACH ELASTIC ELSE ENABLE_P ENCLOSED ENCODING ENCRYPTED ENCRYPTED_VALUE ENCRYPTION ENCRYPTION_TYPE END_P ENFORCED ENUM_P ERRORS ESCAPE EOL ESCAPING EVERY EXCEPT EXCHANGE
+ EACH ELASTIC ELSE ENABLE_P ENCLOSED ENCODING ENCRYPTED ENCRYPTED_VALUE ENCRYPTION ENCRYPTION_TYPE END_P ENDS ENFORCED ENUM_P ERRORS ESCAPE EOL ESCAPING EVENT EVENTS EVERY EXCEPT EXCHANGE
EXCLUDE EXCLUDED EXCLUDING EXCLUSIVE EXECUTE EXISTS EXPIRED_P EXPLAIN
- EXTENSION EXTERNAL EXTRACT
+ EXTENSION EXTERNAL EXTRACT ESCAPED
FALSE_P FAMILY FAST FENCED FETCH FIELDS FILEHEADER_P FILL_MISSING_FIELDS FILLER FILTER FIRST_P FIXED_P FLOAT_P FOLLOWING FOLLOWS_P FOR FORCE FOREIGN FORMATTER FORWARD
FEATURES // DB4AI
@@ -865,7 +879,7 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
KEY KILL KEY_PATH KEY_STORE
- LABEL LANGUAGE LARGE_P LAST_P LC_COLLATE_P LC_CTYPE_P LEADING LEAKPROOF
+ LABEL LANGUAGE LARGE_P LAST_P LC_COLLATE_P LC_CTYPE_P LEADING LEAKPROOF LINES
LEAST LESS LEFT LEVEL LIKE LIMIT LIST LISTEN LOAD LOCAL LOCALTIME LOCALTIMESTAMP
LOCATION LOCK_P LOCKED LOG_P LOGGING LOGIN_ANY LOGIN_FAILURE LOGIN_SUCCESS LOGOUT LOOP
MAPPING MASKING MASTER MATCH MATERIALIZED MATCHED MAXEXTENTS MAXSIZE MAXTRANS MAXVALUE MERGE MINUS_P MINUTE_P MINVALUE MINEXTENTS MODE MODIFY_P MONTH_P MOVE MOVEMENT
@@ -874,7 +888,7 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
NOT NOTHING NOTIFY NOTNULL NOWAIT NULL_P NULLCOLS NULLIF NULLS_P NUMBER_P NUMERIC NUMSTR NVARCHAR NVARCHAR2 NVL
OBJECT_P OF OFF OFFSET OIDS ON ONLY OPERATOR OPTIMIZATION OPTION OPTIONALLY OPTIONS OR
- ORDER OUT_P OUTER_P OVER OVERLAPS OVERLAY OWNED OWNER
+ ORDER OUT_P OUTER_P OVER OVERLAPS OVERLAY OWNED OWNER OUTFILE
PACKAGE PACKAGES PARSER PARTIAL PARTITION PARTITIONS PASSING PASSWORD PCTFREE PER_P PERCENT PERFORMANCE PERM PLACING PLAN PLANS POLICY POSITION
/* PGXC_BEGIN */
@@ -893,11 +907,11 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
RESET RESIZE RESOURCE RESTART RESTRICT RETURN RETURNING RETURNS REUSE REVOKE RIGHT ROLE ROLES ROLLBACK ROLLUP
ROTATION ROW ROWNUM ROWS ROWTYPE_P RULE
- SAMPLE SAVEPOINT SCHEMA SCROLL SEARCH SECOND_P SECURITY SELECT SEPARATOR_P SEQUENCE SEQUENCES
+ SAMPLE SAVEPOINT SCHEDULE SCHEMA SCROLL SEARCH SECOND_P SECURITY SELECT SEPARATOR_P SEQUENCE SEQUENCES
SERIALIZABLE SERVER SESSION SESSION_USER SET SETS SETOF SHARE SHIPPABLE SHOW SHUTDOWN SIBLINGS
- SIMILAR SIMPLE SIZE SKIP SLICE SMALLDATETIME SMALLDATETIME_FORMAT_P SMALLINT SNAPSHOT SOME SOURCE_P SPACE SPILL SPLIT STABLE STANDALONE_P START STARTWITH
- STATEMENT STATEMENT_ID STATISTICS STDIN STDOUT STORAGE STORE_P STORED STRATIFY STREAM STRICT_P STRIP_P SUBPARTITION SUBSCRIPTION SUBSTRING
- SYMMETRIC SYNONYM SYSDATE SYSID SYSTEM_P SYS_REFCURSOR SHOW_ERRORS
+ SIMILAR SIMPLE SIZE SKIP SLAVE SLICE SMALLDATETIME SMALLDATETIME_FORMAT_P SMALLINT SNAPSHOT SOME SOURCE_P SPACE SPILL SPLIT STABLE STANDALONE_P START STARTS STARTWITH
+ STATEMENT STATEMENT_ID STATISTICS STDIN STDOUT STORAGE STORE_P STORED STRATIFY STREAM STRICT_P STRIP_P SUBPARTITION SUBPARTITIONS SUBSCRIPTION SUBSTRING
+ SYMMETRIC SYNONYM SYSDATE SYSID SYSTEM_P SYS_REFCURSOR STARTING SHOW_ERRORS
TABLE TABLES TABLESAMPLE TABLESPACE TARGET TEMP TEMPLATE TEMPORARY TERMINATED TEXT_P THAN THEN TIME TIME_FORMAT_P TIMECAPSULE TIMESTAMP TIMESTAMP_FORMAT_P TIMESTAMPDIFF TINYINT
TO TRAILING TRANSACTION TRANSFORM TREAT TRIGGER TRIM TRUE_P
@@ -944,6 +958,7 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
/* Precedence: lowest to highest */
%nonassoc COMMENT
+%nonassoc FIRST_P AFTER
%nonassoc PARTIAL_EMPTY_PREC
%nonassoc CLUSTER
%nonassoc SET /* see relation_expr_opt_alias */
@@ -1175,6 +1190,10 @@ stmt :
| CreateForeignTableStmt
| CreateDataSourceStmt
| CreateFunctionStmt
+ | CreateEventStmt
+ | AlterEventStmt
+ | DropEventStmt
+ | ShowEventStmt
| CreatePackageStmt
| CreatePackageBodyStmt
| CreateGroupStmt
@@ -1999,6 +2018,7 @@ CreateSchemaStmt:
n->authid = $5;
n->hasBlockChain = $6;
n->schemaElts = $7;
+ n->charset = PG_INVALID_ENCODING;
$$ = (Node *)n;
}
| CREATE SCHEMA ColId OptBlockchainWith OptSchemaEltList
@@ -2010,6 +2030,7 @@ CreateSchemaStmt:
n->authid = NULL;
n->hasBlockChain = $4;
n->schemaElts = $5;
+ n->charset = PG_INVALID_ENCODING;
$$ = (Node *)n;
}
| CREATE SCHEMA IF_P NOT EXISTS ColId OptBlockchainWith OptSchemaEltList
@@ -2036,7 +2057,18 @@ CreateSchemaStmt:
n->hasBlockChain = $9;
n->schemaElts = $10;
$$ = (Node *)n;
- }
+ }
+ | CREATE SCHEMA ColId CharsetCollate
+ {
+ CreateSchemaStmt *n = makeNode(CreateSchemaStmt);
+ n->schemaname = $3;
+ n->authid = NULL;
+ n->hasBlockChain = false;
+ n->schemaElts = NULL;
+ n->charset = $4->charset;
+ n->collate = $4->collate;
+ $$ = (Node *)n;
+ }
;
OptSchemaName:
@@ -2077,6 +2109,18 @@ AlterSchemaStmt:
n->schemaname = $3;
n->authid = NULL;
n->hasBlockChain = $4;
+ n->charset = PG_INVALID_ENCODING;
+ n->collate = NULL;
+ $$ = (Node *)n;
+ }
+ | ALTER SCHEMA ColId CharsetCollate
+ {
+ AlterSchemaStmt *n = makeNode(AlterSchemaStmt);
+ n->schemaname = $3;
+ n->authid = NULL;
+ n->hasBlockChain = false;
+ n->charset = $4->charset;
+ n->collate = $4->collate;
$$ = (Node *)n;
}
;
@@ -2138,6 +2182,26 @@ VariableSetStmt:
n->is_local = false;
$$ = (Node *) n;
}
+ | SET GLOBAL TRANSACTION transaction_mode_list
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "SET GLOBAL TRANSACTION is not yet supported in distributed database.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SET GLOBAL TRANSACTION is not yet supported in distributed database.")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SET GLOBAL TRANSACTION is only supported in B_FORMAT.")));
+ }
+ VariableSetStmt *n = makeNode(VariableSetStmt);
+ n->kind = VAR_SET_MULTI;
+ n->name = "GLOBAL TRANSACTION";
+ n->args = $4;
+ $$ = (Node *)n;
+ }
| SET generic_set_extension
{
#ifdef ENABLE_MULTIPLE_NODES
@@ -2147,7 +2211,7 @@ VariableSetStmt:
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("SET config_parameter = expr is not yet supported in distributed database.")));
#endif
- if (DB_IS_CMPT(B_FORMAT) && u_sess->attr.attr_common.enable_set_variable_b_format) {
+ if (DB_IS_CMPT(B_FORMAT) && (u_sess->attr.attr_common.enable_set_variable_b_format || ENABLE_SET_VARIABLES)) {
VariableSetStmt *n = $2;
n->is_local = false;
$$ = (Node *) n;
@@ -2168,7 +2232,7 @@ VariableSetStmt:
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("SET config_parameter = expr is not yet supported in distributed database.")));
#endif
- if (DB_IS_CMPT(B_FORMAT) && u_sess->attr.attr_common.enable_set_variable_b_format) {
+ if (DB_IS_CMPT(B_FORMAT) && (u_sess->attr.attr_common.enable_set_variable_b_format || ENABLE_SET_VARIABLES)) {
VariableSetStmt *n = $3;
n->is_local = false;
$$ = (Node *) n;
@@ -2418,7 +2482,7 @@ VariableMultiSetStmt: SET VariableSetElemsList
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("set multiple variables is not yet supported in distributed database.")));
#endif
- if (DB_IS_CMPT(B_FORMAT) && u_sess->attr.attr_common.enable_set_variable_b_format)
+ if (DB_IS_CMPT(B_FORMAT) && (u_sess->attr.attr_common.enable_set_variable_b_format || ENABLE_SET_VARIABLES))
{
VariableMultiSetStmt* n = makeNode(VariableMultiSetStmt);
n->args = $2;
@@ -2742,7 +2806,7 @@ set_expr_extension:
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("@var_name is not yet supported in distributed database.")));
#endif
- if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT && u_sess->attr.attr_common.enable_set_variable_b_format) {
+ if (DB_IS_CMPT(B_FORMAT) && (u_sess->attr.attr_common.enable_set_variable_b_format || ENABLE_SET_VARIABLES)) {
$$ = $1;
} else {
const char* message = "@var_name is supported only in B-format database, and enable_set_variable_b_format = on.";
@@ -3176,11 +3240,35 @@ VariableShowStmt:
#endif
}
| SHOW var_name
- {
+ {
+ if(strcmp($2, "events") == 0) {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "SHOW EVENTS is not yet supported in distributed database.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SHOW EVENTS is not yet supported in distributed database.")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ const char* message = "show events statement is only supported in B format.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate,
+ (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("SHOW EVENTS is supported only in B-format database."),
+ parser_errposition(@1)));
+ $$ = NULL;
+ }
+ ShowEventStmt *n = makeNode(ShowEventStmt);
+ n->from_clause = NULL;
+ n->where_clause = NULL;
+ $$ = (Node *)n;
+ } else {
VariableShowStmt *n = makeNode(VariableShowStmt);
n->name = $2;
$$ = (Node *) n;
- }
+ }
+ }
| SHOW CURRENT_SCHEMA
{
VariableShowStmt *n = makeNode(VariableShowStmt);
@@ -3568,51 +3656,60 @@ modify_column_cmds:
| modify_column_cmds ',' modify_column_cmd { $$ = lappend($$, $3); }
;
modify_column_cmd:
- ColId Typename
+ ColId Typename opt_charset ColQualList add_column_first_after
{
- AlterTableCmd *n = makeNode(AlterTableCmd);
- ColumnDef *def = makeNode(ColumnDef);
- n->subtype = AT_AlterColumnType;
- n->name = $1;
- n->def = (Node *) def;
- /* We only use these three fields of the ColumnDef node */
- def->typname = $2;
- def->collClause = NULL;
- def->raw_default = NULL;
- def->update_default = NULL;
- def->clientLogicColumnRef=NULL;
- $$ = (Node *)n;
- }
- | ColId Typename ON_UPDATE_TIME UPDATE b_expr
- {
-#ifndef ENABLE_MULTIPLE_NODES
- if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT)
- {
- AlterTableCmd *n = makeNode(AlterTableCmd);
+ AlterTableCmd *n = (AlterTableCmd *)$5;
+ if ($4 == NULL && n->is_first == false && n->after_name == NULL && !ENABLE_MODIFY_COLUMN) {
ColumnDef *def = makeNode(ColumnDef);
- Constraint *cons = makeNode(Constraint);
n->subtype = AT_AlterColumnType;
n->name = $1;
n->def = (Node *) def;
/* We only use these three fields of the ColumnDef node */
def->typname = $2;
- def->constraints = list_make1(cons);
- cons->contype = CONSTR_DEFAULT;
- cons->location = @3;
- cons->update_expr = $5;
- cons->cooked_expr = NULL;
+ def->typname->charset = $3;
+ def->collClause = NULL;
+ def->raw_default = NULL;
+ def->update_default = NULL;
+ def->clientLogicColumnRef=NULL;
$$ = (Node *)n;
} else {
- const char* message = "on update syntax be supported dbcompatibility B.";
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "Un-support feature";
InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
ereport(errstate,
- (errmodule(MOD_PARSER),
- errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("on update syntax is supported in dbcompatibility B."),
- parser_errposition(@1)));
- $$ = NULL;
- }
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature"),
+ errdetail("The distributed capability is not supported currently.")));
#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Un-support feature"),
+ parser_errposition(@4),
+ errdetail("this modify syntax is supported only in B compatibility")));
+ }
+ ColumnDef *def = makeNode(ColumnDef);
+ def->colname = $1;
+ def->typname = $2;
+ def->typname->charset = $3;
+ def->kvtype = ATT_KV_UNDEFINED;
+ def->inhcount = 0;
+ def->is_local = true;
+ def->is_not_null = false;
+ def->is_from_type = false;
+ def->storage = 0;
+ def->cmprs_mode = ATT_CMPR_UNDEFINED;
+ def->raw_default = NULL;
+ def->cooked_default = NULL;
+ def->collOid = InvalidOid;
+ def->fdwoptions = NULL;
+ def->update_default = NULL;
+ SplitColQualList($4, &def->constraints, &def->collClause, &def->clientLogicColumnRef, yyscanner);
+ n->subtype = AT_ModifyColumn;
+ n->name = $1;
+ n->def = (Node *)def;
+ $$ = (Node *)n;
+ }
}
| ColId NOT NULL_P opt_enable
{
@@ -3720,6 +3817,7 @@ alter_partition_cmds:
| alter_partition_cmds ',' alter_partition_cmd { $$ = lappend($1, $3); }
| move_partition_cmd { $$ = list_make1($1); }
| exchange_partition_cmd { $$ = list_make1($1); }
+ | reset_partition_cmd { $$ = list_make1($1); }
;
alter_partition_cmd:
@@ -4338,6 +4436,18 @@ exchange_partition_cmd:
}
;
+reset_partition_cmd:
+ RESET PARTITION
+ {
+ AlterTableCmd *n = makeNode(AlterTableCmd);
+
+ n->subtype = AT_ResetPartitionno;
+ n->missing_ok = FALSE;
+ $$ = (Node *) n;
+
+ }
+ ;
+
alter_table_cmd:
/*ALTER INDEX index_name UNUSABLE*/
UNUSABLE
@@ -4375,17 +4485,17 @@ alter_table_cmd:
|
/* ALTER TABLE ADD */
- ADD_P columnDef
+ ADD_P columnDef add_column_first_after
{
- AlterTableCmd *n = makeNode(AlterTableCmd);
+ AlterTableCmd *n = (AlterTableCmd *)$3;
n->subtype = AT_AddColumn;
n->def = $2;
$$ = (Node *)n;
}
/* ALTER TABLE ADD COLUMN */
- | ADD_P COLUMN columnDef
+ | ADD_P COLUMN columnDef add_column_first_after
{
- AlterTableCmd *n = makeNode(AlterTableCmd);
+ AlterTableCmd *n = (AlterTableCmd *)$4;
n->subtype = AT_AddColumn;
n->def = $3;
$$ = (Node *)n;
@@ -4572,6 +4682,86 @@ alter_table_cmd:
{
$$ = $2;
}
+ | MODIFY_P COLUMN ColId Typename opt_charset ColQualList add_column_first_after
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "Un-support feature";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature"),
+ errdetail("The distributed capability is not supported currently.")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Un-support feature"),
+ parser_errposition(@1),
+ errdetail("ALTER TABLE MODIFY COLUMN syntax is supported only in B compatibility")));
+ }
+ ColumnDef *def = makeNode(ColumnDef);
+ def->colname = $3;
+ def->typname = $4;
+ def->typname->charset = $5;
+ def->kvtype = ATT_KV_UNDEFINED;
+ def->inhcount = 0;
+ def->is_local = true;
+ def->is_not_null = false;
+ def->is_from_type = false;
+ def->storage = 0;
+ def->cmprs_mode = ATT_CMPR_UNDEFINED;
+ def->raw_default = NULL;
+ def->update_default = NULL;
+ def->cooked_default = NULL;
+ def->collOid = InvalidOid;
+ def->fdwoptions = NULL;
+ SplitColQualList($6, &def->constraints, &def->collClause, &def->clientLogicColumnRef, yyscanner);
+ AlterTableCmd *n = (AlterTableCmd *)$7;
+ n->subtype = AT_ModifyColumn;
+ n->name = $3;
+ n->def = (Node *)def;
+ $$ = (Node *)n;
+ }
+ | CHANGE opt_column ColId ColId Typename opt_charset ColQualList add_column_first_after
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "Un-support feature";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature"),
+ errdetail("The distributed capability is not supported currently.")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Un-support feature"),
+ parser_errposition(@1),
+ errdetail("ALTER TABLE CHANGE syntax is supported only in B compatibility")));
+ }
+ ColumnDef *def = makeNode(ColumnDef);
+ def->colname = $4;
+ def->typname = $5;
+ def->typname->charset = $6;
+ def->kvtype = ATT_KV_UNDEFINED;
+ def->inhcount = 0;
+ def->is_local = true;
+ def->is_not_null = false;
+ def->is_from_type = false;
+ def->storage = 0;
+ def->cmprs_mode = ATT_CMPR_UNDEFINED;
+ def->raw_default = NULL;
+ def->update_default = NULL;
+ def->cooked_default = NULL;
+ def->collOid = InvalidOid;
+ def->fdwoptions = NULL;
+ SplitColQualList($7, &def->constraints, &def->collClause, &def->clientLogicColumnRef, yyscanner);
+ AlterTableCmd *n = (AlterTableCmd *)$8;
+ n->subtype = AT_ModifyColumn;
+ n->name = $3;
+ n->def = (Node *)def;
+ $$ = (Node *)n;
+ }
/* ALTER TABLE SET WITH OIDS */
| SET WITH OIDS
{
@@ -4884,6 +5074,24 @@ alter_table_cmd:
n->def = $1;
$$ = (Node *)n;
}
+ | CharsetCollate
+ {
+ AlterTableCmd *n = makeNode(AlterTableCmd);
+ n->subtype = AT_SetCharsetCollate;
+ n->def = (Node *)$1;
+ $$ = (Node*)n;
+ }
+ | CONVERT_P TO convert_charset opt_collate
+ {
+ AlterTableCmd *n = makeNode(AlterTableCmd);
+ n->subtype = AT_ConvertCharset;
+ CharsetCollateOptions *cc = makeNode(CharsetCollateOptions);
+ cc->cctype = OPT_CHARSETCOLLATE;
+ cc->charset = $3;
+ cc->collate = $4;
+ n->def = (Node *)cc;
+ $$ = (Node*)n;
+ }
/* PGXC_END */
/* table comments start */
| COMMENT opt_equal Sconst
@@ -4910,7 +5118,7 @@ opt_drop_behavior:
;
opt_collate_clause:
- COLLATE any_name
+ COLLATE collate_name
{
CollateClause *n = makeNode(CollateClause);
n->arg = NULL;
@@ -5983,7 +6191,7 @@ opt_rename:
*****************************************************************************/
CreateStmt: CREATE OptTemp TABLE qualified_name '(' OptTableElementList ')'
- OptInherit OptAutoIncrement OptWith OnCommitOption OptCompress OptPartitionElement
+ OptInherit OptAutoIncrement optCharsetCollate OptWith OnCommitOption OptCompress OptPartitionElement
/* PGXC_BEGIN */
OptDistributeBy OptSubCluster
/* PGXC_END */
@@ -5993,28 +6201,34 @@ CreateStmt: CREATE OptTemp TABLE qualified_name '(' OptTableElementList ')'
{
CreateStmt *n = makeNode(CreateStmt);
$4->relpersistence = $2;
- n->relkind = $19;
+ n->relkind = $20;
n->relation = $4;
n->tableElts = $6;
n->inhRelations = $8;
n->constraints = NIL;
- n->options = $10;
- n->oncommit = $11;
- n->row_compress = $12;
- n->tablespacename = $13;
+ n->options = $11;
+ n->oncommit = $12;
+ n->row_compress = $13;
+ n->tablespacename = $14;
n->if_not_exists = false;
/* PGXC_BEGIN */
- n->distributeby = $14;
- n->subcluster = $15;
+ n->distributeby = $15;
+ n->subcluster = $16;
/* PGXC_END */
- n->tableOptions = $16;
- n->partTableState = (PartitionState *)$17;
- n->internalData = $18;
+ n->tableOptions = $17;
+ n->partTableState = (PartitionState *)$18;
+ n->internalData = $19;
n->autoIncStart = $9;
+ if ($10 == NULL) {
+ n->charset = PG_INVALID_ENCODING;
+ } else {
+ n->charset = $10->charset;
+ n->collate = $10->collate;
+ }
$$ = (Node *)n;
}
| CREATE OptTemp TABLE IF_P NOT EXISTS qualified_name '('
- OptTableElementList ')' OptInherit OptAutoIncrement OptWith OnCommitOption
+ OptTableElementList ')' OptInherit OptAutoIncrement optCharsetCollate OptWith OnCommitOption
OptCompress OptPartitionElement
/* PGXC_BEGIN */
OptDistributeBy OptSubCluster
@@ -6029,19 +6243,25 @@ CreateStmt: CREATE OptTemp TABLE qualified_name '(' OptTableElementList ')'
n->tableElts = $9;
n->inhRelations = $11;
n->constraints = NIL;
- n->options = $13;
- n->oncommit = $14;
- n->row_compress = $15;
- n->tablespacename = $16;
+ n->options = $14;
+ n->oncommit = $15;
+ n->row_compress = $16;
+ n->tablespacename = $17;
n->if_not_exists = true;
/* PGXC_BEGIN */
- n->distributeby = $17;
- n->subcluster = $18;
+ n->distributeby = $18;
+ n->subcluster = $19;
/* PGXC_END */
- n->tableOptions = $19;
- n->partTableState = (PartitionState *)$20;
- n->internalData = $21;
+ n->tableOptions = $20;
+ n->partTableState = (PartitionState *)$21;
+ n->internalData = $22;
n->autoIncStart = $12;
+ if ($13 == NULL) {
+ n->charset = PG_INVALID_ENCODING;
+ } else {
+ n->charset = $13->charset;
+ n->collate = $13->collate;
+ }
$$ = (Node *)n;
}
| CREATE OptTemp TABLE qualified_name OF any_name
@@ -6070,6 +6290,7 @@ CreateStmt: CREATE OptTemp TABLE qualified_name '(' OptTableElementList ')'
n->tableOptions = $14;
n->partTableState = NULL;
n->internalData = NULL;
+ n->charset = PG_INVALID_ENCODING;
$$ = (Node *)n;
}
| CREATE OptTemp TABLE IF_P NOT EXISTS qualified_name OF any_name
@@ -6098,6 +6319,7 @@ CreateStmt: CREATE OptTemp TABLE qualified_name '(' OptTableElementList ')'
n->tableOptions = $17;
n->partTableState = NULL;
n->internalData = NULL;
+ n->charset = PG_INVALID_ENCODING;
$$ = (Node *)n;
}
;
@@ -6134,17 +6356,17 @@ opt_table_partitioning_clause:
;
range_partitioning_clause:
- PARTITION BY RANGE '(' column_item_list ')'
- opt_interval_partition_clause subpartitioning_clause '(' range_partition_definition_list ')' opt_row_movement_clause
+ PARTITION BY RANGE opt_columns '(' column_item_list ')'
+ opt_interval_partition_clause opt_partitions_num subpartitioning_clause '(' range_partition_definition_list ')' opt_row_movement_clause
{
PartitionState *n = makeNode(PartitionState);
- if ($8 != NULL && list_length($5) != 1) {
+ if ($10 != NULL && list_length($6) != 1) {
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Un-support feature"),
errdetail("The partition key's length should be 1.")));
}
- if ($8 != NULL && $7 != NULL) {
+ if ($10 != NULL && $8 != NULL) {
const char* message = "Un-support feature";
InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
ereport(errstate,
@@ -6153,24 +6375,32 @@ range_partitioning_clause:
errdetail("Subpartitions do not support interval partition."),
errcause("System error."), erraction("Contact engineer to support.")));
}
- n->partitionKey = $5;
- n->intervalPartDef = (IntervalPartitionDefState *)$7;
- n->partitionList = $10;
+ if ($9 > 0 && u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Un-support syntax in current compatibility"),
+ parser_errposition(@9),
+ errdetail("range partition with partitions clause is supported only in B compatibility")));
+ }
+ n->partitionKey = $6;
+ n->intervalPartDef = (IntervalPartitionDefState *)$8;
+ n->partitionsNum = $9;
+ n->partitionList = $12;
if (n->intervalPartDef)
n->partitionStrategy = 'i';
else
n->partitionStrategy = 'r';
- n->rowMovement = (RowMovementValue)$12;
- n->subPartitionState = (PartitionState *)$8;
+ n->rowMovement = (RowMovementValue)$14;
+ n->subPartitionState = (PartitionState *)$10;
$$ = (Node *)n;
}
;
list_partitioning_clause:
- PARTITION BY LIST '(' column_item_list ')' subpartitioning_clause
+ PARTITION BY LIST opt_columns '(' column_item_list ')' opt_partitions_num subpartitioning_clause
'(' list_partition_definition_list ')' opt_row_movement_clause
{
#ifdef ENABLE_MULTIPLE_NODES
@@ -6181,21 +6411,29 @@ list_partitioning_clause:
errmsg("Un-support feature"),
errdetail("The distributed capability is not supported currently.")));
#endif
- if (list_length($5) != 1) {
- const char* message = "Un-support feature";
- InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
- ereport(errstate,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Un-support feature"),
- errdetail("The partition key's length should be 1.")));
+ if (list_length($6) != 1 && $9 != NULL) {
+ const char* message = "Un-support feature";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature"),
+ errdetail("The partition key's length should be 1.")));
+ }
+ if ($8 > 0 && u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Un-support syntax in current compatibility"),
+ parser_errposition(@8),
+ errdetail("list partition with partitions clause is supported only in B compatibility")));
}
PartitionState *n = makeNode(PartitionState);
- n->partitionKey = $5;
+ n->partitionKey = $6;
n->intervalPartDef = NULL;
- n->partitionList = $9;
+ n->partitionList = $11;
n->partitionStrategy = 'l';
- n->subPartitionState = (PartitionState *)$7;
- n->rowMovement = (RowMovementValue)$11;
+ n->partitionsNum = $8;
+ n->subPartitionState = (PartitionState *)$9;
+ n->rowMovement = (RowMovementValue)$13;
$$ = (Node *)n;
@@ -6203,8 +6441,8 @@ list_partitioning_clause:
;
hash_partitioning_clause:
- PARTITION BY IDENT '(' column_item_list ')' subpartitioning_clause
- '(' hash_partition_definition_list ')' opt_row_movement_clause
+ PARTITION BY IDENT '(' column_item_list ')' opt_partitions_num subpartitioning_clause
+ opt_hash_partition_definition_list opt_row_movement_clause
{
#ifdef ENABLE_MULTIPLE_NODES
const char* message = "Un-support feature";
@@ -6233,8 +6471,9 @@ hash_partitioning_clause:
n->intervalPartDef = NULL;
n->partitionList = $9;
n->partitionStrategy = 'h';
- n->subPartitionState = (PartitionState *)$7;;
- n->rowMovement = (RowMovementValue)$11;
+ n->partitionsNum = $7;
+ n->subPartitionState = (PartitionState *)$8;
+ n->rowMovement = (RowMovementValue)$10;
int i = 0;
ListCell *elem = NULL;
List *parts = n->partitionList;
@@ -6246,6 +6485,101 @@ hash_partitioning_clause:
$$ = (Node *)n;
}
+ | PARTITION BY KEY '(' column_item_list ')' opt_partitions_num subpartitioning_clause
+ opt_hash_partition_definition_list opt_row_movement_clause
+ {
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Un-support syntax in current compatibility"),
+ parser_errposition(@3),
+ errdetail("PARTITION BY KEY is supported only in B compatibility")));
+ }
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "Un-support feature";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature"),
+ errdetail("The distributed capability is not supported currently.")));
+#endif
+ if (list_length($5) != 1) {
+ const char* message = "Un-support feature";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature"),
+ errdetail("The partition key's length should be 1.")));
+ }
+ PartitionState *n = makeNode(PartitionState);
+ n->partitionKey = $5;
+ n->intervalPartDef = NULL;
+ n->partitionList = $9;
+ n->partitionStrategy = 'h';
+ n->partitionsNum = $7;
+ n->subPartitionState = (PartitionState *)$8;
+ n->rowMovement = (RowMovementValue)$10;
+ int i = 0;
+ ListCell *elem = NULL;
+ List *parts = n->partitionList;
+ foreach(elem, parts) {
+ HashPartitionDefState *hashPart = (HashPartitionDefState*)lfirst(elem);
+ hashPart->boundary = list_make1(makeIntConst(i, -1));
+ i++;
+ }
+ $$ = (Node *)n;
+
+ }
+ ;
+
+opt_columns:
+ COLUMNS
+ {
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Un-support syntax in current compatibility"),
+ parser_errposition(@1),
+ errdetail("COLUMNS is supported only in B compatibility")));
+ }
+ $$ = NULL;
+ }
+ | /* empty */ { $$ = NULL; }
+ ;
+
+opt_partitions_num:
+ PARTITIONS Iconst
+ {
+ if ($2 <= 0) {
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Invalid number of partitions"),
+ parser_errposition(@2),
+ errdetail("partitions number must be a positive integer")));
+ }
+ $$ = $2;
+ }
+ | /* empty */ { $$ = 0; }
+ ;
+
+opt_subpartitions_num:
+ SUBPARTITIONS Iconst
+ {
+ if ($2 <= 0) {
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Invalid number of partitions"),
+ parser_errposition(@2),
+ errdetail("subpartitions number must be a positive integer")));
+ }
+ $$ = $2;
+ }
+ | /* empty */ { $$ = 0; }
+ ;
+
+opt_hash_partition_definition_list:
+ '(' hash_partition_definition_list ')' { $$ = $2; }
+ | /* empty */ { $$ = NIL; }
;
value_partitioning_clause:
@@ -6333,7 +6667,7 @@ list_subpartitioning_clause:
;
hash_subpartitioning_clause:
- SUBPARTITION BY IDENT '(' column_item_list ')'
+ SUBPARTITION BY IDENT '(' column_item_list ')' opt_subpartitions_num
{
#ifdef ENABLE_MULTIPLE_NODES
const char* message = "Un-support feature";
@@ -6363,10 +6697,44 @@ hash_subpartitioning_clause:
n->partitionList = NIL;
n->partitionStrategy = 'h';
n->subPartitionState = NULL;
-
+ n->partitionsNum = $7;
$$ = (Node *)n;
}
+ | SUBPARTITION BY KEY '(' column_item_list ')' opt_subpartitions_num
+ {
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Un-support syntax in current compatibility"),
+ parser_errposition(@3),
+ errdetail("SUBPARTITION BY KEY is supported only in B compatibility")));
+ }
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "Un-support feature";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature"),
+ errdetail("The distributed capability is not supported currently.")));
+#endif
+ if (list_length($5) != 1) {
+ const char* message = "Un-support feature";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature"),
+ errdetail("The partition key's length should be 1.")));
+ }
+ PartitionState *n = makeNode(PartitionState);
+ n->partitionKey = $5;
+ n->intervalPartDef = NULL;
+ n->partitionList = NIL;
+ n->partitionStrategy = 'h';
+ n->subPartitionState = NULL;
+ n->partitionsNum = $7;
+ $$ = (Node *)n;
+ }
;
subpartition_definition_list:
@@ -6381,24 +6749,13 @@ subpartition_definition_list:
;
subpartition_item:
- SUBPARTITION name VALUES '(' expr_list ')' opt_part_options
+ SUBPARTITION name VALUES '(' listValueList ')' opt_part_options
{
ListPartitionDefState *n = makeNode(ListPartitionDefState);
n->partitionName = $2;
n->boundary = $5;
n->tablespacename = $7;
- $$ = (Node *)n;
- }
- | SUBPARTITION name VALUES '(' DEFAULT ')' opt_part_options
- {
- ListPartitionDefState *n = makeNode(ListPartitionDefState);
- n->partitionName = $2;
- Const *n_default = makeNode(Const);
- n_default->ismaxvalue = true;
- n_default->location = -1;
- n->boundary = list_make1(n_default);
- n->tablespacename = $7;
$$ = (Node *)n;
}
| SUBPARTITION name opt_part_options
@@ -6529,33 +6886,22 @@ range_less_than_list:
;
list_partition_item:
- PARTITION name VALUES '(' expr_list ')' opt_part_options
+ PARTITION name VALUES opt_in_p '(' listValueList ')' opt_part_options
{
ListPartitionDefState *n = makeNode(ListPartitionDefState);
n->partitionName = $2;
- n->boundary = $5;
- n->tablespacename = $7;
+ n->boundary = $6;
+ n->tablespacename = $8;
$$ = (Node *)n;
}
- | PARTITION name VALUES '(' DEFAULT ')' opt_part_options
+ | PARTITION name VALUES opt_in_p '(' listValueList ')' opt_part_options '(' subpartition_definition_list ')'
{
ListPartitionDefState *n = makeNode(ListPartitionDefState);
n->partitionName = $2;
- Const *n_default = makeNode(Const);
- n_default->ismaxvalue = true;
- n_default->location = -1;
- n->boundary = list_make1(n_default);
- n->tablespacename = $7;
- $$ = (Node *)n;
- }
- | PARTITION name VALUES '(' expr_list ')' opt_part_options '(' subpartition_definition_list ')'
- {
- ListPartitionDefState *n = makeNode(ListPartitionDefState);
- n->partitionName = $2;
- n->boundary = $5;
- n->tablespacename = $7;
- n->subPartitionDefState = $9;
+ n->boundary = $6;
+ n->tablespacename = $8;
+ n->subPartitionDefState = $10;
int i = 0;
ListCell *elem = NULL;
List *parts = n->subPartitionDefState;
@@ -6569,29 +6915,21 @@ list_partition_item:
}
$$ = (Node *)n;
}
- | PARTITION name VALUES '(' DEFAULT ')' opt_part_options '(' subpartition_definition_list ')'
+ ;
+
+opt_in_p:
+ IN_P
{
- ListPartitionDefState *n = makeNode(ListPartitionDefState);
- n->partitionName = $2;
- Const *n_default = makeNode(Const);
- n_default->ismaxvalue = true;
- n_default->location = -1;
- n->boundary = list_make1(n_default);
- n->tablespacename = $7;
- n->subPartitionDefState = $9;
- int i = 0;
- ListCell *elem = NULL;
- List *parts = n->subPartitionDefState;
- foreach(elem, parts) {
- if (!IsA((Node*)lfirst(elem), HashPartitionDefState)) {
- break;
- }
- HashPartitionDefState *hashPart = (HashPartitionDefState*)lfirst(elem);
- hashPart->boundary = list_make1(makeIntConst(i, -1));
- i++;
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Un-support syntax in current compatibility"),
+ parser_errposition(@1),
+ errdetail("VALUES IN is supported only in B compatibility")));
}
- $$ = (Node *)n;
+ $$ = NULL;
}
+ | /* empty */ { $$ = NULL; }
;
hash_partition_item:
@@ -6624,25 +6962,45 @@ hash_partition_item:
}
;
+range_partition_boundary:
+ MAXVALUE
+ {
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature in current compatibility"),
+ parser_errposition(@1),
+ errdetail("MAXVALUE without parentheses is supported only in B compatibility")));
+ }
+ Const *bound = makeNode(Const);
+ bound->ismaxvalue = true;
+ bound->location = @1;
+
+ $$ = list_make1(bound);
+ }
+ | '(' maxValueList ')'
+ {
+ $$ = $2;
+ }
+ ;
+
range_less_than_item:
- PARTITION name VALUES LESS THAN
- '(' maxValueList ')' opt_part_options
+ PARTITION name VALUES LESS THAN range_partition_boundary opt_part_options
{
RangePartitionDefState *n = makeNode(RangePartitionDefState);
n->partitionName = $2;
- n->boundary = $7;
- n->tablespacename = $9;
+ n->boundary = $6;
+ n->tablespacename = $7;
$$ = (Node *)n;
}
- | PARTITION name VALUES LESS THAN
- '(' maxValueList ')' opt_part_options '(' subpartition_definition_list ')'
+ | PARTITION name VALUES LESS THAN range_partition_boundary opt_part_options '(' subpartition_definition_list ')'
{
RangePartitionDefState *n = makeNode(RangePartitionDefState);
n->partitionName = $2;
- n->boundary = $7;
- n->tablespacename = $9;
- n->subPartitionDefState = $11;
+ n->boundary = $6;
+ n->tablespacename = $7;
+ n->subPartitionDefState = $9;
int i = 0;
ListCell *elem = NULL;
List *parts = n->subPartitionDefState;
@@ -6748,6 +7106,21 @@ maxValueItem:
}
;
+listValueList:
+ expr_list
+ {
+ $$ = $1;
+ }
+ | DEFAULT
+ {
+ Const *n = makeNode(Const);
+
+ n->ismaxvalue = true;
+ n->location = @1;
+
+ $$ = list_make1(n);
+ }
+ ;
opt_row_movement_clause: ENABLE_P ROW MOVEMENT { $$ = ROWMOVEMENT_ENABLE; }
| DISABLE_P ROW MOVEMENT { $$ = ROWMOVEMENT_DISABLE; }
@@ -6842,35 +7215,76 @@ TypedTableElement:
| TableConstraint { $$ = $1; }
;
-columnDef: ColId Typename KVType ColCmprsMode create_generic_options ColQualList opt_column_options
+columnDef: ColId Typename opt_charset KVType ColCmprsMode create_generic_options ColQualList opt_column_options
{
ColumnDef *n = makeNode(ColumnDef);
n->colname = $1;
n->typname = $2;
- n->kvtype = $3;
+ n->typname->charset = $3;
+ n->kvtype = $4;
n->inhcount = 0;
n->is_local = true;
n->is_not_null = false;
n->is_from_type = false;
n->storage = 0;
- n->cmprs_mode = $4;
+ n->cmprs_mode = $5;
n->raw_default = NULL;
n->update_default = NULL;
n->cooked_default = NULL;
n->collOid = InvalidOid;
- n->fdwoptions = $5;
- if ($3 == ATT_KV_UNDEFINED) {
- SplitColQualList($6, &n->constraints, &n->collClause, &n->clientLogicColumnRef,
+ n->fdwoptions = $6;
+ if ($4 == ATT_KV_UNDEFINED) {
+ SplitColQualList($7, &n->constraints, &n->collClause, &n->clientLogicColumnRef,
yyscanner);
} else {
- SplitColQualList($6, &n->constraints, &n->collClause,
+ SplitColQualList($7, &n->constraints, &n->collClause,
yyscanner);
}
- n->columnOptions = $7;
+ n->columnOptions = $8;
$$ = (Node *)n;
}
;
+add_column_first_after:
+ FIRST_P
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "ALTER TABLE ... ADD ... FIRST is not yet supported";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature"),
+ errdetail("ALTER TABLE ... ADD ... FIRST is not yet supported")));
+#endif
+ AlterTableCmd *n = makeNode(AlterTableCmd);
+ n->is_first = true;
+ n->after_name = NULL;
+ $$ = (Node *)n;
+ }
+ | AFTER ColId
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "ALTER TABLE ... ADD ... AFTER column_name is not yet supported";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature"),
+ errdetail("ALTER TABLE ... ADD ... AFTER column_name is not yet supported")));
+#endif
+ AlterTableCmd *n = makeNode(AlterTableCmd);
+ n->is_first = false;
+ n->after_name = $2;
+ $$ = (Node *)n;
+ }
+ | /* EMPTY */
+ {
+ AlterTableCmd *n = makeNode(AlterTableCmd);
+ n->is_first = false;
+ n->after_name = NULL;
+ $$ = (Node *)n;
+ }
+ ;
+
KVType: TSTAG {$$ = ATT_KV_TAG;} /* tag for kv storage */
| TSFIELD {$$ = ATT_KV_FIELD;} /* field for kv storage */
| TSTIME {$$ = ATT_KV_TIMETAG;} /* field for kv storage */
@@ -6929,7 +7343,7 @@ ColConstraint:
}
| ColConstraintElem { $$ = $1; }
| ConstraintAttr { $$ = $1; }
- | COLLATE any_name
+ | COLLATE collate_name
{
/*
* Note: the CollateClause is momentarily included in
@@ -7203,7 +7617,7 @@ ColConstraintElem:
n->location = @1;
$$ = (Node *)n;
}
- | UNIQUE opt_definition OptConsTableSpaceWithEmpty InformationalConstraintElem
+ | opt_unique_key opt_definition OptConsTableSpaceWithEmpty InformationalConstraintElem
{
Constraint *n = makeNode(Constraint);
n->contype = CONSTR_UNIQUE;
@@ -7215,7 +7629,7 @@ ColConstraintElem:
n->inforConstraint = (InformationalConstraint *) $4;
$$ = (Node *)n;
}
- | UNIQUE opt_definition OptConsTableSpaceWithEmpty ENABLE_P InformationalConstraintElem
+ | opt_unique_key opt_definition OptConsTableSpaceWithEmpty ENABLE_P InformationalConstraintElem
{
Constraint *n = makeNode(Constraint);
n->contype = CONSTR_UNIQUE;
@@ -7303,7 +7717,7 @@ ColConstraintElem:
}
#endif
}
- | GENERATED ALWAYS AS '(' a_expr ')' STORED
+ | GENERATED ALWAYS AS '(' a_expr ')' generated_column_option
{
#ifdef ENABLE_MULTIPLE_NODES
const char* message = "Generated column is not yet supported";
@@ -7362,6 +7776,26 @@ ColConstraintElem:
$$ = (Node *)n;
}
;
+
+opt_unique_key:
+ UNIQUE { $$ = NULL; }
+ | UNIQUE KEY
+ {
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Un-support feature"),
+ parser_errposition(@1),
+ errdetail("UNIQUE KEY is supported only in B compatibility")));
+ }
+ $$ = NULL;
+ }
+ ;
+
+generated_column_option:
+ STORED { $$ = 's'; }
+ | /* EMPTY */ { $$ = '\0'; }
+ ;
/*
* ConstraintAttr represents constraint attributes, which we parse as if
@@ -12195,6 +12629,11 @@ drop_type: TABLE { $$ = OBJECT_TABLE; }
| PUBLICATION { $$ = OBJECT_PUBLICATION; }
;
+collate_name: any_name { $$ = $1; }
+ | BINARY { $$ = list_make1(makeString("binary")); }
+ | Sconst { $$ = list_make1(makeString($1)); }
+ ;
+
any_name_list:
any_name { $$ = list_make1($1); }
| any_name_list ',' any_name { $$ = lappend($1, $3); }
@@ -13703,7 +14142,7 @@ index_params: index_elem { $$ = list_make1($1); }
* expressions in parens. For backwards-compatibility reasons, we allow
* an expression that's just a function call to be written without parens.
*/
-index_elem: ColId opt_collate opt_class opt_asc_desc opt_nulls_order
+index_elem: ColId opt_collation opt_class opt_asc_desc opt_nulls_order
{
$$ = makeNode(IndexElem);
$$->name = $1;
@@ -13714,7 +14153,7 @@ index_elem: ColId opt_collate opt_class opt_asc_desc opt_nulls_order
$$->ordering = (SortByDir)$4;
$$->nulls_ordering = (SortByNulls)$5;
}
- | index_functional_expr_key opt_collate opt_class opt_asc_desc opt_nulls_order
+ | index_functional_expr_key opt_collation opt_class opt_asc_desc opt_nulls_order
{
$$ = makeNode(IndexElem);
$$->name = NULL;
@@ -13725,7 +14164,7 @@ index_elem: ColId opt_collate opt_class opt_asc_desc opt_nulls_order
$$->ordering = (SortByDir)$4;
$$->nulls_ordering = (SortByNulls)$5;
}
- | '(' a_expr ')' opt_collate opt_class opt_asc_desc opt_nulls_order
+ | '(' a_expr ')' opt_collation opt_class opt_asc_desc opt_nulls_order
{
$$ = makeNode(IndexElem);
$$->name = NULL;
@@ -13819,11 +14258,7 @@ constraint_elem: ColId con_asc_desc
;
index_functional_expr_key: col_name_keyword_nonambiguous '(' Iconst ')'
- {
- if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
- ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("prefix key is supported only in B-format database")));
- }
+ {
PrefixKey* pk = makeNode(PrefixKey);
pk->arg = (Expr*)makeColumnRef(pstrdup($1), NIL, @1, yyscanner);
pk->length = $3;
@@ -13838,8 +14273,7 @@ index_functional_expr_key: col_name_keyword_nonambiguous '(' Iconst ')'
* This syntax branch can be parsed either as a column prefix or as a function.
* In B-compatible mode, it is preferentially treated as a column prefix.
*/
- if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT &&
- $4 == NIL && list_length(elist) == 1 && list_length(nlist) == 1) {
+ if ($4 == NIL && list_length(elist) == 1 && list_length(nlist) == 1) {
Node* arg = (Node*)linitial(elist);
if (IsA(arg, A_Const) && ((A_Const*)arg)->val.type == T_Integer) {
PrefixKey* pk = makeNode(PrefixKey);
@@ -13874,11 +14308,39 @@ index_including_params: index_elem { $$ = list_make1($1); }
| index_including_params ',' index_elem { $$ = lappend($1, $3); }
;
-
-opt_collate: COLLATE any_name { $$ = $2; }
- | /*EMPTY*/ { $$ = NIL; }
+/*
+ * collate: COLLATE [=] collation_name clause (B-compatibility only).
+ * Returns the parsed collation name node from charset_collate_name.
+ */
+collate:
+    COLLATE opt_equal charset_collate_name
+    {
+#ifdef ENABLE_MULTIPLE_NODES
+        const char* message = "specifying character sets and collations is not yet supported";
+        /* plpgsql_yylloc matches every other InsertErrorMessage call site (gsplsql_yylloc was a typo) */
+        InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("Un-support feature"),
+            /* errdetail argument must be a quoted string literal */
+            errdetail("specifying character sets and collations is not yet supported")));
+#endif
+        if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+            ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                errmsg("specifying character sets and collations is supported only in B-format database")));
+        }
+        $$ = $3;
+    }
;
+opt_collate:
+ collate { $$ = $1; }
+ | /*EMPTY*/ { $$ = NULL; }
+ ;
+
+default_collate:
+ collate { $$ = $1; }
+ | DEFAULT collate { $$ = $2; }
+ ;
+
+opt_collation:
+ COLLATE collate_name { $$ = $2; }
+ | /*EMPTY*/ { $$ = NIL; }
+ ;
+
opt_class: any_name { $$ = $1; }
| USING any_name { $$ = $2; }
| /*EMPTY*/ { $$ = NIL; }
@@ -14095,6 +14557,811 @@ callfunc_args: func_arg_expr
$$ = lappend($1, $3);
}
;
+
+/*****************************************************************************
+ *
+ * QUERY:
+ * create [definer_name] event [IF NOT EXIST]
+ * on schedule schedule_time
+ * [ON COMPLETION [NOT] PRESERVE]
+ * [ENABLE | DISABLE | DISABLE ON SLAVE]
+ * [COMMENT 'string']
+ * do event_body
+ *
+ *****************************************************************************/
+
+ CreateEventStmt:
+ CREATE opt_or_replace definer_opt EVENT qualified_name ON SCHEDULE start_expr opt_ev_on_completion
+ opt_ev_status comment_opt DO ev_body
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "create event statement is not yet supported in distributed database.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("CREATE EVENT is not yet supported in distributed database.")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ const char* message = "create event statement is only supported in B format.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("CREATE EVENT is supported only in B-format database"),
+ parser_errposition(@1)));
+ $$ = NULL;
+ }
+ if($2) {
+ parser_yyerror("EVENT not support REPLACE function");
+ }
+ CreateEventStmt *n = makeNode(CreateEventStmt);
+ n->def_name = $3;
+ n->if_not_exists = false;
+ n->event_name = $5;
+ n->start_time_expr = $8;
+ n->end_time_expr = NULL;
+ n->interval_time = NULL;
+ n->complete_preserve = $9;
+ n->event_status = (EventStatus)$10;
+ n->event_comment_str = $11;
+ n->event_query_str = $13;
+ $$ = (Node *)n;
+ }
+		| CREATE opt_or_replace definer_opt EVENT IF_P NOT EXISTS qualified_name ON SCHEDULE start_expr opt_ev_on_completion
+		opt_ev_status comment_opt DO ev_body
+			{
+#ifdef ENABLE_MULTIPLE_NODES
+				const char* message = "create event statement is not yet supported in distributed database.";
+				InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+				ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+						errmsg("CREATE EVENT is not yet supported in distributed database.")));
+#endif
+				if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+					const char* message = "create event statement is only supported in B format.";
+					InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+					ereport(errstate,
+						(errmodule(MOD_PARSER),
+							errcode(ERRCODE_SYNTAX_ERROR),
+							errmsg("CREATE EVENT is supported only in B-format database."),
+							parser_errposition(@1)));
+					$$ = NULL;
+				}
+				if($2) {
+					parser_yyerror("EVENT not support REPLACE function");
+				}
+				CreateEventStmt *n = makeNode(CreateEventStmt);
+				n->def_name = $3;
+				n->if_not_exists = true;
+				n->event_name = $8;
+				n->start_time_expr = $11;
+				n->end_time_expr = NULL;
+				n->interval_time = NULL;
+				n->complete_preserve = $12;
+				/* fix: opt_ev_status is symbol 13 in this alternative; the previous
+				 * code reused $12 (opt_ev_on_completion) for the event status. */
+				n->event_status = (EventStatus)$13;
+				n->event_comment_str = $14;
+				n->event_query_str = $16;
+				$$ = (Node *)n;
+			}
+ | CREATE opt_or_replace definer_opt EVENT qualified_name ON SCHEDULE EVERY every_interval start_expr end_expr opt_ev_on_completion
+ opt_ev_status comment_opt DO ev_body
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "create event statement is not yet supported in distributed database.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("CREATE EVENT is not yet supported in distributed database.")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ const char* message = "create event statement is only supported in B format.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate,
+ (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("CREATE EVENT is supported only in B-format database."),
+ parser_errposition(@1)));
+ $$ = NULL;
+ }
+ if($2) {
+ parser_yyerror("EVENT not support REPLACE function");
+ }
+ CreateEventStmt *n = makeNode(CreateEventStmt);
+ n->def_name = $3;
+ n->if_not_exists = false;
+ n->event_name = $5;
+ n->start_time_expr = $10;
+ n->end_time_expr = $11;
+ n->interval_time = $9;
+ n->complete_preserve = $12;
+ n->event_status = (EventStatus)$13;
+ n->event_comment_str = $14;
+ n->event_query_str = $16;
+ $$ = (Node *)n;
+ }
+ | CREATE opt_or_replace definer_opt EVENT IF_P NOT EXISTS qualified_name ON SCHEDULE EVERY every_interval start_expr end_expr opt_ev_on_completion
+ opt_ev_status comment_opt DO ev_body
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "create event statement is not yet supported in distributed database.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("CREATE EVENT is not yet supported in distributed database.")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ const char* message = "create event statement is only supported in B format.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate,
+ (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("CREATE EVENT is supported only in B-format database."),
+ parser_errposition(@1)));
+ $$ = NULL;
+ }
+ if($2) {
+ parser_yyerror("EVENT not support REPLACE function");
+ }
+ CreateEventStmt *n = makeNode(CreateEventStmt);
+ n->def_name = $3;
+ n->if_not_exists = true;
+ n->event_name = $8;
+ n->start_time_expr = $13;
+ n->end_time_expr = $14;
+ n->interval_time = $12;
+ n->complete_preserve = $15;
+ n->event_status = (EventStatus)$16;
+ n->event_comment_str = $17;
+ n->event_query_str = $19;
+ $$ = (Node *)n;
+ }
+ ;
+
+
+definer_opt:
+ DEFINER '=' user { $$ = $3; }
+ | /* EMPTY */ { $$ = NULL; }
+ ;
+
+user:
+ ColId { $$ = $1; }
+ ;
+
+every_interval:
+ Iconst opt_interval
+ {
+ TypeName *t;
+ t = SystemTypeName("interval");
+ t->typmods = $2;
+ Node *num = makeIntConst($1, @1);
+ $$ = makeTypeCast(num, t, -1);
+ }
+ | Sconst opt_interval
+ {
+ TypeName *t;
+ t = SystemTypeName("interval");
+ t->typmods = $2;
+ Node *num = makeStringConst($1, @1);
+ $$ = makeTypeCast(num, t, -1);
+ }
+ | FCONST opt_interval
+ {
+ TypeName *t;
+ t = SystemTypeName("interval");
+ t->typmods = $2;
+ Node *num = makeStringConst($1, @1);
+ $$ = makeTypeCast(num, t, -1);
+ }
+ ;
+
+start_expr: STARTS ev_timeexpr
+ { $$ = $2; }
+ | AT ev_timeexpr
+ { $$ = $2; }
+ | /* EMPTY */ { $$ = NULL; }
+ ;
+
+end_expr: ENDS ev_timeexpr
+ { $$ = $2; }
+ | /* EMPTY */ { $$ = NULL; }
+ ;
+
+ev_timeexpr: initime { $$ = $1; }
+ | initime '+' interval_list
+ { $$ = (Node *) makeSimpleA_Expr(AEXPR_OP, "+", $1, $3, @2);}
+ | initime '-' interval_list
+ { $$ = (Node *) makeSimpleA_Expr(AEXPR_OP, "-", $1, $3, @2);}
+ ;
+
+interval_list: interval_cell { $$ = $1; }
+ | interval_list '+' interval_list
+ { $$ = (Node *) makeSimpleA_Expr(AEXPR_OP, "+", $1, $3, @2); }
+ | interval_list '-' interval_list
+ { $$ = (Node *) makeSimpleA_Expr(AEXPR_OP, "-", $1, $3, @2); }
+ ;
+
+/*
+ * interval_cell: one "<n> <unit>" term of an event-schedule time expression.
+ * The literal is cast to the INTERVAL type carried by ConstInterval, with
+ * opt_interval supplying the unit qualifier via typmods.
+ */
+interval_cell:	ConstInterval ICONST opt_interval
+				{
+					/* fix: pg_ltoa handles the full int32 range and needs up to
+					 * 12 bytes; the previous pg_itoa(int16) into palloc(8)
+					 * silently truncated Iconst values above 32767. */
+					char* a = (char*)palloc(12);
+					pg_ltoa($2, a);
+					TypeName *t = $1;
+					t->typmods = $3;
+					$$ = makeStringConstCast(a, @2, t);
+				}
+			| ConstInterval Sconst opt_interval
+				{
+					TypeName *t = $1;
+					t->typmods = $3;
+					$$ = makeStringConstCast($2, @2, t);
+				}
+			| ConstInterval FCONST opt_interval
+				{
+					/* NOTE(review): this branch ignores $1/$3 and returns a bare
+					 * float constant with no interval cast — inconsistent with the
+					 * other branches; confirm intended semantics. */
+					$$ = makeFloatConst($2, @2);
+				}
+			| /* EMPTY */ { $$ = NULL; }
+			;
+
+initime: interval_intexpr {$$ = $1; }
+ | functime_expr {$$ = $1; }
+ | functime_app {$$ = $1; }
+ ;
+
+functime_app: IDENT '(' ')'
+ {
+ FuncCall *n = makeNode(FuncCall);
+
+ n->funcname = list_make1(makeString($1));
+ n->args = NIL;
+ n->agg_order = NIL;
+ n->agg_star = FALSE;
+ n->agg_distinct = FALSE;
+ n->func_variadic = FALSE;
+ n->over = NULL;
+ n->location = @1;
+ n->call_func = false;
+ $$ = (Node *)n;
+ }
+ | IDENT '(' func_arg_list ')'
+ {
+ FuncCall *n = makeNode(FuncCall);
+ n->funcname = list_make1(makeString($1));
+ n->args = $3;
+ n->agg_order = NIL;
+ n->agg_star = FALSE;
+ n->agg_distinct = FALSE;
+ n->func_variadic = FALSE;
+ n->over = NULL;
+ n->location = @1;
+ n->call_func = false;
+ $$ = (Node *)n;
+ }
+ ;
+
+functime_expr:
+ CURRENT_TIMESTAMP
+ {
+ FuncCall *n = makeNode(FuncCall);
+ n->funcname = SystemFuncName("pg_systimestamp");
+ n->args = NIL;
+ n->agg_order = NIL;
+ n->agg_star = FALSE;
+ n->agg_distinct = FALSE;
+ n->func_variadic = FALSE;
+ n->over = NULL;
+ n->location = @1;
+ n->call_func = false;
+ $$ = (Node *)n;
+ }
+ | CURRENT_TIMESTAMP '(' Iconst ')'
+ {
+ Node *n;
+ TypeName *d;
+ n = makeStringConstCast("now", -1, SystemTypeName("text"));
+ d = SystemTypeName("timestamptz");
+ d->typmods = list_make1(makeIntConst($3, @3));
+ $$ = makeTypeCast(n, d, @1);
+ }
+ | LOCALTIMESTAMP
+ {
+ Node *n;
+ n = makeStringConstCast("now", -1, SystemTypeName("text"));
+ $$ = makeTypeCast(n, SystemTypeName("timestamp"), @1);
+ }
+ | LOCALTIMESTAMP '(' Iconst ')'
+ {
+ Node *n;
+ TypeName *d;
+ n = makeStringConstCast("now", -1, SystemTypeName("text"));
+ d = SystemTypeName("timestamp");
+ d->typmods = list_make1(makeIntConst($3, @3));
+ $$ = makeTypeCast(n, d, @1);
+ }
+ | SYSDATE
+ {
+ FuncCall *n = makeNode(FuncCall);
+ n->funcname = SystemFuncName("sysdate");
+ n->args = NIL;
+ n->agg_order = NIL;
+ n->agg_star = FALSE;
+ n->agg_distinct = FALSE;
+ n->func_variadic = FALSE;
+ n->over = NULL;
+ n->location = @1;
+ n->call_func = false;
+ $$ = (Node *)n;
+ }
+ ;
+
+interval_intexpr:
+ Sconst
+ {
+ $$ = makeStringConstCast($1, @1, SystemTypeName("timestamp"));
+ }
+ ;
+
+opt_ev_on_completion:
+ ON COMPLETION PRESERVE { $$ = FALSE; }
+ | ON COMPLETION NOT PRESERVE { $$ = TRUE; }
+ | /*EMPTY*/ { $$ = TRUE; }
+ ;
+
+opt_ev_status:
+ ENABLE_P { $$ = EVENT_ENABLE; }
+ | DISABLE_P { $$ = EVENT_DISABLE; }
+ | DISABLE_P ON SLAVE { $$ = EVENT_DISABLE_ON_SLAVE; }
+ | /*EMPTY*/ { $$ = EVENT_ENABLE; }
+ ;
+
+
+
+comment_opt: COMMENT SCONST { $$ = $2; }
+ | /*EMPTY*/ { $$ = NULL; }
+ ;
+
+/*
+ * ev_body: captures the raw SQL text of an event body (everything after DO)
+ * by driving the lexer manually and slicing the original scan buffer.
+ * Produces a palloc'd NUL-terminated copy of the body text.
+ */
+ev_body:	{
+				char *ev_body_str = NULL;
+				int ev_body_len = 0;
+				int tok = YYEMPTY;
+				int proc_b = 0;		/* byte offset where the body starts */
+				int proc_e = 0;		/* byte offset where the body ends */
+				int rc = 0;
+				int pre_tok = 0;	/* NOTE(review): assigned but never read */
+				int ploc = 0;
+				int pre_loc = 0;	/* location of the last non-EOF token seen */
+				int next_tok = 0;	/* NOTE(review): unused */
+				int flag = 0;		/* counts ';' tokens seen inside the body */
+				rc = CompileWhich();	/* NOTE(review): result discarded; call kept for safety */
+				base_yy_extra_type *yyextra = pg_yyget_extra(yyscanner);
+				/* use the parser's lookahead token if one is pending, else pull a fresh one */
+				if (yychar == YYEOF || yychar == YYEMPTY) {
+					tok = YYLEX;
+				}
+				else {
+					tok = yychar;
+					yychar = YYEMPTY;
+				}
+				proc_b = yylloc;
+				/* start event body scan */
+				while (true) {
+					if(tok == ';') {
+						++flag;
+					}
+
+					if(tok == YYEOF ) {
+						tok = YYLEX;
+						/* if a ';' was seen, end at the last token before EOF;
+						 * otherwise take the EOF location itself */
+						if(flag) {
+							proc_e = pre_loc;
+						} else {
+							proc_e = yylloc;
+						}
+						break;
+					}
+					ploc = yylloc;
+					if(tok != 0) {
+						pre_loc = ploc;
+					}
+					pre_tok = tok;
+					tok = YYLEX;
+				}
+				if (proc_e == 0) {
+					ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("event body is not ended correctly.")));
+				}
+				/* copy [proc_b-1, proc_e] out of the raw scan buffer (yylloc is 1-based here) */
+				ev_body_len = proc_e - proc_b + 1;
+				ev_body_str = (char*)palloc0(ev_body_len + 1);
+				rc = strncpy_s(ev_body_str, ev_body_len + 1, yyextra->core_yy_extra.scanbuf + proc_b - 1, ev_body_len);
+				securec_check(rc, "\0", "\0");
+				ev_body_str[ev_body_len] = '\0';
+				/* Reset the flag which mark whether we are in slash proc. */
+				yyextra->core_yy_extra.in_slash_proc_body = false;
+				yyextra->core_yy_extra.dolqstart = NULL;
+				$$ = ev_body_str;
+			}
+		;
+
+
+/*****************************************************************************
+ *
+ * QUERY:
+ * ALTER
+ * [DEFINER = user]
+ * EVENT event_name
+ * [ON SCHEDULE schedule]
+ * [ON COMPLETION [NOT] PRESERVE]
+ * [RENAME TO new_event_name]
+ * [ENABLE | DISABLE | DISABLE ON SLAVE]
+ * [COMMENT 'string']
+ * [DO event_body]
+ *
+ *****************************************************************************/
+AlterEventStmt:
+ ALTER definer_name_opt EVENT qualified_name preserve_opt rename_opt status_opt comments_opt action_opt
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "alter event statement is not yet supported in distributed database.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("ALTER EVENT is not yet supported in distributed database.")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ const char* message = "alter event statement is only supported in B format.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("ALTER EVENT is supported only in B-format database."),
+ parser_errposition(@1)));
+ $$ = NULL;
+ }
+ AlterEventStmt *n = makeNode(AlterEventStmt);
+ n->def_name = $2;
+ n->event_name = $4;
+ n->start_time_expr = NULL;
+ n->end_time_expr = NULL;
+ n->interval_time = NULL;
+ n->complete_preserve = $5;
+ n->new_name = $6;
+ n->event_status = $7;
+ n->event_comment_str = $8;
+ n->event_query_str = $9;
+ $$ = (Node*)n;
+ }
+ | ALTER definer_name_opt EVENT qualified_name ON SCHEDULE AT ev_timeexpr preserve_opt
+ rename_opt status_opt comments_opt action_opt
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "alter event statement is not yet supported in distributed database.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("ALTER EVENT is not yet supported in distributed database.")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ const char* message = "alter event statement is only supported in B format.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate,
+ (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("ALTER EVENT is supported only in B-format database."),
+ parser_errposition(@1)));
+ $$ = NULL;
+ }
+ AlterEventStmt *n = makeNode(AlterEventStmt);
+ n->def_name = $2;
+ n->event_name = $4;
+ n->start_time_expr = makeDefElem("start_date", (Node *)$8);
+ n->end_time_expr = NULL;
+ n->interval_time = makeDefElem("repeat_interval", NULL);
+ n->complete_preserve = $9;
+ n->new_name = $10;
+ n->event_status = $11;
+ n->event_comment_str = $12;
+ n->event_query_str = $13;
+ $$ = (Node*)n;
+ }
+ | ALTER definer_name_opt EVENT qualified_name ON SCHEDULE EVERY every_interval start_opt
+ end_opt preserve_opt rename_opt status_opt comments_opt action_opt
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "alter event statement is not yet supported in distributed database.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("ALTER EVENT is not yet supported in distributed database.")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ const char* message = "alter event statement is only supported in B format.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("ALTER EVENT is supported only in B-format database."),
+ parser_errposition(@1)));
+ $$ = NULL;
+
+ }
+ AlterEventStmt *n = makeNode(AlterEventStmt);
+ n->def_name = $2;
+ n->event_name = $4;
+ n->start_time_expr = $9;
+ n->end_time_expr = $10;
+ n->interval_time = makeDefElem("repeat_interval", (Node *)$8);
+ n->complete_preserve = $11;
+ n->new_name = $12;
+ n->event_status = $13;
+ n->event_comment_str = $14;
+ n->event_query_str = $15;
+ $$ = (Node*)n;
+ }
+ ;
+
+definer_name_opt: DEFINER '=' user
+ {
+ $$ = makeDefElem("owner", (Node *)makeString($3));
+ }
+ | /* EMPTY */ { $$ = NULL; }
+ ;
+
+
+end_opt: ENDS ev_timeexpr
+ {
+ $$ = makeDefElem("end_date", (Node *)$2);
+ }
+ |/*EMPTY*/ { $$ = NULL; }
+ ;
+
+start_opt:
+ STARTS ev_timeexpr
+ {
+ $$ = makeDefElem("start_date", (Node *)$2);
+ }
+ | /*EMPTY*/ { $$ = NULL; }
+ ;
+preserve_opt: ON COMPLETION PRESERVE
+ {
+ $$ = makeDefElem("auto_drop", (Node *)makeInteger(0));
+ }
+ | ON COMPLETION NOT PRESERVE
+ {
+ $$ = makeDefElem("auto_drop", (Node *)makeInteger(1));
+ }
+ | /*EMPTY*/ { $$ = NULL; }
+ ;
+
+rename_opt: RENAME TO qualified_name
+ {
+ $$ = makeDefElem("rename", (Node *)$3);
+ }
+ | /*EMPTY*/ { $$ = NULL; }
+ ;
+status_opt: ENABLE_P {$$ = makeDefElem("enabled", (Node *)makeInteger(0));}
+ | DISABLE_P {$$ = makeDefElem("enabled", (Node *)makeInteger(1));}
+ | DISABLE_P ON SLAVE{$$ = makeDefElem("enabled", (Node *)makeInteger(2));}
+ | /*EMPTY*/ { $$ = NULL; }
+ ;
+
+comments_opt: COMMENT Sconst {$$ = makeDefElem("comments", (Node *)makeString($2));}
+ | /*EMPTY*/ { $$ = NULL; }
+ ;
+
+action_opt: DO ev_body {$$ = makeDefElem("program_action", (Node *)makeString($2));}
+ | /*EMPTY*/ { $$ = NULL; }
+ ;
+
+
+/*****************************************************************************
+ *
+ * QUERY:
+ * DROP EVENT [IF EXISTS] event_name
+ *
+ *****************************************************************************/
+DropEventStmt:
+ DROP EVENT IF_P EXISTS qualified_name
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "drop event statement is not yet supported in distributed database.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("DROP EVENT is not yet supported in distributed database.")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ const char* message = "drop event statement is only supported in B format.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("DROP EVENT is supported only in B-format database."),
+ parser_errposition(@1)));
+ $$ = NULL;
+ }
+ DropEventStmt *n = makeNode(DropEventStmt);
+ n->missing_ok = true;
+ n->event_name = $5;
+ $$ = (Node*)n;
+ }
+ | DROP EVENT qualified_name
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "drop event statement is not yet supported in distributed database.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("DROP EVENT is not yet supported in distributed database.")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ const char* message = "drop event statement is only supported in B format.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate,
+ (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("DROP EVENT is supported only in B-format database"),
+ parser_errposition(@1)));
+ $$ = NULL;
+ }
+ DropEventStmt *n = makeNode(DropEventStmt);
+ n->missing_ok = false;
+ n->event_name = $3;
+ $$ = (Node*)n;
+ }
+ ;
+
+/*****************************************************************************
+ *
+ * QUERY:
+ * SHOW EVENTS
+ * [{FROM | IN} schema_name]
+ * [LIKE 'pattern' | WHERE expr]
+ *
+ *****************************************************************************/
+
+ShowEventStmt:
+ SHOW EVENTS event_from_clause event_where_clause
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "show events statement is not yet supported in distributed database.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SHOW EVENTS is not yet supported in distributed database.")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ const char* message = "show events statement is only supported in B format.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("SHOW EVENTS is supported only in B-format database."),
+ parser_errposition(@1)));
+ $$ = NULL;
+ }
+ ShowEventStmt *n = makeNode(ShowEventStmt);
+ n->from_clause = $3;
+ n->where_clause = $4;
+ $$ = (Node*)n;
+ }
+ | SHOW EVENTS event_from_clause
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "show events is not yet supported in distributed database.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SHOW EVENTS is not yet supported in distributed database.")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ const char* message = "show events statement is only supported in B format.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("SHOW EVENTS is supported only in B-format database."),
+ parser_errposition(@1)));
+ $$ = NULL;
+ }
+ ShowEventStmt *n = makeNode(ShowEventStmt);
+ n->from_clause = $3;
+ n->where_clause = NULL;
+ $$ = (Node*)n;
+ }
+ | SHOW EVENTS event_where_clause
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "show events is not yet supported in distributed database.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SHOW EVENTS is not yet supported in distributed database.")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ const char* message = "show events statement is only supported in B format.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate, (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("SHOW EVENTS is supported only in B-format database."),
+ parser_errposition(@1)));
+ $$ = NULL;
+ }
+ ShowEventStmt *n = makeNode(ShowEventStmt);
+ n->from_clause = NULL;
+ n->where_clause = $3;
+ $$ = (Node*)n;
+ }
+ ;
+
+
+event_from_clause:
+ FROM ColId
+ {
+ $$ = makeStringConst($2, @2);
+ }
+ | IN_P ColId
+ {
+ $$ = makeStringConst($2, @2);
+ }
+ ;
+
+event_where_clause:
+ WHERE ev_where_body
+ {
+ $$ = $2;
+ }
+ | LIKE Sconst
+ {
+ errno_t rc = EOK;
+ char* event_where_str;
+ event_where_str = (char *)palloc(1024 + strlen($2));
+ rc = strcpy_s(event_where_str, 1024 + strlen($2), "job_name like '");
+ securec_check(rc, "\0", "\0");
+ rc = strcat_s(event_where_str, 1024 + strlen($2), $2);
+ securec_check(rc, "\0", "\0");
+ rc = strcat_s(event_where_str, 1024 + strlen($2), "\'");
+ securec_check(rc, "\0", "\0");
+ $$ = event_where_str;
+ }
+ ;
+
+/*
+ * ev_where_body: captures the raw text of a SHOW EVENTS ... WHERE predicate
+ * by driving the lexer manually and slicing the original scan buffer.
+ * Same technique as ev_body; produces a palloc'd NUL-terminated copy.
+ * NOTE(review): near-duplicate of ev_body — candidate for a shared helper.
+ */
+ev_where_body: {
+				char *ev_body_str = NULL;
+				int ev_body_len = 0;
+				int tok = YYEMPTY;
+				int proc_b = 0;		/* byte offset where the predicate starts */
+				int proc_e = 0;		/* byte offset where the predicate ends */
+				int rc = 0;
+				int pre_tok = 0;	/* NOTE(review): assigned but never read */
+				int ploc = 0;
+				int pre_loc = 0;	/* location of the last non-EOF token seen */
+				int next_tok = 0;	/* NOTE(review): unused */
+				int flag = 0;		/* counts ';' tokens seen inside the predicate */
+				rc = CompileWhich();	/* NOTE(review): result discarded; call kept for safety */
+				base_yy_extra_type *yyextra = pg_yyget_extra(yyscanner);
+				/* use the parser's lookahead token if one is pending, else pull a fresh one */
+				if (yychar == YYEOF || yychar == YYEMPTY) {
+					tok = YYLEX;
+				}
+				else {
+					tok = yychar;
+					yychar = YYEMPTY;
+				}
+				proc_b = yylloc;
+				/* start event body scan */
+				while (true) {
+					if(tok == ';') {
+						++flag;
+					}
+
+					if(tok == YYEOF ) {
+						tok = YYLEX;
+						/* if a ';' was seen, end at the last token before EOF;
+						 * otherwise take the EOF location itself */
+						if(flag) {
+							proc_e = pre_loc;
+						} else {
+							proc_e = yylloc;
+						}
+						break;
+					}
+					ploc = yylloc;
+					if(tok != 0) {
+						pre_loc = ploc;
+					}
+					pre_tok = tok;
+					tok = YYLEX;
+				}
+				if (proc_e == 0) {
+					ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("where body is not ended correctly.")));
+				}
+				/* copy [proc_b-1, proc_e] out of the raw scan buffer (yylloc is 1-based here) */
+				ev_body_len = proc_e - proc_b + 1;
+				ev_body_str = (char*)palloc0(ev_body_len + 1);
+				rc = strncpy_s(ev_body_str, ev_body_len + 1, yyextra->core_yy_extra.scanbuf + proc_b - 1, ev_body_len);
+				securec_check(rc, "\0", "\0");
+				ev_body_str[ev_body_len] = '\0';
+				/* Reset the flag which mark whether we are in slash proc. */
+				yyextra->core_yy_extra.in_slash_proc_body = false;
+				yyextra->core_yy_extra.dolqstart = NULL;
+				$$ = ev_body_str;
+			}
+		;
+
CreateProcedureStmt:
CREATE opt_or_replace definer_user PROCEDURE func_name_opt_arg proc_args
opt_createproc_opt_list as_is {
@@ -20329,7 +21596,7 @@ PreparableStmt:
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("@var_name is not yet supported in distributed database.")));
#endif
- if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT && u_sess->attr.attr_common.enable_set_variable_b_format) {
+ if (DB_IS_CMPT(B_FORMAT) && (u_sess->attr.attr_common.enable_set_variable_b_format || ENABLE_SET_VARIABLES)) {
$$ = $1;
} else {
const char* message = "@var_name is supported only in B-format database, and enable_set_variable_b_format = on.";
@@ -21321,26 +22588,11 @@ select_no_parens:
(Node*)list_nth($4, 0), (Node*)list_nth($4, 1),
NULL,
yyscanner);
- SelectStmt *stmt = (SelectStmt *) $1;
- if ($5 != NULL) {
- if (stmt->intoClause != NULL) {
- ereport(errstate,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("select statement can contain only one into_clause")));
- }
- IntoClause *itc = (IntoClause *) $5;
- if (itc->rel != NULL && u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
- ereport(errstate,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("into new table here only support in B-format database")));
- }
- stmt->intoClause = itc;
- }
- $$ = (Node *)stmt;
+ $$ = processIntoClauseInSelectStmt((SelectStmt *) $1, (IntoClause *) $5);
}
| select_clause opt_sort_clause select_limit opt_for_locking_clause
{
- FilterStartWithUseCases((SelectStmt *) $1, $4, yyscanner, @4);
+ FilterStartWithUseCases((SelectStmt *) $1, $4, yyscanner, @4);
insertSelectOptions((SelectStmt *) $1, $2, $4,
(Node*)list_nth($3, 0), (Node*)list_nth($3, 1),
NULL,
@@ -21354,20 +22606,7 @@ select_no_parens:
(Node*)list_nth($3, 0), (Node*)list_nth($3, 1),
NULL,
yyscanner);
- SelectStmt *stmt = (SelectStmt *) $1;
- if (stmt->intoClause != NULL) {
- ereport(errstate,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("select statement can contain only one into_clause")));
- }
- IntoClause *itc = (IntoClause *) $5;
- if (itc->rel != NULL && u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
- ereport(errstate,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("into new table here only support in B-format database")));
- }
- stmt->intoClause = itc;
- $$ = (Node *)stmt;
+ $$ = processIntoClauseInSelectStmt((SelectStmt *) $1, (IntoClause *) $5);
}
| select_clause opt_sort_clause opt_select_limit into_clause opt_for_locking_clause
{
@@ -21376,20 +22615,7 @@ select_no_parens:
(Node*)list_nth($3, 0), (Node*)list_nth($3, 1),
NULL,
yyscanner);
- SelectStmt *stmt = (SelectStmt *) $1;
- if (stmt->intoClause != NULL) {
- ereport(errstate,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("select statement can contain only one into_clause")));
- }
- IntoClause *itc = (IntoClause *) $4;
- if (itc->rel != NULL && u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
- ereport(errstate,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("into new table here only support in B-format database")));
- }
- stmt->intoClause = itc;
- $$ = (Node *)stmt;
+ $$ = processIntoClauseInSelectStmt((SelectStmt *) $1, (IntoClause *) $4);
}
| with_clause select_clause
{
@@ -21414,70 +22640,29 @@ select_no_parens:
(Node*)list_nth($5, 0), (Node*)list_nth($5, 1),
$1,
yyscanner);
- SelectStmt *stmt = (SelectStmt *) $2;
- if ($6 != NULL) {
- if (stmt->intoClause != NULL) {
- ereport(errstate,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("select statement can contain only one into_clause")));
- }
- IntoClause *itc = (IntoClause *) $6;
- if (itc->rel != NULL && u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
- ereport(errstate,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("into new table here only support in B-format database")));
- }
- stmt->intoClause = itc;
- }
- $$ = (Node *)stmt;
+					/* fix: in this with_clause alternative the SelectStmt is $2 ($1 is
+					 * the WITH clause) — matches the insertSelectOptions call above and
+					 * the code this line replaced. */
+					$$ = processIntoClauseInSelectStmt((SelectStmt *) $2, (IntoClause *) $6);
}
| with_clause select_clause opt_sort_clause select_limit for_locking_clause into_clause
{
- FilterStartWithUseCases((SelectStmt *) $2, $5, yyscanner, @5);
+ FilterStartWithUseCases((SelectStmt *) $2, $5, yyscanner, @5);
insertSelectOptions((SelectStmt *) $2, $3, $5,
(Node*)list_nth($4, 0), (Node*)list_nth($4, 1),
$1,
yyscanner);
- SelectStmt *stmt = (SelectStmt *) $2;
- if (stmt->intoClause != NULL) {
- ereport(errstate,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("select statement can contain only one into_clause")));
- }
- IntoClause *itc = (IntoClause *) $6;
- if (itc->rel != NULL && u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
- ereport(errstate,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("into new table here only support in B-format database")));
- }
- stmt->intoClause = itc;
- $$ = (Node *)stmt;
+					/* fix: in this with_clause alternative the SelectStmt is $2 ($1 is
+					 * the WITH clause) — matches the insertSelectOptions call above and
+					 * the code this line replaced. */
+					$$ = processIntoClauseInSelectStmt((SelectStmt *) $2, (IntoClause *) $6);
}
| with_clause select_clause opt_sort_clause opt_select_limit into_clause opt_for_locking_clause
{
- FilterStartWithUseCases((SelectStmt *) $2, $6, yyscanner, @6);
+ FilterStartWithUseCases((SelectStmt *) $2, $6, yyscanner, @6);
insertSelectOptions((SelectStmt *) $2, $3, $6,
(Node*)list_nth($4, 0), (Node*)list_nth($4, 1),
$1,
yyscanner);
- SelectStmt *stmt = (SelectStmt *) $2;
- if (stmt->intoClause != NULL) {
- ereport(errstate,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("select statement can contain only one into_clause")));
- }
- IntoClause *itc = (IntoClause *) $5;
- if (itc->rel != NULL && u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
- ereport(errstate,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("into new table here only support in B-format database")));
- }
- stmt->intoClause = itc;
- $$ = (Node *)stmt;
+					/* fix: in this with_clause alternative the SelectStmt is $2 ($1 is
+					 * the WITH clause) — matches the insertSelectOptions call above and
+					 * the code this line replaced. */
+					$$ = processIntoClauseInSelectStmt((SelectStmt *) $2, (IntoClause *) $5);
}
| with_clause select_clause opt_sort_clause select_limit opt_for_locking_clause
{
- FilterStartWithUseCases((SelectStmt *) $2, $5, yyscanner, @5);
+ FilterStartWithUseCases((SelectStmt *) $2, $5, yyscanner, @5);
insertSelectOptions((SelectStmt *) $2, $3, $5,
(Node*)list_nth($4, 0), (Node*)list_nth($4, 1),
$1,
@@ -21669,8 +22854,108 @@ into_clause:
$$->relkind = INTO_CLAUSE_RELKIND_DEFAULT;
$$->userVarList = $2;
}
- ;
-
+ | INTO OUTFILE Sconst characterset_option fields_options_fin lines_options_fin
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "SELECT INTO OUTFILE is not yet supported in distributed database.";
+ InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SELECT INTO OUTFILE is not yet supported in distributed database.")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SELECT INTO OUTFILE is only supported in B_FORMAT.")));
+ }
+ IntoClause *n = makeNode(IntoClause);
+ n->filename = $3;
+ n->copyOption = $5;
+ n->is_outfile = true;
+ if ($4)
+ n->copyOption = lappend(n->copyOption, $4);
+ if ($6)
+ n->copyOption = lappend3(n->copyOption, $6);
+ n->rel = NULL;
+ n->colNames = NIL;
+ n->options = NIL;
+ n->onCommit = ONCOMMIT_NOOP;
+ n->row_compress = REL_CMPRS_PAGE_PLAIN;
+ n->tableSpaceName = NULL;
+ n->skipData = false;
+ n->relkind = INTO_CLAUSE_RELKIND_DEFAULT;
+ $$ = n;
+ }
+ | INTO DUMPFILE Sconst
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "SELECT INTO DUMPFILE is not yet supported in distributed database.";
+ gsplsql_insert_error_msg(message, u_sess->plsql_cxt.plpgsql_yylloc);
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SELECT INTO DUMPFILE is not yet supported in distributed database.")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SELECT INTO DUMPFILE is only supported in B_FORMAT.")));
+ }
+ $$ = makeNode(IntoClause);
+ $$->is_outfile = false;
+ $$->filename = $3;
+ }
+ ;
+characterset_option:
+ CHARACTER SET Sconst
+ {
+ $$ = makeDefElem("encoding", (Node *)makeString($3));
+ }
+ | /*EMPTY*/ { $$ = NULL; }
+ ;
+fields_options_fin:
+ FIELDS fields_options_list { $$ = $2; }
+ | /*EMPTY*/ { $$ = NIL; }
+ ;
+fields_options_list:
+ fields_options_list fields_options_item { $$ = lappend($1, $2); }
+ | /*EMPTY*/ { $$ = NIL; }
+ ;
+fields_options_item:
+ TERMINATED BY Sconst
+ {
+ $$ = makeDefElem("delimiter", (Node *)makeString($3));
+ }
+ | OPTIONALLY ENCLOSED BY Sconst
+ {
+ $$ = makeDefElem("o_enclosed", (Node *)makeString($4));
+ }
+ | ENCLOSED BY Sconst
+ {
+ $$ = makeDefElem("enclosed", (Node *)makeString($3));
+ }
+ | ESCAPED BY Sconst
+ {
+ $$ = makeDefElem("escape", (Node *)makeString($3));
+ }
+ ;
+lines_options_fin:
+ LINES lines_options_list { $$ = $2; }
+ | /*EMPTY*/ { $$ = NIL; }
+ ;
+lines_options_list:
+ lines_options_list lines_option_item { $$ = lappend($1, $2); }
+ | /*EMPTY*/ { $$ = NIL; }
+ ;
+lines_option_item:
+ STARTING BY Sconst
+ {
+ $$ = makeDefElem("line_start", (Node *)makeString($3));
+ }
+ | TERMINATED BY Sconst
+ {
+ $$ = makeDefElem("eol", (Node *)makeString($3));
+ }
+ ;
into_user_var_list:
uservar_name { $$ = list_make1($1); }
| into_user_var_list ',' uservar_name { $$ = lappend($1,$3); }
@@ -23193,38 +24478,16 @@ ConstCharacter: CharacterWithLength
}
;
-CharacterWithLength: character '(' Iconst ')' opt_charset
+CharacterWithLength: character '(' Iconst ')'
{
- if (($5 != NULL) && (strcmp($5, "sql_text") != 0))
- {
- char *type;
-
- type = (char *)palloc(strlen($1) + 1 + strlen($5) + 1);
- strcpy(type, $1);
- strcat(type, "_");
- strcat(type, $5);
- $1 = type;
- }
-
$$ = SystemTypeName($1);
$$->typmods = list_make1(makeIntConst($3, @3));
$$->location = @1;
}
;
-CharacterWithoutLength: character opt_charset
+CharacterWithoutLength: character
{
- if (($2 != NULL) && (strcmp($2, "sql_text") != 0))
- {
- char *type;
-
- type = (char *)palloc(strlen($1) + 1 + strlen($2) + 1);
- strcpy(type, $1);
- strcat(type, "_");
- strcat(type, $2);
- $1 = type;
- }
-
$$ = SystemTypeName($1);
/* char defaults to char(1), varchar to no limit */
@@ -23260,9 +24523,123 @@ opt_varying:
| /*EMPTY*/ { $$ = FALSE; }
;
+character_set:
+ CHARACTER SET
+ | CHARSET
+ ;
+
+charset_collate_name:
+ ColId { $$ = $1; }
+ | BINARY { $$ = pg_strdup($1); }
+ | Sconst { $$ = $1; }
+ ;
+
+charset:
+ character_set charset_collate_name
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "specifying character sets and collations is not yet supported";
+ InsertErrorMessage(message, u_sess->plsql_cxt.gsplsql_yylloc);
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("specifying character sets and collations is not yet supported")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("specifying character sets and collations is supported only in B-format database")));
+ }
+ int encoding = pg_valid_server_encoding($2);
+ if (encoding < 0)
+            ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("%s is not a valid encoding name", $2),
+ parser_errposition(@2)));
+ $$ = encoding;
+ }
+ ;
+
+convert_charset:
+ charset
+ {
+ $$ = $1;
+ }
+ | character_set DEFAULT
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ const char* message = "specifying character sets and collations is not yet supported";
+ InsertErrorMessage(message, u_sess->plsql_cxt.gsplsql_yylloc);
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("specifying character sets and collations is not yet supported")));
+#endif
+ if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("specifying character sets and collations is supported only in B-format database")));
+ }
+ $$ = PG_INVALID_ENCODING;
+ }
+ ;
+
opt_charset:
- CHARACTER SET ColId { $$ = $3; }
- | /*EMPTY*/ { $$ = NULL; }
+ charset
+ {
+ $$ = $1;
+ }
+ | /*EMPTY*/
+ {
+ $$ = PG_INVALID_ENCODING;
+ }
+ ;
+
+default_charset:
+ DEFAULT character_set opt_equal charset_collate_name
+ {
+ int encoding = pg_valid_server_encoding($4);
+ if (encoding < 0)
+ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("%s is not a valid encoding name", $4),
+ parser_errposition(@4)));
+ $$ = encoding;
+ }
+ | character_set opt_equal charset_collate_name
+ {
+ int encoding = pg_valid_server_encoding($3);
+ if (encoding < 0)
+ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("%s is not a valid encoding name", $3),
+ parser_errposition(@3)));
+ $$ = encoding;
+ }
+ ;
+
+optCharsetCollate:
+ CharsetCollate { $$ = $1; }
+ | /* EMPTY */ { $$ = NULL;}
+ ;
+
+CharsetCollate:
+ charset_collate
+ {
+ $$ = MakeCharsetCollateOptions(NULL, $1);
+ }
+ | CharsetCollate charset_collate
+ {
+ $$ = MakeCharsetCollateOptions($1, $2);
+ }
+ ;
+
+charset_collate:
+ default_charset
+ {
+ CharsetCollateOptions *n = (CharsetCollateOptions*)palloc0(sizeof(CharsetCollateOptions));
+ n->cctype = OPT_CHARSET;
+ n->charset = $1;
+ $$ = n;
+ }
+ | default_collate
+ {
+ CharsetCollateOptions *n = (CharsetCollateOptions*)palloc0(sizeof(CharsetCollateOptions));
+ n->cctype = OPT_COLLATE;
+ n->collate = $1;
+ $$ = n;
+ }
;
/*
@@ -23527,7 +24904,7 @@ a_expr: c_expr { $$ = $1; }
}
| a_expr TYPECAST Typename
{ $$ = makeTypeCast($1, $3, @2); }
- | a_expr COLLATE any_name
+ | a_expr COLLATE collate_name
{
CollateClause *n = makeNode(CollateClause);
n->arg = $1;
@@ -23612,7 +24989,7 @@ a_expr: c_expr { $$ = $1; }
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("@var_name := expr is not yet supported in distributed database.")));
#endif
- if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT && u_sess->attr.attr_common.enable_set_variable_b_format) {
+ if (DB_IS_CMPT(B_FORMAT) && (u_sess->attr.attr_common.enable_set_variable_b_format || ENABLE_SET_VARIABLES)) {
UserSetElem *n = makeNode(UserSetElem);
n->name = list_make1((Node *)$1);
n->val = (Expr *)$3;
@@ -24317,7 +25694,7 @@ c_expr: columnref %prec UMINUS { $$ = $1; }
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("@var_name is not yet supported in distributed database.")));
#endif
- if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT && u_sess->attr.attr_common.enable_set_variable_b_format) {
+ if (DB_IS_CMPT(B_FORMAT) && (u_sess->attr.attr_common.enable_set_variable_b_format || ENABLE_SET_VARIABLES)) {
$$ = $1;
} else {
const char* message = "@var_name is supported only in B-format database, and enable_set_variable_b_format = on.";
@@ -24562,16 +25939,6 @@ func_expr: func_application within_group_clause over_clause
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("group_concat is not yet supported in distributed database.")));
#endif
- if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT)
- {
- const char* message = "group_concat is supported only in B-format database";
- InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
- ereport(errstate,
- (errmodule(MOD_PARSER),
- errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("group_concat is supported only in B-format database"),
- parser_errposition(@1)));
- }
WindowDef *wd = (WindowDef*) $3;
if (wd != NULL) {
ereport(errstate,
@@ -24591,16 +25958,6 @@ func_expr: func_application within_group_clause over_clause
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("group_concat is not yet supported in distributed database.")));
#endif
- if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT)
- {
- const char* message = "group_concat is supported only in B-format database";
- InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc);
- ereport(errstate,
- (errmodule(MOD_PARSER),
- errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("group_concat is supported only in B-format database"),
- parser_errposition(@1)));
- }
$$ = $1;
}
| func_expr_common_subexpr
@@ -26007,6 +27364,7 @@ case_expr: CASE case_arg when_clause_list case_default END_P
c->arg = NULL;
c->args = NULL;
c->defresult = NULL;
+ c->fromDecode = true;
foreach(cell,$5)
{
@@ -26656,6 +28014,7 @@ unreserved_keyword:
| CASCADED
| CATALOG_P
| CHAIN
+ | CHANGE
| CHARACTERISTICS
| CHARACTERSET
| CHECKPOINT
@@ -26669,12 +28028,14 @@ unreserved_keyword:
| CLUSTER
| COLUMN_ENCRYPTION_KEY
| COLUMN_ENCRYPTION_KEYS
+ | COLUMNS
| COMMENT
| COMMENTS
| COMMIT
| COMMITTED
| COMPATIBLE_ILLEGAL_CHARS
| COMPLETE
+ | COMPLETION
| COMPRESS
| CONDITION
| CONFIGURATION
@@ -26686,6 +28047,7 @@ unreserved_keyword:
| CONTINUE_P
| CONTVIEW
| CONVERSION_P
+ | CONVERT_P
| COORDINATOR
| COORDINATORS
| COPY
@@ -26728,6 +28090,7 @@ unreserved_keyword:
| DOMAIN_P
| DOUBLE_P
| DROP
+ | DUMPFILE
| DUPLICATE
| EACH
| ELASTIC
@@ -26738,12 +28101,16 @@ unreserved_keyword:
| ENCRYPTED_VALUE
| ENCRYPTION
| ENCRYPTION_TYPE
+ | ENDS
| ENFORCED
| ENUM_P
| EOL
| ERRORS
| ESCAPE
+ | ESCAPED
| ESCAPING
+ | EVENT
+ | EVENTS
| EVERY
| EXCHANGE
| EXCLUDE
@@ -26820,6 +28187,7 @@ unreserved_keyword:
| LC_CTYPE_P
| LEAKPROOF
| LEVEL
+ | LINES
| LIST
| LISTEN
| LOAD
@@ -26877,6 +28245,7 @@ unreserved_keyword:
| OPTIONALLY
| OPTIONS
| OVER
+ | OUTFILE
| OWNED
| OWNER
| PACKAGE
@@ -26960,6 +28329,7 @@ unreserved_keyword:
| RULE
| SAMPLE
| SAVEPOINT
+ | SCHEDULE
| SCHEMA
| SCROLL
| SEARCH
@@ -26981,6 +28351,7 @@ unreserved_keyword:
| SIMPLE
| SIZE
| SKIP
+ | SLAVE
| SLICE
| SMALLDATETIME_FORMAT_P
| SNAPSHOT
@@ -26991,6 +28362,8 @@ unreserved_keyword:
| STABLE
| STANDALONE_P
| START
+ | STARTING
+ | STARTS
| STATEMENT
| STATEMENT_ID
| STATISTICS
@@ -27004,6 +28377,7 @@ unreserved_keyword:
| STRICT_P
| STRIP_P
| SUBPARTITION
+ | SUBPARTITIONS
| SUBSCRIPTION
| SYNONYM
| SYSID
@@ -28240,6 +29614,7 @@ makeNodeDecodeCondtion(Expr* firstCond,Expr* secondCond)
c->args = NULL;
c->args = lappend(c->args,w);
c->defresult = (Expr*)equal_oper;
+ c->fromDecode = true;
return (Expr*)c;
}
@@ -29263,6 +30638,22 @@ static void FilterStartWithUseCases(SelectStmt* stmt, List* locking_clause, core
}
}
+static Node *processIntoClauseInSelectStmt(SelectStmt *stmt, IntoClause *itc)
+{
+ if (itc != NULL) {
+ if (stmt->intoClause != NULL) {
+ ereport(errstate,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("select statement can contain only one into_clause")));
+ }
+ if (itc->rel != NULL && u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) {
+ ereport(errstate,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+                errmsg("into_clause with a target relation is supported only in B-format database"))); }
+ stmt->intoClause = itc;
+ }
+ return (Node *)stmt;
+}
static Node* MakeConnectByRootNode(ColumnRef* cr, int location)
{
@@ -29388,8 +30779,11 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm
errno_t rc = 0;
base_yy_extra_type *yyextra = pg_yyget_extra(yyscanner);
if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT) {
+ if(strlen(input) == 0) {
+ parser_yyerror("DELIMITER must be followed by a 'delimiter' character or string");
+ }
if (strlen(input) >= DELIMITER_LENGTH) {
- parser_yyerror("syntax error");
+            parser_yyerror("'delimiter' length should be less than 16");
}
n->is_local = false;
n->kind = VAR_SET_VALUE;
@@ -29504,6 +30898,27 @@ static void setAccessMethod(Constraint *n)
}
}
+static CharsetCollateOptions* MakeCharsetCollateOptions(CharsetCollateOptions *options, CharsetCollateOptions *option)
+{
+ if (options == NULL) {
+ options = makeNode(CharsetCollateOptions);
+ options->cctype = OPT_CHARSETCOLLATE;
+ options->charset = PG_INVALID_ENCODING;
+ options->collate = NULL;
+ }
+ switch (option->cctype) {
+ case OPT_CHARSET:
+ options->charset = option->charset;
+ break;
+ case OPT_COLLATE:
+ options->collate = option->collate;
+ break;
+ default:
+ break;
+ }
+ return options;
+}
+
/*
* Must undefine this stuff before including scan.c, since it has different
* definitions for these macros.
diff --git a/src/common/backend/parser/parse_coerce.cpp b/src/common/backend/parser/parse_coerce.cpp
index 8331ebcda..18b404c85 100644
--- a/src/common/backend/parser/parse_coerce.cpp
+++ b/src/common/backend/parser/parse_coerce.cpp
@@ -35,6 +35,7 @@
#include "utils/numeric.h"
#include "utils/guc.h"
#include "utils/guc_tables.h"
+#include "mb/pg_wchar.h"
#ifdef PGXC
#include "pgxc/pgxc.h"
#endif
@@ -62,6 +63,9 @@ static bool meet_decode_compatibility(List* exprs, const char* context);
static bool meet_c_format_compatibility(List* exprs, const char* context);
static bool meet_set_type_compatibility(List* exprs, const char* context, Oid *retOid);
extern Node* makeAConst(Value* v, int location);
+#define CHECK_PARSE_PHRASE(context, target) \
+ (AssertMacro(sizeof(target) - 1 == strlen(target)), strncmp(context, target, sizeof(target) - 1) == 0)
+
/*
* @Description: same as get_element_type() except this reports error
* when the result is invalid.
@@ -173,6 +177,50 @@ Node* coerce_to_target_type(ParseState* pstate, Node* expr, Oid exprtype, Oid ta
return result;
}
+/*
+ * coerce_to_target_charset()
+ * Convert an expression to a target character set.
+ *
+ * pstate - parse state (can be NULL, see semtc_coerce_type)
+ * expr - input expression tree (already transformed by semtc_expr)
+ * target_charset - desired result character set
+ * targetTypeId - desired result type
+ */
+Node* coerce_to_target_charset(Node* expr, int target_charset, Oid targetTypeId)
+{
+ FuncExpr* fexpr = NULL;
+ List* args = NIL;
+ Const* cons = NULL;
+ int exprcharset = PG_INVALID_ENCODING;
+
+ if (target_charset == PG_INVALID_ENCODING) {
+ return expr;
+ }
+
+ exprcharset = exprCharset((Node*)expr);
+ if (exprcharset == PG_INVALID_ENCODING) {
+ exprcharset = GetDatabaseEncoding();
+ }
+ if (exprcharset == target_charset) {
+ return expr;
+ }
+
+ const char* expr_charset_name = pg_encoding_to_char(exprcharset);
+ const char* target_charset_name = pg_encoding_to_char(target_charset);
+
+ args = list_make1(expr);
+
+ cons = makeConst(NAMEOID, -1, InvalidOid, sizeof(const char*), NameGetDatum(expr_charset_name), false, true);
+ args = lappend(args, cons);
+
+ cons = makeConst(NAMEOID, -1, InvalidOid, sizeof(const char*),
+ NameGetDatum(target_charset_name), false, true);
+ args = lappend(args, cons);
+
+ fexpr = makeFuncExpr(CONVERTFUNCOID, targetTypeId, args, InvalidOid, InvalidOid, COERCE_IMPLICIT_CAST);
+ return (Node*)fexpr;
+}
+
/*
* user_defined variables only store integer, float, bit, string and null,
* therefor, we convert the constant to the corresponding type.
@@ -1684,7 +1732,7 @@ Oid select_common_type(ParseState* pstate, List* exprs, const char* context, Nod
* using C format coercion.
*/
ptype = choose_specific_expr_type(pstate, exprs, context);
- } else if (context != NULL && 0 == strncmp(context, "NVL", sizeof("NVL"))) {
+ } else if (context != NULL && CHECK_PARSE_PHRASE(context, "NVL")) {
/* Follow A db nvl*/
ptype = choose_nvl_type(pstate, exprs, context);
} else {
@@ -1717,7 +1765,8 @@ Oid select_common_type(ParseState* pstate, List* exprs, const char* context, Nod
static bool meet_decode_compatibility(List* exprs, const char* context)
{
bool res = u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && ENABLE_SQL_BETA_FEATURE(A_STYLE_COERCE) &&
- context != NULL && 0 == strncmp(context, "CASE", sizeof("CASE")) && check_all_in_whitelist(exprs);
+ context != NULL && CHECK_PARSE_PHRASE(context, "DECODE") && check_all_in_whitelist(exprs);
+
return res;
}
@@ -1728,9 +1777,9 @@ static bool meet_decode_compatibility(List* exprs, const char* context)
static bool meet_c_format_compatibility(List* exprs, const char* context)
{
bool res = (u_sess->attr.attr_sql.sql_compatibility == C_FORMAT && context != NULL &&
- (0 == strncmp(context, "CASE", sizeof("CASE")) || 0 == strncmp(context, "COALESCE", sizeof("COALESCE")))) ||
- (ENABLE_SQL_BETA_FEATURE(A_STYLE_COERCE) && context != NULL &&
- (0 == strncmp(context, "CASE", sizeof("CASE"))));
+ (CHECK_PARSE_PHRASE(context, "CASE") || CHECK_PARSE_PHRASE(context, "COALESCE") ||
+ CHECK_PARSE_PHRASE(context, "DECODE"))) ||
+ (ENABLE_SQL_BETA_FEATURE(A_STYLE_COERCE) && context != NULL && CHECK_PARSE_PHRASE(context, "DECODE"));
return res;
}
diff --git a/src/common/backend/parser/parse_expr.cpp b/src/common/backend/parser/parse_expr.cpp
index e1a77d588..31e0c4c27 100644
--- a/src/common/backend/parser/parse_expr.cpp
+++ b/src/common/backend/parser/parse_expr.cpp
@@ -21,6 +21,7 @@
#include "catalog/pg_type.h"
#include "catalog/pg_proc.h"
#include "catalog/gs_package.h"
+#include "catalog/gs_utf8_collation.h"
#include "commands/dbcommands.h"
#include "commands/sequence.h"
#include "db4ai/predict_by.h"
@@ -91,6 +92,7 @@ static Node* convertStarToCRef(RangeTblEntry* rte, char* catname, char* nspname,
static bool IsSequenceFuncCall(Node* filed1, Node* filed2, Node* filed3);
static Node* transformSequenceFuncCall(ParseState* pstate, Node* field1, Node* field2, Node* field3, int location);
static Node* transformConnectByRootFuncCall(ParseState* pstate, Node* funcNameVal, ColumnRef *cref);
+static bool CheckSwAbortedRTE(ParseState *pstate, char *relname);
static char *ColumnRefFindRelname(ParseState *pstate, const char *colname);
static Node *transformStartWithColumnRef(ParseState *pstate, ColumnRef *cref, char **colname);
static Node* tryTransformFunc(ParseState* pstate, List* fields, int location);
@@ -1941,8 +1943,8 @@ static Node* transformCaseExpr(ParseState* pstate, CaseExpr* c)
if (OidIsValid(c->casetype)) {
return (Node*)c;
}
- bool saved_is_case_when = pstate->p_is_case_when;
- pstate->p_is_case_when = true;
+ bool saved_is_decode = pstate->p_is_decode;
+ pstate->p_is_decode = c->fromDecode;
newc = makeNode(CaseExpr);
/* transform the test expression, if any */
@@ -2040,7 +2042,7 @@ static Node* transformCaseExpr(ParseState* pstate, CaseExpr* c)
resultexprs = lcons(newc->defresult, resultexprs);
}
- ptype = select_common_type(pstate, resultexprs, "CASE", NULL);
+ ptype = select_common_type(pstate, resultexprs, c->fromDecode ? "DECODE" : "CASE", NULL);
AssertEreport(OidIsValid(ptype), MOD_OPT, "");
newc->casetype = ptype;
/* casecollid will be set by parse_collate.c */
@@ -2056,7 +2058,7 @@ static Node* transformCaseExpr(ParseState* pstate, CaseExpr* c)
}
newc->location = c->location;
- pstate->p_is_case_when = saved_is_case_when;
+ pstate->p_is_decode = saved_is_decode;
return (Node*)newc;
}
@@ -2916,6 +2918,7 @@ static Node* transformCollateClause(ParseState* pstate, CollateClause* c)
}
newc->collOid = LookupCollation(pstate, c->collname, c->location);
newc->location = c->location;
+ check_binary_collation(newc->collOid, argtype);
return (Node*)newc;
}
@@ -3367,6 +3370,12 @@ static Node *transformStartWithColumnRef(ParseState *pstate, ColumnRef *cref, ch
return NULL;
}
+ if (!CheckSwAbortedRTE(pstate, relname)) {
+ ereport(DEBUG1, (errmodule(MOD_OPT_REWRITE),
+ errmsg("do not find relname %s in sw-aborted RTE, maybe it's a normal column", relname)));
+ return NULL;
+ }
+
*colname = makeStartWithDummayColname(relname, local_column_ref);
field1 = (Node *)makeString("tmp_reuslt");
field2 = (Node*)makeString(pstrdup(*colname));
@@ -3430,6 +3439,52 @@ static Node* transformConnectByRootFuncCall(ParseState* pstate, Node* funcNameVa
return transformFuncCall(pstate, fn);
}
+/*
+ * Check rel RTE sw type
+ * TRUE is sw-aborted RTE
+ * FALSE is not sw-aborted RTE
+ */
+static bool CheckSwAbortedRTE(ParseState *pstate, char *relname)
+{
+ ListCell *lc = NULL;
+
+ while (pstate != NULL) {
+ foreach(lc, pstate->p_rtable) {
+ char *rtename = NULL;
+ RangeTblEntry *rte = (RangeTblEntry *)lfirst(lc);
+
+ if (!rte->swAborted) {
+ continue;
+ }
+
+ if (rte->rtekind == RTE_RELATION) {
+ rtename = (rte->alias && rte->alias->aliasname) ?
+ rte->alias->aliasname : rte->relname;
+ } else if (rte->rtekind == RTE_SUBQUERY) {
+ rtename = rte->alias->aliasname;
+ } else if (rte->rtekind == RTE_CTE) {
+ rtename = (rte->alias && rte->alias->aliasname) ?
+ rte->alias->aliasname : rte->ctename;
+ } else if (rte->rtekind == RTE_JOIN) {
+ continue;
+ } else {
+ ereport(ERROR,
+ (errcode(ERRCODE_AMBIGUOUS_COLUMN),
+ errmsg("Only support RTE_RELATION/RTE_SUBQUERY/RTE_CTE"
+                        " when transform column in start with.")));
+ }
+
+ if (pg_strcasecmp(relname, rtename) == 0) {
+ return true;
+ }
+ }
+
+ pstate = pstate->parentParseState;
+ }
+
+ return false;
+}
+
/*
* Note, currently, only support 1 fild ColumnRef, will expand to support multiple case
*/
@@ -3496,8 +3551,6 @@ static Node* transformPrefixKey(ParseState* pstate, PrefixKey* pkey)
int maxlen;
int location = ((ColumnRef*)argnode)->location;
- Assert(nodeTag(argnode) == T_ColumnRef);
-
if (pkey->length <= 0 || pkey->length > INDEX_KEY_MAX_PREFIX_LENGTH) {
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
diff --git a/src/common/backend/parser/parse_func.cpp b/src/common/backend/parser/parse_func.cpp
index 36a9bc92c..53215e41a 100644
--- a/src/common/backend/parser/parse_func.cpp
+++ b/src/common/backend/parser/parse_func.cpp
@@ -2045,7 +2045,7 @@ Oid LookupFuncName(List* funcname, int nargs, const Oid* argtypes, bool noError)
while (clist) {
/* if argtype is CL type replace it with original type */
for (int i = 0; i < nargs; i++) {
- if (IsClientLogicType(clist->args[i])) {
+ if (IsClientLogicType(clist->args[i]) && !u_sess->attr.attr_common.IsInplaceUpgrade) {
clist->args[i] = cl_get_input_param_original_type(clist->oid, i);
}
}
diff --git a/src/common/backend/parser/parse_merge.cpp b/src/common/backend/parser/parse_merge.cpp
index fbd08341b..e45e97fd0 100644
--- a/src/common/backend/parser/parse_merge.cpp
+++ b/src/common/backend/parser/parse_merge.cpp
@@ -1290,6 +1290,7 @@ Query* transformMergeStmt(ParseState* pstate, MergeStmt* stmt)
action->qual = transformWhereClause(pstate, mergeWhenClause->condition, "WHEN");
pstate->p_varnamespace = save_varnamespace;
pstate->use_level = false;
+ UpdateParseCheck(pstate, (Node*)action);
fixResTargetListWithTableNameRef(targetrel, stmt->relation, set_clause_list_copy);
mergeWhenClause->targetList = set_clause_list_copy;
diff --git a/src/common/backend/parser/parse_oper.cpp b/src/common/backend/parser/parse_oper.cpp
index cf35bc263..161a54f0d 100644
--- a/src/common/backend/parser/parse_oper.cpp
+++ b/src/common/backend/parser/parse_oper.cpp
@@ -263,20 +263,34 @@ static Oid binary_oper_exact(List* opname, Oid arg1, Oid arg2, bool use_a_style_
{
Oid result;
bool was_unknown = false;
- bool other_was_num = false;
+
+ if (use_a_style_coercion) {
+ /*
+ * For A-style decode,
+         * decode(<numeric>, <unknown>, ...) will be compared as characters
+         * decode(<unknown/string>, <numeric>, ...) will be compared as numbers
+         * Note that decode(<numeric>, <string>, ...) categories are not
+ * handled, because PG-style coercion suffers from blankspace padding of
+ * bpchar and displaying fractional part of numeric, the behavior is tricky
+ * to describe.
+ */
+ char arg1_category = get_typecategory(arg1);
+ char arg2_category = get_typecategory(arg2);
+ if (arg1_category == TYPCATEGORY_NUMERIC && arg2_category == TYPCATEGORY_UNKNOWN) {
+ return OpernameGetOprid(opname, TEXTOID, TEXTOID);
+ } else if (arg2_category == TYPCATEGORY_NUMERIC &&
+ (arg1_category == TYPCATEGORY_UNKNOWN || arg1_category == TYPCATEGORY_STRING)) {
+ return OpernameGetOprid(opname, NUMERICOID, NUMERICOID);
+ }
+ }
/* Unspecified type for one of the arguments? then use the other */
if ((arg1 == UNKNOWNOID) && (arg2 != InvalidOid)) {
arg1 = arg2;
was_unknown = true;
- other_was_num = get_typecategory(arg2) == TYPCATEGORY_NUMERIC;
} else if ((arg2 == UNKNOWNOID) && (arg1 != InvalidOid)) {
arg2 = arg1;
was_unknown = true;
- other_was_num = get_typecategory(arg1) == TYPCATEGORY_NUMERIC;
- }
- if (use_a_style_coercion && was_unknown && other_was_num) {
- return OpernameGetOprid(opname, TEXTOID, TEXTOID);
}
result = OpernameGetOprid(opname, arg1, arg2);
@@ -392,7 +406,7 @@ Operator oper(ParseState* pstate, List* opname, Oid ltypeId, Oid rtypeId, bool n
* Try to find the mapping in the lookaside cache.
*/
if (pstate != NULL) {
- use_a_style_coercion = pstate->p_is_case_when && ENABLE_SQL_BETA_FEATURE(A_STYLE_COERCE);
+ use_a_style_coercion = pstate->p_is_decode && ENABLE_SQL_BETA_FEATURE(A_STYLE_COERCE);
}
key_ok = make_oper_cache_key(&key, opname, ltypeId, rtypeId, use_a_style_coercion);
diff --git a/src/common/backend/parser/parse_relation.cpp b/src/common/backend/parser/parse_relation.cpp
index 08b0a2395..51c00f2e5 100755
--- a/src/common/backend/parser/parse_relation.cpp
+++ b/src/common/backend/parser/parse_relation.cpp
@@ -1138,10 +1138,14 @@ Relation parserOpenTable(ParseState *pstate, const RangeVar *relation, int lockm
TryUnlockAllAccounts();
}
- if (rel->partMap && rel->partMap->type == PART_TYPE_INTERVAL) {
- /* take AccessShareLock on ADD_PARTITION_ACTION to avoid concurrency with new partition operations. */
- LockRelationForAccessIntervalPartitionTab(rel);
+#ifndef ENABLE_MULTIPLE_NODES
+ if (RelationIsPartitioned(rel)) {
+ /* take ShareLock to avoid PARTITION DDL COMMIT until we finish the InitPlan. Distribute mode doesn't support
+ * partition DDL/DML parallel work, no need this action */
+ LockPartitionObject(RelationGetRelid(rel), PARTITION_OBJECT_LOCK_SDEQUENCE, PARTITION_SHARE_LOCK);
+ AddPartitionDMLInfo(RelationGetRelid(rel));
}
+#endif
if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) {
if (u_sess->attr.attr_sql.enable_parallel_ddl && !isFirstNode && isCreateView) {
diff --git a/src/common/backend/parser/parse_target.cpp b/src/common/backend/parser/parse_target.cpp
index 53f6d23b2..e1a471bba 100644
--- a/src/common/backend/parser/parse_target.cpp
+++ b/src/common/backend/parser/parse_target.cpp
@@ -36,6 +36,8 @@
#include "utils/typcache.h"
#include "executor/executor.h"
#include "gs_ledger/ledger_utils.h"
+#include "mb/pg_wchar.h"
+#include "parser/parse_utilcmd.h"
static void markTargetListOrigin(ParseState* pstate, TargetEntry* tle, Var* var, int levelsup);
static Node* transformAssignmentIndirection(ParseState* pstate, Node* basenode, const char* targetName,
@@ -374,6 +376,7 @@ Expr* transformAssignedExpr(ParseState* pstate, Expr* expr, char* colname, int a
Oid attrtype; /* type of target column */
int32 attrtypmod;
Oid attrcollation; /* collation of target column */
+ int attrcharset = PG_INVALID_ENCODING;
AssertEreport(rd != NULL, MOD_OPT, "");
/*
@@ -391,6 +394,9 @@ Expr* transformAssignedExpr(ParseState* pstate, Expr* expr, char* colname, int a
attrtype = attnumTypeId(rd, attrno);
attrtypmod = rd->rd_att->attrs[attrno - 1].atttypmod;
attrcollation = rd->rd_att->attrs[attrno - 1].attcollation;
+ if (DB_IS_CMPT(B_FORMAT)) {
+ attrcharset = get_charset_by_collation(attrcollation);
+ }
/*
* If the expression is a DEFAULT placeholder, insert the attribute's
@@ -534,6 +540,9 @@ Expr* transformAssignedExpr(ParseState* pstate, Expr* expr, char* colname, int a
}
}
}
+#ifndef ENABLE_MULTIPLE_NODES
+ expr = (Expr*)coerce_to_target_charset((Node*)expr, attrcharset, attrtype);
+#endif
ELOG_FIELD_NAME_END;
diff --git a/src/common/backend/parser/parse_type.cpp b/src/common/backend/parser/parse_type.cpp
index a6b1c7444..e31d357f6 100644
--- a/src/common/backend/parser/parse_type.cpp
+++ b/src/common/backend/parser/parse_type.cpp
@@ -34,6 +34,8 @@
#include "utils/lsyscache.h"
#include "utils/syscache.h"
#include "utils/pl_package.h"
+#include "catalog/gs_utf8_collation.h"
+#include "parser/parse_utilcmd.h"
static int32 typenameTypeMod(ParseState* pstate, const TypeName* typname, Type typ);
@@ -652,6 +654,48 @@ Oid LookupCollation(ParseState* pstate, List* collnames, int location)
return colloid;
}
+static Oid get_column_def_collation_b_format(ColumnDef* coldef, Oid typeOid, Oid typcollation,
+ bool is_bin_type, Oid rel_coll_oid)
+{
+ if (coldef->typname->charset != PG_INVALID_ENCODING && !IsSupportCharsetType(typeOid)) {
+ ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("type %s not support set charset", format_type_be(typeOid))));
+ }
+
+ Oid result = InvalidOid;
+ if (!OidIsValid(typcollation) && !is_bin_type) {
+ return InvalidOid;
+ } else if (OidIsValid(coldef->collOid)) {
+ /* Precooked collation spec, use that */
+ return coldef->collOid;
+ }
+
+ char* schemaname = NULL;
+ char* collate = NULL;
+ if (coldef->collClause) {
+ DeconstructQualifiedName(coldef->collClause->collname, &schemaname, &collate);
+ if (schemaname != NULL && strcmp(schemaname, "pg_catalog") != 0) {
+ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_SCHEMA),
+ errmsg("error schema name for collate")));
+ }
+ }
+ /* For binary type, if the table's default collation is not "binary", the rel_coll_oid is not inherited. */
+ if (is_bin_type) {
+ rel_coll_oid = InvalidOid;
+ }
+ result = transform_default_collation(collate, coldef->typname->charset, rel_coll_oid, true);
+ if (!OidIsValid(result)) {
+ if (!USE_DEFAULT_COLLATION) {
+ result = typcollation;
+ } else if (is_bin_type) {
+ result = BINARY_COLLATION_OID;
+ } else {
+ result = get_default_collation_by_charset(GetDatabaseEncoding());
+ }
+ }
+ return result;
+}
+
/*
* GetColumnDefCollation
*
@@ -660,13 +704,16 @@ Oid LookupCollation(ParseState* pstate, List* collnames, int location)
*
* pstate is only used for error location purposes, and can be NULL.
*/
-Oid GetColumnDefCollation(ParseState* pstate, ColumnDef* coldef, Oid typeOid)
+Oid GetColumnDefCollation(ParseState* pstate, ColumnDef* coldef, Oid typeOid, Oid rel_coll_oid)
{
Oid result;
Oid typcollation = get_typcollation(typeOid);
int location = -1;
+ bool is_bin_type = IsBinaryType(typeOid);
- if (coldef->collClause) {
+ if (DB_IS_CMPT(B_FORMAT)) {
+ result = get_column_def_collation_b_format(coldef, typeOid, typcollation, is_bin_type, rel_coll_oid);
+ } else if (coldef->collClause) {
/* We have a raw COLLATE clause, so look up the collation */
location = coldef->collClause->location;
result = LookupCollation(pstate, coldef->collClause->collname, location);
@@ -678,8 +725,11 @@ Oid GetColumnDefCollation(ParseState* pstate, ColumnDef* coldef, Oid typeOid)
result = typcollation;
}
+ if (coldef->collClause) {
+ check_binary_collation(result, typeOid);
+ }
/* Complain if COLLATE is applied to an uncollatable type */
- if (OidIsValid(result) && !OidIsValid(typcollation)) {
+ if (OidIsValid(result) && !OidIsValid(typcollation) && !is_bin_type) {
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("collations are not supported by type %s", format_type_be(typeOid)),
diff --git a/src/common/backend/parser/parse_utilcmd.cpp b/src/common/backend/parser/parse_utilcmd.cpp
index dc9c97847..412c0eb3d 100644
--- a/src/common/backend/parser/parse_utilcmd.cpp
+++ b/src/common/backend/parser/parse_utilcmd.cpp
@@ -94,6 +94,7 @@
#include "client_logic/client_logic.h"
#include "client_logic/client_logic_enums.h"
#include "storage/checksum_impl.h"
+#include "catalog/gs_utf8_collation.h"
/* State shared by transformCreateSchemaStmt and its subroutines */
typedef struct {
@@ -153,7 +154,6 @@ static void transformFKConstraints(CreateStmtContext* cxt, bool skipValidation,
static void transformConstraintAttrs(CreateStmtContext* cxt, List* constraintList);
static void transformColumnType(CreateStmtContext* cxt, ColumnDef* column);
static void setSchemaName(char* context_schema, char** stmt_schema_name);
-static void TrySetAutoIncNotNullConstraint(ColumnDef* column);
static void TransformTempAutoIncrement(ColumnDef* column, CreateStmt* stmt);
static int128 TransformAutoIncStart(CreateStmt* stmt);
@@ -167,7 +167,7 @@ static void checkConstraint(CreateStmtContext* cxt, Node* node);
static void setMemCheckFlagForIdx(List* IndexList);
/* check partition name */
-static void check_partition_name_less_than(List* partitionList, bool isPartition);
+static void check_partition_name_internal(List* partitionList, bool isPartition);
static void check_partition_name_start_end(List* partitionList, bool isPartition);
/* for range partition: start/end syntax */
@@ -193,6 +193,10 @@ extern Node* makeAConst(Value* v, int location);
static bool IsElementExisted(List* indexElements, IndexElem* ielem);
static char* CreatestmtGetOrientation(CreateStmt *stmt);
static void CheckAutoIncrementIndex(CreateStmtContext *cxt);
+static List* semtc_generate_hash_partition_defs(CreateStmt* stmt, const char* prefix_name, int part_count);
+static void TransformModifyColumndef(CreateStmtContext* cxt, AlterTableCmd* cmd);
+static void TransformColumnDefinitionConstraints(
+ CreateStmtContext* cxt, ColumnDef* column, bool preCheck, bool is_modify);
#define REDIS_SCHEMA "data_redis"
/*
@@ -227,6 +231,160 @@ static void checkPartitionConstraintWithExpr(Constraint* con)
}
}
+int get_charset_by_collation(Oid coll_oid)
+{
+ HeapTuple tp = NULL;
+ int result = PG_INVALID_ENCODING;
+
+ /* The collation OID in B format has a rule, through which we can quickly get the charset from the OID. */
+ if (COLLATION_IN_B_FORMAT(coll_oid)) {
+ return FAST_GET_CHARSET_BY_COLL(coll_oid);
+ }
+
+ if (COLLATION_HAS_INVALID_ENCODING(coll_oid)) {
+ return result;
+ }
+
+ tp = SearchSysCache1(COLLOID, ObjectIdGetDatum(coll_oid));
+ if (!HeapTupleIsValid(tp)) {
+ return result;
+ }
+ Form_pg_collation coll_tup = (Form_pg_collation)GETSTRUCT(tp);
+ result = coll_tup->collencoding;
+ ReleaseSysCache(tp);
+ return result;
+}
+
+Oid get_default_collation_by_charset(int charset)
+{
+ Oid coll_oid = InvalidOid;
+ Relation rel;
+ ScanKeyData key[2];
+ SysScanDesc scan = NULL;
+ HeapTuple tup = NULL;
+
+ rel = heap_open(CollationRelationId, AccessShareLock);
+ ScanKeyInit(&key[0], Anum_pg_collation_collencoding, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(charset));
+ ScanKeyInit(&key[1], Anum_pg_collation_collisdef, BTEqualStrategyNumber, F_BOOLEQ, BoolGetDatum(true));
+
+ scan = systable_beginscan(rel, CollationEncDefIndexId, true, NULL, 2, key);
+
+ while (HeapTupleIsValid(tup = systable_getnext(scan))) {
+ coll_oid = HeapTupleGetOid(tup);
+ break;
+ }
+ systable_endscan(scan);
+ heap_close(rel, AccessShareLock);
+
+ if (coll_oid == InvalidOid) {
+ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("default collation for encoding \"%s\" does not exist",
+ pg_encoding_to_char(charset))));
+ }
+ return coll_oid;
+}
+
+static Oid check_collation_by_charset(const char* collate, int charset)
+{
+ Oid coll_oid = InvalidOid;
+ coll_oid = GetSysCacheOid3(COLLNAMEENCNSP, PointerGetDatum(collate),
+ Int32GetDatum(charset),
+ ObjectIdGetDatum(PG_CATALOG_NAMESPACE));
+ if (coll_oid == InvalidOid) {
+ coll_oid = get_collation_oid_with_lower_name(collate, charset);
+ if (coll_oid == InvalidOid) {
+ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("collation \"%s\" for encoding \"%s\" does not exist",
+ collate, pg_encoding_to_char(charset))));
+ }
+ }
+ return coll_oid;
+}
+
+/*
+ * transform_default_collation -
+ *   Returns the processed collation oid at the schema, relation or attribute level.
+ */
+Oid transform_default_collation(const char* collate, int charset, Oid def_coll_oid, bool is_attr)
+{
+ Oid coll_oid = InvalidOid;
+ HeapTuple coll_tup;
+
+ if (collate != NULL && charset != PG_INVALID_ENCODING) {
+ coll_oid = check_collation_by_charset(collate, charset);
+ } else if (collate != NULL) {
+ CatCList* list = NULL;
+ list = SearchSysCacheList1(COLLNAMEENCNSP, PointerGetDatum(collate));
+ if (list->n_members == 0) {
+ /* If the collate string is in uppercase, change to lowercase and search it again */
+ coll_oid = get_collation_oid_with_lower_name(collate, charset);
+ if (coll_oid == InvalidOid) {
+ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("collation \"%s\" does not exist", collate)));
+ }
+ } else if (list->n_members > 1) {
+ /*
+             * openGauss may have multiple collations with the same name. When the collation is
+             * specified for an attribute, any of them may be used, for forward compatibility.
+ */
+ if (is_attr) {
+ charset = GetDatabaseEncoding();
+ coll_oid = check_collation_by_charset(collate, charset);
+ } else {
+ ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT),
+ errmsg("there is more than one collation \"%s\" with the same name", collate)));
+ }
+ } else {
+ coll_tup = t_thrd.lsc_cxt.FetchTupleFromCatCList(list, 0);
+ charset = ((Form_pg_collation)GETSTRUCT(coll_tup))->collencoding;
+ if (!is_attr && charset == PG_INVALID_ENCODING) {
+ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("collation \"%s\" have no corresponding encoding", collate)));
+ }
+ coll_oid = HeapTupleGetOid(coll_tup);
+ }
+
+ ReleaseSysCacheList(list);
+ } else if (charset != PG_INVALID_ENCODING) {
+ coll_oid = get_default_collation_by_charset(charset);
+ } else {
+ coll_oid = def_coll_oid;
+ }
+
+ if (!is_attr && OidIsValid(coll_oid) && !COLLATION_IN_B_FORMAT(coll_oid)) {
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("this collation only cannot be specified here")));
+ }
+ if (charset != PG_INVALID_ENCODING && charset != PG_SQL_ASCII && charset != GetDatabaseEncoding()) {
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("difference between the charset and the database encoding has not supported")));
+ }
+ return coll_oid;
+}
+
+Oid fill_relation_collation(const char* collate, int charset, List** options, Oid nsp_coll_oid)
+{
+ if (!OidIsValid(nsp_coll_oid) && USE_DEFAULT_COLLATION) {
+ nsp_coll_oid = get_default_collation_by_charset(GetDatabaseEncoding());
+ }
+ Oid coll_oid = transform_default_collation(collate, charset, nsp_coll_oid);
+ ListCell* cell = NULL;
+ DefElem* opt = NULL;
+
+ if (coll_oid == InvalidOid) {
+ return coll_oid;
+ }
+    /* If a collate reloption is already present, keep it and do not append another. */
+ foreach(cell, *options) {
+ opt = (DefElem*)lfirst(cell);
+ if (strncmp(opt->defname, "collate", strlen("collate")) == 0) {
+ return coll_oid;
+ }
+ }
+ *options = lappend(*options, makeDefElem("collate", (Node*)makeInteger(coll_oid)));
+ return coll_oid;
+}
+
List* transformCreateStmt(CreateStmt* stmt, const char* queryString, const List* uuids, bool preCheck,
Oid *namespaceid, bool isFirstNode)
{
@@ -649,6 +807,7 @@ Oid *namespaceid, bool isFirstNode)
#else
checkPartitionName(stmt->partTableState->partitionList);
#endif
+ SetPartitionnoForPartitionState(stmt->partTableState);
}
/* like clause-including reloptions: cxt.reloptions is produced by like including reloptions clause */
@@ -1001,9 +1160,7 @@ static void createSeqOwnedByTable(CreateStmtContext* cxt, ColumnDef* column, boo
column->constraints = lappend(column->constraints, constraint);
column->raw_default = constraint->raw_expr;
- if (is_autoinc) {
- TrySetAutoIncNotNullConstraint(column);
- } else {
+ if (!is_autoinc) {
constraint = makeNode(Constraint);
constraint->contype = CONSTR_NOTNULL;
constraint->location = -1;
@@ -1066,11 +1223,6 @@ static void transformColumnDefinition(CreateStmtContext* cxt, ColumnDef* column,
bool is_serial = false;
bool is_set = false;
bool large = false;
- bool saw_nullable = false;
- bool saw_default = false;
- bool saw_generated = false;
- Constraint* constraint = NULL;
- ListCell* clist = NULL;
ClientLogicColumnRef* clientLogicColumnRef = NULL;
/* Check the constraint type.*/
@@ -1167,8 +1319,77 @@ static void transformColumnDefinition(CreateStmtContext* cxt, ColumnDef* column,
/* Process column constraints, if any... */
transformConstraintAttrs(cxt, column->constraints);
- saw_nullable = false;
- saw_default = false;
+ TransformColumnDefinitionConstraints(cxt, column, preCheck, false);
+ if (column->clientLogicColumnRef != NULL) {
+#ifdef ENABLE_MULTIPLE_NODES
+ if (IS_MAIN_COORDINATOR && !u_sess->attr.attr_common.enable_full_encryption) {
+#else
+ if (!u_sess->attr.attr_common.enable_full_encryption) {
+#endif
+ ereport(ERROR,
+ (errcode(ERRCODE_OPERATE_NOT_SUPPORTED),
+ errmsg("Un-support to define encrypted column when client encryption is disabled.")));
+ }
+ if (isColumnEncryptionAllowed(cxt, column)) {
+ clientLogicColumnRef = (ClientLogicColumnRef*)column->clientLogicColumnRef;
+ clientLogicColumnRef->orig_typname = column->typname;
+ typenameTypeIdAndMod(NULL, clientLogicColumnRef->orig_typname, &clientLogicColumnRef->orig_typname->typeOid, &clientLogicColumnRef->orig_typname->typemod);
+ if (clientLogicColumnRef->dest_typname) {
+ typenameTypeIdAndMod(NULL, clientLogicColumnRef->dest_typname, &clientLogicColumnRef->dest_typname->typeOid, &clientLogicColumnRef->dest_typname->typemod);
+ column->typname = makeTypeNameFromOid(clientLogicColumnRef->dest_typname->typeOid, clientLogicColumnRef->orig_typname->typeOid);
+ }
+ transformColumnType(cxt, column);
+ }
+ }
+
+ /*
+ * Generate ALTER FOREIGN TABLE ALTER COLUMN statement which adds
+ * per-column foreign data wrapper options for this column.
+ */
+ if (column->fdwoptions != NIL) {
+ AlterTableStmt* stmt = NULL;
+ AlterTableCmd* cmd = NULL;
+
+ cmd = makeNode(AlterTableCmd);
+ cmd->subtype = AT_AlterColumnGenericOptions;
+ cmd->name = column->colname;
+ cmd->def = (Node*)column->fdwoptions;
+ cmd->behavior = DROP_RESTRICT;
+ cmd->missing_ok = false;
+
+ stmt = makeNode(AlterTableStmt);
+ stmt->relation = cxt->relation;
+ stmt->cmds = NIL;
+ stmt->relkind = OBJECT_FOREIGN_TABLE;
+ stmt->cmds = lappend(stmt->cmds, cmd);
+
+ cxt->alist = lappend(cxt->alist, stmt);
+ }
+
+ ListCell *columnOption = NULL;
+ foreach (columnOption, column->columnOptions) {
+ void *pointer = lfirst(columnOption);
+ if (IsA(pointer, CommentStmt)) {
+ CommentStmt *commentStmt = (CommentStmt *)pointer;
+ commentStmt->objtype = OBJECT_COLUMN;
+ commentStmt->objname = list_make2(makeString(cxt->relation->relname), makeString(column->colname));
+ if (cxt->relation->schemaname) {
+ commentStmt->objname = lcons(makeString(cxt->relation->schemaname) , commentStmt->objname);
+ }
+ cxt->alist = lappend(cxt->alist, commentStmt);
+ break;
+ }
+ }
+}
+
+static void TransformColumnDefinitionConstraints(CreateStmtContext* cxt, ColumnDef* column,
+ bool preCheck, bool is_modify)
+{
+ bool saw_nullable = false;
+ bool saw_default = false;
+ bool saw_generated = false;
+ Constraint* constraint = NULL;
+ ListCell* clist = NULL;
foreach (clist, column->constraints) {
constraint = (Constraint*)lfirst(clist);
@@ -1260,13 +1481,18 @@ static void transformColumnDefinition(CreateStmtContext* cxt, ColumnDef* column,
break;
case CONSTR_FOREIGN:
-
- /*
- * Fill in the current attribute's name and throw it into the
- * list of FK constraints to be processed later.
- */
- constraint->fk_attrs = list_make1(makeString(column->colname));
- cxt->fkconstraints = lappend(cxt->fkconstraints, constraint);
+ if (!is_modify) {
+ /*
+ * Fill in the current attribute's name and throw it into the
+ * list of FK constraints to be processed later.
+ */
+ constraint->fk_attrs = list_make1(makeString(column->colname));
+ cxt->fkconstraints = lappend(cxt->fkconstraints, constraint);
+ } else {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+ errmsg("Invalid modify column operation"),
+ errdetail("modify or change column REFERENCES constraint is not supported")));
+ }
break;
case CONSTR_ATTR_DEFERRABLE:
@@ -1301,6 +1527,7 @@ static void transformColumnDefinition(CreateStmtContext* cxt, ColumnDef* column,
createSeqOwnedByTable(cxt, column, preCheck, true, true);
column->is_serial = true;
}
+ column->is_not_null = true;
break;
default:
ereport(ERROR,
@@ -1309,74 +1536,13 @@ static void transformColumnDefinition(CreateStmtContext* cxt, ColumnDef* column,
break;
}
}
- if (column->clientLogicColumnRef != NULL) {
-#ifdef ENABLE_MULTIPLE_NODES
- if (IS_MAIN_COORDINATOR && !u_sess->attr.attr_common.enable_full_encryption) {
-#else
- if (!u_sess->attr.attr_common.enable_full_encryption) {
-#endif
- ereport(ERROR,
- (errcode(ERRCODE_OPERATE_NOT_SUPPORTED),
- errmsg("Un-support to define encrypted column when client encryption is disabled.")));
- }
- if (isColumnEncryptionAllowed(cxt, column)) {
- clientLogicColumnRef = (ClientLogicColumnRef*)column->clientLogicColumnRef;
- clientLogicColumnRef->orig_typname = column->typname;
- typenameTypeIdAndMod(NULL, clientLogicColumnRef->orig_typname, &clientLogicColumnRef->orig_typname->typeOid, &clientLogicColumnRef->orig_typname->typemod);
- if (clientLogicColumnRef->dest_typname) {
- typenameTypeIdAndMod(NULL, clientLogicColumnRef->dest_typname, &clientLogicColumnRef->dest_typname->typeOid, &clientLogicColumnRef->dest_typname->typemod);
- column->typname = makeTypeNameFromOid(clientLogicColumnRef->dest_typname->typeOid, clientLogicColumnRef->orig_typname->typeOid);
- }
- transformColumnType(cxt, column);
- }
- }
if (saw_default && saw_generated)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("both default and generation expression specified for column \"%s\" of table \"%s\"",
column->colname, cxt->relation->relname),
- parser_errposition(cxt->pstate,
- constraint->location)));
-
- /*
- * Generate ALTER FOREIGN TABLE ALTER COLUMN statement which adds
- * per-column foreign data wrapper options for this column.
- */
- if (column->fdwoptions != NIL) {
- AlterTableStmt* stmt = NULL;
- AlterTableCmd* cmd = NULL;
-
- cmd = makeNode(AlterTableCmd);
- cmd->subtype = AT_AlterColumnGenericOptions;
- cmd->name = column->colname;
- cmd->def = (Node*)column->fdwoptions;
- cmd->behavior = DROP_RESTRICT;
- cmd->missing_ok = false;
-
- stmt = makeNode(AlterTableStmt);
- stmt->relation = cxt->relation;
- stmt->cmds = NIL;
- stmt->relkind = OBJECT_FOREIGN_TABLE;
- stmt->cmds = lappend(stmt->cmds, cmd);
-
- cxt->alist = lappend(cxt->alist, stmt);
- }
-
- ListCell *columnOption = NULL;
- foreach (columnOption, column->columnOptions) {
- void *pointer = lfirst(columnOption);
- if (IsA(pointer, CommentStmt)) {
- CommentStmt *commentStmt = (CommentStmt *)pointer;
- commentStmt->objtype = OBJECT_COLUMN;
- commentStmt->objname = list_make2(makeString(cxt->relation->relname), makeString(column->colname));
- if (cxt->relation->schemaname) {
- commentStmt->objname = lcons(makeString(cxt->relation->schemaname) , commentStmt->objname);
- }
- cxt->alist = lappend(cxt->alist, commentStmt);
- break;
- }
- }
+ parser_errposition(cxt->pstate, constraint->location)));
}
/*
@@ -1882,11 +2048,10 @@ static void transformTableLikeClause(
list_make1(makeString(NameStr(column_settings_rel_data->column_key_name)));
def->clientLogicColumnRef->columnEncryptionAlgorithmType = static_cast(columns_rel_data->encryption_type);
def->clientLogicColumnRef->orig_typname = makeTypeNameFromOid(columns_rel_data->data_type_original_oid,
- columns_rel_data->data_type_original_mod);;
+ columns_rel_data->data_type_original_mod);
def->clientLogicColumnRef->dest_typname =
makeTypeNameFromOid(attribute->atttypid, attribute->atttypmod);
- def->typname = makeTypeNameFromOid(columns_rel_data->data_type_original_oid,
- columns_rel_data->data_type_original_mod);
+ def->typname = makeTypeNameFromOid(attribute->atttypid, attribute->atttypmod);
ReleaseSysCache(col_tup);
ReleaseSysCache(col_setting_tup);
}
@@ -4877,8 +5042,14 @@ List* transformAlterTableStmt(Oid relid, AlterTableStmt* stmt, const char* query
Value *v = (Value *)linitial(def->typname->names);
if (strcmp(v->val.str, "set") == 0) {
if (oldIsSet) {
- ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("can not alter column type to another set")));
+ if (cmd->is_first || cmd->after_name != NULL) {
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-supported feature"),
+ errdetail("set column is not supported for modify column first|after colname")));
+ } else {
+ ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("can not alter column type to another set")));
+ }
} else {
PrecheckColumnTypeForSet(&cxt, def->typname);
CreateSetOwnedByTable(&cxt, def, cmd->name);
@@ -4906,6 +5077,11 @@ List* transformAlterTableStmt(Oid relid, AlterTableStmt* stmt, const char* query
newcmds = lappend(newcmds, cmd);
break;
}
+ case AT_ModifyColumn: {
+ TransformModifyColumndef(&cxt, cmd);
+ newcmds = lappend(newcmds, cmd);
+ break;
+ }
default:
newcmds = lappend(newcmds, cmd);
@@ -5134,7 +5310,7 @@ static void transformColumnType(CreateStmtContext* cxt, ColumnDef* column)
parser_errposition(cxt->pstate, column->typname->location)));
}
- if (column->collClause) {
+ if (!DB_IS_CMPT(B_FORMAT) && column->collClause) {
LookupCollation(cxt->pstate, column->collClause->collname, column->collClause->location);
/* Complain if COLLATE is applied to an uncollatable type */
if (!OidIsValid(typtup->typcollation))
@@ -5306,42 +5482,101 @@ NodeTag GetPartitionStateType(char type)
char* GetPartitionDefStateName(Node *partitionDefState)
{
- char* partitionName = NULL;
- switch (nodeTag(partitionDefState)) {
- case T_RangePartitionDefState:
- partitionName = ((RangePartitionDefState *)partitionDefState)->partitionName;
- break;
- case T_ListPartitionDefState:
- partitionName = ((ListPartitionDefState *)partitionDefState)->partitionName;
- break;
- case T_HashPartitionDefState:
- partitionName = ((HashPartitionDefState *)partitionDefState)->partitionName;
- break;
- default:
- ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("unsupported subpartition type")));
- break;
- }
- return partitionName;
+ return ((PartitionDefState *)partitionDefState)->partitionName;
}
List* GetSubPartitionDefStateList(Node *partitionDefState)
{
- List* subPartitionList = NIL;
- switch (nodeTag(partitionDefState)) {
+ if (IsA(partitionDefState, RangePartitionStartEndDefState)) {
+ return NIL;
+ }
+ return ((PartitionDefState *)partitionDefState)->subPartitionDefState;
+}
+
+static void semtc_check_partitions_clause(CreateStmt *stmt)
+{
+ int part_num = stmt->partTableState->partitionsNum;
+ List* part_list = stmt->partTableState->partitionList;
+
+ if (part_num > MAX_PARTITION_NUM) {
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Invalid number of partitions"),
+ errdetail("partitions number '%d' cannot be greater than %d",
+ part_num, MAX_PARTITION_NUM)));
+ }
+ /* have PARTITIONS num clause */
+ if (part_num > 0) {
+ if (part_list != NIL) {
+ if (list_length(part_list) != part_num) {
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Invalid number of partitions"),
+ errdetail("the number of defined partitions does not match the partitions number '%d'",
+ part_num)));
+ }
+ return;
+ }
+ /* only hash partition can omit partition definition */
+ if (stmt->partTableState->partitionStrategy != 'h') {
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Invalid number of partitions"),
+ errdetail("Partitions number is specified but partition definition is missing")));
+ }
+ stmt->partTableState->partitionList = semtc_generate_hash_partition_defs(stmt, NULL, part_num);
+ return;
+ }
+ /* have no PARTITIONS num clause */
+ if (part_list == NIL) {
+ /* only hash partition can omit partition definition */
+ if (stmt->partTableState->partitionStrategy != 'h') {
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Missing partition definition when partitions number is not specified")));
+ }
+ stmt->partTableState->partitionList = semtc_generate_hash_partition_defs(stmt, NULL, 1);
+ }
+}
+
+static List *semtc_check_subpartitions_clause(CreateStmt *stmt, Node *partition_def_state,
+ List *sub_partition_list)
+{
+ char* partName = GetPartitionDefStateName(partition_def_state);
+ int subpart_num = stmt->partTableState->subPartitionState->partitionsNum;
+ if (subpart_num == 0) {
+ return sub_partition_list;
+ }
+ /* have SUBPARTITIONS num clause */
+ if (sub_partition_list != NIL) {
+ if (sub_partition_list->length != subpart_num) {
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Invalid number of partitions"),
+ errdetail("The number of defined subpartitions in partition \"%s\" "
+ "does not match the subpartitions number: %d", partName, subpart_num)));
+ }
+ return sub_partition_list;
+ }
+    /* only hash subpartitions may omit the subpartition definitions */
+ if (stmt->partTableState->subPartitionState->partitionStrategy != 'h') {
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Invalid number of partitions"),
+ errdetail("Partitions number is specified but partition definition is missing")));
+ }
+ /* generate subpartitionDefStates */
+ switch (nodeTag(partition_def_state)) {
case T_RangePartitionDefState:
- subPartitionList = ((RangePartitionDefState *)partitionDefState)->subPartitionDefState;
+ ((RangePartitionDefState *)partition_def_state)->subPartitionDefState =
+ semtc_generate_hash_partition_defs(stmt, partName, subpart_num);
break;
case T_ListPartitionDefState:
- subPartitionList = ((ListPartitionDefState *)partitionDefState)->subPartitionDefState;
+ ((ListPartitionDefState *)partition_def_state)->subPartitionDefState =
+ semtc_generate_hash_partition_defs(stmt, partName, subpart_num);
break;
case T_HashPartitionDefState:
- subPartitionList = ((HashPartitionDefState *)partitionDefState)->subPartitionDefState;
+ ((HashPartitionDefState *)partition_def_state)->subPartitionDefState =
+ semtc_generate_hash_partition_defs(stmt, partName, subpart_num);
break;
default:
- subPartitionList = NIL;
break;
}
- return subPartitionList;
+ return GetSubPartitionDefStateList(partition_def_state);
}
/*
@@ -5450,13 +5685,16 @@ void checkPartitionSynax(CreateStmt* stmt)
}
/* check partition key number for none value-partition table */
- if (!value_partition && stmt->partTableState->partitionKey->length > MAX_PARTITIONKEY_NUM) {
+ if (!value_partition && stmt->partTableState->partitionKey->length > PARTITION_PARTKEYMAXNUM) {
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
errmsg("too many partition keys for partitioned table"),
- errhint("Partittion key columns can not be more than %d", MAX_PARTITIONKEY_NUM)));
+ errhint("Partittion key columns can not be more than %d", PARTITION_PARTKEYMAXNUM)));
}
+ /* check PARTITIONS clause */
+ semtc_check_partitions_clause(stmt);
+
/* check range partition number for none value-partition table */
if (!value_partition && stmt->partTableState->partitionList->length > MAX_PARTITION_NUM) {
ereport(ERROR,
@@ -5465,7 +5703,7 @@ void checkPartitionSynax(CreateStmt* stmt)
errhint("Number of partitions can not be more than %d", MAX_PARTITION_NUM)));
}
- /* check interval synax */
+    /* check interval syntax */
if (stmt->partTableState->intervalPartDef) {
#ifdef ENABLE_MULTIPLE_NODES
ereport(ERROR,
@@ -5492,15 +5730,22 @@ void checkPartitionSynax(CreateStmt* stmt)
#endif
}
- /* check subpartition synax */
+    /* check subpartition syntax */
if (stmt->partTableState->subPartitionState != NULL) {
NodeTag subPartitionType = GetPartitionStateType(stmt->partTableState->subPartitionState->partitionStrategy);
List* partitionList = stmt->partTableState->partitionList;
ListCell* lc1 = NULL;
ListCell* lc2 = NULL;
+ if (stmt->partTableState->subPartitionState->partitionsNum > MAX_PARTITION_NUM) {
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("Invalid number of partitions"),
+ errdetail("subpartitions number '%d' cannot be greater than %d",
+ stmt->partTableState->subPartitionState->partitionsNum, MAX_PARTITION_NUM)));
+ }
foreach (lc1, partitionList) {
Node* partitionDefState = (Node*)lfirst(lc1);
List* subPartitionList = GetSubPartitionDefStateList(partitionDefState);
+ subPartitionList = semtc_check_subpartitions_clause(stmt, partitionDefState, subPartitionList);
foreach (lc2, subPartitionList) {
Node *subPartitionDefState = (Node *)lfirst(lc2);
if ((nodeTag(subPartitionDefState) != subPartitionType)) {
@@ -5560,14 +5805,14 @@ static void checkPartitionValue(CreateStmtContext* cxt, CreateStmt* stmt)
}
/*
- * check_partition_name_less_than
+ * check_partition_name_internal
* check partition name with less/than stmt.
*
* [IN] partitionList: partition list
*
* RETURN: void
*/
-static void check_partition_name_less_than(List* partitionList, bool isPartition)
+static void check_partition_name_internal(List* partitionList, bool isPartition)
{
ListCell* cell = NULL;
ListCell* lc = NULL;
@@ -5576,10 +5821,10 @@ static void check_partition_name_less_than(List* partitionList, bool isPartition
foreach (cell, partitionList) {
lc = cell;
- ref_partname = ((RangePartitionDefState*)lfirst(cell))->partitionName;
+ ref_partname = ((PartitionDefState*)lfirst(cell))->partitionName;
while (NULL != (lc = lnext(lc))) {
- cur_partname = ((RangePartitionDefState*)lfirst(lc))->partitionName;
+ cur_partname = ((PartitionDefState*)lfirst(lc))->partitionName;
if (!strcmp(ref_partname, cur_partname)) {
ereport(ERROR,
@@ -5637,46 +5882,28 @@ void checkPartitionName(List* partitionList, bool isPartition)
if (cell != NULL) {
Node* state = (Node*)lfirst(cell);
- if (IsA(state, RangePartitionDefState))
- check_partition_name_less_than(partitionList, isPartition);
- else
+ if (IsA(state, RangePartitionStartEndDefState)) {
check_partition_name_start_end(partitionList, isPartition);
+ } else {
+ check_partition_name_internal(partitionList, isPartition);
+ }
}
}
-List* GetPartitionNameList(List* partitionList)
+List* GetPartitionNameList(List *partitionList)
{
- ListCell* cell = NULL;
- ListCell* lc = NULL;
- List* subPartitionDefStateList = NIL;
- List* partitionNameList = NIL;
+ ListCell *cell = NULL;
+ ListCell *lc = NULL;
+ List *subPartitionDefStateList = NIL;
+ List *partitionNameList = NIL;
foreach (cell, partitionList) {
- if (IsA((Node*)lfirst(cell), RangePartitionDefState)) {
- RangePartitionDefState* partitionDefState = (RangePartitionDefState*)lfirst(cell);
- subPartitionDefStateList = partitionDefState->subPartitionDefState;
- partitionNameList = lappend(partitionNameList, partitionDefState->partitionName);
- } else if (IsA((Node*)lfirst(cell), ListPartitionDefState)) {
- ListPartitionDefState* partitionDefState = (ListPartitionDefState*)lfirst(cell);
- subPartitionDefStateList = partitionDefState->subPartitionDefState;
- partitionNameList = lappend(partitionNameList, partitionDefState->partitionName);
- } else {
- HashPartitionDefState* partitionDefState = (HashPartitionDefState*)lfirst(cell);
- subPartitionDefStateList = partitionDefState->subPartitionDefState;
- partitionNameList = lappend(partitionNameList, partitionDefState->partitionName);
- }
-
+ PartitionDefState *partitionDefState = (PartitionDefState *)lfirst(cell);
+ subPartitionDefStateList = partitionDefState->subPartitionDefState;
+ partitionNameList = lappend(partitionNameList, partitionDefState->partitionName);
foreach (lc, subPartitionDefStateList) {
- if (IsA((Node *)lfirst(lc), RangePartitionDefState)) {
- RangePartitionDefState *partitionDefState = (RangePartitionDefState *)lfirst(lc);
- partitionNameList = lappend(partitionNameList, partitionDefState->partitionName);
- } else if (IsA((Node *)lfirst(lc), ListPartitionDefState)) {
- ListPartitionDefState *partitionDefState = (ListPartitionDefState *)lfirst(lc);
- partitionNameList = lappend(partitionNameList, partitionDefState->partitionName);
- } else {
- HashPartitionDefState *partitionDefState = (HashPartitionDefState *)lfirst(lc);
- partitionNameList = lappend(partitionNameList, partitionDefState->partitionName);
- }
+ PartitionDefState *subpartitionDefState = (PartitionDefState *)lfirst(lc);
+ partitionNameList = lappend(partitionNameList, subpartitionDefState->partitionName);
}
}
@@ -6050,6 +6277,23 @@ static void checkCGinBtreeIndexCompatible(IndexStmt* stmt)
}
}
}
+
+static Node* transformListPartitionRowExpr(ParseState* pstate, RowExpr* rowexpr)
+{
+ Node* con = NULL;
+ RowExpr* result = makeNode(RowExpr);
+
+ result->row_typeid = rowexpr->row_typeid;
+ result->row_format = rowexpr->row_format;
+ result->location = rowexpr->location;
+ result->colnames = NIL;
+
+ foreach_cell (cell, rowexpr->args) {
+ con = transformIntoConst(pstate, (Node*)lfirst(cell));
+ result->args = lappend(result->args, con);
+ }
+ return (Node*)result;
+}
List* transformListPartitionValue(ParseState* pstate, List* boundary, bool needCheck, bool needFree)
{
@@ -6061,13 +6305,18 @@ List* transformListPartitionValue(ParseState* pstate, List* boundary, bool needC
/* scan value of partition key of per partition */
foreach (valueCell, boundary) {
elem = (Node*)lfirst(valueCell);
+        if (IsA(elem, RowExpr)) { /* for multi-key list partition boundaries */
+ result = transformListPartitionRowExpr(pstate, (RowExpr*)elem);
+ newValueList = lappend(newValueList, result);
+ continue;
+ }
result = transformIntoConst(pstate, elem);
if (PointerIsValid(result) && needCheck && ((Const*)result)->constisnull && !((Const*)result)->ismaxvalue) {
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("Partition key value can not be null"),
errdetail("partition bound element must be one of: string, datetime or interval literal, number, "
- "or MAXVALUE, and not null")));
+ "or MAXVALUE(for range partition)/DEFAULT(for list partition), and not null")));
}
newValueList = lappend(newValueList, result);
}
@@ -6078,7 +6327,7 @@ List* transformListPartitionValue(ParseState* pstate, List* boundary, bool needC
return newValueList;
}
-void transformRangeSubPartitionValue(ParseState* pstate, List* subPartitionDefStateList)
+void transformSubPartitionValue(ParseState* pstate, List* subPartitionDefStateList)
{
if (subPartitionDefStateList == NIL) {
return;
@@ -6099,7 +6348,7 @@ void transformPartitionValue(ParseState* pstate, Node* rangePartDef, bool needCh
RangePartitionDefState* state = (RangePartitionDefState*)rangePartDef;
/* only one boundary need transform */
state->boundary = transformRangePartitionValueInternal(pstate, state->boundary, needCheck, true);
- transformRangeSubPartitionValue(pstate, state->subPartitionDefState);
+ transformSubPartitionValue(pstate, state->subPartitionDefState);
break;
}
case T_RangePartitionStartEndDefState: {
@@ -6114,14 +6363,14 @@ void transformPartitionValue(ParseState* pstate, Node* rangePartDef, bool needCh
case T_ListPartitionDefState: {
ListPartitionDefState* state = (ListPartitionDefState*)rangePartDef;
state->boundary = transformListPartitionValue(pstate, state->boundary, needCheck, true);
- transformRangeSubPartitionValue(pstate, state->subPartitionDefState);
+ transformSubPartitionValue(pstate, state->subPartitionDefState);
break;
}
case T_HashPartitionDefState: {
HashPartitionDefState* state = (HashPartitionDefState*)rangePartDef;
/* only one boundary need transform */
state->boundary = transformListPartitionValue(pstate, state->boundary, needCheck, true);
- transformRangeSubPartitionValue(pstate, state->subPartitionDefState);
+ transformSubPartitionValue(pstate, state->subPartitionDefState);
break;
}
@@ -6270,10 +6519,10 @@ Oid generateClonedIndex(Relation source_idx, Relation source_relation, char* tem
ret = DefineIndex(RelationGetRelid(source_relation),
index_stmt,
InvalidOid, /* no predefined OID */
- false, /* is_alter_table */
+ true, /* is_alter_table */
true, /* check_rights */
skip_build, /* skip_build */
- false); /* quiet */
+ true); /* quiet */
(void)pgstat_report_waitstatus(oldStatus);
/* clean up */
@@ -6570,11 +6819,11 @@ static Oid get_split_partition_oid(Relation partTableRel, SplitPartitionState* s
partMap = (RangePartitionMap*)partTableRel->partMap;
if (PointerIsValid(splitState->src_partition_name)) {
- srcPartOid = partitionNameGetPartitionOid(RelationGetRelid(partTableRel),
+ srcPartOid = PartitionNameGetPartitionOid(RelationGetRelid(partTableRel),
splitState->src_partition_name,
PART_OBJ_TYPE_TABLE_PARTITION,
AccessExclusiveLock,
- true,
+ false,
false,
NULL,
NULL,
@@ -6583,9 +6832,8 @@ static Oid get_split_partition_oid(Relation partTableRel, SplitPartitionState* s
Assert(PointerIsValid(splitState->partition_for_values));
splitState->partition_for_values = transformConstIntoTargetType(
partTableRel->rd_att->attrs, partMap->partitionKey, splitState->partition_for_values);
- srcPartOid = partitionValuesGetPartitionOid(
- partTableRel, splitState->partition_for_values, AccessExclusiveLock, true, true, false);
- }
+ srcPartOid = PartitionValuesGetPartitionOid(
+ partTableRel, splitState->partition_for_values, AccessExclusiveLock, true, false, false); }
return srcPartOid;
}
@@ -8025,6 +8273,10 @@ static void CheckAutoIncrementIndex(CreateStmtContext *cxt)
}
if (!has_found) {
+ /* AUTO_INCREMENT will be checked in CheckRelAutoIncrementIndex after executing alter table. */
+ if (cxt->node != NULL && IsA(cxt->node, AlterTableStmt)) {
+ continue;
+ }
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
(errmsg("Incorrect table definition, auto_increment column must be defined as a key"))));
}
@@ -8050,26 +8302,6 @@ static int128 TransformAutoIncStart(CreateStmt* stmt)
return autoinc;
}
-static void TrySetAutoIncNotNullConstraint(ColumnDef* column)
-{
- Constraint* constraint = NULL;
- bool has_nullcons = false;
-
- foreach_cell (clist, column->constraints) {
- if (((Constraint*)lfirst(clist))->contype == CONSTR_NULL) {
- has_nullcons = true;
- break;
- }
- }
- /* If nullable constraint is specified, no need to set NOT-NULL constraint. */
- if (!has_nullcons) {
- constraint = makeNode(Constraint);
- constraint->contype = CONSTR_NOTNULL;
- constraint->location = -1;
- column->constraints = lappend(column->constraints, constraint);
- }
-}
-
static void TransformTempAutoIncrement(ColumnDef* column, CreateStmt* stmt)
{
int128 autoinc;
@@ -8093,6 +8325,157 @@ static void TransformTempAutoIncrement(ColumnDef* column, CreateStmt* stmt)
constraint->cooked_expr = NULL;
column->constraints = lappend(column->constraints, constraint);
column->raw_default = constraint->raw_expr;
-
- TrySetAutoIncNotNullConstraint(column);
+}
+
+/*
+ * semtc_generate_hash_partition_defs
+ * Generate a specified number of partition definitions.
+ * prefix_name: parent partition name for subpartition, used as a prefix for subpartition names to be generated
+ * part_count: number of partitions to be generated
+ */
+static List* semtc_generate_hash_partition_defs(CreateStmt* stmt, const char* prefix_name, int part_count)
+{
+ char namebuf[NAMEDATALEN] = {0};
+ List* result = NULL;
+ HashPartitionDefState *def = NULL;
+ A_Const *con = NULL;
+ uint32 prefix_len = prefix_name ? (uint32)strlen(prefix_name) : 0;
+ uint32 len;
+ errno_t rc = EOK;
+
+ for (int i = 0; i < part_count; i++) {
+ def = makeNode(HashPartitionDefState);
+ if (prefix_name) {
+ rc = snprintf_s(namebuf, sizeof(namebuf), sizeof(namebuf) - 1, "sp%d", i);
+ } else {
+ rc = snprintf_s(namebuf, sizeof(namebuf), sizeof(namebuf) - 1, "p%d", i);
+ }
+ securec_check_ss(rc, "", "");
+
+ len = (uint32)strlen(namebuf) + prefix_len;
+ def->partitionName = (char*)palloc0(len + 1);
+ if (prefix_name) {
+ rc = memcpy_s(def->partitionName, len + 1, prefix_name, prefix_len);
+ securec_check(rc, "\0", "\0");
+ }
+ rc = strncpy_s(def->partitionName + prefix_len, len + 1 - prefix_len, namebuf, len - prefix_len);
+ securec_check(rc, "\0", "\0");
+ if (len >= NAMEDATALEN) {
+ ereport(ERROR, (errcode(ERRCODE_NAME_TOO_LONG),
+ errmsg("identifier too long"),
+ errdetail("The %s name \"%s\" is too long",
+ prefix_name ? "subpartition" : "partition", def->partitionName)));
+ }
+ con = makeNode(A_Const);
+ con->val.type = T_Integer;
+ con->val.val.ival = i;
+ con->location = -1;
+ def->boundary = list_make1(con);
+ def->tablespacename = stmt->tablespacename;
+ def->subPartitionDefState = NULL;
+ result = lappend(result, def);
+ }
+ return result;
+}
+
+static void TransformModifyColumnDatatype(CreateStmtContext* cxt, AlterTableCmd* cmd)
+{
+ ColumnDef *def = (ColumnDef *)cmd->def;
+ bool new_set = false;
+ /* pre-alter column type is an set, should drop it after */
+ bool old_set = DropSetOwnedByTable(cxt, cmd->name);
+
+ if (def->typname && list_length(def->typname->names) == 1 && !def->typname->pct_type) {
+ char* tname = strVal(linitial(def->typname->names));
+ if (strcmp(tname, "set") == 0) {
+ if (old_set) {
+ ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("can not alter column type to another set")));
+ } else { /* alter the column to a new set type, should create it before */
+ PrecheckColumnTypeForSet(cxt, def->typname);
+ new_set = true;
+ def->typname->typeOid = InvalidOid; // pg_attribute.atttypid
+ CreateSetOwnedByTable(cxt, def, def->colname);
+ }
+ } else if (strcmp(tname, "smallserial") == 0 || strcmp(tname, "serial2") == 0 || strcmp(tname, "serial") == 0 ||
+ strcmp(tname, "serial4") == 0 || strcmp(tname, "bigserial") == 0 || strcmp(tname, "serial8") == 0 ||
+ strcmp(tname, "largeserial") == 0 || strcmp(tname, "serial16") == 0) {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+ errmsg("Invalid modify column operation"),
+ errdetail("cannot modify or change column to type '%s'", tname)));
+ }
+ }
+
+ /* can NOT change column to an existed set data type */
+ Type tup = LookupTypeName(cxt->pstate, def->typname, NULL);
+ if (HeapTupleIsValid(tup)) {
+ Form_pg_type typform = (Form_pg_type)GETSTRUCT(tup);
+ if (typform->typtype == TYPTYPE_SET) {
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("can not use existed set type %s for column definition",
+ format_type_be(HeapTupleGetOid(tup)))));
+ }
+ ReleaseSysCache(tup);
+ }
+
+ /* Do necessary work on the column type declaration. But for set type,
+ * no need to check before because the type has not created yet.
+ */
+ if (!new_set && def->typname) {
+ transformColumnType(cxt, def);
+ }
+}
+
+static void DropModifyColumnAutoIncrement(CreateStmtContext* cxt, Relation rel, const char* colname)
+{
+ AttrNumber attnum = get_attnum(rel->rd_id, colname);
+ if (attnum <= 0 || attnum != RelAutoIncAttrNum(rel)) {
+ return;
+ }
+ char* seqname = get_rel_name(RelAutoIncSeqOid(rel));
+ if (!seqname) { /* shouldn't happen */
+ return;
+ }
+ DropStmt *drop = makeNode(DropStmt);
+ drop->removeType = OBJECT_LARGE_SEQUENCE;
+ drop->missing_ok = true;
+ drop->objects = list_make1(list_make1(makeString(seqname)));
+ drop->arguments = NIL;
+ drop->behavior = DROP_RESTRICT;
+ drop->concurrent = false;
+ drop->purge = false;
+
+ cxt->alist = lappend(cxt->alist, drop);
+}
+
+static void TransformModifyColumndef(CreateStmtContext* cxt, AlterTableCmd* cmd)
+{
+ ColumnDef *def = (ColumnDef *)cmd->def;
+ /* check constraints type */
+ checkConstraint(cxt, (Node*)def);
+ // check datatype
+ TransformModifyColumnDatatype(cxt, cmd);
+ // check attr constraints
+ transformConstraintAttrs(cxt, def->constraints);
+ cxt->columns = lappend(cxt->columns, (Node*)def);
+ TransformColumnDefinitionConstraints(cxt, def, false, true);
+ if (def->clientLogicColumnRef != NULL) {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+ errmsg("Invalid modify column operation"),
+ errdetail("modify or change column to encrypted column is not supported")));
+ }
+ // drop old auto_increment
+ DropModifyColumnAutoIncrement(cxt, cxt->rel, cmd->name);
+ /* for CHANGE column */
+ if (strcmp(cmd->name, def->colname) != 0) {
+ RenameStmt *rename = makeNode(RenameStmt);
+ rename->renameType = OBJECT_COLUMN;
+ rename->relationType = OBJECT_TABLE;
+ rename->relation = cxt->relation;
+ rename->subname = cmd->name;
+ rename->newname = def->colname;
+ rename->missing_ok = false;
+ cxt->blist = lappend(cxt->blist, rename);
+ }
}
diff --git a/src/common/backend/parser/scan.l b/src/common/backend/parser/scan.l
index 028616944..c6ca2e139 100755
--- a/src/common/backend/parser/scan.l
+++ b/src/common/backend/parser/scan.l
@@ -887,20 +887,21 @@ other .
char *slashstar = strstr(yytext, "/*");
char *dashdash = strstr(yytext, "--");
- if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT && u_sess->attr.attr_common.enable_set_variable_b_format)
- {
- if(nchars > 3 && yytext[0] == '@' && yytext[1] == '`' && yytext[nchars-1] == '`')
- {
+ if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT &&
+ (u_sess->attr.attr_common.enable_set_variable_b_format || ENABLE_SET_VARIABLES))
+ {
+ if(nchars > 3 && yytext[0] == '@' && yytext[1] == '`' && yytext[nchars-1] == '`')
+ {
char *subtext = strstr(yytext + 2, "`");
- if(strlen(subtext) == 1)
+ if (strlen(subtext) == 1)
{
- SET_YYLLOC();
- yylval->str = pstrdup(yytext + 1);
- yyextra->is_hint_str = false;
- return SET_USER_IDENT;
+ SET_YYLLOC();
+ yylval->str = pstrdup(yytext + 1);
+ yyextra->is_hint_str = false;
+ return SET_USER_IDENT;
}
- }
- }
+ }
+ }
if (slashstar && dashdash)
{
@@ -1069,7 +1070,8 @@ other .
}
{set_identifier} {
- if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT && u_sess->attr.attr_common.enable_set_variable_b_format) {
+ if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT &&
+ (u_sess->attr.attr_common.enable_set_variable_b_format || ENABLE_SET_VARIABLES)) {
char *set_ident;
SET_YYLLOC();
@@ -1171,7 +1173,8 @@ other .
{setUserIdentifier} {
SET_YYLLOC();
- if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT && u_sess->attr.attr_common.enable_set_variable_b_format) {
+ if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT &&
+ (u_sess->attr.attr_common.enable_set_variable_b_format || ENABLE_SET_VARIABLES)) {
yylval->str = pstrdup(yytext + 1);
yyextra->is_hint_str = false;
return SET_USER_IDENT;
diff --git a/src/common/backend/pgxc_single/locator/redistrib.cpp b/src/common/backend/pgxc_single/locator/redistrib.cpp
index 3ac9db8d6..7f386ab03 100644
--- a/src/common/backend/pgxc_single/locator/redistrib.cpp
+++ b/src/common/backend/pgxc_single/locator/redistrib.cpp
@@ -876,7 +876,7 @@ void get_redis_rel_ctid(const char* rel_name, const char* partition_name, RedisC
if (partition_name != NULL) {
/* For partitioned table, */
- part_oid = partitionNameGetPartitionOid(
+ part_oid = PartitionNameGetPartitionOid(
relid, partition_name, PART_OBJ_TYPE_TABLE_PARTITION, NoLock, false, false, NULL, NULL, NoLock);
}
diff --git a/src/common/backend/pgxc_single/pool/execRemote.cpp b/src/common/backend/pgxc_single/pool/execRemote.cpp
index 8b88d5d05..4f633d615 100755
--- a/src/common/backend/pgxc_single/pool/execRemote.cpp
+++ b/src/common/backend/pgxc_single/pool/execRemote.cpp
@@ -9703,7 +9703,7 @@ static void ReceivePartitionPageAndTuple(Oid relid, TupleTableSlot* slot)
}
rel = relation_open(relid, ShareUpdateExclusiveLock);
- partitionid = partitionNameGetPartitionOid(
+ partitionid = PartitionNameGetPartitionOid(
relid, (const char*)partname->data, parttype, part_lock, true, false, NULL, NULL, NoLock);
if (!OidIsValid(partitionid)) {
diff --git a/src/common/backend/pgxc_single/pool/poolutils.cpp b/src/common/backend/pgxc_single/pool/poolutils.cpp
index ffa6f1515..fdcaaa0d2 100755
--- a/src/common/backend/pgxc_single/pool/poolutils.cpp
+++ b/src/common/backend/pgxc_single/pool/poolutils.cpp
@@ -40,6 +40,8 @@
#include "utils/memutils.h"
#include "utils/resowner.h"
#include "utils/elog.h"
+#include "pgstat.h"
+#include "utils/mem_snapshot.h"
/*
* pgxc_pool_check
@@ -612,3 +614,241 @@ void HandlePoolerReload(void)
}
#endif
+
+#define IS_COMM_NULL_STR(str) ((str) == NULL || (str)[0] == '\0')
+
+int gs_comm_check_valid_ip(char* input_ip)
+{
+ if (!input_ip) {
+ return 0;
+ }
+
+ char* ip_address = pstrdup(input_ip);
+ const int max_cidr_str_len = 3;
+ const int default_inet4_cidr = 32;
+ const int default_inet6_cidr = 128;
+
+ /* get the cidr/netmask if available */
+ char* tmp_cidr_str;
+ int cidr = -1;
+ tmp_cidr_str = strchr(ip_address, '/');
+ if (tmp_cidr_str) {
+ *tmp_cidr_str = '\0';
+ tmp_cidr_str++;
+ /* cidr */
+ if (strlen(tmp_cidr_str) <= max_cidr_str_len) {
+ cidr = atoi(tmp_cidr_str);
+ } else {
+ pfree_ext(ip_address);
+ return 0;
+ }
+ }
+
+ /* check ip */
+ struct addrinfo* addrs = NULL;
+ struct addrinfo hint;
+ errno_t rc = 0;
+ rc = memset_s(&hint, sizeof(hint), 0, sizeof(hint));
+ securec_check(rc, "", "");
+ hint.ai_flags = AI_NUMERICHOST;
+ if (getaddrinfo(ip_address, NULL, &hint, &addrs) != 0) {
+ pfree_ext(ip_address);
+ return 0;
+ }
+ if (addrs->ai_family == AF_INET) {
+ if (cidr < 0) {
+ cidr = default_inet4_cidr;
+ } else if (cidr > default_inet4_cidr) {
+ pfree_ext(ip_address);
+ return 0;
+ }
+ } else if (addrs->ai_family == AF_INET6) {
+ if (cidr < 0) {
+ cidr = default_inet6_cidr;
+ } else if (cidr > default_inet6_cidr) {
+ pfree_ext(ip_address);
+ return 0;
+ }
+ } else {
+ pfree_ext(ip_address);
+ return 0;
+ }
+
+ freeaddrinfo(addrs);
+ pfree_ext(ip_address);
+
+ /* abnormal: return 0; normal: return 1; unknown: return -1. */
+ return((cidr >= 0) ? 1 : -1);
+}
+
+static void validate_listen_ip(char *validate_ip, ReturnSetInfo *rsinfo)
+{
+ MemoryContext oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory);
+
+ TupleDesc tupdesc = CreateTemplateTupleDesc(2, false);
+ TupleDescInitEntry(tupdesc, (AttrNumber)1, "pid", INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)2, "node_name", TEXTOID, -1, 0);
+
+ rsinfo->returnMode = SFRM_Materialize;
+ rsinfo->setResult = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem);
+ rsinfo->setDesc = BlessTupleDesc(tupdesc);
+
+ /* check ip validation */
+ if (IS_COMM_NULL_STR(validate_ip) || gs_comm_check_valid_ip(validate_ip) == 0) {
+ ereport(ERROR, (errmodule(MOD_COMM_FRAMEWORK),
+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("funcs para not support invalid validate_ip.")));
+ }
+
+ /* If BackendStatusArray is NULL, we will get it from other thread */
+ if (t_thrd.shemem_ptr_cxt.BackendStatusArray == NULL) {
+ if (PgBackendStatusArray != NULL) {
+ t_thrd.shemem_ptr_cxt.BackendStatusArray = PgBackendStatusArray;
+ } else {
+ return;
+ }
+ }
+
+ ThreadId *validate_pids = (ThreadId*)palloc0(sizeof(ThreadId) * BackendStatusArray_size);
+ bool *is_threadpool_worker = (bool*)palloc0(sizeof(bool) * BackendStatusArray_size);
+
+ /* Get all status entries, which procpid or sessionid is valid */
+ uint32 numBackends = 0;
+ PgBackendStatusNode* node = gs_stat_read_current_status(&numBackends);
+ /* If all entries procpid or sessionid are invalid, get numBackends is 0 and should return directly */
+ if (numBackends == 0) {
+ gs_stat_free_stat_node(node);
+ pfree_ext(validate_pids);
+ pfree_ext(is_threadpool_worker);
+ return;
+ }
+
+ PgBackendStatusNode* temp = node;
+ int num = 0;
+ while (temp != NULL) {
+ PgBackendStatus* beentry = temp->data;
+ temp = temp->next;
+
+ /* Skip invalid entries; for valid backends whose socket peer IP matches validate_ip, record the thread pid */
+ if (beentry == NULL) {
+ continue;
+ }
+
+ int sock = beentry->remote_info.socket;
+ if (sock <= 0) {
+ continue;
+ }
+
+ struct sockaddr saddr;
+ if (!get_addr_from_socket(sock, &saddr)) {
+ continue;
+ }
+ char sock_ip[IP_LEN] = {0};
+ int port = 0;
+
+ int family = get_ip_port_from_addr(sock_ip, &port, saddr);
+ if (family == -1 || family == AF_UNIX) {
+ continue;
+ }
+
+ if (strcmp(sock_ip, validate_ip) == 0) {
+ ThreadId tid = beentry->st_procpid;
+ uint64 sid = beentry->st_sessionid;
+ if (tid == sid) {
+ if (tid <= 0) {
+ continue;
+ }
+
+ validate_pids[num] = tid;
+ is_threadpool_worker[num] = false;
+ num++;
+ } else if (ENABLE_THREAD_POOL) {
+ if (sid <= 0) {
+ continue;
+ }
+
+ validate_pids[num] = sid;
+ is_threadpool_worker[num] = true;
+ num++;
+ }
+ }
+ }
+ gs_stat_free_stat_node(node);
+
+ for (int i = 0; i < num; i++) {
+ if (is_threadpool_worker[i]) {
+ ThreadPoolSessControl *sess_ctrl = g_threadPoolControler->GetSessionCtrl();
+ int ctrl_idx = sess_ctrl->FindCtrlIdxBySessId(validate_pids[i]);
+ (void)sess_ctrl->SendSignal((int)ctrl_idx, SIGTERM);
+ ereport(LOG, (errmodule(MOD_COMM_FRAMEWORK),
+ errmsg("Success to send SIGTERM to session:%lu", validate_pids[i])));
+ } else {
+ (void)kill_backend(validate_pids[i], false);
+ ereport(LOG, (errmodule(MOD_COMM_FRAMEWORK),
+ errmsg("Success to send SIGTERM to thread:%lu", validate_pids[i])));
+ }
+ }
+
+ pfree_ext(validate_pids);
+ pfree_ext(is_threadpool_worker);
+ MemoryContextSwitchTo(oldcontext);
+ /* clean up and return the tuplestore */
+ tuplestore_donestoring(rsinfo->setResult);
+
+ return;
+}
+
+Datum gs_validate_ext_listen_ip(PG_FUNCTION_ARGS)
+{
+ if (!superuser() && !isMonitoradmin(GetUserId())) {
+ ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ (errmsg("must be system admin or operator admin to execute gs_validate_ext_listen_ip"))));
+ }
+
+ if (IsTransactionBlock()) {
+ ereport(ERROR,
+ (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
+ errmsg("gs_validate_ext_listen_ip cannot run inside a transaction block")));
+ }
+
+ char* clear = PG_GETARG_CSTRING(0);
+ char* cur_node_name = PG_GETARG_CSTRING(1);
+ char* validate_ip = PG_GETARG_CSTRING(2);
+ bool is_clear = false;
+ bool is_clear_listen_addresses = false;
+
+ /* check clear validation */
+ if (IS_COMM_NULL_STR(cur_node_name)) {
+ ereport(ERROR, (errmodule(MOD_COMM_FRAMEWORK),
+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("funcs para not support invalid clear option")));
+ } else if (strcmp(clear, "on") == 0) {
+ is_clear = true;
+ } else if (strcmp(clear, "off") == 0) {
+ is_clear = false;
+ } else if (strcmp(clear, "normal") == 0) {
+ is_clear_listen_addresses = true;
+ } else {
+ ereport(ERROR, (errmodule(MOD_COMM_FRAMEWORK),
+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("funcs para not support invalid clear option")));
+ }
+
+ /* check node_name validation */
+ if (IS_COMM_NULL_STR(cur_node_name) || strcmp(cur_node_name, g_instance.attr.attr_common.PGXCNodeName) != 0) {
+ ereport(ERROR, (errmodule(MOD_COMM_FRAMEWORK),
+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("funcs para not support invalid node name.")));
+ }
+
+ if (!is_clear_listen_addresses) {
+ ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ (errmsg("not support to execute gs_validate_ext_listen_ip"))));
+ } else {
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ validate_listen_ip(validate_ip, rsinfo);
+ return (Datum)0;
+ }
+
+ return (Datum)0;
+}
\ No newline at end of file
diff --git a/src/common/backend/utils/adt/dbsize.cpp b/src/common/backend/utils/adt/dbsize.cpp
index d7fee808a..48defd20e 100644
--- a/src/common/backend/utils/adt/dbsize.cpp
+++ b/src/common/backend/utils/adt/dbsize.cpp
@@ -1463,11 +1463,11 @@ Datum pg_partition_size_name(PG_FUNCTION_ARGS)
names = stringToQualifiedNameList(partTableName);
partTableOid = RangeVarGetRelid(makeRangeVarFromNameList(names), NoLock, false);
- partOid = partitionNameGetPartitionOid(
+ partOid = PartitionNameGetPartitionOid(
partTableOid, partName, PART_OBJ_TYPE_TABLE_PARTITION, NoLock, true, false, NULL, NULL, NoLock);
if (!OidIsValid(partOid)) {
- partOid = partitionNameGetPartitionOid(partTableOid, partName, PART_OBJ_TYPE_TABLE_SUB_PARTITION, NoLock, true,
+ partOid = SubPartitionNameGetSubPartitionOid(partTableOid, partName, NoLock, NoLock, true,
false, NULL, NULL, NoLock, &subparentOid);
}
@@ -1587,11 +1587,11 @@ Datum pg_partition_indexes_size_name(PG_FUNCTION_ARGS)
names = stringToQualifiedNameList(partTableName);
partTableOid = RangeVarGetRelid(makeRangeVarFromNameList(names), NoLock, false);
- partOid = partitionNameGetPartitionOid(
+ partOid = PartitionNameGetPartitionOid(
partTableOid, partName, PART_OBJ_TYPE_TABLE_PARTITION, NoLock, true, false, NULL, NULL, NoLock);
if (!OidIsValid(partOid)) {
- partOid = partitionNameGetPartitionOid(partTableOid, partName, PART_OBJ_TYPE_TABLE_SUB_PARTITION, NoLock, true,
+ partOid = SubPartitionNameGetSubPartitionOid(partTableOid, partName, NoLock, NoLock, true,
false, NULL, NULL, NoLock, &subparentOid);
}
diff --git a/src/common/backend/utils/adt/expr_distinct.cpp b/src/common/backend/utils/adt/expr_distinct.cpp
index c236f2f03..497327042 100644
--- a/src/common/backend/utils/adt/expr_distinct.cpp
+++ b/src/common/backend/utils/adt/expr_distinct.cpp
@@ -70,61 +70,8 @@ static void GetExprNumDistinctWalker(PlannerInfo *root, VariableStatData *varDat
static void TransferFunctionNumDistinct(PlannerInfo *root, VariableStatData *varData, FuncExpr *funcExpr,
bool isJoinVar);
static bool CheckFuncArgsForTransferNumDistinct(FuncExpr *funcExpr, Node **varNode);
-static bool IsFunctionTransferNumDistinct(FuncExpr *funcExpr);
-/*
- * The array collects all of the type-cast functions which can transfer number of distinct from any one of
- * its arguments, other parameters are viewed as Const.
- */
-static Oid g_typeCastFuncOids[] = {
- /* type cast from bool */
- BOOLTOINT1FUNCOID, BOOLTOINT2FUNCOID, BOOLTOINT4FUNCOID, BOOLTOINT8FUNCOID, BOOLTOTEXTFUNCOID,
-
- /* type cast from int1 */
- I1TOI2FUNCOID, I1TOI4FUNCOID, I1TOI8FUNCOID, I1TOF4FUNCOID, I1TOF8FUNCOID, INT1TOBPCHARFUNCOID,
- INT1TOVARCHARFUNCOID, INT1TONVARCHAR2FUNCOID, INT1TOTEXTFUNCOID, INT1TONUMERICFUNCOID, INT1TOINTERVALFUNCOID,
-
- /* type cast from int2 */
- I2TOI1FUNCOID, INT2TOINT4FUNCOID, INT2TOFLOAT4FUNCOID, INT2TOFLOAT8FUNCOID, INT2TOBPCHAR, INT2TOTEXTFUNCOID,
- INT2TOVARCHARFUNCOID, INT2TOINT8FUNCOID, INT2TONUMERICFUNCOID, INT2TOINTERVALFUNCOID,
-
- /* type cast from int4 */
- I4TOI1FUNCOID, INT4TOINT2FUNCOID, INT4TOINT8FUNCOID, INT4TOFLOAT8FUNCOID, INTEGER2CASHFUNCOID,
- INT4TONUMERICFUNCOID, INT4TOINTERVALFUNCOID, INT4TOHEXFUNCOID, INT4TOBPCHARFUNCOID, INT4TOTEXTFUNCOID,
- INT4TOVARCHARFUNCOID, INT4TOCHARFUNCOID, INT4TOCHRFUNCOID,
-
- /* type cast from int8 */
- I8TOI1FUNCOID, INT8TOINT2FUNCOID, INT8TOINT4FUNCOID, INT8TOBPCHARFUNCOID, INT8TOTEXTFUNCOID, INT8TOVARCHARFUNCOID,
- INT8TONUMERICFUNCOID, INT8TOHEXFUNCOID,
-
- /* type cast from float4/float8 */
- FLOAT4TOBPCHARFUNCOID, FLOAT4TOTEXTFUNCOID, FLOAT4TOVARCHARFUNCOID, FLOAT4TOFLOAT8FUNCOID,
- FLOAT4TONUMERICFUNCOID, FLOAT8TOBPCHARFUNCOID, FLOAT8TOINTERVALFUNCOID, FLOAT8TOTEXTFUNCOID,
- FLOAT8TOVARCHARFUNCOID, FLOAT8TONUMERICFUNCOID, FLOAT8TOTIMESTAMPFUNCOID,
-
- /* type cast from numeric */
- NUMERICTOBPCHARFUNCOID, NUMERICTOTEXTFUNCOID, NUMERICTOVARCHARFUNCOID,
-
- /* type cast from timestamp/date/time */
- DEFAULTFORMATTIMESTAMP2CHARFUNCOID, DEFAULTFORMATTIMESTAMPTZ2CHARFUNCOID, TIMESATMPTOTEXTFUNCOID,
- TIMESTAMPTOVARCHARFUNCOID, TIMESTAMP2TIMESTAMPTZFUNCOID, TIMESTAMPTZ2TIMESTAMPFUNCOID,
- DATETIMESTAMPTZFUNCOID, DATETOTIMESTAMPFUNCOID, DATETOBPCHARFUNCOID, DATETOVARCHARFUNCOID, DATETOTEXTFUNCOID,
- DATEANDTIMETOTIMESTAMPFUNCOID, DTAETIME2TIMESTAMPTZFUNCOID, TIMETOINTERVALFUNCOID, TIMESTAMPZONETOTEXTFUNCOID,
- TIME2TIMETZFUNCOID, RELTIMETOINTERVALFUNCOID,
-
- /* type cast from text */
- TODATEDEFAULTFUNCOID, TODATEFUNCOID, TOTIMESTAMPFUNCOID, TOTIMESTAMPDEFAULTFUNCOID,
- TEXTTOREGCLASSFUNCOID, TEXTTOINT1FUNCOID, TEXTTOINT2FUNCOID, TEXTTOINT4FUNCOID, TEXTTOINT8FUNCOID,
- TEXTTONUMERICFUNCOID, TEXTTOTIMESTAMP, TIMESTAMPTONEWTIMEZONEFUNCOID, TIMESTAMPTZTONEWTIMEZONEFUNCOID,
- HEXTORAWFUNCOID,
-
- /* type cast from char/varchar/bpchar */
- VARCHARTONUMERICFUNCOID, VARCHARTOINT4FUNCOID, VARCHARTOINT8FUNCOID, VARCHARTOTIMESTAMPFUNCOID,
- BPCHARTOINT4FUNCOID, BPCHARTOINT8FUNCOID, BPCHARTONUMERICFUNCOID, BPCHARTOTIMESTAMPFUNCOID,
- RTRIM1FUNCOID, BPCHARTEXTFUNCOID, CHARTOBPCHARFUNCOID, CHARTOTEXTFUNCOID
-};
-
static char *GetFunctionNameWithDefault(Oid fnOid)
{
char *funcName = get_func_name(fnOid);
@@ -387,7 +334,7 @@ static bool CheckFuncArgsForTransferNumDistinct(FuncExpr *funcExpr, Node **varNo
/*
* check if the function can transfer number of distinct from one of its parameters
*/
-static bool IsFunctionTransferNumDistinct(FuncExpr *funcExpr)
+bool IsFunctionTransferNumDistinct(FuncExpr *funcExpr)
{
/*
* We explicitly allow or disallow functions to transfer number of distinct.
diff --git a/src/common/backend/utils/adt/genfile.cpp b/src/common/backend/utils/adt/genfile.cpp
index 44a437a16..5793e10b5 100644
--- a/src/common/backend/utils/adt/genfile.cpp
+++ b/src/common/backend/utils/adt/genfile.cpp
@@ -985,6 +985,16 @@ Datum compress_statistic_info(PG_FUNCTION_ARGS)
PG_RETURN_DATUM(HeapTupleGetDatum(tuple));
}
+void item_state_free(CompressAddressItemState *itemState)
+{
+ if (itemState->rbStruct.header != NULL) {
+ MmapFree(&itemState->rbStruct);
+ itemState->rbStruct.header = NULL;
+ }
+ (void)FreeFile(itemState->compressedFd);
+ itemState->compressedFd = NULL;
+}
+
Datum pg_read_binary_file_blocks(PG_FUNCTION_ARGS)
{
int32 startBlockNum = PG_GETARG_INT32(1);
@@ -1023,6 +1033,11 @@ Datum pg_read_binary_file_blocks(PG_FUNCTION_ARGS)
CfsReadStruct cfsReadStruct{itemState->compressedFd, itemState->rbStruct.header, extentCount};
len = CfsReadCompressedPage(VARDATA(buf), BLCKSZ, itemState->blkno % CFS_LOGIC_BLOCKS_PER_EXTENT,
&cfsReadStruct, CFS_LOGIC_BLOCKS_PER_FILE * itemState->segmentNo + itemState->blkno);
+ if (len > MIN_COMPRESS_ERROR_RT) {
+ item_state_free(itemState);
+ ereport(ERROR, (ERRCODE_INVALID_PARAMETER_VALUE,
+ errmsg("can not read actual block %u, error code: %lu,", itemState->blkno, len)));
+ }
}
SET_VARSIZE(buf, len + VARHDRSZ);
Datum values[6];
@@ -1041,10 +1056,7 @@ Datum pg_read_binary_file_blocks(PG_FUNCTION_ARGS)
itemState->blkno++;
SRF_RETURN_NEXT(fctx, result);
} else {
- if (itemState->rbStruct.header != NULL) {
- MmapFree(&itemState->rbStruct);
- }
- (void)FreeFile(itemState->compressedFd);
+ item_state_free(itemState);
SRF_RETURN_DONE(fctx);
}
}
diff --git a/src/common/backend/utils/adt/like.cpp b/src/common/backend/utils/adt/like.cpp
index 33733df41..32be6bd64 100644
--- a/src/common/backend/utils/adt/like.cpp
+++ b/src/common/backend/utils/adt/like.cpp
@@ -25,6 +25,7 @@
#include "miscadmin.h"
#include "utils/builtins.h"
#include "utils/pg_locale.h"
+#include "catalog/gs_utf8_collation.h"
static int SB_MatchText(char* t, int tlen, char* p, int plen, pg_locale_t locale, bool locale_is_c);
static text* SB_do_like_escape(text*, text*);
@@ -153,6 +154,15 @@ int GenericMatchText(char* s, int slen, char* p, int plen)
return MB_MatchText(s, slen, p, plen, 0, true);
}
+int generic_match_text_with_collation(char* s, int slen, char* p, int plen, Oid collation)
+{
+ if (collation == UTF8MB4_GENERAL_CI_COLLATION_OID || collation == UTF8MB4_UNICODE_CI_COLLATION_OID) {
+ return matchtext_utf8mb4((unsigned char*)s, slen, (unsigned char*)p, plen);
+ }
+
+ return GenericMatchText(s, slen, p, plen);
+}
+
static inline int Generic_Text_IC_like(text* str, text* pat, Oid collation)
{
char *s = NULL, *p = NULL;
@@ -263,7 +273,7 @@ Datum textlike(PG_FUNCTION_ARGS)
p = VARDATA_ANY(pat);
plen = VARSIZE_ANY_EXHDR(pat);
- result = (GenericMatchText(s, slen, p, plen) == LIKE_TRUE);
+ result = (generic_match_text_with_collation(s, slen, p, plen, PG_GET_COLLATION()) == LIKE_TRUE);
PG_RETURN_BOOL(result);
}
@@ -283,7 +293,7 @@ Datum textnlike(PG_FUNCTION_ARGS)
p = VARDATA_ANY(pat);
plen = VARSIZE_ANY_EXHDR(pat);
- result = (GenericMatchText(s, slen, p, plen) != LIKE_TRUE);
+ result = (generic_match_text_with_collation(s, slen, p, plen, PG_GET_COLLATION()) != LIKE_TRUE);
PG_RETURN_BOOL(result);
}
diff --git a/src/common/backend/utils/adt/pgstatfuncs.cpp b/src/common/backend/utils/adt/pgstatfuncs.cpp
index 03012ef0f..3fd13186b 100644
--- a/src/common/backend/utils/adt/pgstatfuncs.cpp
+++ b/src/common/backend/utils/adt/pgstatfuncs.cpp
@@ -395,7 +395,9 @@ static const char* WaitStateDesc[] = {
"wait sync consumer next step", // STATE_WAIT_SYNC_CONSUMER_NEXT_STEP
"wait sync producer next step", // STATE_WAIT_SYNC_PRODUCER_NEXT_STEP
"gtm set consistency point", // STATE_GTM_SET_CONSISTENCY_POINT
- "wait sync bgworkers" // STATE_WAIT_SYNC_BGWORKERS
+ "wait sync bgworkers", // STATE_WAIT_SYNC_BGWORKERS
+ "stanby read recovery conflict", // STATE_STANDBY_READ_RECOVERY_CONFLICT
+ "standby get snapshot" // STATE_STANDBY_GET_SNAPSHOT
};
// description for WaitStatePhase enums.
@@ -3931,10 +3933,10 @@ void get_network_info(char** node_host, int* node_port)
*node_host = get_pgxc_nodehost(node_oid);
*node_port = get_pgxc_nodeport(node_oid);
#else
- if (strcmp("*", g_instance.attr.attr_network.ListenAddresses) == 0) {
+ if (strcmp("*", u_sess->attr.attr_network.ListenAddresses) == 0) {
*node_host = "localhost";
} else {
- *node_host = g_instance.attr.attr_network.ListenAddresses;
+ *node_host = u_sess->attr.attr_network.ListenAddresses;
}
*node_port = g_instance.attr.attr_network.PostPortNumber;
#endif
diff --git a/src/common/backend/utils/adt/ruleutils.cpp b/src/common/backend/utils/adt/ruleutils.cpp
index 957e07cf1..dc2f6825e 100644
--- a/src/common/backend/utils/adt/ruleutils.cpp
+++ b/src/common/backend/utils/adt/ruleutils.cpp
@@ -76,6 +76,7 @@
#include "parser/parser.h"
#include "parser/parsetree.h"
#include "parser/parse_expr.h"
+#include "parser/parse_utilcmd.h"
#ifdef PGXC
#include "pgxc/pgxc.h"
#include "optimizer/pgxcplan.h"
@@ -100,6 +101,7 @@
#include "db4ai/gd.h"
#include "commands/sqladvisor.h"
#include "commands/sequence.h"
+#include "client_logic/client_logic.h"
/* ----------
* Pretty formatting constants
@@ -214,6 +216,7 @@ typedef struct tableInfo {
AttrNumber autoinc_attnum;
Oid autoinc_consoid;
Oid autoinc_seqoid;
+ Oid collate;
} tableInfo;
typedef struct SubpartitionInfo {
@@ -260,14 +263,6 @@ static void push_ancestor_plan(deparse_namespace* dpns, ListCell* ancestor_cell,
static void pop_ancestor_plan(deparse_namespace* dpns, deparse_namespace* save_dpns);
static void make_ruledef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc, int prettyFlags);
static void make_viewdef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc, int prettyFlags, int wrapColumn);
-static void get_query_def(Query* query, StringInfo buf, List* parentnamespace, TupleDesc resultDesc, int prettyFlags,
- int wrapColumn, int startIndent
-#ifdef PGXC
- ,
- bool finalise_aggregates, bool sortgroup_colno, void* parserArg = NULL
-#endif /* PGXC */
- ,
- bool qrw_phase = false, bool viewdef = false, bool is_fqs = false);
static void get_values_def(List* values_lists, deparse_context* context);
static void get_with_clause(Query* query, deparse_context* context);
static void get_select_query_def(Query* query, deparse_context* context, TupleDesc resultDesc);
@@ -618,6 +613,11 @@ char* pg_get_viewdef_worker(Oid viewoid, int prettyFlags, int wrapColumn)
return buf.data;
}
+char* pg_get_viewdef_string(Oid viewid)
+{
+ return pg_get_viewdef_worker(viewid, 0, -1);
+}
+
/*
* @Description: if the type is a string type
* @in typid - type oid
@@ -1384,23 +1384,63 @@ static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl
{
appendStringInfo(buf, "\n( ");
- /* we only support single partition key for list partition table */
- Assert(partkeynum == 1);
-
/* get table partitions info */
StringInfo query = makeStringInfo();
- appendStringInfo(query,
- "SELECT /*+ hashjoin(p t) */p.relname AS partname, "
- "array_to_string(p.boundaries, ',') as partbound, "
- "array_to_string(p.boundaries, ''',''') as partboundstr, "
- "p.oid AS partoid, "
- "t.spcname AS reltblspc "
- "FROM pg_partition p LEFT JOIN pg_tablespace t "
- "ON p.reltablespace = t.oid "
- "WHERE p.parentid = %u AND p.parttype = '%c' "
- "AND p.partstrategy = '%c' ORDER BY p.boundaries[1]::%s ASC",
- tableoid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_LIST, get_typename(*iPartboundary));
-
+ if (partkeynum == 1) {
+ appendStringInfo(query,
+ "SELECT /*+ hashjoin(p t) */p.relname AS partname, "
+ "array_to_string(p.boundaries, ',') as partbound, "
+ "array_to_string(p.boundaries, ''',''') as partboundstr, "
+ "p.oid AS partoid, "
+ "t.spcname AS reltblspc "
+ "FROM pg_partition p LEFT JOIN pg_tablespace t "
+ "ON p.reltablespace = t.oid "
+ "WHERE p.parentid = %u AND p.parttype = '%c' "
+ "AND p.partstrategy = '%c' ORDER BY p.boundaries[1]::%s ASC",
+ tableoid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_LIST, get_typename(*iPartboundary));
+ } else {
+ appendStringInfo(query,
+ "SELECT /*+ hashjoin(p t) */p.relname AS partname, "
+ "p.bound_def AS partbound, "
+ "p.oid AS partoid, "
+ "t.spcname AS reltblspc FROM ( "
+ "SELECT oid, relname, reltablespace, pg_catalog.string_agg(bound,',' ORDER BY bound_id) AS bound_def FROM( "
+ "SELECT oid, relname, reltablespace, bound_id, '('||"
+ "pg_catalog.array_to_string(pg_catalog.array_agg(key_value ORDER BY key_id), ',', 'NULL')||')' AS bound "
+ "FROM ( SELECT oid, relname, reltablespace, bound_id, key_id, ");
+ int cnt = 0;
+ for (int i = 0; i < partkeynum; i++) {
+ if (!isTypeString(iPartboundary[i])) {
+ continue;
+ }
+ if (cnt > 0) {
+ appendStringInfo(query, ",");
+ } else {
+ appendStringInfo(query, "CASE WHEN key_id in (");
+ }
+ appendStringInfo(query, "%d", i + 1);
+ cnt++;
+ }
+ if (cnt > 0) {
+ appendStringInfo(query, ") THEN pg_catalog.quote_literal(key_value) ELSE key_value END AS ");
+ }
+ appendStringInfo(query,
+ "key_value FROM ( "
+ "SELECT oid, relname, reltablespace, bound_id, pg_catalog.generate_subscripts(keys_array, 1) AS key_id, "
+ "pg_catalog.unnest(keys_array)::text AS key_value FROM ( "
+ "SELECT oid, relname, reltablespace, bound_id,key_bounds::cstring[] AS keys_array FROM ( "
+ "SELECT oid, relname, reltablespace, pg_catalog.unnest(boundaries) AS key_bounds, "
+ "pg_catalog.generate_subscripts(boundaries, 1) AS bound_id FROM pg_partition "
+ "WHERE parentid = %u AND parttype = '%c' AND partstrategy = '%c')))) "
+ "GROUP BY oid, relname, reltablespace, bound_id) "
+ "GROUP BY oid, relname, reltablespace "
+ "UNION ALL SELECT oid, relname, reltablespace, 'DEFAULT' AS bound_def FROM pg_partition "
+ "WHERE parentid = %u AND parttype = '%c' AND partstrategy = '%c' AND boundaries[1] IS NULL) p "
+ "LEFT JOIN pg_tablespace t ON p.reltablespace = t.oid "
+ "ORDER BY p.bound_def ASC",
+ tableoid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_LIST,
+ tableoid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_LIST);
+ }
(void)SPI_execute(query->data, true, INT_MAX);
int proc = SPI_processed;
SPITupleTable *spitup = SPI_tuptable;
@@ -1416,7 +1456,7 @@ static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl
char *pvalue = SPI_getvalue(spi_tuple, spi_tupdesc, SPI_fnumber(spi_tupdesc, "partbound"));
if (pvalue == NULL || strlen(pvalue) == 0) {
appendStringInfo(buf, "DEFAULT");
- } else if (isTypeString(*iPartboundary)) {
+ } else if (partkeynum == 1 && isTypeString(*iPartboundary)) {
char *svalue = SPI_getvalue(spi_tuple, spi_tupdesc, SPI_fnumber(spi_tupdesc, "partboundstr"));
appendStringInfo(buf, "'%s'", svalue);
pfree_ext(svalue);
@@ -1608,18 +1648,24 @@ static int get_table_attribute(
/* Compression mode */
get_compression_mode(att_tup, buf);
- /* Add collation if not default for the type */
- if (OidIsValid(att_tup->attcollation)) {
- if (att_tup->attcollation != get_typcollation(att_tup->atttypid)) {
- /* always schema-qualify, don't try to be smart */
- char* collname = get_collation_name(att_tup->attcollation);
+ /* Add charset and collation if not default for the type */
+ if (OidIsValid(att_tup->attcollation) && att_tup->attcollation != get_typcollation(att_tup->atttypid)) {
+ /* always schema-qualify, don't try to be smart */
+ char* collname = get_collation_name(att_tup->attcollation);
+ int charset = get_charset_by_collation(att_tup->attcollation);
+ if (DB_IS_CMPT(B_FORMAT) && charset != PG_INVALID_ENCODING) {
+ appendStringInfo(
+ buf, " CHARACTER SET %s", quote_identifier(pg_encoding_to_char(charset)));
+ appendStringInfo(
+ buf, " COLLATE %s", quote_identifier(collname));
+ } else {
Oid namespace_oid = get_collation_namespace(att_tup->attcollation);
char* namespace_name = get_namespace_name(namespace_oid);
appendStringInfo(
buf, " COLLATE %s.%s", quote_identifier(namespace_name), quote_identifier(collname));
- pfree_ext(collname);
pfree_ext(namespace_name);
}
+ pfree_ext(collname);
}
if (formatter != NULL) {
@@ -2247,6 +2293,14 @@ static void append_table_autoinc_clause(StringInfo buf, Oid seqoid)
appendStringInfo(buf, " AUTO_INCREMENT = %s", strbuf);
}
+static void append_table_charset_collate_clause(StringInfo buf, Oid collate)
+{
+ const char* coll_name = get_collation_name(collate);
+ const char* charset_name = pg_encoding_to_char(get_charset_by_collation(collate));
+
+ appendStringInfo(buf, "\nCHARACTER SET = \"%s\" COLLATE = \"%s\"", charset_name, coll_name);
+}
+
/*
* @Description: append table's info.
* @in tableinfo - parent table info.
@@ -2270,6 +2324,10 @@ static bool append_table_info(tableInfo tableinfo, const char* srvname, StringIn
append_table_autoinc_clause(buf, tableinfo.autoinc_seqoid);
}
+ if (OidIsValid(tableinfo.collate)) {
+ append_table_charset_collate_clause(buf, tableinfo.collate);
+ }
+
if (tableinfo.relkind == RELKIND_FOREIGN_TABLE || tableinfo.relkind == RELKIND_STREAM)
appendStringInfo(buf, "\nSERVER %s", quote_identifier(srvname));
@@ -2576,6 +2634,39 @@ static inline bool IsTableVisible(Oid tableoid)
return OidIsValid(oid);
}
+/*
+ * @Description: in B-format, remove the collate attribute from reloptions and
+ * add collate information to tableinfo.
+ * @in reloptions - old reloptions.
+ * @in tableinfo - table information.
+ * @return - new reloptions.
+ */
+static Datum transform_reloptions_collate(Datum reloptions, tableInfo* tableinfo)
+{
+ List* options = NULL;
+ ListCell* lcell = NULL;
+ DefElem* opt = NULL;
+
+ tableinfo->collate = InvalidOid;
+ if (!DB_IS_CMPT(B_FORMAT)) {
+ return reloptions;
+ }
+
+ options = untransformRelOptions(reloptions);
+ foreach(lcell, options) {
+ opt = (DefElem*)lfirst(lcell);
+ if (strcmp(opt->defname, "collate") == 0) {
+ tableinfo->collate = pg_strtoint32(strVal(opt->arg));
+ options = list_delete_ptr(options, opt);
+ break;
+ }
+ }
+ reloptions = transformRelOptions((Datum)0, options, NULL, NULL, false, false);
+ list_free_deep(options);
+
+ return reloptions;
+}
+
/*
* @Description: get table's defination by table oid.
* @in tableoid - table oid.
@@ -2642,8 +2733,10 @@ static char* pg_get_tabledef_worker(Oid tableoid)
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Not a ordinary table or foreign table.")));
}
+ tableinfo.collate = 0;
Datum reloptions = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, &isnull);
if (isnull == false) {
+ reloptions = transform_reloptions_collate(reloptions, &tableinfo);
Datum sep = CStringGetTextDatum(", ");
Datum txt = OidFunctionCall2(F_ARRAY_TO_TEXT, reloptions, sep);
tableinfo.reloptions = TextDatumGetCString(txt);
@@ -2909,6 +3002,11 @@ Datum pg_get_triggerdef_ext(PG_FUNCTION_ARGS)
PG_RETURN_TEXT_P(string_to_text(pg_get_triggerdef_worker(trigid, pretty)));
}
+char* pg_get_triggerdef_string(Oid trigid)
+{
+ return pg_get_triggerdef_worker(trigid, false);
+}
+
static char* pg_get_triggerdef_worker(Oid trigid, bool pretty)
{
HeapTuple ht_trig;
@@ -4321,7 +4419,7 @@ static inline bool IsFunctionVisible(Oid funcoid)
StringInfoData query;
initStringInfo(&query);
appendStringInfo(&query, "select pg_namespace.oid from pg_catalog.pg_namespace where pg_namespace.oid in "
- "(select pronamespace from pg_proc where pg_proc.oid = %u);", funcoid);
+ "(select pronamespace from pg_catalog.pg_proc where pg_proc.oid = %u);", funcoid);
Oid oid = SearchSysTable(query.data);
pfree_ext(query.data);
return OidIsValid(oid);
@@ -5761,7 +5859,7 @@ void deparse_query(Query* query, StringInfo buf, List* parentnamespace, bool fin
* the view represented by a SELECT query.
* ----------
*/
-static void get_query_def(Query* query, StringInfo buf, List* parentnamespace, TupleDesc resultDesc, int prettyFlags,
+void get_query_def(Query* query, StringInfo buf, List* parentnamespace, TupleDesc resultDesc, int prettyFlags,
int wrapColumn, int startIndent
#ifdef PGXC
,
@@ -7903,15 +8001,23 @@ static void get_utility_query_def(Query* query, deparse_context* context)
/* if the column is encrypted, we should convert its data type */
if (coldef_enc != NULL && coldef_enc->dest_typname != NULL) {
+ /* get typename from the oid */
+ appendStringInfo(buf,
+ "%s %s ENCRYPTED WITH (DATATYPE_CL=%s,COLUMN_ENCRYPTION_KEY=%s, ENCRYPTION_TYPE = %s)",
+ quote_identifier(coldef->colname),
+ format_type_with_typemod(tpname->typeOid, tpname->typemod),
+ get_typename_by_id(coldef_enc->dest_typname->typeOid),
+ NameListToString(coldef_enc->column_key_name),
+ get_encryption_type_name(coldef_enc->columnEncryptionAlgorithmType));
tpname = coldef_enc->dest_typname;
+ } else {
+ /* get typename from the oid */
+ appendStringInfo(buf,
+ "%s %s",
+ quote_identifier(coldef->colname),
+ format_type_with_typemod(tpname->typeOid, tpname->typemod));
}
- /* get typename from the oid */
- appendStringInfo(buf,
- "%s %s",
- quote_identifier(coldef->colname),
- format_type_with_typemod(tpname->typeOid, tpname->typemod));
-
// add the compress mode for this column
switch (coldef->cmprs_mode) {
case ATT_CMPR_NOCOMPRESS:
diff --git a/src/common/backend/utils/adt/selfuncs.cpp b/src/common/backend/utils/adt/selfuncs.cpp
index c4c197d11..80786c757 100755
--- a/src/common/backend/utils/adt/selfuncs.cpp
+++ b/src/common/backend/utils/adt/selfuncs.cpp
@@ -156,6 +156,7 @@
#include "utils/typcache.h"
#include "utils/memutils.h"
#include "optimizer/gplanmgr.h"
+#include "instruments/instr_statement.h"
#ifdef PGXC
#include "pgxc/pgxc.h"
@@ -4796,19 +4797,6 @@ void examine_variable(PlannerInfo* root, Node* node, int varRelid, VariableStatD
}
}
-static void switch_subquery(const PlannerInfo *const root, Query **subquery, const RelOptInfo *const rel)
-{
- if (!rel->subroot->parse->is_from_inlist2join_rewrite) {
- *subquery = rel->subroot->parse;
- return;
- }
-
- if (!root->parse->is_from_sublink_rewrite && !root->parse->is_from_subquery_rewrite) {
- *subquery = rel->subroot->parse;
- return;
- }
-}
-
/*
* examine_simple_variable
* Handle a simple Var for examine_variable
@@ -4917,7 +4905,10 @@ static void examine_simple_variable(PlannerInfo* root, Var* var, VariableStatDat
* This is a temporary fix for mislocated varattno after inlist2join
* optimization.
*/
- switch_subquery(root, &subquery, rel);
+ if (rel->subroot->parse->is_from_inlist2join_rewrite) {
+ return;
+ }
+ subquery = rel->subroot->parse;
Assert(IsA(subquery, Query));
@@ -5000,6 +4991,8 @@ statistic_proc_security_check(const VariableStatData *vardata, Oid func_oid)
if (get_func_leakproof(func_oid))
return true;
+
+ instr_stmt_report_cause_type(NUM_F_LEAKPROOF);
ereport(DEBUG2,
(errmodule(MOD_OPT),
diff --git a/src/common/backend/utils/adt/varchar.cpp b/src/common/backend/utils/adt/varchar.cpp
index 55a72d62c..77f3740a5 100644
--- a/src/common/backend/utils/adt/varchar.cpp
+++ b/src/common/backend/utils/adt/varchar.cpp
@@ -24,6 +24,8 @@
#include "mb/pg_wchar.h"
#include "utils/sortsupport.h"
#include "vecexecutor/vectorbatch.h"
+#include "utils/pg_locale.h"
+#include "catalog/gs_utf8_collation.h"
#include "miscadmin.h"
@@ -37,6 +39,7 @@
} \
} while (0)
+int bpcharcase(PG_FUNCTION_ARGS);
/* common code for bpchartypmodin and varchartypmodin */
static int32 anychar_typmodin(ArrayType* ta, const char* typname)
@@ -725,10 +728,16 @@ Datum bpcharoctetlen(PG_FUNCTION_ARGS)
Datum bpchareq(PG_FUNCTION_ARGS)
{
+ bool result = false;
+ if (is_b_format_collation(PG_GET_COLLATION())) {
+ /* use varstr_cmp to compare, return 0 means equal */
+ result = (bpcharcase(fcinfo) == 0);
+ PG_RETURN_BOOL(result);
+ }
+
BpChar* arg1 = PG_GETARG_BPCHAR_PP(0);
BpChar* arg2 = PG_GETARG_BPCHAR_PP(1);
int len1, len2;
- bool result = false;
len1 = bcTruelen(arg1);
len2 = bcTruelen(arg2);
@@ -750,10 +759,15 @@ Datum bpchareq(PG_FUNCTION_ARGS)
Datum bpcharne(PG_FUNCTION_ARGS)
{
+ bool result = false;
+ if (is_b_format_collation(PG_GET_COLLATION())) {
+ result = !(bpcharcase(fcinfo) == 0);
+ PG_RETURN_BOOL(result);
+ }
+
BpChar* arg1 = PG_GETARG_BPCHAR_PP(0);
BpChar* arg2 = PG_GETARG_BPCHAR_PP(1);
int len1, len2;
- bool result = false;
len1 = bcTruelen(arg1);
len2 = bcTruelen(arg2);
@@ -967,11 +981,16 @@ Datum hashbpchar(PG_FUNCTION_ARGS)
char* keydata = NULL;
int keylen;
Datum result;
+ Oid collid = PG_GET_COLLATION();
keydata = VARDATA_ANY(key);
keylen = bcTruelen(key);
- result = hash_any((unsigned char*)keydata, keylen);
+ if (!is_b_format_collation(collid)) {
+ result = hash_any((unsigned char*)keydata, keylen);
+ } else {
+ result = hash_text_by_builtin_colltions((unsigned char *)VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key), collid);
+ }
/* Avoid leaking memory for toasted inputs */
PG_FREE_IF_COPY(key, 0);
diff --git a/src/common/backend/utils/adt/varlena.cpp b/src/common/backend/utils/adt/varlena.cpp
index c23b45e9d..54cc4b661 100644
--- a/src/common/backend/utils/adt/varlena.cpp
+++ b/src/common/backend/utils/adt/varlena.cpp
@@ -42,6 +42,8 @@
#include "executor/node/nodeSort.h"
#include "pgxc/groupmgr.h"
#include "openssl/evp.h"
+#include "catalog/gs_utf8_collation.h"
+#include "catalog/pg_collation_fn.h"
#define SUBSTR_WITH_LEN_OFFSET 2
#define SUBSTR_A_CMPT_OFFSET 4
@@ -107,6 +109,8 @@ typedef struct {
static int varstrfastcmp_c(Datum x, Datum y, SortSupport ssup);
static int bpcharfastcmp_c(Datum x, Datum y, SortSupport ssup);
+static int varstrfastcmp_builtin(Datum x, Datum y, SortSupport ssup);
+static int bpvarstrfastcmp_builtin(Datum x, Datum y, SortSupport ssup);
static int varstrfastcmp_locale(Datum x, Datum y, SortSupport ssup);
static int varstrcmp_abbrev(Datum x, Datum y, SortSupport ssup);
static Datum varstr_abbrev_convert(Datum original, SortSupport ssup);
@@ -1692,6 +1696,8 @@ int varstr_cmp(char* arg1, int len1, char* arg2, int len2, Oid collid)
result = memcmp(arg1, arg2, Min(len1, len2));
if ((result == 0) && (len1 != len2))
result = (len1 < len2) ? -1 : 1;
+ } else if (is_b_format_collation(collid)) {
+ result = varstr_cmp_by_builtin_collations(arg1, len1, arg2, len2, collid);
} else {
char a1buf[TEXTBUFLEN];
char a2buf[TEXTBUFLEN];
@@ -1870,6 +1876,23 @@ int text_cmp(text* arg1, text* arg2, Oid collid)
return varstr_cmp(a1p, len1, a2p, len2, collid);
}
+bool texteq_with_collation(PG_FUNCTION_ARGS)
+{
+ Datum arg1 = PG_GETARG_DATUM(0);
+ Datum arg2 = PG_GETARG_DATUM(1);
+ bool result = false;
+
+ text* targ1 = DatumGetTextPP(arg1);
+ text* targ2 = DatumGetTextPP(arg2);
+
+ /* text_cmp return 0 means equal */
+ result = (text_cmp(targ1, targ2, PG_GET_COLLATION()) == 0);
+ PG_FREE_IF_COPY(targ1, 0);
+ PG_FREE_IF_COPY(targ2, 1);
+
+ return result;
+}
+
/*
* Comparison functions for text strings.
*
@@ -1882,12 +1905,18 @@ Datum texteq(PG_FUNCTION_ARGS)
{
Datum arg1 = PG_GETARG_DATUM(0);
Datum arg2 = PG_GETARG_DATUM(1);
+ bool result = false;
+
+ Oid collid = PG_GET_COLLATION();
+ if (is_b_format_collation(collid)) {
+ result = texteq_with_collation(fcinfo);
+ PG_RETURN_BOOL(result);
+ }
if (VARATT_IS_HUGE_TOAST_POINTER(DatumGetPointer(arg1)) || VARATT_IS_HUGE_TOAST_POINTER(DatumGetPointer(arg2))) {
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("texteq could not support more than 1GB clob/blob data")));
}
- bool result = false;
Size len1, len2;
/*
@@ -1925,6 +1954,12 @@ Datum textne(PG_FUNCTION_ARGS)
bool result = false;
Size len1, len2;
+ Oid collid = PG_GET_COLLATION();
+ if (is_b_format_collation(collid)) {
+ result = !(texteq_with_collation(fcinfo));
+ PG_RETURN_BOOL(result);
+ }
+
/* See comment in texteq() */
len1 = toast_raw_datum_size(arg1);
len2 = toast_raw_datum_size(arg2);
@@ -2133,6 +2168,7 @@ void varstr_sortsupport(SortSupport ssup, Oid collid, bool bpchar)
{
bool abbreviate = ssup->abbreviate;
bool collate_c = false;
+ bool collate_builtin = false;
VarStringSortSupport* sss = NULL;
#ifdef HAVE_LOCALE_T
@@ -2164,6 +2200,14 @@ void varstr_sortsupport(SortSupport ssup, Oid collid, bool bpchar)
ssup->comparator = bpcharfastcmp_c;
collate_c = true;
+ } else if (is_b_format_collation(collid)) {
+ if (!bpchar) {
+ ssup->comparator = varstrfastcmp_builtin;
+ } else {
+ ssup->comparator = bpvarstrfastcmp_builtin;
+ }
+ collate_builtin = true;
+ abbreviate = false;
}
#ifdef WIN32
else if (GetDatabaseEncoding() == PG_UTF8)
@@ -2223,7 +2267,7 @@ void varstr_sortsupport(SortSupport ssup, Oid collid, bool bpchar)
* scratch space (and to detect requirement for BpChar semantics from
* caller), and the abbreviation case requires additional state.
*/
- if (abbreviate || !collate_c) {
+ if (abbreviate || (!collate_c && !collate_builtin)) {
sss = (VarStringSortSupport*)palloc(sizeof(VarStringSortSupport));
sss->buf1 = (char*)palloc(TEXTBUFLEN);
sss->buflen1 = TEXTBUFLEN;
@@ -2334,6 +2378,61 @@ static int bpcharfastcmp_c(Datum x, Datum y, SortSupport ssup)
return cmp;
}
+/*
+ * sortsupport comparison func (for builtin locale case)
+ */
+static int varstrfastcmp_builtin(Datum x, Datum y, SortSupport ssup)
+{
+ text* arg1 = DatumGetTextPP(x);
+ text* arg2 = DatumGetTextPP(y);
+ char *a1p = NULL, *a2p = NULL;
+ size_t len1, len2;
+ int result;
+
+ a1p = VARDATA_ANY(arg1);
+ a2p = VARDATA_ANY(arg2);
+
+ len1 = VARSIZE_ANY_EXHDR(arg1);
+ len2 = VARSIZE_ANY_EXHDR(arg2);
+
+ result = varstr_cmp_by_builtin_collations(a1p, len1, a2p, len2, ssup->ssup_collation);
+
+ /* We can't afford to leak memory here. */
+ if (PointerGetDatum(arg1) != x) {
+ pfree_ext(arg1);
+ }
+ if (PointerGetDatum(arg2) != y) {
+ pfree_ext(arg2);
+ }
+
+ return result;
+}
+
+static int bpvarstrfastcmp_builtin(Datum x, Datum y, SortSupport ssup)
+{
+ BpChar* arg1 = DatumGetBpCharPP(x);
+ BpChar* arg2 = DatumGetBpCharPP(y);
+ int len1, len2;
+ int result;
+
+ char* a1p = VARDATA_ANY(arg1);
+ char* a2p = VARDATA_ANY(arg2);
+
+ len1 = bpchartruelen(a1p, VARSIZE_ANY_EXHDR(arg1));
+ len2 = bpchartruelen(a2p, VARSIZE_ANY_EXHDR(arg2));
+
+ result = varstr_cmp_by_builtin_collations(a1p, len1, a2p, len2, ssup->ssup_collation);
+
+ /* We can't afford to leak memory here. */
+ if (PointerGetDatum(arg1) != x) {
+ pfree_ext(arg1);
+ }
+ if (PointerGetDatum(arg2) != y) {
+ pfree_ext(arg2);
+ }
+
+ return result;
+}
/*
* sortsupport comparison func (for locale case)
*/
@@ -6169,7 +6268,6 @@ Datum group_concat_transfn(PG_FUNCTION_ARGS)
appendStringInfoText(state, argsconcat); /* value */
if (state->len > maxlength) {
state->len = maxlength;
- state->data[state->len] = '\0';
}
} else {
/*
diff --git a/src/common/backend/utils/cache/inval.cpp b/src/common/backend/utils/cache/inval.cpp
index 4e87ea6b4..f50ca6122 100644
--- a/src/common/backend/utils/cache/inval.cpp
+++ b/src/common/backend/utils/cache/inval.cpp
@@ -874,6 +874,9 @@ static void TestCodeToForceCacheFlushes()
*/
void AcceptInvalidationMessages()
{
+ if (!DeepthInAcceptInvalidationMessageNotZero()) {
+ t_thrd.rc_cxt.rcNum = 0;
+ }
if (EnableLocalSysCache()) {
u_sess->pcache_cxt.gpc_remote_msg = true;
knl_u_inval_context *inval_cxt = &t_thrd.lsc_cxt.lsc->inval_cxt;
diff --git a/src/common/backend/utils/cache/partcache.cpp b/src/common/backend/utils/cache/partcache.cpp
index 154e80ed1..39c241e08 100644
--- a/src/common/backend/utils/cache/partcache.cpp
+++ b/src/common/backend/utils/cache/partcache.cpp
@@ -88,7 +88,6 @@
*
*non-export function prototypes
*/
-static HeapTuple ScanPgPartition(Oid targetPartId, bool indexOK, Snapshot snapshot);
static Partition AllocatePartitionDesc(Form_pg_partition relp);
static void PartitionFlushPartition(Partition partition);
@@ -96,64 +95,6 @@ bytea* merge_rel_part_reloption(Oid rel_oid, Oid part_oid);
static void PartitionParseRelOptions(Partition partition, HeapTuple tuple);
-static HeapTuple ScanPgPartition(Oid targetPartId, bool indexOK, Snapshot snapshot)
-{
- HeapTuple pg_partition_tuple;
- Relation pg_partition_desc;
- SysScanDesc pg_partition_scan;
- ScanKeyData key[1];
-
- /*
- * If something goes wrong during backend startup, we might find ourselves
- * trying to read pg_partition before we've selected a database. That ain't
- * gonna work, so bail out with a useful error message. If this happens,
- * it probably means a partcache entry that needs to be nailed isn't.
- */
- if (!OidIsValid(u_sess->proc_cxt.MyDatabaseId)) {
- ereport(FATAL,
- (errcode(ERRCODE_UNDEFINED_DATABASE), errmsg("cannot read pg_class without having selected a database")));
- }
-
- if (snapshot == NULL) {
- snapshot = GetCatalogSnapshot();
- }
-
- /*
- * form a scan key
- */
- ScanKeyInit(&key[0], ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(targetPartId));
-
- /*
- * Open pg_partition and fetch a tuple. Force heap scan if we haven't yet
- * built the critical partcache entries (this includes initdb and startup
- * without a pg_internal.init file). The caller can also force a heap
- * scan by setting indexOK == false.
- */
- /*u_sess->relcache_cxt.criticalRelcachesBuilt--->criticalPartcachesBuilt*/
- pg_partition_desc = heap_open(PartitionRelationId, AccessShareLock);
- pg_partition_scan = systable_beginscan(pg_partition_desc,
- PartitionOidIndexId,
- indexOK && LocalRelCacheCriticalRelcachesBuilt(),
- snapshot,
- 1,
- key);
-
- pg_partition_tuple = systable_getnext(pg_partition_scan);
-
- /*
- * Must copy tuple before releasing buffer.
- */
- if (HeapTupleIsValid(pg_partition_tuple)) {
- pg_partition_tuple = heap_copytuple(pg_partition_tuple);
- }
-
- /* all done */
- systable_endscan(pg_partition_scan);
- heap_close(pg_partition_desc, AccessShareLock);
-
- return pg_partition_tuple;
-}
-
static Partition AllocatePartitionDesc(Form_pg_partition partp)
{
Partition partition;
@@ -699,8 +640,6 @@ void PartitionClearPartition(Partition partition, bool rebuild)
if (NULL == newpart) {
/* Should only get here if partition was deleted */
- PartitionIdCacheDeleteLocal(partition);
- PartitionDestroyPartition(partition);
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE), errmsg("partition %u deleted while still in use", save_partid)));
}
@@ -759,6 +698,16 @@ void PartitionClearPartition(Partition partition, bool rebuild)
partition->pd_node.bucketNode = newpart->pd_node.bucketNode;
}
+ /*
+ * The old partMap pointer has been used by an opened relation.
+ * Therefore, we need to ensure that the memory pointed to by the old partMap pointer cannot be freed
+ * and that the value in the memory pointed to by the old partMap pointer is new.
+ * 1. When the old and new partMaps are equal, keep old partMap pointer by SWAPFIELD.
+ * Then new partMap will be destroyed later.
+ * 2. When the old and new partMaps are not equal, keep old partMap pointer by SWAPFIELD
+ * and swap the memory that two partMap pointers point to in partition_rebuild_partmap.
+ * The new partMap pointer and the memory it points to are then destroyed later.
+ */
if (newpart->partMap) {
if (!EqualPartitonMap(partition->partMap, newpart->partMap)) {
RebuildPartitonMap(newpart->partMap, partition->partMap);
@@ -2275,7 +2224,7 @@ bool PartitionParentOidIsLive(Datum parentDatum)
*
* Notes: This function is called only when a partition table is lazy vacuumed,
* and cannot be executed in parallel with PartitionSetWaitCleanGpi, Currently,
- * the AccessShareLock lock of ADD_PARTITION_ACTION is used to ensure that no concurrent
+ * the AccessShareLock lock of INTERVAL_PARTITION_LOCK_SDEQUENCE is used to ensure that no concurrent
* operations are performed.
*/
void PartitionedSetEnabledClean(Oid parentOid)
@@ -2414,7 +2363,7 @@ void PartitionSetAllEnabledClean(Oid parentOid)
*
* Notes: Before calling the function, you must ensure that a lock with parentOid
* is already held (to prevent parallelism with any ALTER table partition process)
- * and AccessShareLock for ADD_PARTITION_ACTION (to prevent parallelism with the
+ * and AccessShareLock for INTERVAL_PARTITION_LOCK_SDEQUENCE (to prevent parallelism with the
* process of automatically creating partitions in any interval partition)
*/
void PartitionGetAllInvisibleParts(Oid parentOid, OidRBTree** invisibleParts)
diff --git a/src/common/backend/utils/cache/plancache.cpp b/src/common/backend/utils/cache/plancache.cpp
index 0e1224c32..9bb26c74f 100644
--- a/src/common/backend/utils/cache/plancache.cpp
+++ b/src/common/backend/utils/cache/plancache.cpp
@@ -287,6 +287,7 @@ CachedPlanSource* CreateCachedPlan(Node* raw_parse_tree, const char* query_strin
plansource->spi_signature = {(uint32)-1, 0, (uint32)-1, -1};
plansource->sql_patch_sequence = pg_atomic_read_u64(&g_instance.cost_cxt.sql_patch_sequence_id);
plansource->planManager = NULL;
+ plansource->hasSubQuery = false;
plansource->gpc_lockid = -1;
plansource->hasSubQuery = false;
@@ -675,6 +676,10 @@ void DropCachedPlan(CachedPlanSource* plansource)
*/
void ReleaseGenericPlan(CachedPlanSource* plansource)
{
+ if (ENABLE_CACHEDPLAN_MGR) {
+ ReleaseCustomPlan(plansource);
+ }
+
/* Be paranoid about the possibility that ReleaseCachedPlan fails */
if (plansource->gplan || plansource->cplan) {
CachedPlan* plan = NULL;
@@ -859,6 +864,10 @@ List* RevalidateCachedQuery(CachedPlanSource* plansource, bool has_lp)
}
/* generic root and all candidate plans need to be rebuilt. */
if (ENABLE_CACHEDPLAN_MGR && plansource->planManager != NULL) {
+ ereport(DEBUG2,
+ (errmodule(MOD_OPT),
+ errmsg("SearchPath has been changed, invalid planManager; query: \"%s\"",
+ plansource->query_string)));
plansource->planManager->is_valid = false;
}
}
@@ -947,6 +956,9 @@ List* RevalidateCachedQuery(CachedPlanSource* plansource, bool has_lp)
* correctly in the race condition case.)
*/
plansource->is_valid = false;
+ if (plansource->planManager != NULL) {
+ plansource->planManager->is_valid = false;
+ }
plansource->query_list = NIL;
plansource->relationOids = NIL;
plansource->invalItems = NIL;
@@ -1478,6 +1490,7 @@ CachedPlan* BuildCachedPlan(CachedPlanSource* plansource, List* qlist, ParamList
plan->is_valid = true;
plan->cpi = NULL;
plan->is_candidate = false;
+ plan->cost = -1;
/* assign generation number to new plan */
plan->generation = ++(plansource->generation);
@@ -1647,7 +1660,7 @@ static bool choose_cplan_by_hint(const CachedPlanSource* plansource, bool* choos
HintState *hint = parse->hintState;
if (hint != NULL && hint->cache_plan_hint != NIL) {
PlanCacheHint* pchint = (PlanCacheHint*)llast(hint->cache_plan_hint);
- if (pchint == NULL) {
+ if (pchint == NULL || pchint->base.hint_keyword == HINT_KEYWORD_CHOOSE_ADAPTIVE_GPLAN) {
return false;
}
pchint->base.state = HINT_STATE_USED;
@@ -1853,7 +1866,7 @@ CachedPlan* GetWiseCachedPlan(CachedPlanSource* plansource,
customplan = ChooseCustomPlan(plansource, boundParams);
if (!customplan) {
if (ChooseAdaptivePlan(plansource, boundParams)) {
- plan = GetAdaptGenericPlan(plansource, boundParams, &qlist);
+ plan = GetAdaptGenericPlan(plansource, boundParams, &qlist, &customplan);
} else {
plan = GetDefaultGenericPlan(plansource, boundParams, &qlist, &customplan);
}
@@ -2944,6 +2957,10 @@ CheckRelDependency(CachedPlanSource *plansource, Oid relid)
* parsing context, 'GenericRoot', need to be rebuilt.
*/
if (ENABLE_CACHEDPLAN_MGR && plansource->planManager != NULL) {
+ ereport(DEBUG2,
+ (errmodule(MOD_OPT),
+ errmsg("relation has been changed or updated, invalid planManager; query: \"%s\"",
+ plansource->query_string)));
plansource->planManager->is_valid = false;
}
}
@@ -3190,6 +3207,10 @@ ResetPlanCache(CachedPlanSource *plansource)
plansource->gplan->is_valid = false;
}
if (ENABLE_CACHEDPLAN_MGR && plansource->planManager != NULL) {
+ ereport(DEBUG2,
+ (errmodule(MOD_OPT),
+ errmsg("Reset plan cache, invalid planManager; query: \"%s\"",
+ plansource->query_string)));
plansource->planManager->is_valid = false;
}
}
diff --git a/src/common/backend/utils/cache/relcache.cpp b/src/common/backend/utils/cache/relcache.cpp
old mode 100644
new mode 100755
index 9832097d4..c2baed185
--- a/src/common/backend/utils/cache/relcache.cpp
+++ b/src/common/backend/utils/cache/relcache.cpp
@@ -43,6 +43,7 @@
#include "catalog/catalog.h"
#include "catalog/heap.h"
#include "catalog/catversion.h"
+#include "catalog/gs_sql_patch.h"
#include "catalog/index.h"
#include "catalog/indexing.h"
#include "catalog/namespace.h"
@@ -343,6 +344,7 @@ static const FormData_pg_attribute Desc_pg_replication_origin[Natts_pg_replicati
Schema_pg_replication_origin
};
static const FormData_pg_attribute Desc_pg_subscription_rel[Natts_pg_subscription_rel] = {Schema_pg_subscription_rel};
+static const FormData_pg_attribute Desc_gs_sql_patch_origin[Natts_gs_sql_patch] = {Schema_gs_sql_patch};
/* Please add to the array in ascending order of oid value */
static struct CatalogRelationBuildParam catalogBuildParam[CATALOG_NUM] = {{DefaultAclRelationId,
@@ -1073,6 +1075,15 @@ static struct CatalogRelationBuildParam catalogBuildParam[CATALOG_NUM] = {{Defau
Desc_gs_job_argument,
false,
true},
+ {GsSqlPatchRelationId, /* 9050 */
+ "gs_sql_patch",
+ GsSqlPatchRelationId_Rowtype_Id,
+ false,
+ false,
+ Natts_gs_sql_patch,
+ Desc_gs_sql_patch_origin,
+ false,
+ true},
{GsGlobalConfigRelationId,
"gs_global_config",
GsGlobalConfigRelationId_Rowtype_Id,
@@ -1494,6 +1505,32 @@ static void RelationParseRelOptions(Relation relation, HeapTuple tuple)
}
}
+static void GetInitdvals(Relation rel, HeapTuple tuple, TupInitDefVal* initdvals, int index, bool *hasInitDefval)
+{
+ bool is_null = false;
+ Datum dval;
+
+ dval = fastgetattr(tuple, Anum_pg_attribute_attinitdefval, rel->rd_att, &is_null);
+
+ if (is_null) {
+ initdvals[index].isNull = true;
+ initdvals[index].datum = NULL;
+ initdvals[index].dataLen = 0;
+ } else {
+ /* fetch and copy the default value. */
+ bytea* val = DatumGetByteaP(dval);
+ int len = VARSIZE(val) - VARHDRSZ;
+ char* buf = (char*)MemoryContextAlloc(LocalMyDBCacheMemCxt(), len);
+ errno_t rc = memcpy_s(buf, len, VARDATA(val), len);
+ securec_check(rc, "", "");
+
+ initdvals[index].isNull = false;
+ initdvals[index].datum = (Datum*)buf;
+ initdvals[index].dataLen = len;
+ *hasInitDefval = true;
+ }
+}
+
/*
* RelationBuildTupleDesc
*
@@ -1515,8 +1552,6 @@ static void RelationBuildTupleDesc(Relation relation, bool onlyLoadInitDefVal)
int ndef = 0;
/* alter table instantly */
- Datum dval;
- bool isNull = false;
bool hasInitDefval = false;
TupInitDefVal* initdvals = NULL;
@@ -1612,25 +1647,7 @@ static void RelationBuildTupleDesc(Relation relation, bool onlyLoadInitDefVal)
securec_check(rc, "\0", "\0");
}
if (initdvals != NULL) {
- dval = fastgetattr(pg_attribute_tuple, Anum_pg_attribute_attinitdefval, pg_attribute_desc->rd_att, &isNull);
-
- if (isNull) {
- initdvals[attp->attnum - 1].isNull = true;
- initdvals[attp->attnum - 1].datum = NULL;
- initdvals[attp->attnum - 1].dataLen = 0;
- } else {
- /* fetch and copy the default value. */
- bytea* val = DatumGetByteaP(dval);
- int len = VARSIZE(val) - VARHDRSZ;
- char* buf = (char*)MemoryContextAlloc(LocalMyDBCacheMemCxt(), len);
- errno_t rc = memcpy_s(buf, len, VARDATA(val), len);
- securec_check(rc, "", "");
-
- initdvals[attp->attnum - 1].isNull = false;
- initdvals[attp->attnum - 1].datum = (Datum*)buf;
- initdvals[attp->attnum - 1].dataLen = len;
- hasInitDefval = true;
- }
+ GetInitdvals(pg_attribute_desc, pg_attribute_tuple, initdvals, attp->attnum - 1, &hasInitDefval);
}
/* Update constraint/default info */
@@ -1655,6 +1672,46 @@ static void RelationBuildTupleDesc(Relation relation, bool onlyLoadInitDefVal)
}
}
+ if (DB_IS_CMPT(B_FORMAT) && need == 1) {
+ Form_pg_attribute attp;
+
+ pg_attribute_tuple = SearchSysCache2(ATTNUM, ObjectIdGetDatum(RelationGetRelid(relation)), Int16GetDatum(0));
+ if (HeapTupleIsValid(pg_attribute_tuple)) {
+ attp = (Form_pg_attribute)GETSTRUCT(pg_attribute_tuple);
+ for (int i = 0; i < relation->rd_att->natts; i++) {
+ if (relation->rd_att->attrs[i].attnum == 0) {
+ errno_t rc = memcpy_s(&relation->rd_att->attrs[i], ATTRIBUTE_FIXED_PART_SIZE,
+ attp, ATTRIBUTE_FIXED_PART_SIZE);
+ securec_check(rc, "\0", "\0");
+
+ if (initdvals != NULL) {
+ GetInitdvals(pg_attribute_desc, pg_attribute_tuple, initdvals, i, &hasInitDefval);
+ }
+
+ if (attp->attnotnull && !onlyLoadInitDefVal) {
+ constr->has_not_null = true;
+ }
+
+ if (attp->atthasdef && !onlyLoadInitDefVal) {
+ if (attrdef == NULL) {
+ attrdef = (AttrDefault*)MemoryContextAllocZero(
+ LocalMyDBCacheMemCxt(),
+ RelationGetNumberOfAttributes(relation) * sizeof(AttrDefault));
+ }
+
+ attrdef[ndef].adnum = i + 1;
+ attrdef[ndef].adbin = NULL;
+ ndef++;
+ }
+ break;
+
+ }
+ }
+ need--;
+ ReleaseSysCache(pg_attribute_tuple);
+ }
+ }
+
/*
* end the scan and close the attribute relation
*/
@@ -3760,6 +3817,16 @@ void RelationClearRelation(Relation relation, bool rebuild)
errno_t rc = memcpy_s(relation->rd_rel, CLASS_TUPLE_SIZE, newrel->rd_rel, CLASS_TUPLE_SIZE);
securec_check(rc, "", "");
if (newrel->partMap) {
+ /*
+ * The old partMap pointer has been used by an opened relation.
+ * Therefore, we need to ensure that the memory pointed to by the old partMap pointer cannot be freed
+ * and that the value in the memory pointed to by the old partMap pointer is new.
+ * 1. When the old and new partMaps are equal, keep old partMap pointer by SWAPFIELD.
+ * Then new partMap will be destroyed later.
+ * 2. When the old and new partMaps are not equal, keep old partMap pointer by SWAPFIELD
+ * and swap the memory that two partMap pointers point to in partition_rebuild_partmap.
+ * The new partMap pointer and the memory it points to are then destroyed later.
+ */
if (!keep_partmap) {
RebuildPartitonMap(newrel->partMap, relation->partMap);
}
@@ -5611,48 +5678,69 @@ static void GeneratedColFetch(TupleConstr *constr, HeapTuple htup, Relation adre
generatedCol = DatumGetChar(val);
}
}
- attrdef[attrdefIndex].generatedCol = generatedCol;
- genCols[attrdef[attrdefIndex].adnum - 1] = generatedCol;
+
if (generatedCol == ATTRIBUTE_GENERATED_STORED) {
+ attrdef[attrdefIndex].generatedCol = generatedCol;
+ genCols[attrdef[attrdefIndex].adnum - 1] = generatedCol;
constr->has_generated_stored = true;
}
}
-static void AttrAutoIncrementFetch(Relation relation, AttrNumber attnum, char* adbin)
+static void AttrAutoIncrementFetch(Relation relation, AttrNumber attnum, char* adbin, const char* adsrc)
{
- ConstrAutoInc* cons_autoinc = NULL;
- AutoIncrement* autoinc = NULL;
- const FmgrBuiltin* castfunc = NULL;
- Node *adexpr = (Node*)stringToNode_skip_extern_fields(adbin);
- Assert(adexpr != NULL);
- if (!IsA(adexpr, AutoIncrement)) {
+ if (adsrc == NULL || strcmp(adsrc, "AUTO_INCREMENT") != 0) {
return;
}
- autoinc = (AutoIncrement*)adexpr;
- cons_autoinc = (ConstrAutoInc*)MemoryContextAllocZero(LocalMyDBCacheMemCxt(), sizeof(ConstrAutoInc));
- cons_autoinc->attnum = attnum;
- if (relation->rd_rel->relpersistence == RELPERSISTENCE_TEMP) {
- cons_autoinc->next = find_tmptable_cache_autoinc(relation->rd_rel->relfilenode);
- cons_autoinc->seqoid = InvalidOid;
- } else {
- cons_autoinc->next = NULL;
- find_nextval_seqoid_walker(adexpr, &cons_autoinc->seqoid);
- }
+ ConstrAutoInc* cons_autoinc = NULL;
+ Node *adexpr = NULL;
+ AutoIncrement* autoinc = NULL;
+ const FmgrBuiltin* castfunc = NULL;
+ MemoryContext tmp_cxt;
+ MemoryContext old_cxt;
+
+    /* We cannot free Node *adexpr directly, so we allocate it in tmp_cxt to prevent memory leaks. */
+ tmp_cxt = AllocSetContextCreate(CurrentMemoryContext, "auto_increment temporary cxt",
+ ALLOCSET_SMALL_MINSIZE, ALLOCSET_SMALL_INITSIZE, ALLOCSET_SMALL_MAXSIZE);
+
+ PG_TRY();
+ {
+ old_cxt = MemoryContextSwitchTo(tmp_cxt);
+ adexpr = (Node*)stringToNode_skip_extern_fields(adbin);
+ (void)MemoryContextSwitchTo(old_cxt);
+ Assert(IsA(adexpr, AutoIncrement));
+ autoinc = (AutoIncrement*)adexpr;
+ cons_autoinc = (ConstrAutoInc*)MemoryContextAllocZero(LocalMyDBCacheMemCxt(), sizeof(ConstrAutoInc));
+ cons_autoinc->attnum = attnum;
+ if (relation->rd_rel->relpersistence == RELPERSISTENCE_TEMP) {
+ cons_autoinc->next = find_tmptable_cache_autoinc(relation->rd_rel->relfilenode);
+ cons_autoinc->seqoid = InvalidOid;
+ } else {
+ cons_autoinc->next = NULL;
+ find_nextval_seqoid_walker(adexpr, &cons_autoinc->seqoid);
+ }
- castfunc = (const FmgrBuiltin*)SearchBuiltinFuncByOid(autoinc->autoincin_funcid);
- cons_autoinc->datum2autoinc_func = castfunc ? (void*)(uintptr_t)castfunc->func : NULL;
- if (cons_autoinc->datum2autoinc_func == NULL && u_sess->hook_cxt.searchFuncHook != NULL) {
- PGFunction castFunc2 = ((searchFunc)(u_sess->hook_cxt.searchFuncHook))(autoinc->autoincin_funcid);
- cons_autoinc->datum2autoinc_func = castFunc2 ? (void*)(uintptr_t)castFunc2 : NULL;
+ castfunc = (const FmgrBuiltin*)SearchBuiltinFuncByOid(autoinc->autoincin_funcid);
+ cons_autoinc->datum2autoinc_func = castfunc ? (void*)(uintptr_t)castfunc->func : NULL;
+ if (cons_autoinc->datum2autoinc_func == NULL && u_sess->hook_cxt.searchFuncHook != NULL) {
+ PGFunction castFunc2 = ((searchFunc)(u_sess->hook_cxt.searchFuncHook))(autoinc->autoincin_funcid);
+ cons_autoinc->datum2autoinc_func = castFunc2 ? (void*)(uintptr_t)castFunc2 : NULL;
+ }
+ castfunc = (const FmgrBuiltin*)SearchBuiltinFuncByOid(autoinc->autoincout_funcid);
+ cons_autoinc->autoinc2datum_func = castfunc ? (void*)(uintptr_t)castfunc->func : NULL;
+ if (cons_autoinc->autoinc2datum_func == NULL && u_sess->hook_cxt.searchFuncHook != NULL) {
+ PGFunction castFunc2 = ((searchFunc)(u_sess->hook_cxt.searchFuncHook))(autoinc->autoincout_funcid);
+ cons_autoinc->autoinc2datum_func = castFunc2 ? (void*)(uintptr_t)castFunc2 : NULL;
+ }
+ relation->rd_att->constr->cons_autoinc = cons_autoinc;
}
- castfunc = (const FmgrBuiltin*)SearchBuiltinFuncByOid(autoinc->autoincout_funcid);
- cons_autoinc->autoinc2datum_func = castfunc ? (void*)(uintptr_t)castfunc->func : NULL;
- if (cons_autoinc->autoinc2datum_func == NULL && u_sess->hook_cxt.searchFuncHook != NULL) {
- PGFunction castFunc2 = ((searchFunc)(u_sess->hook_cxt.searchFuncHook))(autoinc->autoincout_funcid);
- cons_autoinc->autoinc2datum_func = castFunc2 ? (void*)(uintptr_t)castFunc2 : NULL;
+ PG_CATCH();
+ {
+ MemoryContextDelete(tmp_cxt);
+ PG_RE_THROW();
}
- relation->rd_att->constr->cons_autoinc = cons_autoinc;
+ PG_END_TRY();
+ MemoryContextDelete(tmp_cxt);
}
/*
@@ -5665,18 +5753,56 @@ static void UpdatedColFetch(TupleConstr *constr, HeapTuple htup, Relation adrel,
bool updatedCol = false;
if (HeapTupleHeaderGetNatts(htup->t_data, adrel->rd_att) >= Anum_pg_attrdef_adsrc_on_update) {
bool isnull = false;
- Datum val;
- val = fastgetattr(htup, Anum_pg_attrdef_adsrc_on_update, adrel->rd_att, &isnull);
- if (val && pg_strcasecmp(TextDatumGetCString(val), "") != 0) {
+ Datum val = fastgetattr(htup, Anum_pg_attrdef_adsrc_on_update, adrel->rd_att, &isnull);
+ char* adsrc_str = isnull ? NULL : TextDatumGetCString(val);
+ if (adsrc_str && pg_strcasecmp(adsrc_str, "") != 0) {
updatedCol = true;
} else {
updatedCol = false;
}
+ pfree_ext(adsrc_str);
}
attrdef[attrdefIndex].has_on_update = updatedCol;
on_update[attrdef[attrdefIndex].adnum - 1] = updatedCol;
}
+static void GetDefaultAttr(Relation relation, Relation adrel, HeapTuple htup, int attrdef_index, int attnum)
+{
+ Datum val;
+ bool isnull = false;
+ AttrDefault *attrdef = relation->rd_att->constr->defval;
+
+ if (t_thrd.proc->workingVersionNum >= GENERATED_COL_VERSION_NUM) {
+ GeneratedColFetch(relation->rd_att->constr, htup, adrel, attrdef_index);
+ }
+
+ UpdatedColFetch(relation->rd_att->constr, htup, adrel, attrdef_index);
+
+ val = fastgetattr(htup, Anum_pg_attrdef_adbin, adrel->rd_att, &isnull);
+ if (isnull) {
+ ereport(WARNING, (errmsg("null adbin for attr %s of rel %s",
+ NameStr(relation->rd_att->attrs[attnum - 1].attname), RelationGetRelationName(relation))));
+ } else {
+ char* adsrc_str = NULL;
+ char* adbin_str = TextDatumGetCString(val);
+ attrdef[attrdef_index].adbin = MemoryContextStrdup(LocalMyDBCacheMemCxt(), adbin_str);
+ val = fastgetattr(htup, Anum_pg_attrdef_adsrc, adrel->rd_att, &isnull);
+ adsrc_str = (isnull) ? NULL : TextDatumGetCString(val);
+ if (!attrdef[attrdef_index].has_on_update) {
+ AttrAutoIncrementFetch(relation, attnum, adbin_str, adsrc_str);
+ }
+ pfree(adbin_str);
+ pfree_ext(adsrc_str);
+ }
+
+ val = fastgetattr(htup, Anum_pg_attrdef_adbin_on_update, adrel->rd_att, &isnull);
+ if (!isnull) {
+ char* adbin_str = TextDatumGetCString(val);
+ attrdef[attrdef_index].adbin_on_update = MemoryContextStrdup(LocalMyDBCacheMemCxt(), adbin_str);
+ pfree(adbin_str);
+ }
+}
+
/*
* Load any default attribute value definitions for the relation.
*/
@@ -5686,8 +5812,6 @@ static void AttrDefaultFetch(Relation relation)
int ndef = relation->rd_att->constr->num_defval;
ScanKeyData skey;
HeapTuple htup;
- Datum val;
- bool isnull = false;
int i;
int found = 0;
@@ -5698,6 +5822,14 @@ static void AttrDefaultFetch(Relation relation)
while (HeapTupleIsValid(htup = systable_getnext(adscan))) {
Form_pg_attrdef adform = (Form_pg_attrdef)GETSTRUCT(htup);
+ if (DB_IS_CMPT(B_FORMAT)) {
+ if (adform->adnum == 0) {
+ found++;
+
+ GetDefaultAttr(relation, adrel, htup, ndef - 1, attrdef[ndef - 1].adnum);
+ continue;
+ }
+ }
for (i = 0; i < ndef; i++) {
if (adform->adnum != attrdef[i].adnum)
@@ -5709,26 +5841,7 @@ static void AttrDefaultFetch(Relation relation)
else
found++;
- if (t_thrd.proc->workingVersionNum >= GENERATED_COL_VERSION_NUM) {
- GeneratedColFetch(relation->rd_att->constr, htup, adrel, i);
- }
-
- UpdatedColFetch(relation->rd_att->constr, htup, adrel, i);
-
- val = fastgetattr(htup, Anum_pg_attrdef_adbin, adrel->rd_att, &isnull);
- if (isnull) {
- ereport(WARNING, (errmsg("null adbin for attr %s of rel %s",
- NameStr(relation->rd_att->attrs[adform->adnum - 1].attname), RelationGetRelationName(relation))));
- } else {
- attrdef[i].adbin = MemoryContextStrdup(LocalMyDBCacheMemCxt(), TextDatumGetCString(val));
- if (!attrdef[i].has_on_update) {
- AttrAutoIncrementFetch(relation, adform->adnum, attrdef[i].adbin);
- }
- }
-
- val = fastgetattr(htup, Anum_pg_attrdef_adbin_on_update, adrel->rd_att, &isnull);
- if (!isnull)
- attrdef[i].adbin_on_update = MemoryContextStrdup(LocalMyDBCacheMemCxt(), TextDatumGetCString(val));
+ GetDefaultAttr(relation, adrel, htup, i, adform->adnum);
break;
}
@@ -5772,6 +5885,7 @@ static void CheckConstraintFetch(Relation relation)
while (HeapTupleIsValid(htup = systable_getnext(conscan))) {
Form_pg_constraint conform = (Form_pg_constraint)GETSTRUCT(htup);
+ char* ccbin_str = NULL;
/* We want check constraints only */
if (conform->contype != CONSTRAINT_CHECK)
@@ -5792,8 +5906,9 @@ static void CheckConstraintFetch(Relation relation)
ereport(ERROR,
(errcode(ERRCODE_UNEXPECTED_NULL_VALUE),
errmsg("null conbin for rel %s", RelationGetRelationName(relation))));
-
- check[found].ccbin = MemoryContextStrdup(LocalMyDBCacheMemCxt(), TextDatumGetCString(val));
+ ccbin_str = TextDatumGetCString(val);
+ check[found].ccbin = MemoryContextStrdup(LocalMyDBCacheMemCxt(), ccbin_str);
+ pfree(ccbin_str);
found++;
}
@@ -8574,3 +8689,25 @@ char RelationGetRelReplident(Relation r)
return relreplident;
}
+bool IsRelationReplidentKey(Relation r, int attno)
+{
+ if (RelationGetRelReplident(r) == REPLICA_IDENTITY_FULL)
+ return true;
+
+ Oid replidindex = RelationGetReplicaIndex(r);
+ if (!OidIsValid(replidindex))
+ return true;
+
+ Relation idx_rel = RelationIdGetRelation(replidindex);
+
+ for (int natt = 0; natt < IndexRelationGetNumberOfKeyAttributes(idx_rel); natt++) {
+ if (idx_rel->rd_index->indkey.values[natt] == attno) {
+ RelationClose(idx_rel);
+ return true;
+ }
+ }
+
+ RelationClose(idx_rel);
+ return false;
+}
+
diff --git a/src/common/backend/utils/error/be_module.cpp b/src/common/backend/utils/error/be_module.cpp
index b96ed1193..bd0883406 100755
--- a/src/common/backend/utils/error/be_module.cpp
+++ b/src/common/backend/utils/error/be_module.cpp
@@ -117,6 +117,7 @@ const module_data module_map[] = {{MOD_ALL, "ALL"},
{MOD_SEC_POLICY, "SEC_POLICY"},
{MOD_SEC_SDD, "SEC_SDD"},
{MOD_SEC_TDE, "SEC_TDE"},
+ {MOD_COMM_FRAMEWORK, "COMM_FRAMEWORK"},
{MOD_COMM_PROXY, "COMM_PROXY"},
{MOD_COMM_POOLER, "COMM_POOLER"},
{MOD_VACUUM, "VACUUM"},
@@ -133,6 +134,7 @@ const module_data module_map[] = {{MOD_ALL, "ALL"},
{MOD_DMS, "DMS"},
{MOD_DSS, "DSS_API"},
{MOD_GPI, "GPI"},
+ {MOD_PARTITION, "PARTITION"},
/* add your module name above */
{MOD_MAX, "BACKEND"}};
diff --git a/src/common/backend/utils/error/elog.cpp b/src/common/backend/utils/error/elog.cpp
index 3d399a2c8..b41d0ce22 100644
--- a/src/common/backend/utils/error/elog.cpp
+++ b/src/common/backend/utils/error/elog.cpp
@@ -136,6 +136,7 @@ static void write_eventlog(int level, const char* line, int len);
#endif
static const int CREATE_ALTER_SUBSCRIPTION = 16;
+static const int CREATE_ALTER_USERMAPPING = 18;
/* Macro for checking t_thrd.log_cxt.errordata_stack_depth is reasonable */
#define CHECK_STACK_DEPTH() \
@@ -4124,6 +4125,9 @@ static char* mask_execute_direct_cmd(const char* query_string)
break;
}
}
+ if (position >= query_len) {
+ return NULL;
+ }
/* Parsing execute direct on detail content */
parse_query = (char*)palloc0(query_len - position);
rc = memcpy_s(parse_query, (query_len - position),
@@ -4132,6 +4136,10 @@ static char* mask_execute_direct_cmd(const char* query_string)
rc = strcat_s(parse_query, (query_len - position), ";\0");
securec_check(rc, "\0", "\0");
mask_query = mask_Password_internal(parse_query);
+ if (mask_query == NULL) {
+ pfree_ext(parse_query);
+ return NULL;
+ }
mask_len = strlen(mask_query);
/* Concatenate character string */
tmp_string = (char*)palloc0(mask_len + 1 + position + 1);
@@ -4351,6 +4359,7 @@ static char* mask_Password_internal(const char* query_string)
* 15 - for funCrypt
* 16 - create/alter subscription(CREATE_ALTER_SUBSCRIPTION)
* 17 - set password (b compatibility)
+ * 18 - create/alter user mapping
*/
int curStmtType = 0;
int prevToken[5] = {0};
@@ -4569,7 +4578,7 @@ static char* mask_Password_internal(const char* query_string)
}
idx = 0;
isPassword = false;
- if (curStmtType == 10 || curStmtType == 11) {
+ if (curStmtType == 10 || curStmtType == 11 || curStmtType == CREATE_ALTER_USERMAPPING) {
curStmtType = 0;
}
}
@@ -4629,6 +4638,9 @@ static char* mask_Password_internal(const char* query_string)
currToken = IDENT;
} else if (DB_IS_CMPT(B_FORMAT) && prevToken[0] == SET) {
curStmtType = 17;
+ } else if (prevToken[1] == MAPPING && prevToken[2] == SERVER && prevToken[3] == OPTIONS) {
+ curStmtType = CREATE_ALTER_USERMAPPING;
+ currToken = IDENT;
}
if (curStmtType != 17) {
@@ -4842,6 +4854,8 @@ static char* mask_Password_internal(const char* query_string)
case SERVER:
if (prevToken[0] == CREATE || prevToken[0] == ALTER) {
prevToken[1] = SERVER;
+ } else if (prevToken[1] == MAPPING) {
+ prevToken[2] = SERVER;
}
break;
case OPTIONS:
@@ -4851,6 +4865,8 @@ static char* mask_Password_internal(const char* query_string)
prevToken[3] = OPTIONS;
} else if (prevToken[1] == DATA_P && prevToken[2] == SOURCE_P) {
prevToken[3] = OPTIONS;
+ } else if (prevToken[1] == MAPPING && prevToken[2] == SERVER) {
+ prevToken[3] = OPTIONS;
}
break;
/* For create/alter text search dictionary */
@@ -4926,6 +4942,20 @@ static char* mask_Password_internal(const char* query_string)
*/
curStmtType = pg_strcasecmp(yylval.str, "conninfo") == 0 ? CREATE_ALTER_SUBSCRIPTION : 0;
idx = 0;
+ } else if (prevToken[1] == MAPPING && prevToken[2] == SERVER && prevToken[3] == OPTIONS) {
+ /*
+ * For create/alter user mapping: sensitive opt is 'password'.
+ * 'password' is usually marked as a standard Token.
+ * However, password will be taken as SCONST if wrapped around double-quote, which needs
+ * to be handled here.
+ */
+ if (pg_strcasecmp(yylval.str, "password") == 0) {
+ curStmtType = CREATE_ALTER_USERMAPPING;
+ isPassword = true;
+ } else {
+ curStmtType = 0;
+ }
+ idx = 0;
}
break;
case SCONST:
@@ -5036,6 +5066,11 @@ static char* mask_Password_internal(const char* query_string)
curStmtType = 0;
}
break;
+ case MAPPING:
+ if (prevToken[0] == USER) {
+ prevToken[1] = MAPPING;
+ }
+ break;
default:
break;
}
diff --git a/src/common/backend/utils/error/plog.cpp b/src/common/backend/utils/error/plog.cpp
index fe0b47ebf..d9bf72d1e 100644
--- a/src/common/backend/utils/error/plog.cpp
+++ b/src/common/backend/utils/error/plog.cpp
@@ -351,7 +351,7 @@ static inline void write_logmsg_with_one_item(IndicatorItem* item, struct timeva
}
/*
- * there are all 32 slots avaiable, and we these ranges:
+ * all 32 slots are available, and we use these ranges:
* R1: step = 16us,
* 0 ~ 16, < 32, < 48, ..., < 320
* R2:
@@ -1444,7 +1444,7 @@ static const int log2_map[LOG2_MAP_SIZE] = {0,
10};
/*
- * there are all 32 slots avaiable, and we these ranges:
+ * all 32 slots are available, and we use these ranges:
* R1: step = 50us,
* 0 ~ 50, < 100, < 150, ..., < 1000
* R2: step = 2^n ms
@@ -1478,7 +1478,7 @@ static int plog_hdp_get_slot(IndicatorItem* item)
}
/*
- * there are all 21 slots avaiable, and we these ranges:
+ * all 21 slots are available, and we use these ranges:
* R1: step = 100ms,
* 0 ~ 100, < 200, < 300, ..., < 1000
* R2: step = 1000ms
diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp
index 8f733ee19..b835b3e84 100644
--- a/src/common/backend/utils/init/globals.cpp
+++ b/src/common/backend/utils/init/globals.cpp
@@ -59,8 +59,10 @@ bool open_join_children = true;
bool will_shutdown = false;
/* hard-wired binary version number */
-const uint32 GRAND_VERSION_NUM = 92843;
+const uint32 GRAND_VERSION_NUM = 92844;
+const uint32 PARTITION_ENHANCE_VERSION_NUM = 92844;
+const uint32 SELECT_INTO_FILE_VERSION_NUM = 92844;
const uint32 SELECT_INTO_VAR_VERSION_NUM = 92834;
const uint32 DOLPHIN_ENABLE_DROP_NUM = 92830;
const uint32 SQL_PATCH_VERSION_NUM = 92675;
@@ -119,6 +121,7 @@ const uint32 PG_AUTHID_PASSWORDEXT_VERSION_NUM = 92830;
const uint32 SUPPORT_VIEW_AUTO_UPDATABLE = 92838;
const uint32 INSERT_RIGHT_REF_VERSION_NUM = 92842;
const uint32 B_DUMP_TRIGGER_VERSION_NUM = 92843;
+const uint32 CHARACTER_SET_VERSION_NUM = 92844;
/* Version number of the guc parameter backend_version added in V500R001C20 */
const uint32 V5R1C20_BACKEND_VERSION_NUM = 92305;
@@ -171,7 +174,9 @@ const uint32 FDW_SUPPORT_JOIN_AGG_VERSION_NUM = 92839;
const uint32 UNION_NULL_VERSION_NUM = 92841;
const uint32 CREATE_INDEX_IF_NOT_EXISTS_VERSION_NUM = 92843;
+const uint32 EVENT_VERSION_NUM = 92844;
+const uint32 SLOW_SQL_VERSION_NUM = 92844;
#ifdef PGXC
bool useLocalXid = false;
#endif
diff --git a/src/common/backend/utils/init/miscinit.cpp b/src/common/backend/utils/init/miscinit.cpp
index 69ae99992..a986fb89d 100644
--- a/src/common/backend/utils/init/miscinit.cpp
+++ b/src/common/backend/utils/init/miscinit.cpp
@@ -1678,6 +1678,9 @@ void AddToDataDirLockFile(int target_line, const char* str)
int lineno;
char* ptr = NULL;
char buffer[BLCKSZ];
+ char temp[BLCKSZ] = {0};
+ char new_file[MAXPGPATH];
+ bool has_split = false;
fd = open(DIRECTORY_LOCK_FILE, O_RDWR | PG_BINARY, 0);
@@ -1717,12 +1720,59 @@ void AddToDataDirLockFile(int target_line, const char* str)
ptr++;
}
+#ifndef ENABLE_MULTIPLE_NODES
+ /* If there are extra info, we should copy the string to right place in buffer */
+ if (target_line == LOCK_FILE_LINE_LISTEN_ADDR && ptr != NULL && strlen(ptr) != 0) {
+ char *end = ptr;
+ end = strchr(end, '\n');
+
+ if (end != NULL) {
+ end++;
+ /* set last character to '\0' */
+ char *invalid = strchr(end, '\n');
+ if (invalid != NULL) {
+ *(invalid + 1) = '\0';
+ }
+ int rcs = strcpy_s(temp, BLCKSZ - 1, end);
+ securec_check(rcs, "\0", "\0");
+ has_split = true;
+ }
+ }
+#endif
+
/*
* Write or rewrite the target line.
*/
int rcs = snprintf_s(ptr, buffer + sizeof(buffer) - ptr, buffer + sizeof(buffer) - ptr - 1, "%s\n", str);
securec_check_ss(rcs, "\0", "\0");
+ if (has_split) {
+ /* reload listen_addresses, we will write IP to postmaster.pid.new and then rename it to postmaster.pid */
+ size_t str_len = strlen(str);
+ rcs = snprintf_s(ptr + str_len + 1, buffer + sizeof(buffer) - ptr - str_len - 1,
+ buffer + sizeof(buffer) - ptr - str_len - 1 - 1, "%s", temp);
+ securec_check_ss(rcs, "\0", "\0");
+
+ rcs = snprintf_s(new_file, MAXPGPATH, MAXPGPATH - 1, "%s.new", DIRECTORY_LOCK_FILE);
+ securec_check_ss(rcs, "\0", "\0");
+ close(fd);
+ fd = open(new_file, O_RDWR | O_CREAT, 0600);
+
+ if (fd < 0) {
+ ereport(LOG, (errcode_for_file_access(),
+ errmsg("could not open new temp file \"%s\": %m", new_file)));
+ return;
+ }
+ pgstat_report_waitevent(WAIT_EVENT_LOCK_FILE_ADDTODATADIR_WRITE);
+ if (ftruncate(fd, (off_t)0) != 0) {
+ pgstat_report_waitevent(WAIT_EVENT_END);
+ ereport(LOG, (errcode_for_file_access(),
+ errmsg("could not clear file \"%s\": %m", DIRECTORY_LOCK_FILE)));
+ close(fd);
+ return;
+ }
+ pgstat_report_waitevent(WAIT_EVENT_END);
+ }
/*
* And rewrite the data. Since we write in a single kernel call, this
* update should appear atomic to onlookers.
@@ -1752,6 +1802,13 @@ void AddToDataDirLockFile(int target_line, const char* str)
if (close(fd) != 0) {
ereport(LOG, (errcode_for_file_access(), errmsg("could not write to file \"%s\": %m", DIRECTORY_LOCK_FILE)));
}
+
+ if (has_split) {
+ if (rename(new_file, DIRECTORY_LOCK_FILE)) {
+ ereport(LOG, (errcode_for_file_access(),
+ errmsg("failed to rename file \"%s\": %m", new_file)));
+ }
+ }
}
/* -------------------------------------------------------------------------
diff --git a/src/common/backend/utils/init/postinit.cpp b/src/common/backend/utils/init/postinit.cpp
index 5814706eb..237947173 100644
--- a/src/common/backend/utils/init/postinit.cpp
+++ b/src/common/backend/utils/init/postinit.cpp
@@ -132,6 +132,8 @@ static bool hasTuples(const Oid relOid);
THR_LOCAL LoginUserPtr user_login_hook = nullptr;
+#define MAX_COPY_NUM 20
+
/*** InitPostgres support ***/
AlarmCheckResult ConnAuthMethodChecker(Alarm* alarm, AlarmAdditionalParam* additionalParam)
{
@@ -2162,6 +2164,11 @@ void PostgresInitializer::InitLoadLocalSysCache(Oid db_oid, const char *db_name)
void PostgresInitializer::InitSession()
{
+    /* standby read on parallel redo */
+ if (RecoveryInProgress() && g_instance.attr.attr_storage.EnableHotStandby && u_sess->proc_cxt.clientIsGsql) {
+ u_sess->proc_cxt.gsqlRemainCopyNum = MAX_COPY_NUM;
+ }
+
/* Init rel cache for new session. */
InitSysCache();
diff --git a/src/common/backend/utils/mb/encnames.cpp b/src/common/backend/utils/mb/encnames.cpp
index 4caa7ded5..1a950cf47 100644
--- a/src/common/backend/utils/mb/encnames.cpp
+++ b/src/common/backend/utils/mb/encnames.cpp
@@ -36,6 +36,7 @@ pg_encname pg_encname_tbl[] = {
{"abc", PG_WIN1258}, /* alias for WIN1258 */
{"alt", PG_WIN866}, /* IBM866 */
{"big5", PG_BIG5}, /* Big5; Chinese for Taiwan multibyte set */
+ {"binary", PG_SQL_ASCII}, /* Binary; alias for sqlascii */
{"euccn", PG_EUC_CN}, /* EUC-CN; Extended Unix Code for simplified Chinese */
/* EUC-JIS-2004; Extended UNIX Code fixed
* Width for Japanese, standard JIS X 0213 */
@@ -85,6 +86,7 @@ pg_encname pg_encname_tbl[] = {
{"uhc", PG_UHC}, /* UHC; Korean Windows CodePage 949 */
{"unicode", PG_UTF8}, /* alias for UTF8 */
{"utf8", PG_UTF8}, /* alias for UTF8 */
+ {"utf8mb4", PG_UTF8}, /* alias for UTF8 */
{"vscii", PG_WIN1258}, /* alias for WIN1258 */
{"win", PG_WIN1251}, /* _dirty_ alias for windows-1251 (backward compatibility) */
{"win1250", PG_WIN1250}, /* alias for Windows-1250 */
@@ -122,6 +124,17 @@ pg_encname pg_encname_tbl[] = {
unsigned int pg_encname_tbl_sz = sizeof(pg_encname_tbl) / sizeof(pg_encname_tbl[0]) - 1;
+/*
+ * character set and its map supported in B format. We can find charset quickly
+ * by collation oid using FAST_GET_CHARSET_BY_COLL.
+ */
+pg_enc pg_enc_coll_map_b[] = {
+ PG_SQL_ASCII, /* SQL/ASCII */
+ PG_GBK, /* GBK (Windows-936) */
+ PG_UTF8, /* Unicode UTF8 */
+ PG_GB18030 /* GB18030 */
+};
+
/* ----------
* These are "official" encoding names.
* XXX must be sorted by the same order as enum pg_enc (in mb/pg_wchar.h)
diff --git a/src/common/backend/utils/misc/guc.cpp b/src/common/backend/utils/misc/guc.cpp
index 00deaa043..cb966095f 100755
--- a/src/common/backend/utils/misc/guc.cpp
+++ b/src/common/backend/utils/misc/guc.cpp
@@ -45,6 +45,8 @@
#include "catalog/namespace.h"
#include "catalog/pgxc_group.h"
#include "catalog/storage_gtt.h"
+#include "catalog/pg_db_role_setting.h"
+#include "catalog/pg_database.h"
#include "commands/async.h"
#ifdef ENABLE_MULTIPLE_NODES
#include "commands/copy.h"
@@ -52,6 +54,7 @@
#include "commands/prepare.h"
#include "commands/vacuum.h"
#include "commands/variable.h"
+#include "commands/dbcommands.h"
#include "commands/tablespace.h"
#include "commands/trigger.h"
#include "funcapi.h"
@@ -149,6 +152,7 @@
#include "utils/memutils.h"
#include "utils/pg_locale.h"
#include "utils/plancache.h"
+#include "utils/fmgroids.h"
#include "utils/portal.h"
#include "utils/ps_status.h"
#include "utils/rel_gs.h"
@@ -219,6 +223,7 @@
#define S_PER_D (60 * 60 * 24)
#define MS_PER_D (1000 * 60 * 60 * 24)
#define H_PER_D 24
+#define NUM_KEYS 2
#define AUDITFILE_THRESHOLD_LOWER_BOUND 100
const uint32 AUDIT_THRESHOLD_VERSION_NUM = 92735;
@@ -340,6 +345,7 @@ const char* sync_guc_variable_namelist[] = {"work_mem",
"enable_hdfs_predicate_pushdown",
"enable_hadoop_env",
"behavior_compat_options",
+ "b_format_behavior_compat_options",
#ifndef ENABLE_MULTIPLE_NODES
"plsql_compile_check_options",
#endif
@@ -452,6 +458,8 @@ static bool check_client_min_messages(int* newval, void** extra, GucSource sourc
static bool check_default_transaction_isolation(int* newval, void** extra, GucSource source);
static bool check_enable_stmt_track(bool* newval, void** extra, GucSource source);
static bool check_debug_assertions(bool* newval, void** extra, GucSource source);
+static void process_set_global_transation(Oid databaseid, Oid roleid, VariableSetStmt* setstmt);
+static VariableSetStmt* process_set_global_trans_args(ListCell* lcell);
#ifdef USE_BONJOUR
static bool check_bonjour(bool* newval, void** extra, GucSource source);
#endif
@@ -8538,6 +8546,7 @@ static void CheckAlterSystemSetPrivilege(const char* name)
"unix_socket_directory", "unix_socket_group", "unix_socket_permissions",
"krb_caseins_users", "krb_server_keyfile", "krb_srvname", "allow_system_table_mods", "enableSeparationOfDuty",
"modify_initial_password", "password_encryption_type", "password_policy", "audit_xid_info",
+ "no_audit_client", "full_audit_users", "audit_system_function_exec",
"allow_create_sysobject",
NULL
};
@@ -8925,9 +8934,17 @@ void ExecSetVariableStmt(VariableSetStmt* stmt, ParamListInfo paramInfo)
DefElem* item = (DefElem*)lfirst(head);
if (strcmp(item->defname, "transaction_isolation") == 0)
- SetPGVariable("transaction_isolation", list_make1(item->arg), stmt->is_local);
+ if (!stmt->is_local && ENABLE_SET_SESSION_TRANSACTION) {
+ SetPGVariable("default_transaction_isolation", list_make1(item->arg), stmt->is_local);
+ } else {
+ SetPGVariable("transaction_isolation", list_make1(item->arg), stmt->is_local);
+ }
else if (strcmp(item->defname, "transaction_read_only") == 0)
- SetPGVariable("transaction_read_only", list_make1(item->arg), stmt->is_local);
+ if (!stmt->is_local && ENABLE_SET_SESSION_TRANSACTION) {
+ SetPGVariable("default_transaction_read_only", list_make1(item->arg), stmt->is_local);
+ } else {
+ SetPGVariable("transaction_read_only", list_make1(item->arg), stmt->is_local);
+ }
else if (strcmp(item->defname, "transaction_deferrable") == 0)
SetPGVariable("transaction_deferrable", list_make1(item->arg), stmt->is_local);
else
@@ -8935,6 +8952,8 @@ void ExecSetVariableStmt(VariableSetStmt* stmt, ParamListInfo paramInfo)
(errcode(ERRCODE_INVALID_OPERATION),
errmsg("unexpected SET TRANSACTION element: %s", item->defname)));
}
+ } else if (strcmp(stmt->name, "GLOBAL TRANSACTION") == 0) {
+ process_set_global_transation(u_sess->proc_cxt.MyDatabaseId, InvalidOid, stmt);
} else if (strcmp(stmt->name, "SESSION CHARACTERISTICS") == 0) {
ListCell* head = NULL;
@@ -9064,6 +9083,7 @@ void ExecSetVariableStmt(VariableSetStmt* stmt, ParamListInfo paramInfo)
case VAR_SET_DEFINED:
if (strcmp(stmt->name, "USER DEFINED VARIABLE") == 0) {
ListCell *head = NULL;
+ List *resultlist = NIL;
foreach (head, stmt->defined_args) {
UserSetElem *elem = (UserSetElem *)lfirst(head);
@@ -9078,13 +9098,20 @@ void ExecSetVariableStmt(VariableSetStmt* stmt, ParamListInfo paramInfo)
elem->val = (Expr *)const_expression_to_const(QueryRewriteNonConstant(node));
}
+ resultlist = lappend(resultlist, (Node *)elem);
+ }
+
+ ListCell* l = NULL;
+ foreach (l, resultlist) {
+ UserSetElem *elem = (UserSetElem *)lfirst(l);
check_set_user_message(elem);
}
+ list_free(resultlist);
} else if (strcmp(stmt->name, "SELECT INTO VARLIST") == 0) {
ListCell *head = list_head(stmt->defined_args);
UserSetElem *elem = (UserSetElem *)lfirst(head);
Node *node = (Node *)copyObject(((SelectIntoVarList *)elem->val)->sublink);
- node = simplify_subselect_expression(node, paramInfo);
+ node = simplify_select_into_expression(node, paramInfo);
List *val_list = QueryRewriteSelectIntoVarList(node);
ListCell *name_cur = NULL;
@@ -11347,6 +11374,122 @@ static bool check_bonjour(bool* newval, void** extra, GucSource source)
}
#endif
+static void process_set_global_transation(Oid databaseid, Oid roleid, VariableSetStmt* setstmt)
+{
+ const char* dbname = get_database_name(databaseid);
+ /*
+ * Obtain a lock on the database and make sure it didn't go away in the
+ * meantime.
+ */
+ shdepLockAndCheckObject(DatabaseRelationId, databaseid);
+
+ /* Permission check. */
+ AlterDatabasePermissionCheck(databaseid, dbname);
+
+ char* valuestr = NULL;
+ HeapTuple tuple = NULL;
+ Relation rel = NULL;
+ ScanKeyData scankey[2];
+ SysScanDesc scan = NULL;
+ errno_t rc = EOK;
+
+ /* Get the old tuple, if any. */
+ rel = heap_open(DbRoleSettingRelationId, RowExclusiveLock);
+ ScanKeyInit(
+ &scankey[0], Anum_pg_db_role_setting_setdatabase, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(databaseid));
+ ScanKeyInit(
+ &scankey[1], Anum_pg_db_role_setting_setrole, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(roleid));
+ scan = systable_beginscan(rel, DbRoleSettingDatidRolidIndexId, true, NULL, NUM_KEYS, scankey);
+ tuple = systable_getnext(scan);
+
+ if (tuple == NULL) {
+ /* non-null valuestr means it's not RESET, so insert a new tuple */
+ HeapTuple newtuple = NULL;
+ Datum values[Natts_pg_db_role_setting];
+ bool nulls[Natts_pg_db_role_setting];
+ ListCell *head = NULL;
+ ArrayType *a = NULL;
+
+ rc = memset_s(values, sizeof(values), 0, sizeof(values));
+ securec_check(rc, "", "");
+ rc = memset_s(nulls, sizeof(nulls), 0, sizeof(nulls));
+ securec_check(rc, "", "");
+
+ values[Anum_pg_db_role_setting_setdatabase - 1] = ObjectIdGetDatum(databaseid);
+ values[Anum_pg_db_role_setting_setrole - 1] = ObjectIdGetDatum(roleid);
+ foreach(head, setstmt->args)
+ {
+ VariableSetStmt* vss = process_set_global_trans_args(head);
+ valuestr = ExtractSetVariableArgs(vss);
+ a = GUCArrayAdd(a, vss->name, valuestr);
+ }
+ values[Anum_pg_db_role_setting_setconfig - 1] = PointerGetDatum(a);
+ newtuple = heap_form_tuple(RelationGetDescr(rel), values, nulls);
+ (void)simple_heap_insert(rel, newtuple);
+
+ /* Update indexes */
+ CatalogUpdateIndexes(rel, newtuple);
+ } else {
+ Datum repl_val[Natts_pg_db_role_setting];
+ bool repl_null[Natts_pg_db_role_setting];
+ bool repl_repl[Natts_pg_db_role_setting];
+ HeapTuple newtuple = NULL;
+ bool isnull = false;
+ ArrayType *a = NULL;
+ ListCell *head = NULL;
+
+ rc = memset_s(repl_val, sizeof(repl_val), 0, sizeof(repl_val));
+ securec_check(rc, "", "");
+ rc = memset_s(repl_null, sizeof(repl_null), 0, sizeof(repl_null));
+ securec_check(rc, "", "");
+ rc = memset_s(repl_repl, sizeof(repl_repl), 0, sizeof(repl_repl));
+ securec_check(rc, "", "");
+
+ repl_repl[Anum_pg_db_role_setting_setconfig - 1] = true;
+ repl_null[Anum_pg_db_role_setting_setconfig - 1] = false;
+
+ /* Extract old value of setconfig */
+ Datum datum = heap_getattr(tuple, Anum_pg_db_role_setting_setconfig, RelationGetDescr(rel), &isnull);
+ a = isnull ? NULL : DatumGetArrayTypeP(datum);
+
+ foreach (head, setstmt->args)
+ {
+ VariableSetStmt* vss = process_set_global_trans_args(head);
+ valuestr = ExtractSetVariableArgs(vss);
+ a = GUCArrayAdd(a, vss->name, valuestr);
+ }
+ repl_val[Anum_pg_db_role_setting_setconfig - 1] = PointerGetDatum(a);
+ newtuple = heap_modify_tuple(tuple, RelationGetDescr(rel), repl_val, repl_null, repl_repl);
+ simple_heap_update(rel, &tuple->t_self, newtuple);
+
+ /* Update indexes */
+ CatalogUpdateIndexes(rel, newtuple);
+ }
+
+ systable_endscan(scan);
+
+ /* Close pg_db_role_setting, but keep lock till commit */
+ heap_close(rel, NoLock);
+ UnlockSharedObject(DatabaseRelationId, databaseid, 0, AccessShareLock);
+}
+
+static VariableSetStmt* process_set_global_trans_args(ListCell* lcell)
+{
+ DefElem *item = (DefElem *)lfirst(lcell);
+ VariableSetStmt *vss = makeNode(VariableSetStmt);
+ vss->args = list_make1(item->arg);
+ vss->kind = VAR_SET_VALUE;
+ vss->is_local = false;
+ if (strcmp(item->defname, "transaction_isolation") == 0) {
+ vss->name = "default_transaction_isolation";
+ } else if (strcmp(item->defname, "transaction_read_only") == 0) {
+ vss->name = "default_transaction_read_only";
+ } else
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_OPERATION),
+ errmsg("unexpected SET GLOBAL TRANSACTION element.")));
+ return vss;
+}
static bool check_gpc_syscache_threshold(bool* newval, void** extra, GucSource source)
{
@@ -12966,15 +13109,20 @@ void check_variable_value_info(const char* var_name, const Expr* var_expr)
{
bool found = false;
- if (nodeTag(var_expr) != T_Const) {
+ if (!IsA(var_expr, Const)) {
ereport(ERROR,
(errcode(ERRCODE_INVALID_OPERATION), errmsg("The value of user_defined variable must be a const")));
}
/* no hash table, we can only choose appending mode */
- if (u_sess->utils_cxt.set_user_params_htab == NULL || !StringIsValid(var_name)) {
+ if (u_sess->utils_cxt.set_user_params_htab == NULL) {
ereport(ERROR,
- (errcode(ERRCODE_INVALID_OPERATION), errmsg("invalid name or hash table is null.")));
+ (errcode(ERRCODE_INVALID_OPERATION), errmsg("hash table is null for user_defined variables.")));
+ }
+
+ if (!StringIsValid(var_name)) {
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_OPERATION), errmsg("invalid user_defined name.")));
}
GucUserParamsEntry *entry = (GucUserParamsEntry *)hash_search(u_sess->utils_cxt.set_user_params_htab,
diff --git a/src/common/backend/utils/misc/guc/guc_network.cpp b/src/common/backend/utils/misc/guc/guc_network.cpp
index d7985c1df..a0dced21d 100755
--- a/src/common/backend/utils/misc/guc/guc_network.cpp
+++ b/src/common/backend/utils/misc/guc/guc_network.cpp
@@ -21,6 +21,7 @@
#include
#include
#include
+#include
#include "utils/elog.h"
#ifdef HAVE_SYSLOG
@@ -50,6 +51,7 @@
#include "job/job_scheduler.h"
#include "libpq/auth.h"
#include "libpq/be-fsstubs.h"
+#include "libpq/ip.h"
#include "libpq/libpq.h"
#include "libpq/pqformat.h"
#include "miscadmin.h"
@@ -157,6 +159,11 @@ static void assign_comm_ackchk_time(int newval, void* extra);
static bool CheckMaxInnerToolConnections(int* newval, void** extra, GucSource source);
static bool check_ssl(bool* newval, void** extra, GucSource source);
+#ifndef ENABLE_MULTIPLE_NODES
+static bool check_listen_addresses(char **newval, void **extra, GucSource source);
+static void assign_listen_addresses(const char *newval, void *extra);
+#endif
+
#ifdef LIBCOMM_SPEED_TEST_ENABLE
static void assign_comm_test_thread_num(int newval, void* extra);
static void assign_comm_test_msg_len(int newval, void* extra);
@@ -988,6 +995,7 @@ static void InitNetworkConfigureNamesString()
NULL,
NULL},
+#ifdef ENABLE_MULTIPLE_NODES
{{"listen_addresses",
PGC_POSTMASTER,
NODE_ALL,
@@ -1000,6 +1008,20 @@ static void InitNetworkConfigureNamesString()
NULL,
NULL,
NULL},
+#else
+ {{"listen_addresses",
+ PGC_SIGHUP,
+ NODE_ALL,
+ CONN_AUTH_SETTINGS,
+ gettext_noop("Sets the host name or IP address(es) to listen to."),
+ NULL,
+ GUC_LIST_INPUT},
+ &u_sess->attr.attr_network.ListenAddresses,
+ "localhost",
+ check_listen_addresses,
+ assign_listen_addresses,
+ NULL},
+#endif
{{"local_bind_address",
PGC_POSTMASTER,
@@ -1173,6 +1195,172 @@ static void assign_comm_ackchk_time(int newval, void* extra)
gs_set_ackchk_time(newval);
}
+#ifndef ENABLE_MULTIPLE_NODES
+static bool check_listen_addresses(char **newval, void **extra, GucSource source)
+{
+ if (*newval == NULL || strlen(*newval) == 0) {
+ GUC_check_errmsg("listen_addresses can not set to empty");
+ return false;
+ }
+
+ char* rawstring = NULL;
+ List* elemlist = NULL;
+ rawstring = pstrdup(*newval);
+ if (!SplitIdentifierString(rawstring, ',', &elemlist)) {
+ /* syntax error in list */
+ GUC_check_errmsg("invalid list syntax for \"listen_addresses\": %s", *newval);
+ list_free_ext(elemlist);
+ pfree(rawstring);
+ return false;
+ }
+ list_free_ext(elemlist);
+ pfree(rawstring);
+ return true;
+}
+
+static void transform_ip_to_addr(char* host_name, unsigned short port_number)
+{
+ char* service = NULL;
+ struct addrinfo* addrs = NULL;
+ struct addrinfo* addr = NULL;
+ struct addrinfo hint;
+ char portNumberStr[32];
+ int family = AF_UNSPEC;
+ errno_t rc = snprintf_s(portNumberStr, sizeof(portNumberStr), sizeof(portNumberStr) - 1, "%hu", port_number);
+ securec_check_ss(rc, "\0", "\0");
+ service = portNumberStr;
+ /* Initialize hint structure */
+ rc = memset_s(&hint, sizeof(hint), 0, sizeof(hint));
+ securec_check(rc, "\0", "\0");
+ hint.ai_family = family;
+ hint.ai_flags = AI_PASSIVE;
+ hint.ai_socktype = SOCK_STREAM;
+
+ int ret = pg_getaddrinfo_all(host_name, service, &hint, &addrs);
+ if (ret || addrs == NULL) {
+ if (host_name != NULL) {
+ ereport(LOG,
+ (errmsg("could not translate host name \"%s\", service \"%s\" to address: %s",
+ host_name,
+ service,
+ gai_strerror(ret))));
+ } else {
+ ereport(LOG,
+ (errmsg("could not translate service \"%s\" to address: %s", service, gai_strerror(ret))));
+ }
+ if (addrs != NULL) {
+ pg_freeaddrinfo_all(hint.ai_family, addrs);
+ }
+ return;
+ }
+
+ for (addr = addrs; addr; addr = addr->ai_next) {
+ if (!IS_AF_UNIX(family) && IS_AF_UNIX(addr->ai_family)) {
+ /*
+ * Only set up a unix domain socket when they really asked for it.
+ * The service/port is different in that case.
+ */
+ continue;
+ }
+ struct sockaddr* sinp = NULL;
+ char* result = NULL;
+
+ sinp = (struct sockaddr*)(addr->ai_addr);
+ if (addr->ai_family == AF_INET6) {
+ result = inet_net_ntop(AF_INET6,
+ &((struct sockaddr_in6*)sinp)->sin6_addr,
+ 128,
+ t_thrd.postmaster_cxt.LocalAddrList[t_thrd.postmaster_cxt.LocalIpNum],
+ IP_LEN);
+ } else if (addr->ai_family == AF_INET) {
+ result = inet_net_ntop(AF_INET,
+ &((struct sockaddr_in*)sinp)->sin_addr,
+ 32,
+ t_thrd.postmaster_cxt.LocalAddrList[t_thrd.postmaster_cxt.LocalIpNum],
+ IP_LEN);
+ }
+ if (result == NULL) {
+ ereport(WARNING, (errmsg("inet_net_ntop failed, error: %d", EAFNOSUPPORT)));
+ } else {
+ ereport(DEBUG5, (errmodule(MOD_COMM_FRAMEWORK),
+ errmsg("[reload listen IP]set LocalIpNum[%d] %s",
+ t_thrd.postmaster_cxt.LocalIpNum,
+ t_thrd.postmaster_cxt.LocalAddrList[t_thrd.postmaster_cxt.LocalIpNum])));
+ t_thrd.postmaster_cxt.LocalIpNum++;
+ }
+ }
+
+ /* finally free malloc memory */
+ if (addrs != NULL) {
+ pg_freeaddrinfo_all(hint.ai_family, addrs);
+ }
+}
+
+static void assign_listen_addresses(const char *newval, void *extra)
+{
+ if (t_thrd.postmaster_cxt.can_listen_addresses_reload && !IsUnderPostmaster) {
+ if (newval != NULL && strlen(newval) != 0 && u_sess->attr.attr_network.ListenAddresses != NULL &&
+ strcmp((const char *)newval, u_sess->attr.attr_network.ListenAddresses) != 0) {
+ ereport(WARNING,
+ (errmsg("Postmaster received signal to reload listen_addresses, update \"%s\" to \"%s\".",
+ u_sess->attr.attr_network.ListenAddresses, newval)));
+ t_thrd.postmaster_cxt.is_listen_addresses_reload = true;
+ }
+ }
+
+ if (IsUnderPostmaster) {
+ int i = 0;
+ errno_t rc = EOK;
+ char* rawstring = NULL;
+ List* elemlist = NULL;
+ rawstring = pstrdup(newval);
+ if (!SplitIdentifierString(rawstring, ',', &elemlist)) {
+ list_free_ext(elemlist);
+ pfree(rawstring);
+ return;
+ }
+ t_thrd.postmaster_cxt.LocalIpNum = 0;
+ for (i = 0; i < MAXLISTEN; i++) {
+ rc = memset_s(t_thrd.postmaster_cxt.LocalAddrList[i], IP_LEN, '\0', IP_LEN);
+ securec_check(rc, "", "");
+ }
+ ListCell* l = NULL;
+ ListCell* elem = NULL;
+ int checked_num = 0;
+ foreach(l, elemlist) {
+ char* curhost = (char*)lfirst(l);
+
+ /* Deduplicated listen IP */
+ int check = 0;
+ bool has_checked = false;
+ foreach(elem, elemlist) {
+ if (check >= checked_num) {
+ break;
+ }
+ if (strcmp(curhost, (char*)lfirst(elem)) == 0) {
+ has_checked = true;
+ break;
+ }
+ check++;
+ }
+ checked_num++;
+ if (has_checked) {
+ has_checked = false;
+ continue;
+ }
+
+ if (strcmp(curhost, "*") == 0) {
+ transform_ip_to_addr(NULL, (unsigned short)g_instance.attr.attr_network.PostPortNumber);
+ } else {
+ transform_ip_to_addr(curhost, (unsigned short)g_instance.attr.attr_network.PostPortNumber);
+ }
+ }
+ list_free_ext(elemlist);
+ pfree(rawstring);
+ }
+}
+#endif
+
#ifdef LIBCOMM_SPEED_TEST_ENABLE
static void assign_comm_test_thread_num(int newval, void* extra)
{
diff --git a/src/common/backend/utils/misc/guc/guc_security.cpp b/src/common/backend/utils/misc/guc/guc_security.cpp
index ff7dbbb7a..fd7f0906f 100755
--- a/src/common/backend/utils/misc/guc/guc_security.cpp
+++ b/src/common/backend/utils/misc/guc/guc_security.cpp
@@ -797,7 +797,7 @@ static void InitSecurityConfigureNamesInt()
&u_sess->attr.attr_security.Audit_DDL,
67121159,
0,
- 134217727,
+ 268435455,
NULL,
NULL,
NULL},
@@ -918,7 +918,20 @@ static void InitSecurityConfigureNamesInt()
NULL,
NULL,
NULL},
-
+ {{"audit_system_function_exec",
+ PGC_SIGHUP,
+ NODE_ALL,
+ AUDIT_OPTIONS,
+ gettext_noop("Sets whether audit system function execution or not."),
+ NULL,
+ 0},
+ &u_sess->attr.attr_security.audit_system_function_exec,
+ 0,
+ 0,
+ 1,
+ NULL,
+ NULL,
+ NULL},
/* End-of-list marker */
{{NULL,
(GucContext)0,
@@ -1205,6 +1218,29 @@ static void InitSecurityConfigureNamesString()
transparent_encrypt_kms_url_region_check,
NULL,
NULL},
+ {{"no_audit_client",
+ PGC_SIGHUP,
+ NODE_ALL,
+ AUDIT_OPTIONS,
+ gettext_noop("Sets the appname@ip should not be audited."),
+ NULL,
+ GUC_SUPERUSER_ONLY},
+ &u_sess->attr.attr_security.no_audit_client,
+ "",
+ NULL,
+ NULL,
+ NULL},
+ {{"full_audit_users",
+ PGC_SIGHUP,
+ NODE_ALL,
+ AUDIT_OPTIONS,
+ gettext_noop("Sets the users under comprehensive audit."),
+ NULL},
+ &u_sess->attr.attr_security.full_audit_users,
+ "",
+ NULL,
+ NULL,
+ NULL},
{{NULL,
(GucContext)0,
(GucNodeType)0,
diff --git a/src/common/backend/utils/misc/guc/guc_sql.cpp b/src/common/backend/utils/misc/guc/guc_sql.cpp
index fbf3be289..d2d87446b 100755
--- a/src/common/backend/utils/misc/guc/guc_sql.cpp
+++ b/src/common/backend/utils/misc/guc/guc_sql.cpp
@@ -173,6 +173,8 @@ static bool check_fencedUDFMemoryLimit(int* newval, void** extra, GucSource sour
static bool check_udf_memory_limit(int* newval, void** extra, GucSource source);
static bool check_inlist2joininfo(char** newval, void** extra, GucSource source);
static void assign_inlist2joininfo(const char* newval, void* extra);
+static bool check_b_format_behavior_compat_options(char **newval, void **extra, GucSource source);
+static void assign_b_format_behavior_compat_options(const char *newval, void *extra);
static bool check_behavior_compat_options(char** newval, void** extra, GucSource source);
static void assign_behavior_compat_options(const char* newval, void* extra);
static bool check_plsql_compile_behavior_compat_options(char** newval, void** extra, GucSource source);
@@ -195,6 +197,7 @@ static void InitSqlConfigureNamesReal();
static void InitSqlConfigureNamesString();
static void InitSqlConfigureNamesEnum();
#define FORBID_GUC_NUM 3
+#define B_FORMAT_FORBID_GUC_NUM 2
/*
* Although only "on", "off", and "safe_encoding" are documented, we
* accept all the likely variants of "on" and "off".
@@ -332,6 +335,17 @@ static const struct config_enum_entry multi_stats_options[] = {
{NULL, 0, false}
};
+typedef struct b_format_behavior_compat_entry {
+ const char *name; /* name of behavior compat entry */
+ int flag; /* bit flag position */
+} b_format_behavior_compat_entry;
+
+static const struct b_format_behavior_compat_entry b_format_behavior_compat_options[B_FORMAT_OPT_MAX] = {
+ {"set_session_transaction", B_FORMAT_OPT_ENABLE_SET_SESSION_TRANSACTION},
+ {"enable_set_variables", B_FORMAT_OPT_ENABLE_SET_VARIABLES},
+ {"enable_modify_column", B_FORMAT_OPT_ENABLE_MODIFY_COLUMN},
+ {"default_collation", B_FORMAT_OPT_DEFAULT_COLLATION}
+};
typedef struct behavior_compat_entry {
const char* name; /* name of behavior compat entry */
@@ -2726,7 +2740,18 @@ static void InitSqlConfigureNamesString()
check_inlist2joininfo,
assign_inlist2joininfo,
NULL},
-
+ {{"b_format_behavior_compat_options",
+ PGC_USERSET,
+ NODE_ALL,
+ COMPAT_OPTIONS,
+ gettext_noop("b format compatibility options"),
+ NULL,
+ GUC_LIST_INPUT | GUC_REPORT},
+ &u_sess->attr.attr_sql.b_format_behavior_compat_string,
+ "",
+ check_b_format_behavior_compat_options,
+ assign_b_format_behavior_compat_options,
+ NULL},
{{"behavior_compat_options",
PGC_USERSET,
NODE_ALL,
@@ -3143,6 +3168,13 @@ static void AssignQueryDop(int newval, void* extra)
u_sess->opt_cxt.max_query_dop = abs(newval);
}
+#ifndef ENABLE_MULTIPLE_NODES
+ /* do not reset backend threads tag */
+ if (u_sess->opt_cxt.query_dop > 1 &&
+ (t_thrd.role == WORKER || t_thrd.role == THREADPOOL_WORKER)) {
+ u_sess->opt_cxt.smp_enabled = true;
+ }
+#endif
}
/*
@@ -3166,7 +3198,7 @@ static bool check_statement_max_mem(int* newval, void** extra, GucSource source)
if ((*newval < 0) || (*newval > 0 && *newval < SIMPLE_THRESHOLD)) {
if (source != PGC_S_FILE)
ereport(WARNING,
- (errmsg("query max mem can not be set lower than %dMB, so the guc variable is not avaiable.",
+ (errmsg("query max mem can not be set lower than %dMB, so the guc variable is not available.",
MEM_THRESHOLD)));
*newval = 0;
}
@@ -3180,7 +3212,7 @@ static bool check_statement_mem(int* newval, void** extra, GucSource source)
if (source != PGC_S_FILE)
ereport(WARNING,
(errmsg(
- "query mem can not be set lower than %dMB, so the guc variable is not avaiable.", MEM_THRESHOLD)));
+ "query mem can not be set lower than %dMB, so the guc variable is not available.", MEM_THRESHOLD)));
*newval = 0;
}
@@ -3273,6 +3305,110 @@ static void assign_inlist2joininfo(const char* newval, void* extra)
}
}
#ifdef ENABLE_MULTIPLE_NODES
+static bool b_format_forbid_distribute_parameter(const char *elem)
+{
+ const char *forbidList[] = {
+ "set_session_transaction",
+ "enable_set_variables",
+ "enable_modify_column"
+ };
+ for (int i = 0; i < B_FORMAT_FORBID_GUC_NUM; i++) {
+ if (strcmp(forbidList[i], elem) == 0) {
+ return true;
+ }
+ }
+ return false;
+}
+#endif
+/*
+ * check_b_format_behavior_compat_options: GUC check_hook for behavior compat options
+ */
+static bool check_b_format_behavior_compat_options(char **newval, void **extra, GucSource source)
+{
+ char *rawstring = NULL;
+ List *elemlist = NULL;
+ ListCell *cell = NULL;
+ int start = 0;
+
+ /* Need a modifiable copy of string */
+ rawstring = pstrdup(*newval);
+ /* Parse string into list of identifiers */
+ if (!SplitIdentifierString(rawstring, ',', &elemlist)) {
+ /* syntax error in list */
+ GUC_check_errdetail("invalid parameter for behavior compat information.");
+ pfree(rawstring);
+ list_free(elemlist);
+
+ return false;
+ }
+
+ foreach(cell, elemlist)
+ {
+ const char *item = (const char *)lfirst(cell);
+ bool nfound = true;
+
+ for (start = 0; start < B_FORMAT_OPT_MAX; start++) {
+#ifdef ENABLE_MULTIPLE_NODES
+ if (b_format_forbid_distribute_parameter(item)) {
+ GUC_check_errdetail("behavior compat option %s can not use"
+ " in distributed database system",
+ item);
+ pfree(rawstring);
+ list_free(elemlist);
+ return false;
+ }
+#endif
+ if (strcmp(item, b_format_behavior_compat_options[start].name) == 0) {
+ nfound = false;
+ break;
+ }
+ }
+ if (nfound) {
+ GUC_check_errdetail("invalid behavior compat option \"%s\"", item);
+ pfree(rawstring);
+ list_free(elemlist);
+ return false;
+ }
+ }
+
+ pfree(rawstring);
+ list_free(elemlist);
+
+ return true;
+}
+
+/*
+ * assign_b_format_behavior_compat_options: GUC assign_hook for distribute_test_param
+ */
+static void assign_b_format_behavior_compat_options(const char *newval, void *extra)
+{
+ char *rawstring = NULL;
+ List *elemlist = NULL;
+ ListCell *cell = NULL;
+ int start = 0;
+ int result = 0;
+
+ rawstring = pstrdup(newval);
+ (void)SplitIdentifierString(rawstring, ',', &elemlist);
+
+ u_sess->utils_cxt.b_format_behavior_compat_flags = 0;
+ foreach(cell, elemlist)
+ {
+ for (start = 0; start < B_FORMAT_OPT_MAX; start++) {
+ const char *item = (const char *)lfirst(cell);
+
+ if (strcmp(item, b_format_behavior_compat_options[start].name) == 0 &&
+ !(result & b_format_behavior_compat_options[start].flag))
+ result += b_format_behavior_compat_options[start].flag;
+ }
+ }
+
+ pfree(rawstring);
+ list_free(elemlist);
+
+ u_sess->utils_cxt.b_format_behavior_compat_flags = result;
+}
+#ifdef ENABLE_MULTIPLE_NODES
static bool ForbidDistributeParameter(const char* elem)
{
const char *forbidList[] = {
diff --git a/src/common/backend/utils/misc/guc/guc_storage.cpp b/src/common/backend/utils/misc/guc/guc_storage.cpp
index e052b5080..1b8f64f63 100755
--- a/src/common/backend/utils/misc/guc/guc_storage.cpp
+++ b/src/common/backend/utils/misc/guc/guc_storage.cpp
@@ -1152,6 +1152,21 @@ static void InitStorageConfigureNamesBool()
NULL,
NULL},
#endif
+
+#ifndef ENABLE_MULTIPLE_NODES
+ {{"enable_availablezone",
+ PGC_POSTMASTER,
+ NODE_SINGLENODE,
+ AUDIT_OPTIONS,
+ gettext_noop("enable identifying available zone during cascade standby connection"),
+ NULL},
+ &g_instance.attr.attr_storage.enable_availablezone,
+ false,
+ NULL,
+ NULL,
+ NULL},
+#endif
+
/* End-of-list marker */
{{NULL,
(GucContext)0,
diff --git a/src/common/backend/utils/misc/postgresql_distribute.conf.sample b/src/common/backend/utils/misc/postgresql_distribute.conf.sample
index 99de2f99f..a78de7431 100644
--- a/src/common/backend/utils/misc/postgresql_distribute.conf.sample
+++ b/src/common/backend/utils/misc/postgresql_distribute.conf.sample
@@ -820,6 +820,9 @@ audit_enabled = on
#audit_set_parameter = 1 # whether audit set parameter operation
#audit_xid_info = 0 # whether record xid info in audit log
#audit_thread_num = 1
+#no_audit_client = ""
+#full_audit_users = ""
+#audit_system_function_exec = 0
#Choose which style to print the explain info, normal,pretty,summary,run
#explain_perf_mode = normal
diff --git a/src/common/backend/utils/misc/postgresql_single.conf.sample b/src/common/backend/utils/misc/postgresql_single.conf.sample
index 6a93a9e0b..52c3584e8 100644
--- a/src/common/backend/utils/misc/postgresql_single.conf.sample
+++ b/src/common/backend/utils/misc/postgresql_single.conf.sample
@@ -763,6 +763,9 @@ audit_enabled = on
#audit_set_parameter = 1 # whether audit set parameter operation
#audit_xid_info = 0 # whether record xid info in audit log
#audit_thread_num = 1
+#no_audit_client = ""
+#full_audit_users = ""
+#audit_system_function_exec = 0
#Choose which style to print the explain info, normal,pretty,summary,run
#explain_perf_mode = normal
diff --git a/src/common/backend/utils/mmgr/memprot.cpp b/src/common/backend/utils/mmgr/memprot.cpp
index 7947c26fc..17a153716 100755
--- a/src/common/backend/utils/mmgr/memprot.cpp
+++ b/src/common/backend/utils/mmgr/memprot.cpp
@@ -859,7 +859,7 @@ void MemoryProtectFunctions::gs_memprot_free(void* ptr, Size sz)
template
void* MemoryProtectFunctions::gs_memprot_realloc(void* ptr, Size sz, Size newsz, bool needProtect)
{
- Assert(GS_MP_INITED); // Must be used when memory protect feature is avaiable
+ Assert(GS_MP_INITED); // Must be used when memory protect feature is available
void* ret = NULL;
diff --git a/src/common/backend/utils/mmgr/portalmem.cpp b/src/common/backend/utils/mmgr/portalmem.cpp
index 9e711477a..722804ce7 100755
--- a/src/common/backend/utils/mmgr/portalmem.cpp
+++ b/src/common/backend/utils/mmgr/portalmem.cpp
@@ -101,6 +101,20 @@ typedef struct portalhashent {
} \
} while (0)
+inline void ReleaseStreamGroup(Portal portal)
+{
+#ifndef ENABLE_MULTIPLE_NODES
+ if (!StreamThreadAmI()) {
+ portal->streamInfo.AttachToSession();
+ StreamNodeGroup::ReleaseStreamGroup(true);
+ portal->streamInfo.Reset();
+ }
+#else
+ /* multinode do nothing */
+ return;
+#endif
+}
+
/* -------------------portal_mem_cxt---------------------------------
* public portal interface functions
* ----------------------------------------------------------------
@@ -605,12 +619,7 @@ void PortalDrop(Portal portal, bool isTopCommit)
portal->holdStore = NULL;
}
-#ifndef ENABLE_MULTIPLE_NODES
- if (!StreamThreadAmI()) {
- portal->streamInfo.AttachToSession();
- StreamNodeGroup::ReleaseStreamGroup(true);
- }
-#endif
+ ReleaseStreamGroup(portal);
/* delete tuplestore storage, if any */
#ifndef ENABLE_MULTIPLE_NODES
@@ -892,6 +901,17 @@ void AtAbort_Portals(bool STP_rollback)
* commit and rollback patch.
*/
if(portal->status != PORTAL_ACTIVE) {
+#ifndef ENABLE_MULTIPLE_NODES
+ /*
+ * estate is under the queryDesc, and stream threads use it.
+ * we should wait all stream threads exit to cleanup queryDesc.
+ */
+ if (!StreamThreadAmI()) {
+ portal->streamInfo.AttachToSession();
+ StreamNodeGroup::ReleaseStreamGroup(true, STREAM_ERROR);
+ portal->streamInfo.Reset();
+ }
+#endif
MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
}
}
@@ -1104,7 +1124,9 @@ void AtSubAbort_Portals(SubTransactionId mySubid, SubTransactionId parentSubid,
* upcoming transaction-wide cleanup; they will be gone before we run
* PortalDrop.
*/
- portal->resowner = NULL;
+ portal->resowner = NULL;
+
+ ReleaseStreamGroup(portal);
/*
* Although we can't delete the portal data structure proper, we can
@@ -1124,6 +1146,7 @@ void AtSubAbort_Portals(SubTransactionId mySubid, SubTransactionId parentSubid,
* don't destory its memory context.
*/
if (portal->status != PORTAL_ACTIVE) {
+ ReleaseStreamGroup(portal);
MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
}
}
diff --git a/src/common/backend/utils/time/snapmgr.cpp b/src/common/backend/utils/time/snapmgr.cpp
index 2b0a5e402..7c78e5390 100644
--- a/src/common/backend/utils/time/snapmgr.cpp
+++ b/src/common/backend/utils/time/snapmgr.cpp
@@ -1055,6 +1055,8 @@ static void SnapshotResetXmin(void)
{
if (u_sess->utils_cxt.RegisteredSnapshots == 0 && u_sess->utils_cxt.ActiveSnapshot == NULL) {
t_thrd.pgxact->xmin = InvalidTransactionId;
+ t_thrd.proc->snapXmax = InvalidTransactionId;
+ t_thrd.proc->snapCSN = InvalidCommitSeqNo;
t_thrd.pgxact->csn_min = InvalidCommitSeqNo;
t_thrd.pgxact->csn_dr = InvalidCommitSeqNo;
}
diff --git a/src/common/interfaces/libpq/client_logic_cache/cached_column.cpp b/src/common/interfaces/libpq/client_logic_cache/cached_column.cpp
index 301d3f878..224bf55db 100644
--- a/src/common/interfaces/libpq/client_logic_cache/cached_column.cpp
+++ b/src/common/interfaces/libpq/client_logic_cache/cached_column.cpp
@@ -140,7 +140,6 @@ CachedColumn::~CachedColumn()
unsigned int CachedColumn::get_col_idx() const
{
- Assert(m_column_index > 0);
return m_column_index;
}
diff --git a/src/common/interfaces/libpq/client_logic_common/col_full_name.cpp b/src/common/interfaces/libpq/client_logic_common/col_full_name.cpp
index ead5c33f2..331386a5f 100644
--- a/src/common/interfaces/libpq/client_logic_common/col_full_name.cpp
+++ b/src/common/interfaces/libpq/client_logic_common/col_full_name.cpp
@@ -25,7 +25,10 @@
#include "libpq-int.h"
#include "securec.h"
-colFullName::colFullName() {}
+colFullName::colFullName()
+{
+ m_col_name = {{0}};
+}
colFullName::colFullName(const char *catalog_name, const char *schema_name, const char *rel_name, const char *colname)
: tableFullName(catalog_name, schema_name, rel_name)
diff --git a/src/common/interfaces/libpq/client_logic_common/table_full_name.cpp b/src/common/interfaces/libpq/client_logic_common/table_full_name.cpp
index 873d26101..eea35936b 100644
--- a/src/common/interfaces/libpq/client_logic_common/table_full_name.cpp
+++ b/src/common/interfaces/libpq/client_logic_common/table_full_name.cpp
@@ -25,7 +25,12 @@
#include "libpq-int.h"
#include "securec.h"
-tableFullName::tableFullName() {}
+tableFullName::tableFullName()
+{
+ m_catalog_name = {{0}};
+ m_schema_name = {{0}};
+ m_table_name = {{0}};
+}
tableFullName::~tableFullName() {}
diff --git a/src/common/interfaces/libpq/client_logic_expressions/expr_processor.cpp b/src/common/interfaces/libpq/client_logic_expressions/expr_processor.cpp
index 2f3fc3758..0ba717f19 100644
--- a/src/common/interfaces/libpq/client_logic_expressions/expr_processor.cpp
+++ b/src/common/interfaces/libpq/client_logic_expressions/expr_processor.cpp
@@ -234,7 +234,15 @@ bool exprProcessor::expand_expr(const Node * const expr, StatementData *statemen
expr_parts.param_ref = NULL;
expr_parts.operators = a_expr->name;
expr_parts_list->add(&expr_parts);
- }
+ } else if (IsA(lfirst(fl), ParamRef)) {
+ ExprParts expr_parts;
+ param_ref = (ParamRef *)lfirst(fl);
+ expr_parts.column_ref = column_ref;
+ expr_parts.aconst = NULL;
+ expr_parts.param_ref = param_ref;
+ expr_parts.operators = a_expr->name;
+ expr_parts_list->add(&expr_parts);
+ }
}
}
}
diff --git a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/register_gs_ktool.cpp b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/register_gs_ktool.cpp
index 23e7c153f..6f34e3122 100644
--- a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/register_gs_ktool.cpp
+++ b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/register_gs_ktool.cpp
@@ -19,9 +19,9 @@
* when CREATE CMKO, if KEY_STROE = gs_ktool, then:
* 1. KEY_PATH: gs_ktool use $key_id to identify keys, KEY_PATH = "gs_ktool/$key_id"
* 2. ALGORITHM: gs_ktool cannot generate asymmetric key pairs, so keys generated by gs_ktool are only
- * avaiable for AES_256 algorithm
+ * available for AES_256 algorithm
* if you register gs_ktool, you should be sure your system has installed gs_ktool, and the environment variables
- * and the configuration files are avaiable.
+ * and the configuration files are available.
*
* IDENTIFICATION
* src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/register_gs_ktool.cpp
diff --git a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/register_gs_ktool.h b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/register_gs_ktool.h
index fcdbda52f..d3c76b9cb 100644
--- a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/register_gs_ktool.h
+++ b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/register_gs_ktool.h
@@ -19,9 +19,9 @@
* when CREATE CMKO, if KEY_STROE = gs_ktool, then:
* 1. KEY_PATH: gs_ktool use $key_id to identify keys, KEY_PATH = "gs_ktool/$key_id"
* 2. ALGORITHM: gs_ktool cannot generate asymmetric key pairs, so keys generated by gs_ktool are only
- * avaiable for AES_256 algorithm
+ * available for AES_256 algorithm
* if you register gs_ktool, you should be sure your system has installed gs_ktool, and the environment variables
- * and the configuration files are avaiable.
+ * and the configuration files are available.
*
* IDENTIFICATION
* src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/register_gs_ktool.h
diff --git a/src/common/interfaces/libpq/client_logic_processor/Makefile b/src/common/interfaces/libpq/client_logic_processor/Makefile
index 703a62f40..044e5a186 100644
--- a/src/common/interfaces/libpq/client_logic_processor/Makefile
+++ b/src/common/interfaces/libpq/client_logic_processor/Makefile
@@ -22,7 +22,7 @@ override CFLAGS := $(filter-out -fPIE, $(CFLAGS)) -fPIC -fstack-protector-all
# We can't use Makefile variables here because the MSVC build system scrapes
# OBJS from this file.
-OBJS=raw_value.o raw_values_cont.o processor_utils.o post_stmt_processor.o stmt_processor.o func_processor.o ../frontend_parser/parser.o ../client_logic_common/client_logic_utils.o where_clause_processor.o values_processor.o create_stmt_processor.o
+OBJS=raw_value.o raw_values_cont.o processor_utils.o pre_stmt_processor.o post_stmt_processor.o stmt_processor.o func_processor.o ../frontend_parser/parser.o ../client_logic_common/client_logic_utils.o where_clause_processor.o values_processor.o create_stmt_processor.o
OBJS+=prepared_statements_list.o prepared_statement.o func_hardcoded_values.o
include $(top_builddir)/src/Makefile.global
diff --git a/src/common/interfaces/libpq/client_logic_processor/create_stmt_processor.cpp b/src/common/interfaces/libpq/client_logic_processor/create_stmt_processor.cpp
index b180e4065..76b563c94 100644
--- a/src/common/interfaces/libpq/client_logic_processor/create_stmt_processor.cpp
+++ b/src/common/interfaces/libpq/client_logic_processor/create_stmt_processor.cpp
@@ -320,6 +320,11 @@ bool createStmtProcessor::process_column_defintion(ColumnDef *column, Node *elem
return false;
}
+ if (column->typname != NULL && column->typname->arrayBounds != NULL) {
+ fprintf(stderr, "ERROR(CLIENT): Creating encrypted columns of type array is not supported\n");
+ return false;
+ }
+
char column_key_name[NAMEDATALEN * 4];
errno_t rc = EOK;
rc = memset_s(column_key_name, NAMEDATALEN * 4, 0, NAMEDATALEN * 4);
diff --git a/src/common/interfaces/libpq/client_logic_processor/pre_stmt_processor.cpp b/src/common/interfaces/libpq/client_logic_processor/pre_stmt_processor.cpp
new file mode 100644
index 000000000..2f6513cd7
--- /dev/null
+++ b/src/common/interfaces/libpq/client_logic_processor/pre_stmt_processor.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2022 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * -------------------------------------------------------------------------
+ *
+ * pre_stmt_processor.cpp
+ *
+ * IDENTIFICATION
+ * src\common\interfaces\libpq\client_logic_processor\pre_stmt_processor.cpp
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "pg_config.h"
+
+#include "stmt_processor.h"
+#include "cl_state.h"
+#include "client_logic_cache/cache_refresh_type.h"
+#include "client_logic_hooks/hooks_manager.h"
+#include "client_logic_common/statement_data.h"
+#include "libpq-fe.h"
+#include "libpq-int.h"
+#include "prepared_statement.h"
+#include "prepared_statements_list.h"
+
+#include
+
+#define BYTEAWITHOUTORDERWITHEQUALCOL_NAME "byteawithoutorderwithequalcol"
+#define BYTEAWITHOUTORDERCOL_NAME "byteawithoutordercol"
+
+bool Processor::run_pre_prepare_statement(const PrepareStmt *prepare_stmt, StatementData *statement_data)
+{
+ if (statement_data->GetCacheManager()->is_cache_empty()) {
+ return true;
+ }
+
+ /*
+ * func : call create() function instead but
+ * we need to make sure to always delete the prepared statements when they
+ * are no longer in use
+ */
+ (void)statement_data->conn->client_logic->pendingStatements->get_or_create(prepare_stmt->name);
+ statement_data->stmtName = prepare_stmt->name;
+ bool pre_ret = run_pre_statement(prepare_stmt->query, statement_data);
+ if (pre_ret) {
+ pre_ret = process_prepare_arg_types(prepare_stmt, statement_data);
+ }
+ return pre_ret;
+}
+
+/*
+ * @Description: replace original arg types for cl parameters in prepare statement declaration
+ * with corresponding client logic type to pass server side verification
+ * @IN PrepareStmt: prepare statement
+ * @IN StatementData: query data for client logic
+ */
+bool Processor::process_prepare_arg_types(const PrepareStmt *prepare_stmt, StatementData *statement_data)
+{
+ PreparedStatement *current_statement =
+ statement_data->conn->client_logic->pendingStatements->get_or_create(statement_data->stmtName);
+ bool is_invalid_args = current_statement == NULL || current_statement->cached_params == NULL ||
+ current_statement->cached_params->size() == 0;
+ if (is_invalid_args) {
+ return true;
+ }
+ List *arg_types_list = prepare_stmt->argtypes;
+ if (arg_types_list == 0) {
+ return true;
+ }
+ ListCell *curr_arg = list_head(arg_types_list);
+ /* estimate new query size change */
+ size_t new_size = 0;
+ bool need_replacement = false;
+ for (unsigned int i = 0; i < current_statement->cached_params->size() && curr_arg != NULL;
+ i++, curr_arg = lnext(curr_arg)) {
+ const ICachedColumn *cached_column = current_statement->cached_params->at(i);
+ /* check if column has policy for processing */
+ const char *datatype = NULL;
+ if (cached_column != NULL) {
+ if (cached_column->get_data_type() == BYTEAWITHOUTORDERWITHEQUALCOLOID) {
+ datatype = BYTEAWITHOUTORDERWITHEQUALCOL_NAME;
+ } else if (cached_column->get_data_type() == BYTEAWITHOUTORDERCOLOID) {
+ datatype = BYTEAWITHOUTORDERCOL_NAME;
+ } else {
+ continue;
+ }
+ if (datatype != NULL) {
+ need_replacement = true;
+ new_size += strlen(datatype) -
+ (((TypeName *)lfirst(curr_arg))->end_location - ((TypeName *)lfirst(curr_arg))->location);
+ }
+ }
+ }
+ if (!need_replacement) {
+ return true;
+ }
+ new_size += strlen(statement_data->params.adjusted_query);
+ char *new_query = (char *)malloc(new_size + 1);
+ if (new_query == NULL) {
+ return false;
+ }
+ /* construct new query */
+ arg_types_list = prepare_stmt->argtypes;
+ curr_arg = list_head(arg_types_list);
+ int new_offset = 0, old_offset = 0;
+ for (unsigned int i = 0; i < current_statement->cached_params->size() && curr_arg != NULL;
+ i++, curr_arg = lnext(curr_arg)) {
+ const char *datatype = NULL;
+ const ICachedColumn *cached_column = current_statement->cached_params->at(i);
+ /* check if column has policy for processing */
+ if (cached_column != NULL) {
+ if (cached_column->get_data_type() == BYTEAWITHOUTORDERWITHEQUALCOLOID) {
+ datatype = BYTEAWITHOUTORDERWITHEQUALCOL_NAME;
+ } else if (cached_column->get_data_type() == BYTEAWITHOUTORDERCOLOID) {
+ datatype = BYTEAWITHOUTORDERCOL_NAME;
+ } else {
+ continue;
+ }
+ if (datatype != NULL) {
+ check_strncpy_s(strncpy_s(new_query + new_offset, new_size + 1 - new_offset,
+ statement_data->params.adjusted_query + old_offset,
+ ((TypeName *)lfirst(curr_arg))->location - old_offset));
+ new_offset += ((TypeName *)lfirst(curr_arg))->location - old_offset;
+ old_offset = ((TypeName *)lfirst(curr_arg))->end_location;
+ check_strcpy_s(strcpy_s(new_query + new_offset, new_size + 1 - new_offset, datatype));
+ new_offset += strlen(datatype);
+ }
+ }
+ }
+ /* copy the rest */
+ check_strcpy_s(strcpy_s(new_query + new_offset, new_size + 1 - new_offset,
+ statement_data->params.adjusted_query + old_offset));
+ libpq_free(statement_data->params.new_query);
+ statement_data->params.new_query = new_query;
+ statement_data->params.new_query_size = new_size;
+ statement_data->params.adjusted_query = statement_data->params.new_query;
+ return true;
+}
\ No newline at end of file
diff --git a/src/common/interfaces/libpq/client_logic_processor/raw_value.cpp b/src/common/interfaces/libpq/client_logic_processor/raw_value.cpp
index 2e2bb8b52..a1a466cbe 100644
--- a/src/common/interfaces/libpq/client_logic_processor/raw_value.cpp
+++ b/src/common/interfaces/libpq/client_logic_processor/raw_value.cpp
@@ -31,8 +31,7 @@
#include "client_logic_fmt/gs_fmt.h"
#include "client_logic_cache/icached_column.h"
-extern unsigned char *PQescapeByteaConn1(PGconn *conn, const unsigned char *from, size_t from_length, size_t *to_length,
- bool add_quotes);
+unsigned char *PQescapeByteaCe(PGconn *conn, const unsigned char *from, size_t fromlen, size_t *tolen, bool addquote);
RawValue::RawValue(PGconn *conn)
: m_is_param(false),
@@ -168,7 +167,7 @@ bool RawValue::process(const ICachedColumn *cached_column, char *err_msg)
if (!m_is_param || m_data_value_format == 0) {
/* replace processedData with its escaped version */
size_t processed_data_size_tmp(0);
- const char *processed_data_tmp = (char *)PQescapeByteaConn1(m_conn, (const unsigned char *)m_processed_data,
+ const char *processed_data_tmp = (char *)PQescapeByteaCe(m_conn, (const unsigned char *)m_processed_data,
m_processed_data_size, &processed_data_size_tmp, !m_is_param);
free_processed_data();
if (!processed_data_tmp) {
@@ -180,7 +179,7 @@ bool RawValue::process(const ICachedColumn *cached_column, char *err_msg)
m_processed_data = (unsigned char *)processed_data_tmp;
m_processed_data_size = processed_data_size_tmp -
- 1; /* the \0 is counted in the orignal PQescapeByteaConn function, so we need -1 */
+        1; /* the \0 is counted in the original PQescapeByteaCe function, so we need -1 */
}
return true;
diff --git a/src/common/interfaces/libpq/client_logic_processor/stmt_processor.cpp b/src/common/interfaces/libpq/client_logic_processor/stmt_processor.cpp
index 9535e2094..92dd294ea 100755
--- a/src/common/interfaces/libpq/client_logic_processor/stmt_processor.cpp
+++ b/src/common/interfaces/libpq/client_logic_processor/stmt_processor.cpp
@@ -778,14 +778,20 @@ bool Processor::run_pre_select_target_list(List* targetList, StatementData* stat
return true;
}
-bool Processor::run_pre_select_statement(const SelectStmt * const select_stmt, StatementData *statement_data)
+bool Processor::run_pre_select_statement(const SelectStmt * const select_stmt, StatementData *statement_data,
+ bool *unencrypted)
{
SetOperation set_operation(SETOP_NONE);
bool all(false);
+ bool res(false);
CachedColumns cached_columns(false, true);
CachedColumns cached_columns_parents(false, true);
- return run_pre_select_statement(select_stmt, set_operation, all, statement_data,
+ res = run_pre_select_statement(select_stmt, set_operation, all, statement_data,
&cached_columns, &cached_columns_parents);
+ if (unencrypted != NULL && cached_columns.size() == 0) {
+        *unencrypted = true; /* if unencrypted is true, it means the table is not fully encrypted */
+ }
+ return res;
}
bool Processor::run_pre_select_statement(const SelectStmt * const select_stmt, const SetOperation &parent_set_operation,
@@ -1031,22 +1037,6 @@ bool Processor::run_pre_delete_statement(const DeleteStmt *delete_stmt, Statemen
return true;
}
-
-bool Processor::run_pre_prepare_statement(const PrepareStmt *prepare_stmt, StatementData *statement_data)
-{
- if (statement_data->GetCacheManager()->is_cache_empty())
- return true;
-
- /*
- * func : call create() function instead but
- * we need to make sure to always delete the prepared statements when they
- * are no longer in use
- */
- statement_data->conn->client_logic->pendingStatements->get_or_create(prepare_stmt->name);
- statement_data->stmtName = prepare_stmt->name;
- return run_pre_statement(prepare_stmt->query, statement_data);
-}
-
bool Processor::run_pre_execute_statement(const ExecuteStmt * const execute_stmt, StatementData *statement_data)
{
if (statement_data->GetCacheManager()->is_cache_empty())
@@ -1090,17 +1080,6 @@ bool Processor::run_pre_copy_statement(const CopyStmt * const copy_stmt, Stateme
return true; /* nothing to do */
}
- if (copy_stmt->filename || copy_stmt->encrypted) { // we have a file the data will not pass through the client
- fprintf(stderr, "ERROR(CLIENT): column encryption does't support copy from server file to table\n");
- return false;
- }
- if (copy_stmt->query)
- /* we have a COPY FILE statement so the data is not passed through the client - it's a server side operation. */
- if (copy_stmt->filename) {
- // nothing to do
- return true;
- }
-
PreparedStatement *prepared_statement = NULL;
if (copy_stmt->query) {
/*
@@ -1109,9 +1088,29 @@ bool Processor::run_pre_copy_statement(const CopyStmt * const copy_stmt, Stateme
*/
if (nodeTag(copy_stmt->query) != T_SelectStmt)
return false;
- if (!run_pre_select_statement((SelectStmt *)copy_stmt->query, statement_data))
+ bool unecrypted = false;
+ bool res = run_pre_select_statement((SelectStmt *)copy_stmt->query, statement_data, &unecrypted);
+ if (!res) {
return false;
+ }
+ if (unecrypted) {
+ return true;
+ }
+ if (copy_stmt->filename || copy_stmt->encrypted) { // we have a file the data will not pass through the client
+ fprintf(stderr, "ERROR(CLIENT): column encryption does't support copy from server file to table\n");
+ return false;
+ }
} else if (copy_stmt->relation) {
+ CachedColumns cached_column_range(false);
+ (void)statement_data->conn->client_logic->m_cached_column_manager->get_cached_columns(copy_stmt->relation,
+ &cached_column_range);
+ if (cached_column_range.size() == 0) {
+ return true;
+ }
+ if (copy_stmt->filename || copy_stmt->encrypted) { // we have a file the data will not pass through the client
+ fprintf(stderr, "ERROR(CLIENT): column encryption does't support copy from server file to table\n");
+ return false;
+ }
/*
* "COPY FROM STDIN" requires us to build a cached columns list for the csv that will be inserted
* "COPY TO STDOUT" does not need any more processing
@@ -1717,7 +1716,8 @@ bool Processor::run_pre_create_function_stmt(const CreateFunctionStmt *stmt, Sta
foreach_cell (lc, stmt->parameters) {
FunctionParameter* fp = (FunctionParameter*) lfirst(lc);
const char* p_name = strVal(llast(fp->argType->names));
- if (strcmp(p_name, "byteawithoutordercol") == 0 || strcmp(p_name, "byteawithoutorderwithequalcol") == 0) {
+ if (strcmp(p_name, "byteawithoutordercol") == 0 || strcmp(p_name, "byteawithoutorderwithequalcol") == 0 ||
+ strcmp(p_name, "_byteawithoutordercol") == 0 || strcmp(p_name, "_byteawithoutorderwithequalcol") == 0) {
printfPQExpBuffer(&statement_data->conn->errorMessage,
libpq_gettext("ERROR(CLIENT): could not support functions when full encryption is on.\n"));
return false;
@@ -1905,8 +1905,10 @@ bool Processor::run_pre_statement(const Node * const stmt, StatementData *statem
return run_pre_delete_statement((DeleteStmt *)stmt, statement_data);
case T_UpdateStmt:
return run_pre_update_statement((UpdateStmt *)stmt, statement_data);
- case T_SelectStmt:
- return run_pre_select_statement((SelectStmt *)stmt, statement_data);
+ case T_SelectStmt: {
+ bool unencrypted = false;
+ return run_pre_select_statement((SelectStmt *)stmt, statement_data, &unencrypted);
+ }
case T_PrepareStmt:
return run_pre_prepare_statement((PrepareStmt *)stmt, statement_data);
case T_ExecuteStmt:
@@ -1941,12 +1943,14 @@ bool Processor::run_pre_statement(const Node * const stmt, StatementData *statem
const VariableSetStmt *set_stmt = (const VariableSetStmt *)stmt;
return run_pre_set_statement(set_stmt, statement_data);
}
- case T_ViewStmt:
+ case T_ViewStmt: {
+ bool unencrypted = false;
current_statement->cacheRefresh |= CacheRefreshType::COLUMNS;
/*
rewrite query in the CREATE VIEW clause if query has relevant columns
*/
- return run_pre_select_statement((SelectStmt *)((ViewStmt *)stmt)->query, statement_data);
+ return run_pre_select_statement((SelectStmt *)((ViewStmt *)stmt)->query, statement_data, &unencrypted);
+ }
case T_DropStmt:
return run_pre_drop_statement((DropStmt *)stmt, statement_data);
break;
@@ -1984,8 +1988,15 @@ bool Processor::run_pre_statement(const Node * const stmt, StatementData *statem
current_statement->cacheRefresh |= CacheRefreshType::GLOBAL_SETTING;
}
break;
- case T_CreateTableAsStmt:
- return run_pre_select_statement((SelectStmt *)((CreateTableAsStmt *)stmt)->query, statement_data);
+ case T_CreateTableAsStmt: {
+ bool unencrypted = false;
+ bool res = run_pre_select_statement((SelectStmt *)((CreateTableAsStmt *)stmt)->query,
+ statement_data, &unencrypted);
+ if (res && !unencrypted) {
+ current_statement->cacheRefresh |= CacheRefreshType::COLUMNS;
+ }
+ return res;
+ }
default:
break;
}
diff --git a/src/common/interfaces/libpq/client_logic_processor/stmt_processor.h b/src/common/interfaces/libpq/client_logic_processor/stmt_processor.h
index bfae01832..5b79d0b45 100644
--- a/src/common/interfaces/libpq/client_logic_processor/stmt_processor.h
+++ b/src/common/interfaces/libpq/client_logic_processor/stmt_processor.h
@@ -76,7 +76,8 @@ private:
/*
* with cte as([select,insert,update,delete]) select from cte where .so we need to get CacheColumn from cte.
*/
- static bool run_pre_select_statement(const SelectStmt * const select_stmt, StatementData *statement_data);
+ static bool run_pre_select_statement(const SelectStmt * const select_stmt, StatementData *statement_data,
+ bool *unencrypted = nullptr);
static bool run_pre_select_statement(const SelectStmt * const select_stmt, const SetOperation &parent_set_operation,
const bool &parent_all, StatementData *statement_data, ICachedColumns *cacehd_columns = nullptr,
ICachedColumns *cached_columns_parents = nullptr);
@@ -91,6 +92,7 @@ private:
static bool run_pre_delete_statement(const DeleteStmt * const delete_stmt, StatementData *statement_data,
ICachedColumns *cached_columns = nullptr);
static bool run_pre_prepare_statement(const PrepareStmt * const prepare_stmt, StatementData *statement_data);
+ static bool process_prepare_arg_types(const PrepareStmt * const prepare_stmt, StatementData *statement_data);
static bool run_pre_execute_statement(const ExecuteStmt * const execute_stmt, StatementData *statement_data);
static bool run_pre_declare_cursor_statement(const DeclareCursorStmt * const declare_cursor_stmt,
StatementData *statement_data);
diff --git a/src/common/interfaces/libpq/client_logic_processor/values_processor.cpp b/src/common/interfaces/libpq/client_logic_processor/values_processor.cpp
index 7592689eb..2baf35e17 100644
--- a/src/common/interfaces/libpq/client_logic_processor/values_processor.cpp
+++ b/src/common/interfaces/libpq/client_logic_processor/values_processor.cpp
@@ -387,7 +387,6 @@ DecryptDataRes ValuesProcessor::deprocess_value(PGconn *conn, const unsigned cha
* if the size is smaller the size of Oid, so setting oid is not there
* and this is an error
*/
- fprintf(stderr, "ERROR(CLIENT): wrong value for processed column\n");
if (format == 0) {
libpq_free(unescaped_processed_data);
}
@@ -454,7 +453,7 @@ DecryptDataRes ValuesProcessor::deprocess_value(PGconn *conn, const unsigned cha
const bool ValuesProcessor::textual_rep(const Oid oid)
{
return (oid != BOOLOID && oid != INT8OID && oid != INT2OID && oid != INT1OID && oid != INT4OID && oid != OIDOID &&
- oid != NUMERICOID);
+ oid != NUMERICOID && oid != FLOAT4OID && oid != FLOAT8OID);
}
void ValuesProcessor::process_text_format(unsigned char **plain_text, size_t &plain_text_size,
diff --git a/src/common/interfaces/libpq/fe-connect.cpp b/src/common/interfaces/libpq/fe-connect.cpp
index a16b30a4a..92a1ed972 100644
--- a/src/common/interfaces/libpq/fe-connect.cpp
+++ b/src/common/interfaces/libpq/fe-connect.cpp
@@ -965,7 +965,11 @@ static void fillPGconn(PGconn* conn, PQconninfoOption* connOptions)
}
#ifdef HAVE_CE
tmp = conninfo_getval(connOptions, "enable_ce");
- conn->client_logic->enable_client_encryption = (tmp != NULL) ? true : false;
+ if (tmp != NULL && strcmp(tmp, "1") == 0) {
+ conn->client_logic->enable_client_encryption = true;
+ } else {
+ conn->client_logic->enable_client_encryption = false;
+ }
#endif
}
diff --git a/src/common/interfaces/libpq/fe-exec.cpp b/src/common/interfaces/libpq/fe-exec.cpp
index 707c64183..6e75ff1f8 100644
--- a/src/common/interfaces/libpq/fe-exec.cpp
+++ b/src/common/interfaces/libpq/fe-exec.cpp
@@ -3931,6 +3931,16 @@ unsigned char* PQescapeBytea(const unsigned char* from, size_t from_length, size
return PQescapeByteaInternal(NULL, from, from_length, to_length, static_std_strings, false /* can't use hex */);
}
+#ifdef HAVE_CE
+unsigned char *PQescapeByteaCe(PGconn *conn, const unsigned char *from, size_t fromlen, size_t *tolen, bool addquote)
+{
+ if (conn == NULL) {
+ return NULL;
+ }
+ return PQescapeByteaInternal(conn, from, fromlen, tolen, conn->std_strings, true, addquote);
+}
+#endif
+
#define ISFIRSTOCTDIGIT(CH) ((CH) >= '0' && (CH) <= '3')
#define ISOCTDIGIT(CH) ((CH) >= '0' && (CH) <= '7')
#define OCTVAL(CH) ((CH) - '0')
diff --git a/src/common/interfaces/libpq/frontend_parser/gram.y b/src/common/interfaces/libpq/frontend_parser/gram.y
index 8dc8c079b..1d5075b60 100755
--- a/src/common/interfaces/libpq/frontend_parser/gram.y
+++ b/src/common/interfaces/libpq/frontend_parser/gram.y
@@ -111,6 +111,13 @@
#define CAS_NOT_VALID 0x10
#define CAS_NO_INHERIT 0x20
+/*
+ * In the IntoClause structure there is a char value which will eventually be
+ * set to RELKIND_RELATION or RELKIND_MATVIEW based on the relkind field in
+ * the statement-level structure, which is an ObjectType. Define the default
+ * here, which should always be overridden later.
+ */
+#define INTO_CLAUSE_RELKIND_DEFAULT '\0'
#define parser_yyerror(msg) scanner_yyerror(msg, yyscanner)
#define parser_errposition(pos) scanner_errposition(pos, yyscanner)
@@ -144,6 +151,8 @@ static void doNegateFloat(Value *v);
static Node *makeAArrayExpr(List *elements, int location);
static Node *makeXmlExpr(XmlExprOp op, char *name, List *named_args,
List *args, int location);
+static void SplitColQualList(List *qualList, List **constraintList, CollateClause **collClause,
+ core_yyscan_t yyscanner);
static void SplitColQualList(List *qualList,
List **constraintList, CollateClause **collClause, ClientLogicColumnRef **clientLogicColumnRef,
core_yyscan_t yyscanner);
@@ -217,7 +226,7 @@ extern THR_LOCAL bool stmt_contains_operator_plus;
AlterRoleStmt AlterTableStmt AlterUserStmt
SelectStmt UpdateStmt InsertStmt DeleteStmt
VariableResetStmt VariableSetStmt
- CopyStmt CreateStmt TransactionStmt PreparableStmt CreateSchemaStmt
+ CopyStmt CreateAsStmt CreateStmt TransactionStmt PreparableStmt CreateSchemaStmt
DeclareCursorStmt CreateFunctionStmt CreateProcedureStmt CallFuncStmt
PrepareStmt ExecDirectStmt ExecuteStmt
CreateKeyStmt ViewStmt MergeStmt
@@ -226,8 +235,8 @@ extern THR_LOCAL bool stmt_contains_operator_plus;
%type select_no_parens select_with_parens select_clause
simple_select values_clause
-%type alter_column_default alter_using
-%type opt_asc_desc opt_nulls_order
+%type alter_column_default alter_using AutoIncrementValue
+%type opt_asc_desc opt_nulls_order con_asc_desc
%type drop_type
%type alter_table_cmd alter_partition_cmd opt_collate_clause exchange_partition_cmd move_partition_cmd
@@ -239,6 +248,8 @@ extern THR_LOCAL bool stmt_contains_operator_plus;
%type copy_opt_list callfunc_args
%type copy_opt_item
+%type opt_with_data
+
%type copy_file_name
database_name
@@ -268,6 +279,7 @@ extern THR_LOCAL bool stmt_contains_operator_plus;
OptWith opt_distinct opt_definition definition def_list
opt_column_list columnList opt_name_list opt_multi_name_list
sort_clause opt_sort_clause sortby_list
+ opt_c_include constraint_params
name_list from_clause from_list opt_array_bounds
qualified_name_list any_name
any_operator expr_list attrs
@@ -284,14 +296,14 @@ extern THR_LOCAL bool stmt_contains_operator_plus;
%type group_by_list
%type group_by_item empty_grouping_set rollup_clause cube_clause
-%type grouping_sets_clause
+%type grouping_sets_clause OptAutoIncrement
%type OptTempTableName
-%type into_clause
+%type into_clause create_as_target
%type func_type
-%type OptTemp opt_wait
+%type OptTemp opt_wait OptKind
%type opt_nowait_or_skip
%type OnCommitOption
@@ -347,7 +359,7 @@ extern THR_LOCAL bool stmt_contains_operator_plus;
%type NumericOnly
%type alias_clause opt_alias_clause
%type sortby
-%type index_elem
+%type index_elem constraint_elem
%type table_ref
%type joined_table
%type relation_expr
@@ -413,15 +425,18 @@ extern THR_LOCAL bool stmt_contains_operator_plus;
opt_frame_clause frame_extent frame_bound
%type opt_existing_window_name
%type OptCompress
+%type KVType
%type ColCmprsMode
%type column_item opt_table_partitioning_clause
range_partitioning_clause value_partitioning_clause opt_interval_partition_clause
interval_expr maxValueItem list_partitioning_clause hash_partitioning_clause
range_start_end_item range_less_than_item list_partition_item hash_partition_item
-%type range_partition_definition_list list_partition_definition_list hash_partition_definition_list maxValueList
- column_item_list tablespaceList opt_interval_tablespaceList
- split_dest_partition_define_list
- range_start_end_list range_less_than_list opt_range_every_list
+ subpartitioning_clause range_subpartitioning_clause hash_subpartitioning_clause
+ list_subpartitioning_clause subpartition_item
+%type range_partition_definition_list list_partition_definition_list hash_partition_definition_list
+ maxValueList listValueList column_item_list tablespaceList opt_interval_tablespaceList
+ split_dest_partition_define_list range_start_end_list range_less_than_list opt_range_every_list
+ subpartition_definition_list
%type partition_name
%type opt_row_movement_clause
@@ -500,10 +515,10 @@ extern THR_LOCAL bool stmt_contains_operator_plus;
BACKWARD BARRIER BEFORE BEGIN_NON_ANOYBLOCK BEGIN_P BETWEEN BIGINT BINARY BINARY_DOUBLE BINARY_INTEGER BIT BLANKS BLOB_P BLOCKCHAIN BODY_P BOGUS
BOOLEAN_P BOTH BUCKETCNT BUCKETS BY BYTEAWITHOUTORDER BYTEAWITHOUTORDERWITHEQUAL
- CACHE CALL CALLED CANCELABLE CASCADE CASCADED CASE CAST CATALOG_P CHAIN CHAR_P
- CHARACTER CHARACTERISTICS CHARACTERSET CHECK CHECKPOINT CLASS CLEAN CLIENT CLIENT_MASTER_KEY CLIENT_MASTER_KEYS CLOB CLOSE
- CLUSTER COALESCE COLLATE COLLATION COLUMN COLUMN_ENCRYPTION_KEY COLUMN_ENCRYPTION_KEYS COMMENT COMMENTS COMMIT
- CONNECT COMMITTED COMPACT COMPATIBLE_ILLEGAL_CHARS COMPLETE COMPRESS CONDITION CONCURRENTLY CONFIGURATION CONNECTBY CONNECTION CONSTANT CONSTRAINT CONSTRAINTS
+ CACHE CALL CALLED CANCELABLE CASCADE CASCADED CASE CAST CATALOG_P CHAIN CHANGE CHAR_P
+ CHARACTER CHARACTERISTICS CHARACTERSET CHECK CHECKPOINT CHARSET CLASS CLEAN CLIENT CLIENT_MASTER_KEY CLIENT_MASTER_KEYS CLOB CLOSE
+ CLUSTER COALESCE COLLATE COLLATION COLUMN COLUMN_ENCRYPTION_KEY COLUMN_ENCRYPTION_KEYS COLUMNS COMMENT COMMENTS COMMIT CONVERT_P
+ CONNECT COMMITTED COMPACT COMPATIBLE_ILLEGAL_CHARS COMPLETE COMPLETION COMPRESS CONDITION CONCURRENTLY CONFIGURATION CONNECTBY CONNECTION CONSTANT CONSTRAINT CONSTRAINTS
CONTENT_P CONTINUE_P CONTVIEW CONVERSION_P COORDINATOR COORDINATORS COPY COST CREATE
CROSS CSN CSV CUBE CURRENT_P
CURRENT_CATALOG CURRENT_DATE CURRENT_ROLE CURRENT_SCHEMA
@@ -515,12 +530,12 @@ extern THR_LOCAL bool stmt_contains_operator_plus;
/* PGXC_BEGIN */
DICTIONARY DIRECT DIRECTORY DISABLE_P DISCARD DISTINCT DISTRIBUTE DISTRIBUTION DO DOCUMENT_P DOMAIN_P DOUBLE_P
/* PGXC_END */
- DROP DUPLICATE DISCONNECT
+ DROP DUPLICATE DISCONNECT DUMPFILE
EACH ELASTIC ELSE ENABLE_P ENCLOSED ENCODING ENCRYPTED ENCRYPTED_VALUE ENCRYPTION ENCRYPTION_TYPE
- END_P ENFORCED ENUM_P ERRORS ESCAPE EOL ESCAPING EVERY EXCEPT EXCHANGE
+ END_P ENDS ENFORCED ENUM_P ERRORS ESCAPE EOL ESCAPING EVENT EVENTS EVERY EXCEPT EXCHANGE
EXCLUDE EXCLUDED EXCLUDING EXCLUSIVE EXECUTE EXPIRED_P EXISTS EXPLAIN
- EXTENSION EXTERNAL EXTRACT
+ EXTENSION EXTERNAL EXTRACT ESCAPED
FALSE_P FAMILY FAST FENCED FETCH FIELDS FILEHEADER_P FILL_MISSING_FIELDS FILLER FILTER FIRST_P FIXED_P FLOAT_P FOLLOWING FOLLOWS_P FOR FORCE FOREIGN FORMATTER FORWARD
FEATURES // DB4AI
@@ -539,7 +554,7 @@ extern THR_LOCAL bool stmt_contains_operator_plus;
KEY KILL KEY_PATH KEY_STORE
- LABEL LANGUAGE LARGE_P LAST_P LC_COLLATE_P LC_CTYPE_P LEADING LEAKPROOF
+ LABEL LANGUAGE LARGE_P LAST_P LC_COLLATE_P LC_CTYPE_P LEADING LEAKPROOF LINES
LEAST LESS LEFT LEVEL LIST LIKE LIMIT LISTEN LOAD LOCAL LOCALTIME LOCALTIMESTAMP
LOCATION LOCK_P LOCKED LOG_P LOGGING LOGIN_ANY LOGIN_SUCCESS LOGIN_FAILURE LOGOUT LOOP
MAPPING MASKING MASTER MASTR MATCH MATERIALIZED MATCHED MAXEXTENTS MAXSIZE MAXTRANS MAXVALUE MERGE MINUS_P MINUTE_P MINVALUE MINEXTENTS MODE MODIFY_P MONTH_P MOVE MOVEMENT
@@ -548,7 +563,7 @@ extern THR_LOCAL bool stmt_contains_operator_plus;
NOT NOTHING NOTIFY NOTNULL NOWAIT NULL_P NULLCOLS NULLIF NULLS_P NUMBER_P NUMERIC NUMSTR NVARCHAR NVARCHAR2 NVL
OBJECT_P OF OFF OFFSET OIDS ON ONLY OPERATOR OPTIMIZATION OPTION OPTIONALLY OPTIONS OR
- ORDER OUT_P OUTER_P OVER OVERLAPS OVERLAY OWNED OWNER
+ ORDER OUT_P OUTER_P OVER OVERLAPS OVERLAY OWNED OWNER OUTFILE
PACKAGE PACKAGES PARSER PARTIAL PARTITION PARTITIONS PASSING PASSWORD PCTFREE PER_P PERCENT PERFORMANCE PERM PLACING PLAN PLANS POLICY POSITION
/* PGXC_BEGIN */
@@ -567,12 +582,12 @@ extern THR_LOCAL bool stmt_contains_operator_plus;
RESET RESIZE RESOURCE RESTART RESTRICT RETURN RETURNING RETURNS REUSE REVOKE RIGHT ROLE ROLES ROLLBACK ROLLUP
ROTATION ROW ROWNUM ROWS ROWTYPE_P RULE
- SAMPLE SAVEPOINT SCHEMA SCROLL SEARCH SECOND_P SECURITY SELECT SEPARATOR_P SEQUENCE SEQUENCES
+ SAMPLE SAVEPOINT SCHEDULE SCHEMA SCROLL SEARCH SECOND_P SECURITY SELECT SEPARATOR_P SEQUENCE SEQUENCES
SERIALIZABLE SERVER SESSION SESSION_USER SET SETS SETOF SHARE SHIPPABLE SHOW SHUTDOWN SIBLINGS
- SIMILAR SIMPLE SIZE SKIP SLICE SMALLDATETIME SMALLDATETIME_FORMAT_P SMALLINT SNAPSHOT SOME SOURCE_P SPACE SPILL SPLIT STABLE STANDALONE_P START STARTWITH
- STATEMENT STATEMENT_ID STATISTICS STDIN STDOUT STORAGE STORE_P STORED STRATIFY STREAM STRICT_P STRIP_P SUBPARTITION SUBSCRIPTION SUBSTRING SWLEVEL SWNOCYCLE SWROWNUM
- SYMMETRIC SYNONYM SYSDATE SYSID SYSTEM_P SYS_REFCURSOR
-
+ SIMILAR SIMPLE SIZE SKIP SLAVE SLICE SMALLDATETIME SMALLDATETIME_FORMAT_P SMALLINT SNAPSHOT SOME SOURCE_P SPACE SPILL SPLIT STABLE STANDALONE_P START STARTS STARTWITH
+ STATEMENT STATEMENT_ID STATISTICS STDIN STDOUT STORAGE STORE_P STORED STRATIFY STREAM STRICT_P STRIP_P SUBPARTITION SUBPARTITIONS SUBSCRIPTION SUBSTRING SWLEVEL SWNOCYCLE SWROWNUM
+ SYMMETRIC SYNONYM SYSDATE SYSID SYSTEM_P SYS_REFCURSOR STARTING
+
TABLE TABLES TABLESAMPLE TABLESPACE TARGET TEMP TEMPLATE TEMPORARY TERMINATED TEXT_P THAN THEN TIME TIME_FORMAT_P TIMECAPSULE TIMESTAMP TIMESTAMP_FORMAT_P TIMESTAMPDIFF TINYINT
TO TRAILING TRANSACTION TRANSFORM TREAT TRIGGER TRIM TRUE_P
TRUNCATE TRUSTED TSFIELD TSTAG TSTIME TYPE_P TYPES_P
@@ -612,6 +627,7 @@ extern THR_LOCAL bool stmt_contains_operator_plus;
%nonassoc PARTIAL_EMPTY_PREC
%nonassoc CLUSTER
%nonassoc SET /* see relation_expr_opt_alias */
+%nonassoc AUTO_INCREMENT
%left UNION EXCEPT MINUS_P
%left INTERSECT
%left OR
@@ -729,6 +745,7 @@ stmt :
| AlterRoleStmt
| AlterUserStmt
| CopyStmt
+ | CreateAsStmt
| CreateStmt
| CreateSchemaStmt
| DeclareCursorStmt
@@ -4191,34 +4208,37 @@ AnonyBlockStmt:
*****************************************************************************/
CreateStmt: CREATE OptTemp TABLE qualified_name '(' OptTableElementList ')'
- OptInherit OptWith OnCommitOption OptCompress OptPartitionElement
+ OptInherit OptAutoIncrement OptWith OnCommitOption OptCompress OptPartitionElement
/* PGXC_BEGIN */
OptDistributeBy OptSubCluster
/* PGXC_END */
opt_table_partitioning_clause
- opt_internal_data
+ opt_internal_data OptKind
{
CreateStmt *n = makeNode(CreateStmt);
$4->relpersistence = $2;
+ n->relkind = $18;
n->relation = $4;
n->tableElts = $6;
n->inhRelations = $8;
n->constraints = NIL;
- n->options = $9;
- n->oncommit = $10;
- n->row_compress = $11;
- n->tablespacename = $12;
+ n->options = $10;
+ n->oncommit = $11;
+ n->row_compress = $12;
+ n->tablespacename = $13;
n->if_not_exists = false;
/* PGXC_BEGIN */
- n->distributeby = $13;
- n->subcluster = $14;
+ n->distributeby = $14;
+ n->subcluster = $15;
/* PGXC_END */
- n->partTableState = (PartitionState *)$15;
- n->internalData = $16;
+ n->partTableState = (PartitionState *)$16;
+ n->internalData = $17;
+ n->autoIncStart = $9;
+ n->charset = PG_INVALID_ENCODING;
$$ = (Node *)n;
}
| CREATE OptTemp TABLE IF_P NOT EXISTS qualified_name '('
- OptTableElementList ')' OptInherit OptWith OnCommitOption
+ OptTableElementList ')' OptInherit OptAutoIncrement OptWith OnCommitOption
OptCompress OptPartitionElement
/* PGXC_BEGIN */
OptDistributeBy OptSubCluster
@@ -4232,17 +4252,19 @@ CreateStmt: CREATE OptTemp TABLE qualified_name '(' OptTableElementList ')'
n->tableElts = $9;
n->inhRelations = $11;
n->constraints = NIL;
- n->options = $12;
- n->oncommit = $13;
- n->row_compress = $14;
- n->tablespacename = $15;
+ n->options = $13;
+ n->oncommit = $14;
+ n->row_compress = $15;
+ n->tablespacename = $16;
n->if_not_exists = true;
/* PGXC_BEGIN */
- n->distributeby = $16;
- n->subcluster = $17;
+ n->distributeby = $17;
+ n->subcluster = $18;
/* PGXC_END */
- n->partTableState = (PartitionState *)$18;
- n->internalData = $19;
+ n->partTableState = (PartitionState *)$19;
+ n->internalData = $20;
+ n->autoIncStart = $12;
+ n->charset = PG_INVALID_ENCODING;
$$ = (Node *)n;
}
| CREATE OptTemp TABLE qualified_name OF any_name
@@ -4269,6 +4291,7 @@ CreateStmt: CREATE OptTemp TABLE qualified_name '(' OptTableElementList ')'
/* PGXC_END */
n->partTableState = NULL;
n->internalData = NULL;
+ n->charset = PG_INVALID_ENCODING;
$$ = (Node *)n;
}
| CREATE OptTemp TABLE IF_P NOT EXISTS qualified_name OF any_name
@@ -4295,24 +4318,35 @@ CreateStmt: CREATE OptTemp TABLE qualified_name '(' OptTableElementList ')'
/* PGXC_END */
n->partTableState = NULL;
n->internalData = NULL;
+ n->charset = PG_INVALID_ENCODING;
$$ = (Node *)n;
}
;
+OptKind:
+ FOR MATERIALIZED VIEW
+ {
+ $$ = OBJECT_MATVIEW;
+ }
+ | /* empty */
+ {
+ $$ = OBJECT_TABLE;
+ }
+ ;
opt_table_partitioning_clause:
range_partitioning_clause
{
$$ = $1;
}
- |hash_partitioning_clause
+ | hash_partitioning_clause
{
$$ = $1;
}
- |list_partitioning_clause
+ | list_partitioning_clause
{
$$ = $1;
}
- |value_partitioning_clause
+ | value_partitioning_clause
{
$$ = $1;
}
@@ -4321,27 +4355,164 @@ opt_table_partitioning_clause:
range_partitioning_clause:
PARTITION BY RANGE '(' column_item_list ')'
- opt_interval_partition_clause '(' range_partition_definition_list ')' opt_row_movement_clause
+ opt_interval_partition_clause subpartitioning_clause '(' range_partition_definition_list ')' opt_row_movement_clause
{
PartitionState *n = makeNode(PartitionState);
+ if ($8 != NULL && list_length($5) != 1) {
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature"),
+ errdetail("The partition key's length should be 1.")));
+ }
+ if ($8 != NULL && $7 != NULL) {
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature"),
+ errdetail("Subpartitions do not support interval partition."),
+ errcause("System error."), erraction("Contact engineer to support.")));
+ }
n->partitionKey = $5;
n->intervalPartDef = (IntervalPartitionDefState *)$7;
- n->partitionList = $9;
+ n->partitionList = $10;
if (n->intervalPartDef)
n->partitionStrategy = 'i';
else
n->partitionStrategy = 'r';
- n->rowMovement = (RowMovementValue)$11;
+ n->rowMovement = (RowMovementValue)$12;
+ n->subPartitionState = (PartitionState *)$8;
$$ = (Node *)n;
}
;
list_partitioning_clause:
- PARTITION BY LIST '(' column_item_list ')'
- '(' list_partition_definition_list ')'
+ PARTITION BY LIST '(' column_item_list ')' subpartitioning_clause
+ '(' list_partition_definition_list ')' opt_row_movement_clause
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature"),
+ errdetail("The distributed capability is not supported currently.")));
+#endif
+ if (list_length($5) != 1) {
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature"),
+ errdetail("The partition key's length should be 1.")));
+ }
+ PartitionState *n = makeNode(PartitionState);
+ n->partitionKey = $5;
+ n->intervalPartDef = NULL;
+ n->partitionList = $9;
+ n->partitionStrategy = 'l';
+ n->subPartitionState = (PartitionState *)$7;
+ n->rowMovement = (RowMovementValue)$11;
+
+ $$ = (Node *)n;
+
+ }
+ ;
+
+hash_partitioning_clause:
+ PARTITION BY IDENT '(' column_item_list ')' subpartitioning_clause
+ '(' hash_partition_definition_list ')' opt_row_movement_clause
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature"),
+ errdetail("The distributed capability is not supported currently.")));
+#endif
+ if (list_length($5) != 1) {
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature"),
+ errdetail("The partition key's length should be 1.")));
+ }
+ if (strcmp($3, "hash") != 0) {
+ ereport(errstate, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("unrecognized option \"%s\"", $3)));
+ }
+ PartitionState *n = makeNode(PartitionState);
+ n->partitionKey = $5;
+ n->intervalPartDef = NULL;
+ n->partitionList = $9;
+ n->partitionStrategy = 'h';
+ n->subPartitionState = (PartitionState *)$7;;
+ n->rowMovement = (RowMovementValue)$11;
+ int i = 0;
+ ListCell *elem = NULL;
+ List *parts = n->partitionList;
+ foreach(elem, parts) {
+ HashPartitionDefState *hashPart = (HashPartitionDefState*)lfirst(elem);
+ hashPart->boundary = list_make1(makeIntConst(i, -1));
+ i++;
+ }
+ $$ = (Node *)n;
+
+ }
+ ;
+
+value_partitioning_clause:
+ PARTITION BY VALUES '(' column_item_list ')'
+ {
+ PartitionState *n = makeNode(PartitionState);
+ n->partitionKey = $5;
+ n->partitionStrategy = 'v';
+
+ $$ = (Node *)n;
+ }
+ ;
+
+subpartitioning_clause:
+ range_subpartitioning_clause
+ {
+ $$ = $1;
+ }
+ | hash_subpartitioning_clause
+ {
+ $$ = $1;
+ }
+ | list_subpartitioning_clause
+ {
+ $$ = $1;
+ }
+ | /* empty */ { $$ = NULL; }
+ ;
+
+range_subpartitioning_clause:
+ SUBPARTITION BY RANGE '(' column_item_list ')'
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature"),
+ errdetail("The distributed capability is not supported currently.")));
+#endif
+ PartitionState *n = makeNode(PartitionState);
+ if (list_length($5) != 1) {
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-support feature"),
+ errdetail("The partition key's length should be 1.")));
+ }
+ n->partitionKey = $5;
+ n->intervalPartDef = NULL;
+ n->partitionList = NIL;
+ n->partitionStrategy = 'r';
+
+ n->rowMovement = ROWMOVEMENT_DEFAULT;
+ n->subPartitionState = NULL;
+
+ $$ = (Node *)n;
+ }
+ ;
+
+list_subpartitioning_clause:
+ SUBPARTITION BY LIST '(' column_item_list ')'
{
#ifdef ENABLE_MULTIPLE_NODES
ereport(ERROR,
@@ -4355,24 +4526,19 @@ list_partitioning_clause:
errmsg("Un-support feature"),
errdetail("The partition key's length should be 1.")));
}
- if (list_length($8) > 64) {
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Un-support feature"),
- errdetail("The partition's length should be less than 65.")));
- }
PartitionState *n = makeNode(PartitionState);
n->partitionKey = $5;
n->intervalPartDef = NULL;
- n->partitionList = $8;
+ n->partitionList = NIL;
n->partitionStrategy = 'l';
+ n->subPartitionState = NULL;
$$ = (Node *)n;
}
;
-hash_partitioning_clause:
- PARTITION BY IDENT '(' column_item_list ')'
+hash_subpartitioning_clause:
+ SUBPARTITION BY IDENT '(' column_item_list ')'
'(' hash_partition_definition_list ')'
{
#ifdef ENABLE_MULTIPLE_NODES
@@ -4391,41 +4557,57 @@ hash_partitioning_clause:
ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
errmsg("unrecognized option \"%s\"", $3)));
}
- if (list_length($8) > 64) {
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Un-support feature"),
- errdetail("The partition's length should be less than 65.")));
- }
PartitionState *n = makeNode(PartitionState);
n->partitionKey = $5;
n->intervalPartDef = NULL;
- n->partitionList = $8;
+ n->partitionList = NIL;
n->partitionStrategy = 'h';
- int i = 0;
- ListCell *elem = NULL;
- List *parts = n->partitionList;
- foreach(elem, parts) {
- HashPartitionDefState *hashPart = (HashPartitionDefState*)lfirst(elem);
- hashPart->boundary = list_make1(makeIntConst(i, -1));
- i++;
- }
+ n->subPartitionState = NULL;
$$ = (Node *)n;
}
;
-value_partitioning_clause:
- PARTITION BY VALUES '(' column_item_list ')'
+subpartition_definition_list:
+ subpartition_item
{
- PartitionState *n = makeNode(PartitionState);
- n->partitionKey = $5;
- n->partitionStrategy = 'v';
+ $$ = list_make1($1);
+ }
+ | subpartition_definition_list ',' subpartition_item
+ {
+ $$ = lappend($1, $3);
+ }
+ ;
+
+subpartition_item:
+ SUBPARTITION name VALUES '(' listValueList ')' OptTableSpace
+ {
+ ListPartitionDefState *n = makeNode(ListPartitionDefState);
+ n->partitionName = $2;
+ n->boundary = $5;
+ n->tablespacename = $7;
+
+ $$ = (Node *)n;
+ }
+ | SUBPARTITION name OptTableSpace
+ {
+ HashPartitionDefState *n = makeNode(HashPartitionDefState);
+ n->partitionName = $2;
+ n->tablespacename = $3;
+
+ $$ = (Node*)n;
+ }
+ | SUBPARTITION name VALUES LESS THAN
+ '(' maxValueList ')' OptTableSpace
+ {
+ RangePartitionDefState *n = makeNode(RangePartitionDefState);
+ n->partitionName = $2;
+ n->boundary = $7;
+ n->tablespacename = $9;
$$ = (Node *)n;
}
;
-
column_item_list:
column_item
{
@@ -4529,7 +4711,7 @@ range_less_than_list:
;
list_partition_item:
- PARTITION name VALUES '(' expr_list ')' OptTableSpace
+ PARTITION name VALUES '(' listValueList ')' OptTableSpace
{
ListPartitionDefState *n = makeNode(ListPartitionDefState);
n->partitionName = $2;
@@ -4538,19 +4720,24 @@ list_partition_item:
$$ = (Node *)n;
}
- | PARTITION name VALUES '(' DEFAULT ')' OptTableSpace
+ | PARTITION name VALUES '(' listValueList ')' OptTableSpace '(' subpartition_definition_list ')'
{
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Un-support feature"),
- errdetail("The default list's partition is not supported currently.")));
ListPartitionDefState *n = makeNode(ListPartitionDefState);
n->partitionName = $2;
- Const *n_default = makeNode(Const);
- n_default->ismaxvalue = true;
- n_default->location = -1;
- n->boundary = list_make1(n_default);
+ n->boundary = $5;
n->tablespacename = $7;
+ n->subPartitionDefState = $9;
+ int i = 0;
+ ListCell *elem = NULL;
+ List *parts = n->subPartitionDefState;
+ foreach(elem, parts) {
+ if (!IsA((Node*)lfirst(elem), HashPartitionDefState)) {
+ break;
+ }
+ HashPartitionDefState *hashPart = (HashPartitionDefState*)lfirst(elem);
+ hashPart->boundary = list_make1(makeIntConst(i, -1));
+ i++;
+ }
$$ = (Node *)n;
}
;
@@ -4564,6 +4751,26 @@ hash_partition_item:
$$ = (Node*)n;
}
+ | PARTITION name OptTableSpace '(' subpartition_definition_list ')'
+ {
+ HashPartitionDefState *n = makeNode(HashPartitionDefState);
+ n->partitionName = $2;
+ n->tablespacename = $3;
+ n->subPartitionDefState = $5;
+ int i = 0;
+ ListCell *elem = NULL;
+ List *parts = n->subPartitionDefState;
+ foreach(elem, parts) {
+ if (!IsA((Node*)lfirst(elem), HashPartitionDefState)) {
+ break;
+ }
+ HashPartitionDefState *hashPart = (HashPartitionDefState*)lfirst(elem);
+ hashPart->boundary = list_make1(makeIntConst(i, -1));
+ i++;
+ }
+ $$ = (Node *)n;
+ }
+ ;
range_less_than_item:
PARTITION name VALUES LESS THAN
@@ -4574,6 +4781,28 @@ range_less_than_item:
n->boundary = $7;
n->tablespacename = $9;
+ $$ = (Node *)n;
+ }
+ | PARTITION name VALUES LESS THAN
+ '(' maxValueList ')' OptTableSpace '(' subpartition_definition_list ')'
+ {
+ RangePartitionDefState *n = makeNode(RangePartitionDefState);
+ n->partitionName = $2;
+ n->boundary = $7;
+ n->tablespacename = $9;
+ n->subPartitionDefState = $11;
+ int i = 0;
+ ListCell *elem = NULL;
+ List *parts = n->subPartitionDefState;
+ foreach(elem, parts) {
+ if (!IsA((Node*)lfirst(elem), HashPartitionDefState)) {
+ break;
+ }
+ HashPartitionDefState *hashPart = (HashPartitionDefState*)lfirst(elem);
+ hashPart->boundary = list_make1(makeIntConst(i, -1));
+ i++;
+ }
+
$$ = (Node *)n;
}
;
@@ -4631,7 +4860,7 @@ opt_range_every_list:
$$ = $3;
}
| /* empty */ { $$ = NIL; }
-
+
partition_name:
ColId
{
@@ -4666,6 +4895,21 @@ maxValueItem:
}
;
+listValueList:
+ expr_list
+ {
+ $$ = $1;
+ }
+ | DEFAULT
+ {
+ Const *n = makeNode(Const);
+
+ n->ismaxvalue = true;
+ n->location = @1;
+
+ $$ = list_make1(n);
+ }
+ ;
opt_row_movement_clause: ENABLE_P ROW MOVEMENT { $$ = ROWMOVEMENT_ENABLE; }
| DISABLE_P ROW MOVEMENT { $$ = ROWMOVEMENT_DISABLE; }
@@ -4689,13 +4933,19 @@ OptTemp: TEMPORARY { $$ = RELPERSISTENCE_TEMP; }
| LOCAL TEMP { $$ = RELPERSISTENCE_TEMP; }
| GLOBAL TEMPORARY
{
- feparser_printf("GLOBAL is deprecated in temporary table creation\n");
+#ifdef ENABLE_MULTIPLE_NODES
$$ = RELPERSISTENCE_TEMP;
+#else
+ $$ = RELPERSISTENCE_GLOBAL_TEMP;
+#endif
}
| GLOBAL TEMP
{
- feparser_printf("GLOBAL is deprecated in temporary table creation\n");
+#ifdef ENABLE_MULTIPLE_NODES
$$ = RELPERSISTENCE_TEMP;
+#else
+ $$ = RELPERSISTENCE_GLOBAL_TEMP;
+#endif
}
| UNLOGGED { $$ = RELPERSISTENCE_UNLOGGED; }
| /*EMPTY*/ { $$ = RELPERSISTENCE_PERMANENT; }
@@ -4744,25 +4994,30 @@ TypedTableElement:
| TableConstraint { $$ = $1; }
;
-columnDef: ColId Typename ColCmprsMode create_generic_options ColQualList
+columnDef: ColId Typename KVType ColCmprsMode create_generic_options ColQualList
{
ColumnDef *n = makeNode(ColumnDef);
n->colname = $1;
n->typname = $2;
+ n->kvtype = $3;
n->inhcount = 0;
n->is_local = true;
n->is_not_null = false;
n->is_from_type = false;
n->storage = 0;
- n->cmprs_mode = $3;
+ n->cmprs_mode = $4;
n->raw_default = NULL;
n->cooked_default = NULL;
n->collOid = InvalidOid;
- n->fdwoptions = $4;
+ n->fdwoptions = $5;
n->clientLogicColumnRef=NULL;
-
- SplitColQualList($5, &n->constraints, &n->collClause,&n->clientLogicColumnRef, yyscanner);
-
+ if ($3 == ATT_KV_UNDEFINED) {
+ SplitColQualList($6, &n->constraints, &n->collClause, &n->clientLogicColumnRef,
+ yyscanner);
+ } else {
+ SplitColQualList($6, &n->constraints, &n->collClause,
+ yyscanner);
+ }
$$ = (Node *)n;
}
;
@@ -4841,7 +5096,11 @@ alter_generic_option_elem:
$$ = makeDefElemExtended(NULL, $2, NULL, DEFELEM_DROP);
}
;
-
+KVType: TSTAG {$$ = ATT_KV_TAG;} /* tag for kv storage */
+ | TSFIELD {$$ = ATT_KV_FIELD;} /* field for kv storage */
+ | TSTIME {$$ = ATT_KV_TIMETAG;} /* time column for kv storage */
+ | /* EMPTY */ {$$ = ATT_KV_UNDEFINED;} /* not using kv storage */
+;
ColCmprsMode: DELTA {$$ = ATT_CMPR_DELTA;} /* delta compression */
| PREFIX {$$ = ATT_CMPR_PREFIX;} /* prefix compression */
| DICTIONARY {$$ = ATT_CMPR_DICTIONARY;} /* dictionary compression */
@@ -4908,6 +5167,20 @@ ColConstraint:
{
$$=$2;
}
+ | AUTO_INCREMENT
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("auto_increment is not yet supported")));
+#endif
+ Constraint *n = makeNode(Constraint);
+ n->contype = CONSTR_AUTO_INCREMENT;
+ n->location = @1;
+ n->raw_expr = NULL;
+ n->cooked_expr = NULL;
+ $$ = (Node *)n;
+ }
;
with_algorithm:
@@ -5406,6 +5679,9 @@ TableLikeClause:
TableLikeClause *n = makeNode(TableLikeClause);
n->relation = $2;
n->options = CREATE_TABLE_LIKE_ALL & ~$4;
+#ifndef ENABLE_MULTIPLE_NODES
+ n->options = n->options & ~CREATE_TABLE_LIKE_DISTRIBUTION;
+#endif
$$ = (Node *)n;
}
;
@@ -5418,11 +5694,11 @@ excluding_option_list:
TableLikeOptionList:
TableLikeOptionList INCLUDING TableLikeIncludingOption { $$ = $1 | $3; }
| TableLikeOptionList EXCLUDING TableLikeExcludingOption { $$ = $1 & ~$3; }
- | /* EMPTY */ { $$ = 0; }
+ | /* EMPTY */ { $$ = CREATE_TABLE_LIKE_DEFAULTS_SERIAL; }
;
TableLikeIncludingOption:
- DEFAULTS { $$ = CREATE_TABLE_LIKE_DEFAULTS; }
+ DEFAULTS { $$ = CREATE_TABLE_LIKE_DEFAULTS | CREATE_TABLE_LIKE_DEFAULTS_SERIAL; }
| CONSTRAINTS { $$ = CREATE_TABLE_LIKE_CONSTRAINTS; }
| INDEXES { $$ = CREATE_TABLE_LIKE_INDEXES; }
| STORAGE { $$ = CREATE_TABLE_LIKE_STORAGE; }
@@ -5435,7 +5711,7 @@ TableLikeIncludingOption:
;
TableLikeExcludingOption:
- DEFAULTS { $$ = CREATE_TABLE_LIKE_DEFAULTS; }
+ DEFAULTS { $$ = CREATE_TABLE_LIKE_DEFAULTS | CREATE_TABLE_LIKE_DEFAULTS_SERIAL; }
| CONSTRAINTS { $$ = CREATE_TABLE_LIKE_CONSTRAINTS; }
| INDEXES { $$ = CREATE_TABLE_LIKE_INDEXES; }
| STORAGE { $$ = CREATE_TABLE_LIKE_STORAGE; }
@@ -5449,7 +5725,7 @@ TableLikeExcludingOption:
;
opt_internal_data:
- INTERNAL DATA_P internal_data_body {$$ = NULL;}
+ INTERNAL DATA_P internal_data_body {$$ = $3;}
| /* EMPTY */ {$$ = NULL;}
;
@@ -5497,11 +5773,34 @@ TableConstraint:
{
Constraint *n = (Constraint *) $3;
Assert(IsA(n, Constraint));
- n->conname = $2;
+ if ((n->conname == NULL) || (n->conname != NULL && n->contype == CONSTR_FOREIGN)) {
+ n->conname = $2;
+ }
n->location = @1;
$$ = (Node *) n;
}
| ConstraintElem { $$ = $1; }
+ | CONSTRAINT ConstraintElem
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("CONSTRAINT without constraint_name is not yet supported in distributed database.")));
+#endif
+ if (fe_pg_yyget_extra(yyscanner)->core_yy_extra.m_clientLogic->m_cached_column_manager->get_sql_compatibility() == M_FORMAT) {
+ Constraint *n = (Constraint *) $2;
+ Assert(IsA(n, Constraint));
+ n->location = @1;
+ $$ = (Node *) n;
+ } else {
+ ereport(errstate,
+ (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("CONSTRAINT without constraint_name is supported only in B-format database."),
+ parser_errposition(@1)));
+ $$ = NULL;/* not reached */
+ }
+ }
;
ConstraintElem:
@@ -5518,20 +5817,87 @@ ConstraintElem:
n->initially_valid = !n->skip_validation;
$$ = (Node *)n;
}
- | UNIQUE '(' columnList ')' opt_definition OptConsTableSpace
+ | UNIQUE name access_method_clause '(' constraint_params ')' opt_c_include opt_definition OptConsTableSpace
+ ConstraintAttributeSpec InformationalConstraintElem
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("UNIQUE name is not yet supported in distributed database.")));
+#endif
+ if (fe_pg_yyget_extra(yyscanner)->core_yy_extra.m_clientLogic->m_cached_column_manager->get_sql_compatibility() == M_FORMAT) {
+ Constraint *n = makeNode(Constraint);
+ n->contype = CONSTR_UNIQUE;
+ n->location = @1;
+ n->conname = $2;
+ n->access_method = $3;
+ n->keys = $5;
+ n->including = $7;
+ n->options = $8;
+ n->indexname = NULL;
+ n->indexspace = $9;
+ processCASbits($10, @10, "UNIQUE",
+ &n->deferrable, &n->initdeferred, NULL,
+ NULL, yyscanner);
+ n->inforConstraint = (InformationalConstraint *) $11; /* informational constraint info */
+ $$ = (Node *)n;
+ } else {
+ ereport(errstate,
+ (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("UNIQUE name is supported only in B-format database."),
+ parser_errposition(@1)));
+ $$ = NULL;/* not reached */
+ }
+
+ }
+ | UNIQUE USING access_method '(' constraint_params ')' opt_c_include opt_definition OptConsTableSpace
+ ConstraintAttributeSpec InformationalConstraintElem
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("UNIQUE access_method_clause is not yet supported in distributed database.")));
+#endif
+ if (fe_pg_yyget_extra(yyscanner)->core_yy_extra.m_clientLogic->m_cached_column_manager->get_sql_compatibility() == M_FORMAT) {
+ Constraint *n = makeNode(Constraint);
+ n->contype = CONSTR_UNIQUE;
+ n->location = @1;
+ n->access_method = $3;
+ n->keys = $5;
+ n->including = $7;
+ n->options = $8;
+ n->indexname = NULL;
+ n->indexspace = $9;
+ processCASbits($10, @10, "UNIQUE",
+ &n->deferrable, &n->initdeferred, NULL,
+ NULL, yyscanner);
+ n->inforConstraint = (InformationalConstraint *) $11; /* informational constraint info */
+ $$ = (Node *)n;
+ } else {
+ ereport(errstate,
+ (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("UNIQUE access_method_clause is supported only in B-format database."),
+ parser_errposition(@1)));
+ $$ = NULL;/* not reached */
+ }
+ }
+ | UNIQUE '(' constraint_params ')' opt_c_include opt_definition OptConsTableSpace
ConstraintAttributeSpec InformationalConstraintElem
{
Constraint *n = makeNode(Constraint);
n->contype = CONSTR_UNIQUE;
n->location = @1;
n->keys = $3;
- n->options = $5;
+ n->including = $5;
+ n->options = $6;
n->indexname = NULL;
- n->indexspace = $6;
- processCASbits($7, @7, "UNIQUE",
+ n->indexspace = $7;
+ processCASbits($8, @8, "UNIQUE",
&n->deferrable, &n->initdeferred, NULL,
NULL, yyscanner);
- n->inforConstraint = (InformationalConstraint *) $8; /* informational constraint info */
+ n->inforConstraint = (InformationalConstraint *) $9; /* informational constraint info */
$$ = (Node *)n;
}
| UNIQUE ExistingIndex ConstraintAttributeSpec InformationalConstraintElem
@@ -5540,6 +5906,7 @@ ConstraintElem:
n->contype = CONSTR_UNIQUE;
n->location = @1;
n->keys = NIL;
+ n->including = NIL;
n->options = NIL;
n->indexname = $2;
n->indexspace = NULL;
@@ -5549,20 +5916,53 @@ ConstraintElem:
n->inforConstraint = (InformationalConstraint *) $4; /* informational constraint info */
$$ = (Node *)n;
}
- | PRIMARY KEY '(' columnList ')' opt_definition OptConsTableSpace
+ | PRIMARY KEY USING access_method '(' constraint_params ')' opt_c_include opt_definition OptConsTableSpace
+ ConstraintAttributeSpec InformationalConstraintElem
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("PRIMARY KEY USING access_method is not yet supported in distributed database.")));
+#endif
+ if (fe_pg_yyget_extra(yyscanner)->core_yy_extra.m_clientLogic->m_cached_column_manager->get_sql_compatibility() == M_FORMAT) {
+ Constraint *n = makeNode(Constraint);
+ n->contype = CONSTR_PRIMARY;
+ n->location = @1;
+ n->access_method = $4;
+ n->keys = $6;
+ n->including = $8;
+ n->options = $9;
+ n->indexname = NULL;
+ n->indexspace = $10;
+ processCASbits($11, @11, "PRIMARY KEY",
+ &n->deferrable, &n->initdeferred, NULL,
+ NULL, yyscanner);
+ n->inforConstraint = (InformationalConstraint *) $12; /* informational constraint info */
+ $$ = (Node *)n;
+ } else {
+ ereport(errstate,
+ (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("PRIMARY KEY USING access_method is supported only in B-format database."),
+ parser_errposition(@1)));
+ $$ = NULL;/* not reached */
+ }
+ }
+ | PRIMARY KEY '(' constraint_params ')' opt_c_include opt_definition OptConsTableSpace
ConstraintAttributeSpec InformationalConstraintElem
{
Constraint *n = makeNode(Constraint);
n->contype = CONSTR_PRIMARY;
n->location = @1;
n->keys = $4;
- n->options = $6;
+ n->including = $6;
+ n->options = $7;
n->indexname = NULL;
- n->indexspace = $7;
- processCASbits($8, @8, "PRIMARY KEY",
+ n->indexspace = $8;
+ processCASbits($9, @9, "PRIMARY KEY",
&n->deferrable, &n->initdeferred, NULL,
NULL, yyscanner);
- n->inforConstraint = (InformationalConstraint *) $9; /* informational constraint info */
+ n->inforConstraint = (InformationalConstraint *) $10; /* informational constraint info */
$$ = (Node *)n;
}
| PRIMARY KEY ExistingIndex ConstraintAttributeSpec InformationalConstraintElem
@@ -5581,7 +5981,7 @@ ConstraintElem:
$$ = (Node *)n;
}
| EXCLUDE access_method_clause '(' ExclusionConstraintList ')'
- opt_definition OptConsTableSpace ExclusionWhereClause
+ opt_c_include opt_definition OptConsTableSpace ExclusionWhereClause
ConstraintAttributeSpec
{
Constraint *n = makeNode(Constraint);
@@ -5589,15 +5989,50 @@ ConstraintElem:
n->location = @1;
n->access_method = $2;
n->exclusions = $4;
- n->options = $6;
+ n->including = $6;
+ n->options = $7;
n->indexname = NULL;
- n->indexspace = $7;
- n->where_clause = $8;
- processCASbits($9, @9, "EXCLUDE",
+ n->indexspace = $8;
+ n->where_clause = $9;
+ processCASbits($10, @10, "EXCLUDE",
&n->deferrable, &n->initdeferred, NULL,
NULL, yyscanner);
$$ = (Node *)n;
}
+ | FOREIGN KEY name '(' columnList ')' REFERENCES qualified_name
+ opt_column_list key_match key_actions ConstraintAttributeSpec
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("FOREIGN KEY name ... REFERENCES constraint is not yet supported.")));
+#endif
+ if (fe_pg_yyget_extra(yyscanner)->core_yy_extra.m_clientLogic->m_cached_column_manager->get_sql_compatibility() == M_FORMAT) {
+ Constraint *n = makeNode(Constraint);
+ n->contype = CONSTR_FOREIGN;
+ n->location = @1;
+ n->conname = $3;
+ n->pktable = $8;
+ n->fk_attrs = $5;
+ n->pk_attrs = $9;
+ n->fk_matchtype = $10;
+ n->fk_upd_action = (char) ($11 >> 8);
+ n->fk_del_action = (char) ($11 & 0xFF);
+ processCASbits($12, @12, "FOREIGN KEY",
+ &n->deferrable, &n->initdeferred,
+ &n->skip_validation, NULL,
+ yyscanner);
+ n->initially_valid = !n->skip_validation;
+ $$ = (Node *)n;
+ } else {
+ ereport(errstate,
+ (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("FOREIGN KEY name is supported only in B-format database."),
+ parser_errposition(@1)));
+ $$ = NULL;/* not reached */
+ }
+ }
| FOREIGN KEY '(' columnList ')' REFERENCES qualified_name
opt_column_list key_match key_actions ConstraintAttributeSpec
{
@@ -5694,6 +6129,10 @@ columnElem: ColId
}
;
+opt_c_include: INCLUDE '(' columnList ')' { $$ = $3; }
+ | /* EMPTY */ { $$ = NIL; }
+ ;
+
key_match: MATCH FULL
{
$$ = FKCONSTR_MATCH_FULL;
@@ -5789,6 +6228,76 @@ index_elem: ColId opt_collate opt_class opt_asc_desc opt_nulls_order
$$->nulls_ordering = (SortByNulls)$7;
}
;
+constraint_params: constraint_elem { $$ = list_make1($1); }
+ | constraint_params ',' constraint_elem { $$ = lappend($1, $3); }
+ ;
+con_asc_desc: ASC { $$ = SORTBY_ASC; }
+ | DESC { $$ = SORTBY_DESC; }
+ ;
+
+constraint_elem: ColId con_asc_desc
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("ASC/DESC is not yet supported in distributed database.")));
+#endif
+ if (fe_pg_yyget_extra(yyscanner)->core_yy_extra.m_clientLogic->m_cached_column_manager->get_sql_compatibility() == M_FORMAT) {
+ $$ = makeNode(IndexElem);
+ $$->name = $1;
+ $$->expr = NULL;
+ $$->indexcolname = NULL;
+ $$->collation = NIL;
+ $$->opclass = NIL;
+ $$->ordering = (SortByDir)$2;
+ $$->nulls_ordering = SORTBY_NULLS_DEFAULT;
+ } else {
+ ereport(errstate,
+ (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("ASC/DESC is supported only in B-format database."),
+ parser_errposition(@1)));
+ $$ = NULL;/* not reached */
+ }
+ }
+ | ColId
+ {
+ $$ = makeNode(IndexElem);
+ $$->name = $1;
+ $$->expr = NULL;
+ $$->indexcolname = NULL;
+ $$->collation = NIL;
+ $$->opclass = NIL;
+ $$->ordering = SORTBY_DEFAULT;
+ $$->nulls_ordering = SORTBY_NULLS_DEFAULT;
+ }
+ | '(' a_expr ')' opt_asc_desc
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("expression is not yet supported in distributed database.")));
+#endif
+ if (fe_pg_yyget_extra(yyscanner)->core_yy_extra.m_clientLogic->m_cached_column_manager->get_sql_compatibility() == M_FORMAT) {
+ $$ = makeNode(IndexElem);
+ $$->name = NULL;
+ $$->expr = $2;
+ $$->indexcolname = NULL;
+ $$->collation = NIL;
+ $$->opclass = NIL;
+ $$->ordering = (SortByDir)$4;
+ $$->nulls_ordering = SORTBY_NULLS_DEFAULT;
+ } else {
+ ereport(errstate,
+ (errmodule(MOD_PARSER),
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("expression is supported only in B-format database."),
+ parser_errposition(@1)));
+ $$ = NULL;/* not reached */
+ }
+
+ }
+ ;
opt_nulls_order: NULLS_FIRST { $$ = SORTBY_NULLS_FIRST; }
@@ -5880,7 +6389,27 @@ OnCommitOption: ON COMMIT DROP { $$ = ONCOMMIT_DROP; }
| ON COMMIT PRESERVE ROWS { $$ = ONCOMMIT_PRESERVE_ROWS; }
| /*EMPTY*/ { $$ = ONCOMMIT_NOOP; }
;
+AutoIncrementValue: AUTO_INCREMENT Iconst { $$ = (Node *)makeInteger($2); }
+ | AUTO_INCREMENT '=' Iconst { $$ = (Node *)makeInteger($3); }
+ | AUTO_INCREMENT FCONST { $$ = (Node *)makeFloat($2); }
+ | AUTO_INCREMENT '=' FCONST { $$ = (Node *)makeFloat($3); }
+ ;
+OptAutoIncrement: AutoIncrementValue
+ {
+#ifdef ENABLE_MULTIPLE_NODES
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("auto_increment is not yet supported")));
+#endif
+ if (fe_pg_yyget_extra(yyscanner)->core_yy_extra.m_clientLogic->m_cached_column_manager->get_sql_compatibility() != M_FORMAT) {
+ ereport(errstate, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("auto_increment is supported only in B-format database")));
+ }
+ $$ = (Node*)makeDefElem("start", $1);
+ }
+ | /* EMPTY */ { $$ = NULL; }
+ ;
OptTableSpace: TABLESPACE name { $$ = $2; }
| /*EMPTY*/ { $$ = NULL; }
;
@@ -6283,9 +6812,48 @@ ExistingIndex: USING INDEX index_name { $$ = $3; }
*
*****************************************************************************/
+CreateAsStmt:
+ CREATE OptTemp TABLE create_as_target AS SelectStmt opt_with_data
+ {
+ CreateTableAsStmt *ctas = makeNode(CreateTableAsStmt);
+ ctas->query = $6;
+ ctas->into = $4;
+ ctas->relkind = OBJECT_TABLE;
+ ctas->is_select_into = false;
+ /* cram additional flags into the IntoClause */
+ $4->rel->relpersistence = $2;
+ $4->skipData = !($7);
+ $$ = (Node *) ctas;
+ }
+ ;
+create_as_target:
+ qualified_name opt_column_list OptWith OnCommitOption OptCompress OptTableSpace
+/* PGXC_BEGIN */
+ OptDistributeBy OptSubCluster
/* PGXC_END */
+ {
+ $$ = makeNode(IntoClause);
+ $$->rel = $1;
+ $$->colNames = $2;
+ $$->options = $3;
+ $$->onCommit = $4;
+ $$->row_compress = $5;
+ $$->tableSpaceName = $6;
+ $$->skipData = false; /* might get changed later */
+/* PGXC_BEGIN */
+ $$->distributeby = $7;
+ $$->subcluster = $8;
+ $$->relkind = INTO_CLAUSE_RELKIND_DEFAULT;
+/* PGXC_END */
+ }
+ ;
+opt_with_data:
+ WITH DATA_P { $$ = TRUE; }
+ | WITH NO DATA_P { $$ = FALSE; }
+ | /*EMPTY*/ { $$ = TRUE; }
+ ;
opt_as: AS {}
| /* EMPTY */ {}
;
@@ -7878,35 +8446,41 @@ Typename: SimpleTypename opt_array_bounds
{
$$ = $1;
$$->arrayBounds = $2;
+ $$->end_location = yylloc;
}
| SETOF SimpleTypename opt_array_bounds
{
$$ = $2;
$$->arrayBounds = $3;
$$->setof = TRUE;
+ $$->end_location = yylloc;
}
/* SQL standard syntax, currently only one-dimensional */
| SimpleTypename ARRAY '[' Iconst ']'
{
$$ = $1;
$$->arrayBounds = list_make1(makeInteger($4));
+ $$->end_location = yylloc;
}
| SETOF SimpleTypename ARRAY '[' Iconst ']'
{
$$ = $2;
$$->arrayBounds = list_make1(makeInteger($5));
$$->setof = TRUE;
+ $$->end_location = yylloc;
}
| SimpleTypename ARRAY
{
$$ = $1;
$$->arrayBounds = list_make1(makeInteger(-1));
+ $$->end_location = yylloc;
}
| SETOF SimpleTypename ARRAY
{
$$ = $2;
$$->arrayBounds = list_make1(makeInteger(-1));
$$->setof = TRUE;
+ $$->end_location = yylloc;
}
;
@@ -11068,6 +11642,7 @@ unreserved_keyword:
| CASCADED
| CATALOG_P
| CHAIN
+ | CHANGE
| CHARACTERISTICS
| CHARACTERSET
| CHECKPOINT
@@ -11081,12 +11656,14 @@ unreserved_keyword:
| CLUSTER
| COLUMN_ENCRYPTION_KEY
| COLUMN_ENCRYPTION_KEYS
+ | COLUMNS
| COMMENT
| COMMENTS
| COMMIT
| COMMITTED
| COMPATIBLE_ILLEGAL_CHARS
| COMPLETE
+ | COMPLETION
| COMPRESS
| CONDITION
| CONFIGURATION
@@ -11136,6 +11713,7 @@ unreserved_keyword:
| DOMAIN_P
| DOUBLE_P
| DROP
+ | DUMPFILE
| EACH
| ENABLE_P
| ENCLOSED
@@ -11145,11 +11723,15 @@ unreserved_keyword:
| ENCRYPTED_VALUE
| ENCRYPTION
| ENCRYPTION_TYPE
+ | ENDS
| ENUM_P
| EOL
| ERRORS
| ESCAPE
+ | ESCAPED
| ESCAPING
+ | EVENT
+ | EVENTS
| EVERY
| EXCHANGE
| EXCLUDE
@@ -11220,6 +11802,7 @@ unreserved_keyword:
| LC_CTYPE_P
| LEAKPROOF
| LEVEL
+ | LINES
| LIST
| LISTEN
| LOAD
@@ -11280,6 +11863,7 @@ unreserved_keyword:
| OPTIONS
| OWNED
| OWNER
+ | OUTFILE
| PACKAGE
| PARSER
| PARTIAL %prec PARTIAL_EMPTY_PREC
@@ -11349,6 +11933,7 @@ unreserved_keyword:
| ROWS
| RULE
| SAVEPOINT
+ | SCHEDULE
| SCHEMA
| SCROLL
| SEARCH
@@ -11369,6 +11954,7 @@ unreserved_keyword:
| SIMPLE
| SIZE
| SKIP
+ | SLAVE
| SLICE
| SMALLDATETIME_FORMAT_P
| SNAPSHOT
@@ -11378,6 +11964,8 @@ unreserved_keyword:
| STABLE
| STANDALONE_P
| START
+ | STARTING
+ | STARTS
| STATEMENT
| STATISTICS
| STDIN
@@ -11388,6 +11976,7 @@ unreserved_keyword:
| STRICT_P
| STRIP_P
| SUBPARTITION
+ | SUBPARTITIONS
| SUBSCRIPTION
| SYNONYM
| SYS_REFCURSOR { $$ = "refcursor"; }
@@ -12120,7 +12709,58 @@ makeXmlExpr(XmlExprOp op, char *name, List *named_args, List *args,
x->location = location;
return (Node *) x;
}
-
+/* Separate Constraint nodes from COLLATE clauses in a ColQualList */
+static void
+SplitColQualList(List *qualList,
+ List **constraintList, CollateClause **collClause,
+ core_yyscan_t yyscanner)
+{
+ ListCell *cell;
+ ListCell *prev;
+ ListCell *next;
+
+ *collClause = NULL;
+ prev = NULL;
+ for (cell = list_head(qualList); cell; cell = next)
+ {
+ Node *n = (Node *) lfirst(cell);
+
+ next = lnext(cell);
+ if (IsA(n, Constraint))
+ {
+ /* keep it in list */
+ prev = cell;
+ continue;
+ }
+ if (IsA(n, CollateClause))
+ {
+ CollateClause *c = (CollateClause *) n;
+
+ if (*collClause) {
+ ereport(errstate,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("multiple COLLATE clauses not allowed"),
+ parser_errposition(c->location)));
+ }
+ *collClause = c;
+ }
+ else if (IsA(n, ClientLogicColumnRef))
+ {
+ ereport(errstate, (errmodule(MOD_SEC), errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("unsupported syntax: ENCRYPTED WITH in this operation"), errdetail("N/A"),
+ errcause("client encryption feature is not supported this operation."),
+ erraction("Check client encryption feature whether supported this operation.")));
+ }
+ else {
+ ereport(errstate,
+ (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE),
+ errmsg("unexpected node type %d", (int) n->type)));
+ }
+ /* remove non-Constraint nodes from qualList */
+ qualList = list_delete_cell(qualList, cell, prev);
+ }
+ *constraintList = qualList;
+}
/* Separate Constraint nodes from COLLATE clauses in a ColQualList */
static void
SplitColQualList(List *qualList,
diff --git a/src/common/pl/plpgsql/src/gram.y b/src/common/pl/plpgsql/src/gram.y
index bf9556ab0..b654c331c 100755
--- a/src/common/pl/plpgsql/src/gram.y
+++ b/src/common/pl/plpgsql/src/gram.y
@@ -993,8 +993,16 @@ decl_statement : decl_varname_list decl_const decl_datatype decl_collate decl_no
{
if (var->dtype == PLPGSQL_DTYPE_VAR)
((PLpgSQL_var *) var)->default_val = $6;
- else {
+ else if (var->dtype == PLPGSQL_DTYPE_ROW || var->dtype == PLPGSQL_DTYPE_RECORD) {
((PLpgSQL_row *) var)->default_val = $6;
+ }
+ else {
+ const char* message = "default value for rec variable is not supported";
+ InsertErrorMessage(message, plpgsql_yylloc);
+ ereport(errstate,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("default value for rec variable is not supported"),
+ parser_errposition(@5)));
}
}
}
@@ -4896,7 +4904,7 @@ stmt_dynexecute : K_EXECUTE
if (newp->into) /* multiple INTO */
yyerror("syntax error");
newp->into = true;
- read_into_target(&newp->rec, &newp->row, &newp->strict, false);
+ (void)read_into_target(&newp->rec, &newp->row, &newp->strict, false);
endtoken = yylex();
}
/* If we found "USING", collect the argument */
@@ -5045,7 +5053,7 @@ stmt_fetch : K_FETCH opt_fetch_direction cursor_variable K_INTO
PLpgSQL_row *row;
/* We have already parsed everything through the INTO keyword */
- read_into_target(&rec, &row, NULL, false);
+ (void)read_into_target(&rec, &row, NULL, false);
if (yylex() != ';')
yyerror("syntax error");
@@ -5144,7 +5152,7 @@ fetch_into_target :
PLpgSQL_datum *datum = NULL;
PLpgSQL_rec *rec;
PLpgSQL_row *row;
- read_into_target(&rec, &row, NULL, true);
+ (void)read_into_target(&rec, &row, NULL, true);
if (rec != NULL) {
datum = (PLpgSQL_datum *)rec;
@@ -9225,7 +9233,7 @@ make_execsql_stmt(int firsttoken, int location)
into_start_loc = yylloc;
}
u_sess->plsql_cxt.curr_compile_context->plpgsql_IdentifierLookup = IDENTIFIER_LOOKUP_NORMAL;
- is_user_var = read_into_target(&rec, &row, &have_strict, have_bulk_collect);
+ is_user_var = read_into_target(&rec, &row, &have_strict, have_bulk_collect);
if (is_user_var) {
u_sess->plsql_cxt.curr_compile_context->plpgsql_IdentifierLookup = save_IdentifierLookup;
have_into = false;
@@ -10617,7 +10625,7 @@ read_into_target(PLpgSQL_rec **rec, PLpgSQL_row **row, bool *strict, bool bulk_c
if (strict)
*strict = true;
tok = yylex();
- if (tok == SET_USER_IDENT) {
+ if (tok == '@' || tok == SET_USER_IDENT) {
return true;
}
if (strict && tok == K_STRICT)
diff --git a/src/common/pl/plpgsql/src/pl_comp.cpp b/src/common/pl/plpgsql/src/pl_comp.cpp
index f73349dc7..2e969eac5 100644
--- a/src/common/pl/plpgsql/src/pl_comp.cpp
+++ b/src/common/pl/plpgsql/src/pl_comp.cpp
@@ -4439,7 +4439,7 @@ void plpgsql_HashTableInit(void)
ctl.hash = tag_hash;
ctl.hcxt = u_sess->cache_mem_cxt;
u_sess->plsql_cxt.plpgsql_pkg_HashTable =
- hash_create("PLpgSQL package cache", PKGS_PER_USER, &ctl, HASH_ELEM | HASH_PACKAGE | HASH_CONTEXT);
+ hash_create("PLpgSQL package cache", PKGS_PER_USER, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
}
static PLpgSQL_function* plpgsql_HashTableLookup(PLpgSQL_func_hashkey* func_key)
diff --git a/src/common/pl/plpgsql/src/pl_exec.cpp b/src/common/pl/plpgsql/src/pl_exec.cpp
index 75e46d8ad..abe3e08d3 100644
--- a/src/common/pl/plpgsql/src/pl_exec.cpp
+++ b/src/common/pl/plpgsql/src/pl_exec.cpp
@@ -1577,15 +1577,15 @@ Datum plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, boo
TupleDesc tupdesc = get_func_param_desc(tp, func->fn_rettype, &out_args_num);
Datum *values = (Datum*)palloc(sizeof(Datum) * (out_args_num + 1));
bool *nulls = (bool*)palloc(sizeof(bool) * (out_args_num + 1));
- heap_deform_tuple((HeapTuple)DatumGetPointer(estate.paramval), estate.paramtupdesc, (values + 1),
- (nulls + 1));
- values[0] = estate.retval;
- nulls[0] = estate.retisnull;
if (unlikely(estate.paramval == 0 || estate.paramtupdesc ==NULL)) {
ereport(ERROR, (errcode(ERRCODE_PLPGSQL_ERROR), errmodule(MOD_PLSQL),
errmsg("tuple is null"),
errdetail("it may be because change guc behavior_compat_options in one session")));
}
+ heap_deform_tuple((HeapTuple)DatumGetPointer(estate.paramval), estate.paramtupdesc, (values + 1),
+ (nulls + 1));
+ values[0] = estate.retval;
+ nulls[0] = estate.retisnull;
HeapTuple rettup = heap_form_tuple(tupdesc, values, nulls);
estate.retval = PointerGetDatum(SPI_returntuple(rettup, tupdesc));
pfree(values);
@@ -3581,6 +3581,7 @@ static void plpgsql_set_outparam_value(PLpgSQL_execstate* estate, PLpgSQL_expr*
}
PLpgSQL_row* row = (PLpgSQL_row*)estate->datums[expr->out_param_dno];
exec_move_row(estate, NULL, row, tuple, paramtupdesc);
+ heap_freetuple(tuple);
pfree(values);
pfree(nulls);
}
diff --git a/src/common/pl/plpgsql/src/pl_handler.cpp b/src/common/pl/plpgsql/src/pl_handler.cpp
index 6948ac084..6925c1007 100755
--- a/src/common/pl/plpgsql/src/pl_handler.cpp
+++ b/src/common/pl/plpgsql/src/pl_handler.cpp
@@ -740,6 +740,17 @@ Datum plpgsql_call_handler(PG_FUNCTION_ARGS)
}
int fun_arg = fcinfo->nargs;
+
+ _PG_init();
+ /*
+ * Connect to SPI manager
+ */
+ SPI_STACK_LOG("connect", NULL, NULL);
+ rc = SPI_connect_ext(DestSPI, NULL, NULL, nonatomic ? SPI_OPT_NONATOMIC : 0, func_oid);
+ if (rc != SPI_OK_CONNECT) {
+ ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("SPI_connect failed: %s when execute PLSQL function.", SPI_result_code_string(rc))));
+ }
#ifdef ENABLE_MULTIPLE_NODES
bool outer_is_stream = false;
bool outer_is_stream_support = false;
@@ -756,18 +767,6 @@ Datum plpgsql_call_handler(PG_FUNCTION_ARGS)
AutoDopControl dopControl;
dopControl.CloseSmp();
#endif
-
- _PG_init();
- /*
- * Connect to SPI manager
- */
- SPI_STACK_LOG("connect", NULL, NULL);
- rc = SPI_connect_ext(DestSPI, NULL, NULL, nonatomic ? SPI_OPT_NONATOMIC : 0, func_oid);
- if (rc != SPI_OK_CONNECT) {
- ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("SPI_connect failed: %s when execute PLSQL function.", SPI_result_code_string(rc))));
- }
-
int connect = SPI_connectid();
Oid firstLevelPkgOid = InvalidOid;
PG_TRY();
@@ -972,6 +971,8 @@ Datum plpgsql_call_handler(PG_FUNCTION_ARGS)
}
#ifdef ENABLE_MULTIPLE_NODES
SetSendCommandId(saveSetSendCommandId);
+#else
+ dopControl.ResetSmp();
#endif
/* ErrorData could be allocted in SPI's MemoryContext, copy it. */
oldContext = MemoryContextSwitchTo(oldContext);
@@ -996,6 +997,8 @@ Datum plpgsql_call_handler(PG_FUNCTION_ARGS)
}
#ifdef ENABLE_MULTIPLE_NODES
SetSendCommandId(saveSetSendCommandId);
+#else
+ dopControl.ResetSmp();
#endif
/*
@@ -1040,15 +1043,6 @@ Datum plpgsql_inline_handler(PG_FUNCTION_ARGS)
int64 startTime = 0;
bool needRecord = false;
-#ifndef ENABLE_MULTIPLE_NODES
- AutoDopControl dopControl;
- dopControl.CloseSmp();
-#else
- /* Saves the status of whether to send commandId. */
- bool saveSetSendCommandId = IsSendCommandId();
-#endif
-
-
_PG_init();
AssertEreport(IsA(codeblock, InlineCodeBlock), MOD_PLSQL, "Inline code block is required.");
@@ -1076,6 +1070,14 @@ Datum plpgsql_inline_handler(PG_FUNCTION_ARGS)
}
PGSTAT_START_PLSQL_TIME_RECORD();
+#ifndef ENABLE_MULTIPLE_NODES
+ AutoDopControl dopControl;
+ dopControl.CloseSmp();
+#else
+ /* Saves the status of whether to send commandId. */
+ bool saveSetSendCommandId = IsSendCommandId();
+#endif
+
/* Compile the anonymous code block */
PLpgSQL_compile_context* save_compile_context = u_sess->plsql_cxt.curr_compile_context;
PG_TRY();
@@ -1088,6 +1090,9 @@ Datum plpgsql_inline_handler(PG_FUNCTION_ARGS)
}
PG_CATCH();
{
+#ifndef ENABLE_MULTIPLE_NODES
+ dopControl.ResetSmp();
+#endif
popToOldCompileContext(save_compile_context);
PG_RE_THROW();
}
@@ -1141,6 +1146,8 @@ Datum plpgsql_inline_handler(PG_FUNCTION_ARGS)
#ifndef ENABLE_MULTIPLE_NODES
/* for restore parent session and automn session package var values */
(void)processAutonmSessionPkgsInException(func);
+
+ dopControl.ResetSmp();
#endif
ereport(DEBUG3, (errmodule(MOD_NEST_COMPILE), errcode(ERRCODE_LOG),
errmsg("%s clear curr_compile_context because of error.", __func__)));
@@ -1167,6 +1174,8 @@ Datum plpgsql_inline_handler(PG_FUNCTION_ARGS)
#ifdef ENABLE_MULTIPLE_NODES
SetSendCommandId(saveSetSendCommandId);
+#else
+ dopControl.ResetSmp();
#endif
/* Disconnecting and releasing resources */
@@ -1244,11 +1253,6 @@ Datum plpgsql_validator(PG_FUNCTION_ARGS)
replace = PG_GETARG_BOOL(2);
}
-#ifndef ENABLE_MULTIPLE_NODES
- AutoDopControl dopControl;
- dopControl.CloseSmp();
-#endif
-
_PG_init();
if (!CheckFunctionValidatorAccess(fcinfo->flinfo->fn_oid, funcoid)) {
diff --git a/src/gausskernel/cbb/communication/libcomm_utils/libcomm_memory.cpp b/src/gausskernel/cbb/communication/libcomm_utils/libcomm_memory.cpp
old mode 100644
new mode 100755
index 388c88e83..95ae5bf8a
--- a/src/gausskernel/cbb/communication/libcomm_utils/libcomm_memory.cpp
+++ b/src/gausskernel/cbb/communication/libcomm_utils/libcomm_memory.cpp
@@ -287,13 +287,13 @@ bool gs_return_tuple(StreamState* node)
{
TupleVector* tupleVec = node->tempTupleVec;
- if (tupleVec->tuplePointer == 0) {
+ if (tupleVec->tuplePointer == tupleVec->tupleCount) {
return false;
}
- tupleVec->tuplePointer--;
int n = tupleVec->tuplePointer;
node->ss.ps.ps_ResultTupleSlot = tupleVec->tupleVector[n];
+ tupleVec->tuplePointer++;
return true;
}
@@ -334,7 +334,8 @@ bool gs_consume_memory_data(StreamState* node, int loc)
(void)ExecCopySlot(tupledst->tupleVector[i], tuplesrc->tupleVector[i]);
}
- tupledst->tuplePointer = tuplesrc->tuplePointer;
+ tupledst->tupleCount = tuplesrc->tuplePointer;
+ tupledst->tuplePointer = 0;
tuplesrc->tuplePointer = 0;
(void)gs_return_tuple(node);
}
diff --git a/src/gausskernel/cbb/extension/connector/connector.cpp b/src/gausskernel/cbb/extension/connector/connector.cpp
index 78f848589..296be9739 100644
--- a/src/gausskernel/cbb/extension/connector/connector.cpp
+++ b/src/gausskernel/cbb/extension/connector/connector.cpp
@@ -245,34 +245,36 @@ static void read_one_key_value(const char* key, const char* value)
GetKeyValueByString(NAME_ENCODING, t_thrd.conn_cxt.value_encoding, value, len);
} else if (0 == strcasecmp(key, NAME_USERNAME)) {
errno_t errCode;
- char plainuid[EC_CIPHER_TEXT_LENGTH] = {0};
+ char *plainuid = NULL;
/* If an empty username is given, just return */
if (len == 0)
return;
/* Decrypt username */
- decryptECString(value, plainuid, EC_CIPHER_TEXT_LENGTH, SOURCE_MODE);
+ decryptECString(value, &plainuid, SOURCE_MODE);
GetKeyValueByString(NAME_USERNAME, t_thrd.conn_cxt.value_username, plainuid, len);
/* Clear buffer */
- errCode = memset_s(plainuid, EC_CIPHER_TEXT_LENGTH, 0, EC_CIPHER_TEXT_LENGTH);
+ errCode = memset_s(plainuid, strlen(plainuid), 0, strlen(plainuid));
securec_check(errCode, "\0", "\0");
+ pfree(plainuid);
} else if (0 == strcasecmp(key, NAME_PASSWORD)) {
errno_t errCode;
- char plainpwd[EC_CIPHER_TEXT_LENGTH] = {0};
+ char *plainpwd = NULL;
/* If an empty password is given, just return */
if (len == 0)
return;
/* Decrypt password */
- decryptECString(value, plainpwd, EC_CIPHER_TEXT_LENGTH, SOURCE_MODE);
+ decryptECString(value, &plainpwd, SOURCE_MODE);
GetKeyValueByString(NAME_PASSWORD, t_thrd.conn_cxt.value_password, plainpwd, len);
/* Clear buffer */
- errCode = memset_s(plainpwd, EC_CIPHER_TEXT_LENGTH, 0, EC_CIPHER_TEXT_LENGTH);
+ errCode = memset_s(plainpwd, strlen(plainpwd), 0, strlen(plainpwd));
securec_check(errCode, "\0", "\0");
+ pfree(plainpwd);
} else
return; /* just skip unknown keys */
}
diff --git a/src/gausskernel/cbb/instruments/slow_query/gs_stat.cpp b/src/gausskernel/cbb/instruments/slow_query/gs_stat.cpp
index e61eee98d..9a9dc84ec 100644
--- a/src/gausskernel/cbb/instruments/slow_query/gs_stat.cpp
+++ b/src/gausskernel/cbb/instruments/slow_query/gs_stat.cpp
@@ -391,3 +391,30 @@ PgBackendStatus* gs_stat_fetch_stat_beentry(int32 beid)
return NULL;
}
+
+void gs_stat_free_stat_node(PgBackendStatusNode* node)
+{
+ PgBackendStatusNode* tmpNode = node;
+ PgBackendStatusNode* freeNode = NULL;
+ PgBackendStatus* beentry = NULL;
+
+ while (tmpNode != NULL) {
+ beentry = tmpNode->data;
+ gs_stat_free_stat_beentry(beentry);
+ freeNode = tmpNode;
+ tmpNode = tmpNode->next;
+ pfree_ext(freeNode);
+ }
+}
+
+void gs_stat_free_stat_beentry(PgBackendStatus* beentry)
+{
+ if (beentry == NULL) {
+ return;
+ }
+ pfree_ext(beentry->st_appname);
+ pfree_ext(beentry->st_clienthostname);
+ pfree_ext(beentry->st_conninfo);
+ pfree_ext(beentry->st_activity);
+ pfree_ext(beentry);
+}
\ No newline at end of file
diff --git a/src/gausskernel/cbb/instruments/statement/instr_handle_mgr.cpp b/src/gausskernel/cbb/instruments/statement/instr_handle_mgr.cpp
index d2b489ce2..da154b052 100644
--- a/src/gausskernel/cbb/instruments/statement/instr_handle_mgr.cpp
+++ b/src/gausskernel/cbb/instruments/statement/instr_handle_mgr.cpp
@@ -98,7 +98,7 @@ void statement_init_metric_context()
StatementStatContext *reusedHandle = NULL;
/* won't assign handle when statement flush thread not started */
- if (g_instance.pid_cxt.StatementPID == 0) {
+ if (g_instance.pid_cxt.StatementPID == 0 || u_sess -> attr.attr_storage.DefaultXactReadOnly) {
return;
}
CHECK_STMT_TRACK_ENABLED();
@@ -224,6 +224,8 @@ static void print_stmt_basic_debug_log(int log_level)
ereport(log_level, (errmodule(MOD_INSTR), errmsg("\t unique query: %s", CURRENT_STMT_METRIC_HANDLE->query)));
ereport(log_level, (errmodule(MOD_INSTR),
errmsg("\t slow query threshold: %ld", CURRENT_STMT_METRIC_HANDLE->slow_query_threshold)));
+ ereport(log_level, (errmodule(MOD_INSTR),
+ errmsg("\t slow query cause type: %u", CURRENT_STMT_METRIC_HANDLE->cause_type)));
ereport(log_level, (errmodule(MOD_INSTR), errmsg("\t thread id: %lu", CURRENT_STMT_METRIC_HANDLE->tid)));
ereport(log_level, (errmodule(MOD_INSTR), errmsg("\t transaction id: %lu", CURRENT_STMT_METRIC_HANDLE->txn_id)));
ereport(log_level, (errmodule(MOD_INSTR),
diff --git a/src/gausskernel/cbb/instruments/statement/instr_mfchain.cpp b/src/gausskernel/cbb/instruments/statement/instr_mfchain.cpp
index b24c1778e..c66946deb 100644
--- a/src/gausskernel/cbb/instruments/statement/instr_mfchain.cpp
+++ b/src/gausskernel/cbb/instruments/statement/instr_mfchain.cpp
@@ -34,7 +34,6 @@
#include "access/heapam.h"
#include "miscadmin.h"
-
#define GetMemFileItemLen(item) (((MemFileItem*)(item))->len)
#define MFCHAIN_IS_ONLINE(mfchain) (mfchain != NULL && mfchain->state == MFCHAIN_STATE_ONLINE)
@@ -43,7 +42,8 @@
#define MFBLOCK_GET_FIRSTITEM_P(buff, ofs) (char*)(((char*)(buff)) + (ofs))
#define MFBLOCK_GET_FIRSTITEM_O(buff, ptr) (uint32)((ptr) - ((char*)(buff)))
-#define BlockIsEmpty(block) (((block)->state == MFBLOCK_IN_MEMORY && (block)->firstItem == (block)->barrier2) || \
+#define BlockIsEmpty(block) ((block) == NULL || \
+ ((block)->state == MFBLOCK_IN_MEMORY && (block)->firstItem == (block)->barrier2) || \
(block)->state == MFBLOCK_DELETED)
typedef enum BlockActionState {
@@ -63,7 +63,8 @@ typedef enum BlockActionState {
BLOCK_RELOAD_FILE_DAMAGE_ERR,
BLOCK_VERIFY_VERSION_ERR,
BLOCK_VERIFY_CHECKSUM_ERR,
- BLOCK_VERIFY_SDESC_ERR
+ BLOCK_VERIFY_SDESC_ERR,
+ BLOCK_GET_PATH_ERR
} BlockActionState;
typedef enum ChainActionState {
@@ -75,6 +76,7 @@ typedef enum ChainActionState {
CHAIN_CREATE_SIZEPARAM_ERR,
CHAIN_CREATE_TYPEMOD_ERR,
CHAIN_CREATE_LOCK_ERR,
+ CHAIN_CREATE_BLOCK_ERR,
/* ADVANCE */
CHAIN_ADV_DISK_ERR,
@@ -83,7 +85,9 @@ typedef enum ChainActionState {
CHAIN_SDESC_RECREATE,
CHAIN_TURN_ON,
- CHAIN_TURN_OFF
+ CHAIN_TURN_OFF,
+
+ CHAIN_LOAD_FILE_ERR
} ChainActionState;
typedef struct BlockAdvanceResult {
@@ -97,6 +101,7 @@ typedef struct BlockAdvanceResult {
} BlockAdvanceResult;
static ChainActionState ResetChain(MemFileChain* mfchain);
+void ReportChainException(MemFileChain* mfchain, ChainActionState state);
/* ------------------------------------------------
* UTILS func
@@ -202,6 +207,10 @@ static void ReportBlockException(uint32 blockId, BlockActionState state, int lev
case BLOCK_VERIFY_SDESC_ERR:
ereport(level, (errmsg("MemFileBlock %u verify exception: SimpleTupleDesc not match to now.", blockId)));
break;
+ case BLOCK_GET_PATH_ERR:
+ ereport(level, (errmodule(MOD_INSTR),
+ errmsg("MemFileBlock %u get path exception: The memory is inaccessible or insufficient.", blockId)));
+ break;
default:
ereport(ERROR, (errmsg("Unknow Mem-File-Block action state.")));
}
@@ -226,7 +235,11 @@ static char* GetBlockPath(MemFileBlock* block, char* buff = NULL)
{
Assert(block->parent != NULL);
- char* path = buff != NULL ? buff : (char*)palloc(MAXPGPATH);
+ char* path = buff != NULL ? buff : (char*)palloc0_noexcept(MAXPGPATH);
+ if (path == NULL) {
+ ReportBlockException(block->id, BLOCK_GET_PATH_ERR);
+ return path;
+ }
errno_t rc = sprintf_s(path, MAXPGPATH, "%s/%u", block->parent->path, block->id);
securec_check_ss(rc, "", "");
return path;
@@ -297,6 +310,9 @@ static BlockActionState FlushBlockBuff(MemFileBlock* block)
}
char* path = GetBlockPath(block);
+ if (path == NULL) {
+ return BLOCK_GET_PATH_ERR;
+ }
int fd = open(path, O_RDWR | O_CREAT, S_IREAD | S_IWRITE);
if (fd == -1) {
pfree(path);
@@ -359,9 +375,18 @@ static MemFileBlock* CreateBlock(MemFileChain* parent, uint32 id, MemFileBlockBu
{
Assert(parent != NULL && id > MFCHAIN_INVALID_ID);
- MemFileBlock* block = (MemFileBlock*)palloc(sizeof(MemFileBlock));
+ MemFileBlock* block = (MemFileBlock*)palloc0_noexcept(sizeof(MemFileBlock));
+ if (block == NULL) {
+ ReportChainException(parent, CHAIN_CREATE_BLOCK_ERR);
+ return block;
+ }
if (buff == NULL) {
- buff = (MemFileBlockBuff*)palloc(sizeof(MemFileBlockBuff));
+ buff = (MemFileBlockBuff*)palloc0_noexcept(sizeof(MemFileBlockBuff));
+ }
+ if (buff == NULL) {
+ ReportChainException(parent, CHAIN_CREATE_BLOCK_ERR);
+ pfree_ext(block);
+ return NULL;
}
block->state = MFBLOCK_IN_MEMORY;
@@ -471,8 +496,10 @@ static void DestoryBlock(MemFileBlock* block, bool deep = true)
if (deep) {
char* path = GetBlockPath(block);
- unlink(path);
- pfree(path);
+ if (path != nullptr) {
+ unlink(path);
+ pfree(path);
+ }
}
if (block->state == MFBLOCK_IN_BOTH) {
@@ -522,12 +549,12 @@ typedef ChainActionState (*CreateMFChainStep)(MemFileChain* mfchain, MemFileChai
(maxBlockNum) >= MIN_FBLOCK_NUM && (maxBlockNum) <= MAX_FBLOCK_NUM && \
(maxBlockNumM) <= (maxBlockNum) && (retentionTime) > 0)
-static void ReportChainException(MemFileChain* mfchain, ChainActionState state, int level = WARNING)
+void ReportChainException(MemFileChain* mfchain, ChainActionState state)
{
if (state == CHAIN_ACTION_SUCCESS) {
return;
}
-
+ int level = WARNING;
switch (state) {
case CHAIN_CREATE_LONGNAME_ERR:
ereport(level, (errmsg("MemFileChain create exception: name '%s' too long.", mfchain->name)));
@@ -544,6 +571,10 @@ static void ReportChainException(MemFileChain* mfchain, ChainActionState state,
case CHAIN_CREATE_LOCK_ERR:
ereport(level, (errmsg("MemFileChain create exception: invalid lock.")));
break;
+ case CHAIN_CREATE_BLOCK_ERR:
+ ereport(level,
+ (errmsg("MemFileChain create block exception: The memory is inaccessible or insufficient.")));
+ break;
case CHAIN_ADV_DISK_ERR:
ereport(level, (errmsg("MemFileChain advance exception: Disk is inaccessible or insufficient space.")));
break;
@@ -556,6 +587,9 @@ static void ReportChainException(MemFileChain* mfchain, ChainActionState state,
case CHAIN_TURN_OFF:
ereport(level, (errmsg("MemFileChain exception: Something bad happened, mem file chain turn off.")));
break;
+ case CHAIN_LOAD_FILE_ERR:
+ ereport(level, (errmsg("MemFileChain exception: load file error.")));
+ break;
default:
ereport(ERROR, (errmsg("Unknow MemFileChain action state.")));
}
@@ -580,15 +614,15 @@ static bool GetBlockFileAndPrecheck(const char* name, const char* path, char* ou
static MemFileBlock** ReconstructOldBlockFile(MemFileChain* mfchain, List* blocksList, int* size)
{
- if (blocksList == NULL) {
- *size = 0;
- ereport(LOG, (errmsg("there is no block file to reconstruct. ")));
+ /* firstly, sort by id */
+ int len = list_length(blocksList);
+ MemFileBlock** blocks = (MemFileBlock**)palloc0_noexcept(len * sizeof(MemFileBlock*));
+ if (blocks == NULL) {
+ ereport(WARNING, (errmodule(MOD_INSTR),
+ errmsg("MemFileChain load file, palloc file blocks memory is inaccessible or insufficient.")));
return NULL;
}
- /* firstly, sort by id */
- int len = list_length(blocksList);
- MemFileBlock** blocks = (MemFileBlock**)palloc(len * sizeof(MemFileBlock*));
int i = 0;
ListCell* lc = NULL;
foreach(lc, blocksList) {
@@ -609,17 +643,20 @@ static MemFileBlock** ReconstructOldBlockFile(MemFileChain* mfchain, List* block
GetBlockPath(blocks[i], path);
blocks[i]->id = newId;
GetBlockPath(blocks[i], newpath);
+ if (path[0] == '\0' || newpath[0] == '\0') {
+ return NULL;
+ }
rename(path, newpath);
newId++;
}
}
- *size = newId - 1;
+ *size = (int)newId - 1;
return blocks;
}
/* scan all file in chain location, remove or reload it. */
-static MemFileBlock** ReloadOrCleanOldBlockFile(MemFileChain* mfchain, int* len)
+static List* ReloadOrCleanOldBlockFile(MemFileChain* mfchain, int* len)
{
struct dirent* direntry = NULL;
char filename[MAXPGPATH] = {'\0'};
@@ -627,6 +664,8 @@ static MemFileBlock** ReloadOrCleanOldBlockFile(MemFileChain* mfchain, int* len)
bool clean = mfchain->needClean;
if (unlikely(NULL == dirdesc)) {
*len = 0;
+ ereport(WARNING, (errmodule(MOD_INSTR),
+ errmsg("MemFileChain load file, allocate dir error.")));
return NULL;
}
@@ -653,11 +692,7 @@ static MemFileBlock** ReloadOrCleanOldBlockFile(MemFileChain* mfchain, int* len)
}
pfree(buffer);
FreeDir(dirdesc);
-
- MemFileBlock** blocks = ReconstructOldBlockFile(mfchain, blocksList, len);
- list_free(blocksList);
-
- return blocks;
+ return blocksList;
}
static inline bool ChainNeedTrimOldest(MemFileChain* mfchain)
@@ -712,7 +747,10 @@ static ChainActionState CreateMFChainLocation(MemFileChain* mfchain, MemFileChai
return CHAIN_CREATE_LONGNAME_ERR;
}
- char* path = (char*)palloc(MAXPGPATH);
+ char* path = (char*)palloc0_noexcept(MAXPGPATH);
+ if (path == NULL) {
+ return CHAIN_CREATE_DIR_ERR;
+ }
errno_t rc = sprintf_s(path, MAXPGPATH, "%s/%s", t_thrd.proc_cxt.DataDir, param->dir);
securec_check_ss(rc, "", "");
if (access(path, F_OK) == -1) {
@@ -749,14 +787,27 @@ static ChainActionState CreateMFChainSize(MemFileChain* mfchain, MemFileChainCre
static ChainActionState CreateMFChainBlocks(MemFileChain* mfchain, MemFileChainCreateParam* param)
{
MemFileBlock** blocks = NULL;
- int len = 0;
+ int newId = 0;
mfchain->needClean = param->needClean;
/* 1, reload or clean old block file, it will do some trimming here. */
- blocks = ReloadOrCleanOldBlockFile(mfchain, &len);
+ List* blocksList = ReloadOrCleanOldBlockFile(mfchain, &newId);
+ int len = 0;
+ if (blocksList != NULL) {
+ len = list_length(blocksList);
+ blocks = ReconstructOldBlockFile(mfchain, blocksList, &newId);
+ list_free(blocksList);
+
+ if (blocks == NULL) {
+ return CHAIN_LOAD_FILE_ERR;
+ }
+ }
/* 2, create a new chain head */
- MemFileBlock* headBlock = CreateBlock(mfchain, len + MFCHAIN_FIRST_ID);
+ MemFileBlock* headBlock = CreateBlock(mfchain, newId + MFCHAIN_FIRST_ID);
+ if (headBlock == NULL) {
+ return CHAIN_CREATE_BLOCK_ERR;
+ }
mfchain->chainHead = headBlock;
mfchain->chainBoundary = headBlock;
mfchain->chainTail = headBlock;
@@ -765,6 +816,9 @@ static ChainActionState CreateMFChainBlocks(MemFileChain* mfchain, MemFileChainC
/* 3.append old blocks if have */
for (int i = 0; i < len; i++) {
+ if (blocks[i] == NULL) {
+ continue;
+ }
mfchain->chainTail->next = blocks[i];
blocks[i]->prev = mfchain->chainTail;
mfchain->chainTail = blocks[i];
@@ -871,10 +925,23 @@ static ChainActionState AdvanceChain(MemFileChain* mfchain)
return CHAIN_ACTION_SUCCESS;
}
MemoryContext oldContext = MemoryContextSwitchTo(mfchain->memCxt);
-
- /* 1, flush header node into disk */
+ /* 1. extend chain */
+ uint32 newId = mfchain->chainHead->id + 1;
+ MemFileBlockBuff* buff = (mfchain->blockNumM < mfchain->maxBlockNumM) ? NULL : mfchain->chainBoundary->buff;
+ MemFileBlock* newBlock = CreateBlock(mfchain, newId, buff);
+ if (newBlock == NULL) {
+ return CHAIN_CREATE_BLOCK_ERR;
+ }
+
+ /* 2. AdvanceBlock */
+ /* 2.1, flush header node into disk */
BlockAdvanceResult res = AdvanceBlock(mfchain->chainHead);
if (res.state != BLOCK_ACTION_SUCCESS) {
+ if (res.state == BLOCK_GET_PATH_ERR) {
+ DestoryBlock(newBlock, false);
+ return CHAIN_CREATE_BLOCK_ERR;
+ }
+
Assert(res.state == BLOCK_FLUSH_DISK_ERR);
// disk is full, so try to trim 1/3 of chain and retry, if still failed,
// report and turn off the chain.
@@ -890,28 +957,15 @@ static ChainActionState AdvanceChain(MemFileChain* mfchain)
}
}
- /* 2, extend chain */
- uint32 newId = mfchain->chainHead->id + 1;
+ /* 2.2, extend chain */
+ newBlock->next = mfchain->chainHead;
+ mfchain->chainHead->prev = newBlock;
+ mfchain->chainHead = newBlock;
+ mfchain->blockNum++;
if (mfchain->blockNumM < mfchain->maxBlockNumM) {
- MemFileBlock* newBlock = CreateBlock(mfchain, newId);
- newBlock->next = mfchain->chainHead;
- mfchain->chainHead->prev = newBlock;
- mfchain->chainHead = newBlock;
- mfchain->blockNum++;
mfchain->blockNumM++;
-
} else {
- // change last mem block to file block, using free buff of it to create a new block
- Assert(mfchain->blockNumM == mfchain->maxBlockNumM);
-
- res = AdvanceBlock(mfchain->chainBoundary);
- Assert(res.state == BLOCK_ACTION_SUCCESS);
-
- MemFileBlock* newBlock = CreateBlock(mfchain, newId, res.freeBuff);
- newBlock->next = mfchain->chainHead;
- mfchain->chainHead->prev = newBlock;
- mfchain->chainHead = newBlock;
- mfchain->blockNum++;
+ (void)AdvanceBlock(mfchain->chainBoundary);
mfchain->chainBoundary = mfchain->chainBoundary->prev;
}
@@ -985,6 +1039,7 @@ MemFileChain* MemFileChainCreate(MemFileChainCreateParam* param)
ReportChainException(mfchain, res);
MemFileChainDestory(mfchain);
MemoryContextSwitchTo(oldcxt);
+ ereport(ERROR, (errmodule(MOD_INSTR), errmsg("MemFileChain of %s init done and error.", param->name)));
return NULL;
}
}
@@ -1001,6 +1056,8 @@ void MemFileChainDestory(MemFileChain* mfchain)
if (mfchain == NULL || mfchain->state == MFCHAIN_STATE_NOT_READY) {
return;
}
+ ereport(LOG, (errmodule(MOD_INSTR),
+ errmsg("MemFileChain destory start, clean is %d.", mfchain->needClean)));
LWLock* lock = mfchain->lock;
if (lock != NULL) {
@@ -1020,9 +1077,15 @@ void MemFileChainDestory(MemFileChain* mfchain)
DestoryBlock(block, true);
block = next;
}
- rmdir(mfchain->path);
+ if (rmdir(mfchain->path) < 0) {
+ ereport(WARNING, (errmodule(MOD_INSTR),
+ errmsg("MemFileChain destory block failed: %s.", mfchain->path)));
+ }
} else {
- AdvanceChain(mfchain);
+ ChainActionState cres = AdvanceChain(mfchain);
+ if (cres != CHAIN_ACTION_SUCCESS) {
+ ReportChainException(mfchain, cres);
+ }
}
ereport(LOG, (errmsg("Mem-file chain of %s %s destory.", mfchain->name, mfchain->needClean ? "deep" : "light")));
MemoryContextDelete(mfchain->memCxt);
@@ -1117,8 +1180,7 @@ bool MemFileChainInsert(MemFileChain* mfchain, HeapTuple tup, Relation rel)
LWLockAcquire(mfchain->lock, LW_EXCLUSIVE);
ChainActionState cres = AdvanceChain(mfchain);
LWLockRelease(mfchain->lock);
- Assert(cres == CHAIN_ACTION_SUCCESS || cres == CHAIN_TURN_OFF);
- if (cres == CHAIN_TURN_OFF) {
+ if (cres == CHAIN_TURN_OFF || cres == CHAIN_CREATE_BLOCK_ERR) {
return false;
}
res = FillBlock(mfchain->chainHead, tup);
@@ -1191,6 +1253,9 @@ static BlockActionState ScannerLoadBlock(MemFileChainScanner* scanner, MemFileBl
} else if (block->state == MFBLOCK_IN_FILE) {
char* path = GetBlockPath(block);
+ if (path == NULL) {
+ ereport(ERROR, (errmodule(MOD_INSTR), errmsg("Load File: memory is temporarily unavailable")));
+ }
BlockActionState res = ReloadBlockFile(path, (char*)scanner->buff, false);
pfree(path);
if (res != BLOCK_ACTION_SUCCESS) {
diff --git a/src/gausskernel/cbb/instruments/statement/instr_statement.cpp b/src/gausskernel/cbb/instruments/statement/instr_statement.cpp
index f9b6db261..5e0b7fe29 100755
--- a/src/gausskernel/cbb/instruments/statement/instr_statement.cpp
+++ b/src/gausskernel/cbb/instruments/statement/instr_statement.cpp
@@ -75,7 +75,7 @@
#define STATEMENT_DETAILS_HEAD_SIZE (1) /* [VERSION] */
#define INSTR_STMT_UNIX_DOMAIN_PORT (-1)
-#define INSTR_STATEMENT_ATTRNUM 52
+#define INSTR_STATEMENT_ATTRNUM 53
/* support different areas in stmt detail column */
#define STATEMENT_DETAIL_TYPE_LEN (1)
@@ -181,7 +181,7 @@ static void StartStbyStmtHistory()
static void ShutdownStbyStmtHistory()
{
- if (!STBYSTMTHIST_IS_READY) {
+ if (!STBYSTMTHIST_IS_READY || u_sess->attr.attr_storage.DefaultXactReadOnly) {
return;
}
@@ -450,6 +450,31 @@ static void set_stmt_lock_summary(const LockSummaryStat *lock_summary, Datum val
values[(*i)++] = Int64GetDatum(lock_summary->lwlock_wait_time);
}
+static void set_stmt_advise(StatementStatContext* statementInfo, Datum values[], bool nulls[], int* i)
+{
+ if (statementInfo->cause_type == 0)
+ nulls[(*i)++] = true;
+ else {
+ errno_t rc;
+ char causeInfo[STRING_MAX_LEN];
+ rc = memset_s(causeInfo, sizeof(causeInfo), 0, sizeof(causeInfo));
+ securec_check(rc, "\0", "\0");
+ if (statementInfo->cause_type & NUM_F_TYPECASTING) {
+ rc = strcat_s(causeInfo, sizeof(causeInfo), "Cast Function Cause Index Miss. ");
+ securec_check(rc, "\0", "\0");
+ }
+ if (statementInfo->cause_type & NUM_F_LIMIT) {
+ rc = strcat_s(causeInfo, sizeof(causeInfo), "Limit too much rows.");
+ securec_check(rc, "\0", "\0");
+ }
+ if (statementInfo->cause_type & NUM_F_LEAKPROOF) {
+ rc = strcat_s(causeInfo, sizeof(causeInfo), "Proleakproof of function is false.");
+ securec_check(rc, "\0", "\0");
+ }
+ values[(*i)++] = CStringGetTextDatum(causeInfo);
+ }
+}
+
static HeapTuple GetStatementTuple(Relation rel, StatementStatContext* statementInfo,
const knl_u_statement_context* statementCxt, bool* isSlow = NULL)
{
@@ -524,6 +549,7 @@ static HeapTuple GetStatementTuple(Relation rel, StatementStatContext* statement
}
SET_TEXT_VALUES(statementInfo->trace_id, i++);
+ set_stmt_advise(statementInfo, values, nulls, &i);
Assert(INSTR_STATEMENT_ATTRNUM == i);
return heap_form_tuple(RelationGetDescr(rel), values, nulls);
}
@@ -863,10 +889,24 @@ static void StatementFlush()
{
const int flush_usleep_interval = 100000;
int count = 0;
+ bool is_readonly_log_needed = false;
while (!t_thrd.statement_cxt.need_exit && ENABLE_STATEMENT_TRACK) {
ReloadInfo();
- ereport(DEBUG4, (errmodule(MOD_INSTR), errmsg("[Statement] start to flush statemnts.")));
+ if (u_sess->attr.attr_storage.DefaultXactReadOnly) {
+ if (!is_readonly_log_needed) {
+ is_readonly_log_needed = true;
+ ereport(WARNING, (errmodule(MOD_INSTR),
+ errmsg("[Statement] cannot flush suspend list to statement_history in a read-only transaction")));
+ }
+ pg_usleep(flush_usleep_interval);
+ continue;
+ }
+ if (is_readonly_log_needed) {
+ is_readonly_log_needed = false;
+ }
+
+ ereport(DEBUG4, (errmodule(MOD_INSTR), errmsg("[Statement] start to flush statements.")));
StartCleanWorker(&count);
HOLD_INTERRUPTS();
@@ -875,7 +915,7 @@ static void StatementFlush()
RESUME_INTERRUPTS();
count++;
- ereport(DEBUG4, (errmodule(MOD_INSTR), errmsg("[Statement] flush statemnts finished.")));
+ ereport(DEBUG4, (errmodule(MOD_INSTR), errmsg("[Statement] flush statements finished.")));
/* report statement_history state to pgstat */
if (OidIsValid(u_sess->proc_cxt.MyDatabaseId))
pgstat_report_stat(true);
@@ -2282,13 +2322,9 @@ static void instr_stmt_set_wait_events_in_handle_bms(int32 bms_event_idx)
PG_CATCH();
{
(void)MemoryContextSwitchTo(oldcontext);
- ErrorData* edata = NULL;
- edata = CopyErrorData();
FlushErrorState();
ereport(LOG, (errmodule(MOD_INSTR),
- errmsg("[Statement] bms handle event failed - bms event idx: %d, msg: %s", bms_event_idx,
- edata->message)));
- FreeErrorData(edata);
+ errmsg("[Statement] bms handle event failed - bms event idx: %d, msg: OOM", bms_event_idx)));
}
PG_END_TRY();
(void)MemoryContextSwitchTo(oldcontext);
@@ -2619,6 +2655,31 @@ void instr_stmt_dynamic_change_level()
u_sess->unique_sql_cxt.unique_sql_id, CURRENT_STMT_METRIC_HANDLE->level - 1)));
}
+void instr_stmt_report_cause_type(uint32 type)
+{
+ CHECK_STMT_HANDLE();
+
+ if (type & NUM_F_TYPECASTING)
+ CURRENT_STMT_METRIC_HANDLE->cause_type |= NUM_F_TYPECASTING;
+ if (type & NUM_F_LIMIT)
+ CURRENT_STMT_METRIC_HANDLE->cause_type |= NUM_F_LIMIT;
+ if (type & NUM_F_LEAKPROOF)
+ CURRENT_STMT_METRIC_HANDLE->cause_type |= NUM_F_LEAKPROOF;
+}
+
+bool instr_stmt_plan_need_report_cause_type()
+{
+ if (CURRENT_STMT_METRIC_HANDLE == NULL || CURRENT_STMT_METRIC_HANDLE->cause_type == 0)
+ return false;
+
+ return true;
+}
+
+uint32 instr_stmt_plan_get_cause_type()
+{
+ return CURRENT_STMT_METRIC_HANDLE->cause_type;
+}
+
/* **********************************************************************************************
* STANDBY STATEMENG HISTORY FUNCTIONS
diff --git a/src/gausskernel/cbb/instruments/utils/unique_query.cpp b/src/gausskernel/cbb/instruments/utils/unique_query.cpp
index 49f9ca5cd..495b23cdf 100755
--- a/src/gausskernel/cbb/instruments/utils/unique_query.cpp
+++ b/src/gausskernel/cbb/instruments/utils/unique_query.cpp
@@ -372,6 +372,8 @@ void UniqueSql::JumbleRangeTable(pgssJumbleState* jstate, List* rtable)
APP_JUMB(rte->partitionOid);
} else if (rte->isContainSubPartition && OidIsValid(rte->subpartitionOid)) {
APP_JUMB(rte->subpartitionOid);
+ } else {
+ APP_JUMB(rte->relid);
}
} else {
APP_JUMB(rte->relid);
diff --git a/src/gausskernel/cbb/utils/aes/cipherfn.cpp b/src/gausskernel/cbb/utils/aes/cipherfn.cpp
index 99b6d3721..fb8ad56a9 100644
--- a/src/gausskernel/cbb/utils/aes/cipherfn.cpp
+++ b/src/gausskernel/cbb/utils/aes/cipherfn.cpp
@@ -46,6 +46,7 @@
#include "openssl/crypto.h"
#include "utils/evp_cipher.h"
+#include "utils/guc_security.h"
#ifdef ENABLE_UT
#define static
@@ -887,46 +888,49 @@ static GS_UCHAR* getECKeyString(KeyMode mode)
* This function use aes128 to encrypt plain text; key comes from certificate file
*
* @IN src_plain_text: source plain text to be encrypted
- * @OUT dest_cipher_text: dest buffer to be filled with encrypted string, this buffer
- * should be given by caller
- * @IN dest_cipher_length: dest buffer length which is given by the caller
* @IN mode: key mode
- * @RETURN: void
+ * @RETURN: dest buffer to be filled with encrypted string, caller should reset and free the buffer
*/
-void encryptECString(char* src_plain_text, char* dest_cipher_text, uint32 dest_cipher_length, int mode)
+char *encryptECString(char* src_plain_text, int mode)
{
+#define RANDOM_STR_NUM 4
+#define BASE64_INPUT_PER_GROUP_BYTE 3
+#define BASE64_OUTPUT_PER_GROUP_BYTE 4
GS_UINT32 ciphertextlen = 0;
- GS_UCHAR ciphertext[1024];
GS_UCHAR* cipherkey = NULL;
char* encodetext = NULL;
errno_t ret = EOK;
- if (NULL == src_plain_text) {
- return;
- }
-
/* First, get encrypt key */
cipherkey = getECKeyString((KeyMode)mode);
- /* Clear cipher buffer which will be used later */
- ret = memset_s(ciphertext, sizeof(ciphertext), 0, sizeof(ciphertext));
- securec_check(ret, "\0", "\0");
+ /*
+ * Calculate the max length of ciphertext:
+ * 1. contain aes-expand length, IV length, Salt length and backup length. Add mac text length.
+ * (refer to gs_encrypt_aes128_function)
+ * 2. calculate then length after base64((len / 3 + 1) * 4)
+ * 3. add encrypt prefix len and ending symbol len
+ */
+ GS_UINT32 cipherTextMaxLen = (GS_UINT32)(((strlen(src_plain_text) + RANDOM_LEN * RANDOM_STR_NUM + MAC_LEN) /
+ BASE64_INPUT_PER_GROUP_BYTE + 1) * BASE64_OUTPUT_PER_GROUP_BYTE + strlen(EC_ENCRYPT_PREFIX) + 1);
+ char *ciphertext = (char*)palloc0(cipherTextMaxLen);
/*
* Step-1: Cipher
* src_text with cipher key -> cipher text
*/
- ciphertextlen = (GS_UINT32)sizeof(ciphertext);
+ ciphertextlen = cipherTextMaxLen;
if (!gs_encrypt_aes_128((GS_UCHAR*)src_plain_text, cipherkey,
- (GS_UINT32)strlen((const char*)cipherkey), ciphertext, &ciphertextlen)) {
+ (GS_UINT32)strlen((const char*)cipherkey), (GS_UCHAR*)ciphertext, &ciphertextlen)) {
ret = memset_s(src_plain_text, strlen(src_plain_text), 0, strlen(src_plain_text));
- securec_check(ret, "\0", "\0");
- ret = memset_s(ciphertext, sizeof(ciphertext), 0, sizeof(ciphertext));
+ securec_check(ret, "\0", "\0");
+ ret = memset_s(ciphertext, cipherTextMaxLen, 0, cipherTextMaxLen);
securec_check(ret, "\0", "\0");
ret = memset_s(cipherkey, RANDOM_LEN + 1, 0, RANDOM_LEN + 1);
securec_check(ret, "\0", "\0");
pfree_ext(src_plain_text);
pfree_ext(cipherkey);
+ pfree_ext(ciphertext);
ereport(
ERROR, (errcode(ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), errmsg("encrypt the EC string failed!")));
}
@@ -935,10 +939,10 @@ void encryptECString(char* src_plain_text, char* dest_cipher_text, uint32 dest_c
* Step-2: Encode
* cipher text by Base64 -> encode text
*/
- encodetext = SEC_encodeBase64((char*)ciphertext, ciphertextlen + RANDOM_LEN);
+ encodetext = SEC_encodeBase64(ciphertext, ciphertextlen + RANDOM_LEN);
/* Check dest buffer length */
- if (encodetext == NULL || dest_cipher_length < strlen(EC_ENCRYPT_PREFIX) + strlen(encodetext) + 1) {
+ if (encodetext == NULL || cipherTextMaxLen < strlen(EC_ENCRYPT_PREFIX) + strlen(encodetext) + 1) {
if (encodetext != NULL) {
ret = memset_s(encodetext, strlen(encodetext), 0, strlen(encodetext));
securec_check(ret, "\0", "\0");
@@ -947,22 +951,23 @@ void encryptECString(char* src_plain_text, char* dest_cipher_text, uint32 dest_c
}
ret = memset_s(src_plain_text, strlen(src_plain_text), 0, strlen(src_plain_text));
securec_check(ret, "\0", "\0");
- ret = memset_s(ciphertext, sizeof(ciphertext), 0, sizeof(ciphertext));
+ ret = memset_s(ciphertext, cipherTextMaxLen, 0, cipherTextMaxLen);
securec_check(ret, "\0", "\0");
ret = memset_s(cipherkey, RANDOM_LEN + 1, 0, RANDOM_LEN + 1);
securec_check(ret, "\0", "\0");
pfree_ext(src_plain_text);
pfree_ext(cipherkey);
+ pfree_ext(ciphertext);
ereport(ERROR,
(errcode(ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION),
errmsg("Encrypt EC internal error: dest cipher length is too short.")));
}
/* Copy the encrypt string into the dest buffer */
- ret = memcpy_s(dest_cipher_text, dest_cipher_length, EC_ENCRYPT_PREFIX, strlen(EC_ENCRYPT_PREFIX));
+ ret = memcpy_s(ciphertext, cipherTextMaxLen, EC_ENCRYPT_PREFIX, strlen(EC_ENCRYPT_PREFIX));
securec_check(ret, "\0", "\0");
- ret = memcpy_s(dest_cipher_text + strlen(EC_ENCRYPT_PREFIX),
- dest_cipher_length - strlen(EC_ENCRYPT_PREFIX),
+ ret = memcpy_s(ciphertext + strlen(EC_ENCRYPT_PREFIX),
+ cipherTextMaxLen - strlen(EC_ENCRYPT_PREFIX),
encodetext,
strlen(encodetext) + 1);
securec_check(ret, "\0", "\0");
@@ -970,14 +975,13 @@ void encryptECString(char* src_plain_text, char* dest_cipher_text, uint32 dest_c
/* Clear buffer for safety's sake */
ret = memset_s(encodetext, strlen(encodetext), 0, strlen(encodetext));
securec_check(ret, "\0", "\0");
- ret = memset_s(ciphertext, sizeof(ciphertext), 0, sizeof(ciphertext));
- securec_check(ret, "\0", "\0");
ret = memset_s(cipherkey, RANDOM_LEN + 1, 0, RANDOM_LEN + 1);
securec_check(ret, "\0", "\0");
OPENSSL_free(encodetext);
encodetext = NULL;
pfree_ext(cipherkey);
+ return ciphertext;
}
/*
@@ -987,13 +991,11 @@ void encryptECString(char* src_plain_text, char* dest_cipher_text, uint32 dest_c
*
* @IN src_cipher_text: source cipher text to be decrypted
* @OUT dest_plain_text: dest buffer to be filled with plain text, this buffer
- * should be given by caller
- * @IN dest_plain_length: dest buffer length which is given by the caller
+ * should be reset and freed by the caller
* @IN mode: key mode
* @RETURN: bool, true if encrypt success, false if not
*/
-bool decryptECString(const char* src_cipher_text, char* dest_plain_text,
- uint32 dest_plain_length, int mode)
+bool decryptECString(const char* src_cipher_text, char** dest_plain_text, int mode)
{
GS_UCHAR* ciphertext = NULL;
GS_UINT32 ciphertextlen = 0;
@@ -1035,25 +1037,9 @@ bool decryptECString(const char* src_cipher_text, char* dest_plain_text,
ERROR, (errcode(ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), errmsg("decrypt the EC string failed!")));
}
- /* Check dest buffer length */
- if (plaintextlen > dest_plain_length) {
- ret = memset_s(plaintext, plaintextlen, 0, plaintextlen);
- securec_check(ret, "\0", "\0");
- ret = memset_s(cipherkey, RANDOM_LEN + 1, 0, RANDOM_LEN + 1);
- securec_check(ret, "\0", "\0");
- ret = memset_s(ciphertext, ciphertextlen, 0, ciphertextlen);
- securec_check(ret, "\0", "\0");
- pfree_ext(plaintext);
- pfree_ext(cipherkey);
- OPENSSL_free(ciphertext);
- ciphertext = NULL;
- ereport(ERROR,
- (errcode(ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION),
- errmsg("Decrypt EC internal error: dest plain length is too short.")));
- }
-
+ *dest_plain_text = (char*)palloc0(plaintextlen + 1);
/* Copy the decrypted string into the dest buffer */
- ret = memcpy_s(dest_plain_text, dest_plain_length, plaintext, plaintextlen);
+ ret = memcpy_s(*dest_plain_text, plaintextlen + 1, plaintext, plaintextlen);
securec_check(ret, "\0", "\0");
/* Clear the buffer for safety's sake */
@@ -1102,7 +1088,6 @@ void EncryptGenericOptions(List* options, const char** sensitiveOptionsArray, in
{
int i;
char* srcString = NULL;
- char encryptString[EC_CIPHER_TEXT_LENGTH] = {0};
errno_t ret;
ListCell* cell = NULL;
bool isPrint = false;
@@ -1138,15 +1123,26 @@ void EncryptGenericOptions(List* options, const char** sensitiveOptionsArray, in
break;
}
+ /*
+ * Currently we only check the length in subscription mode, because a datasource or user mapping
+ * could store a password for database A or B, whose length we cannot predict.
+ */
+ if (mode == (int)SUBSCRIPTION_MODE && (int)strlen(srcString) > MAX_PASSWORD_LENGTH) {
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("Password can't contain more than %d characters.", MAX_PASSWORD_LENGTH)));
+ }
+
/* Encrypt the src string */
- encryptECString(srcString, encryptString, EC_CIPHER_TEXT_LENGTH, mode);
+ char *encryptString = encryptECString(srcString, mode);
/* Substitute the src */
def->arg = (Node*)makeString(pstrdup(encryptString));
/* Clear the encrypted string */
- ret = memset_s(encryptString, sizeof(encryptString), 0, sizeof(encryptString));
+ ret = memset_s(encryptString, strlen(encryptString), 0, strlen(encryptString));
securec_check(ret, "\0", "\0");
+ pfree_ext(encryptString);
/* Clear the src string */
ret = memset_s(srcString, strlen(srcString), 0, strlen(srcString));
@@ -1190,20 +1186,21 @@ void DecryptOptions(List *options, const char** sensitiveOptionsArray, int array
for (int i = 0; i < arrayLength; i++) {
if (pg_strcasecmp(def->defname, sensitiveOptionsArray[i]) == 0) {
- char plainText[EC_CIPHER_TEXT_LENGTH] = {0};
+ char *plainText = NULL;
/*
* If decryptECString return false, it means the stored values is not encrypted.
* This happened when user mapping was created in old gaussdb version.
*/
- if (decryptECString(str, plainText, EC_CIPHER_TEXT_LENGTH, mode)) {
+ if (decryptECString(str, &plainText, mode)) {
pfree_ext(str);
pfree_ext(def->arg);
def->arg = (Node*)makeString(pstrdup(plainText));
/* Clear buffer */
- errno_t errCode = memset_s(plainText, EC_CIPHER_TEXT_LENGTH, 0, EC_CIPHER_TEXT_LENGTH);
+ errno_t errCode = memset_s(plainText, strlen(plainText), 0, strlen(plainText));
securec_check(errCode, "\0", "\0");
+ pfree_ext(plainText);
}
break;
diff --git a/src/gausskernel/cbb/utils/partition/partitionkey.cpp b/src/gausskernel/cbb/utils/partition/partitionkey.cpp
index 6239c5f66..b3819c842 100644
--- a/src/gausskernel/cbb/utils/partition/partitionkey.cpp
+++ b/src/gausskernel/cbb/utils/partition/partitionkey.cpp
@@ -50,11 +50,10 @@
* Note :
* Review : xuzhongqing 67238
*/
-#define constIsMaxValue(value) ((value)->ismaxvalue)
-
static Oid GetPartitionOidFromPartitionKeyValuesList(Relation rel, List *partitionKeyValuesList, ParseState *pstate,
RangeTblEntry *rte);
static void CheckPartitionValuesList(Relation rel, List *subPartitionKeyValuesList);
+static char* ListRowBoundaryGetString(RowExpr* bound, const bool* isTimestamptz);
Datum transformPartitionBoundary(List* bondary, const bool* isTimestamptz)
{
@@ -178,10 +177,17 @@ Datum transformListBoundary(List* bondary, const bool* isTimestamptz)
errno_t rc = 0;
Datum datumValue = (Datum)0;
- Assert(nodeTag(partKeyFld) == T_Const);
+ Assert(nodeTag(partKeyFld) == T_Const || nodeTag(partKeyFld) == T_RowExpr);
maxValueItem = (Const*)partKeyFld;
- if (!constIsMaxValue(maxValueItem)) {
+ if (IsA(partKeyFld, RowExpr)) {
+ /*
+ * Outputs a set of key values in the bounds of a multikey list partition as a cstring array,
+ * and then outputs the array as text. This text will be an element of the boundary array.
+ */
+ maxValue = ListRowBoundaryGetString((RowExpr*)partKeyFld, isTimestamptz);
+ astate = accumArrayResult(astate, CStringGetTextDatum(maxValue), false, TEXTOID, CurrentMemoryContext);
+ } else if (!constIsMaxValue(maxValueItem)) {
/* get outfunc for consttype, excute the corresponding typeout function
* * transform Const->constvalue into string format.
* */
@@ -270,7 +276,7 @@ List* untransformPartitionBoundary(Datum options)
return result;
}
-int partitonKeyCompare(Const** value1, Const** value2, int len)
+int partitonKeyCompare(Const** value1, Const** value2, int len, bool nullEqual)
{
uint8 i = 0;
int compare = 0;
@@ -297,10 +303,19 @@ int partitonKeyCompare(Const** value1, Const** value2, int len)
break;
}
- if (v1->constisnull && v2->constisnull)
+ if (v1->constisnull && v2->constisnull) {
+ /*
+ * A list partition key value can be null. In some cases, two null consts should be considered equal,
+ * such as when checking list partition boundary values.
+ */
+ if (nullEqual) {
+ compare = 0;
+ continue;
+ }
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
errmsg("null value can not be compared with null value.")));
+ }
if (v1->constisnull || v2->constisnull) {
compare = (v1->constisnull) ? 1 : -1;
break;
@@ -469,6 +484,9 @@ bool GetPartitionOidForRTE(RangeTblEntry* rte, RangeVar* relation, ParseState* p
return false;
}
+ /* cannot lock the heap in case of deadlock, so we need to process invalidation messages here */
+ AcceptInvalidationMessages();
+
/* relation is not partitioned table. */
if (!rte->ispartrel || rte->relkind != RELKIND_RELATION) {
ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE),
@@ -477,10 +495,10 @@ bool GetPartitionOidForRTE(RangeTblEntry* rte, RangeVar* relation, ParseState* p
} else {
/* relation is partitioned table, from clause is partition (partition_name). */
if (PointerIsValid(relation->partitionname)) {
- partitionOid = partitionNameGetPartitionOid(rte->relid,
+ partitionOid = PartitionNameGetPartitionOid(rte->relid,
relation->partitionname,
PART_OBJ_TYPE_TABLE_PARTITION,
- AccessShareLock,
+ NoLock,
true,
false,
NULL,
@@ -522,12 +540,12 @@ static Oid GetPartitionOidFromPartitionKeyValuesList(Relation rel, List *partiti
listPartDef = makeNode(ListPartitionDefState);
listPartDef->boundary = (List *)copyObject(partitionKeyValuesList);
listPartDef->boundary = transformListPartitionValue(pstate, listPartDef->boundary, false, true);
- listPartDef->boundary = transformIntoTargetType(
- rel->rd_att->attrs, (((ListPartitionMap *)rel->partMap)->partitionKey)->values[0], listPartDef->boundary);
+ listPartDef->boundary = transformConstIntoTargetType(
+ rel->rd_att->attrs, ((ListPartitionMap *)rel->partMap)->partitionKey, listPartDef->boundary);
rte->plist = listPartDef->boundary;
- partitionOid = partitionValuesGetPartitionOid(rel, listPartDef->boundary, AccessShareLock, true, true, false);
+ partitionOid = PartitionValuesGetPartitionOid(rel, listPartDef->boundary, NoLock, true, true, false);
pfree_ext(listPartDef);
} else if (rel->partMap->type == PART_TYPE_HASH) {
@@ -540,7 +558,7 @@ static Oid GetPartitionOidFromPartitionKeyValuesList(Relation rel, List *partiti
rte->plist = hashPartDef->boundary;
- partitionOid = partitionValuesGetPartitionOid(rel, hashPartDef->boundary, AccessShareLock, true, true, false);
+ partitionOid = PartitionValuesGetPartitionOid(rel, hashPartDef->boundary, NoLock, true, true, false);
pfree_ext(hashPartDef);
} else if (rel->partMap->type == PART_TYPE_RANGE || rel->partMap->type == PART_TYPE_INTERVAL) {
@@ -555,7 +573,7 @@ static Oid GetPartitionOidFromPartitionKeyValuesList(Relation rel, List *partiti
rte->plist = rangePartDef->boundary;
- partitionOid = partitionValuesGetPartitionOid(rel, rangePartDef->boundary, AccessShareLock, true, true, false);
+ partitionOid = PartitionValuesGetPartitionOid(rel, rangePartDef->boundary, NoLock, true, true, false);
pfree_ext(rangePartDef);
} else {
@@ -651,6 +669,9 @@ bool GetSubPartitionOidForRTE(RangeTblEntry *rte, RangeVar *relation, ParseState
return false;
}
+ /* cannot lock the heap in case of deadlock, so we need to process invalidation messages here */
+ AcceptInvalidationMessages();
+
/* relation is not partitioned table. */
if (!rte->ispartrel || rte->relkind != RELKIND_RELATION || !RelationIsSubPartitioned(rel)) {
ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE),
@@ -659,10 +680,10 @@ bool GetSubPartitionOidForRTE(RangeTblEntry *rte, RangeVar *relation, ParseState
} else {
/* relation is partitioned table, from clause is subpartition (subpartition_name). */
if (PointerIsValid(relation->subpartitionname)) {
- subPartitionOid = partitionNameGetPartitionOid(rte->relid,
+ subPartitionOid = SubPartitionNameGetSubPartitionOid(rte->relid,
relation->subpartitionname,
- PART_OBJ_TYPE_TABLE_SUB_PARTITION,
- AccessShareLock,
+ NoLock,
+ NoLock,
true,
false,
NULL,
@@ -686,13 +707,13 @@ bool GetSubPartitionOidForRTE(RangeTblEntry *rte, RangeVar *relation, ParseState
SplitValuesList(relation->partitionKeyValuesList, &partitionKeyValuesList, &subPartitionKeyValuesList, rel);
partitionOid = GetPartitionOidFromPartitionKeyValuesList(rel, partitionKeyValuesList, pstate, rte);
tmpList = rte->plist;
- Partition part = partitionOpen(rel, partitionOid, AccessShareLock);
+ Partition part = partitionOpen(rel, partitionOid, NoLock);
Relation partRel = partitionGetRelation(rel, part);
CheckPartitionValuesList(partRel, subPartitionKeyValuesList);
subPartitionOid =
GetPartitionOidFromPartitionKeyValuesList(partRel, subPartitionKeyValuesList, pstate, rte);
releaseDummyRelation(&partRel);
- partitionClose(rel, part, AccessShareLock);
+ partitionClose(rel, part, NoLock);
rte->plist = list_concat(tmpList, rte->plist);
}
rte->partitionOid = partitionOid;
@@ -715,12 +736,15 @@ void GetPartitionOidListForRTE(RangeTblEntry *rte, RangeVar *relation)
Oid partitionOid;
Oid subpartitionOid;
+ /* cannot lock the heap in case of deadlock, so we need to process invalidation messages here */
+ AcceptInvalidationMessages();
+
foreach(cell, relation->partitionNameList) {
const char* name = strVal(lfirst(cell));
- partitionOid = partitionNameGetPartitionOid(rte->relid,
+ partitionOid = PartitionNameGetPartitionOid(rte->relid,
name,
PART_OBJ_TYPE_TABLE_PARTITION,
- AccessShareLock,
+ NoLock,
true,
false,
NULL,
@@ -735,10 +759,10 @@ void GetPartitionOidListForRTE(RangeTblEntry *rte, RangeVar *relation)
}
/* name is not a partiton name, try to get oid using it as a subpartition. */
- subpartitionOid = partitionNameGetPartitionOid(rte->relid,
+ subpartitionOid = SubPartitionNameGetSubPartitionOid(rte->relid,
name,
- PART_OBJ_TYPE_TABLE_SUB_PARTITION,
- AccessShareLock,
+ NoLock,
+ NoLock,
true,
false,
NULL,
@@ -757,4 +781,109 @@ void GetPartitionOidListForRTE(RangeTblEntry *rte, RangeVar *relation)
rte->partitionOidList = lappend_oid(rte->partitionOidList, partitionOid);
rte->subpartitionOidList = lappend_oid(rte->subpartitionOidList, subpartitionOid);
}
+}
+
+/* function to check whether two partKey are identical */
+int ConstCompareWithNull(Const *c1, Const *c2)
+{
+ if (constIsNull(c1) && constIsNull(c2)) {
+ return 0;
+ }
+ if (constIsNull(c1) || constIsNull(c2)) {
+ return (c1->constisnull) ? -1 : 1;
+ }
+
+ int compare = -1;
+ constCompare(c1, c2, compare);
+
+ return compare;
+}
+
+int ListPartKeyCompare(PartitionKey* k1, PartitionKey* k2)
+{
+ if (k1->count != k2->count) {
+ return (k1->count < k2->count) ? 1 : -1;
+ }
+ if (constIsMaxValue(k1->values[0]) || constIsMaxValue(k2->values[0])) {
+ if (constIsMaxValue(k1->values[0]) && constIsMaxValue(k2->values[0])) {
+ return 0;
+ } else {
+ return constIsMaxValue(k1->values[0]) ? 1 : -1;
+ }
+ }
+ int res;
+ for (int i = 0; i < k1->count; i++) {
+ res = ConstCompareWithNull(k1->values[i], k2->values[i]);
+ if (res != 0) {
+ return res;
+ }
+ }
+ return 0;
+}
+
+static char* ConstBondaryGetString(Const* con, bool isTimestamptz)
+{
+ char* result;
+ int16 typlen = 0;
+ bool typbyval = false;
+ char typalign;
+ char typdelim;
+ Oid typioparam = InvalidOid;
+ Oid outfunc = InvalidOid;
+
+ /*
+ * get the outfunc for consttype, then execute the corresponding typeout
+ * function to transform Const->constvalue into string format.
+ */
+ get_type_io_data(con->consttype,
+ IOFunc_output,
+ &typlen,
+ &typbyval,
+ &typalign,
+ &typdelim,
+ &typioparam,
+ &outfunc);
+ result = DatumGetCString(OidFunctionCall1Coll(outfunc, con->constcollid, con->constvalue));
+
+ if (isTimestamptz) {
+ int tmp = u_sess->time_cxt.DateStyle;
+ u_sess->time_cxt.DateStyle = USE_ISO_DATES;
+ Datum datumValue = DirectFunctionCall3(
+ timestamptz_in, CStringGetDatum(result), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1));
+ pfree_ext(result);
+ result = DatumGetCString(DirectFunctionCall1(timestamptz_out, datumValue));
+ u_sess->time_cxt.DateStyle = tmp;
+ }
+
+ return result;
+}
+
+static char* ListRowBoundaryGetString(RowExpr* bound, const bool* isTimestamptz)
+{
+ ArrayBuildState* astate = NULL;
+ ListCell* cell = NULL;
+ Node* item = NULL;
+ char* outValue = NULL;
+ int partKeyIdx = 0;
+ FmgrInfo flinfo;
+ Datum result;
+
+ foreach (cell, bound->args) {
+ item = (Node*)lfirst(cell);
+ Assert(IsA(item, Const));
+ if (constIsNull((Const*)item)) { /* const in RowExpr may be NULL but will not be DEFAULT */
+ astate = accumArrayResult(astate, (Datum)NULL, true, CSTRINGOID, CurrentMemoryContext);
+ } else {
+ outValue = ConstBondaryGetString((Const*)item, isTimestamptz[partKeyIdx]);
+ astate = accumArrayResult(astate, CStringGetDatum(outValue), false, CSTRINGOID, CurrentMemoryContext);
+ }
+ }
+ result = makeArrayResult(astate, CurrentMemoryContext);
+ /* output array string */
+ errno_t rc = memset_s(&flinfo, sizeof(FmgrInfo), 0, sizeof(FmgrInfo));
+ securec_check(rc, "\0", "\0");
+ flinfo.fn_mcxt = CurrentMemoryContext;
+ flinfo.fn_addr = array_out;
+ result = FunctionCall1(&flinfo, result);
+ return DatumGetCString(result);
}
\ No newline at end of file
diff --git a/src/gausskernel/cbb/utils/partition/partitionlocate.cpp b/src/gausskernel/cbb/utils/partition/partitionlocate.cpp
index a3785b597..252227259 100755
--- a/src/gausskernel/cbb/utils/partition/partitionlocate.cpp
+++ b/src/gausskernel/cbb/utils/partition/partitionlocate.cpp
@@ -33,23 +33,25 @@
#include "utils/partitionkey.h"
bool isPartKeyValuesInListPartition(
- const ListPartitionMap *partMap, Const **partKeyValues, const int partkeyColumnNum, const int partSeq)
+ ListPartitionMap *partMap, Const **partKeyValues, const int partkeyColumnNum, const int partSeq)
{
Assert(partMap && partKeyValues);
Assert(partkeyColumnNum == partMap->partitionKey->dim1);
- Const *v1 = *(partKeyValues);
- Const **boundary = partMap->listElements[partSeq].boundary;
- int len = partMap->listElements[partSeq].len;
- for (int i = 0; i < len; i++) {
- Const *v2 = *((boundary) + i);
- int compare = 0;
- constCompare(v1, v2, compare);
- if (compare == 0) {
- return true;
- }
+ int sourcePartSeq = -1;
+ Oid sourceOid = getListPartitionOid(&partMap->type, partKeyValues, partkeyColumnNum, &sourcePartSeq, true);
+ if (sourcePartSeq < 0) {
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("Can't find list partition oid when checking tuple is in the partition.")));
+ }
+
+ Oid targetOid = partMap->listElements[partSeq].partitionOid;
+ if (sourceOid == targetOid) {
+ return true;
+ } else {
+ return false;
}
- return false;
}
bool isPartKeyValuesInHashPartition(Relation partTableRel, const HashPartitionMap *partMap, Const **partKeyValues,
diff --git a/src/gausskernel/cbb/utils/partition/partitionmap.cpp b/src/gausskernel/cbb/utils/partition/partitionmap.cpp
old mode 100644
new mode 100755
index b9e7a9cfa..6ff967727
--- a/src/gausskernel/cbb/utils/partition/partitionmap.cpp
+++ b/src/gausskernel/cbb/utils/partition/partitionmap.cpp
@@ -88,8 +88,6 @@
#define Int2VectorSize(n) (offsetof(int2vector, values) + (n) * sizeof(int2))
-#define constIsMaxValue(value) ((value)->ismaxvalue)
-
#define int_cmp_partition(arg1, arg2, compare) \
do { \
if ((arg1) < (arg2)) { \
@@ -333,59 +331,62 @@ void constCompare(Const* value1, Const* value2, int& compare)
}
}
-#define BuildRangeElement(range, type, typelen, relid, attrno, tuple, desc, isInter) \
- do { \
- Assert(PointerIsValid(range)); \
- Assert(PointerIsValid(type) && PointerIsValid(attrno)); \
- Assert(PointerIsValid(tuple) && PointerIsValid(desc)); \
- Assert((attrno)->dim1 <= RANGE_PARTKEYMAXNUM); \
- Assert((attrno)->dim1 == (typelen)); \
- unserializePartitionStringAttribute((range)->boundary, \
- RANGE_PARTKEYMAXNUM, \
- (type), \
- (typelen), \
- (relid), \
- (attrno), \
- (tuple), \
- Anum_pg_partition_boundaries, \
- (desc)); \
- (range)->partitionOid = HeapTupleGetOid(tuple); \
- (range)->len = (typelen); \
- (range)->isInterval = (isInter); \
+#define BuildRangeElement(range, type, typelen, relid, attrno, tuple, partitionno, desc, isInter) \
+ do { \
+ Assert(PointerIsValid(range)); \
+ Assert(PointerIsValid(type) && PointerIsValid(attrno)); \
+ Assert(PointerIsValid(tuple) && PointerIsValid(desc)); \
+ Assert((attrno)->dim1 <= RANGE_PARTKEYMAXNUM); \
+ Assert((attrno)->dim1 == (typelen)); \
+ unserializePartitionStringAttribute((range)->boundary, \
+ RANGE_PARTKEYMAXNUM, \
+ (type), \
+ (typelen), \
+ (relid), \
+ (attrno), \
+ (tuple), \
+ Anum_pg_partition_boundaries, \
+ (desc)); \
+ (range)->partitionOid = HeapTupleGetOid(tuple); \
+ (range)->partitionno = (partitionno); \
+ (range)->len = (typelen); \
+ (range)->isInterval = (isInter); \
} while (0)
-#define buildListElement(range, type, typelen, relid, attrno, tuple, desc) \
- do { \
- Assert(PointerIsValid(range)); \
- Assert(PointerIsValid(type) && PointerIsValid(attrno)); \
- Assert(PointerIsValid(tuple) && PointerIsValid(desc)); \
- Assert((attrno)->dim1 == (typelen)); \
- unserializeListPartitionAttribute(&((range)->len), \
- &((range)->boundary), \
- (type), \
- (typelen), \
- (relid), \
- (attrno), \
- (tuple), \
- Anum_pg_partition_boundaries, \
- (desc)); \
- (range)->partitionOid = HeapTupleGetOid(tuple); \
+#define buildListElement(range, type, typelen, relid, attrno, tuple, partitionno, desc) \
+ do { \
+ Assert(PointerIsValid(range)); \
+ Assert(PointerIsValid(type) && PointerIsValid(attrno)); \
+ Assert(PointerIsValid(tuple) && PointerIsValid(desc)); \
+ Assert((attrno)->dim1 == (typelen)); \
+ unserializeListPartitionAttribute(&((range)->len), \
+ &((range)->boundary), \
+ (type), \
+ (typelen), \
+ (relid), \
+ (attrno), \
+ (tuple), \
+ Anum_pg_partition_boundaries, \
+ (desc)); \
+ (range)->partitionOid = HeapTupleGetOid(tuple); \
+ (range)->partitionno = (partitionno); \
} while (0)
-#define buildHashElement(range, type, typelen, relid, attrno, tuple, desc) \
- do { \
- Assert(PointerIsValid(range)); \
- Assert(PointerIsValid(type) && PointerIsValid(attrno)); \
- Assert(PointerIsValid(tuple) && PointerIsValid(desc)); \
- Assert((attrno)->dim1 == (typelen)); \
- unserializeHashPartitionAttribute((range)->boundary, \
- RANGE_PARTKEYMAXNUM, \
- (relid), \
- (attrno), \
- (tuple), \
- Anum_pg_partition_boundaries, \
- (desc)); \
- (range)->partitionOid = HeapTupleGetOid(tuple); \
+#define buildHashElement(range, type, typelen, relid, attrno, tuple, partitionno, desc) \
+ do { \
+ Assert(PointerIsValid(range)); \
+ Assert(PointerIsValid(type) && PointerIsValid(attrno)); \
+ Assert(PointerIsValid(tuple) && PointerIsValid(desc)); \
+ Assert((attrno)->dim1 == (typelen)); \
+ unserializeHashPartitionAttribute((range)->boundary, \
+ RANGE_PARTKEYMAXNUM, \
+ (relid), \
+ (attrno), \
+ (tuple), \
+ Anum_pg_partition_boundaries, \
+ (desc)); \
+ (range)->partitionOid = HeapTupleGetOid(tuple); \
+ (range)->partitionno = (partitionno); \
} while (0)
static void RebuildListPartitionMap(ListPartitionMap* oldMap, ListPartitionMap* newMap);
@@ -406,6 +407,52 @@ static bool EqualListPartElement(const ListPartElement element1, const ListPartE
static bool EqualHashPartitonMap(const HashPartitionMap* partMap1, const HashPartitionMap* partMap2);
static bool EqualHashPartElement(const HashPartElement element1, const HashPartElement element2);
+static Const* MakeListPartitionBoundaryConst(Oid relid, AttrNumber attrnum, Oid type, Datum strvalue, bool isnull,
+ bool null_as_default)
+{
+ int16 typlen = 0;
+ bool typbyval = false;
+ char typalign;
+ char typdelim;
+ Oid typioparam = InvalidOid;
+ Oid func = InvalidOid;
+ Oid typid = InvalidOid;
+ Oid typelem = InvalidOid;
+ Oid typcollation = InvalidOid;
+ int32 typmod = -1;
+ Datum value;
+
+ /* get the oid/mod/collation/ of column i */
+ get_atttypetypmodcoll(relid, attrnum, &typid, &typmod, &typcollation);
+
+ /* deal with null */
+ if (isnull) {
+ if (null_as_default) {
+ return makeMaxConst(typid, typmod, typcollation);
+ } else {
+ return makeNullConst(typid, typmod, typcollation);
+ }
+ }
+
+ /*
+ * for interval column in pg_partition, typmod above is not correct
+ * have to get typmod from 'intervalvalue(typmod)'
+ */
+ if (INTERVALOID == type) {
+ typmod = -1;
+ }
+
+ /* get the typein function's oid of current type */
+ get_type_io_data(type, IOFunc_input, &typlen, &typbyval, &typalign, &typdelim, &typioparam, &func);
+ typelem = get_element_type(type);
+
+ /* now call the typein function with collation,string, element_type, typemod
+ * as it's parameters.
+ */
+ value = OidFunctionCall3Coll(func, typcollation, strvalue, ObjectIdGetDatum(typelem), Int32GetDatum(typmod));
+ /* save the output values */
+ return makeConst(typid, typmod, typcollation, typlen, value, false, typbyval);
+}
/*
* @@GaussDB@@
* Brief :
@@ -491,7 +538,7 @@ void unserializePartitionStringAttribute(Const** outMaxValue, int outMaxValueLen
list_free_ext(boundary);
}
-void unserializeListPartitionAttribute(int *len, Const*** listValues, Oid* partKeyDataType,
+void unserializeListPartitionAttribute(int *len, PartitionKey** listValues, Oid* partKeyDataType,
int partKeyDataTypeLen, Oid relid, int2vector* partKeyAttrNo, HeapTuple partition_tuple, int att_num,
TupleDesc pg_partition_tupledsc)
{
@@ -500,6 +547,11 @@ void unserializeListPartitionAttribute(int *len, Const*** listValues, Oid* partK
List* boundary = NULL;
ListCell* cell = NULL;
int counter = 0;
+ FmgrInfo flinfo;
+ errno_t rc = memset_s(&flinfo, sizeof(FmgrInfo), 0, sizeof(FmgrInfo));
+ securec_check(rc, "\0", "\0");
+ flinfo.fn_mcxt = CurrentMemoryContext;
+ flinfo.fn_addr = array_in;
Assert(partKeyDataTypeLen == partKeyAttrNo->dim1);
@@ -515,57 +567,40 @@ void unserializeListPartitionAttribute(int *len, Const*** listValues, Oid* partK
boundary = untransformPartitionBoundary(attribute_raw_value);
*len = list_length(boundary);
- *listValues = (Const**)palloc0(sizeof(Const*) * (*len));
- Const** values = *listValues;
+ *listValues = (PartitionKey*)palloc0(sizeof(PartitionKey) * (*len));
+ PartitionKey* partKey = *listValues;
/* Now, for each max value item, call it's typin function, save it in datum */
counter = 0;
foreach (cell, boundary) {
- int16 typlen = 0;
- bool typbyval = false;
- char typalign;
- char typdelim;
- Oid typioparam = InvalidOid;
- Oid func = InvalidOid;
- Oid typid = InvalidOid;
- Oid typelem = InvalidOid;
- Oid typcollation = InvalidOid;
- int32 typmod = -1;
- Datum value;
- Value* list_value = NULL;
- int key_count = 0;
+ Const** listkeys = NULL;
+ Value* list_value = (Value*)lfirst(cell);
- list_value = (Value*)lfirst(cell);
-
- /* get the oid/mod/collation/ of column i */
- get_atttypetypmodcoll(relid, partKeyAttrNo->values[key_count], &typid, &typmod, &typcollation);
-
- /* deal with null */
- if (!PointerIsValid(list_value->val.str)) {
- values[counter++] = makeMaxConst(typid, typmod, typcollation);
- continue;
+ if (partKeyAttrNo->dim1 == 1 || !PointerIsValid(list_value->val.str)) {
+ /* Single-key partition or default partition */
+ listkeys = (Const**)palloc0(sizeof(Const*));
+ listkeys[0] = MakeListPartitionBoundaryConst(relid, partKeyAttrNo->values[0], partKeyDataType[0],
+ CStringGetDatum(list_value->val.str), !PointerIsValid(list_value->val.str), true);
+ partKey[counter].count = 1;
+ } else {
+ /* Multi-keys partition */
+ int keycount = 0;
+ Datum* keyvalues = NULL;
+ bool* keyisnull = NULL;
+ Datum value = FunctionCall3(&flinfo, CStringGetDatum(list_value->val.str), CSTRINGOID, Int32GetDatum(-1));
+ deconstruct_array(DatumGetArrayTypeP(value), CSTRINGOID, -2, false, 'c', &keyvalues, &keyisnull, &keycount);
+ Assert(keycount == partKeyAttrNo->dim1);
+ listkeys = (Const**)palloc0(sizeof(Const*) * keycount);
+ for (int i = 0; i < keycount; i++) {
+ listkeys[i] = MakeListPartitionBoundaryConst(
+ relid, partKeyAttrNo->values[i], partKeyDataType[i], keyvalues[i], keyisnull[i], false);
+ }
+ pfree_ext(keyvalues);
+ pfree_ext(keyisnull);
+ pfree(DatumGetArrayTypeP(value));
+ partKey[counter].count = keycount;
}
-
- /*
- * for interval column in pg_partition, typmod above is not correct
- * have to get typmod from 'intervalvalue(typmod)'
- */
- if (INTERVALOID == partKeyDataType[key_count]) {
- typmod = -1;
- }
-
- /* get the typein function's oid of current type */
- get_type_io_data(
- partKeyDataType[key_count], IOFunc_input, &typlen, &typbyval, &typalign, &typdelim, &typioparam, &func);
- typelem = get_element_type(partKeyDataType[key_count]);
-
- /* now call the typein function with collation,string, element_type, typemod
- * as it's parameters.
- */
- value = OidFunctionCall3Coll(
- func, typcollation, CStringGetDatum(list_value->val.str), ObjectIdGetDatum(typelem), Int32GetDatum(typmod));
- /* save the output values */
- values[counter++] = makeConst(typid, typmod, typcollation, typlen, value, false, typbyval);
+ partKey[counter++].values = listkeys;
}
list_free_ext(boundary);
}
@@ -1091,9 +1126,14 @@ static bool EqualListPartElement(const ListPartElement element1, const ListPartE
return false;
}
for (int i = 0; i < element1.len; i++) {
- if (!equal(element1.boundary[i], element2.boundary[i])) {
+ if (element1.boundary[i].count != element2.boundary[i].count) {
return false;
}
+ for (int j = 0; j < element1.boundary[i].count; j++) {
+ if (!equal(element1.boundary[i].values[j], element2.boundary[i].values[j])) {
+ return false;
+ }
+ }
}
return true;
@@ -1146,59 +1186,49 @@ static bool EqualHashPartElement(const HashPartElement element1, const HashPartE
return true;
}
-#define PARTITIONMAP_SWAPFIELD(fieldType, fieldName) \
- do { \
- fieldType _temp = oldMap->fieldName; \
- oldMap->fieldName = newMap->fieldName; \
- newMap->fieldName = _temp; \
- } while (0);
-
void RebuildRangePartitionMap(RangePartitionMap* oldMap, RangePartitionMap* newMap)
{
RangePartitionMap tempMap;
errno_t rc = 0;
-
+ /*
+ * Only the partMap memory needs to be swapped. The pointers on the partMap do not need to be swapped deeply.
+ */
rc = memcpy_s(&tempMap, sizeof(RangePartitionMap), newMap, sizeof(RangePartitionMap));
securec_check(rc, "\0", "\0");
rc = memcpy_s(newMap, sizeof(RangePartitionMap), oldMap, sizeof(RangePartitionMap));
securec_check(rc, "\0", "\0");
rc = memcpy_s(oldMap, sizeof(RangePartitionMap), &tempMap, sizeof(RangePartitionMap));
securec_check(rc, "\0", "\0");
-
- PARTITIONMAP_SWAPFIELD(int2vector*, partitionKey);
- PARTITIONMAP_SWAPFIELD(Oid*, partitionKeyDataType);
}
static void RebuildHashPartitionMap(HashPartitionMap* oldMap, HashPartitionMap* newMap)
{
HashPartitionMap tempMap;
errno_t rc = 0;
-
+ /*
+ * Only the partMap memory needs to be swapped. The pointers on the partMap do not need to be swapped deeply.
+ */
rc = memcpy_s(&tempMap, sizeof(HashPartitionMap), newMap, sizeof(HashPartitionMap));
securec_check(rc, "\0", "\0");
rc = memcpy_s(newMap, sizeof(HashPartitionMap), oldMap, sizeof(HashPartitionMap));
securec_check(rc, "\0", "\0");
rc = memcpy_s(oldMap, sizeof(HashPartitionMap), &tempMap, sizeof(HashPartitionMap));
securec_check(rc, "\0", "\0");
-
- PARTITIONMAP_SWAPFIELD(int2vector*, partitionKey);
- PARTITIONMAP_SWAPFIELD(Oid*, partitionKeyDataType);
}
static void RebuildListPartitionMap(ListPartitionMap* oldMap, ListPartitionMap* newMap)
{
ListPartitionMap tempMap;
errno_t rc = 0;
-
+ /*
+ * Only the partMap memory needs to be swapped. The pointers on the partMap do not need to be swapped deeply.
+ */
rc = memcpy_s(&tempMap, sizeof(ListPartitionMap), newMap, sizeof(ListPartitionMap));
securec_check(rc, "\0", "\0");
rc = memcpy_s(newMap, sizeof(ListPartitionMap), oldMap, sizeof(ListPartitionMap));
securec_check(rc, "\0", "\0");
rc = memcpy_s(oldMap, sizeof(ListPartitionMap), &tempMap, sizeof(ListPartitionMap));
securec_check(rc, "\0", "\0");
-
- PARTITIONMAP_SWAPFIELD(int2vector*, partitionKey);
- PARTITIONMAP_SWAPFIELD(Oid*, partitionKeyDataType);
}
ListPartElement* CopyListElements(ListPartElement* src, int elementNum)
@@ -1208,15 +1238,21 @@ ListPartElement* CopyListElements(ListPartElement* src, int elementNum)
Size sizeRet = sizeof(ListPartElement) * elementNum;
ListPartElement* ret = NULL;
errno_t rc = 0;
+ int keyCount;
ret = (ListPartElement*)palloc0(sizeRet);
rc = memcpy_s(ret, sizeRet, src, sizeRet);
securec_check(rc, "\0", "\0");
for (i = 0; i < elementNum; i++) {
- ret[i].boundary = (Const**)palloc0(sizeof(Const*)*src[i].len);
+ ret[i].boundary = (PartitionKey*)palloc0(sizeof(PartitionKey) * src[i].len);
for (j = 0; j < src[i].len; j++) {
- ret[i].boundary[j] = (Const*)copyObject(src[i].boundary[j]);
+ keyCount = src[i].boundary[j].count;
+ ret[i].boundary[j].count = keyCount;
+ ret[i].boundary[j].values = (Const**)palloc0(sizeof(Const*) * keyCount);
+ for (int k = 0; k < keyCount; k++) {
+ ret[i].boundary[j].values[k] = (Const*)copyObject(src[i].boundary[j].values[k]);
+ }
}
}
@@ -1233,16 +1269,19 @@ void DestroyListElements(ListPartElement* src, int elementNum)
for (i = 0; i < elementNum; i++) {
part = &(src[i]);
for (j = 0; j < part->len; j++) {
- value = part->boundary[j];
- if (PointerIsValid(value)) {
- if (!value->constbyval && !value->constisnull &&
- PointerIsValid(DatumGetPointer(value->constvalue))) {
- pfree(DatumGetPointer(value->constvalue));
- }
+ for (int k = 0; k < part->boundary[j].count; k++) {
+ value = part->boundary[j].values[k];
+ if (PointerIsValid(value)) {
+ if (!value->constbyval && !value->constisnull &&
+ PointerIsValid(DatumGetPointer(value->constvalue))) {
+ pfree(DatumGetPointer(value->constvalue));
+ }
- pfree_ext(value);
- value = NULL;
+ pfree_ext(value);
+ value = NULL;
+ }
}
+ pfree_ext(part->boundary[j].values);
}
pfree_ext(part->boundary);
}
@@ -1339,7 +1378,7 @@ void DestroyPartitionMap(PartitionMap* partMap)
if (range_map->rangeElements) {
partitionMapDestroyRangeArray(range_map->rangeElements, range_map->rangeElementsNum);
}
- } else if (partMap->type == PART_TYPE_LIST) {
+ } else if (partMap->type == PART_TYPE_LIST) {
ListPartitionMap* list_map = (ListPartitionMap*)(partMap);
if (list_map->partitionKey) {
pfree_ext(list_map->partitionKey);
@@ -1510,8 +1549,8 @@ static char* CheckPartExprKey(HeapTuple partitioned_tuple, Relation pg_partition
return partkeystr;
}
-static void BuildElementForPartKeyExpr(void* element, HeapTuple partTuple, TupleDesc pgPartitionTupledsc,
- char* partkeystr, char partstrategy)
+static void BuildElementForPartKeyExpr(void* element, HeapTuple partTuple, int partitionno,
+ TupleDesc pgPartitionTupledsc, char* partkeystr, char partstrategy)
{
RangeElement* rangeEle = NULL;
ListPartElement* listEle = NULL;
@@ -1539,14 +1578,17 @@ static void BuildElementForPartKeyExpr(void* element, HeapTuple partTuple, Tuple
rangeEle->isInterval = false;
rangeEle->len = 1;
rangeEle->partitionOid = HeapTupleGetOid(partTuple);
+ rangeEle->partitionno = partitionno;
} else if (PART_STRATEGY_LIST == partstrategy) {
listEle = (ListPartElement*)element;
listEle->len = list_length(boundary);
- listEle->boundary = (Const**)palloc0(sizeof(Const*) * listEle->len);
+ listEle->boundary = (PartitionKey*)palloc0(sizeof(PartitionKey) * listEle->len);
listEle->partitionOid = HeapTupleGetOid(partTuple);
+ listEle->partitionno = partitionno;
} else if (PART_STRATEGY_HASH == partstrategy) {
hashEle = (HashPartElement*)element;
hashEle->partitionOid = HeapTupleGetOid(partTuple);
+ hashEle->partitionno = partitionno;
} else {
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("The partstrategy %c is not supported", partstrategy)));
}
@@ -1584,10 +1626,14 @@ static void BuildElementForPartKeyExpr(void* element, HeapTuple partTuple, Tuple
max_value = (Value*)lfirst(cell);
/* deal with null */
if (!PointerIsValid(max_value->val.str) && (PART_STRATEGY_HASH != partstrategy)) {
- if (PART_STRATEGY_RANGE == partstrategy)
+ if (PART_STRATEGY_RANGE == partstrategy) {
rangeEle->boundary[counter++] = makeMaxConst(typoid, typmod, typcollation);
- else
- listEle->boundary[counter++] = makeMaxConst(typoid, typmod, typcollation);
+ } else {
+ listEle->boundary[counter].count = 1; /* There is only one expression partkey. */
+ listEle->boundary[counter].values = (Const**)palloc0(sizeof(Const*));
+ listEle->boundary[counter].values[0] = makeMaxConst(typoid, typmod, typcollation);
+ counter++;
+ }
continue;
}
@@ -1599,9 +1645,13 @@ static void BuildElementForPartKeyExpr(void* element, HeapTuple partTuple, Tuple
/* save the output values */
if (PART_STRATEGY_RANGE == partstrategy)
rangeEle->boundary[counter++] = makeConst(typoid, typmod, typcollation, typlen, value, false, typbyval);
- else if (PART_STRATEGY_LIST == partstrategy)
- listEle->boundary[counter++] = makeConst(typoid, typmod, typcollation, typlen, value, false, typbyval);
- else
+ else if (PART_STRATEGY_LIST == partstrategy) {
+ listEle->boundary[counter].count = 1; /* There is only one expression partkey. */
+ listEle->boundary[counter].values = (Const**)palloc0(sizeof(Const*));
+ listEle->boundary[counter].values[0] =
+ makeConst(typoid, typmod, typcollation, typlen, value, false, typbyval);
+ counter++;
+ } else
hashEle->boundary[counter++] = makeConst(typoid, -1, InvalidOid, sizeof(int32), value, false, true);
}
list_free_ext(boundary);
@@ -1672,6 +1722,25 @@ static void BuildListPartitionMap(Relation relation, Form_pg_partition partition
errmsg("Fail to build partitionmap for partitioned table \"%u\"", partition_form->parentid),
errdetail("Incorrect partition strategy for partition %u", HeapTupleGetOid(partition_tuple))));
}
+
+ bool isNull;
+ Datum datum = heap_getattr(partition_tuple,
+ RelationIsPartitionOfSubPartitionTable(relation) ? Anum_pg_partition_subpartitionno :
+ Anum_pg_partition_partitionno,
+ RelationGetDescr(pg_partition),
+ &isNull);
+ int partitionno = INVALID_PARTITION_NO;
+ if (!isNull) {
+ partitionno = DatumGetInt32(datum);
+ if (partitionno <= 0) {
+ ereport(ERROR, (errcode(ERRCODE_PARTITION_ERROR),
+ errmsg("Fail to build partitionmap for partitioned table \"%u\"", partition_form->parentid),
+ errdetail("Incorrect partitionno %d for partition %u", partitionno,
+ HeapTupleGetOid(partition_tuple))));
+ }
+ }
+ PARTITIONNO_VALID_ASSERT(partitionno);
+
if (!partkeystr || (pg_strcasecmp(partkeystr, "") == 0)) {
buildListElement(&(list_eles[list_itr]),
list_map->partitionKeyDataType,
@@ -1679,10 +1748,11 @@ static void BuildListPartitionMap(Relation relation, Form_pg_partition partition
rootPartitionOid,
list_map->partitionKey,
partition_tuple,
+ partitionno,
RelationGetDescr(pg_partition));
} else {
- BuildElementForPartKeyExpr(&(list_eles[list_itr]), partition_tuple, RelationGetDescr(pg_partition),
- partkeystr, PART_STRATEGY_LIST);
+ BuildElementForPartKeyExpr(&(list_eles[list_itr]), partition_tuple, partitionno,
+ RelationGetDescr(pg_partition), partkeystr, PART_STRATEGY_LIST);
}
list_itr++;
@@ -1718,7 +1788,7 @@ bool CheckHashPartitionMap(HashPartElement* hash_eles, int len)
}
// Constvalue must be in reverse order due to design issues.
for (int i = 0; i < len; i++) {
- if (DatumGetInt32(hash_eles[len - 1 - i].boundary[0]->constvalue) != i) {
+ if (DatumGetInt32(hash_eles[i].boundary[0]->constvalue) != i) {
return false;
}
}
@@ -1788,6 +1858,25 @@ static void BuildHashPartitionMap(Relation relation, Form_pg_partition partition
errmsg("Fail to build partitionmap for partitioned table \"%u\"", partition_form->parentid),
errdetail("Incorrect partition strategy for partition %u", HeapTupleGetOid(partition_tuple))));
}
+
+ bool isNull;
+ Datum datum = heap_getattr(partition_tuple,
+ RelationIsPartitionOfSubPartitionTable(relation) ? Anum_pg_partition_subpartitionno :
+ Anum_pg_partition_partitionno,
+ RelationGetDescr(pg_partition),
+ &isNull);
+ int partitionno = INVALID_PARTITION_NO;
+ if (!isNull) {
+ partitionno = DatumGetInt32(datum);
+ if (partitionno <= 0) {
+ ereport(ERROR, (errcode(ERRCODE_PARTITION_ERROR),
+ errmsg("Fail to build partitionmap for partitioned table \"%u\"", partition_form->parentid),
+ errdetail("Incorrect partitionno %d for partition %u", partitionno,
+ HeapTupleGetOid(partition_tuple))));
+ }
+ }
+ PARTITIONNO_VALID_ASSERT(partitionno);
+
if (!partkeystr || (pg_strcasecmp(partkeystr, "") == 0)) {
buildHashElement(&(hash_eles[hash_itr]),
hash_map->partitionKeyDataType,
@@ -1795,10 +1884,11 @@ static void BuildHashPartitionMap(Relation relation, Form_pg_partition partition
rootPartitionOid,
hash_map->partitionKey,
partition_tuple,
+ partitionno,
RelationGetDescr(pg_partition));
} else {
- BuildElementForPartKeyExpr(&(hash_eles[hash_itr]), partition_tuple, RelationGetDescr(pg_partition),
- partkeystr, PART_STRATEGY_HASH);
+ BuildElementForPartKeyExpr(&(hash_eles[hash_itr]), partition_tuple, partitionno,
+ RelationGetDescr(pg_partition), partkeystr, PART_STRATEGY_HASH);
}
hash_itr++;
@@ -1898,6 +1988,25 @@ static void buildRangePartitionMap(Relation relation, Form_pg_partition partitio
errmsg("Fail to build partitionmap for partitioned table \"%u\"", partition_form->parentid),
errdetail("Incorrect partition strategy for partition %u", HeapTupleGetOid(partition_tuple))));
}
+
+ bool isNull;
+ Datum datum = heap_getattr(partition_tuple,
+ RelationIsPartitionOfSubPartitionTable(relation) ? Anum_pg_partition_subpartitionno :
+ Anum_pg_partition_partitionno,
+ RelationGetDescr(pg_partition),
+ &isNull);
+ int partitionno = INVALID_PARTITION_NO;
+ if (!isNull) {
+ partitionno = DatumGetInt32(datum);
+ if (partitionno <= 0) {
+ ereport(ERROR, (errcode(ERRCODE_PARTITION_ERROR),
+ errmsg("Fail to build partitionmap for partitioned table \"%u\"", partition_form->parentid),
+ errdetail("Incorrect partitionno %d for partition %u", partitionno,
+ HeapTupleGetOid(partition_tuple))));
+ }
+ }
+ PARTITIONNO_VALID_ASSERT(partitionno);
+
if (!partkeystr || (pg_strcasecmp(partkeystr, "") == 0)) {
BuildRangeElement(&(range_eles[range_itr]),
range_map->partitionKeyDataType,
@@ -1905,11 +2014,12 @@ static void buildRangePartitionMap(Relation relation, Form_pg_partition partitio
rootPartitionOid,
range_map->partitionKey,
partition_tuple,
+ partitionno,
RelationGetDescr(pg_partition),
partition_form->partstrategy == PART_STRATEGY_INTERVAL);
} else {
- BuildElementForPartKeyExpr(&(range_eles[range_itr]), partition_tuple, RelationGetDescr(pg_partition),
- partkeystr, PART_STRATEGY_RANGE);
+ BuildElementForPartKeyExpr(&(range_eles[range_itr]), partition_tuple, partitionno,
+ RelationGetDescr(pg_partition), partkeystr, PART_STRATEGY_RANGE);
}
range_itr++;
@@ -1935,7 +2045,7 @@ Const* transformDatum2ConstForPartKeyExpr(PartitionMap* partMap, Datum datumValu
if (partMap->type == PART_TYPE_RANGE)
boundary = ((RangePartitionMap*)partMap)->rangeElements[0].boundary[0];
else if (partMap->type == PART_TYPE_LIST)
- boundary = ((ListPartitionMap*)partMap)->listElements[0].boundary[0];
+ boundary = ((ListPartitionMap*)partMap)->listElements[0].boundary[0].values[0];
else if (partMap->type == PART_TYPE_HASH)
boundary = ((HashPartitionMap*)partMap)->hashElements[0].boundary[0];
else
@@ -2040,11 +2150,24 @@ List* getListPartitionBoundaryList(Relation rel, int sequence)
incre_partmap_refcount(rel->partMap);
if (sequence >= 0 && sequence < partMap->listElementsNum) {
int i = 0;
- Const** srcBound = partMap->listElements[sequence].boundary;
+ PartitionKey* partKeys = partMap->listElements[sequence].boundary;
int len = partMap->listElements[sequence].len;
for (i = 0; i < len; i++) {
- result = lappend(result, (Const*)copyObject(srcBound[i]));
+ if (partKeys[i].count == 1 || constIsMaxValue(partKeys[i].values[0])) {
+ /* Single-key partition or default partition */
+ result = lappend(result, (Const*)copyObject(partKeys[i].values[0]));
+ continue;
+ }
+ RowExpr* boundRow = makeNode(RowExpr);
+ boundRow->row_typeid = InvalidOid; /* not analyzed yet */
+ boundRow->colnames = NIL; /* to be filled in during analysis */
+ boundRow->row_format = COERCE_IMPLICIT_CAST; /* abuse */
+ boundRow->location = 0;
+ for (int j = 0; j < partKeys[i].count; j++) {
+ boundRow->args = lappend(boundRow->args, (Const*)copyObject(partKeys[i].values[j]));
+ }
+ result = lappend(result, boundRow);
}
} else {
decre_partmap_refcount(rel->partMap);
@@ -2192,14 +2315,13 @@ Oid getRangePartitionOid(PartitionMap *partitionmap, Const** partKeyValue, int32
return result;
}
-Oid getListPartitionOid(PartitionMap* partMap, Const** partKeyValue, int32* partSeq, bool topClosed)
+Oid getListPartitionOid(PartitionMap* partMap, Const** partKeyValue, int partKeyCount, int32* partSeq, bool topClosed)
{
ListPartitionMap* listPartMap = NULL;
Oid result = InvalidOid;
- int keyNums = 0;
int hit = -1;
- int compare = 0;
- Const** boundary = NULL;
+ PartitionKey partKey;
+ PartitionKey* boundary = NULL;
Oid defaultPartitionOid = InvalidOid;
bool existDefaultPartition = false;
int defaultPartitionHit = -1;
@@ -2209,21 +2331,21 @@ Oid getListPartitionOid(PartitionMap* partMap, Const** partKeyValue, int32* part
incre_partmap_refcount(partMap);
listPartMap = (ListPartitionMap*)(partMap);
- keyNums = listPartMap->partitionKey->dim1;
+ partKey.values = partKeyValue;
+ partKey.count = partKeyCount;
int i = 0;
while (i < listPartMap->listElementsNum && hit < 0) {
boundary = listPartMap->listElements[i].boundary;
int list_len = listPartMap->listElements[i].len;
- if (list_len == 1 && ((Const*)boundary[0])->ismaxvalue) {
+ if (list_len == 1 && ((Const*)boundary[0].values[0])->ismaxvalue) {
defaultPartitionOid = listPartMap->listElements[i].partitionOid;
existDefaultPartition = true;
defaultPartitionHit = i;
}
int j = 0;
while (j < list_len) {
- partitonKeyCompareForRouting(partKeyValue, boundary + j, (uint32)keyNums, compare);
- if (compare == 0) {
+ if (ListPartKeyCompare(&partKey, &boundary[j]) == 0) {
hit = i;
break;
}
@@ -2278,6 +2400,7 @@ Oid getHashPartitionOid(PartitionMap* partMap, Const** partKeyValue, int32* part
}
hit = hash_value % (uint32)(hashPartMap->hashElementsNum);
+ hit = hashPartMap->hashElementsNum - hit - 1;
if (PointerIsValid(partSeq)) {
*partSeq = hit;
@@ -2330,15 +2453,6 @@ static Const* CalcLowBoundary(const Const* upBoundary, Interval* intervalValue)
void getFakeReationForPartitionOid(HTAB **fakeRels, MemoryContext cxt, Relation rel, Oid partOid,
Relation *fakeRelation, Partition *partition, LOCKMODE lmode)
{
- if (!OidIsValid(partOid)) {
- ereport(ERROR, (errcode(ERRCODE_RELATION_OPEN_ERROR), errmsg("could not open partition with OID %u", partOid),
- errdetail("Check whether DDL operations exist on the current partition in the table %s, like "
- "drop/exchange/split/merge partition",
- RelationGetRelationName(rel)),
- errcause("If there is a DDL operation, the cause is incorrect operation. Otherwise, it is a system error."),
- erraction("Wait for DDL operation to complete or Contact engineer to support.")));
- }
-
PartRelIdCacheKey _key = {partOid, -1};
Relation partParentRel = rel;
if (PointerIsValid(*partition)) {
@@ -2740,7 +2854,7 @@ int ListElementCmp(const void* a, const void* b)
const ListPartElement* reb = (const ListPartElement*)b;
/* just compare the first boundary */
- return partitonKeyCompare((Const**)rea->boundary, (Const**)reb->boundary, 1);
+ return ListPartKeyCompare(&rea->boundary[0], &reb->boundary[0]);
}
int HashElementCmp(const void* a, const void* b)
@@ -2751,9 +2865,9 @@ int HashElementCmp(const void* a, const void* b)
int32 constvalue1 = DatumGetInt32((Const*)rea->boundary[0]->constvalue);
int32 constvalue2 = DatumGetInt32((Const*)reb->boundary[0]->constvalue);
if (constvalue1 < constvalue2) {
- return 1;
- } else if (constvalue1 > constvalue2) {
return -1;
+ } else if (constvalue1 > constvalue2) {
+ return 1;
} else {
return 0;
}
@@ -2790,6 +2904,9 @@ int GetSubPartitionNumber(Relation rel)
int result = getPartitionNumber(map);
Oid partOid = InvalidOid;
int subPartNum = 0;
+
+ AcceptInvalidationMessages();
+
for (int conuter = 0; conuter < result; ++conuter) {
if (map->type == PART_TYPE_LIST) {
partOid = ((ListPartitionMap *)map)->listElements[conuter].partitionOid;
@@ -2798,11 +2915,11 @@ int GetSubPartitionNumber(Relation rel)
} else {
partOid = ((RangePartitionMap *)map)->rangeElements[conuter].partitionOid;
}
- Partition part = partitionOpen(rel, partOid, AccessShareLock);
+ Partition part = partitionOpen(rel, partOid, NoLock);
Relation partRel = partitionGetRelation(rel, part);
subPartNum += getPartitionNumber(partRel->partMap);
releaseDummyRelation(&partRel);
- partitionClose(rel, part, AccessShareLock);
+ partitionClose(rel, part, NoLock);
}
return subPartNum;
@@ -2890,7 +3007,7 @@ Oid GetNeedDegradToRangePartOid(Relation rel, Oid partOid)
return InvalidOid;
}
-bool trySearchFakeReationForPartitionOid(HTAB** fakeRels, MemoryContext cxt, Relation rel, Oid partOid,
+bool trySearchFakeReationForPartitionOid(HTAB** fakeRels, MemoryContext cxt, Relation rel, Oid partOid, int partitionno,
Relation* fakeRelation, Partition* partition, LOCKMODE lmode, bool checkSubPart)
{
PartRelIdCacheKey _key = {partOid, -1};
@@ -2899,18 +3016,29 @@ bool trySearchFakeReationForPartitionOid(HTAB** fakeRels, MemoryContext cxt, Rel
if (PointerIsValid(*partition)) {
return false;
}
- if (checkSubPart && RelationIsSubPartitioned(rel) && !RelationIsIndex(rel)) {
- Oid parentOid = partid_get_parentid(partOid);
- if (!OidIsValid(parentOid)) {
+
+ Oid parentOid = partid_get_parentid(partOid);
+ if (!OidIsValid(parentOid)) {
+ if (PartitionGetMetadataStatus(partOid, false) != PART_METADATA_INVISIBLE) {
ereport(ERROR,
(errcode(ERRCODE_RELATION_OPEN_ERROR),
errmsg("partition %u does not exist", partOid),
errdetail("this partition may have already been dropped")));
}
+
+ /* this partOid has just been dropped, we try to search the new partOid, if not found, just return */
+ partOid = InvisiblePartidGetNewPartid(partOid);
+ parentOid = partid_get_parentid(partOid);
+ if (!OidIsValid(parentOid)) {
+ return false;
+ }
+ }
+
+ if (checkSubPart && RelationIsSubPartitioned(rel) && !RelationIsIndex(rel)) {
if (parentOid != rel->rd_id) {
Partition partForSubPart = NULL;
- bool res = trySearchFakeReationForPartitionOid(fakeRels, cxt, rel, parentOid, &partRelForSubPart,
- &partForSubPart, lmode, false);
+ bool res = trySearchFakeReationForPartitionOid(fakeRels, cxt, rel, parentOid, INVALID_PARTITION_NO,
+ &partRelForSubPart, &partForSubPart, lmode, false);
if (!res) {
return false;
}
@@ -2925,14 +3053,7 @@ bool trySearchFakeReationForPartitionOid(HTAB** fakeRels, MemoryContext cxt, Rel
if (PointerIsValid(*fakeRels)) {
FakeRelationIdCacheLookup((*fakeRels), _key, *fakeRelation, *partition);
if (!RelationIsValid(*fakeRelation)) {
- *partition = tryPartitionOpen(partParentRel, partOid, lmode);
- if (*partition == NULL) {
- PartStatus currStatus = PartitionGetMetadataStatus(partOid, false);
- if (currStatus != PART_METADATA_INVISIBLE) {
- ReportPartitionOpenError(partParentRel, partOid);
- }
- return false;
- }
+ *partition = PartitionOpenWithPartitionno(partParentRel, partOid, partitionno, lmode);
*fakeRelation = partitionGetRelation(partParentRel, *partition);
FakeRelationCacheInsert((*fakeRels), (*fakeRelation), (*partition), -1);
}
@@ -2947,17 +3068,43 @@ bool trySearchFakeReationForPartitionOid(HTAB** fakeRels, MemoryContext cxt, Rel
ctl.hcxt = cxt;
*fakeRels = hash_create("fakeRelationCache by OID", FAKERELATIONCACHESIZE, &ctl,
HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
- *partition = tryPartitionOpen(partParentRel, partOid, lmode);
- if (*partition == NULL) {
- PartStatus currStatus = PartitionGetMetadataStatus(partOid, false);
- if (currStatus != PART_METADATA_INVISIBLE) {
- ReportPartitionOpenError(partParentRel, partOid);
- }
- return false;
- }
+ *partition = PartitionOpenWithPartitionno(partParentRel, partOid, partitionno, lmode);
*fakeRelation = partitionGetRelation(partParentRel, *partition);
FakeRelationCacheInsert((*fakeRels), (*fakeRelation), (*partition), -1);
}
return true;
-}
\ No newline at end of file
+}
+
+/* Transform the Const value into the target type of the partkey column; do nothing if the types already match.
+ * If the type differs from the partkey column's, it may result in a wrong hash value. For example,
+ * hash('123 '::varchar) is different from hash('123'::char). */
+Const **transformConstIntoPartkeyType(FormData_pg_attribute *attrs, int2vector *partitionKey, Const **boundary, int len)
+{
+ if (unlikely(partitionKey->dim1 != len)) {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("number of boundary items NOT EQUAL to number of partition keys")));
+ }
+
+ int2 partKeyPos = 0;
+ Const *newBoundary = NULL;
+ for (int i = 0; i < len; i++) {
+ partKeyPos = partitionKey->values[i];
+
+ if (likely(attrs[partKeyPos - 1].atttypid == boundary[i]->consttype) || boundary[i]->ismaxvalue) {
+ continue;
+ }
+
+ newBoundary = (Const *)GetTargetValue(&attrs[partKeyPos - 1], boundary[i], false);
+ if (!PointerIsValid(newBoundary)) {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+ errmsg("partition key value must be const or const-evaluable expression")));
+ }
+ if (!OidIsValid(newBoundary->constcollid) && OidIsValid(attrs[partKeyPos - 1].attcollation)) {
+ newBoundary->constcollid = attrs[partKeyPos - 1].attcollation;
+ }
+ boundary[i] = newBoundary;
+ }
+
+ return boundary;
+}
diff --git a/src/gausskernel/dbmind/db4ai/commands/create_model.cpp b/src/gausskernel/dbmind/db4ai/commands/create_model.cpp
old mode 100644
new mode 100755
index 5920d5900..67b783c7c
--- a/src/gausskernel/dbmind/db4ai/commands/create_model.cpp
+++ b/src/gausskernel/dbmind/db4ai/commands/create_model.cpp
@@ -166,10 +166,22 @@ PlannedStmt *plan_create_model(CreateModelStmt *stmt, const char *query_string,
query = setup_for_create_model(query, query_string, params);
#ifndef ENABLE_MULTIPLE_NODES
AutoDopControl dopControl;
- dopControl.CloseSmp();
-#endif
+ PG_TRY();
+ {
+ dopControl.CloseSmp();
+ /* plan the query */
+ plan = pg_plan_query(query, 0, params);
+ }
+ PG_CATCH();
+ {
+ dopControl.ResetSmp();
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+#else
/* plan the query */
plan = pg_plan_query(query, 0, params);
+#endif
// Inject the GradientDescent node at the root of the plan
DestReceiverTrainModel *dest_train_model = (DestReceiverTrainModel *)dest;
diff --git a/src/gausskernel/dbmind/db4ai/executor/algorithms/bayes/bayes_network_internal.cpp b/src/gausskernel/dbmind/db4ai/executor/algorithms/bayes/bayes_network_internal.cpp
index 3337dbf9a..66b1f39c4 100644
--- a/src/gausskernel/dbmind/db4ai/executor/algorithms/bayes/bayes_network_internal.cpp
+++ b/src/gausskernel/dbmind/db4ai/executor/algorithms/bayes/bayes_network_internal.cpp
@@ -201,7 +201,8 @@ void extract_graph(const char *edges_of_network, int num_of_edge, int num_nodes,
}
if (!validateParamEdges(node_pos, edge_size, substr, substr_len)) {
ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("Incorrect Hyperparameters (edges_of_network unmatch with num_edges), parse failed.")));
+ errmsg("Incorrect Hyperparameters (edges_of_network unmatch with num_edges), \
+ parse failed.")));
}
all_edges[node_pos++] = atoi(substr);
start = end + 1;
@@ -218,7 +219,8 @@ void extract_graph(const char *edges_of_network, int num_of_edge, int num_nodes,
substr[substr_len] = '\0';
if (!validateParamEdges(node_pos, edge_size, substr, substr_len) || node_pos != edge_size - 1) {
ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("Incorrect Hyperparameters (edges_of_network should match with num_edges), parse failed.")));
+ errmsg("Incorrect Hyperparameters (edges_of_network should match with num_edges), \
+ parse failed.")));
}
all_edges[node_pos] = atoi(substr);
if (!validateParamNodes(all_edges, edge_size, num_nodes)) {
@@ -333,8 +335,9 @@ static void bayes_handle_tuple(bayesNodeState *bayes_state, ModelTuple *outer_tu
int nattrs = bayes_state->tms.tuple.ncolumns;
for (int i = 1; i < nattrs; ++i) {
if (IsContinuous(bayes_state->attrtype[i])) {
- if (outer_tuple_slot->isnull[i])
+ if (outer_tuple_slot->isnull[i]) {
continue;
+ }
cdep[i][tpos].setCard(DatumGetFloat8(outer_tuple_slot->values[i]));
cdep[i][tpos].setVariance(DatumGetFloat8(outer_tuple_slot->values[i]));
} else {
@@ -379,8 +382,7 @@ static void bayes_calculate_discrete(bayesState *bayes_net_state, DiscreteProbPa
double left_possible_distinct = 1.0 / params->coefficients[params->parentids[col_id - 1]][j];
double right_possible_distinct = 1.0 / params->coefficients[params->node_id][target_index];
double possible_distinct_for_two_col = Max(1.0, left_possible_distinct *
- right_possible_distinct *
- actual_rows / actual_rows_condition);
+ right_possible_distinct);
double sample_ratio = (double)bayes_net_state->num_total_rows /
bayes_net_state->num_sample_rows;
double coeff_jk = 1.0 /
@@ -565,13 +567,23 @@ static void copy_datum_list(bayesDatumList *mcv, bayesDatumList *bin, HTAB *valu
* Overwidth values are assumed to have been distinct.
* ----------
*/
-static uint64_t estimate_ndistinct_from_sample(uint64_t n, uint64_t N, uint64_t d, uint64_t f1)
+static uint64_t estimate_ndistinct_from_sample(double n, double N, double d, double f1)
{
- if (N > 0) {
- return (uint64_t)((double)n*d / (n - f1 + f1*(double)n/N));
- } else {
- return 0;
+    /* If n < 1 the bucket is empty, so the ndistinct estimate
+     * is useless; return 1 as a safe minimum.
+     */
+ if (n < 1) {
+ return 1;
}
+    /* If n >= 1, d must be greater than or equal to 1.
+     */
+ Assert(n >= 1 && d >= 1);
+    /* If n >= N, treat them as equal, since the difference comes from precision errors.
+     */
+ if (n >= N) {
+ return (uint64_t)Max(d, 1.0);
+ }
+ return (uint64_t)(n * d / (n - f1 + f1 * n / N));
}
@@ -699,8 +711,9 @@ static void do_data_descretize(TrainModelState *pstate, bayesState *bayes_net_st
bayes_net_training_vars->coefficients[i] = (double *)palloc0(sizeof(double) *
(bayes_net_state->num_bins + max_possible_extra));
for (int j = 0; j < num_statistic_value; j++) {
- if (entries[j]->value.isnull)
+ if (entries[j]->value.isnull) {
break;
+ }
pos = current_size / size_bin;
Assert(pos <= bayes_net_training_vars->bins[i].size());
if (pos == bayes_net_training_vars->bins[i].size()) {
@@ -709,9 +722,10 @@ static void do_data_descretize(TrainModelState *pstate, bayesState *bayes_net_st
ndistinct = j - value_idx;
bayes_net_training_vars->
coefficients[i][bayes_net_training_vars->bins[i].size() + upper_bound_pos_with_null] = 1.0 /
- estimate_ndistinct_from_sample(bayes_net_state->num_sample_rows,
- bayes_net_state->num_total_rows,
- ndistinct, ndistinct - nmultiple);
+ estimate_ndistinct_from_sample(
+ (double)bayes_net_state->num_sample_rows / bayes_net_state->num_bins,
+ (double)bayes_net_state->num_total_rows / bayes_net_state->num_bins,
+ ndistinct, ndistinct - nmultiple);
value_idx = j;
}
nmultiple = 0;
@@ -723,9 +737,10 @@ static void do_data_descretize(TrainModelState *pstate, bayesState *bayes_net_st
}
ndistinct = num_statistic_value - value_idx;
bayes_net_training_vars->coefficients[i][bayes_net_training_vars->bins[i].size() - 1] = 1.0 /
- estimate_ndistinct_from_sample(bayes_net_state->num_sample_rows,
- bayes_net_state->num_total_rows,
- ndistinct, ndistinct - nmultiple);
+ estimate_ndistinct_from_sample(
+ (double)bayes_net_state->num_sample_rows / bayes_net_state->num_bins,
+ (double)bayes_net_state->num_total_rows / bayes_net_state->num_bins,
+ ndistinct, ndistinct - nmultiple);
const int upper_bound_pos_without_null = -1;
if (num_null > 0) {
// Save the maximal value only once
@@ -752,14 +767,28 @@ static void do_data_descretize(TrainModelState *pstate, bayesState *bayes_net_st
qsort(entries, num_statistic_value, sizeof(ValueMapEntry *), second_cmp);
pos = 0;
bayes_net_training_vars->coefficients[i] = (double *)palloc0(sizeof(double) * (bayes_net_state->num_mcvs + 1));
- while (pos < (uint32_t)bayes_net_state->num_mcvs) {
- bayes_net_training_vars->mcvs[i].push_back(entries[pos]->value);
- bayes_net_training_vars->coefficients[i][pos] = 1.0;
+ unsigned int used_cnt = 0;
+ unsigned int nmultiple = 0;
+ unsigned int ndistinct = 0;
+ while (pos < (uint32_t)num_statistic_value) {
+ if (pos < (uint32_t)bayes_net_state->num_mcvs) {
+ used_cnt += entries[pos]->cnt;
+ bayes_net_training_vars->mcvs[i].push_back(entries[pos]->value);
+ bayes_net_training_vars->coefficients[i][pos] = 1.0;
+ } else if (entries[pos]->cnt > 1) {
+ nmultiple++;
+ } else {
+ break;
+ }
pos++;
}
- bayes_net_training_vars->coefficients[i][bayes_net_state->num_mcvs] = 1.0 /
- (num_statistic_value -
- bayes_net_state->num_mcvs);
+ ndistinct = num_statistic_value - bayes_net_state->num_mcvs;
+ double total_rows_discount =
+ (double)(bayes_net_state->num_sample_rows - used_cnt) / (double)bayes_net_state->num_sample_rows;
+ bayes_net_training_vars->coefficients[i][bayes_net_state->num_mcvs] =
+ 1.0 / estimate_ndistinct_from_sample(bayes_net_state->num_sample_rows - used_cnt,
+ ((double)bayes_net_state->num_total_rows * total_rows_discount),
+ ndistinct, ndistinct - nmultiple);
} else {
if (num_statistic_value > MAX_DISTINCT_NO_BIN_MCV) {
ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_STATUS),
@@ -785,8 +814,9 @@ static void data_descretize(TrainModelState *pstate, bayesState *bayes_net_state
// Get data for discretizing
while (true) {
outer_tuple_slot = pstate->fetch(pstate->callback_data, &pstate->tuple) ? &pstate->tuple : nullptr;
- if (outer_tuple_slot == nullptr)
+ if (outer_tuple_slot == nullptr) {
break;
+ }
bayes_net_training_vars->attrtype = pstate->tuple.typid;
for (int i = 0; i < nattrs; i++) {
uint32_t ndistinct = hash_get_num_entries(bayes_net_training_vars->value_stats[i]);
@@ -844,8 +874,9 @@ static void gaussian_fit(TrainModelState *pstate, bayesState *bayes_net_state,
ModelTuple const *outer_tuple_slot = nullptr;
while (true) {
outer_tuple_slot = pstate->fetch(pstate->callback_data, &pstate->tuple) ? &pstate->tuple : nullptr;
- if (outer_tuple_slot == nullptr)
+ if (outer_tuple_slot == nullptr) {
break;
+ }
for (int nodeid = 0; nodeid < bayes_net_state->num_nodes; nodeid++) {
bayesNodeState *bayes_state = bayes_net_state->bayes_tmss[nodeid];
bayes_state->tms.tuple = select_tuple(bayes_net_training_vars->num_parents[nodeid],
@@ -906,8 +937,9 @@ static void probability_fit(TrainModelState *pstate, bayesState *bayes_net_state
while (!bayes_net_state->done) {
outer_tuple_slot = pstate->fetch(pstate->callback_data, &pstate->tuple) ? &pstate->tuple : nullptr;
- if (outer_tuple_slot == nullptr)
+ if (outer_tuple_slot == nullptr) {
break;
+ }
for (int nodeid = 0; nodeid < bayes_net_state->num_nodes; nodeid++) {
bayesNodeState *bayes_state = bayes_net_state->bayes_tmss[nodeid];
bayes_state->tms.tuple = select_tuple(bayes_net_training_vars->num_parents[nodeid],
@@ -1460,8 +1492,9 @@ void bayes_net_predict_dfs_direct(SerializedModelBayesNet *bayes_net_model, Datu
int ncolumns, int targetid, uint128 *visited, ProbVector **parent_prob)
{
uint128 target = (uint128)1 << targetid;
- if (((*visited) & target) == target)
+ if (((*visited) & target) == target) {
return;
+ }
(*visited) += target;
int num_parents = 0;
int selected_ids[bayes_net_model->num_nodes];
@@ -1500,8 +1533,9 @@ Datum bayes_net_predict_direct(AlgorithmAPI *, ModelPredictor model, Datum *valu
prob *= cum_prob;
}
}
- while ((visited & ((uint128)1 << targetid)) > 0)
+ while ((visited & ((uint128)1 << targetid)) > 0) {
targetid += 1;
+ }
}
for (int i = 0; i < ncolumns; i++) {
if (parent_prob[i]->numProb > 0) {
@@ -1545,8 +1579,9 @@ double bayes_predict_prob(SerializedModelBayesNode *bayes_model, TupleData tup,
double prob_x = 1.0;
for (int i = 1; i < ncolumns; i++) {
- if (IsContinuous(types[i]))
+ if (IsContinuous(types[i])) {
continue;
+ }
ValueInTuple value = create_value(values[i], types[i], isnull[i]);
f_index[i] = findDiscreteIndex(value, bayes_model->featuresmatrix_fornet[i], data_process[selected_ids[i - 1]]);
@@ -1618,8 +1653,9 @@ double bayes_net_predict_dfs(SerializedModelBayesNet *bayes_net_model, Datum *va
int ncolumns, int targetid, uint128 *visited)
{
uint128 target = ((uint128)1 << targetid);
- if (((*visited) & target) == target)
+ if (((*visited) & target) == target) {
return 1.0;
+ }
(*visited) += target;
int num_parents = 0;
int *selected_ids = (int *)palloc0(sizeof(int) * bayes_net_model->num_nodes);
@@ -1648,8 +1684,9 @@ Datum bayes_net_predict(AlgorithmAPI *, ModelPredictor model, Datum *values, boo
double prob = 1.0;
while (targetid < ncolumns) {
prob *= bayes_net_predict_dfs(bayes_net_model, values, isnull, types, ncolumns, targetid, &visited);
- while ((visited & ((uint128)1 << targetid)) > 0)
+ while ((visited & ((uint128)1 << targetid)) > 0) {
targetid += 1;
+ }
}
clock_gettime(CLOCK_MONOTONIC, &exec_end_time);
double predicttime = interval_to_sec(time_diff(&exec_end_time, &exec_start_time));
diff --git a/src/gausskernel/dbmind/kernel/index_advisor.cpp b/src/gausskernel/dbmind/kernel/index_advisor.cpp
old mode 100644
new mode 100755
index bdeb78ac3..b86ce0dc7
--- a/src/gausskernel/dbmind/kernel/index_advisor.cpp
+++ b/src/gausskernel/dbmind/kernel/index_advisor.cpp
@@ -652,6 +652,7 @@ StmtResult *execute_stmt(const char *query_string, bool need_result)
List *parsetree_list = NULL;
ListCell *parsetree_item = NULL;
bool snapshot_set = false;
+ DestReceiver *receiver = NULL;
parsetree_list = pg_parse_query(query_string, NULL);
Assert(list_length(parsetree_list) == 1); // ought to be one query
@@ -660,37 +661,84 @@ StmtResult *execute_stmt(const char *query_string, bool need_result)
AutoDopControl dopControl;
dopControl.CloseSmp();
#endif
- Portal portal = NULL;
+ PG_TRY();
+ {
+ Portal portal = NULL;
+
+ List *querytree_list = NULL;
+ List *plantree_list = NULL;
+ Node *parsetree = (Node *)lfirst(parsetree_item);
+ const char *commandTag = CreateCommandTag(parsetree);
+
+ if (u_sess->utils_cxt.ActiveSnapshot == NULL && analyze_requires_snapshot(parsetree)) {
+ PushActiveSnapshot(GetTransactionSnapshot());
+ snapshot_set = true;
+ }
+
+ querytree_list = pg_analyze_and_rewrite(parsetree, query_string, NULL, 0);
+ plantree_list = pg_plan_queries(querytree_list, 0, NULL);
+
+ if (snapshot_set) {
+ PopActiveSnapshot();
+ }
+
+ portal = CreatePortal(query_string, true, true);
+ portal->visible = false;
+ PortalDefineQuery(portal, NULL, query_string, commandTag, plantree_list, NULL);
+ if (need_result)
+ receiver = create_stmt_receiver();
+ else
+ receiver = CreateDestReceiver(DestNone);
+
+ PortalStart(portal, NULL, 0, NULL);
+ PortalSetResultFormat(portal, 1, &format);
+ (void)PortalRun(portal, FETCH_ALL, true, receiver, receiver, NULL);
+ PortalDrop(portal, false);
+ }
+ PG_CATCH();
+ {
+#ifndef ENABLE_MULTIPLE_NODES
+ dopControl.ResetSmp();
+#endif
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ return (StmtResult *)receiver;
+}
+
+StmtResult *execute_select_into_varlist(Query *parsetree)
+{
+ int16 format = 0;
DestReceiver *receiver = NULL;
- List *querytree_list = NULL;
- List *plantree_list = NULL;
- Node *parsetree = (Node *)lfirst(parsetree_item);
- const char *commandTag = CreateCommandTag(parsetree);
- if (u_sess->utils_cxt.ActiveSnapshot == NULL && analyze_requires_snapshot(parsetree)) {
- PushActiveSnapshot(GetTransactionSnapshot());
- snapshot_set = true;
- }
+ PG_TRY();
+ {
+ Portal portal = NULL;
- querytree_list = pg_analyze_and_rewrite(parsetree, query_string, NULL, 0);
- plantree_list = pg_plan_queries(querytree_list, 0, NULL);
+ List *querytree_list = NULL;
+ List *plantree_list = NULL;
- if (snapshot_set) {
- PopActiveSnapshot();
- }
+ const char *commandTag = CreateCommandTag((Node *)parsetree);
- portal = CreatePortal(query_string, true, true);
- portal->visible = false;
- PortalDefineQuery(portal, NULL, query_string, commandTag, plantree_list, NULL);
- if (need_result)
+ querytree_list = pg_rewrite_query(parsetree);
+ plantree_list = pg_plan_queries(querytree_list, 0, NULL);
+
+ portal = CreatePortal("SELECT_INTO_STMT", true, true);
+ portal->visible = false;
+ PortalDefineQuery(portal, NULL, "SELECT_INTO_STMT", commandTag, plantree_list, NULL);
receiver = create_stmt_receiver();
- else
- receiver = CreateDestReceiver(DestNone);
- PortalStart(portal, NULL, 0, NULL);
- PortalSetResultFormat(portal, 1, &format);
- (void)PortalRun(portal, FETCH_ALL, true, receiver, receiver, NULL);
- PortalDrop(portal, false);
+ PortalStart(portal, NULL, 0, NULL);
+ PortalSetResultFormat(portal, 1, &format);
+ (void)PortalRun(portal, FETCH_ALL, true, receiver, receiver, NULL);
+ PortalDrop(portal, false);
+ }
+ PG_CATCH();
+ {
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
return (StmtResult *)receiver;
}
diff --git a/src/gausskernel/optimizer/aioptimizer/aianalyze.cpp b/src/gausskernel/optimizer/aioptimizer/aianalyze.cpp
index c33dcf3bc..edadbea1d 100644
--- a/src/gausskernel/optimizer/aioptimizer/aianalyze.cpp
+++ b/src/gausskernel/optimizer/aioptimizer/aianalyze.cpp
@@ -534,6 +534,7 @@ bool analyze_compute_bayesnet(int *slot_idx, Relation onerel, AnalyzeMode analyz
spec->stats->staop[*slot_idx] = 0;
(*slot_idx)++;
pfree_ext(model_name.data);
+ tss.delete_tuple();
return true;
}
diff --git a/src/gausskernel/optimizer/commands/Makefile b/src/gausskernel/optimizer/commands/Makefile
index eaa8f21de..68268f422 100644
--- a/src/gausskernel/optimizer/commands/Makefile
+++ b/src/gausskernel/optimizer/commands/Makefile
@@ -28,7 +28,8 @@ OBJS = aggregatecmds.o alter.o analyze.o async.o cluster.o comment.o \
schemacmds.o seclabel.o sec_rls_cmds.o subscriptioncmds.o tablecmds.o tablespace.o trigger.o \
tsearchcmds.o typecmds.o user.o vacuum.o vacuumlazy.o \
variable.o verifyrepair.o verify.o view.o gds_stream.o formatter.o datasourcecmds.o \
- directory.o auto_explain.o shutdown.o
+ directory.o auto_explain.o shutdown.o \
+ eventcmds.o
ifeq ($(enable_lite_mode), no)
OBJS += obs_stream.o
diff --git a/src/gausskernel/optimizer/commands/auto_explain.cpp b/src/gausskernel/optimizer/commands/auto_explain.cpp
index 1cb672aa0..37039dfb4 100644
--- a/src/gausskernel/optimizer/commands/auto_explain.cpp
+++ b/src/gausskernel/optimizer/commands/auto_explain.cpp
@@ -13,6 +13,7 @@
#include "postgres.h"
#include "knl/knl_variable.h"
#include "pgstat.h"
+#include "optimizer/ml_model.h"
#include "optimizer/streamplan.h"
#include "commands/explain.h"
#include "executor/instrument.h"
@@ -248,6 +249,7 @@ void exec_explain_plan(QueryDesc *queryDesc)
appendStringInfo(es.str, "\n---------------------------"
"-NestLevel:%d----------------------------\n", u_sess->exec_cxt.nesting_level);
ExplainQueryText(&es, queryDesc);
+ SetNullPrediction(queryDesc->planstate);
appendStringInfo(es.str, "Name: %s\n", g_instance.attr.attr_common.PGXCNodeName);
ExplainBeginOutput(&es);
MemoryContext current_ctx = CurrentMemoryContext;
@@ -304,6 +306,7 @@ void explain_querydesc(ExplainState *es, QueryDesc *queryDesc)
(IS_PGXC_COORDINATOR) ? "Coordinator" : "Datanode", g_instance.attr.attr_common.PGXCNodeName);
ExplainBeginOutput(es);
+ SetNullPrediction(queryDesc->planstate);
ExplainPrintPlan(es, queryDesc);
ExplainEndOutput(es);
diff --git a/src/gausskernel/optimizer/commands/cluster.cpp b/src/gausskernel/optimizer/commands/cluster.cpp
index 1aef4df27..c59990682 100755
--- a/src/gausskernel/optimizer/commands/cluster.cpp
+++ b/src/gausskernel/optimizer/commands/cluster.cpp
@@ -233,7 +233,7 @@ void cluster(ClusterStmt* stmt, bool isTopLevel)
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("table is not partitioned")));
}
- partOid = partitionNameGetPartitionOid(tableOid,
+ partOid = PartitionNameGetPartitionOid(tableOid,
stmt->relation->partitionname,
PART_OBJ_TYPE_TABLE_PARTITION,
ExclusiveLock,
diff --git a/src/gausskernel/optimizer/commands/copy.cpp b/src/gausskernel/optimizer/commands/copy.cpp
index 5e1541537..9287a44ba 100644
--- a/src/gausskernel/optimizer/commands/copy.cpp
+++ b/src/gausskernel/optimizer/commands/copy.cpp
@@ -234,10 +234,10 @@ static const char BinarySignature[15] = "PGCOPY\n\377\r\n\0";
/* non-export function prototypes */
static CopyState BeginCopy(bool is_from, Relation rel, Node* raw_query, const char* queryString, List* attnamelist,
- List* options, bool is_copy = true);
+ List* options, bool is_copy = true, CopyFileType filetype = S_COPYFILE);
static void EndCopy(CopyState cstate);
-CopyState BeginCopyTo(
- Relation rel, Node* query, const char* queryString, const char* filename, List* attnamelist, List* options);
+CopyState BeginCopyTo(Relation rel, Node* query, const char* queryString,
+ const char* filename, List* attnamelist, List* options, CopyFileType filetype);
void EndCopyTo(CopyState cstate);
uint64 DoCopyTo(CopyState cstate);
static uint64 CopyTo(CopyState cstate, bool isFirst, bool isLast);
@@ -271,6 +271,8 @@ static Datum CopyReadBinaryAttribute(
CopyState cstate, int column_no, FmgrInfo* flinfo, Oid typioparam, int32 typmod, bool* isnull);
static void CopyAttributeOutText(CopyState cstate, char* string);
static void CopyAttributeOutCSV(CopyState cstate, char* string, bool use_quote, bool single_attr);
+static void SelectAttributeIntoOutfile(CopyState cstate, char* string, bool is_optionally, Oid fn_oid);
+static void ProcessEnclosedChar(CopyState cstate, char* cur_char, char enclosedc, char escapedc);
static void CopyNonEncodingAttributeOut(CopyState cstate, char* string, bool use_quote);
List* CopyGetAttnums(TupleDesc tupDesc, Relation rel, List* attnamelist);
List* CopyGetAllAttnums(TupleDesc tupDesc, Relation rel);
@@ -1132,7 +1134,7 @@ uint64 DoCopy(CopyStmt* stmt, const char* queryString)
EndCopyFrom(cstate);
} else {
pgstat_set_stmt_tag(STMTTAG_READ);
- cstate = BeginCopyTo(rel, query, queryString, stmt->filename, stmt->attlist, stmt->options);
+ cstate = BeginCopyTo(rel, query, queryString, stmt->filename, stmt->attlist, stmt->options, stmt->filetype);
cstate->range_table = list_make1(rte);
processed = DoCopyTo(cstate); /* copy from database to file */
EndCopyTo(cstate);
@@ -1385,6 +1387,169 @@ void GetTransSourceStr(CopyState cstate, int beginPos, int endPos)
cstate->transform_query_string = transString;
}
+void ProcessFileOptions(CopyState cstate, bool is_from, List* options, bool is_dumpfile)
+{
+ if (is_dumpfile) {
+ cstate->is_dumpfile = true;
+ cstate->fileformat = FORMAT_TEXT;
+ cstate->is_from = is_from;
+ cstate->file_encoding = -1;
+ cstate->delim = "";
+ cstate->eol = "";
+ cstate->eol_type = EOL_UD;
+ cstate->null_print = "";
+ cstate->null_print_len = strlen(cstate->null_print);
+ if (cstate->mode == MODE_INVALID)
+ cstate->mode = MODE_NORMAL;
+ return;
+ }
+ ListCell* option = NULL;
+ cstate->file_encoding = -1;
+ cstate->fileformat = FORMAT_TEXT;
+ cstate->is_from = false;
+
+ foreach (option, options) {
+ DefElem* defel = (DefElem*)lfirst(option);
+ if (strcmp(defel->defname, "o_enclosed") == 0) {
+ if (cstate->o_enclosed)
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("conflicting or redundant options")));
+ if (strlen(defGetString(defel)) != 1) {
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("enclosed must be a single one-byte character")));
+ }
+ cstate->o_enclosed = defGetString(defel);
+ } else if (strcmp(defel->defname, "enclosed") == 0) {
+ if (cstate->enclosed)
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("conflicting or redundant options")));
+ if (strlen(defGetString(defel)) != 1) {
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("enclosed must be a single one-byte character")));
+ }
+ cstate->enclosed = defGetString(defel);
+ } else if (strcmp(defel->defname, "encoding") == 0) {
+ if (cstate->file_encoding >= 0)
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("conflicting or redundant options")));
+ cstate->file_encoding = pg_char_to_encoding(defGetString(defel));
+ if (cstate->file_encoding < 0)
+ ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("argument to option \"%s\" must be a valid CHARACTER SET name", defel->defname)));
+ } else if (strcmp(defel->defname, "delimiter") == 0) {
+ if (cstate->delim)
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("conflicting or redundant options")));
+ cstate->delim = defGetString(defel);
+ } else if (strcmp(defel->defname, "escape") == 0) {
+ if (cstate->escape)
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("conflicting or redundant options")));
+ if (strlen(defGetString(defel)) != 1) {
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("escaped must be a single one-byte character")));
+ }
+ cstate->escape = defGetString(defel);
+ } else if (strcmp(defel->defname, "eol") == 0) {
+ if (cstate->eol_type == EOL_UD)
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("conflicting or redundant options")));
+ cstate->eol_type = EOL_UD;
+ cstate->eol = defGetString(defel);
+ size_t eol_len = strlen(defGetString(defel));
+ if (strstr(defGetString(defel), "\\r\\n") != NULL) {
+ int eol_start = eol_len - strlen("\\r\\n");
+ cstate->eol[eol_start] = '\r';
+ cstate->eol[eol_start + 1] = '\n';
+ cstate->eol[eol_start + 2] = '\0';
+ } else if (strstr(defGetString(defel), "\\n") != NULL) {
+ int eol_start = eol_len - strlen("\\n");
+ cstate->eol[eol_start] = '\n';
+ cstate->eol[eol_start + 1] = '\0';
+ } else if (strstr(defGetString(defel), "\\r") != NULL) {
+ int eol_start = eol_len - strlen("\\r");
+ cstate->eol[eol_start] = '\r';
+ cstate->eol[eol_start + 1] = '\0';
+ } else {
+ if (eol_len == 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("\"%s\" is not a valid LINES TERMINATED string, "
+ "LINES TERMINATED string must not be empty",
+ defGetString(defel))));
+ else if (eol_len > EOL_MAX_LEN)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("\"%s\" is not a valid LINES TERMINATED string, "
+ "LINES TERMINATED string must not exceed the maximum length (10 bytes)",
+ defGetString(defel))));
+ }
+ } else if (strcmp(defel->defname, "line_start") == 0) {
+ if (cstate->line_start)
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("conflicting or redundant options")));
+ cstate->line_start = defGetString(defel);
+ }
+ }
+
+ if (!cstate->delim)
+ cstate->delim = "\t";
+ if (!cstate->escape)
+ cstate->escape = "\\";
+ if (!cstate->null_print)
+ cstate->null_print = "\\N";
+ cstate->null_print_len = strlen(cstate->null_print);
+
+ if (cstate->mode == MODE_INVALID)
+ cstate->mode = MODE_NORMAL;
+ if (cstate->eol_type != EOL_UD && !is_from)
+ cstate->eol_type = EOL_NL;
+
+ if ((cstate->delim_len = strlen(cstate->delim)) > DELIM_MAX_LEN)
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("FIELDS TERMINATED must be less than %d bytes", DELIM_MAX_LEN)));
+
+ if (cstate->o_enclosed && cstate->enclosed) {
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("enclosed sentence can only be specified once")));
+ }
+
+ /*
+ * check delimiter
+ */
+ if (cstate->delim && (cstate->delim_len == 1) && ((cstate->delim[0] == ' ') || (cstate->delim[0] == '?')) &&
+ (cstate->compatible_illegal_chars)) {
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("illegal chars conversion may confuse FIELDS TERMINATED 0x%x", cstate->delim[0])));
+ }
+
+ /*
+ * check OPTIONALLY ENCLOSED
+ */
+ if (cstate->o_enclosed && ((cstate->o_enclosed[0] == ' ') || (cstate->o_enclosed[0] == '?')) &&
+ (cstate->compatible_illegal_chars)) {
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("illegal chars conversion may confuse OPTIONALLY ENCLOSED 0x%x", cstate->o_enclosed[0])));
+ }
+
+ /*
+ * check ENCLOSED
+ */
+ if (cstate->enclosed && ((cstate->enclosed[0] == ' ') || (cstate->enclosed[0] == '?')) &&
+ (cstate->compatible_illegal_chars)) {
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("illegal chars conversion may confuse ENCLOSED 0x%x", cstate->enclosed[0])));
+ }
+
+ /*
+ * check escape
+ */
+ if (cstate->escape && ((cstate->escape[0] == ' ') || (cstate->escape[0] == '?')) &&
+ (cstate->compatible_illegal_chars)) {
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("illegal chars conversion may confuse ESCAPED 0x%x", cstate->escape[0])));
+ }
+}
/*
* Process the statement option list for COPY.
@@ -2294,7 +2459,7 @@ static void ProcessCopySummaryLogSetUps(CopyState cstate)
* NULL values as .
*/
static CopyState BeginCopy(bool is_from, Relation rel, Node* raw_query, const char* queryString, List* attnamelist,
- List* options, bool is_copy)
+ List* options, bool is_copy, CopyFileType filetype)
{
CopyState cstate;
TupleDesc tupDesc;
@@ -2315,8 +2480,21 @@ static CopyState BeginCopy(bool is_from, Relation rel, Node* raw_query, const ch
cstate->source_query_string = queryString;
- /* Extract options from the statement node tree */
- ProcessCopyOptions(cstate, is_from, options);
+ switch (filetype) {
+ case S_COPYFILE:
+ /* Extract options from the statement node tree */
+ ProcessCopyOptions(cstate, is_from, options);
+ break;
+ case S_OUTFILE:
+ ProcessFileOptions(cstate, is_from, options, false);
+ break;
+ case S_DUMPFILE:
+ ProcessFileOptions(cstate, is_from, options, true);
+ break;
+ default:
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("undefined file type")));
+ }
+
if (is_copy)
ProcessCopyNotAllowedOptions(cstate);
/* Process the source/target relation or query */
@@ -2650,8 +2828,8 @@ static void CopyToCheck(Relation rel)
/*
* Setup CopyState to read tuples from a table or a query for COPY TO.
*/
-CopyState BeginCopyTo(
- Relation rel, Node* query, const char* queryString, const char* filename, List* attnamelist, List* options)
+CopyState BeginCopyTo(Relation rel, Node* query, const char* queryString,
+ const char* filename, List* attnamelist, List* options, CopyFileType filetype)
{
CopyState cstate;
bool pipe = (filename == NULL);
@@ -2662,7 +2840,7 @@ CopyState BeginCopyTo(
CopyToCheck(rel);
}
- cstate = BeginCopy(false, rel, query, queryString, attnamelist, options);
+ cstate = BeginCopy(false, rel, query, queryString, attnamelist, options, true, filetype);
oldcontext = MemoryContextSwitchTo(cstate->copycontext);
if (pipe) {
@@ -3160,6 +3338,9 @@ void CopyOneRowTo(CopyState cstate, Oid tupleOid, Datum* values, const bool* nul
need_delim = true;
}
+ if (cstate->line_start) {
+ CopySendString(cstate, cstate->line_start);
+ }
if (IS_FIXED(cstate))
FixedRowOut(cstate, values, nulls);
else {
@@ -3187,7 +3368,14 @@ void CopyOneRowTo(CopyState cstate, Oid tupleOid, Datum* values, const bool* nul
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Invalid file format")));
}
} else {
- if (!IS_BINARY(cstate)) {
+ if (cstate->enclosed || cstate->o_enclosed) {
+ string = OutputFunctionCall(&out_functions[attnum - 1], value);
+ if (cstate->enclosed) {
+ SelectAttributeIntoOutfile(cstate, string, false, out_functions[attnum -1].fn_oid);
+ } else {
+ SelectAttributeIntoOutfile(cstate, string, true, out_functions[attnum -1].fn_oid);
+ }
+ } else if (!IS_BINARY(cstate)) {
bool use_quote = cstate->force_quote_flags[attnum - 1];
string = OutputFunctionCall(&out_functions[attnum - 1], value);
switch (out_functions[attnum -1].fn_oid) {
@@ -3637,13 +3825,18 @@ void CopyFromBulkInsert(EState* estate, CopyFromBulk bulk, PageCompress* pcState
/* step 1: open PARTITION relation */
if (isPartitional) {
- searchFakeReationForPartitionOid(estate->esfRelations,
+ bool res = trySearchFakeReationForPartitionOid(&estate->esfRelations,
estate->es_query_cxt,
resultRelationDesc,
bulk->partOid,
- heaprel,
- partition,
+ RelationIsSubPartitioned(resultRelationDesc) ? GetCurrentSubPartitionNo(bulk->partOid) :
+ GetCurrentPartitionNo(bulk->partOid),
+ &heaprel,
+ &partition,
RowExclusiveLock);
+ if (!res) {
+ return;
+ }
estate->esCurrentPartition = heaprel;
insertRel = heaprel;
}
@@ -4624,7 +4817,7 @@ uint64 CopyFrom(CopyState cstate)
if (RelationIsSubPartitioned(resultRelationDesc)) {
targetOid = heapTupleGetSubPartitionId(resultRelationDesc, tuple);
} else {
- targetOid = heapTupleGetPartitionId(resultRelationDesc, tuple);
+ targetOid = heapTupleGetPartitionId(resultRelationDesc, tuple, NULL);
}
} else {
targetOid = RelationGetRelid(resultRelationDesc);
@@ -4677,7 +4870,7 @@ uint64 CopyFrom(CopyState cstate)
if (RelationIsSubPartitioned(resultRelationDesc)) {
targetPartOid = heapTupleGetSubPartitionId(resultRelationDesc, tuple);
} else {
- targetPartOid = heapTupleGetPartitionId(resultRelationDesc, tuple);
+ targetPartOid = heapTupleGetPartitionId(resultRelationDesc, tuple, NULL);
}
partitionList = list_append_unique_oid(partitionList, targetPartOid);
}
@@ -4689,19 +4882,22 @@ uint64 CopyFrom(CopyState cstate)
Partition subPart = NULL;
if (isPartitionRel) {
/* get partititon oid to insert the record */
- partitionid = heapTupleGetPartitionId(resultRelationDesc, tuple);
+ int partitionno = INVALID_PARTITION_NO;
+ partitionid = heapTupleGetPartitionId(resultRelationDesc, tuple, &partitionno);
searchFakeReationForPartitionOid(estate->esfRelations,
estate->es_query_cxt,
resultRelationDesc,
partitionid,
+ partitionno,
heaprel,
partition,
RowExclusiveLock);
if (RelationIsSubPartitioned(resultRelationDesc)) {
- partitionid = heapTupleGetPartitionId(heaprel, tuple);
+ int subpartitionno = INVALID_PARTITION_NO;
+ partitionid = heapTupleGetPartitionId(heaprel, tuple, &subpartitionno);
searchFakeReationForPartitionOid(estate->esfRelations, estate->es_query_cxt, heaprel,
- partitionid, subPartRel, subPart, RowExclusiveLock);
+ partitionid, subpartitionno, subPartRel, subPart, RowExclusiveLock);
heaprel = subPartRel;
partition = subPart;
}
@@ -7971,6 +8167,43 @@ static void CopyAttributeOutCSV(CopyState cstate, char* string, bool use_quote,
}
}
+static void ProcessEnclosedChar(CopyState cstate, char* cur_char, char enclosedc, char escapedc)
+{
+ CopySendChar(cstate, enclosedc);
+ while (*cur_char != '\0') {
+ if (*cur_char == enclosedc) {
+ CopySendChar(cstate, escapedc);
+ }
+ CopySendChar(cstate, *cur_char);
+ cur_char++;
+ }
+ CopySendChar(cstate, enclosedc);
+}
+
+static void SelectAttributeIntoOutfile(CopyState cstate, char* string, bool is_optionally, Oid fn_oid)
+{
+ char* start = string;
+ char escapedc = cstate->escape[0];
+ char enclosedc;
+
+ if (!is_optionally) {
+ enclosedc= cstate->enclosed[0];
+ ProcessEnclosedChar(cstate, start, enclosedc, escapedc);
+ } else {
+ enclosedc= cstate->o_enclosed[0];
+ switch (fn_oid) {
+ case F_BPCHAROUT:
+ case F_TEXTOUT:
+ case F_RAWOUT:
+ case F_ENUM_OUT:
+ ProcessEnclosedChar(cstate, start, enclosedc, escapedc);
+ break;
+ default:
+ CopySendString(cstate, string);
+ }
+ }
+}
+
/*
* Send text representation of one attribute, without encoding conversion.
*/
@@ -8167,6 +8400,10 @@ static void copy_dest_receive(TupleTableSlot* slot, DestReceiver* self)
DR_copy* myState = (DR_copy*)self;
CopyState cstate = myState->cstate;
+ if (cstate->is_dumpfile && myState->processed == 1) {
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Result consisted of more than one row")));
+ }
/* Make sure the tuple is fully deconstructed */
tableam_tslot_getallattrs(slot);
diff --git a/src/gausskernel/optimizer/commands/createas.cpp b/src/gausskernel/optimizer/commands/createas.cpp
index e1e0ed4d6..69fad1868 100644
--- a/src/gausskernel/optimizer/commands/createas.cpp
+++ b/src/gausskernel/optimizer/commands/createas.cpp
@@ -39,6 +39,7 @@
#include "optimizer/planner.h"
#include "parser/analyze.h"
#include "parser/parse_clause.h"
+#include "parser/parse_utilcmd.h"
#include "rewrite/rewriteHandler.h"
#include "storage/smgr/smgr.h"
#include "tcop/tcopprot.h"
@@ -321,6 +322,7 @@ static void intorel_startup(DestReceiver* self, int operation, TupleDesc typeinf
create->row_compress = into->row_compress;
create->tablespacename = into->tableSpaceName;
create->if_not_exists = false;
+ create->charset = PG_INVALID_ENCODING;
/* Using Materialized view only */
create->ivm = into->ivm;
@@ -368,6 +370,7 @@ static void intorel_startup(DestReceiver* self, int operation, TupleDesc typeinf
coltype->arrayBounds = NIL;
coltype->location = -1;
coltype->pct_rowtype = false;
+ coltype->charset = get_charset_by_collation(attribute->attcollation);
/*
* It's possible that the column is of a collatable type but the
diff --git a/src/gausskernel/optimizer/commands/dbcommands.cpp b/src/gausskernel/optimizer/commands/dbcommands.cpp
index 4d92e76cc..e960beb02 100644
--- a/src/gausskernel/optimizer/commands/dbcommands.cpp
+++ b/src/gausskernel/optimizer/commands/dbcommands.cpp
@@ -1217,7 +1217,7 @@ void dropdb(const char* dbname, bool missing_ok)
/*
* Must be owner or have alter privilege to alter database
*/
-static void AlterDatabasePermissionCheck(Oid dboid, const char* dbname)
+void AlterDatabasePermissionCheck(Oid dboid, const char* dbname)
{
AclResult aclresult = pg_database_aclcheck(dboid, GetUserId(), ACL_ALTER);
if (aclresult != ACLCHECK_OK && !pg_database_ownercheck(dboid, GetUserId())) {
diff --git a/src/gausskernel/optimizer/commands/eventcmds.cpp b/src/gausskernel/optimizer/commands/eventcmds.cpp
new file mode 100755
index 000000000..d0fb20353
--- /dev/null
+++ b/src/gausskernel/optimizer/commands/eventcmds.cpp
@@ -0,0 +1,917 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * ---------------------------------------------------------------------------------------
+ *
+ * eventcmds.cpp
+ * Routines for CREATE/ALTER/DROP EVENT commands.
+ * IDENTIFICATION
+ * src/gausskernel/optimizer/commands/eventcmds.cpp
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+#include "postgres.h"
+#include "knl/knl_variable.h"
+
+#include "access/genam.h"
+#include "access/heapam.h"
+#include "access/tableam.h"
+#include "access/sysattr.h"
+#include "catalog/dependency.h"
+#include "catalog/indexing.h"
+#include "catalog/objectaccess.h"
+#include "catalog/pg_object.h"
+#include "catalog/pg_proc.h"
+#include "catalog/pg_type.h"
+#include "catalog/gs_db_privilege.h"
+#include "commands/defrem.h"
+#include "commands/proclang.h"
+#include "executor/executor.h"
+#include "knl/knl_thread.h"
+#include "miscadmin.h"
+#include "optimizer/var.h"
+#include "parser/parse_collate.h"
+#include "parser/parse_func.h"
+#include "storage/tcap.h"
+#include "utils/acl.h"
+#include "utils/builtins.h"
+#include "utils/fmgroids.h"
+#include "utils/guc.h"
+#include "utils/inval.h"
+#include "utils/lsyscache.h"
+#include "utils/rel.h"
+#include "utils/rel_gs.h"
+#include "utils/syscache.h"
+#include "utils/snapmgr.h"
+#include "utils/plpgsql.h"
+#include "storage/lmgr.h"
+#include "tcop/utility.h"
+#include "catalog/pg_job.h"
+#include "tcop/dest.h"
+#include "commands/sqladvisor.h"
+#include "access/printtup.h"
+#include "access/attnum.h"
+#include "nodes/makefuncs.h"
+#include "postgres_ext.h"
+#include "catalog/gs_job_attribute.h"
+#include "utils/dbe_scheduler.h"
+#include "catalog/pg_job_proc.h"
+#include "catalog/pg_authid.h"
+#include "commands/user.h"
+#include "access/htup.h"
+#include "commands/dbcommands.h"
+
+#define SHOW_EVENT_SIZE 10
+#define INTERVAL_QUALITY_LENGTH 16
+
+typedef struct field_str_map_st {
+ int interval;
+ const char *field_str;
+} field_str_map;
+
+const field_str_map g_field_str_map[] = {
+ {INTERVAL_MASK(YEAR), " year"},
+ {INTERVAL_MASK(MONTH), " month"},
+ {INTERVAL_MASK(DAY), " day"},
+ {INTERVAL_MASK(HOUR), " hour"},
+ {INTERVAL_MASK(MINUTE), " minute"},
+ {INTERVAL_MASK(SECOND), " second"},
+ {INTERVAL_MASK(YEAR) | INTERVAL_MASK(MONTH), " year to month"},
+ {INTERVAL_MASK(DAY) | INTERVAL_MASK(HOUR), " day to hour"},
+ {INTERVAL_MASK(DAY) | INTERVAL_MASK(HOUR) | INTERVAL_MASK(MINUTE), " day to minute"},
+ {INTERVAL_MASK(DAY) | INTERVAL_MASK(HOUR) | INTERVAL_MASK(MINUTE) | INTERVAL_MASK(SECOND), " day to second"},
+ {INTERVAL_MASK(HOUR) | INTERVAL_MASK(MINUTE), " hour to minute"},
+ {INTERVAL_MASK(HOUR) | INTERVAL_MASK(MINUTE) | INTERVAL_MASK(SECOND), " hour to second"},
+ {INTERVAL_MASK(MINUTE) | INTERVAL_MASK(SECOND), " minute to second"},
+ {INTERVAL_FULL_RANGE, ""},
+};
+
+enum class job_type {
+ ARG_JOB_DEFAULT,
+ ARG_JOB_ACTION,
+ ARG_START_DATE,
+ ARG_END_DATE,
+ ARG_REPEAT_INTERVAL,
+ ARG_JOB_AUTO_DROP,
+ ARG_JOB_ENABLED,
+ ARG_JOB_COMMENTS,
+ ARG_JOB_RENAME,
+ ARG_JOB_DEFINER,
+ ARG_JOB_DEFINER_DEFAULT
+};
+
+typedef struct job_script_map_st {
+ job_type act_name;
+ const char *act_type;
+} job_script;
+
+const job_script g_job_script[] = {{job_type::ARG_JOB_ACTION, "program_action"},
+ {job_type::ARG_START_DATE, "start_date"},
+ {job_type::ARG_REPEAT_INTERVAL, "repeat_interval"},
+ {job_type::ARG_JOB_AUTO_DROP, "auto_drop"},
+ {job_type::ARG_END_DATE, "end_date"},
+ {job_type::ARG_JOB_COMMENTS, "comments"},
+ {job_type::ARG_JOB_ENABLED, "enabled"},
+ {job_type::ARG_JOB_RENAME, "rename"},
+ {job_type::ARG_JOB_DEFINER, "owner"},
+ {job_type::ARG_JOB_DEFINER_DEFAULT, "owner_default"}};
+
+bool CheckEventExists(Datum ev_name, bool miss_ok)
+{
+ bool is_exists;
+ CatCList *list = SearchSysCacheList1(JOBATTRIBUTENAME, ev_name);
+ is_exists = (list->n_members > 0) ? true : false;
+ ReleaseSysCacheList(list);
+ if (!is_exists) {
+ return false;
+ }
+ if (miss_ok) {
+ ereport(NOTICE, (errcode(ERRCODE_DUPLICATE_OBJECT),
+ errmsg("event \"%s\" already exists, skipping", TextDatumGetCString(ev_name))));
+ return true;
+ } else {
+ ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT),
+ errmsg("event \"%s\" already exists", TextDatumGetCString(ev_name))));
+ }
+ return false;
+}
+
+bool CheckEventNotExists(Datum ev_name, bool miss_ok)
+{
+ bool is_exists;
+ CatCList *list = SearchSysCacheList1(JOBATTRIBUTENAME, ev_name);
+ is_exists = (list->n_members > 0) ? true : false;
+ ReleaseSysCacheList(list);
+ if (is_exists) {
+ return false;
+ }
+ if (miss_ok) {
+ ereport(NOTICE, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("event \"%s\" is not exists, skipping", TextDatumGetCString(ev_name))));
+ return true;
+ } else {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("event \"%s\" is not exists", TextDatumGetCString(ev_name))));
+ }
+ return false;
+}
+
+const char *IntervalTypmodParse(A_Const *expr)
+{
+ long interval_num = expr->val.val.ival;
+ size_t size = (sizeof(g_field_str_map) / sizeof(g_field_str_map[0]));
+ for (size_t i = 0; i < size; ++i) {
+ if (g_field_str_map[i].interval == interval_num) {
+ return g_field_str_map[i].field_str;
+ }
+ }
+ return NULL;
+}
+
+Datum ParseIntevalExpr(Node *intervalNode)
+{
+ StringInfoData buf;
+ initStringInfo(&buf);
+ appendStringInfo(&buf, "interval");
+ TypeCast *tc = (TypeCast *)intervalNode;
+ A_Const *ac = (A_Const *)tc->arg;
+ A_Const *tm = (A_Const *)lfirst(list_head(tc->typname->typmods));
+ const char *tm_str = NULL;
+ tm_str = IntervalTypmodParse(tm);
+ if (IsA(&ac->val, Integer)) {
+ char *quantity_str = (char *)palloc(INTERVAL_QUALITY_LENGTH);
+ pg_itoa((int)ac->val.val.ival, quantity_str);
+ appendStringInfo(&buf, " \'%s\' ", quantity_str);
+ } else if (IsA(&ac->val, String) || IsA(&ac->val, Float)) {
+ appendStringInfo(&buf, " \'%s\' ", (char *)ac->val.val.str);
+ }
+ appendStringInfo(&buf, "%s", tm_str);
+ return CStringGetTextDatum(buf.data);
+}
+
+Datum ExecTimeExpr(Node *node)
+{
+ /* Check whether the execution result of the time expression is of the timestamp type */
+ Oid result_type = exprType(node);
+ if (result_type != TIMESTAMPOID && result_type != TIMESTAMPTZOID) {
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_DATETIME_FORMAT), errmsg("the format of the time expression is incorrect.")));
+ }
+
+ EState *estate = NULL;
+ ExprContext *econtext = NULL;
+ Expr *time_expr = NULL;
+ ExprState *exprstate = NULL;
+ time_expr = (Expr *)node;
+ bool is_null = false;
+ Datum result;
+
+ estate = CreateExecutorState();
+ econtext = GetPerTupleExprContext(estate);
+ exprstate = ExecPrepareExpr(time_expr, estate);
+ if (!PointerIsValid(exprstate)) {
+ ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT),
+ errmsg("failed when making time expression state for constCompare.")));
+ }
+
+ result = ExecEvalExpr(exprstate, econtext, &is_null, NULL);
+ if (result_type == TIMESTAMPTZOID) {
+ result = DirectFunctionCall1(timestamptz_timestamp, DatumGetTimestampTz(result));
+ }
+ FreeExecutorState(estate);
+ return result;
+}
+
+void GetTimeExecResult(CreateEventStmt *stmt, Datum &start_time, Datum &interval_time, Datum &end_time)
+{
+ /* Parse Interval Expression */
+ Node *interval_time_expr = stmt->interval_time;
+ interval_time = (Datum)0;
+ if (interval_time_expr) {
+ interval_time = ParseIntevalExpr(interval_time_expr);
+ }
+
+ /* Parsing the time expression */
+ Node *end_time_expr = stmt->end_time_expr;
+ end_time = get_scheduler_max_timestamptz();
+ if (end_time_expr) {
+ end_time = ExecTimeExpr(end_time_expr);
+ }
+
+ Node *start_time_expr = stmt->start_time_expr;
+ start_time = DirectFunctionCall1(timestamptz_timestamp, DatumGetTimestampTz(GetCurrentTimestamp()));
+ if (start_time_expr) {
+ start_time = ExecTimeExpr(start_time_expr);
+ }
+}
+
+Datum TranslateArg(char *act_name, Node *act_node)
+{
+ Datum result = (Datum)0;
+ job_type ev_act_type = job_type::ARG_JOB_DEFAULT;
+ size_t size = (sizeof(g_job_script) / sizeof(g_job_script[0]));
+ for (size_t i = 0; i < size; ++i) {
+ if (!strcmp(g_job_script[i].act_type, act_name)) {
+ ev_act_type = g_job_script[i].act_name;
+ break;
+ }
+ }
+ switch (ev_act_type) {
+ case job_type::ARG_JOB_ACTION:
+ case job_type::ARG_JOB_COMMENTS: {
+ Value *event_arg_node = (Value *)act_node;
+ result = CStringGetTextDatum(event_arg_node->val.str);
+ break;
+ }
+ case job_type::ARG_JOB_RENAME: {
+ RangeVar *name_var = (RangeVar *)act_node;
+ result = CStringGetTextDatum(name_var->relname);
+ break;
+ }
+ case job_type::ARG_JOB_DEFINER: {
+ Value *ev_definer_node = (Value *)act_node;
+ HOLD_INTERRUPTS();
+ Oid user_oid = GetSysCacheOid1(AUTHNAME, CStringGetDatum(ev_definer_node->val.str));
+ RESUME_INTERRUPTS();
+ CHECK_FOR_INTERRUPTS();
+ if (!OidIsValid(user_oid))
+ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("role \"%s\" does not exist.", ev_definer_node->val.str)));
+ char *name = (char *)palloc(MAX_JOB_NAME_LEN * sizeof(char));
+ pg_ltoa((int)user_oid, name);
+ result = CStringGetTextDatum(name);
+ break;
+ }
+ case job_type::ARG_JOB_DEFINER_DEFAULT: {
+ char *username = get_role_name_str();
+ result = CStringGetTextDatum(username);
+ break;
+ }
+ case job_type::ARG_START_DATE:
+ case job_type::ARG_END_DATE: {
+ result = ExecTimeExpr(act_node);
+ break;
+ }
+ case job_type::ARG_REPEAT_INTERVAL: {
+ result = ParseIntevalExpr(act_node);
+ break;
+ }
+ case job_type::ARG_JOB_ENABLED: {
+ Value *ev_status_node = (Value *)act_node;
+ EventStatus ev_status = (EventStatus)(ev_status_node->val.ival);
+ if (ev_status == EVENT_DISABLE_ON_SLAVE || ev_status == EVENT_DISABLE) {
+ result = BoolGetDatum(0);
+ } else if (ev_status == EVENT_ENABLE) {
+ result = BoolGetDatum(1);
+ }
+ break;
+ }
+ case job_type::ARG_JOB_AUTO_DROP: {
+ Value *ev_drop_node = (Value *)act_node;
+ result = ev_drop_node->val.ival ? CStringGetTextDatum("true") : CStringGetTextDatum("false");
+ break;
+ }
+ default:
+ return (Datum)0;
+ }
+ return result;
+}
+
+Datum SetDefinerName(char *def_name, Datum program_name, char** definer_oid)
+{
+ Datum curuser = get_priv_user(program_name, CharGetDatum(JOB_INTYPE_PLAIN));
+ if (def_name) {
+ if (!superuser()) {
+ ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("The current user does not have sufficient permissions to specify the definer.")));
+ } else {
+ HeapTuple oldtuple = SearchSysCache1(AUTHNAME, CStringGetDatum(def_name));
+ Value* definer_name = makeString(def_name);
+ (*definer_oid) = TextDatumGetCString(TranslateArg("owner", (Node*)definer_name));
+ if (!HeapTupleIsValid(oldtuple)) {
+ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("role \"%s\" does not exist.", def_name)));
+ } else {
+ ReleaseSysCache(oldtuple);
+ return CStringGetDatum(def_name);
+ }
+ }
+ }
+ if (SECUREC_LIKELY(*definer_oid == NULL)) {
+ (*definer_oid) = get_role_name_str();
+ }
+ return curuser;
+}
+
+void PrepareFuncArg(CreateEventStmt *stmt, Datum ev_name, Datum schemaName, FunctionCallInfoData *ev_arg)
+{
+ /* Creating an Inline Program */
+ char* job_definer_oid = NULL;
+ Datum definer = SetDefinerName(stmt->def_name, ev_name, &job_definer_oid);
+ Datum job_action = CStringGetTextDatum(stmt->event_query_str);
+ char *job_type_string = PLSQL_JOB_TYPE;
+ text *job_type_text = cstring_to_text_with_len(job_type_string, strlen(job_type_string));
+ Datum job_type = CStringGetTextDatum(text_to_cstring(job_type_text));
+ char *c_schedule_name = get_inline_schedule_name(ev_name);
+ Datum schedule_name = CStringGetTextDatum(c_schedule_name);
+ char *c_program_name =
+ CreateEventInlineProgram(ev_name, job_type, job_action, CStringGetTextDatum(job_definer_oid));
+ Datum program_name = CStringGetTextDatum(c_program_name);
+
+ Datum start_time;
+ Datum interval_time;
+ Datum end_time;
+
+ /* Parsing the time expression */
+ GetTimeExecResult(stmt, start_time, interval_time, end_time);
+
+ ev_arg->arg[ARG_0] = ev_name;
+ ev_arg->arg[ARG_1] = program_name;
+ ev_arg->arg[ARG_2] = schedule_name;
+ ev_arg->arg[ARG_3] = CStringGetTextDatum("DEFAULT_JOB_CLASS");
+ ev_arg->arg[ARG_4] = (stmt->event_status == EVENT_ENABLE) ? BoolGetDatum(1) : BoolGetDatum(0);
+ ev_arg->arg[ARG_5] = BoolGetDatum(stmt->complete_preserve);
+ ev_arg->arg[ARG_6] = (stmt->event_comment_str == NULL) ? (Datum)0 : CStringGetTextDatum(stmt->event_comment_str);
+ ev_arg->arg[ARG_7] = CStringGetTextDatum("REGULAR");
+ ev_arg->arg[ARG_8] = (Datum)0;
+ ev_arg->arg[ARG_9] = (Datum)0;
+ ev_arg->arg[ARG_10] = CharGetDatum(JOB_INTYPE_PLAIN);
+ ev_arg->arg[ARG_11] = TimeStampToText(start_time);
+ ev_arg->arg[ARG_12] = interval_time;
+ ev_arg->arg[ARG_13] = TimeStampToText(end_time);
+ ev_arg->arg[ARG_14] = job_action;
+ ev_arg->arg[ARG_15] = job_type;
+ ev_arg->arg[ARG_16] = definer;
+ ev_arg->arg[ARG_17] = schemaName;
+ ev_arg->arg[ARG_18] = CStringGetTextDatum(job_definer_oid);
+}
+
+void CreateEventCommand(CreateEventStmt *stmt)
+{
+ Datum schema_name = (stmt->event_name->schemaname)
+ ? CStringGetDatum(stmt->event_name->schemaname)
+ : DirectFunctionCall1(namein, CStringGetDatum(get_real_search_schema()));
+ Datum ev_name = CStringGetTextDatum(stmt->event_name->relname);
+
+ FunctionCallInfoData ev_arg;
+ const short nrgs_job = ARG_19;
+
+ if (CheckEventExists(ev_name, stmt->if_not_exists)) {
+ return;
+ }
+
+ InitFunctionCallInfoData(ev_arg, NULL, nrgs_job, InvalidOid, NULL, NULL);
+ errno_t rc = memset_s(ev_arg.arg, nrgs_job * sizeof(Datum), 0, nrgs_job * sizeof(Datum));
+ securec_check(rc, "\0", "\0");
+ rc = memset_s(ev_arg.argnull, nrgs_job * sizeof(bool), 0, nrgs_job * sizeof(bool));
+ securec_check(rc, "\0", "\0");
+
+ /* Obtains the event parameter. */
+ PrepareFuncArg(stmt, ev_name, schema_name, &ev_arg);
+
+ create_job_raw(&ev_arg);
+}
+
+Datum GetInlineJobName(Datum ev_name)
+{
+ errno_t rc;
+ char *c_program_name = (char *)palloc(sizeof(char) * MAX_JOB_NAME_LEN);
+ rc = strcpy_s(c_program_name, MAX_JOB_NAME_LEN, INLINE_JOB_PROGRAM_PREFIX);
+ securec_check(rc, "\0", "\0");
+ rc = strcat_s(c_program_name, MAX_JOB_NAME_LEN, TextDatumGetCString(ev_name));
+ securec_check(rc, "\0", "\0");
+ return CStringGetTextDatum(c_program_name);
+}
+
+void UpdateMultiAttribute(Datum ev_name, Datum attValue, Datum new_name, Datum att_name)
+{
+ Datum values[Natts_gs_job_attribute] = {0};
+ bool nulls[Natts_gs_job_attribute] = {0};
+ bool replaces[Natts_gs_job_attribute] = {0};
+
+ replaces[Anum_gs_job_attribute_attribute_value - 1] = true;
+ values[Anum_gs_job_attribute_attribute_value - 1] = attValue;
+ nulls[Anum_gs_job_attribute_attribute_value - 1] = false;
+ replaces[Anum_gs_job_attribute_job_name - 1] = true;
+ values[Anum_gs_job_attribute_job_name - 1] = new_name;
+ nulls[Anum_gs_job_attribute_job_name - 1] = false;
+ Relation gs_job_attribute_rel = heap_open(GsJobAttributeRelationId, RowExclusiveLock);
+ HeapTuple oldtuple = SearchSysCache2(JOBATTRIBUTENAME, ev_name, att_name);
+ if (oldtuple == NULL) {
+ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("Fail to update attribute."),
+ errdetail("Attribute entry %s not found.", TextDatumGetCString(att_name)),
+ errcause("attribute is not exist"), erraction("Please check object_name")));
+ }
+ HeapTuple newtuple = heap_modify_tuple(oldtuple, RelationGetDescr(gs_job_attribute_rel), values, nulls, replaces);
+ simple_heap_update(gs_job_attribute_rel, &newtuple->t_self, newtuple);
+ CatalogUpdateIndexes(gs_job_attribute_rel, newtuple);
+ heap_close(gs_job_attribute_rel, NoLock);
+ ReleaseSysCache(oldtuple);
+ heap_freetuple_ext(newtuple);
+}
+
+/* Update the job_name and attribute_value columns in the gs_job_attribute table. */
+void UpdateAttributeMultiColum(Datum ev_name, char *attribute_name[], int attr_pos, DefElem *para_def,
+ Datum new_name_result)
+{
+ attribute_name[attr_pos] = para_def->defname;
+ Datum arg_result = TranslateArg(para_def->defname, para_def->arg);
+ if (!strcmp(para_def->defname, "owner_default")) {
+ UpdateMultiAttribute(ev_name, arg_result, new_name_result, CStringGetTextDatum("owner"));
+ } else {
+ UpdateMultiAttribute(ev_name, arg_result, new_name_result, CStringGetTextDatum(para_def->defname));
+ }
+}
+
+/* Update the attribute_value columns in the gs_job_attribute table. */
+void UpdateSingleAttribute(Datum ev_name, DefElem *para_def)
+{
+ Datum arg_result = TranslateArg(para_def->defname, para_def->arg);
+ Datum att_name = strcmp(para_def->defname, "owner_default") ? CStringGetTextDatum(para_def->defname)
+ : CStringGetTextDatum("owner");
+ update_attribute(ev_name, att_name, arg_result);
+}
+
+void UpdateMultiRows(AlterEventStmt *stmt, int attr_pos, char *attribute_name[], Datum ev_name)
+{
+ Datum values[Natts_gs_job_attribute] = {0};
+ bool nulls[Natts_gs_job_attribute] = {0};
+ bool replaces[Natts_gs_job_attribute] = {0};
+
+ Datum arg_result = TranslateArg(stmt->new_name->defname, (Node *)stmt->new_name->arg);
+ replaces[Anum_gs_job_attribute_job_name - 1] = true;
+ values[Anum_gs_job_attribute_job_name - 1] = arg_result;
+ nulls[Anum_gs_job_attribute_job_name - 1] = false;
+ Relation gs_job_attribute_rel = heap_open(GsJobAttributeRelationId, RowExclusiveLock);
+ CatCList *list = SearchSysCacheList1(JOBATTRIBUTENAME, ev_name);
+ bool is_null = false;
+ Datum enattr;
+ char *enattname;
+ bool is_repeat = false;
+ for (int i = 0; i < list->n_members; i++) {
+ HeapTuple enum_tup = t_thrd.lsc_cxt.FetchTupleFromCatCList(list, i);
+ enattr = heap_getattr(enum_tup, Anum_gs_job_attribute_attribute_name, RelationGetDescr(gs_job_attribute_rel),
+ &is_null);
+ enattname = TextDatumGetCString(enattr);
+ if (strcmp(enattname, "program_name") == 0) {
+ replaces[Anum_gs_job_attribute_attribute_value - 1] = true;
+ values[Anum_gs_job_attribute_attribute_value - 1] = GetInlineJobName(arg_result);
+ nulls[Anum_gs_job_attribute_attribute_value - 1] = false;
+ }
+ if (strcmp(enattname, "schedule_name") == 0) {
+ replaces[Anum_gs_job_attribute_attribute_value - 1] = true;
+ values[Anum_gs_job_attribute_attribute_value - 1] =
+ CStringGetTextDatum(get_inline_schedule_name(arg_result));
+ nulls[Anum_gs_job_attribute_attribute_value - 1] = false;
+ }
+ for (int j = 0; j < attr_pos; ++j) {
+ if (!strcmp(attribute_name[j], "owner_default")) {
+ attribute_name[j] = "owner";
+ };
+ if (strcmp(enattname, attribute_name[j]) == 0) {
+ is_repeat = true;
+ break;
+ }
+ }
+ if (is_repeat) {
+ is_repeat = false;
+ continue;
+ }
+ HeapTuple old_tup = enum_tup;
+ enum_tup = heap_copytuple(old_tup);
+
+ HeapTuple newtuple =
+ heap_modify_tuple(enum_tup, RelationGetDescr(gs_job_attribute_rel), values, nulls, replaces);
+ if (strcmp(enattname, "program_name") == 0) {
+ replaces[Anum_gs_job_attribute_attribute_value - 1] = false;
+ values[Anum_gs_job_attribute_attribute_value - 1] = (Datum)0;
+ nulls[Anum_gs_job_attribute_attribute_value - 1] = true;
+ }
+ if (strcmp(enattname, "schedule_name") == 0) {
+ replaces[Anum_gs_job_attribute_attribute_value - 1] = false;
+ values[Anum_gs_job_attribute_attribute_value - 1] = (Datum)0;
+ nulls[Anum_gs_job_attribute_attribute_value - 1] = true;
+ }
+ simple_heap_update(gs_job_attribute_rel, &newtuple->t_self, newtuple);
+ CatalogUpdateIndexes(gs_job_attribute_rel, newtuple);
+ }
+ ReleaseSysCacheList(list);
+
+ list = SearchSysCacheList1(JOBATTRIBUTENAME, GetInlineJobName(ev_name));
+ values[Anum_gs_job_attribute_job_name - 1] = GetInlineJobName(arg_result);
+
+ for (int i = 0; i < list->n_members; i++) {
+ HeapTuple enum_tup = t_thrd.lsc_cxt.FetchTupleFromCatCList(list, i);
+ enattr = heap_getattr(enum_tup, Anum_gs_job_attribute_attribute_name, RelationGetDescr(gs_job_attribute_rel),
+ &is_null);
+ enattname = TextDatumGetCString(enattr);
+ for (int j = 0; j < attr_pos; ++j) {
+ if (!strcmp(attribute_name[j], "owner_default")) {
+ attribute_name[j] = "owner";
+ };
+ if (strcmp(enattname, attribute_name[j]) == 0 && strcmp(enattname, "comments") != 0) {
+ is_repeat = true;
+ break;
+ }
+ }
+ if (is_repeat) {
+ is_repeat = false;
+ continue;
+ }
+ HeapTuple old_tup = enum_tup;
+ enum_tup = heap_copytuple(old_tup);
+
+ HeapTuple newtuple =
+ heap_modify_tuple(enum_tup, RelationGetDescr(gs_job_attribute_rel), values, nulls, replaces);
+ simple_heap_update(gs_job_attribute_rel, &newtuple->t_self, newtuple);
+ CatalogUpdateIndexes(gs_job_attribute_rel, newtuple);
+ }
+ ReleaseSysCacheList(list);
+ heap_close(gs_job_attribute_rel, RowExclusiveLock);
+}
+
+void UpdateAttributeParam(AlterEventStmt *stmt, Datum ev_name)
+{
+ bool need_rename = (stmt->new_name == NULL) ? false : true;
+ Datum new_name_result = (Datum)0;
+ if (need_rename) {
+ new_name_result = TranslateArg(stmt->new_name->defname, stmt->new_name->arg);
+ }
+
+ const int attribute_num = ARG_17;
+ int attr_pos = 0;
+ char *attribute_name[attribute_num] = {0};
+ Datum inline_name = GetInlineJobName(ev_name);
+ if (stmt->event_comment_str) {
+ if (need_rename) {
+ UpdateAttributeMultiColum(ev_name, attribute_name, attr_pos++, stmt->event_comment_str, new_name_result);
+ } else {
+ UpdateSingleAttribute(ev_name, stmt->event_comment_str);
+ }
+ }
+ if (stmt->complete_preserve) {
+ if (need_rename) {
+ UpdateAttributeMultiColum(ev_name, attribute_name, attr_pos++, stmt->complete_preserve, new_name_result);
+ } else {
+ UpdateSingleAttribute(ev_name, stmt->complete_preserve);
+ }
+ }
+ if (stmt->def_name) {
+ if (need_rename) {
+ UpdateAttributeMultiColum(ev_name, attribute_name, attr_pos++, stmt->def_name, new_name_result);
+ UpdateAttributeMultiColum(inline_name, attribute_name, attr_pos++, stmt->def_name,
+ GetInlineJobName(new_name_result));
+ } else {
+ UpdateSingleAttribute(ev_name, stmt->def_name);
+ UpdateSingleAttribute(inline_name, stmt->def_name);
+ }
+ } else {
+ DefElem* defaute_definer_def = makeDefElem("owner_default", NULL);
+ if (need_rename) {
+ UpdateAttributeMultiColum(ev_name, attribute_name, attr_pos++, defaute_definer_def, new_name_result);
+ UpdateAttributeMultiColum(inline_name, attribute_name, attr_pos++, defaute_definer_def,
+ GetInlineJobName(new_name_result));
+ } else {
+ UpdateSingleAttribute(ev_name, defaute_definer_def);
+ UpdateSingleAttribute(inline_name, defaute_definer_def);
+ }
+ }
+ if (stmt->event_query_str) {
+ if (need_rename) {
+ UpdateAttributeMultiColum(inline_name, attribute_name, attr_pos++, stmt->event_query_str,
+ GetInlineJobName(new_name_result));
+ } else {
+ UpdateSingleAttribute(inline_name, stmt->event_query_str);
+ }
+ }
+ if (need_rename) {
+ /* Update other columns that need to be renamed in the job table. */
+ UpdateMultiRows(stmt, attr_pos, attribute_name, ev_name);
+ }
+}
+
+void UpdateMultiProc(Datum attValue, Datum nameValue, Datum jobName)
+{
+ Datum values[Natts_pg_job_proc] = {0};
+ bool nulls[Natts_pg_job_proc] = {0};
+ bool replaces[Natts_pg_job_proc] = {0};
+
+ Relation rel = heap_open(PgJobProcRelationId, RowExclusiveLock);
+
+ replaces[Anum_pg_job_proc_what - 1] = true;
+ values[Anum_pg_job_proc_what - 1] = attValue;
+ nulls[Anum_pg_job_proc_what - 1] = false;
+ replaces[Anum_pg_job_proc_job_name - 1] = true;
+ values[Anum_pg_job_proc_job_name - 1] = nameValue;
+ nulls[Anum_pg_job_proc_job_name - 1] = false;
+
+ HeapTuple oldtuple = search_from_pg_job_proc(rel, jobName);
+
+ HeapTuple newtuple = heap_modify_tuple(oldtuple, RelationGetDescr(rel), values, nulls, replaces);
+ simple_heap_update(rel, &newtuple->t_self, newtuple);
+ CatalogUpdateIndexes(rel, newtuple);
+
+ heap_close(rel, RowExclusiveLock);
+ heap_freetuple_ext(oldtuple);
+ heap_freetuple_ext(newtuple);
+}
+
+void UpdatePgJobProcName(Datum job_name, DefElem *new_name)
+{
+ Datum values[Natts_pg_job_proc] = {0};
+ bool nulls[Natts_pg_job_proc] = {0};
+ bool replaces[Natts_pg_job_proc] = {0};
+
+ Relation rel = heap_open(PgJobProcRelationId, RowExclusiveLock);
+ Datum arg_result = TranslateArg(new_name->defname, new_name->arg);
+ replaces[Anum_pg_job_proc_job_name - 1] = true;
+ values[Anum_pg_job_proc_job_name - 1] = arg_result;
+ nulls[Anum_pg_job_proc_job_name - 1] = false;
+
+ HeapTuple oldtuple = search_from_pg_job_proc(rel, job_name);
+
+ HeapTuple newtuple = heap_modify_tuple(oldtuple, RelationGetDescr(rel), values, nulls, replaces);
+ simple_heap_update(rel, &newtuple->t_self, newtuple);
+ CatalogUpdateIndexes(rel, newtuple);
+
+ heap_close(rel, RowExclusiveLock);
+ heap_freetuple_ext(oldtuple);
+ heap_freetuple_ext(newtuple);
+}
+
+void UpdatePgJobProcParam(AlterEventStmt *stmt, Datum ev_name)
+{
+ if (stmt->event_query_str) {
+ Datum arg_result = TranslateArg(stmt->event_query_str->defname, stmt->event_query_str->arg);
+ if (stmt->new_name) {
+ Datum ev_new_name = TranslateArg(stmt->new_name->defname, stmt->new_name->arg);
+ UpdateMultiProc(arg_result, ev_new_name, ev_name);
+ } else {
+ dbe_update_pg_job_proc(arg_result, ev_name);
+ }
+ } else {
+ if (stmt->new_name) {
+ UpdatePgJobProcName(ev_name, stmt->new_name);
+ }
+ }
+}
+
+void UpdateMultiJob(Datum job_name, Datum *values, bool *nulls, bool *replaces)
+{
+ Relation pg_job_rel = heap_open(PgJobRelationId, RowExclusiveLock);
+ HeapTuple oldtuple = search_from_pg_job(pg_job_rel, job_name);
+ if (!HeapTupleIsValid(oldtuple)) {
+ heap_close(pg_job_rel, NoLock);
+ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_DATABASE),
+ errmsg("event \"%s\" does not exist", TextDatumGetCString(job_name))));
+ }
+ HeapTuple newtuple = heap_modify_tuple(oldtuple, RelationGetDescr(pg_job_rel), values, nulls, replaces);
+ simple_heap_update(pg_job_rel, &newtuple->t_self, newtuple);
+ CatalogUpdateIndexes(pg_job_rel, newtuple);
+ heap_close(pg_job_rel, NoLock);
+ heap_freetuple_ext(newtuple);
+ heap_freetuple_ext(oldtuple);
+}
+
+void UpdatePgJobParam(AlterEventStmt *stmt, Datum ev_name)
+{
+ Datum arg_result;
+ Datum values[Natts_pg_job] = {0};
+ bool nulls[Natts_pg_job] = {0};
+ bool replaces[Natts_pg_job] = {0};
+
+ Datum definer = DirectFunctionCall1(namein, CStringGetDatum(GetUserNameFromId(GetUserId())));
+ if (stmt->def_name) {
+ Value *definerVal = (Value *)stmt->def_name->arg;
+ definer = DirectFunctionCall1(namein, CStringGetDatum(definerVal->val.str));
+ }
+ values[Anum_pg_job_log_user - 1] = definer;
+ nulls[Anum_pg_job_log_user - 1] = false;
+ replaces[Anum_pg_job_log_user - 1] = true;
+ values[Anum_pg_job_priv_user - 1] = definer;
+ nulls[Anum_pg_job_priv_user - 1] = false;
+ replaces[Anum_pg_job_priv_user - 1] = true;
+ if (stmt->end_time_expr) {
+ arg_result = TranslateArg(stmt->end_time_expr->defname, stmt->end_time_expr->arg);
+ values[Anum_pg_job_end_date - 1] = arg_result;
+ nulls[Anum_pg_job_end_date - 1] = false;
+ replaces[Anum_pg_job_end_date - 1] = true;
+ }
+ if (stmt->start_time_expr) {
+ arg_result = TranslateArg(stmt->start_time_expr->defname, stmt->start_time_expr->arg);
+ values[Anum_pg_job_start_date - 1] = arg_result;
+ nulls[Anum_pg_job_start_date - 1] = false;
+ replaces[Anum_pg_job_start_date - 1] = true;
+ values[Anum_pg_job_next_run_date - 1] = arg_result;
+ nulls[Anum_pg_job_next_run_date - 1] = false;
+ replaces[Anum_pg_job_next_run_date - 1] = true;
+ }
+ if (stmt->interval_time) {
+ arg_result = (stmt->interval_time->arg == NULL)
+ ? CStringGetTextDatum("null")
+ : TranslateArg(stmt->interval_time->defname, stmt->interval_time->arg);
+ values[Anum_pg_job_interval - 1] = arg_result;
+ nulls[Anum_pg_job_interval - 1] = false;
+ replaces[Anum_pg_job_interval - 1] = true;
+ }
+ if (stmt->event_status) {
+ arg_result = TranslateArg(stmt->event_status->defname, stmt->event_status->arg);
+ values[Anum_pg_job_enable - 1] = arg_result;
+ nulls[Anum_pg_job_enable - 1] = false;
+ replaces[Anum_pg_job_enable - 1] = true;
+ }
+ if (stmt->new_name) {
+ Datum schema_name;
+ Datum new_ev_name;
+ RangeVar *event_name_var = (RangeVar *)stmt->new_name->arg;
+ schema_name = (event_name_var->schemaname)
+ ? DirectFunctionCall1(namein, CStringGetDatum(event_name_var->schemaname))
+ : DirectFunctionCall1(namein, CStringGetDatum(get_real_search_schema()));
+ new_ev_name = CStringGetTextDatum(event_name_var->relname);
+ values[Anum_pg_job_nspname - 1] = schema_name;
+ nulls[Anum_pg_job_nspname - 1] = false;
+ replaces[Anum_pg_job_nspname - 1] = true;
+ values[Anum_pg_job_job_name - 1] = new_ev_name;
+ nulls[Anum_pg_job_job_name - 1] = false;
+ replaces[Anum_pg_job_job_name - 1] = true;
+ }
+ UpdateMultiJob(ev_name, values, nulls, replaces);
+}
+
+void AlterEventCommand(AlterEventStmt *stmt)
+{
+ Datum ev_name = CStringGetTextDatum(stmt->event_name->relname);
+
+ /* Check if object is visible for current user. */
+ check_object_is_visible(ev_name, false);
+ if (stmt->def_name) {
+ if (!superuser()) {
+ ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("The current user does not have sufficient permissions to specify the definer.")));
+ }
+ }
+
+ UpdateAttributeParam(stmt, ev_name);
+ UpdatePgJobProcParam(stmt, ev_name);
+ UpdatePgJobParam(stmt, ev_name);
+}
+
+void DropEventCommand(DropEventStmt *stmt)
+{
+ Datum ev_name = CStringGetTextDatum(stmt->event_name->relname);
+ if (CheckEventNotExists(ev_name, stmt->missing_ok)) {
+ return;
+ }
+
+ FunctionCallInfoData ev_arg;
+ const short nrgs_job = ARG_3;
+ InitFunctionCallInfoData(ev_arg, NULL, nrgs_job, InvalidOid, NULL, NULL);
+ errno_t rc = memset_s(ev_arg.arg, nrgs_job * sizeof(Datum), 0, nrgs_job * sizeof(Datum));
+ securec_check(rc, "\0", "\0");
+ rc = memset_s(ev_arg.argnull, nrgs_job * sizeof(bool), 0, nrgs_job * sizeof(bool));
+ securec_check(rc, "\0", "\0");
+ ev_arg.arg[ARG_0] = ev_name;
+ ev_arg.arg[ARG_1] = BoolGetDatum(0);
+ ev_arg.arg[ARG_2] = BoolGetDatum(0);
+ drop_single_job_internal(&ev_arg);
+}
+
+StmtResult *SearchEventInfo(ShowEventStmt *stmt)
+{
+ StringInfoData buf;
+ initStringInfo(&buf);
+ appendStringInfo(&buf, "SELECT ");
+ appendStringInfo(&buf,
+ "job_name,nspname,log_user,priv_user,job_status,start_date,interval,end_date,enable,failure_msg ");
+ appendStringInfo(&buf, "FROM PG_JOB ");
+
+ /* Concatenate where clause */
+ appendStringInfo(&buf, "WHERE dbname=\'%s\' AND ", get_database_name(u_sess->proc_cxt.MyDatabaseId));
+ if (stmt->from_clause) {
+ A_Const *fc = (A_Const *)stmt->from_clause;
+ appendStringInfo(&buf, "nspname=\'%s\' ", (char *)fc->val.val.str);
+ } else {
+ char *schema_name = get_real_search_schema();
+ appendStringInfo(&buf, "nspname=\'%s\' ", schema_name);
+ }
+ if (stmt->where_clause) {
+ appendStringInfo(&buf, " AND ");
+ appendStringInfo(&buf, " %s ", stmt->where_clause);
+ }
+
+ return execute_stmt(buf.data, true);
+}
+
+void ShowEventCommand(ShowEventStmt *stmt, DestReceiver *dest)
+{
+ TupOutputState *tstate = NULL;
+ TupleDesc tupdesc;
+ Datum values[SHOW_EVENT_SIZE] = {0};
+ bool isnull[SHOW_EVENT_SIZE] = {false};
+
+    /* need a tuple descriptor representing ten TEXT columns */
+ tupdesc = CreateTemplateTupleDesc(SHOW_EVENT_SIZE, false, TableAmHeap);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_1, "job_name", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_2, "schema_name", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_3, "log_user", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_4, "priv_user", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_5, "job_status", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_6, "start_date", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_7, "interval", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_8, "end_date", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_9, "enable", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_10, "failure_msg", TEXTOID, -1, 0);
+
+ /* prepare for projection of tuples */
+ tstate = begin_tup_output_tupdesc(dest, tupdesc);
+
+    /* Build and execute the SELECT statement, fetching the event rows to display. */
+ StmtResult *res = SearchEventInfo(stmt);
+
+ List *eventList = res->tuples;
+ ListCell *eventCell = NULL;
+
+ foreach (eventCell, eventList) {
+ List *eventTuple = (List *)eventCell->data.ptr_value;
+ ListCell *eventDesc = NULL;
+ char *ival;
+ int eventPos = 0;
+ foreach (eventDesc, eventTuple) {
+ ival = (char *)eventDesc->data.ptr_value;
+ values[eventPos++] = PointerGetDatum(cstring_to_text(ival));
+ }
+ do_tup_output(tstate, values, SHOW_EVENT_SIZE, isnull, SHOW_EVENT_SIZE);
+ for (int i = 0; i < SHOW_EVENT_SIZE; ++i) {
+ pfree(DatumGetPointer(values[i]));
+ }
+ }
+ end_tup_output(tstate);
+}
+
+TupleDesc GetEventResultDesc()
+{
+ TupleDesc tupdesc;
+ /* need a tuple descriptor representing ten TEXT columns */
+ tupdesc = CreateTemplateTupleDesc(SHOW_EVENT_SIZE, false, TableAmHeap);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_1, "job_name", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_2, "schema_name", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_3, "log_user", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_4, "priv_user", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_5, "job_status", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_6, "start_date", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_7, "interval", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_8, "end_date", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_9, "enable", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)ARG_10, "failure_msg", TEXTOID, -1, 0);
+
+ return tupdesc;
+}
diff --git a/src/gausskernel/optimizer/commands/explain.cpp b/src/gausskernel/optimizer/commands/explain.cpp
index 6c24f617a..4c985481b 100755
--- a/src/gausskernel/optimizer/commands/explain.cpp
+++ b/src/gausskernel/optimizer/commands/explain.cpp
@@ -772,11 +772,18 @@ void ExplainOneUtility(
*/
static void ExecRemoteprocessPlan(EState* estate)
{
+#ifdef ENABLE_MULTIPLE_NODES
ListCell* lc = NULL;
foreach (lc, estate->es_remotequerystates) {
PlanState* ps = (PlanState*)lfirst(lc);
ExecEndRemoteQuery((RemoteQueryState*)ps, true);
}
+#else
+ if (u_sess->stream_cxt.global_obj) {
+ u_sess->stream_cxt.global_obj->SigStreamThreadClose();
+ StreamNodeGroup::syncQuit(STREAM_COMPLETE);
+ }
+#endif
}
/*
@@ -1636,8 +1643,6 @@ static StringInfo get_subpartition_pruning_info(Scan* scanplan, List* rtable)
RangeTblEntry* rte = rt_fetch(scanplan->scanrelid, rtable);
Relation rel = heap_open(rte->relid, NoLock);
List* subpartList = RelationGetSubPartitionOidListList(rel);
- /* Concurrent DDL may change the partition map. Do not check for 'ALL' if it is known to be misarranged */
- bool checkAll = (pr->partMap != NULL) && (getPartitionNumber(pr->partMap) == list_length(subpartList));
int idx = 0;
foreach (lc, pr->ls_selectedSubPartitions) {
@@ -1648,7 +1653,10 @@ static StringInfo get_subpartition_pruning_info(Scan* scanplan, List* rtable)
SubPartitionPruningResult* spr = (SubPartitionPruningResult*)lfirst(lc);
/* check if all subpartition is selected */
int selected = list_length(spr->ls_selectedSubPartitions);
- if (checkAll) {
+            /* partSeq may be misarranged if a concurrent DDL commits, and may even be out of range */
+ if (spr->partSeq >= list_length(subpartList)) {
+ all = false;
+ } else {
int count = list_length((List*)list_nth(subpartList, spr->partSeq));
all &= (selected == count);
}
@@ -1659,7 +1667,7 @@ static StringInfo get_subpartition_pruning_info(Scan* scanplan, List* rtable)
}
}
- if (checkAll && all) {
+ if (all) {
resetStringInfo(strif);
appendStringInfo(strif, "ALL");
}
diff --git a/src/gausskernel/optimizer/commands/extension.cpp b/src/gausskernel/optimizer/commands/extension.cpp
index fb3a0cbe4..2521bfad0 100644
--- a/src/gausskernel/optimizer/commands/extension.cpp
+++ b/src/gausskernel/optimizer/commands/extension.cpp
@@ -1353,6 +1353,7 @@ void CreateExtension(CreateExtensionStmt* stmt)
csstmt->schemaname = schemaName;
csstmt->authid = NULL; /* will be created by current user */
csstmt->schemaElts = NIL;
+ csstmt->charset = PG_INVALID_ENCODING;
#ifdef PGXC
CreateSchemaCommand(csstmt, NULL, true);
#else
diff --git a/src/gausskernel/optimizer/commands/foreigncmds.cpp b/src/gausskernel/optimizer/commands/foreigncmds.cpp
index 62fc36309..d7125c39e 100644
--- a/src/gausskernel/optimizer/commands/foreigncmds.cpp
+++ b/src/gausskernel/optimizer/commands/foreigncmds.cpp
@@ -1822,6 +1822,7 @@ void CreateForeignTable(CreateForeignTableStmt* stmt, Oid relid)
u_sess->upg_cxt.binary_upgrade_next_etbl_toast_pg_class_oid = 0;
}
+ errorStmt->charset = PG_INVALID_ENCODING;
errorStmt->relation =
makeRangeVar(schemaname, pstrdup(error_relation->relname), ((CreateStmt*)stmt)->relation->location);
col = makeColumnDef("nodeid", "int4");
diff --git a/src/gausskernel/optimizer/commands/indexcmds.cpp b/src/gausskernel/optimizer/commands/indexcmds.cpp
index 0c63f576b..d670bd794 100644
--- a/src/gausskernel/optimizer/commands/indexcmds.cpp
+++ b/src/gausskernel/optimizer/commands/indexcmds.cpp
@@ -337,11 +337,11 @@ static void CheckPartitionUniqueKey(Relation rel, int2vector *partKey, IndexStmt
}
}
-static void CheckPartitionIndexDef(IndexStmt* stmt, List *partitionTableList)
+static void CheckPartitionIndexDef(IndexStmt* stmt, List *partitionidlist)
{
List *partitionIndexdef = (List*)stmt->partClause;
- int partitionLens = list_length(partitionTableList);
+ int partitionLens = list_length(partitionidlist);
int idfLens = list_length(partitionIndexdef);
if (partitionLens > idfLens) {
@@ -361,8 +361,7 @@ static void CheckPartitionIndexDef(IndexStmt* stmt, List *partitionTableList)
/*
* Extract SubPartitionIdfs when CREATE INDEX with subpartitions.
*/
-static List *ExtractSubPartitionIdf(IndexStmt* stmt, List *partitionList,
- List *subPartitionList, List *partitionIndexdef)
+static List *ExtractSubPartitionIdf(IndexStmt* stmt, List *subPartitionOidList, List *partitionIndexdef)
{
ListCell *lc1 = NULL;
ListCell *lc2 = NULL;
@@ -375,16 +374,31 @@ static List *ExtractSubPartitionIdf(IndexStmt* stmt, List *partitionList,
int partitionLen = list_length(partitionIndexdef);
/* Fast check partition length */
- if (partitionLen != partitionList->length) {
+ if (partitionLen != list_length(subPartitionOidList)) {
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("Wrong number of partitions when create index specify subpartition.")));
}
+    /* If a partition-level index definition specifies a tablespace and a subpartition-level one does not, inherit the partition's tablespace. */
+ foreach(lc1, partitionIndexdef) {
+ RangePartitionindexDefState *idxPartdef = (RangePartitionindexDefState*)lfirst(lc1);
+ if (idxPartdef->tablespace == NULL) {
+ continue;
+ }
+
+ foreach(lc2, idxPartdef->sublist) {
+ RangePartitionindexDefState *idxSubPartdef = (RangePartitionindexDefState*)lfirst(lc2);
+ if (idxSubPartdef->tablespace == NULL) {
+ idxSubPartdef->tablespace = strdup(idxPartdef->tablespace);
+ }
+ }
+ }
+
/* Next check specify subpartition with metadata in pg_partition */
- foreach(lc1, subPartitionList) {
- List *subPartitions = (List *)lfirst(lc1);
- int subLens = list_length(subPartitions);
+ foreach(lc1, subPartitionOidList) {
+ List *subPartOids = (List *)lfirst(lc1);
+ int subLens = list_length(subPartOids);
foreach(lc2, partitionIndexdef) {
RangePartitionindexDefState *idxDef = (RangePartitionindexDefState*)lfirst(lc2);
@@ -397,7 +411,7 @@ static List *ExtractSubPartitionIdf(IndexStmt* stmt, List *partitionList,
}
}
- expectedSubLens += subPartitions->length;
+ expectedSubLens += list_length(subPartOids);
}
/* Fail exactly match if partitionIndexdef */
@@ -642,7 +656,7 @@ static bool parseVisibleStateFromOptions(List* options)
* Returns the OID of the created index.
*/
Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_alter_table, bool check_rights,
- bool skip_build, bool quiet)
+ bool skip_build, bool quiet, bool is_modify_primary)
{
char* indexRelationName = NULL;
char* accessMethodName = NULL;
@@ -672,7 +686,6 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al
LOCKMODE lockmode;
Snapshot snapshot;
int i = 0;
- List* partitionTableList = NIL;
List* partitionIndexdef = NIL;
List* partitiontspList = NIL;
char relPersistence;
@@ -682,7 +695,6 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al
int crossbucketopt = -1;
List *subPartTspList = NULL;
List *subPartitionIndexDef = NULL;
- List *subPartitionTupleList = NULL;
List *subPartitionOidList = NULL;
List *partitionOidList = NULL;
Oid root_save_userid;
@@ -981,106 +993,50 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al
errmsg("It is not supported to create index on DFS tablespace.")));
}
+ if (RELATION_IS_PARTITIONED(rel)) {
+ partitionOidList = relationGetPartitionOidList(rel);
+ }
+ if (RelationIsSubPartitioned(rel)) {
+ subPartitionOidList = RelationGetSubPartitionOidListList(rel);
+ }
+
/* Check permissions except when using database's default */
if (stmt->isPartitioned && !stmt->isGlobal) {
/* LOCAL partition index check */
ListCell* cell = NULL;
- /* Get the partition tuples in order by inserted time. */
- partitionTableList =
- searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_PARTITION, relationId, BackwardScanDirection);
-
- if (!PointerIsValid(partitionTableList)) {
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("when creating partitioned index, get table partitions failed")));
- }
-
- if (RelationIsSubPartitioned(rel)) {
- subPartitionTupleList = searchPgSubPartitionByParentId(PART_OBJ_TYPE_TABLE_SUB_PARTITION,
- partitionTableList, BackwardScanDirection);
- }
-
if (PointerIsValid(stmt->partClause)) {
if (RelationIsSubPartitioned(rel)) {
- ListCell* lc1 = NULL;
- ListCell* lc2 = NULL;
- List* subPartitions = NIL;
-
partitionIndexdef = (List*)stmt->partClause;
- subPartitionIndexDef = ExtractSubPartitionIdf(stmt,
- partitionTableList,
- subPartitionTupleList,
- partitionIndexdef);
-
- /* Fill partitionOidList */
- foreach (lc1, partitionTableList) {
- HeapTuple tuple = (HeapTuple)lfirst(lc1);
- partitionOidList = lappend_oid(partitionOidList, HeapTupleGetOid(tuple));
- }
-
- /* Fill subPartitionOidList */
- foreach (lc1, subPartitionTupleList) {
- subPartitions = (List*)lfirst(lc1);
-
- List* subPartitionOids = NIL;
- foreach (lc2, subPartitions) {
- HeapTuple tuple = (HeapTuple)lfirst(lc2);
- subPartitionOids = lappend_oid(subPartitionOids, HeapTupleGetOid(tuple));
- }
- subPartitionOidList = lappend(subPartitionOidList, subPartitionOids);
- }
+ subPartitionIndexDef = ExtractSubPartitionIdf(stmt, subPartitionOidList, partitionIndexdef);
} else {
partitionIndexdef = (List*)stmt->partClause;
/* index partition's number must no less than table partition's number */
- CheckPartitionIndexDef(stmt, partitionTableList);
+ CheckPartitionIndexDef(stmt, partitionOidList);
}
} else {
if (!RelationIsSubPartitioned(rel)) {
/* construct the index list */
- for (i = 0; i < partitionTableList->length; i++) {
+ for (i = 0; i < list_length(partitionOidList); i++) {
RangePartitionindexDefState* def = makeNode(RangePartitionindexDefState);
partitionIndexdef = lappend(partitionIndexdef, def);
}
} else {
- int j = 0;
/* construct the index list */
- foreach (cell, subPartitionTupleList) {
- List *sub = (List *)lfirst(cell);
+ foreach (cell, subPartitionOidList) {
+ List *subPartOids = (List *)lfirst(cell);
List *partSubIndexDef = NULL;
- for (j = 0; j < sub->length; j++) {
+ for (i = 0; i < list_length(subPartOids); i++) {
RangePartitionindexDefState *def = makeNode(RangePartitionindexDefState);
partSubIndexDef = lappend(partSubIndexDef, def);
}
subPartitionIndexDef = lappend(subPartitionIndexDef, partSubIndexDef);
}
- foreach (cell, partitionTableList) {
- HeapTuple tuple = (HeapTuple)lfirst(cell);
- Oid partOid = HeapTupleGetOid(tuple);
- partitionOidList = lappend_oid(partitionOidList, partOid);
- }
- foreach (cell, subPartitionTupleList) {
- List* subPartTuples = (List*)lfirst(cell);
- ListCell* lc = NULL;
- List* subPartOids = NIL;
- foreach (lc, subPartTuples) {
- HeapTuple tuple = (HeapTuple)lfirst(lc);
- Oid subPartOid = HeapTupleGetOid(tuple);
- subPartOids = lappend_oid(subPartOids, subPartOid);
- }
- subPartitionOidList = lappend(subPartitionOidList, subPartOids);
- }
}
}
- freePartList(partitionTableList);
-
- if (subPartitionTupleList != NULL) {
- freeSubPartList(subPartitionTupleList);
- }
-
if (!RelationIsSubPartitioned(rel)) {
foreach (cell, partitionIndexdef) {
RangePartitionindexDefState* def = (RangePartitionindexDefState*)lfirst(cell);
@@ -1094,12 +1050,12 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al
}
}
} else {
- for (i = 0; i < subPartitionIndexDef->length; i++) {
- List *sub = (List *)list_nth(subPartitionIndexDef, i);
- partitiontspList = NULL;
- foreach (cell, sub)
- {
- RangePartitionindexDefState* def = (RangePartitionindexDefState*)lfirst(cell);
+ foreach(cell, subPartitionIndexDef) {
+ List *sub = (List *)lfirst(cell);
+ ListCell *subcell;
+ partitiontspList = NIL;
+ foreach (subcell, sub) {
+ RangePartitionindexDefState* def = (RangePartitionindexDefState*)lfirst(subcell);
if (NULL != def->tablespace) {
/* use partition tablespace if user defines */
partitiontspList = lappend_oid(partitiontspList, get_tablespace_oid(def->tablespace, false));
@@ -1428,7 +1384,7 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al
* If be informational constraint, we are not to set not null in pg_attribute.
*/
if (stmt->primary && !stmt->internal_flag)
- index_check_primary_key(rel, indexInfo, is_alter_table);
+ index_check_primary_key(rel, indexInfo, is_alter_table, is_modify_primary);
/*
* Report index creation if appropriate (delay this till after most of the
@@ -1632,6 +1588,7 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al
ListCell* tspcell = NULL;
ListCell* indexcell = NULL;
ListCell* partitioncell = NULL;
+ ListCell* partoidcell = NULL;
Oid partitionid = InvalidOid;
Oid partIndexFileNode = InvalidOid;
PartIndexCreateExtraArgs partExtra;
@@ -1640,7 +1597,6 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al
Partition partition = NULL;
Oid partitiontspid = InvalidOid;
RangePartitionindexDefState* indexdef = NULL;
- List* partitionidlist = NIL;
Oid toastid = InvalidOid;
Relation pg_partition_rel = NULL;
int indexnum = 0;
@@ -1653,8 +1609,6 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("fail to get index info when create index partition")));
}
-
- partitionidlist = relationGetPartitionOidList(rel);
} else {
if (!PointerIsValid(subPartitionIndexDef)) {
ereport(ERROR,
@@ -1668,7 +1622,7 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al
oldMemContext = MemoryContextSwitchTo(partitionIndexMemContext);
if (!RelationIsSubPartitioned(rel)) {
- forthree(tspcell, partitiontspList, indexcell, partitionIndexdef, partitioncell, partitionidlist)
+ forthree(tspcell, partitiontspList, indexcell, partitionIndexdef, partitioncell, partitionOidList)
{
partitiontspid = lfirst_oid(tspcell);
indexdef = (RangePartitionindexDefState*)lfirst(indexcell);
@@ -1734,15 +1688,14 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al
ListCell *subTspCell = NULL;
ListCell *subIndexCell = NULL;
ListCell *subPartCell = NULL;
- int partIdx = 0;
- forthree(subTspCell, subPartTspList, subIndexCell, subPartitionIndexDef, subPartCell,
- subPartitionOidList)
+ forfour(subTspCell, subPartTspList, subIndexCell, subPartitionIndexDef, subPartCell,
+ subPartitionOidList, partoidcell, partitionOidList)
{
partitiontspList = (List *)lfirst(subTspCell);
partitionIndexdef = (List *)lfirst(subIndexCell);
List *subpartitionidlist = (List *)lfirst(subPartCell);
+ Oid partid = lfirst_oid(partoidcell);
- Oid partid = list_nth_oid(partitionOidList, partIdx++);
Partition p = partitionOpen(rel, partid, ShareLock);
Relation partRel = partitionGetRelation(rel, p);
@@ -1805,17 +1758,6 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al
releaseDummyRelation(&partRel);
partitionClose(rel, p, ShareLock);
}
- if (subPartitionOidList != NULL) {
- ListCell* lc = NULL;
- foreach(lc, subPartitionOidList) {
- List* tmpList = (List*)lfirst(lc);
- list_free_ext(tmpList);
- }
- list_free_ext(subPartitionOidList);
- }
- if (partitionOidList != NULL) {
- list_free_ext(partitionOidList);
- }
}
// delete memory context
@@ -1835,6 +1777,13 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al
ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), errmsg("unsupport partitioned strategy")));
}
+ if (RELATION_IS_PARTITIONED(rel)) {
+ releasePartitionOidList(&partitionOidList);
+ }
+ if (RelationIsSubPartitioned(rel)) {
+ ReleaseSubPartitionOidList(&subPartitionOidList);
+ }
+
/* Roll back any GUC changes executed by index functions. */
AtEOXact_GUC(false, root_save_nestlevel);
@@ -1847,6 +1796,13 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al
return indexRelationId;
}
+ if (RELATION_IS_PARTITIONED(rel)) {
+ releasePartitionOidList(&partitionOidList);
+ }
+ if (RelationIsSubPartitioned(rel)) {
+ ReleaseSubPartitionOidList(&subPartitionOidList);
+ }
+
/* Roll back any GUC changes executed by index functions. */
AtEOXact_GUC(false, root_save_nestlevel);
@@ -2999,7 +2955,7 @@ void ReindexIndex(RangeVar* indexRelation, const char* partition_name, AdaptMem*
index_close(irel, NoLock);
if (partition_name != NULL)
- indPartOid = partitionNameGetPartitionOid(indOid,
+ indPartOid = PartitionNameGetPartitionOid(indOid,
partition_name,
PART_OBJ_TYPE_INDEX_PARTITION,
concurrent ? ShareUpdateExclusiveLock : AccessExclusiveLock, // lock on index partition
@@ -3209,7 +3165,7 @@ void ReindexTable(RangeVar* relation, const char* partition_name, AdaptMem* mem_
heap_close(rel, NoLock);
TrForbidAccessRbObject(RelationRelationId, heapOid, relation->relname);
- heapPartOid = partitionNameGetPartitionOid(
+ heapPartOid = PartitionNameGetPartitionOid(
heapOid, partition_name, PART_OBJ_TYPE_TABLE_PARTITION, concurrent ? ShareUpdateExclusiveLock : ShareLock, false, false, NULL, NULL, NoLock);
if(concurrent)
ReindexRelationConcurrently(heapOid, heapPartOid, mem_info);
@@ -3264,7 +3220,7 @@ void ReindexInternal(RangeVar* relation, const char* partition_name)
if (partition_name != NULL) {
Oid PartOid;
/* The lock level used here should match reindexPartition(). */
- PartOid = partitionNameGetPartitionOid(heapOid,
+ PartOid = PartitionNameGetPartitionOid(heapOid,
partition_name,
PART_OBJ_TYPE_TABLE_PARTITION,
AccessShareLock,
@@ -3713,7 +3669,7 @@ static void prepareReindexTableConcurrently(Oid relationOid, Oid relationPartOid
/* get add interval partition lock, unlock after transaction commit */
if (RelationIsPartitioned(heapRelation) && heapRelation->partMap->type == PART_TYPE_INTERVAL)
- LockRelationForAddIntervalPartition(heapRelation);
+ LockPartitionObject(heapRelation->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK);
/* Save the list of relation OIDs in private context */
oldcontext = MemoryContextSwitchTo(private_context);
@@ -3862,7 +3818,7 @@ static void prepareReindexIndexConcurrently(Oid relationOid, Oid relationPartOid
/* Add all the valid indexes of relation to list */
if (RelationIsPartitioned(heapRelation) && heapRelation->partMap->type == PART_TYPE_INTERVAL)
- LockRelationForAddIntervalPartition(heapRelation);
+ LockPartitionObject(heapRelation->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK);
checkIndexForReindexConcurrently(indexRelation, true);
@@ -4781,7 +4737,7 @@ static bool ReindexRelationConcurrently(Oid relationOid, Oid relationPartOid, Ad
/* get add interval partition lock, unlock after transaction commit */
if (RelationIsPartitioned(heapRelation) && heapRelation->partMap->type == PART_TYPE_INTERVAL)
- LockRelationForAddIntervalPartition(heapRelation);
+ LockPartitionObject(heapRelation->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK);
heap_close(heapRelation, AccessShareLock);
}
@@ -4888,7 +4844,7 @@ void addIndexForPartition(Relation partitionedRelation, Oid partOid)
return;
}
- partition = partitionOpen(partitionedRelation, partOid, ShareLock);
+ partition = partitionOpen(partitionedRelation, partOid, ShareUpdateExclusiveLock);
pg_partition_rel = heap_open(PartitionRelationId, RowExclusiveLock);
foreach (cell, indelist) {
@@ -4991,7 +4947,7 @@ void addIndexForPartition(Relation partitionedRelation, Oid partOid)
#ifndef ENABLE_MULTIPLE_NODES
if (RelationIsCUFormat(partitionedRelation) && indexForm->indisunique) {
if (!PointerIsValid(partitionDelta)) {
- partitionDelta = heap_open(partition->pd_part->reldeltarelid, ShareLock);
+ partitionDelta = heap_open(partition->pd_part->reldeltarelid, ShareUpdateExclusiveLock);
}
char partDeltaIdxName[NAMEDATALEN] = {0};
error_t ret = snprintf_s(partDeltaIdxName, sizeof(partDeltaIdxName),
@@ -5458,7 +5414,7 @@ CheckWhetherForbiddenFunctionalIdx(Oid relationId, Oid namespaceId, List* indexP
/* Currently, there is only one element in the forbidden list.
* Hence we can determine it using the following method briefly.
* */
- if (unlikely(namespaceId == PG_DB4AI_NAMESPACE)) {
+ if (unlikely(namespaceId == PG_DB4AI_NAMESPACE || namespaceId == PG_SNAPSHOT_NAMESPACE)) {
return true;
}
diff --git a/src/gausskernel/optimizer/commands/portalcmds.cpp b/src/gausskernel/optimizer/commands/portalcmds.cpp
index 6a73efc2a..9b1475bed 100644
--- a/src/gausskernel/optimizer/commands/portalcmds.cpp
+++ b/src/gausskernel/optimizer/commands/portalcmds.cpp
@@ -28,6 +28,7 @@
#include "commands/portalcmds.h"
#include "executor/executor.h"
#include "executor/tstoreReceiver.h"
+#include "distributelayer/streamCore.h"
#include "pgxc/execRemote.h"
#include "tcop/pquery.h"
#include "utils/memutils.h"
@@ -288,6 +289,17 @@ void PortalCleanup(Portal portal)
t_thrd.utils_cxt.CurrentResourceOwner = portal->resowner;
ExecutorFinish(queryDesc);
ExecutorEnd(queryDesc);
+#ifndef ENABLE_MULTIPLE_NODES
+ /*
+ * estate is under the queryDesc, and stream threads use it.
+ * we should wait all stream threads exit to cleanup queryDesc.
+ */
+ if (!StreamThreadAmI()) {
+ portal->streamInfo.AttachToSession();
+ StreamNodeGroup::ReleaseStreamGroup(true);
+ portal->streamInfo.Reset();
+ }
+#endif
FreeQueryDesc(queryDesc);
}
PG_CATCH();
diff --git a/src/gausskernel/optimizer/commands/prepare.cpp b/src/gausskernel/optimizer/commands/prepare.cpp
index a83afe0d6..153fb62e4 100755
--- a/src/gausskernel/optimizer/commands/prepare.cpp
+++ b/src/gausskernel/optimizer/commands/prepare.cpp
@@ -240,10 +240,12 @@ void PrepareQuery(PrepareStmt* stmt, const char* queryString)
case CMD_MERGE:
/* OK */
break;
- default:
- if (IsA(query->utilityStmt, VariableMultiSetStmt)) {
+ case CMD_UTILITY:
+ if (IsA(query->utilityStmt, VariableMultiSetStmt) ||
+ IsA(query->utilityStmt, CopyStmt)) {
break;
}
+ default:
ereport(ERROR,
(errcode(ERRCODE_INVALID_PSTATEMENT_DEFINITION), errmsg("utility statements cannot be prepared")));
break;
diff --git a/src/gausskernel/optimizer/commands/schemacmds.cpp b/src/gausskernel/optimizer/commands/schemacmds.cpp
index 151f987aa..01cf02690 100644
--- a/src/gausskernel/optimizer/commands/schemacmds.cpp
+++ b/src/gausskernel/optimizer/commands/schemacmds.cpp
@@ -104,6 +104,7 @@ void CreateSchemaCommand(CreateSchemaStmt* stmt, const char* queryString)
int save_sec_context;
AclResult aclresult;
char* queryStringwithinfo = (char*)queryString;
+ Oid coll_oid = InvalidOid;
ObjectAddress address;
GetUserIdAndSecContext(&saved_uid, &save_sec_context);
@@ -236,8 +237,12 @@ void CreateSchemaCommand(CreateSchemaStmt* stmt, const char* queryString)
}
}
+ if (stmt->collate || stmt->charset != PG_INVALID_ENCODING) {
+ coll_oid = transform_default_collation(stmt->collate, stmt->charset);
+ }
+
/* Create the schema's namespace */
- namespaceId = NamespaceCreate(schemaName, owner_uid, false, hasBlockChain);
+ namespaceId = NamespaceCreate(schemaName, owner_uid, false, hasBlockChain, coll_oid);
/* Advance cmd counter to make the namespace visible */
CommandCounterIncrement();
@@ -360,6 +365,8 @@ void AlterSchemaCommand(AlterSchemaStmt* stmt)
Assert(nspName != NULL);
bool withBlockchain = stmt->hasBlockChain;
bool nspIsBlockchain = false;
+ Oid colloid = InvalidOid;
+ Oid nspcollation = InvalidOid;
HeapTuple tup;
Relation rel;
AclResult aclresult;
@@ -398,45 +405,67 @@ void AlterSchemaCommand(AlterSchemaStmt* stmt)
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("The system schema \"%s\" doesn't allow to alter to blockchain schema", nspName)));
- /*
- * If the any table exists in the schema, do not change to ledger schema.
- */
- StringInfo existTbl = TableExistInSchema(HeapTupleGetOid(tup), TABLE_TYPE_ANY);
- if (existTbl->len != 0) {
- if (withBlockchain) {
- ereport(ERROR,
- (errcode(ERRCODE_RESERVED_NAME),
- errmsg("It is not supported to change \"%s\" to blockchain schema which includes tables.",
- nspName)));
+
+ Datum new_record[Natts_pg_namespace] = {0};
+ bool new_record_nulls[Natts_pg_namespace] = {false};
+ bool new_record_repl[Natts_pg_namespace] = {false};
+
+ Datum datum;
+ bool is_null = true;
+ /* For B format, the default collation of a schema can be changed. */
+ if (stmt->collate || stmt->charset != PG_INVALID_ENCODING) {
+ colloid = transform_default_collation(stmt->collate, stmt->charset);
+ datum = SysCacheGetAttr(NAMESPACENAME, tup, Anum_pg_namespace_nspcollation, &is_null);
+ if (!is_null) {
+            nspcollation = DatumGetObjectId(datum);
+ }
+ if (nspcollation != colloid) {
+ new_record[Anum_pg_namespace_nspcollation - 1] = ObjectIdGetDatum(colloid);
+ new_record_repl[Anum_pg_namespace_nspcollation - 1] = true;
} else {
- ereport(ERROR,
- (errcode(ERRCODE_RESERVED_NAME),
- errmsg("It is not supported to change \"%s\" to normal schema which includes tables.",
- nspName)));
+ heap_close(rel, NoLock);
+ tableam_tops_free_tuple(tup);
+ return;
+ }
+ } else {
+ /*
+ * If the any table exists in the schema, do not change to ledger schema.
+ */
+ StringInfo existTbl = TableExistInSchema(HeapTupleGetOid(tup), TABLE_TYPE_ANY);
+ if (existTbl->len != 0) {
+ if (withBlockchain) {
+ ereport(ERROR,
+ (errcode(ERRCODE_RESERVED_NAME),
+ errmsg("It is not supported to change \"%s\" to blockchain schema which includes tables.",
+ nspName)));
+ } else {
+ ereport(ERROR,
+ (errcode(ERRCODE_RESERVED_NAME),
+ errmsg("It is not supported to change \"%s\" to normal schema which includes tables.",
+ nspName)));
+ }
}
+ /* modify nspblockchain attribute */
+ datum = SysCacheGetAttr(NAMESPACENAME, tup, Anum_pg_namespace_nspblockchain, &is_null);
+ if (!is_null) {
+ nspIsBlockchain = DatumGetBool(datum);
+ }
+ if (nspIsBlockchain != withBlockchain) {
+ new_record[Anum_pg_namespace_nspblockchain - 1] = BoolGetDatum(withBlockchain);
+ new_record_repl[Anum_pg_namespace_nspblockchain - 1] = true;
+ } else {
+ heap_close(rel, NoLock);
+ tableam_tops_free_tuple(tup);
+ return;
+ }
}
- /* modify nspblockchain attribute */
- bool is_null = true;
- Datum datum = SysCacheGetAttr(NAMESPACENAME, tup, Anum_pg_namespace_nspblockchain, &is_null);
- if (!is_null) {
- nspIsBlockchain = DatumGetBool(datum);
- }
- if (nspIsBlockchain != withBlockchain) {
- Datum new_record[Natts_pg_namespace] = {0};
- bool new_record_nulls[Natts_pg_namespace] = {false};
- bool new_record_repl[Natts_pg_namespace] = {false};
-
- new_record[Anum_pg_namespace_nspblockchain - 1] = BoolGetDatum(withBlockchain);
- new_record_repl[Anum_pg_namespace_nspblockchain - 1] = true;
-
- HeapTuple new_tuple = (HeapTuple) tableam_tops_modify_tuple(tup, RelationGetDescr(rel),
- new_record, new_record_nulls, new_record_repl);
- simple_heap_update(rel, &tup->t_self, new_tuple);
- /* Update indexes */
- CatalogUpdateIndexes(rel, new_tuple);
- }
+ HeapTuple new_tuple = (HeapTuple) tableam_tops_modify_tuple(tup, RelationGetDescr(rel),
+ new_record, new_record_nulls, new_record_repl);
+ simple_heap_update(rel, &tup->t_self, new_tuple);
+ /* Update indexes */
+ CatalogUpdateIndexes(rel, new_tuple);
heap_close(rel, NoLock);
tableam_tops_free_tuple(tup);
diff --git a/src/gausskernel/optimizer/commands/sequence/sequence.cpp b/src/gausskernel/optimizer/commands/sequence/sequence.cpp
index 784f556d2..fb3ce4f0a 100644
--- a/src/gausskernel/optimizer/commands/sequence/sequence.cpp
+++ b/src/gausskernel/optimizer/commands/sequence/sequence.cpp
@@ -993,6 +993,7 @@ static void DefineSequence(CreateSeqStmt* seq)
stmt->oncommit = ONCOMMIT_NOOP;
stmt->tablespacename = NULL;
stmt->if_not_exists = false;
+ stmt->charset = PG_INVALID_ENCODING;
char rel_kind = large ? RELKIND_LARGE_SEQUENCE : RELKIND_SEQUENCE;
seqoid = DefineRelation(stmt, rel_kind, seq->ownerId);
Assert(seqoid != InvalidOid);
diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp
old mode 100644
new mode 100755
index 59929f0f4..19901aa12
--- a/src/gausskernel/optimizer/commands/tablecmds.cpp
+++ b/src/gausskernel/optimizer/commands/tablecmds.cpp
@@ -58,12 +58,14 @@
#include "catalog/pg_partition_fn.h"
#include "catalog/pg_hashbucket.h"
#include "catalog/pg_hashbucket_fn.h"
+#include "catalog/pg_rewrite.h"
#include "catalog/pg_synonym.h"
#include "catalog/pg_tablespace.h"
#include "catalog/pg_trigger.h"
#include "catalog/pg_type.h"
#include "catalog/pg_type_fn.h"
#include "catalog/pg_uid_fn.h"
+#include "catalog/pg_rlspolicy.h"
#include "catalog/storage.h"
#include "catalog/storage_xlog.h"
#include "catalog/toasting.h"
@@ -89,6 +91,7 @@
#include "commands/verify.h"
#include "commands/matview.h"
#include "commands/view.h"
+#include "commands/view.h"
#include "executor/executor.h"
#include "executor/node/nodeModifyTable.h"
#include "foreign/fdwapi.h"
@@ -119,6 +122,7 @@
#include "rewrite/rewriteHandler.h"
#include "rewrite/rewriteManip.h"
#include "rewrite/rewriteRlsPolicy.h"
+#include "rewrite/rewriteSupport.h"
#include "replication/slot.h"
#include "storage/buf/bufmgr.h"
#include "storage/freespace.h"
@@ -168,6 +172,7 @@
#include "fmgr.h"
#include "pgstat.h"
#include "postmaster/rbcleaner.h"
+#include "catalog/gs_utf8_collation.h"
#ifdef ENABLE_MULTIPLE_NODES
#include "tsdb/utils/ts_relcache.h"
#include "tsdb/common/ts_tablecmds.h"
@@ -264,6 +269,11 @@ static const char* ORCSupportOption[] = {"orientation", "compression", "version"
#define AT_NUM_PASSES 10
#endif
+typedef struct ViewInfoForAdd {
+ Oid ev_class;
+ char *query_string;
+} ViewInfoForAdd;
+
typedef struct AlteredTableInfo {
/* Information saved before any work commences: */
Oid relid; /* Relation to work on */
@@ -284,6 +294,14 @@ typedef struct AlteredTableInfo {
List* changedIndexOids; /* OIDs of indexes to rebuild */
List* changedIndexDefs; /* string definitions of same */
bool isDeltaTable; /* delta table or not */
+ List* changedGeneratedCols; /* attribute number of generated column to rebuild */
+ List* changedRLSPolicies; /* oid of RLSPolicies to rebuild */
+ List* changedViewOids; /* OIDs of views to rebuild */
+ List* changedViewDefs; /* string definitions of same */
+ List* changedTriggerOids; /* OIDs of triggers to rebuild */
+ List* changedTriggerDefs; /* string definitions of same */
+    bool is_first_after; /* modify first|after and add first|after */
+ bool is_modify_primary; /* modify column first|after with primary key, we should pre-record AT_SetNotNull */
} AlteredTableInfo;
/* Struct describing one new constraint to check in Phase 3 scan */
@@ -311,6 +329,13 @@ typedef struct NewColumnValue {
ExprState* exprstate; /* execution state */
bool is_generated; /* is it a GENERATED expression? */
bool is_autoinc;
+ bool is_addloc; /* is add column first or after */
+ AttrNumber newattnum; /* is modify column first or after
+ -1 denote add;
+ 0 denote modify without first|after;
+ > 0 denote modify with first|after */
+ char *col_name;
+ AttrNumber generate_attnum;
} NewColumnValue;
/*
@@ -451,6 +476,15 @@ typedef OldToNewChunkIdMappingData* OldToNewChunkIdMapping;
((kind) == RELKIND_VIEW || (kind) == RELKIND_FOREIGN_TABLE || (kind) == RELKIND_SEQUENCE || \
(kind) == RELKIND_COMPOSITE_TYPE || (kind) == RELKIND_STREAM || (kind) == RELKIND_CONTQUERY)
+#define PARTITION_DDL_CMD(cmd) \
+ ((cmd) == AT_AddPartition || (cmd) == AT_AddSubPartition || \
+ (cmd) == AT_DropPartition || (cmd) == AT_DropSubPartition || \
+ (cmd) == AT_ExchangePartition || \
+ (cmd) == AT_TruncatePartition || (cmd) == AT_TruncateSubPartition || \
+ (cmd) == AT_SetPartitionTableSpace || \
+ (cmd) == AT_SplitPartition || (cmd) == AT_SplitSubPartition || \
+ (cmd) == AT_MergePartition)
+
static bool CStoreSupportATCmd(AlterTableType cmdtype);
static bool CStoreSupportConstraint(Constraint* cons);
static List* MergeAttributes(
@@ -521,16 +555,45 @@ static void ExecChangeTableSpaceForRowPartition(AlteredTableInfo*, LOCKMODE);
static void ExecChangeTableSpaceForCStoreTable(AlteredTableInfo*, LOCKMODE);
static void ExecChangeTableSpaceForCStorePartition(AlteredTableInfo*, LOCKMODE);
+static int GetAfterColumnAttnum(Oid attrelid, const char *after_name);
+static Node *UpdateVarattnoAfterAddColumn(Node *node, int startattnum, int endattnum, bool is_increase);
+static void UpdatePgAttributeFirstAfter(Relation attr_rel, Oid attrelid, int startattnum, int endattnum,
+ bool is_increase);
+static void UpdatePgIndexFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase);
+static void UpdatePgConstraintFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase);
+static void UpdatePgConstraintConfkeyFirstAfter(Relation rel, int startattnum, int endattnum,
+ bool is_increase);
+static void UpdateGenerateColFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase);
+static void UpdateIndexFirstAfter(Relation rel);
+static void UpdatePgAttrdefFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase);
+static void UpdatePgDependFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase);
+static void UpdatePgPartitionFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase,
+ bool is_modified, bool *hasPartition);
+static void UpdatePgTriggerFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase);
+static void UpdatePgRlspolicyFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase);
+static ViewInfoForAdd *GetViewInfoFirstAfter(Relation rel, Oid objid, bool keep_star = false);
+static List *CheckPgRewriteFirstAfter(Relation rel);
+static void ReplaceViewQueryFirstAfter(List *query_str);
+static void UpdateDependRefobjsubidFirstAfter(Relation rel, Oid myrelid, int curattnum, int newattnum,
+ bool *has_depend);
+static void UpdateDependRefobjsubidToNewattnum(Relation rel, Oid myrelid, int curattnum, int newattnum);
+static void UpdateAttrdefAdnumFirstAfter(Relation rel, Oid myrelid, int curattnum, int newattnum,
+ bool *has_default);
+static void UpdatePartitionPartkeyFirstAfter(Oid myrelid, int curattnum, int newattnum);
+static void AlterColumnToFirstAfter(AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd,
+ int curattnum);
+static bool CheckIndexIsConstraint(Relation dep_rel, Oid objid, Oid *refobjid);
+
static AlteredTableInfo* ATGetQueueEntry(List** wqueue, Relation rel, bool isDeltaTable = false);
static void ATSimplePermissions(Relation rel, int allowed_targets);
static void ATWrongRelkindError(Relation rel, int allowed_targets);
static void ATSimpleRecursion(List** wqueue, Relation rel, AlterTableCmd* cmd, bool recurse, LOCKMODE lockmode);
static void ATTypedTableRecursion(List** wqueue, Relation rel, AlterTableCmd* cmd, LOCKMODE lockmode);
static List* find_typed_table_dependencies(Oid typeOid, const char* typname, DropBehavior behavior);
-static void ATPrepAddColumn(
- List** wqueue, Relation rel, bool recurse, bool recursing, AlterTableCmd* cmd, LOCKMODE lockmode);
+static void ATPrepAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel, bool recurse,
+ bool recursing, AlterTableCmd* cmd, LOCKMODE lockmode);
static void ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel, ColumnDef* colDef, bool isOid,
- bool recurse, bool recursing, LOCKMODE lockmode);
+ bool recurse, bool recursing, bool is_first, char *after_name, LOCKMODE lockmode);
static void check_for_column_name_collision(Relation rel, const char* colname);
static void add_column_datatype_dependency(Oid relid, int32 attnum, Oid typid);
static void add_column_collation_dependency(Oid relid, int32 attnum, Oid collid);
@@ -562,8 +625,11 @@ static void ATExecDropConstraint(Relation rel, const char* constrName, DropBehav
bool recursing, bool missing_ok, LOCKMODE lockmode);
static void ATPrepAlterColumnType(List** wqueue, AlteredTableInfo* tab, Relation rel, bool recurse, bool recursing,
AlterTableCmd* cmd, LOCKMODE lockmode);
+static void ATPrepAlterModifyColumn(List** wqueue, AlteredTableInfo* tab, Relation rel, bool recurse,
+ bool recursing, AlterTableCmd* cmd, LOCKMODE lockmode);
static bool ATColumnChangeRequiresRewrite(Node* expr, AttrNumber varattno);
static void ATExecAlterColumnType(AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, LOCKMODE lockmode);
+static void ATExecAlterModifyColumn(AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd);
static void ATExecAlterColumnGenericOptions(Relation rel, const char* colName, List* options, LOCKMODE lockmode);
static void ATPostAlterTypeCleanup(List** wqueue, AlteredTableInfo* tab, LOCKMODE lockmode);
static void ATPostAlterTypeParse(
@@ -605,6 +671,11 @@ static void AtExecDeleteNode(Relation rel, List* options);
static void AtExecCopySlice(CatCList* sliceList, Oid tabOid, Relation pgxcSliceRel);
static void AtExecUpdateSliceLike(Relation rel, const RangeVar* refTableName);
static void ATCheckCmd(Relation rel, AlterTableCmd* cmd);
+static void sqlcmd_alter_exec_set_charsetcollate(Relation rel, CharsetCollateOptions* cc, LOCKMODE lockmode);
+static void sqlcmd_alter_prep_convert_charset(AlteredTableInfo* tab, Relation rel,
+ AlterTableCmd* cmd, LOCKMODE lockmode);
+static void sqlcmd_alter_exec_convert_charset(AlteredTableInfo* tab, Relation rel,
+ CharsetCollateOptions* cc, LOCKMODE lockmode);
static DFSFileType getSetFormatNewFormat(AlterTableCmd* cmd);
static bool checkColumnTypeIsBytea(Relation rel);
static DFSFileType getFormatByDefElem(DefElem* opt);
@@ -640,7 +711,6 @@ static void CheckIntervalPartitionKeyType(FormData_pg_attribute* attrs, List* po
static void CheckIntervalValue(
const FormData_pg_attribute* attrs, const List* pos, const IntervalPartitionDefState* intervalPartDef);
static void CheckPartitionTablespace(const char* spcname, Oid owner);
-static Const* GetListPartitionValue(Form_pg_attribute attrs, List* value, bool partkeyIsFunc = false);
static bool ConfirmTypeInfo(Oid* target_oid, int* target_mod, Const* src, Form_pg_attribute attrs, bool isinterval);
static void ATPrepAddPartition(Relation rel);
@@ -650,8 +720,7 @@ static void ATPrepDropSubPartition(Relation rel);
static void ATPrepUnusableIndexPartition(Relation rel);
static void ATPrepUnusableAllIndexOnPartition(Relation rel);
static void ATExecAddPartition(Relation rel, AddPartitionState *partState);
-static void ATExecAddRangePartition(Relation rel, AddPartitionState *partState);
-static void ATExecAddListPartition(Relation rel, AddPartitionState *partState);
+static void ATExecAddPartitionInternal(Relation rel, AddPartitionState *partState);
static void ATExecAddSubPartition(Relation rel, AddSubPartitionState *subpartState);
static void CheckForAddPartition(Relation rel, List *partDefStateList);
static void CheckForAddSubPartition(Relation rel, Relation partrel, List *subpartDefStateList);
@@ -736,6 +805,8 @@ static void ATPrepExchangePartition(Relation rel);
static void ATPrepMergePartition(Relation rel);
static void ATPrepSplitPartition(Relation rel);
static void ATPrepSplitSubPartition(Relation rel);
+static void ATPrepResetPartitionno(Relation rel);
+static void ATExecResetPartitionno(Relation rel);
static void replaceRepeatChunkId(HTAB* chunkIdHashTable, List* srcPartToastRels);
static bool checkChunkIdRepeat(List* srcPartToastRels, int index, Oid chunkId);
static void addCudescTableForNewPartition(Relation relation, Oid newPartId);
@@ -763,8 +834,11 @@ static void ATCheckDuplicateColumn(const AlterTableCmd* cmd, const List* tabCmds
static void ATCheckNotNullConstr(const AlterTableCmd* cmd, const AlteredTableInfo* tab);
static void DelDependencONDataType(Relation rel, Relation depRel, const Form_pg_attribute attTup);
static void ATExecEncryptionKeyRotation(Relation rel, LOCKMODE lockmode);
-static Datum GetAutoIncrementDatum(Relation rel, TupleDesc desc);
static void CopyTempAutoIncrement(Relation oldrel, Relation newrel);
+static void ATAlterCheckModifiyColumnRepeatedly(const AlterTableCmd* cmd, const List* tab_cmds);
+static int128 EvaluateAutoIncrement(Relation rel, TupleDesc desc, AttrNumber attnum, Datum* value, bool* is_null);
+static void SetRelAutoIncrement(Relation rel, TupleDesc desc, int128 autoinc);
+static Node* RecookAutoincAttrDefault(Relation rel, int attrno, Oid targettype, int targettypmod);
inline static bool CStoreSupportATCmd(AlterTableType cmdtype)
{
@@ -774,6 +848,7 @@ inline static bool CStoreSupportATCmd(AlterTableType cmdtype)
case AT_ExchangePartition:
case AT_TruncatePartition:
case AT_DropPartition:
+ case AT_ResetPartitionno:
case AT_AddConstraint:
case AT_DropConstraint:
case AT_AddNodeList:
@@ -1745,10 +1820,12 @@ static void add_partiton(CreateStmt* stmt, StdRdOptions* std_opt)
part1 = makeNode(RangePartitionDefState);
part1->partitionName = "default_part_1";
part1->boundary = list_make1(con1);
+ part1->partitionno = 1;
part2 = makeNode(RangePartitionDefState);
part2->partitionName = "default_part_2";
part2->boundary = list_make1(con2);
+ part2->partitionno = 2;
part_state = makeNode(PartitionState);
part_state->partitionStrategy = 'r';
@@ -1893,42 +1970,30 @@ static void CheckPartitionKeyForCreateTable(PartitionState *partTableState, List
list_free_ext(pos);
}
-static List *GetSubpPartitionDefList(PartitionState *partTableState, ListCell *cell)
+static List *GetSubPartitionDefList(PartitionState *partTableState, ListCell *cell)
{
- List *subPartitionList = NIL;
- if (partTableState->partitionStrategy == PART_STRATEGY_RANGE) {
- RangePartitionDefState *subPartitionDefState = (RangePartitionDefState *)lfirst(cell);
- subPartitionList = subPartitionDefState->subPartitionDefState;
- if (subPartitionList == NIL) {
- Const *boundaryDefault = makeNode(Const);
- boundaryDefault->ismaxvalue = true;
- boundaryDefault->location = -1;
+ PartitionDefState *partitionDefState = (PartitionDefState *)lfirst(cell);
+ List *subPartitionList = partitionDefState->subPartitionDefState;
+
+ if (subPartitionList == NIL) {
+ Const *boundaryDefault = makeNode(Const);
+ boundaryDefault->ismaxvalue = true;
+ boundaryDefault->location = -1;
+
+ if (partTableState->partitionStrategy == PART_STRATEGY_RANGE) {
RangePartitionDefState *tmpSubPartitionDefState = makeNode(RangePartitionDefState);
tmpSubPartitionDefState->boundary = list_make1(boundaryDefault);
subPartitionList = lappend(subPartitionList, tmpSubPartitionDefState);
- }
- } else if (partTableState->partitionStrategy == PART_STRATEGY_LIST) {
- ListPartitionDefState *subPartitionDefState = (ListPartitionDefState *)lfirst(cell);
- subPartitionList = subPartitionDefState->subPartitionDefState;
- if (subPartitionList == NIL) {
- Const *boundaryDefault = makeNode(Const);
- boundaryDefault->ismaxvalue = true;
- boundaryDefault->location = -1;
+ } else if (partTableState->partitionStrategy == PART_STRATEGY_LIST) {
ListPartitionDefState *tmpSubPartitionDefState = makeNode(ListPartitionDefState);
tmpSubPartitionDefState->boundary = list_make1(boundaryDefault);
subPartitionList = lappend(subPartitionList, tmpSubPartitionDefState);
- }
- } else if (partTableState->partitionStrategy == PART_STRATEGY_HASH) {
- HashPartitionDefState *subPartitionDefState = (HashPartitionDefState *)lfirst(cell);
- subPartitionList = subPartitionDefState->subPartitionDefState;
- if (subPartitionList == NIL) {
- Const *boundaryDefault = makeNode(Const);
- boundaryDefault->ismaxvalue = true;
- boundaryDefault->location = -1;
+ } else if (partTableState->partitionStrategy == PART_STRATEGY_HASH) {
HashPartitionDefState *tmpSubPartitionDefState = makeNode(HashPartitionDefState);
tmpSubPartitionDefState->boundary = list_make1(boundaryDefault);
subPartitionList = lappend(subPartitionList, tmpSubPartitionDefState);
}
+
}
return subPartitionList;
@@ -2028,6 +2093,8 @@ Oid DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, bool isCTAS)
HashBucketInfo* bucketinfo = NULL;
DistributionType distType;
bool relhasuids = false;
+ Oid nspdefcoll = InvalidOid;
+ Oid rel_coll_oid = InvalidOid;
/*
* isalter is true, change the owner of the objects as the owner of the
@@ -2172,6 +2239,7 @@ Oid DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, bool isCTAS)
errmsg("cannot create table under blockchain namspace.")));
}
isInLedgerNsp = IsLedgerNameSpace(namespaceId);
+ nspdefcoll = get_nsp_default_collation(namespaceId);
/*
* Select tablespace to use. If not specified, use default tablespace
@@ -2279,6 +2347,11 @@ Oid DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, bool isCTAS)
"specified by a regular table");
}
+ /* relation collation is stored using stmt->options. */
+ if (DB_IS_CMPT(B_FORMAT) && relkind == RELKIND_RELATION) {
+ (void)fill_relation_collation(stmt->collate, stmt->charset, &stmt->options, nspdefcoll);
+ }
+
fillTdeRelOptions(stmt->options, relkind);
reloptions = transformRelOptions((Datum)0, stmt->options, NULL, validnsps, true, false);
@@ -2309,6 +2382,7 @@ Oid DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, bool isCTAS)
errdetail("wal_level must >= WAL_LEVEL_ARCHIVE")));
}
}
+ rel_coll_oid = std_opt->collate;
hashbucket = std_opt->hashbucket;
bucketcnt = std_opt->bucketcnt;
storage_type = (std_opt->segment == true) ? SEGMENT_PAGE : HEAP_DISK;
@@ -2566,7 +2640,7 @@ Oid DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, bool isCTAS)
if (relkind == RELKIND_COMPOSITE_TYPE)
descriptor = BuildDescForRelation(schema, orientedFrom, relkind);
else
- descriptor = BuildDescForRelation(schema, orientedFrom);
+ descriptor = BuildDescForRelation(schema, orientedFrom, '\0', rel_coll_oid);
/* Must specify at least one column when creating a table. */
if (descriptor->natts == 0 && relkind != RELKIND_COMPOSITE_TYPE) {
@@ -2580,24 +2654,9 @@ Oid DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, bool isCTAS)
Assert(list_length(stmt->partTableState->partitionKey) == 1);
Assert(list_length(stmt->partTableState->subPartitionState->partitionKey) == 1);
- ColumnRef *partitionKeyRef = (ColumnRef *)linitial(stmt->partTableState->partitionKey);
- ColumnRef *subPartitionKeyRef =
- (ColumnRef *)linitial(stmt->partTableState->subPartitionState->partitionKey);
- if (IsA(partitionKeyRef,ColumnRef) && IsA(subPartitionKeyRef,ColumnRef)) {
- char *partitonKeyName = ((Value *)linitial(partitionKeyRef->fields))->val.str;
- char *subPartitonKeyName = ((Value *)linitial(subPartitionKeyRef->fields))->val.str;
- if (!strcmp(partitonKeyName, subPartitonKeyName)) {
- ereport(
- ERROR,
- (errmodule(MOD_COMMAND), errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("The two partition keys of a subpartition partition table are the same."), errdetail("N/A"),
- errcause("The two partition keys of a subpartition partition table cannot be the same."),
- erraction("Partition keys cannot be the same column.")));
- }
- }
foreach (cell, stmt->partTableState->partitionList) {
stmt->partTableState->subPartitionState->partitionList =
- GetSubpPartitionDefList(stmt->partTableState, cell);
+ GetSubPartitionDefList(stmt->partTableState, cell);
CheckPartitionKeyForCreateTable(stmt->partTableState->subPartitionState, schema, descriptor);
}
stmt->partTableState->subPartitionState->partitionList = NIL;
@@ -6428,6 +6487,10 @@ void RenameRelation(RenameStmt* stmt)
RenameTableFeature(stmt);
} else {
Oid relid;
+ HeapTuple tuple;
+ Datum name;
+ bool isnull = false;
+ char *relname = NULL;
/*
* Grab an exclusive lock on the target table, index, sequence or view,
@@ -6450,6 +6513,20 @@ void RenameRelation(RenameStmt* stmt)
return;
}
+ tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
+ if (!HeapTupleIsValid(tuple)) {
+ ereport(ERROR,
+ (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", relid)));
+ }
+ name = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_relname, &isnull);
+ Assert(!isnull);
+ relname = DatumGetName(name)->data;
+ if (ISMLOG(relname) || ISMATMAP(relname)) {
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), (errmsg("Un-support feature"),
+ errdetail("%s table doesn't support this ALTER yet.", ISMLOG(relname) ? "mlog" : "matviewmap"))));
+ }
+ ReleaseSysCache(tuple);
+
TrForbidAccessRbObject(RelationRelationId, relid, stmt->relation->relname);
/* If table has history table, we need rename corresponding history table */
if (is_ledger_usertable(relid)) {
@@ -6541,15 +6618,15 @@ void RenameRelationInternal(Oid myrelid, const char* newrelname)
relform = (Form_pg_class)GETSTRUCT(reltup);
- /*
- * Check relation name to ensure that it doesn't conflict with existing synonym.
- */
- if (!IsInitdb && GetSynonymOid(newrelname, namespaceId, true) != InvalidOid) {
- ereport(ERROR,
- (errmsg("relation name is already used by an existing synonym in schema \"%s\"",
- get_namespace_name(namespaceId))));
- }
-
+ /*
+ * Check relation name to ensure that it doesn't conflict with existing synonym.
+ */
+ if (!IsInitdb && GetSynonymOid(newrelname, namespaceId, true) != InvalidOid) {
+ ereport(ERROR,
+ (errmsg("relation name is already used by an existing synonym in schema \"%s\"",
+ get_namespace_name(namespaceId))));
+ }
+
if (get_relname_relid(newrelname, namespaceId) != InvalidOid)
ereport(ERROR, (errcode(ERRCODE_DUPLICATE_TABLE), errmsg("relation \"%s\" already exists", newrelname)));
@@ -6777,7 +6854,7 @@ void renamePartition(RenameStmt* stmt)
* 1. If rename partition by name.
*/
if (PointerIsValid(stmt->subname)) {
- partitionOid = partitionNameGetPartitionOid(partitionedTableOid,
+ partitionOid = PartitionNameGetPartitionOid(partitionedTableOid,
stmt->subname,
PART_OBJ_TYPE_TABLE_PARTITION,
AccessExclusiveLock,
@@ -6808,7 +6885,7 @@ void renamePartition(RenameStmt* stmt)
rel->rd_att->attrs, ((RangePartitionMap*)rel->partMap)->partitionKey, rangePartDef->boundary);
partitionOid =
- partitionValuesGetPartitionOid(rel, rangePartDef->boundary, AccessExclusiveLock, true, true, false);
+ PartitionValuesGetPartitionOid(rel, rangePartDef->boundary, AccessExclusiveLock, true, true, false);
pfree_ext(pstate);
list_free_deep(rangePartDef->boundary);
@@ -6832,6 +6909,11 @@ void renamePartition(RenameStmt* stmt)
errmsg("partition \"%s\" of relation \"%s\" already exists", stmt->newname, stmt->relation->relname)));
}
+ /* add INTERVAL_PARTITION_LOCK_SDEQUENCE here to avoid ADD INTERVAL PARTITION */
+ if (RELATION_IS_INTERVAL_PARTITIONED(rel)) {
+ LockPartitionObject(rel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK);
+ }
+
/* Do the work */
renamePartitionInternal(partitionedTableOid, partitionOid, stmt->newname);
@@ -6885,7 +6967,7 @@ void renamePartitionIndex(RenameStmt* stmt)
TrForbidAccessRbObject(RelationRelationId, partitionedTableIndexOid, stmt->relation->relname);
/* get partition index oid */
- partitionIndexOid = partitionNameGetPartitionOid(partitionedTableIndexOid,
+ partitionIndexOid = PartitionNameGetPartitionOid(partitionedTableIndexOid,
stmt->subname,
PART_OBJ_TYPE_INDEX_PARTITION,
AccessExclusiveLock,
@@ -7659,21 +7741,11 @@ static LOCKMODE set_lockmode(LOCKMODE mode, LOCKMODE cmd_mode)
#ifndef ENABLE_MULTIPLE_NODES
static LOCKMODE GetPartitionLockLevel(AlterTableType subType)
{
- LOCKMODE cmdLockMode;
- switch (subType) {
- case AT_AddPartition:
- case AT_AddSubPartition:
- case AT_DropPartition:
- case AT_DropSubPartition:
- case AT_ExchangePartition:
- case AT_TruncatePartition:
- cmdLockMode = ShareUpdateExclusiveLock;
- break;
- default:
- cmdLockMode = AccessExclusiveLock;
- break;
+ if (PARTITION_DDL_CMD(subType)) {
+ return ShareUpdateExclusiveLock;
+ } else {
+ return AccessExclusiveLock;
}
- return cmdLockMode;
}
#endif
@@ -7739,6 +7811,18 @@ LOCKMODE AlterTableGetLockLevel(List* cmds)
#ifndef ENABLE_MULTIPLE_NODES
cmd_lockmode = GetPartitionLockLevel(cmd->subtype);
#endif
+ /* if the partitionno is set first time in upgrade mode, we set lockmode to ShareUpdateExclusiveLock */
+ if (cmd->subtype == AT_ResetPartitionno) {
+ if (list_length(cmds) != 1) {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+ errmsg("RESET PARTITIONNO cannot be performed during multiple subcommands")));
+ }
+ if (t_thrd.proc->workingVersionNum >= PARTITION_ENHANCE_VERSION_NUM) {
+ cmd_lockmode = AccessExclusiveLock;
+ } else {
+ cmd_lockmode = ShareUpdateExclusiveLock;
+ }
+ }
/* update with the higher lock mode */
lockmode = set_lockmode(lockmode, cmd_lockmode);
}
@@ -7869,7 +7953,7 @@ static void ATPrepCmd(List** wqueue, Relation rel, AlterTableCmd* cmd, bool recu
switch (cmd->subtype) {
case AT_AddColumn: /* ADD COLUMN */
ATSimplePermissions(rel, ATT_TABLE | ATT_COMPOSITE_TYPE | ATT_FOREIGN_TABLE | ATT_SEQUENCE);
- ATPrepAddColumn(wqueue, rel, recurse, recursing, cmd, lockmode);
+ ATPrepAddColumn(wqueue, tab, rel, recurse, recursing, cmd, lockmode);
/* Recursion occurs during execution phase */
pass = AT_PASS_ADD_COL;
break;
@@ -7888,7 +7972,7 @@ static void ATPrepCmd(List** wqueue, Relation rel, AlterTableCmd* cmd, bool recu
case AT_AddColumnToView: /* add column via CREATE OR REPLACE
* VIEW */
ATSimplePermissions(rel, ATT_VIEW);
- ATPrepAddColumn(wqueue, rel, recurse, recursing, cmd, lockmode);
+ ATPrepAddColumn(wqueue, NULL, rel, recurse, recursing, cmd, lockmode);
/* Recursion occurs during execution phase */
pass = AT_PASS_ADD_COL;
break;
@@ -8133,11 +8217,17 @@ static void ATPrepCmd(List** wqueue, Relation rel, AlterTableCmd* cmd, bool recu
case AT_AddOf: /* OF */
case AT_DropOf: /* NOT OF */
case AT_SetAutoIncrement:
+ case AT_SetCharsetCollate:
ATSimplePermissions(rel, ATT_TABLE);
/* These commands never recurse */
/* No command-specific prep needed */
pass = AT_PASS_MISC;
break;
+ case AT_ConvertCharset:
+ ATSimplePermissions(rel, ATT_TABLE);
+ sqlcmd_alter_prep_convert_charset(tab, rel, cmd, lockmode);
+ pass = AT_PASS_MISC;
+ break;
case AT_GenericOptions:
ATSimplePermissions(rel, ATT_FOREIGN_TABLE);
/* No command-specific prep needed */
@@ -8189,6 +8279,17 @@ static void ATPrepCmd(List** wqueue, Relation rel, AlterTableCmd* cmd, bool recu
ATPrepSplitSubPartition(rel);
pass = AT_PASS_MISC;
break;
+ case AT_ResetPartitionno:
+ ATSimplePermissions(rel, ATT_TABLE);
+ ATPrepResetPartitionno(rel);
+ pass = AT_PASS_MISC;
+ break;
+ case AT_ModifyColumn:
+ ATSimplePermissions(rel, ATT_TABLE);
+ ATPrepAlterModifyColumn(wqueue, tab, rel, recurse, recursing, cmd, lockmode);
+ pass = AT_PASS_ALTER_TYPE;
+ ATAlterCheckModifiyColumnRepeatedly(cmd, tab->subcmds[pass]);
+ break;
#ifdef PGXC
case AT_DistributeBy:
case AT_SubCluster:
@@ -8229,6 +8330,7 @@ static bool ATCheckLedgerTableCmd(Relation rel, AlterTableCmd* cmd)
case AT_AddColumn: /* ADD COLUMN */
case AT_DropColumn: /* DROP COLUMN */
case AT_AlterColumnType: /* ALTER COLUMN TYPE */
+ case AT_ModifyColumn: /* MODIFY/CHANGE COLUMN */
case AT_ExchangePartition: /* EXCHANGE PARTITION */
case AT_DropPartition: /* DROP PARTITION */
case AT_DropSubPartition: /* DROP PARTITION */
@@ -8272,6 +8374,87 @@ static void ATCheckNotNullConstr(const AlterTableCmd* cmd, const AlteredTableInf
ATCheckDuplicateColumn(cmd, tab->subcmds[AT_PASS_DROP]);
}
+static Node* GetGeneratedAdbin(Relation rel, AttrNumber myattnum)
+{
+ Oid atttype = rel->rd_att->attrs[myattnum - 1].atttypid;
+ int32 atttypmod = rel->rd_att->attrs[myattnum - 1].atttypmod;
+ ScanKeyData key[2];
+ HeapTuple def_tuple;
+ Relation def_rel;
+ SysScanDesc scan;
+ Oid exprtype;
+ Node *expr = NULL;
+
+ def_rel = heap_open(AttrDefaultRelationId, RowExclusiveLock);
+ ScanKeyInit(&key[0], Anum_pg_attrdef_adrelid, BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(RelationGetRelid(rel)));
+ ScanKeyInit(&key[1], Anum_pg_attrdef_adnum, BTEqualStrategyNumber, F_INT2EQ, Int16GetDatum(myattnum));
+
+ scan = systable_beginscan(def_rel, AttrDefaultIndexId, true, NULL, 2, key);
+
+ while (HeapTupleIsValid(def_tuple = systable_getnext(scan))) {
+ bool is_null = false;
+ Datum adbin_datum;
+ char *adbin_string = NULL;
+
+ adbin_datum = fastgetattr(def_tuple, Anum_pg_attrdef_adbin, def_rel->rd_att, &is_null);
+ AssertEreport(!is_null, MOD_OPT, "");
+ adbin_string = TextDatumGetCString(adbin_datum);
+ expr = (Node *)stringToNode_skip_extern_fields(adbin_string);
+
+ exprtype = exprType(expr);
+
+ expr = coerce_to_target_type(NULL, /* no UNKNOWN params here */
+ expr,
+ exprtype,
+ atttype,
+ atttypmod,
+ COERCION_ASSIGNMENT,
+ COERCE_IMPLICIT_CAST,
+ -1);
+
+ /*
+ * If there is nextval FuncExpr, we should lock the quoted sequence to avoid deadlock, this has beed done in
+ * transformFuncExpr. See sqlcmd_lock_nextval_on_cn for more details.
+ */
+ (void)lockNextvalWalker(expr, NULL);
+
+ pfree_ext(adbin_string);
+ }
+ systable_endscan(scan);
+ heap_close(def_rel, RowExclusiveLock);
+
+ return expr;
+}
+
+static void UpdateGeneratedExpr(AlteredTableInfo* tab)
+{
+ ListCell* l = NULL;
+ foreach(l, tab->newvals) {
+ NewColumnValue* ex = (NewColumnValue*)lfirst(l);
+ Relation rel;
+ AttrNumber attnum;
+
+ if (!ex->is_generated) {
+ continue;
+ }
+
+ rel = relation_open(tab->relid, NoLock);
+
+ attnum = get_attnum(RelationGetRelid(rel), ex->col_name);
+ if (attnum <= InvalidAttrNumber) { /* shouldn't happen */
+ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN),
+ errmsg("column \"%s\" of relation \"%s\" does not exist", ex->col_name, RelationGetRelationName(rel))));
+ }
+
+ Expr *defval = (Expr *)GetGeneratedAdbin(rel, attnum);
+ ex->expr = expression_planner(defval);
+ ex->generate_attnum = attnum;
+
+ relation_close(rel, NoLock);
+ }
+}
+
/*
* ATRewriteCatalogs
*
@@ -8326,6 +8509,10 @@ static void ATRewriteCatalogs(List** wqueue, LOCKMODE lockmode)
foreach (ltab, *wqueue) {
AlteredTableInfo* tab = (AlteredTableInfo*)lfirst(ltab);
+ if (tab->is_first_after) {
+ UpdateGeneratedExpr(tab);
+ }
+
if (get_rel_persistence(tab->relid) == RELPERSISTENCE_GLOBAL_TEMP) {
gtt_create_storage_files(tab->relid);
}
@@ -8345,9 +8532,19 @@ static void ATRewriteCatalogs(List** wqueue, LOCKMODE lockmode)
AlterTableCreateToastTable(tab->relid, toast_reloptions);
relation_close(rel, NoLock);
}
+ /* check auto_increment indexes after rewrite catalogs */
if (tab->relkind == RELKIND_RELATION) {
CheckRelAutoIncrementIndex(tab->relid, NoLock);
}
+ /* recreate every table triggers */
+ foreach_cell(def_item, tab->changedTriggerDefs) {
+ char* cmd_str = (char*)lfirst(def_item);
+ List* raw_parsetree_list = raw_parser(cmd_str);
+ Node* stmt = (Node*)linitial(raw_parsetree_list);
+ Assert(IsA(stmt, CreateTrigStmt));
+ (void)CreateTrigger(
+ (CreateTrigStmt*)stmt, cmd_str, InvalidOid, InvalidOid, InvalidOid, InvalidOid, false);
+ }
}
}
@@ -8370,6 +8567,122 @@ static void ATExecSetAutoIncrement(Relation rel, Value* value)
}
}
+static void sqlcmd_alter_exec_set_charsetcollate(Relation rel, CharsetCollateOptions* cc, LOCKMODE lockmode)
+{
+ List* new_reloption = NULL;
+
+ (void)fill_relation_collation(cc->collate, cc->charset, &new_reloption);
+ ATExecSetRelOptions(rel, new_reloption, AT_SetRelOptions, lockmode, false);
+}
+
+static void sqlcmd_alter_prep_convert_charset(AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd,
+ LOCKMODE lockmode)
+{
+ CharsetCollateOptions* cc = (CharsetCollateOptions*)cmd->def;
+ Node* transform;
+ Oid targetcollid = InvalidOid;
+ Oid targettypid = InvalidOid;
+ ParseState* pstate = make_parsestate(NULL);
+ CatCList *catlist = NULL;
+
+ if (tab->relkind != RELKIND_RELATION)
+ ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" should be a normal table", RelationGetRelationName(rel))));
+
+ if (cc->charset == PG_INVALID_ENCODING) {
+ cc->charset = get_charset_by_collation(get_nsp_default_collation(RelationGetNamespace(rel)));
+ if (cc->charset == PG_INVALID_ENCODING) {
+ cc->charset = GetDatabaseEncoding();
+ }
+ }
+ targetcollid = transform_default_collation(cc->collate, cc->charset);
+
+ catlist = SearchSysCacheList1(ATTNUM, ObjectIdGetDatum(rel->rd_id));
+ for (int i = 0; i < catlist->n_members; i++) {
+ HeapTuple tuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i);
+ Form_pg_attribute attTup = (Form_pg_attribute)GETSTRUCT(tuple);
+ int attnum = attTup->attnum;
+ if (attnum <= 0 || attTup->attisdropped || !type_is_collatable(attTup->atttypid) ||
+ get_charset_by_collation(attTup->attcollation) == cc->charset)
+ continue;
+
+ transform = (Node*)makeVar(1, attnum, attTup->atttypid, attTup->atttypmod, attTup->attcollation, 0);
+ /* When converting the charset to binary, string types are converted to the corresponding binary types */
+ targettypid = binary_need_transform_typeid(attTup->atttypid, &targetcollid);
+ if (targettypid != attTup->atttypid) {
+ transform = coerce_to_target_type(pstate,
+ transform,
+ exprType(transform),
+ targettypid,
+ attTup->atttypmod,
+ COERCION_ASSIGNMENT,
+ COERCE_IMPLICIT_CAST,
+ -1);
+ if (transform == NULL)
+ ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("column \"%s\" cannot be cast automatically to type %s", NameStr(attTup->attname),
+ format_type_be(targettypid))));
+ }
+
+ transform = coerce_to_target_charset(transform, cc->charset, targettypid);
+
+ exprSetCollation(transform, targetcollid);
+
+ /* Fix collations after all else */
+ assign_expr_collations(pstate, transform);
+
+ /* Plan the expr now so we can accurately assess the need to rewrite. */
+ transform = (Node*)expression_planner((Expr*)transform);
+
+ /*
+ * Add a work queue item to make ATRewriteTable update the column
+ * contents.
+ */
+ NewColumnValue* newval = (NewColumnValue*)palloc0(sizeof(NewColumnValue));
+ newval->attnum = attnum;
+ newval->expr = (Expr*)transform;
+ newval->is_generated = false;
+ newval->is_autoinc = false;
+
+ tab->newvals = lappend(tab->newvals, newval);
+ tab->rewrite = true;
+ }
+
+ ReleaseSysCacheList(catlist);
+}
+
+static void sqlcmd_alter_exec_convert_charset(AlteredTableInfo* tab, Relation rel, CharsetCollateOptions* cc,
+ LOCKMODE lockmode)
+{
+ List* new_reloption = NULL;
+ ListCell* lc = NULL;
+ HeapTuple heapTup;
+ Form_pg_attribute attTup;
+ Relation attrelation;
+
+ int target_charset = cc->charset;
+ Oid target_coll_oid = fill_relation_collation(cc->collate, target_charset, &new_reloption);
+
+ ATExecSetRelOptions(rel, new_reloption, AT_SetRelOptions, lockmode, false);
+
+ attrelation = heap_open(AttributeRelationId, RowExclusiveLock);
+ foreach(lc, tab->newvals) {
+ NewColumnValue* newval = (NewColumnValue*)lfirst(lc);
+ heapTup = SearchSysCacheCopy2(ATTNUM, RelationGetRelid(rel), newval->attnum);
+ attTup = (Form_pg_attribute)GETSTRUCT(heapTup);
+ attTup->attcollation = target_coll_oid;
+ attTup->atttypid = binary_need_transform_typeid(attTup->atttypid, &target_coll_oid);
+
+ simple_heap_update(attrelation, &heapTup->t_self, heapTup);
+ CatalogUpdateIndexes(attrelation, heapTup);
+ add_column_collation_dependency(RelationGetRelid(rel), newval->attnum, target_coll_oid);
+
+ tableam_tops_free_tuple(heapTup);
+ }
+
+ heap_close(attrelation, RowExclusiveLock);
+}
+
static void ATCreateColumComments(Oid relOid, ColumnDef* columnDef)
{
List *columnOptions = columnDef->columnOptions;
@@ -8391,14 +8704,24 @@ static void ATCreateColumComments(Oid relOid, ColumnDef* columnDef)
static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, LOCKMODE lockmode)
{
elog(ES_LOGLEVEL, "[ATExecCmd] cmd subtype: %d", cmd->subtype);
+
+ if (PARTITION_DDL_CMD(cmd->subtype) && RELATION_IS_PARTITIONED(rel)) {
+ int partitionno = -GetCurrentPartitionNo(RelOidGetPartitionTupleid(rel->rd_id));
+ if (!PARTITIONNO_IS_VALID(partitionno)) {
+ RelationResetPartitionno(rel->rd_id, ShareUpdateExclusiveLock);
+ }
+ }
+
switch (cmd->subtype) {
case AT_AddColumn: /* ADD COLUMN */
case AT_AddColumnToView: /* add column via CREATE OR REPLACE
* VIEW */
- ATExecAddColumn(wqueue, tab, rel, (ColumnDef*)cmd->def, false, false, false, lockmode);
+ ATExecAddColumn(wqueue, tab, rel, (ColumnDef*)cmd->def, false, false, false,
+ cmd->is_first, cmd->after_name, lockmode);
break;
case AT_AddColumnRecurse:
- ATExecAddColumn(wqueue, tab, rel, (ColumnDef*)cmd->def, false, true, false, lockmode);
+ ATExecAddColumn(wqueue, tab, rel, (ColumnDef*)cmd->def, false, true, false,
+ cmd->is_first, cmd->after_name, lockmode);
break;
case AT_AddPartition: /* add partition */
ATExecAddPartition(rel, (AddPartitionState*)cmd->def);
@@ -8509,12 +8832,12 @@ static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterT
case AT_AddOids: /* SET WITH OIDS */
/* Use the ADD COLUMN code, unless prep decided to do nothing */
if (cmd->def != NULL)
- ATExecAddColumn(wqueue, tab, rel, (ColumnDef*)cmd->def, true, false, false, lockmode);
+ ATExecAddColumn(wqueue, tab, rel, (ColumnDef*)cmd->def, true, false, false, false, NULL, lockmode);
break;
case AT_AddOidsRecurse: /* SET WITH OIDS */
/* Use the ADD COLUMN code, unless prep decided to do nothing */
if (cmd->def != NULL)
- ATExecAddColumn(wqueue, tab, rel, (ColumnDef*)cmd->def, true, true, false, lockmode);
+ ATExecAddColumn(wqueue, tab, rel, (ColumnDef*)cmd->def, true, true, false, false, NULL, lockmode);
break;
case AT_DropOids: /* SET WITHOUT OIDS */
@@ -8640,6 +8963,18 @@ static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterT
case AT_SetAutoIncrement:
ATExecSetAutoIncrement(rel, (Value*)cmd->def);
break;
+ case AT_ResetPartitionno:
+ ATExecResetPartitionno(rel);
+ break;
+ case AT_ModifyColumn:
+ ATExecAlterModifyColumn(tab, rel, cmd);
+ break;
+ case AT_SetCharsetCollate:
+ sqlcmd_alter_exec_set_charsetcollate(rel, (CharsetCollateOptions*)cmd->def, lockmode);
+ break;
+ case AT_ConvertCharset: /* CONVERT TO CHARACTER SET */
+ sqlcmd_alter_exec_convert_charset(tab, rel, (CharsetCollateOptions*)cmd->def, lockmode);
+ break;
#ifdef PGXC
case AT_DistributeBy:
@@ -8679,6 +9014,15 @@ static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterT
UpdatePgObjectMtime(tab->relid, objectType);
}
+ /* take ExclusiveLock to avoid PARTITION DDL COMMIT until we finish the InitPlan. Oid info will be masked here, and
+ * be locked in CommitTransaction. Distribute mode doesn't support partition DDL/DML parallel work, no need this
+ * action */
+#ifndef ENABLE_MULTIPLE_NODES
+ if (PARTITION_DDL_CMD(cmd->subtype)) {
+ AddPartitionDDLInfo(RelationGetRelid(rel));
+ }
+#endif
+
/*
* Bump the command counter to ensure the next subcommand in the sequence
* can see the changes so far
@@ -8944,11 +9288,12 @@ static T EvaluateGenExpr(AlteredTableInfo* tab, T tuple,
hasGenCol = true;
- values[ex->attnum - 1]
- = ExecEvalExpr(ex->exprstate,
- econtext,
- &isnull[ex->attnum - 1],
- NULL);
+ if (tab->is_first_after) {
+ values[ex->generate_attnum - 1] = ExecEvalExpr(ex->exprstate, econtext,
+ &isnull[ex->generate_attnum - 1], NULL);
+ } else {
+ values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate, econtext, &isnull[ex->attnum - 1], NULL);
+ }
}
if (hasGenCol) {
@@ -8966,6 +9311,50 @@ static T EvaluateGenExpr(AlteredTableInfo* tab, T tuple,
return tup;
}
+/*
+ * Update values and isnull after a column is modified to a new location.
+ * newattnum > 0 denotes a modify with FIRST/AFTER column, or adding a generated column.
+ */
+static void UpdateValueModifyFirstAfter(NewColumnValue *ex, Datum* values, bool* isnull)
+{
+ if (ex->newattnum > 0 && !ex->is_addloc) {
+ Datum valuesTemp = values[ex->attnum - 1];
+ bool isnullTemp = isnull[ex->attnum - 1];
+ if (ex->newattnum > ex->attnum) {
+ for (int i = ex->attnum; i <= ex->newattnum - 1; i++) {
+ values[i - 1] = values[i];
+ isnull[i - 1] = isnull[i];
+ }
+ } else {
+ for (int i = ex->attnum - 1; i >= ex->newattnum; i--) {
+ values[i] = values[i - 1];
+ isnull[i] = isnull[i - 1];
+ }
+ }
+ values[ex->newattnum - 1] = valuesTemp;
+ isnull[ex->newattnum - 1] = isnullTemp;
+ }
+}
+
+static void UpdateGeneratedColumnIsnull(AlteredTableInfo* tab, bool* isnull, bool has_generated)
+{
+ ListCell* l = NULL;
+
+ if (!has_generated) {
+ return;
+ }
+
+ foreach (l, tab->newvals) {
+ NewColumnValue *ex = (NewColumnValue*)lfirst(l);
+
+ if (!ex->is_generated) {
+ continue;
+ }
+
+ isnull[ex->generate_attnum - 1] = true;
+ }
+}
+
/*
* change ATRewriteTable() input: oid->rel
*/
@@ -9072,6 +9461,11 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat
List* dropped_attrs = NIL;
ListCell* lc = NULL;
errno_t rc = EOK;
+ int128 autoinc = 0;
+ bool need_autoinc = false;
+ bool has_generated = false;
+ AttrNumber autoinc_attnum = (newTupDesc->constr && newTupDesc->constr->cons_autoinc) ?
+ newTupDesc->constr->cons_autoinc->attnum : 0;
isUstore = RelationIsUstoreFormat(oldrel);
@@ -9163,13 +9557,10 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat
if (tab->rewrite)
{
+ int newvals_num = 0;
/* Extract data from old tuple */
tableam_tops_deform_tuple(utuple, oldTupDesc, values, isnull);
- /* Set dropped attributes to null in new tuple */
- foreach(lc, dropped_attrs)
- isnull[lfirst_int(lc)] = true;
-
/*
* Process supplied expressions to replace selected columns.
*
@@ -9180,16 +9571,49 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat
foreach(l, tab->newvals)
{
- NewColumnValue *ex = (NewColumnValue*)lfirst(l);
+ NewColumnValue *ex = (NewColumnValue*)lfirst(l);
- if (ex->is_generated) {
- continue;
+ if (ex->is_addloc) {
+ for (i = oldTupDesc->natts + newvals_num - 1; i >= ex->attnum - 1; i--) {
+ values[i + 1] = values[i];
+ isnull[i + 1] = isnull[i];
}
+ newvals_num++;
+ }
- values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate,
- econtext,
- &isnull[ex->attnum - 1],
- NULL);
+ if (ex->is_generated) {
+ if (tab->is_first_after) {
+ UpdateValueModifyFirstAfter(ex, values, isnull);
+ has_generated = true;
+ } else {
+ isnull[ex->attnum - 1] = true;
+ }
+ continue;
+ }
+
+ values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate, econtext, &isnull[ex->attnum - 1], NULL);
+
+ if (ex->is_autoinc) {
+ need_autoinc = (autoinc_attnum > 0);
+ }
+
+ if (tab->is_first_after) {
+ UpdateValueModifyFirstAfter(ex, values, isnull);
+ }
+ }
+
+ /* generated column */
+ UpdateGeneratedColumnIsnull(tab, isnull, has_generated);
+
+ /* auto_increment */
+ if (need_autoinc) {
+ autoinc = EvaluateAutoIncrement(oldrel, newTupDesc,
+ autoinc_attnum, &values[autoinc_attnum - 1], &isnull[autoinc_attnum - 1]);
+ }
+
+ /* Set dropped attributes to null in new tuple */
+ foreach(lc, dropped_attrs) {
+ isnull[lfirst_int(lc)] = true;
}
/*
@@ -9250,9 +9674,14 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat
}
/* Write the tuple out to the new relation */
- if (newrel)
+ if (newrel) {
(void)tableam_tuple_insert(newrel, utuple, mycid, hi_options, bistate);
+ if (autoinc > 0) {
+ SetRelAutoIncrement(oldrel, newTupDesc, autoinc);
+ }
+ }
+
/*
* We need to reset the flags of slot before entering the next loop so that inplaceindex_getnextslot
* will not try to clear it after we reset the context. Note that we don't explicitly pfree its
@@ -9274,16 +9703,13 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat
while ((tuple = (HeapTuple) tableam_scan_getnexttuple(scan, ForwardScanDirection)) != NULL) {
if (tab->rewrite) {
Oid tupOid = InvalidOid;
+ int newvals_num = 0;
/* Extract data from old tuple */
tableam_tops_deform_tuple(tuple, oldTupDesc, values, isnull);
if (oldTupDesc->tdhasoid)
tupOid = HeapTupleGetOid(tuple);
- /* Set dropped attributes to null in new tuple */
- foreach (lc, dropped_attrs)
- isnull[lfirst_int(lc)] = true;
-
/*
* Process supplied expressions to replace selected columns.
*
@@ -9296,16 +9722,46 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat
foreach (l, tab->newvals) {
NewColumnValue* ex = (NewColumnValue*)lfirst(l);
+ if (ex->is_addloc) {
+ for (i = oldTupDesc->natts + newvals_num - 1; i >= ex->attnum - 1; i--) {
+ values[i + 1] = values[i];
+ isnull[i + 1] = isnull[i];
+ }
+ newvals_num++;
+ }
+
if (ex->is_generated) {
+ if (tab->is_first_after) {
+ UpdateValueModifyFirstAfter(ex, values, isnull);
+ has_generated = true;
+ } else {
+ isnull[ex->attnum - 1] = true;
+ }
continue;
}
- if (ex->is_autoinc && newTupDesc->attrs[ex->attnum - 1].attnotnull) {
- values[ex->attnum - 1] = GetAutoIncrementDatum(oldrel, newTupDesc);
- isnull[ex->attnum - 1] = false;
- } else {
- values[ex->attnum - 1] =
- ExecEvalExpr(ex->exprstate, econtext, &isnull[ex->attnum - 1], NULL);
+
+ values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate, econtext, &isnull[ex->attnum - 1], NULL);
+ if (ex->is_autoinc) {
+ need_autoinc = (autoinc_attnum > 0);
}
+
+ if (tab->is_first_after) {
+ UpdateValueModifyFirstAfter(ex, values, isnull);
+ }
+ }
+
+ /* generated column */
+ UpdateGeneratedColumnIsnull(tab, isnull, has_generated);
+
+ /* auto_increment */
+ if (need_autoinc) {
+ autoinc = EvaluateAutoIncrement(oldrel, newTupDesc,
+ autoinc_attnum, &values[autoinc_attnum - 1], &isnull[autoinc_attnum - 1]);
+ }
+
+ /* Set dropped attributes to null in new tuple */
+ foreach (lc, dropped_attrs) {
+ isnull[lfirst_int(lc)] = true;
}
/*
@@ -9365,6 +9821,10 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat
/* Write the tuple out to the new relation */
if (newrel) {
(void)tableam_tuple_insert(newrel, tuple, mycid, hi_options, bistate);
+
+ if (autoinc > 0) {
+ SetRelAutoIncrement(oldrel, newTupDesc, autoinc);
+ }
}
ResetExprContext(econtext);
@@ -9913,8 +10373,8 @@ void check_of_type(HeapTuple typetuple)
* check this in a static pre-pass because it won't handle multiple inheritance
* situations correctly.)
*/
-static void ATPrepAddColumn(
- List** wqueue, Relation rel, bool recurse, bool recursing, AlterTableCmd* cmd, LOCKMODE lockmode)
+static void ATPrepAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel, bool recurse,
+ bool recursing, AlterTableCmd* cmd, LOCKMODE lockmode)
{
if (rel->rd_rel->reloftype && !recursing)
ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot add column to typed table")));
@@ -9958,6 +10418,10 @@ static void ATPrepAddColumn(
if (recurse)
cmd->subtype = AT_AddColumnRecurse;
+
+ if ((cmd->is_first || cmd->after_name != NULL) && (tab != NULL)) {
+ tab->rewrite = true;
+ }
}
static bool contain_columndef_walker(Node* node, void* context)
@@ -9982,20 +10446,1247 @@ static void ATPrepCheckDefault(Node* node)
}
static FORCE_INLINE void ATExecAppendDefValExpr(_in_ AttrNumber attnum, _in_ Expr* defval, _out_ AlteredTableInfo* tab,
- ColumnDef *colDef, bool is_autoinc)
+ ColumnDef *colDef, bool is_autoinc, bool is_addloc)
{
NewColumnValue* newval;
newval = (NewColumnValue*)palloc0(sizeof(NewColumnValue));
newval->attnum = attnum;
newval->expr = expression_planner(defval);
+ newval->is_addloc = is_addloc;
+ newval->newattnum = is_addloc ? -1 : 0;
+ newval->generate_attnum = 0;
tab->newvals = lappend(tab->newvals, newval);
newval->is_generated = (colDef->generatedCol != '\0');
+ newval->col_name = pstrdup(colDef->colname);
newval->is_autoinc = is_autoinc;
tab->rewrite = true;
}
+static int GetAfterColumnAttnum(Oid attrelid, const char *after_name)
+{
+    int afterattnum = -1;
+    HeapTuple tuple;
+
+    tuple = SearchSysCacheAttName(attrelid, after_name);
+    if (!HeapTupleIsValid(tuple)) {
+        ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT),
+            errmsg("The %s column of relation %u does not exist.", after_name, attrelid)));
+    }
+
+    afterattnum = ((Form_pg_attribute)GETSTRUCT(tuple))->attnum + 1;
+    ReleaseSysCache(tuple);
+    return afterattnum;
+}
+
+static Node *UpdateVarattnoAfterAddColumn(Node *node, int startattnum, int endattnum, bool is_increase)
+{
+    if (node == NULL) {
+        return node;
+    }
+
+    int curattnum = is_increase ? endattnum + 1 : startattnum - 1;
+    int newattnum = is_increase ? startattnum : endattnum;
+
+    switch (nodeTag(node)) {
+        case T_Var: {
+            Var *var = (Var *)node;
+            Var *new_var = (Var *)copyObject(var);
+            if (var->varattno >= startattnum && var->varattno <= endattnum) {
+                new_var->varattno = is_increase ? (var->varattno + 1) : (var->varattno - 1);
+                new_var->varoattno = is_increase ? (var->varoattno + 1) : (var->varoattno - 1);
+            } else if (var->varattno == curattnum) {
+                new_var->varattno = newattnum;
+                new_var->varoattno = newattnum;
+            }
+            return (Node *)new_var;
+        }
+        case T_Const:
+        case T_Param:
+        case T_Rownum:
+        case T_CoerceToDomainValue:
+        case T_CaseTestExpr: {
+            return node;
+        }
+        case T_TypeCast: {
+            TypeCast *expr = (TypeCast *)node;
+            TypeCast *newexpr = (TypeCast *)copyObject(expr);
+            Node *expr_arg = UpdateVarattnoAfterAddColumn(expr->arg, startattnum, endattnum, is_increase);
+            newexpr->arg = expr_arg;
+            return (Node *)newexpr;
+        }
+        case T_ArrayExpr: {
+            ArrayExpr *expr = (ArrayExpr *)node;
+            ArrayExpr *newexpr = (ArrayExpr *)copyObject(expr);
+            List *expr_elements = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->elements,
+                startattnum, endattnum, is_increase);
+            newexpr->elements = expr_elements;
+            return (Node *)newexpr;
+        }
+        case T_FuncExpr: {
+            FuncExpr *expr = (FuncExpr *)node;
+            FuncExpr *newexpr = (FuncExpr *)copyObject(expr);
+            List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args,
+                startattnum, endattnum, is_increase);
+            newexpr->args = expr_args;
+            return (Node *)newexpr;
+        }
+        case T_OpExpr:
+        case T_DistinctExpr:
+        case T_NullIfExpr: {
+            OpExpr *expr = (OpExpr *)node;
+            OpExpr *newexpr = (OpExpr *)copyObject(expr);
+            List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args,
+                startattnum, endattnum, is_increase);
+            newexpr->args = expr_args;
+            return (Node *)newexpr;
+        }
+        case T_BoolExpr: {
+            BoolExpr *expr = (BoolExpr *)node;
+            BoolExpr *newexpr = (BoolExpr *)copyObject(expr);
+            List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args,
+                startattnum, endattnum, is_increase);
+            newexpr->args = expr_args;
+            return (Node *)newexpr;
+        }
+        case T_ScalarArrayOpExpr: {
+            ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *)node;
+            ScalarArrayOpExpr *newexpr = (ScalarArrayOpExpr *)copyObject(expr);
+            List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args,
+                startattnum, endattnum, is_increase);
+            newexpr->args = expr_args;
+            return (Node *)newexpr;
+        }
+        case T_ArrayRef: {
+            ArrayRef *expr = (ArrayRef *)node;
+            ArrayRef *newexpr = (ArrayRef *)copyObject(expr);
+            List *expr_refupperindexpr = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->refupperindexpr,
+                startattnum, endattnum, is_increase);
+            List *expr_reflowerindexpr = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->reflowerindexpr,
+                startattnum, endattnum, is_increase);
+            Expr *expr_refexpr = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->refexpr,
+                startattnum, endattnum, is_increase);
+            Expr *expr_refassgnexpr = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->refassgnexpr,
+                startattnum, endattnum, is_increase);
+            newexpr->refupperindexpr = expr_refupperindexpr;
+            newexpr->reflowerindexpr = expr_reflowerindexpr;
+            newexpr->refexpr = expr_refexpr;
+            newexpr->refassgnexpr = expr_refassgnexpr;
+            return (Node *)newexpr;
+        }
+        case T_RowCompareExpr: {
+            RowCompareExpr *expr = (RowCompareExpr *)node;
+            RowCompareExpr *newexpr = (RowCompareExpr *)copyObject(expr);
+            List *expr_largs = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->largs,
+                startattnum, endattnum, is_increase);
+            List *expr_rargs = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->rargs,
+                startattnum, endattnum, is_increase);
+            newexpr->largs = expr_largs;
+            newexpr->rargs = expr_rargs;
+            return (Node *)newexpr;
+        }
+        case T_ConvertRowtypeExpr: {
+            ConvertRowtypeExpr *expr = (ConvertRowtypeExpr *)node;
+            ConvertRowtypeExpr *newexpr = (ConvertRowtypeExpr *)copyObject(expr);
+            Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg,
+                startattnum, endattnum, is_increase);
+            newexpr->arg = expr_arg;
+            return (Node *)newexpr;
+        }
+        case T_FieldStore: {
+            FieldStore *expr = (FieldStore *)node;
+            FieldStore *newexpr = (FieldStore *)copyObject(expr);
+            Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg,
+                startattnum, endattnum, is_increase);
+            List *expr_newvals = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->newvals,
+                startattnum, endattnum, is_increase);
+            newexpr->arg = expr_arg;
+            newexpr->newvals = expr_newvals;
+            return (Node *)newexpr;
+        }
+        case T_FieldSelect: {
+            FieldSelect *expr = (FieldSelect *)node;
+            FieldSelect *newexpr = (FieldSelect *)copyObject(expr);
+            Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg,
+                startattnum, endattnum, is_increase);
+            newexpr->arg = expr_arg;
+            return (Node *)newexpr;
+        }
+        case T_MinMaxExpr: {
+            MinMaxExpr *expr = (MinMaxExpr *)node;
+            MinMaxExpr *newexpr = (MinMaxExpr *)copyObject(expr);
+            List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args,
+                startattnum, endattnum, is_increase);
+            newexpr->args = expr_args;
+            return (Node *)newexpr;
+        }
+        case T_BooleanTest: {
+            BooleanTest *expr = (BooleanTest *)node;
+            BooleanTest *newexpr = (BooleanTest *)copyObject(expr);
+            Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg,
+                startattnum, endattnum, is_increase);
+            newexpr->arg = expr_arg;
+            return (Node *)newexpr;
+        }
+        case T_RowExpr: {
+            RowExpr *expr = (RowExpr *)node;
+            RowExpr *newexpr = (RowExpr *)copyObject(expr);
+            List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args,
+                startattnum, endattnum, is_increase);
+            newexpr->args = expr_args;
+            return (Node *)newexpr;
+        }
+        case T_XmlExpr: {
+            XmlExpr *expr = (XmlExpr *)node;
+            XmlExpr *newExpr = (XmlExpr *)copyObject(expr);
+            List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args,
+                startattnum, endattnum, is_increase);
+            List *expr_name_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->named_args,
+                startattnum, endattnum, is_increase);
+            newExpr->args = expr_args;
+            newExpr->named_args = expr_name_args;
+            return (Node *)newExpr;
+        }
+        case T_RelabelType: {
+            RelabelType *expr = (RelabelType *)node;
+            RelabelType *newexpr = (RelabelType *)copyObject(expr);
+            Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg,
+                startattnum, endattnum, is_increase);
+            newexpr->arg = expr_arg;
+            return (Node *)newexpr;
+        }
+        case T_WindowFunc: {
+            WindowFunc *expr = (WindowFunc *)node;
+            WindowFunc *newexpr = (WindowFunc *)copyObject(expr);
+            List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args,
+                startattnum, endattnum, is_increase);
+            newexpr->args = expr_args;
+            return (Node *)newexpr;
+        }
+        case T_ArrayCoerceExpr: {
+            ArrayCoerceExpr *expr = (ArrayCoerceExpr *)node;
+            ArrayCoerceExpr *newexpr = (ArrayCoerceExpr *)copyObject(expr);
+            Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg,
+                startattnum, endattnum, is_increase);
+            newexpr->arg = expr_arg;
+            return (Node *)newexpr;
+        }
+        case T_PredictByFunction: {
+            PredictByFunction *expr = (PredictByFunction *)node;
+            PredictByFunction *newexpr = (PredictByFunction *)copyObject(expr);
+            List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->model_args,
+                startattnum, endattnum, is_increase);
+            newexpr->model_args = expr_args;
+            return (Node *)newexpr;
+        }
+        case T_NamedArgExpr: {
+            NamedArgExpr *expr = (NamedArgExpr *)node;
+            NamedArgExpr *newexpr = (NamedArgExpr *)copyObject(expr);
+            Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg,
+                startattnum, endattnum, is_increase);
+            newexpr->arg = expr_arg;
+            return (Node *)newexpr;
+        }
+        case T_CoerceViaIO: {
+            CoerceViaIO *expr = (CoerceViaIO *)node;
+            CoerceViaIO *newexpr = (CoerceViaIO *)copyObject(expr);
+            Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg,
+                startattnum, endattnum, is_increase);
+            newexpr->arg = expr_arg;
+            return (Node *)newexpr;
+        }
+        case T_CoerceToDomain: {
+            CoerceToDomain *expr = (CoerceToDomain *)node;
+            CoerceToDomain *newexpr = (CoerceToDomain *)copyObject(expr);
+            Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg,
+                startattnum, endattnum, is_increase);
+            newexpr->arg = expr_arg;
+            return (Node *)newexpr;
+        }
+        case T_CoalesceExpr: {
+            CoalesceExpr* expr = (CoalesceExpr *)node;
+            CoalesceExpr* newexpr = (CoalesceExpr *)copyObject(expr);
+            List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args,
+                startattnum, endattnum, is_increase);
+            newexpr->args = expr_args;
+            return (Node *)newexpr;
+        }
+        case T_NullTest: {
+            NullTest *expr = (NullTest *)node;
+            NullTest *newexpr = (NullTest *)copyObject(expr);
+            Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg,
+                startattnum, endattnum, is_increase);
+            newexpr->arg = expr_arg;
+            return (Node *)newexpr;
+        }
+        case T_CaseExpr: {
+            CaseExpr *expr = (CaseExpr *)node;
+            CaseExpr *newExpr = (CaseExpr *)copyObject(expr);
+
+            List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args,
+                startattnum, endattnum, is_increase);
+            // case_default
+            Expr *expr_defresult = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->defresult,
+                startattnum, endattnum, is_increase);
+            // case_arg
+            Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg,
+                startattnum, endattnum, is_increase);
+            newExpr->arg = expr_arg;
+            newExpr->args = expr_args;
+            newExpr->defresult = expr_defresult;
+            return (Node *)newExpr;
+        }
+        case T_CaseWhen: {
+            CaseWhen *expr = (CaseWhen *)node;
+            CaseWhen *newexpr = (CaseWhen *)copyObject(expr);
+            Expr *expr_expr = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->expr,
+                startattnum, endattnum, is_increase);
+            Expr *expr_result = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->result,
+                startattnum, endattnum, is_increase);
+            newexpr->expr = expr_expr;
+            newexpr->result = expr_result;
+            return (Node *)newexpr;
+        }
+        case T_List: {
+            List *reslist = NIL;
+            ListCell *temp = NULL;
+
+            foreach(temp, (List *)node) {
+                reslist = lappend(reslist,
+                    UpdateVarattnoAfterAddColumn((Node *)lfirst(temp),
+                    startattnum, endattnum, is_increase));
+            }
+            return (Node *)reslist;
+        }
+        case T_PrefixKey: {
+            PrefixKey *expr = (PrefixKey *)node;
+            PrefixKey *newexpr = (PrefixKey *)copyObject(expr);
+            Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg,
+                startattnum, endattnum, is_increase);
+            newexpr->arg = expr_arg;
+            return (Node *)newexpr;
+        }
+        default:
+            ereport(ERROR,
+                (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE),
+                errmsg("unrecognized node type: %d for first|after col_name", (int)nodeTag(node))));
+            break;
+    }
+    return NULL;
+}
+
+/*
+ * update pg_attribute.
+ * 1. add column with first or after col_name.
+ * 2. modify column to first or after column.
+ */
+static void UpdatePgAttributeFirstAfter(Relation attr_rel, Oid attrelid, int startattnum, int endattnum,
+    bool is_increase)
+{
+    ScanKeyData key[2];
+    HeapTuple attr_tuple;
+    SysScanDesc scan;
+    Form_pg_attribute attr_form;
+
+    for (int i = (is_increase ? endattnum : startattnum);
+        (is_increase ? i >= startattnum : i <= endattnum); (is_increase ? i-- : i++)) {
+        AttrNumber myattnum = (AttrNumber)i;
+        ScanKeyInit(&key[0], Anum_pg_attribute_attrelid, BTEqualStrategyNumber, F_OIDEQ,
+            ObjectIdGetDatum(attrelid));
+        ScanKeyInit(&key[1], Anum_pg_attribute_attnum, BTEqualStrategyNumber, F_INT2EQ, Int16GetDatum(myattnum));
+
+        scan = systable_beginscan(attr_rel, AttributeRelidNumIndexId, true, NULL, 2, key);
+
+        /* (attrelid, attnum) is unique, so this inner loop visits at most one tuple */
+        while (HeapTupleIsValid(attr_tuple = systable_getnext(scan))) {
+            Datum values[Natts_pg_attribute] = { 0 };
+            bool nulls[Natts_pg_attribute] = { 0 };
+            bool replaces[Natts_pg_attribute] = { 0 };
+            errno_t rc = 0;
+            HeapTuple new_attr_tuple;
+            char newattname[NAMEDATALEN];
+
+            attr_form = (Form_pg_attribute)GETSTRUCT(attr_tuple);
+
+            // shift this column's attnum by one in the requested direction
+            if (is_increase) {
+                values[Anum_pg_attribute_attnum - 1] = Int16GetDatum(attr_form->attnum + 1);
+                replaces[Anum_pg_attribute_attnum - 1] = true;
+            } else {
+                values[Anum_pg_attribute_attnum - 1] = Int16GetDatum(attr_form->attnum - 1);
+                replaces[Anum_pg_attribute_attnum - 1] = true;
+            }
+
+            // for a dropped column, keep its "........pg.dropped.N........" attname in sync with the new attnum
+            if (attr_form->attisdropped) {
+                if (is_increase) {
+                    rc = snprintf_s(newattname, sizeof(newattname),
+                        sizeof(newattname) - 1, "........pg.dropped.%d........", attr_form->attnum + 1);
+                    securec_check_ss(rc, "\0", "\0");
+                } else {
+                    rc = snprintf_s(newattname, sizeof(newattname),
+                        sizeof(newattname) - 1, "........pg.dropped.%d........", attr_form->attnum - 1);
+                    securec_check_ss(rc, "\0", "\0");
+                }
+
+                values[Anum_pg_attribute_attname - 1] = NameGetDatum(newattname);
+                replaces[Anum_pg_attribute_attname - 1] = true;
+            }
+
+            new_attr_tuple = heap_modify_tuple(attr_tuple, RelationGetDescr(attr_rel), values, nulls, replaces);
+            simple_heap_update(attr_rel, &new_attr_tuple->t_self, new_attr_tuple);
+            CatalogUpdateIndexes(attr_rel, new_attr_tuple);
+
+            heap_freetuple_ext(new_attr_tuple);
+        }
+        systable_endscan(scan);
+    }
+}
+
+/*
+ * update pg_index.
+ * 1. add column with first or after col_name.
+ * 2. modify column to first or after column.
+ */
+static void UpdatePgIndexFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase)
+{
+    Relation index_rel;
+    HeapTuple index_tuple;
+    ScanKeyData key;
+    SysScanDesc scan;
+    Form_pg_index index_form;
+    int curattnum = is_increase ? endattnum + 1 : startattnum - 1;
+    int newattnum = is_increase ? startattnum : endattnum;
+
+    /* Prepare to scan pg_index for entries having indrelid = this rel. */
+    ScanKeyInit(&key, Anum_pg_index_indrelid, BTEqualStrategyNumber, F_OIDEQ,
+        ObjectIdGetDatum(RelationGetRelid(rel)));
+    index_rel = heap_open(IndexRelationId, RowExclusiveLock);
+    scan = systable_beginscan(index_rel, IndexIndrelidIndexId, true, NULL, 1, &key);
+
+    while (HeapTupleIsValid(index_tuple = systable_getnext(scan))) {
+        int numatts;
+        bool is_null = false;
+        Datum values[Natts_pg_index] = { 0 };
+        bool nulls[Natts_pg_index] = { 0 };
+        bool replaces[Natts_pg_index] = { 0 };
+        int2vector *indkey = NULL;
+        int2vector *new_indkey = NULL;
+        HeapTuple new_index_tuple;
+
+        index_form = (Form_pg_index)GETSTRUCT(index_tuple);
+        numatts = index_form->indnatts;
+
+        // update pg_index_indkey
+        Datum indkey_datum = SysCacheGetAttr(INDEXRELID, index_tuple, Anum_pg_index_indkey, &is_null);
+        AssertEreport(!is_null, MOD_OPT, "");
+        indkey = (int2vector *)DatumGetPointer(indkey_datum);
+        Assert(indkey->dim1 == numatts);
+        new_indkey = buildint2vector(NULL, numatts);
+        for (int i = 0; i < numatts; i++) {
+            if (indkey->values[i] >= startattnum && indkey->values[i] <= endattnum) {
+                new_indkey->values[i] = is_increase ? (indkey->values[i] + 1) : (indkey->values[i] - 1);
+            } else if (indkey->values[i] == curattnum) {
+                new_indkey->values[i] = newattnum;
+            } else {
+                new_indkey->values[i] = indkey->values[i];
+            }
+        }
+        values[Anum_pg_index_indkey - 1] = PointerGetDatum(new_indkey);
+        replaces[Anum_pg_index_indkey - 1] = true;
+
+        // update pg_index_indexprs
+        if (!heap_attisnull(index_tuple, Anum_pg_index_indexprs, NULL)) {
+            Datum exprs_datum;
+            List *indexprs = NIL;
+            List *new_indexprs = NIL;
+            char* exprs_string = NULL;
+
+            exprs_datum = SysCacheGetAttr(INDEXRELID, index_tuple, Anum_pg_index_indexprs, &is_null);
+            AssertEreport(!is_null, MOD_OPT, "");
+            exprs_string = TextDatumGetCString(exprs_datum);
+            indexprs = (List *)stringToNode(exprs_string);
+
+            new_indexprs = (List *)UpdateVarattnoAfterAddColumn((Node *)indexprs,
+                startattnum, endattnum, is_increase);
+            exprs_string = nodeToString(new_indexprs);
+            values[Anum_pg_index_indexprs - 1] = CStringGetTextDatum(exprs_string);
+            replaces[Anum_pg_index_indexprs - 1] = true;
+            pfree_ext(exprs_string);
+        }
+
+        // update pg_index_indpred
+        if (!heap_attisnull(index_tuple, Anum_pg_index_indpred, NULL)) {
+            Datum pred_datum;
+            List *indpred = NIL;
+            List *new_indpred = NIL;
+            char *pred_string = NULL;
+
+            pred_datum = SysCacheGetAttr(INDEXRELID, index_tuple, Anum_pg_index_indpred, &is_null);
+            AssertEreport(!is_null, MOD_OPT, "");
+            pred_string = TextDatumGetCString(pred_datum);
+            indpred = (List *)stringToNode(pred_string);
+
+            new_indpred = (List *)UpdateVarattnoAfterAddColumn((Node *)indpred,
+                startattnum, endattnum, is_increase);
+            pred_string = nodeToString(new_indpred);
+            values[Anum_pg_index_indpred - 1] = CStringGetTextDatum(pred_string);
+            replaces[Anum_pg_index_indpred - 1] = true;
+            pfree_ext(pred_string);
+        }
+
+        new_index_tuple = heap_modify_tuple(index_tuple, RelationGetDescr(index_rel), values, nulls, replaces);
+        simple_heap_update(index_rel, &new_index_tuple->t_self, new_index_tuple);
+        CatalogUpdateIndexes(index_rel, new_index_tuple);
+
+        pfree_ext(new_indkey);
+        heap_freetuple_ext(new_index_tuple);
+    }
+
+    systable_endscan(scan);
+    heap_close(index_rel, RowExclusiveLock);
+}
+
+/*
+ * update pg_constraint.
+ * 1. add column with first or after col_name.
+ * 2. modify column to first or after column.
+ */
+static void UpdatePgConstraintFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase)
+{
+ ScanKeyData key;
+ HeapTuple con_tuple;
+ Relation con_rel;
+ SysScanDesc scan;
+ int curattnum = is_increase ? endattnum + 1 : startattnum - 1;
+ int newattnum = is_increase ? startattnum : endattnum;
+
+ ScanKeyInit(&key, Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(RelationGetRelid(rel)));
+ con_rel = heap_open(ConstraintRelationId, RowExclusiveLock);
+ scan = systable_beginscan(con_rel, ConstraintRelidIndexId, true, NULL, 1, &key);
+
+ while (HeapTupleIsValid(con_tuple = systable_getnext(scan))) {
+ bool is_null = false;
+ ArrayType *conkey_array = NULL;
+ ArrayType *conincluding_array = NULL;
+ Datum values[Natts_pg_constraint] = { 0 };
+ bool nulls[Natts_pg_constraint] = { 0 };
+ bool replaces[Natts_pg_constraint] = { 0 };
+ HeapTuple new_con_tuple;
+
+ // update pg_constraint_conkey
+ Datum conkeyDatum = SysCacheGetAttr(CONSTROID, con_tuple, Anum_pg_constraint_conkey, &is_null);
+ if (!is_null) {
+ ArrayType* con_key_arr = DatumGetArrayTypeP(conkeyDatum);
+ int con_key_num = ARR_DIMS(con_key_arr)[0];
+ int16 *con_key_attnums = (int16 *)ARR_DATA_PTR(con_key_arr);
+ Datum *conkey = (Datum *)palloc(con_key_num * sizeof(Datum));
+
+ for (int i = 0; i < con_key_num; i++) {
+ if (con_key_attnums[i] >= startattnum && con_key_attnums[i] <= endattnum) {
+ con_key_attnums[i] = is_increase ? (con_key_attnums[i] + 1) : (con_key_attnums[i] - 1);
+ } else if (con_key_attnums[i] == curattnum) {
+ con_key_attnums[i] = newattnum;
+ }
+ conkey[i] = Int16GetDatum(con_key_attnums[i]);
+ }
+ conkey_array = construct_array(conkey, con_key_num, INT2OID, 2, true, 's');
+ values[Anum_pg_constraint_conkey - 1] = PointerGetDatum(conkey_array);
+ replaces[Anum_pg_constraint_conkey - 1] = true;
+ }
+
+ // update pg_constraint_conincluding
+ Datum con_including_datum = SysCacheGetAttr(CONSTROID, con_tuple, Anum_pg_constraint_conincluding, &is_null);
+ if (!is_null) {
+ ArrayType* con_including_arr = DatumGetArrayTypeP(con_including_datum);
+ int con_including_num = ARR_DIMS(con_including_arr)[0];
+ int16* con_including_attnums = (int16 *)ARR_DATA_PTR(con_including_arr);
+ Datum* conincluding = (Datum *)palloc(con_including_num * sizeof(Datum));
+
+ for (int i = 0; i < con_including_num; i++) {
+ if (con_including_attnums[i] >= startattnum && con_including_attnums[i] <= endattnum) {
+ con_including_attnums[i] = is_increase ?
+ (con_including_attnums[i] + 1) : (con_including_attnums[i] - 1);
+ } else if (con_including_attnums[i] == curattnum) {
+ con_including_attnums[i] = newattnum;
+ }
+ conincluding[i] = Int16GetDatum(con_including_attnums[i]);
+ }
+ conincluding_array = construct_array(conincluding, con_including_num, INT2OID, 2, true, 's');
+ values[Anum_pg_constraint_conincluding - 1] = PointerGetDatum(conincluding_array);
+ replaces[Anum_pg_constraint_conincluding - 1] = true;
+ }
+
+ // update pg_constraint_conbin
+ Datum conbin_datum = SysCacheGetAttr(CONSTROID, con_tuple, Anum_pg_constraint_conbin, &is_null);
+ if (!is_null) {
+ char *conbin_string = NULL;
+ Node *conbin = NULL;
+ Node *new_conbin = NULL;
+
+ conbin_string = TextDatumGetCString(conbin_datum);
+ conbin = (Node*)stringToNode(conbin_string);
+
+ new_conbin = UpdateVarattnoAfterAddColumn(conbin, startattnum, endattnum, is_increase);
+ conbin_string = nodeToString(new_conbin);
+ values[Anum_pg_constraint_conbin - 1] = CStringGetTextDatum(conbin_string);
+ replaces[Anum_pg_constraint_conbin - 1] = true;
+ pfree_ext(conbin_string);
+ }
+
+ new_con_tuple = heap_modify_tuple(con_tuple, RelationGetDescr(con_rel), values, nulls, replaces);
+ simple_heap_update(con_rel, &new_con_tuple->t_self, new_con_tuple);
+ CatalogUpdateIndexes(con_rel, new_con_tuple);
+
+ pfree_ext(conkey_array);
+ pfree_ext(conincluding_array);
+ heap_freetuple_ext(new_con_tuple);
+ }
+
+ systable_endscan(scan);
+ heap_close(con_rel, RowExclusiveLock);
+}
+
+/*
+ * update pg_constraint confkey.
+ * 1. add column with first or after col_name.
+ * 2. modify column to first or after column.
+ */
+static void UpdatePgConstraintConfkeyFirstAfter(Relation rel, int startattnum, int endattnum,
+ bool is_increase)
+{
+ ScanKeyData key;
+ HeapTuple con_tuple;
+ Relation con_rel;
+ SysScanDesc scan;
+ int curattnum = is_increase ? endattnum + 1 : startattnum - 1;
+ int newattnum = is_increase ? startattnum : endattnum;
+
+ ScanKeyInit(&key, Anum_pg_constraint_confrelid, BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(RelationGetRelid(rel)));
+ con_rel = heap_open(ConstraintRelationId, RowExclusiveLock);
+ scan = systable_beginscan(con_rel, InvalidOid, false, NULL, 1, &key);
+
+ while (HeapTupleIsValid(con_tuple = systable_getnext(scan))) {
+ bool is_null = false;
+ ArrayType* confkey_array = NULL;
+ Datum values[Natts_pg_constraint] = { 0 };
+ bool nulls[Natts_pg_constraint] = { 0 };
+ bool replaces[Natts_pg_constraint] = { 0 };
+ HeapTuple new_con_tuple;
+
+ // update pg_constraint_confkey
+ Datum confkey_datum = SysCacheGetAttr(CONSTROID, con_tuple, Anum_pg_constraint_confkey, &is_null);
+ if (!is_null) {
+ ArrayType* conf_key_rr = DatumGetArrayTypeP(confkey_datum);
+ int confkey_num = ARR_DIMS(conf_key_rr)[0];
+ int16 *confkey_attnums = (int16 *)ARR_DATA_PTR(conf_key_rr);
+ Datum *confkey = (Datum *)palloc(confkey_num * sizeof(Datum));
+
+ for (int i = 0; i < confkey_num; i++) {
+ if (confkey_attnums[i] >= startattnum && confkey_attnums[i] <= endattnum) {
+ confkey_attnums[i] = is_increase ? (confkey_attnums[i] + 1) : (confkey_attnums[i] - 1);
+ } else if (confkey_attnums[i] == curattnum) {
+ confkey_attnums[i] = newattnum;
+ }
+ confkey[i] = Int16GetDatum(confkey_attnums[i]);
+ }
+ confkey_array = construct_array(confkey, confkey_num, INT2OID, 2, true, 's');
+ values[Anum_pg_constraint_confkey - 1] = PointerGetDatum(confkey_array);
+ replaces[Anum_pg_constraint_confkey - 1] = true;
+ }
+
+ new_con_tuple = heap_modify_tuple(con_tuple, RelationGetDescr(con_rel), values, nulls, replaces);
+ simple_heap_update(con_rel, &new_con_tuple->t_self, new_con_tuple);
+ CatalogUpdateIndexes(con_rel, new_con_tuple);
+
+ pfree_ext(confkey_array);
+ heap_freetuple_ext(new_con_tuple);
+ }
+
+ systable_endscan(scan);
+ heap_close(con_rel, RowExclusiveLock);
+}
+
+/*
+ * update generated column information for pg_attrdef.
+ * 1. add column with first or after col_name.
+ * 2. modify column to first or after column.
+ */
+static void UpdateGenerateColFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase)
+{
+ ScanKeyData key;
+ HeapTuple def_tuple;
+ Relation def_rel;
+ SysScanDesc scan;
+
+ def_rel = heap_open(AttrDefaultRelationId, RowExclusiveLock);
+ ScanKeyInit(&key, Anum_pg_attrdef_adrelid, BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(RelationGetRelid(rel)));
+ scan = systable_beginscan(def_rel, AttrDefaultIndexId, true, NULL, 1, &key);
+ while (HeapTupleIsValid(def_tuple = systable_getnext(scan))) {
+ bool is_null = false;
+ char generated_col = '\0';
+ Datum values[Natts_pg_attrdef] = { 0 };
+ bool nulls[Natts_pg_attrdef] = { 0 };
+ bool replaces[Natts_pg_attrdef] = { 0 };
+ HeapTuple new_def_tuple;
+
+ Datum adgencol = fastgetattr(def_tuple, Anum_pg_attrdef_adgencol, def_rel->rd_att, &is_null);
+ if (!is_null) {
+ generated_col = DatumGetChar(adgencol);
+ }
+
+ // update pg_attrdef_adbin
+ if (generated_col == ATTRIBUTE_GENERATED_STORED) {
+ Datum adbin_datum;
+ Node *adbin = NULL;
+ Node *new_adbin = NULL;
+ char *adbin_string = NULL;
+
+ adbin_datum = fastgetattr(def_tuple, Anum_pg_attrdef_adbin, def_rel->rd_att, &is_null);
+ AssertEreport(!is_null, MOD_OPT, "");
+ adbin_string = TextDatumGetCString(adbin_datum);
+ adbin = (Node *)stringToNode(adbin_string);
+
+ new_adbin = UpdateVarattnoAfterAddColumn(adbin, startattnum, endattnum, is_increase);
+ adbin_string = nodeToString(new_adbin);
+ values[Anum_pg_attrdef_adbin - 1] = CStringGetTextDatum(adbin_string);
+ replaces[Anum_pg_attrdef_adbin - 1] = true;
+ pfree_ext(adbin_string);
+ } else {
+ continue;
+ }
+
+ new_def_tuple = heap_modify_tuple(def_tuple, RelationGetDescr(def_rel), values, nulls, replaces);
+ simple_heap_update(def_rel, &new_def_tuple->t_self, new_def_tuple);
+ CatalogUpdateIndexes(def_rel, new_def_tuple);
+
+ heap_freetuple_ext(new_def_tuple);
+ }
+ systable_endscan(scan);
+ heap_close(def_rel, RowExclusiveLock);
+}
+
+
+/*
+ * update the exists index information.
+ * 1. add column with first or after col_name.
+ */
+static void UpdateIndexFirstAfter(Relation rel)
+{
+    Relation pg_index_rel, table_index_rel;
+    HeapTuple index_tuple;
+    ScanKeyData key;
+    SysScanDesc scan;
+    Form_pg_index index_form;
+
+    /* Scan pg_index for indrelid = this rel. NOTE(review): rd_index below is pointed at scan-local tuple memory; confirm it stays valid after systable_endscan, or that a later relcache rebuild replaces it. */
+    ScanKeyInit(&key, Anum_pg_index_indrelid, BTEqualStrategyNumber, F_OIDEQ,
+        ObjectIdGetDatum(RelationGetRelid(rel)));
+    pg_index_rel = heap_open(IndexRelationId, RowExclusiveLock);
+    scan = systable_beginscan(pg_index_rel, IndexIndrelidIndexId, true, NULL, 1, &key);
+
+    while (HeapTupleIsValid(index_tuple = systable_getnext(scan))) {
+        index_form = (Form_pg_index)GETSTRUCT(index_tuple);
+
+        table_index_rel = index_open(index_form->indexrelid, RowExclusiveLock);
+
+        table_index_rel->rd_index = index_form;
+
+        index_close(table_index_rel, RowExclusiveLock);
+    }
+    systable_endscan(scan);
+    heap_close(pg_index_rel, RowExclusiveLock);
+}
+
+/*
+ * update pg_attrdef.
+ * 1. add column with first or after col_name.
+ * 2. modify column to first or after column.
+ *
+ * Shifts pg_attrdef.adnum by one for every attribute of 'rel' in
+ * [startattnum, endattnum].  The range is walked back-to-front when shifting
+ * up (is_increase) and front-to-back when shifting down, so no two rows ever
+ * collide on the same adnum while the renumbering is in flight.
+ */
+static void UpdatePgAttrdefFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase)
+{
+    Relation attrdef_rel = heap_open(AttrDefaultRelationId, RowExclusiveLock);
+    int step = is_increase ? -1 : 1;
+    int first = is_increase ? endattnum : startattnum;
+
+    for (int attno = first; attno >= startattnum && attno <= endattnum; attno += step) {
+        ScanKeyData skeys[2];
+        SysScanDesc adscan;
+        HeapTuple old_tuple;
+
+        ScanKeyInit(&skeys[0], Anum_pg_attrdef_adrelid, BTEqualStrategyNumber, F_OIDEQ,
+            ObjectIdGetDatum(RelationGetRelid(rel)));
+        ScanKeyInit(&skeys[1], Anum_pg_attrdef_adnum, BTEqualStrategyNumber, F_INT2EQ,
+            Int16GetDatum((AttrNumber)attno));
+
+        adscan = systable_beginscan(attrdef_rel, AttrDefaultIndexId, true, NULL, 2, skeys);
+
+        /* at most one pg_attrdef row exists per (adrelid, adnum) */
+        while (HeapTupleIsValid(old_tuple = systable_getnext(adscan))) {
+            Datum repl_vals[Natts_pg_attrdef] = { 0 };
+            bool repl_nulls[Natts_pg_attrdef] = { 0 };
+            bool repl_flags[Natts_pg_attrdef] = { 0 };
+            Form_pg_attrdef ad = (Form_pg_attrdef)GETSTRUCT(old_tuple);
+            HeapTuple upd_tuple;
+
+            repl_vals[Anum_pg_attrdef_adnum - 1] =
+                is_increase ? Int16GetDatum(ad->adnum + 1) : Int16GetDatum(ad->adnum - 1);
+            repl_flags[Anum_pg_attrdef_adnum - 1] = true;
+
+            upd_tuple = heap_modify_tuple(old_tuple, RelationGetDescr(attrdef_rel), repl_vals, repl_nulls, repl_flags);
+            simple_heap_update(attrdef_rel, &upd_tuple->t_self, upd_tuple);
+            CatalogUpdateIndexes(attrdef_rel, upd_tuple);
+
+            heap_freetuple_ext(upd_tuple);
+        }
+        systable_endscan(adscan);
+    }
+    heap_close(attrdef_rel, RowExclusiveLock);
+}
+
+/*
+ * update pg_depend.
+ * 1. add column with first or after col_name.
+ * 2. modify column to first or after column.
+ *
+ * Shifts every column reference of 'rel' stored in pg_depend: subids in
+ * [startattnum, endattnum] move by one (up when is_increase, down otherwise)
+ * and a subid equal to curattnum is remapped to newattnum.  Both sides of
+ * the dependency (refobjsubid, then objsubid) are rewritten in two scans,
+ * with a CommandCounterIncrement between them so the second scan sees the
+ * first scan's updates.
+ */
+static void UpdatePgDependFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase)
+{
+    ScanKeyData key[2];
+    HeapTuple dep_tuple;
+    Relation dep_rel;
+    SysScanDesc scan;
+    Form_pg_depend dep_form;
+
+    /* old slot of the moved column, and the slot it ends up in */
+    int curattnum = is_increase ? endattnum + 1 : startattnum - 1;
+    int newattnum = is_increase ? startattnum : endattnum;
+
+    dep_rel = heap_open(DependRelationId, RowExclusiveLock);
+
+    // find pg_depend based on refobjid and refobjsubid, then update refobjsubid
+    ScanKeyInit(&key[0], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ,
+        ObjectIdGetDatum(RelationGetRelid(rel)));
+    ScanKeyInit(&key[1], Anum_pg_depend_refobjsubid, BTGreaterStrategyNumber, F_INT4GT, Int32GetDatum(0));
+
+    scan = systable_beginscan(dep_rel, DependReferenceIndexId, true, NULL, 2, key);
+    while (HeapTupleIsValid(dep_tuple = systable_getnext(scan))) {
+        Datum values[Natts_pg_depend] = { 0 };
+        bool nulls[Natts_pg_depend] = { 0 };
+        bool replaces[Natts_pg_depend] = { 0 };
+        HeapTuple new_dep_tuple;
+
+        dep_form = (Form_pg_depend)GETSTRUCT(dep_tuple);
+
+        if (dep_form->refobjsubid >= startattnum && dep_form->refobjsubid <= endattnum) {
+            values[Anum_pg_depend_refobjsubid - 1] = is_increase ?
+                Int32GetDatum(dep_form->refobjsubid + 1) : Int32GetDatum(dep_form->refobjsubid - 1);
+            replaces[Anum_pg_depend_refobjsubid - 1] = true;
+        } else if (dep_form->refobjsubid == curattnum) {
+            values[Anum_pg_depend_refobjsubid - 1] = Int32GetDatum(newattnum);
+            replaces[Anum_pg_depend_refobjsubid - 1] = true;
+        }
+
+        /*
+         * NOTE(review): when neither branch above matched, no replace flag is
+         * set and the row is rewritten unchanged -- a harmless but avoidable
+         * no-op heap update.
+         */
+        new_dep_tuple = heap_modify_tuple(dep_tuple, RelationGetDescr(dep_rel), values, nulls, replaces);
+        simple_heap_update(dep_rel, &new_dep_tuple->t_self, new_dep_tuple);
+        CatalogUpdateIndexes(dep_rel, new_dep_tuple);
+
+        heap_freetuple_ext(new_dep_tuple);
+    }
+    systable_endscan(scan);
+    heap_close(dep_rel, RowExclusiveLock);
+
+    /* make the refobjsubid updates visible to the second scan */
+    CommandCounterIncrement();
+
+    dep_rel = heap_open(DependRelationId, RowExclusiveLock);
+
+    // find pg_depend based on objid and objsubid, then update objsubid
+    ScanKeyInit(&key[0], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ,
+        ObjectIdGetDatum(RelationGetRelid(rel)));
+    ScanKeyInit(&key[1], Anum_pg_depend_objsubid, BTGreaterStrategyNumber, F_INT4GT, Int32GetDatum(0));
+
+    scan = systable_beginscan(dep_rel, DependDependerIndexId, true, NULL, 2, key);
+    while (HeapTupleIsValid(dep_tuple = systable_getnext(scan))) {
+        Datum values[Natts_pg_depend] = { 0 };
+        bool nulls[Natts_pg_depend] = { 0 };
+        bool replaces[Natts_pg_depend] = { 0 };
+        HeapTuple new_dep_tuple;
+
+        dep_form = (Form_pg_depend)GETSTRUCT(dep_tuple);
+
+        /*
+         * Rows already handled by the refobjsubid pass above are skipped;
+         * refobjsubid == -1 presumably marks that state -- confirm against
+         * the code that sets it.
+         */
+        if (dep_form->refobjsubid == -1 && dep_form->refobjid == RelationGetRelid(rel)) {
+            continue;
+        }
+
+        if (dep_form->objsubid >= startattnum && dep_form->objsubid <= endattnum) {
+            values[Anum_pg_depend_objsubid - 1] = is_increase ? Int32GetDatum(dep_form->objsubid + 1) :
+                Int32GetDatum(dep_form->objsubid - 1);
+            replaces[Anum_pg_depend_objsubid - 1] = true;
+        } else if (dep_form->objsubid == curattnum) {
+            values[Anum_pg_depend_objsubid - 1] = Int32GetDatum(newattnum);
+            replaces[Anum_pg_depend_objsubid - 1] = true;
+        }
+
+        new_dep_tuple = heap_modify_tuple(dep_tuple, RelationGetDescr(dep_rel), values, nulls, replaces);
+        simple_heap_update(dep_rel, &new_dep_tuple->t_self, new_dep_tuple);
+        CatalogUpdateIndexes(dep_rel, new_dep_tuple);
+
+        heap_freetuple_ext(new_dep_tuple);
+    }
+    systable_endscan(scan);
+    heap_close(dep_rel, RowExclusiveLock);
+}
+
+/*
+ * update pg_partition.
+ * 1. add column with first or after col_name.
+ * 2. modify column to first or after column.
+ *
+ * Renumbers the partition-key attribute numbers (pg_partition.partkey) of
+ * every partition entry whose parent is 'rel'.  Keys in
+ * [startattnum, endattnum] shift by one; a key equal to curattnum is either
+ * remapped to newattnum (plain move) or, when is_modified, zeroed out with
+ * *has_partition raised so the caller can react to a modified partition key.
+ */
+static void UpdatePgPartitionFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase,
+    bool is_modified, bool *has_partition)
+{
+    ScanKeyData key;
+    HeapTuple par_tuple;
+    Relation par_rel;
+    SysScanDesc scan;
+    /* old slot of the moved column, and the slot it ends up in */
+    int curattnum = is_increase ? endattnum + 1 : startattnum - 1;
+    int newattnum = is_increase ? startattnum : endattnum;
+
+    par_rel = heap_open(PartitionRelationId, RowExclusiveLock);
+
+    ScanKeyInit(&key, Anum_pg_partition_parentid, BTEqualStrategyNumber, F_OIDEQ,
+        ObjectIdGetDatum(RelationGetRelid(rel)));
+
+    scan = systable_beginscan(par_rel, PartitionParentOidIndexId, true, NULL, 1, &key);
+    while (HeapTupleIsValid(par_tuple = systable_getnext(scan))) {
+        bool is_null = false;
+
+        // update pg_partition_partkey
+        Datum partkey_datum = SysCacheGetAttr(PARTRELID, par_tuple, Anum_pg_partition_partkey, &is_null);
+        if (!is_null) {
+            Datum values[Natts_pg_partition] = { 0 };
+            bool nulls[Natts_pg_partition] = { 0 };
+            bool replaces[Natts_pg_partition] = { 0 };
+            int2vector *partkey = NULL;
+            int2vector *new_partKey = NULL;
+            HeapTuple new_par_tuple;
+
+            partkey = (int2vector *)DatumGetPointer(partkey_datum);
+            new_partKey = buildint2vector(NULL, partkey->dim1);
+            for (int i = 0; i < partkey->dim1; i++) {
+                if (partkey->values[i] >= startattnum && partkey->values[i] <= endattnum) {
+                    new_partKey->values[i] = is_increase ? (partkey->values[i] + 1) : (partkey->values[i] - 1);
+                } else if (partkey->values[i] == curattnum) {
+                    if (is_modified) {
+                        if (has_partition != NULL) {
+                            *has_partition = true;
+                        }
+                        /*
+                         * NOTE(review): 0 looks like a placeholder for "key
+                         * column being modified"; confirm the caller errors
+                         * out (or fixes it up) whenever *has_partition is set.
+                         */
+                        new_partKey->values[i] = 0;
+                    } else {
+                        new_partKey->values[i] = newattnum;
+                    }
+                } else {
+                    new_partKey->values[i] = partkey->values[i];
+                }
+            }
+            values[Anum_pg_partition_partkey - 1] = PointerGetDatum(new_partKey);
+            replaces[Anum_pg_partition_partkey - 1] = true;
+
+            new_par_tuple = heap_modify_tuple(par_tuple, RelationGetDescr(par_rel), values, nulls, replaces);
+            simple_heap_update(par_rel, &new_par_tuple->t_self, new_par_tuple);
+            CatalogUpdateIndexes(par_rel, new_par_tuple);
+
+            pfree_ext(new_partKey);
+            heap_freetuple_ext(new_par_tuple);
+        }
+    }
+    systable_endscan(scan);
+    heap_close(par_rel, RowExclusiveLock);
+}
+
+
+/*
+ * Deparse the definition of the view whose pg_rewrite entry is 'objid' so it
+ * can be re-created after columns of 'rel' are added or moved with
+ * FIRST | AFTER col_name.
+ *
+ * Returns a palloc'd ViewInfoForAdd holding the view's OID (ev_class) and its
+ * regenerated query text, or NULL when 'objid' has no pg_rewrite tuple or the
+ * tuple carries no ev_action.  Errors out when the rule is anything other
+ * than the view's own SELECT rule, because user rules cannot survive the
+ * column renumbering.
+ *
+ * 'keep_star' is forwarded to get_query_def(); it defaults to false so
+ * callers that omit it keep the expanded (non-star) deparsing behavior.
+ */
+static ViewInfoForAdd *GetViewInfoFirstAfter(Relation rel, Oid objid, bool keep_star = false)
+{
+    ScanKeyData entry;
+    ViewInfoForAdd *info = NULL;
+
+    ScanKeyInit(&entry, ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(objid));
+
+    Relation rewrite_rel = heap_open(RewriteRelationId, AccessShareLock);
+
+    SysScanDesc rewrite_scan = systable_beginscan(rewrite_rel, RewriteOidIndexId, true, NULL, 1, &entry);
+
+    HeapTuple rewrite_tup = systable_getnext(rewrite_scan);
+
+    if (HeapTupleIsValid(rewrite_tup)) {
+        Form_pg_rewrite rewrite_form = (Form_pg_rewrite)GETSTRUCT(rewrite_tup);
+
+        if (strcmp(NameStr(rewrite_form->rulename), ViewSelectRuleName) == 0) {
+            bool is_null = false;
+
+            Datum ev_action_datum = fastgetattr(rewrite_tup, Anum_pg_rewrite_ev_action, rewrite_rel->rd_att, &is_null);
+            if (!is_null) {
+                StringInfoData buf;
+
+                initStringInfo(&buf);
+
+                Relation ev_relation = heap_open(rewrite_form->ev_class, AccessShareLock);
+                char *ev_action_string = TextDatumGetCString(ev_action_datum);
+                List *ev_action = (List *)stringToNode(ev_action_string);
+                Query* query = (Query*)linitial(ev_action);
+
+                /* regenerate the SQL text of the view's SELECT rule */
+                get_query_def(query,
+                    &buf,
+                    NIL,
+                    RelationGetDescr(ev_relation),
+                    0,
+                    -1,
+                    0,
+#ifdef PGXC
+                    false,
+                    false,
+                    NULL,
+#endif /* PGXC */
+                    false,
+                    keep_star);
+                appendStringInfo(&buf, ";");
+
+                info = (ViewInfoForAdd *)palloc0(sizeof(ViewInfoForAdd));
+                info->ev_class = rewrite_form->ev_class;
+                info->query_string = pstrdup(buf.data);
+
+                heap_close(ev_relation, AccessShareLock);
+
+                FreeStringInfo(&buf);
+                pfree_ext(ev_action_string);
+            }
+        } else {
+            ereport(ERROR,
+                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                    errmsg("Un-supported feature"),
+                    errdetail("rule %s depend on %s, alter table %s add ... first|after colname is not supported",
+                        NameStr(rewrite_form->rulename), NameStr(rel->rd_rel->relname), NameStr(rel->rd_rel->relname))));
+        }
+    }
+    systable_endscan(rewrite_scan);
+    heap_close(rewrite_rel, AccessShareLock);
+
+    return info;
+}
+
+/*
+ * get sql for view.
+ * 1. add column with first or after col_name.
+ * 2. modify column to first or after column.
+ *
+ * Walks pg_depend for rewrite rules that reference 'rel' and collects, for
+ * each distinct view, a ViewInfoForAdd with the deparsed view definition so
+ * the views can be re-created after the column renumbering.
+ */
+static List *CheckPgRewriteFirstAfter(Relation rel)
+{
+    ScanKeyData key[2];
+    HeapTuple dep_tuple;
+    SysScanDesc dep_scan;
+    Form_pg_depend dep_form;
+    Oid pre_objid = 0;
+    List *query_str = NIL;
+
+    Relation dep_rel = heap_open(DependRelationId, AccessShareLock);
+
+    ScanKeyInit(
+        &key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationRelationId));
+    ScanKeyInit(
+        &key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(rel)));
+
+    dep_scan = systable_beginscan(dep_rel, DependReferenceIndexId, true, NULL, 2, key);
+
+    while (HeapTupleIsValid(dep_tuple = systable_getnext(dep_scan))) {
+        dep_form = (Form_pg_depend)GETSTRUCT(dep_tuple);
+
+        if (dep_form->classid == RewriteRelationId) {
+            ListCell* viewinfo = NULL;
+            bool is_exist = false;
+
+            /*
+             * Cheap de-duplication of adjacent entries for the same rule;
+             * the ev_class check below catches any non-adjacent repeats.
+             */
+            if (dep_form->objid == pre_objid) {
+                continue;
+            }
+
+            pre_objid = dep_form->objid;
+
+            ViewInfoForAdd *info = GetViewInfoFirstAfter(rel, dep_form->objid, false);
+            if (info == NULL) {
+                continue;
+            }
+
+            /* skip views already collected for an earlier rule entry */
+            foreach (viewinfo, query_str) {
+                ViewInfoForAdd *oldInfo = (ViewInfoForAdd *)lfirst(viewinfo);
+                if (oldInfo->ev_class == info->ev_class) {
+                    is_exist = true;
+                    break;
+                }
+            }
+
+            if (!is_exist) {
+                query_str = lappend(query_str, info);
+            }
+        }
+    }
+    systable_endscan(dep_scan);
+    heap_close(dep_rel, AccessShareLock);
+    return query_str;
+}
+
+/*
+ * create or replace view when the table has view.
+ * 1. add column with first or after col_name.
+ * 2. modify column to first or after column.
+ *
+ * Re-parses and re-analyzes each collected view definition and stores the
+ * fresh SELECT rule, effectively CREATE OR REPLACE-ing every dependent view.
+ */
+static void ReplaceViewQueryFirstAfter(List *query_str)
+{
+    ListCell *cell = NULL;
+
+    if (query_str == NIL) {
+        return;
+    }
+
+    foreach (cell, query_str) {
+        ViewInfoForAdd *info = (ViewInfoForAdd *)lfirst(cell);
+
+        /* each stored string must parse back to exactly one statement */
+        List *parsetree_list = pg_parse_query(info->query_string);
+        if (list_length(parsetree_list) != 1) {
+            ereport(ERROR,
+                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("this is not a view")));
+        }
+
+        Node *parsetree = (Node *)linitial(parsetree_list);
+        Query *query = parse_analyze(parsetree, info->query_string, NULL, 0);
+        StoreViewQuery(info->ev_class, query, true);
+    }
+}
+
+/*
+ * update pg_trigger
+ * 1. add column with first or after col_name.
+ * 2. modify column to first or after column.
+ *
+ * Renumbers the column references stored for each trigger on 'rel': the
+ * tgattr column list and the Vars inside the tgqual (WHEN clause) tree.
+ * Attnos in [startattnum, endattnum] shift by one in the direction given by
+ * is_increase; the moved column (curattnum) is remapped to newattnum.
+ * Triggers that reference no columns at all are left untouched.
+ */
+static void UpdatePgTriggerFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase)
+{
+    if (!rel->rd_rel->relhastriggers) {
+        return;
+    }
+    ScanKeyData key;
+    HeapTuple tri_tuple;
+    Relation tri_rel;
+    SysScanDesc scan;
+    int curattnum = is_increase ? endattnum + 1 : startattnum - 1;
+    int newattnum = is_increase ? startattnum : endattnum;
+
+    tri_rel = heap_open(TriggerRelationId, RowExclusiveLock);
+
+    ScanKeyInit(&key, Anum_pg_trigger_tgrelid, BTEqualStrategyNumber, F_OIDEQ,
+        ObjectIdGetDatum(RelationGetRelid(rel)));
+
+    scan = systable_beginscan(tri_rel, TriggerRelidNameIndexId, true, NULL, 1, &key);
+    while (HeapTupleIsValid(tri_tuple = systable_getnext(scan))) {
+        bool is_null = false;
+        Datum values[Natts_pg_trigger] = { 0 };
+        bool nulls[Natts_pg_trigger] = { 0 };
+        bool replaces[Natts_pg_trigger] = { 0 };
+        HeapTuple new_tri_tuple;
+        int2vector *new_tgattr = NULL;
+
+        Datum tgattr_datum = fastgetattr(tri_tuple, Anum_pg_trigger_tgattr, tri_rel->rd_att, &is_null);
+        if (!is_null) {
+            int2vector *tgattr = (int2vector *)DatumGetPointer(tgattr_datum);
+            new_tgattr = buildint2vector(NULL, tgattr->dim1);
+            for (int i = 0; i < tgattr->dim1; i++) {
+                if (tgattr->values[i] >= startattnum && tgattr->values[i] <= endattnum) {
+                    new_tgattr->values[i] = is_increase ? (tgattr->values[i] + 1) : (tgattr->values[i] - 1);
+                } else if (tgattr->values[i] == curattnum) {
+                    new_tgattr->values[i] = newattnum;
+                } else {
+                    new_tgattr->values[i] = tgattr->values[i];
+                }
+            }
+
+            values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(new_tgattr);
+            replaces[Anum_pg_trigger_tgattr - 1] = true;
+        }
+
+        Datum tgqual_datum = fastgetattr(tri_tuple, Anum_pg_trigger_tgqual, tri_rel->rd_att, &is_null);
+        if (!is_null) {
+            /* deserialize, renumber the Vars, and store the new tree */
+            char *tgqual_string = TextDatumGetCString(tgqual_datum);
+            Node *tgqual = (Node *)stringToNode(tgqual_string);
+            Node *new_tgqual = UpdateVarattnoAfterAddColumn(tgqual, startattnum, endattnum, is_increase);
+            char *new_tgqual_string = nodeToString(new_tgqual);
+
+            values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(new_tgqual_string);
+            replaces[Anum_pg_trigger_tgqual - 1] = true;
+            pfree_ext(tgqual_string);
+            pfree_ext(new_tgqual_string);
+        }
+
+        /* nothing to renumber for this trigger: avoid a no-op heap update */
+        if (!replaces[Anum_pg_trigger_tgattr - 1] && !replaces[Anum_pg_trigger_tgqual - 1]) {
+            continue;
+        }
+
+        new_tri_tuple = heap_modify_tuple(tri_tuple, RelationGetDescr(tri_rel), values, nulls, replaces);
+        simple_heap_update(tri_rel, &new_tri_tuple->t_self, new_tri_tuple);
+        CatalogUpdateIndexes(tri_rel, new_tri_tuple);
+
+        pfree_ext(new_tgattr);
+        heap_freetuple_ext(new_tri_tuple);
+    }
+
+    systable_endscan(scan);
+    heap_close(tri_rel, RowExclusiveLock);
+}
+
+/*
+ * update pg_rlspolicy
+ * 1. add column with first or after col_name.
+ * 2. modify column to first or after column.
+ *
+ * Renumbers the Var attnos stored in each row-level-security policy
+ * qualifier (polqual) of 'rel' after the table's columns were shifted.
+ * Policies without a qualifier are left untouched.
+ */
+static void UpdatePgRlspolicyFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase)
+{
+    ScanKeyData key;
+    HeapTuple rls_tuple;
+    Relation rls_rel;
+    SysScanDesc scan;
+
+    rls_rel = heap_open(RlsPolicyRelationId, RowExclusiveLock);
+
+    ScanKeyInit(&key, Anum_pg_rlspolicy_polrelid, BTEqualStrategyNumber, F_OIDEQ,
+        ObjectIdGetDatum(RelationGetRelid(rel)));
+
+    scan = systable_beginscan(rls_rel, PgRlspolicyPolrelidPolnameIndex, true, NULL, 1, &key);
+    while (HeapTupleIsValid(rls_tuple = systable_getnext(scan))) {
+        bool is_null = false;
+        Datum values[Natts_pg_rlspolicy] = { 0 };
+        bool nulls[Natts_pg_rlspolicy] = { 0 };
+        bool replaces[Natts_pg_rlspolicy] = { 0 };
+        HeapTuple new_rls_tuple;
+
+        Datum polqual_datum = heap_getattr(rls_tuple, Anum_pg_rlspolicy_polqual, rls_rel->rd_att, &is_null);
+        /* nothing to renumber for this policy: avoid a no-op heap update */
+        if (is_null) {
+            continue;
+        }
+
+        /* deserialize, renumber the Vars, and store the new tree */
+        char *polqual_string = TextDatumGetCString(polqual_datum);
+        Node *polqual = (Node *)stringToNode(polqual_string);
+        Node *new_polqual = UpdateVarattnoAfterAddColumn(polqual, startattnum, endattnum, is_increase);
+        char *new_polqual_string = nodeToString(new_polqual);
+
+        values[Anum_pg_rlspolicy_polqual - 1] = CStringGetTextDatum(new_polqual_string);
+        replaces[Anum_pg_rlspolicy_polqual - 1] = true;
+        pfree_ext(polqual_string);
+        pfree_ext(new_polqual_string);
+
+        new_rls_tuple = heap_modify_tuple(rls_tuple, RelationGetDescr(rls_rel), values, nulls, replaces);
+        simple_heap_update(rls_rel, &new_rls_tuple->t_self, new_rls_tuple);
+        CatalogUpdateIndexes(rls_rel, new_rls_tuple);
+
+        heap_freetuple_ext(new_rls_tuple);
+    }
+
+    systable_endscan(scan);
+    heap_close(rls_rel, RowExclusiveLock);
+}
+
#ifdef ENABLE_MOT
static void ATExecMOTAlterTable(AlterForeingTableCmd* cmd)
{
@@ -10014,7 +11705,7 @@ static void ATExecMOTAlterTable(AlterForeingTableCmd* cmd)
#endif
static void ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel, ColumnDef* colDef, bool isOid,
- bool recurse, bool recursing, LOCKMODE lockmode)
+ bool recurse, bool recursing, bool is_first, char *after_name, LOCKMODE lockmode)
{
Oid myrelid = RelationGetRelid(rel);
Relation pgclass = NULL;
@@ -10022,7 +11713,8 @@ static void ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel,
Relation cedesc = NULL;
HeapTuple reltup = NULL;
FormData_pg_attribute attribute;
- int newattnum;
+ int newattnum = 0;
+ int currattnum = 0;
char relkind;
HeapTuple typeTuple;
Oid typeOid = InvalidOid;
@@ -10034,18 +11726,39 @@ static void ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel,
ListCell* child = NULL;
AclResult aclresult;
bool isDfsTable = RelationIsPAXFormat(rel);
+ bool is_addloc = is_first || after_name != NULL;
+ List *query_str = NIL;
/* At top level, permission check was done in ATPrepCmd, else do it */
if (recursing)
ATSimplePermissions(rel, ATT_TABLE);
attrdesc = heap_open(AttributeRelationId, RowExclusiveLock);
+
+ if (is_addloc) {
+ if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE) {
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-supported feature"),
+ errdetail("foreign table is not supported for add column first|after columnName")));
+ }
+
+ if (RelationIsColumnFormat(rel)) {
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-supported feature"),
+ errdetail("column orientated table is not supported for add column first|after columnName")));
+ }
+ }
/*
* if adding encrypted column
*/
CeHeapInfo* ceHeapInfo = NULL;
if (colDef->clientLogicColumnRef != NULL) {
+ if (is_addloc) {
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("Un-supported feature"),
+ errdetail("encryption column is not supported for add column first|after columnName")));
+ }
if (colDef->clientLogicColumnRef != NULL) {
ceHeapInfo = (CeHeapInfo *)palloc(sizeof(CeHeapInfo));
process_encrypted_columns(colDef, ceHeapInfo);
@@ -10130,23 +11843,36 @@ static void ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel,
if (isOid) {
newattnum = ObjectIdAttributeNumber;
} else {
- newattnum = ((Form_pg_class)GETSTRUCT(reltup))->relnatts + 1;
- if (newattnum > MaxHeapAttributeNumber)
+ currattnum = ((Form_pg_class)GETSTRUCT(reltup))->relnatts;
+ if (currattnum + 1 > MaxHeapAttributeNumber) {
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_COLUMNS),
errmsg("tables can have at most %d columns", MaxHeapAttributeNumber)));
+ }
+ if (is_first) {
+ newattnum = 1;
+ } else if (after_name != NULL) {
+ newattnum = GetAfterColumnAttnum(myrelid, after_name);
+ } else {
+ newattnum = currattnum + 1;
+ }
}
typeTuple = typenameType(NULL, colDef->typname, &typmod);
tform = (Form_pg_type)GETSTRUCT(typeTuple);
typeOid = HeapTupleGetOid(typeTuple);
+ /* And the collation */
+ Oid rel_coll_oid = rel->rd_options == NULL ? InvalidOid : ((StdRdOptions*)(rel)->rd_options)->collate;
+ collOid = GetColumnDefCollation(NULL, colDef, typeOid, rel_coll_oid);
+ if (DB_IS_CMPT(B_FORMAT)) {
+ typeOid = binary_need_transform_typeid(typeOid, &collOid);
+ }
+
aclresult = pg_type_aclcheck(typeOid, GetUserId(), ACL_USAGE);
if (aclresult != ACLCHECK_OK)
aclcheck_error_type(aclresult, typeOid);
- collOid = GetColumnDefCollation(NULL, colDef, typeOid);
-
/* make sure datatype is legal for a column */
CheckAttributeType(colDef->colname, typeOid, collOid, list_make1_oid(rel->rd_rel->reltype), false);
@@ -10194,6 +11920,20 @@ static void ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel,
errmsg("row-oriented table does not support compression")));
}
+ if (is_addloc) {
+ UpdatePgAttributeFirstAfter(attrdesc, myrelid, newattnum, currattnum, true);
+ UpdatePgIndexFirstAfter(rel, newattnum, currattnum, true);
+ UpdatePgConstraintFirstAfter(rel, newattnum, currattnum, true);
+ UpdatePgConstraintConfkeyFirstAfter(rel, newattnum, currattnum, true);
+ UpdatePgAttrdefFirstAfter(rel, newattnum, currattnum, true);
+ UpdatePgPartitionFirstAfter(rel, newattnum, currattnum, true, false, NULL);
+ UpdatePgTriggerFirstAfter(rel, newattnum, currattnum, true);
+ UpdatePgRlspolicyFirstAfter(rel, newattnum, currattnum, true);
+ query_str = CheckPgRewriteFirstAfter(rel);
+ tab->rewrite = true;
+ tab->is_first_after = true;
+ }
+
InsertPgAttributeTuple(attrdesc, &attribute, NULL);
heap_close(attrdesc, RowExclusiveLock);
@@ -10210,7 +11950,7 @@ static void ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel,
if (isOid)
((Form_pg_class)GETSTRUCT(reltup))->relhasoids = true;
else
- ((Form_pg_class)GETSTRUCT(reltup))->relnatts = newattnum;
+ ((Form_pg_class)GETSTRUCT(reltup))->relnatts = currattnum + 1;
simple_heap_update(pgclass, &reltup->t_self, reltup);
@@ -10227,6 +11967,15 @@ static void ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel,
/* Make the attribute's catalog entry visible */
CommandCounterIncrement();
+ if (is_addloc) {
+ UpdatePgDependFirstAfter(rel, newattnum, currattnum, true);
+ UpdateGenerateColFirstAfter(rel, newattnum, currattnum, true);
+ UpdateIndexFirstAfter(rel);
+
+ /* create or replace view */
+ ReplaceViewQueryFirstAfter(query_str);
+ }
+
/*
* Store the DEFAULT, if any, in the catalogs
*/
@@ -10322,7 +12071,7 @@ static void ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel,
else
defval = (Expr*)build_column_default(rel, attribute.attnum);
- if (defval == NULL && GetDomainConstraints(typeOid) != NIL) {
+ if (defval == NULL && (GetDomainConstraints(typeOid) != NIL || is_addloc)) {
Oid baseTypeId;
int32 baseTypeMod;
Oid baseTypeColl;
@@ -10331,11 +12080,13 @@ static void ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel,
baseTypeId = getBaseTypeAndTypmod(typeOid, &baseTypeMod);
baseTypeColl = get_typcollation(baseTypeId);
defval = (Expr*)makeNullConst(baseTypeId, baseTypeMod, baseTypeColl);
- defval = (Expr*)coerce_to_target_type(
- NULL, (Node*)defval, baseTypeId, typeOid, typmod, COERCION_ASSIGNMENT, COERCE_IMPLICIT_CAST, -1);
- if (defval == NULL) /* should not happen */
- ereport(
- ERROR, (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmsg("failed to coerce base type to domain")));
+ if (GetDomainConstraints(typeOid) != NIL) {
+ defval = (Expr*)coerce_to_target_type(
+ NULL, (Node*)defval, baseTypeId, typeOid, typmod, COERCION_ASSIGNMENT, COERCE_IMPLICIT_CAST, -1);
+ if (defval == NULL) /* should not happen */
+ ereport(ERROR,
+ (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmsg("failed to coerce base type to domain")));
+ }
}
if (defval != NULL) {
@@ -10344,15 +12095,19 @@ static void ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel,
* also exclude temp table and column table.
*/
if (attribute.attnum == RelAutoIncAttrNum(rel)) {
- ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, true);
+ if (colDef->is_not_null) {
+ ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, true, is_addloc);
+ }
} else if (contain_specified_function((Node*)defval, NEXTVALFUNCOID)) {
/* We don't support alter table add column which default with nextval expression. */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("It's not supported to alter table add column default with nextval expression.")));
- } else if (RelationIsCUFormat(rel) || tab->rewrite ||
- RelationUsesSpaceType(rel->rd_rel->relpersistence) == SP_TEMP || colDef->generatedCol) {
- ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, false);
+ } else if (RelationIsCUFormat(rel)) {
+ ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, false, false);
+ } else if (tab->rewrite || colDef->generatedCol ||
+ RelationUsesSpaceType(rel->rd_rel->relpersistence) == SP_TEMP) {
+ ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, false, true);
} else {
bytea* value = NULL;
AT_INSTANT_DEFAULT_VALUE ret =
@@ -10377,7 +12132,7 @@ static void ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel,
"2. the storage length of default value may be greater than 127.\n"
"3. the data type of new column is not supported."))));
}
- ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, false);
+ ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, false, false);
}
/* nothing to do if ret is DEFAULT_NULL */
}
@@ -10448,7 +12203,7 @@ static void ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel,
tagtab = ATGetQueueEntry(wqueue, tagrel);
- ATExecAddColumn(wqueue, tagtab, tagrel, colDef, isOid, false, false, lockmode);
+ ATExecAddColumn(wqueue, tagtab, tagrel, colDef, isOid, false, false, false, NULL, lockmode);
char tag_relname[NAMEDATALEN] = {0};
Tsdb::GenMetaRelname(rel->rd_rel->relnamespace, Tsdb::MetaTableType::META_TABLE_TAGS,
@@ -10463,7 +12218,7 @@ static void ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel,
Relation delta_rel = Tsdb::RelationGetDeltaRelation(rel, lockmode);
CheckTableNotInUse(delta_rel, "ALTER TABLE");
AlteredTableInfo* delta_tab = ATGetQueueEntry(wqueue, delta_rel);
- ATExecAddColumn(wqueue, delta_tab, delta_rel, colDef, isOid, false, false, lockmode);
+ ATExecAddColumn(wqueue, delta_tab, delta_rel, colDef, isOid, false, false, false, NULL, lockmode);
heap_close(delta_rel, NoLock);
}
}
@@ -10537,7 +12292,7 @@ static void ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel,
childtab = ATGetQueueEntry(wqueue, childrel);
/* Recurse to child */
- ATExecAddColumn(wqueue, childtab, childrel, colDef, isOid, recurse, true, lockmode);
+ ATExecAddColumn(wqueue, childtab, childrel, colDef, isOid, recurse, true, is_first, after_name, lockmode);
heap_close(childrel, NoLock);
}
@@ -10638,7 +12393,7 @@ static void ATPrepAddOids(List** wqueue, Relation rel, bool recurse, AlterTableC
cdef->cmprs_mode = ATT_CMPR_NOCOMPRESS;
cmd->def = (Node*)cdef;
}
- ATPrepAddColumn(wqueue, rel, recurse, false, cmd, lockmode);
+ ATPrepAddColumn(wqueue, NULL, rel, recurse, false, cmd, lockmode);
if (recurse)
cmd->subtype = AT_AddOidsRecurse;
@@ -11574,7 +13329,8 @@ static void ATExecAddIndex(AlteredTableInfo* tab, Relation rel, IndexStmt* stmt,
true, /* is_alter_table */
check_rights,
skip_build,
- quiet);
+ quiet,
+ tab->is_modify_primary);
#ifndef ENABLE_MULTIPLE_NODES
if (RelationIsCUFormat(rel) && (stmt->primary || stmt->unique)) {
DefineDeltaUniqueIndex(RelationGetRelid(rel), stmt, new_index);
@@ -13477,6 +15233,7 @@ static void ATPrepAlterColumnType(List** wqueue, AlteredTableInfo* tab, Relation
Oid targettype = InvalidOid;
int32 targettypmod = -1;
Oid targetcollid = InvalidOid;
+ int target_charset = PG_INVALID_ENCODING;
NewColumnValue* newval = NULL;
ParseState* pstate = make_parsestate(NULL);
AclResult aclresult;
@@ -13510,12 +15267,6 @@ static void ATPrepAlterColumnType(List** wqueue, AlteredTableInfo* tab, Relation
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("cannot alter column type to \"%s\"", tname)));
}
- if (attnum == RelAutoIncAttrNum(rel)) {
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Un-supported feature"),
- errdetail("auto-increment column cannot be modified.")));
- }
if (typname == NULL) {
ereport(ERROR,
@@ -13525,6 +15276,14 @@ static void ATPrepAlterColumnType(List** wqueue, AlteredTableInfo* tab, Relation
/* Look up the target type */
typenameTypeIdAndMod(NULL, typname, &targettype, &targettypmod);
+ /* And the collation */
+ Oid rel_coll_oid = rel->rd_options == NULL ? InvalidOid : ((StdRdOptions*)(rel)->rd_options)->collate;
+ targetcollid = GetColumnDefCollation(NULL, def, targettype, rel_coll_oid);
+ if (DB_IS_CMPT(B_FORMAT)) {
+ targettype = binary_need_transform_typeid(targettype, &targetcollid);
+ target_charset = get_charset_by_collation(targetcollid);
+ }
+
// check the unsupported datatype.
if (RelationIsColStore(rel) && !IsTypeSupportedByCStore(targettype)) {
ereport(ERROR,
@@ -13537,9 +15296,6 @@ static void ATPrepAlterColumnType(List** wqueue, AlteredTableInfo* tab, Relation
if (aclresult != ACLCHECK_OK)
aclcheck_error_type(aclresult, targettype);
- /* And the collation */
- targetcollid = GetColumnDefCollation(NULL, def, targettype);
-
/* make sure datatype is legal for a column */
CheckAttributeType(colName, targettype, targetcollid, list_make1_oid(rel->rd_rel->reltype), false);
@@ -13628,7 +15384,9 @@ static void ATPrepAlterColumnType(List** wqueue, AlteredTableInfo* tab, Relation
errmsg(
"column \"%s\" cannot be cast automatically to type %s", colName, format_type_be(targettype)),
errhint("Specify a USING expression to perform the conversion.")));
-
+#ifndef ENABLE_MULTIPLE_NODES
+ transform = coerce_to_target_charset(transform, target_charset, attTup->atttypid);
+#endif
/* Fix collations after all else */
assign_expr_collations(pstate, transform);
@@ -13644,6 +15402,10 @@ static void ATPrepAlterColumnType(List** wqueue, AlteredTableInfo* tab, Relation
newval->expr = (Expr*)transform;
newval->is_generated = false;
newval->is_autoinc = false;
+ newval->is_addloc = false;
+ newval->newattnum = 0;
+ newval->col_name = pstrdup(colName);
+ newval->generate_attnum = 0;
tab->newvals = lappend(tab->newvals, newval);
if (ATColumnChangeRequiresRewrite(transform, attnum))
@@ -13757,6 +15519,379 @@ static void DelDependencONDataType(const Relation rel, Relation depRel, const Fo
systable_endscan(scan);
}
+/*
+ * update pg_attrdef adnum for the modified column with first or after column.
+ *
+ * Looks up the (at most one) pg_attrdef row for column 'curattnum' of
+ * relation 'myrelid' and rewrites its adnum to 'newattnum'.  'rel' is the
+ * pg_attrdef catalog, already opened by the caller with RowExclusiveLock.
+ * When a row is found and 'has_default' is non-NULL, *has_default is set to
+ * true; it is never reset to false here, so callers must pre-initialize it.
+ */
+static void UpdateAttrdefAdnumFirstAfter(Relation rel, Oid myrelid, int curattnum, int newattnum,
+    bool *has_default)
+{
+    ScanKeyData key[2];
+    HeapTuple def_tuple;
+    SysScanDesc scan;
+
+    ScanKeyInit(&key[0], Anum_pg_attrdef_adrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(myrelid));
+    ScanKeyInit(&key[1], Anum_pg_attrdef_adnum, BTEqualStrategyNumber, F_INT2EQ, Int16GetDatum(curattnum));
+
+    scan = systable_beginscan(rel, AttrDefaultIndexId, true, NULL, 2, key);
+
+    /* a column has at most one default entry, so a single fetch suffices */
+    def_tuple = systable_getnext(scan);
+    if (HeapTupleIsValid(def_tuple)) {
+        Datum values[Natts_pg_attrdef] = { 0 };
+        bool nulls[Natts_pg_attrdef] = { 0 };
+        bool replaces[Natts_pg_attrdef] = { 0 };
+        HeapTuple new_def_tuple;
+
+        if (has_default != NULL) {
+            *has_default = true;
+        }
+
+        values[Anum_pg_attrdef_adnum - 1] = Int16GetDatum(newattnum);
+        replaces[Anum_pg_attrdef_adnum - 1] = true;
+
+        new_def_tuple = heap_modify_tuple(def_tuple, RelationGetDescr(rel), values, nulls, replaces);
+        simple_heap_update(rel, &new_def_tuple->t_self, new_def_tuple);
+        /* keep catalog indexes in sync with the heap update */
+        CatalogUpdateIndexes(rel, new_def_tuple);
+
+        heap_freetuple_ext(new_def_tuple);
+    }
+
+    systable_endscan(scan);
+}
+
+/*
+ * update pg_depend refobjsubid for the modified column with first or after column.
+ *
+ * For every pg_depend row referencing column 'curattnum' of 'myrelid', the
+ * refobjsubid is temporarily set to the sentinel -1; once all other catalog
+ * renumbering is done, UpdateDependRefobjsubidToNewattnum() rewrites the
+ * sentinel to the final attnum (see AlterColumnToFirstAfter).  For rows whose
+ * dependent object is the relation itself (objid == myrelid), objsubid is
+ * also shifted: columns between the old and new position move by one, and
+ * the moved column itself gets 'newattnum'.
+ * 'rel' is pg_depend, opened by the caller with RowExclusiveLock.  When any
+ * row is touched and 'has_depend' is non-NULL, *has_depend is set to true
+ * (never reset to false; callers must pre-initialize it).
+ */
+static void UpdateDependRefobjsubidFirstAfter(Relation rel, Oid myrelid, int curattnum, int newattnum,
+    bool *has_depend)
+{
+    ScanKeyData key[2];
+    HeapTuple dep_tuple;
+    Form_pg_depend dep_form;
+    SysScanDesc scan;
+
+    ScanKeyInit(&key[0], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(myrelid));
+    ScanKeyInit(&key[1], Anum_pg_depend_refobjsubid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(curattnum));
+
+    scan = systable_beginscan(rel, DependReferenceIndexId, true, NULL, 2, key);
+    while (HeapTupleIsValid(dep_tuple = systable_getnext(scan))) {
+        Datum values[Natts_pg_depend] = { 0 };
+        bool nulls[Natts_pg_depend] = { 0 };
+        bool replaces[Natts_pg_depend] = { 0 };
+        HeapTuple new_dep_tuple;
+
+        dep_form = (Form_pg_depend)GETSTRUCT(dep_tuple);
+
+        if (has_depend != NULL) {
+            *has_depend = true;
+        }
+
+        /* park the referenced column under the -1 sentinel for now */
+        values[Anum_pg_depend_refobjsubid - 1] = Int32GetDatum(-1);
+        replaces[Anum_pg_depend_refobjsubid - 1] = true;
+
+        if (dep_form->objid == myrelid) {
+            int startattnum;
+            int endattnum;
+            bool is_increase = false;
+            /* [startattnum, endattnum] are the columns displaced by the move;
+             * is_increase means they shift right (column moved leftwards) */
+            if (newattnum <= curattnum - 1) {
+                startattnum = newattnum;
+                endattnum = curattnum - 1;
+                is_increase = true;
+            } else {
+                startattnum = curattnum + 1;
+                endattnum = newattnum;
+            }
+            if (dep_form->objsubid >= startattnum && dep_form->objsubid <= endattnum) {
+                values[Anum_pg_depend_objsubid - 1] = is_increase ?
+                    Int32GetDatum(dep_form->objsubid + 1) : Int32GetDatum(dep_form->objsubid - 1);
+                replaces[Anum_pg_depend_objsubid - 1] = true;
+            } else if (dep_form->objsubid == curattnum) {
+                /* the moved column itself goes straight to its target slot */
+                values[Anum_pg_depend_objsubid - 1] = Int32GetDatum(newattnum);
+                replaces[Anum_pg_depend_objsubid - 1] = true;
+            }
+        }
+
+        new_dep_tuple = heap_modify_tuple(dep_tuple, RelationGetDescr(rel), values, nulls, replaces);
+        simple_heap_update(rel, &new_dep_tuple->t_self, new_dep_tuple);
+        CatalogUpdateIndexes(rel, new_dep_tuple);
+
+        heap_freetuple_ext(new_dep_tuple);
+    }
+    systable_endscan(scan);
+}
+
+/*
+ * Rewrite pg_depend refobjsubid from 'curattnum' to 'newattnum'.
+ *
+ * Companion to UpdateDependRefobjsubidFirstAfter(): called with
+ * curattnum == -1 to resolve the temporary sentinel left by that function
+ * to the column's final attnum.  'rel' is pg_depend, already opened by the
+ * caller with RowExclusiveLock.
+ * (Original header comment was a copy-paste of the previous function's.)
+ */
+static void UpdateDependRefobjsubidToNewattnum(Relation rel, Oid myrelid, int curattnum, int newattnum)
+{
+    ScanKeyData key[2];
+    HeapTuple dep_tuple;
+    Form_pg_depend dep_form;
+    SysScanDesc scan;
+
+    ScanKeyInit(&key[0], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(myrelid));
+    ScanKeyInit(&key[1], Anum_pg_depend_refobjsubid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(curattnum));
+
+    scan = systable_beginscan(rel, DependReferenceIndexId, true, NULL, 2, key);
+    while (HeapTupleIsValid(dep_tuple = systable_getnext(scan))) {
+        Datum values[Natts_pg_depend] = { 0 };
+        bool nulls[Natts_pg_depend] = { 0 };
+        bool replaces[Natts_pg_depend] = { 0 };
+        HeapTuple new_dep_tuple;
+        dep_form = (Form_pg_depend)GETSTRUCT(dep_tuple);
+
+        values[Anum_pg_depend_refobjsubid - 1] = Int32GetDatum(newattnum);
+        replaces[Anum_pg_depend_refobjsubid - 1] = true;
+
+        new_dep_tuple = heap_modify_tuple(dep_tuple, RelationGetDescr(rel), values, nulls, replaces);
+        simple_heap_update(rel, &new_dep_tuple->t_self, new_dep_tuple);
+        CatalogUpdateIndexes(rel, new_dep_tuple);
+
+        heap_freetuple_ext(new_dep_tuple);
+    }
+
+    systable_endscan(scan);
+}
+
+/*
+ * update pg_partition partkey for the modified column with first or after column.
+ *
+ * For every pg_partition row whose parentid is 'myrelid', any partkey entry
+ * equal to 'curattnum' is replaced with 'newattnum' (other entries are
+ * copied unchanged).  Opens and closes pg_partition itself.
+ */
+static void UpdatePartitionPartkeyFirstAfter(Oid myrelid, int curattnum, int newattnum)
+{
+    ScanKeyData skey;
+    HeapTuple par_tuple;
+    Relation par_rel;
+    SysScanDesc scan;
+
+    par_rel = heap_open(PartitionRelationId, RowExclusiveLock);
+
+    ScanKeyInit(&skey, Anum_pg_partition_parentid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(myrelid));
+
+    scan = systable_beginscan(par_rel, PartitionParentOidIndexId, true, NULL, 1, &skey);
+    while (HeapTupleIsValid(par_tuple = systable_getnext(scan))) {
+        bool is_null = false;
+
+        // update pg_partition_partkey
+        /* NOTE(review): SysCacheGetAttr is used here on a tuple obtained from
+         * a systable scan rather than from the PARTRELID cache; presumably
+         * only the cache's tupdesc is needed - confirm this is intended. */
+        Datum partkey_datum = SysCacheGetAttr(PARTRELID, par_tuple, Anum_pg_partition_partkey, &is_null);
+        if (!is_null) {
+            Datum values[Natts_pg_partition] = { 0 };
+            bool nulls[Natts_pg_partition] = { 0 };
+            bool replaces[Natts_pg_partition] = { 0 };
+            int2vector *partkey = NULL;
+            int2vector *new_partKey = NULL;
+            HeapTuple new_par_tuple;
+
+            partkey = (int2vector *)DatumGetPointer(partkey_datum);
+            /* rebuild the partkey vector, substituting the moved column */
+            new_partKey = buildint2vector(NULL, partkey->dim1);
+            for (int i = 0; i < partkey->dim1; i++) {
+                if (partkey->values[i] == curattnum) {
+                    new_partKey->values[i] = newattnum;
+                } else {
+                    new_partKey->values[i] = partkey->values[i];
+                }
+            }
+            values[Anum_pg_partition_partkey - 1] = PointerGetDatum(new_partKey);
+            replaces[Anum_pg_partition_partkey - 1] = true;
+
+            new_par_tuple = heap_modify_tuple(par_tuple, RelationGetDescr(par_rel), values, nulls, replaces);
+            simple_heap_update(par_rel, &new_par_tuple->t_self, new_par_tuple);
+            CatalogUpdateIndexes(par_rel, new_par_tuple);
+
+            pfree_ext(new_partKey);
+            heap_freetuple_ext(new_par_tuple);
+        }
+    }
+    systable_endscan(scan);
+    heap_close(par_rel, RowExclusiveLock);
+}
+
+/*
+ * Compute the target attnum for ALTER TABLE ... MODIFY col FIRST/AFTER.
+ *
+ * Returns 0 when no move is needed (column is already first for FIRST, or
+ * already sits directly after the named column for AFTER); otherwise returns
+ * the 1-based attnum the column should end up at.
+ */
+static int GetNewattnumFirstAfter(Relation rel, AlterTableCmd* cmd, int curattnum)
+{
+    bool is_first = cmd->is_first;
+    char *after_name = cmd->after_name;
+    int newattnum = 0;
+
+    /* FIRST on the first column is a no-op */
+    if (is_first && curattnum == 1) {
+        return 0;
+    }
+
+    if (is_first) {
+        newattnum = 1;
+    } else if (after_name != NULL) {
+        newattnum = GetAfterColumnAttnum(RelationGetRelid(rel), after_name) - 1;
+        /* AFTER the column's immediate predecessor is a no-op */
+        if (newattnum + 1 == curattnum) {
+            return 0;
+        }
+
+        if (newattnum == curattnum) {
+            /* NOTE(review): this branch fires when after_name's attnum equals
+             * curattnum + 1; presumably GetAfterColumnAttnum encodes the
+             * "column not found" case this way - confirm against its
+             * definition, as the message reports an unknown column. */
+            ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN),
+                errmsg("Unknown column \"%s\" in \"%s\"", after_name, RelationGetRelationName(rel))));
+        } else if (newattnum < curattnum) {
+            /* moving left: land in the slot right after after_name */
+            newattnum++;
+        }
+    }
+    return newattnum;
+}
+
+/*
+ * Move a column to a new ordinal position (ALTER ... FIRST / AFTER col).
+ *
+ * Strategy: the moved column is first parked at the reserved attnum 0 (and
+ * its pg_attrdef row at adnum 0, its pg_depend references at subid -1), the
+ * columns between the old and new position are shifted by one across every
+ * catalog that records attribute numbers, and finally the parked entries are
+ * rewritten to 'newattnum'.  CommandCounterIncrement() calls separate the
+ * phases so later catalog lookups see the earlier updates.  Forces a table
+ * rewrite (tab->rewrite) and recreates dependent views at the end.
+ * The statement order here is load-bearing; do not reorder.
+ */
+static void AlterColumnToFirstAfter(AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd,
+    int curattnum)
+{
+    Oid myrelid = RelationGetRelid(rel);
+    int newattnum;
+    Relation attr_rel;
+    HeapTuple att_tuple_old, att_tuple_new;
+    Form_pg_attribute att_form_old, attr_form_new;
+    int startattnum, endattnum;
+    bool has_default = false;
+    bool has_depend = false;
+    bool has_partition = false;
+    bool is_increase = false;
+    List *query_str = NIL;
+
+    /* 0 means the column already sits at the requested position */
+    newattnum = GetNewattnumFirstAfter(rel, cmd, curattnum);
+    if (newattnum == 0) {
+        return;
+    }
+
+    tab->rewrite = true;
+
+    attr_rel = heap_open(AttributeRelationId, RowExclusiveLock);
+
+    att_tuple_old = SearchSysCacheCopy2(ATTNUM, ObjectIdGetDatum(myrelid), Int16GetDatum(curattnum));
+    if (!HeapTupleIsValid(att_tuple_old)) {
+        ereport(ERROR, (errmodule(MOD_SEC), errcode(ERRCODE_CACHE_LOOKUP_FAILED),
+            errmsg("cache lookup failed for attribute %d of relation %u", curattnum, myrelid), errdetail("N/A"),
+            errcause("System error."), erraction("Contact engineer to support.")));
+    }
+
+    att_form_old = (Form_pg_attribute)GETSTRUCT(att_tuple_old);
+
+    /* phase 1: park the column at the unused attnum 0 */
+    att_form_old->attnum = 0;
+
+    simple_heap_update(attr_rel, &att_tuple_old->t_self, att_tuple_old);
+    CatalogUpdateIndexes(attr_rel, att_tuple_old);
+
+    Relation def_rel = heap_open(AttrDefaultRelationId, RowExclusiveLock);
+    UpdateAttrdefAdnumFirstAfter(def_rel, myrelid, curattnum, 0, &has_default);
+
+    Relation dep_rel = heap_open(DependRelationId, RowExclusiveLock);
+    UpdateDependRefobjsubidFirstAfter(dep_rel, myrelid, curattnum, newattnum, &has_depend);
+
+    /* [startattnum, endattnum] are the displaced columns; is_increase means
+     * they shift right (the column moved leftwards) */
+    if (newattnum <= curattnum - 1) {
+        startattnum = newattnum;
+        endattnum = curattnum - 1;
+        is_increase = true;
+    } else {
+        startattnum = curattnum + 1;
+        endattnum = newattnum;
+    }
+
+    /* phase 2: renumber the displaced columns in every affected catalog */
+    UpdatePgPartitionFirstAfter(rel, startattnum, endattnum, is_increase, true, &has_partition);
+    UpdatePgAttributeFirstAfter(attr_rel, myrelid, startattnum, endattnum, is_increase);
+    UpdatePgIndexFirstAfter(rel, startattnum, endattnum, is_increase);
+    UpdatePgConstraintFirstAfter(rel, startattnum, endattnum, is_increase);
+    UpdatePgConstraintConfkeyFirstAfter(rel, startattnum, endattnum, is_increase);
+    UpdatePgAttrdefFirstAfter(rel, startattnum, endattnum, is_increase);
+    query_str = CheckPgRewriteFirstAfter(rel);
+    UpdatePgTriggerFirstAfter(rel, startattnum, endattnum, is_increase);
+    UpdatePgRlspolicyFirstAfter(rel, startattnum, endattnum, is_increase);
+    CommandCounterIncrement();
+
+    UpdateGenerateColFirstAfter(rel, startattnum, endattnum, is_increase);
+    UpdatePgDependFirstAfter(rel, startattnum, endattnum, is_increase);
+    CommandCounterIncrement();
+
+    /* phase 3: pull the parked column (attnum 0) into its final slot */
+    att_tuple_new = SearchSysCacheCopy2(ATTNUM, ObjectIdGetDatum(myrelid), Int16GetDatum(0));
+    if (!HeapTupleIsValid(att_tuple_new)) {
+        ereport(ERROR, (errmodule(MOD_SEC), errcode(ERRCODE_CACHE_LOOKUP_FAILED),
+            errmsg("cache lookup failed for attribute %d of relation %u", 0, myrelid), errdetail("N/A"),
+            errcause("System error."), erraction("Contact engineer to support.")));
+    }
+    attr_form_new = (Form_pg_attribute)GETSTRUCT(att_tuple_new);
+
+    attr_form_new->attnum = newattnum;
+    simple_heap_update(attr_rel, &att_tuple_new->t_self, att_tuple_new);
+    // keep system catalog indexes current
+    CatalogUpdateIndexes(attr_rel, att_tuple_new);
+
+    heap_close(attr_rel, RowExclusiveLock);
+    heap_freetuple_ext(att_tuple_old);
+    heap_freetuple_ext(att_tuple_new);
+
+    /* resolve the sentinels left behind in phase 1 */
+    if (has_default) {
+        UpdateAttrdefAdnumFirstAfter(def_rel, myrelid, 0, newattnum, NULL);
+    }
+    heap_close(def_rel, RowExclusiveLock);
+
+    if (has_depend) {
+        UpdateDependRefobjsubidToNewattnum(dep_rel, myrelid, -1, newattnum);
+    }
+    heap_close(dep_rel, RowExclusiveLock);
+
+    if (has_partition) {
+        UpdatePartitionPartkeyFirstAfter(myrelid, 0, newattnum);
+    }
+
+    CommandCounterIncrement();
+
+    /* create or replace view */
+    ReplaceViewQueryFirstAfter(query_str);
+}
+
+/*
+ * Report whether index 'objid' is backing a constraint.
+ *
+ * Scans pg_depend for a whole-object dependency (refobjsubid == 0) of the
+ * index on a pg_constraint entry.  On success returns true and stores the
+ * constraint OID in *refobjid; on false *refobjid is left untouched, so
+ * callers must not read it in that case.  'dep_rel' is pg_depend, already
+ * opened by the caller.
+ */
+static bool CheckIndexIsConstraint(Relation dep_rel, Oid objid, Oid *refobjid)
+{
+    ScanKeyData key[2];
+    HeapTuple dep_tuple;
+    SysScanDesc scan;
+    Form_pg_depend dep_form;
+    bool is_constraint = false;
+
+    ScanKeyInit(&key[0], Anum_pg_depend_classid,
+        BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationRelationId));
+    ScanKeyInit(&key[1], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(objid));
+    scan = systable_beginscan(dep_rel, DependDependerIndexId, true, NULL, 2, key);
+
+    while (HeapTupleIsValid(dep_tuple = systable_getnext(scan))) {
+        dep_form = (Form_pg_depend)GETSTRUCT(dep_tuple);
+        if (dep_form->refclassid == ConstraintRelationId && dep_form->refobjsubid == 0) {
+            *refobjid = dep_form->refobjid;
+            is_constraint = true;
+            break;
+        }
+    }
+    systable_endscan(scan);
+    return is_constraint;
+}
+
+/*
+ * Refresh attnum/newattnum of pending rewrite values for a moved column.
+ *
+ * After a FIRST/AFTER move renumbers attributes, every tab->newvals entry
+ * recorded for 'col_name' still carries the stale attnum.  Re-resolve the
+ * column's current attnum from syscache and recompute the move target via
+ * GetNewattnumFirstAfter().  Entries without a col_name are skipped.
+ */
+static void UpdateNewvalsAttnum(AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, char* col_name)
+{
+    ListCell* l = NULL;
+    foreach(l, tab->newvals) {
+        NewColumnValue* ex = (NewColumnValue*)lfirst(l);
+
+        if (ex->col_name == NULL) {
+            continue;
+        }
+
+        if (strcmp(ex->col_name, col_name) == 0) {
+            HeapTuple heap_tup;
+            Form_pg_attribute att_tup;
+
+            heap_tup = SearchSysCacheCopyAttName(RelationGetRelid(rel), col_name);
+            if (!HeapTupleIsValid(heap_tup)) { /* shouldn't happen */
+                ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN),
+                    errmsg("column \"%s\" of relation \"%s\" does not exist", col_name, RelationGetRelationName(rel))));
+            }
+            att_tup = (Form_pg_attribute)GETSTRUCT(heap_tup);
+            ex->attnum = att_tup->attnum;
+            ex->newattnum = GetNewattnumFirstAfter(rel, cmd, ex->attnum);
+
+            tableam_tops_free_tuple(heap_tup);
+        }
+    }
+}
+
+
static void ATExecAlterColumnType(AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, LOCKMODE lockmode)
{
char* colName = cmd->name;
@@ -13826,18 +15961,27 @@ static void ATExecAlterColumnType(AlteredTableInfo* tab, Relation rel, AlterTabl
}
/* Check for multiple ALTER TYPE on same column --- can't cope */
- if (attTup->atttypid != tab->oldDesc->attrs[attnum - 1].atttypid ||
- attTup->atttypmod != tab->oldDesc->attrs[attnum - 1].atttypmod)
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter type of column \"%s\" twice", colName)));
+ if (!tab->is_first_after) {
+ if (attTup->atttypid != tab->oldDesc->attrs[attnum - 1].atttypid ||
+ attTup->atttypmod != tab->oldDesc->attrs[attnum - 1].atttypmod)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter type of column \"%s\" twice", colName)));
+ }
+
/* Look up the target type (should not fail, since prep found it) */
typeTuple = typenameType(NULL, typname, &targettypmod);
tform = (Form_pg_type)GETSTRUCT(typeTuple);
targettype = HeapTupleGetOid(typeTuple);
/* And the collation */
- targetcollid = GetColumnDefCollation(NULL, def, targettype);
-
+ Oid rel_coll_oid = rel->rd_options == NULL ? InvalidOid : ((StdRdOptions*)(rel)->rd_options)->collate;
+ targetcollid = GetColumnDefCollation(NULL, def, targettype, rel_coll_oid);
+ if (DB_IS_CMPT(B_FORMAT)) {
+ targettype = binary_need_transform_typeid(targettype, &targetcollid);
+ }
+ if (attnum == RelAutoIncAttrNum(rel)) {
+ CheckAutoIncrementDatatype(targettype, colName);
+ }
generatedCol = GetGeneratedCol(rel->rd_att, attnum -1);
/*
@@ -13853,27 +15997,42 @@ static void ATExecAlterColumnType(AlteredTableInfo* tab, Relation rel, AlterTabl
* implicit coercions aren't going to be shown.)
*/
if (attTup->atthasdef) {
- defaultexpr = build_column_default(rel, attnum);
- /* for column only with on update but no default ,here could be NULL*/
- if (defaultexpr != NULL) {
- defaultexpr = strip_implicit_coercions(defaultexpr);
- defaultexpr = coerce_to_target_type(NULL, /* no UNKNOWN params */
- defaultexpr,
- exprType(defaultexpr),
- targettype,
- targettypmod,
- COERCION_ASSIGNMENT,
- COERCE_IMPLICIT_CAST,
- -1);
+ if (RelAutoIncAttrNum(rel) == attnum) {
+ defaultexpr = RecookAutoincAttrDefault(rel, attnum, targettype, targettypmod);
if (defaultexpr == NULL) {
if (generatedCol == ATTRIBUTE_GENERATED_STORED) {
ereport(ERROR, (errmodule(MOD_GEN_COL), errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("generation expression for column \"%s\" cannot be cast automatically to type %s", colName,
- format_type_be(targettype))));
+ errmsg("generation expression for column \"%s\" cannot be cast automatically to type %s",
+ colName, format_type_be(targettype))));
} else {
ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("default for column \"%s\" cannot be cast automatically to type %s", colName,
- format_type_be(targettype))));
+ format_type_be(targettype))));
+ }
+ }
+ } else {
+ defaultexpr = build_column_default(rel, attnum);
+ /* for column only with on update but no default ,here could be NULL*/
+ if (defaultexpr != NULL) {
+ defaultexpr = strip_implicit_coercions(defaultexpr);
+ defaultexpr = coerce_to_target_type(NULL, /* no UNKNOWN params */
+ defaultexpr,
+ exprType(defaultexpr),
+ targettype,
+ targettypmod,
+ COERCION_ASSIGNMENT,
+ COERCE_IMPLICIT_CAST,
+ -1);
+ if (defaultexpr == NULL) {
+ if (generatedCol == ATTRIBUTE_GENERATED_STORED) {
+ ereport(ERROR, (errmodule(MOD_GEN_COL), errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("generation expression for column \"%s\" cannot be cast automatically to type %s",
+ colName, format_type_be(targettype))));
+ } else {
+ ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("default for column \"%s\" cannot be cast automatically to type %s", colName,
+ format_type_be(targettype))));
+ }
}
}
}
@@ -13922,7 +16081,14 @@ static void ATExecAlterColumnType(AlteredTableInfo* tab, Relation rel, AlterTabl
if (relKind == RELKIND_INDEX || relKind == RELKIND_GLOBAL_INDEX) {
Assert(foundObject.objectSubId == 0);
- if (!list_member_oid(tab->changedIndexOids, foundObject.objectId)) {
+
+ Oid refobjid;
+ if (!list_member_oid(tab->changedConstraintOids, foundObject.objectId) &&
+ CheckIndexIsConstraint(depRel, foundObject.objectId, &refobjid)) {
+ tab->changedConstraintOids = lappend_oid(tab->changedConstraintOids, refobjid);
+ tab->changedConstraintDefs =
+ lappend(tab->changedConstraintDefs, pg_get_constraintdef_string(refobjid));
+ } else if (!list_member_oid(tab->changedIndexOids, foundObject.objectId)) {
/*
* Question: alter table set datatype and table index execute concurrently, data inconsistency
* occurs. The index file is deleted and metadata is left. Because the data type is not locked
@@ -14409,12 +16575,55 @@ static void ATPostAlterTypeCleanup(List** wqueue, AlteredTableInfo* tab, LOCKMOD
performDeletion(&obj, DROP_RESTRICT, PERFORM_DELETION_INTERNAL);
}
+ foreach (oid_item, tab->changedTriggerOids) {
+ obj.classId = TriggerRelationId;
+ obj.objectId = lfirst_oid(oid_item);
+ obj.objectSubId = 0;
+ performDeletion(&obj, DROP_RESTRICT, PERFORM_DELETION_INTERNAL);
+ }
+
/*
* The objects will get recreated during subsequent passes over the work
* queue.
*/
}
+/*
+ * Queue NOT NULL subcommands for primary-key columns being re-added.
+ *
+ * When a user-issued (non-internal) PRIMARY KEY index is recreated, every
+ * indexed column that is currently nullable gets an AT_SetNotNull subcommand
+ * appended to the AT_PASS_ADD_CONSTR pass, and tab->is_modify_primary is set
+ * so later passes know a primary key is being rebuilt.
+ */
+static void setPrimaryNotnull(Oid relid, IndexStmt *stmt, AlteredTableInfo* tab)
+{
+    if (stmt->primary && !stmt->internal_flag) {
+        ListCell* columns = NULL;
+        IndexElem* iparam = NULL;
+
+        tab->is_modify_primary = true;
+        foreach (columns, stmt->indexParams) {
+            HeapTuple atttuple;
+            Form_pg_attribute attform;
+
+            iparam = (IndexElem*)lfirst(columns);
+
+            atttuple = SearchSysCacheAttName(relid, iparam->name);
+            if (!HeapTupleIsValid(atttuple)) {
+                ereport(ERROR,
+                    (errcode(ERRCODE_CACHE_LOOKUP_FAILED),
+                        errmsg("cache lookup failed for attribute %s of relation %u", iparam->name, relid)));
+            }
+            attform = (Form_pg_attribute)GETSTRUCT(atttuple);
+
+            if (!attform->attnotnull) {
+                /* Add a subcommand to make this one NOT NULL */
+                AlterTableCmd* cmd = makeNode(AlterTableCmd);
+
+                cmd->subtype = AT_SetNotNull;
+                cmd->name = pstrdup(NameStr(attform->attname));
+                tab->subcmds[AT_PASS_ADD_CONSTR] =
+                    lappend(tab->subcmds[AT_PASS_ADD_CONSTR], cmd);
+            }
+
+            ReleaseSysCache(atttuple);
+        }
+    }
+}
+
/*
* Attach each generated command to the proper place in the work queue.
* Note this could result in creation of entirely new work-queue entries.
@@ -14475,6 +16684,10 @@ static void AttachEachCommandInQueue(
cmd->subtype = AT_ReAddIndex;
tab->subcmds[AT_PASS_OLD_INDEX] = lappend(tab->subcmds[AT_PASS_OLD_INDEX], cmd);
+
+ if (tab->is_first_after) {
+ setPrimaryNotnull(rel->rd_id, (IndexStmt*)cmd->def, tab);
+ }
break;
case AT_AddConstraint:
Assert(IsA(cmd->def, Constraint));
@@ -15884,7 +18097,7 @@ static void ATExecSetTableSpaceForPartitionP2(AlteredTableInfo* tab, Relation re
case T_RangeVar: {
char objectType = RelationIsRelation(rel) ? PART_OBJ_TYPE_TABLE_PARTITION : PART_OBJ_TYPE_INDEX_PARTITION;
- partOid = partitionNameGetPartitionOid(rel->rd_id,
+ partOid = PartitionNameGetPartitionOid(rel->rd_id,
((RangeVar*)partition)->relname,
objectType,
AccessExclusiveLock,
@@ -15907,7 +18120,7 @@ static void ATExecSetTableSpaceForPartitionP2(AlteredTableInfo* tab, Relation re
((RangePartitionMap*)rel->partMap)->partitionKey,
rangePartDef->boundary);
partOid =
- partitionValuesGetPartitionOid(rel, rangePartDef->boundary, AccessExclusiveLock, true, false, false);
+ PartitionValuesGetPartitionOid(rel, rangePartDef->boundary, AccessExclusiveLock, true, false, false);
break;
}
default: {
@@ -15922,6 +18135,12 @@ static void ATExecSetTableSpaceForPartitionP2(AlteredTableInfo* tab, Relation re
if (!OidIsValid(partOid)) {
ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("The partition number is invalid or out-of-range")));
}
+
+ /* add INTERVAL_PARTITION_LOCK_SDEQUENCE here to avoid ADD INTERVAL PARTITION */
+ if (RELATION_IS_INTERVAL_PARTITIONED(rel)) {
+ LockPartitionObject(rel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK);
+ }
+
tab->partid = partOid;
}
@@ -18817,14 +21036,14 @@ void AlterRelationNamespaceInternal(
* Do nothing when there's nothing to do.
*/
if (!object_address_present(&thisobj, objsMoved)) {
- /*
- * Check relation name to ensure that it doesn't conflict with existing synonym.
- */
- if (!IsInitdb && GetSynonymOid(NameStr(classForm->relname), newNspOid, true) != InvalidOid) {
- ereport(ERROR,
- (errmsg("relation name is already used by an existing synonym in schema \"%s\"",
- get_namespace_name(newNspOid))));
- }
+ /*
+ * Check relation name to ensure that it doesn't conflict with existing synonym.
+ */
+ if (!IsInitdb && GetSynonymOid(NameStr(classForm->relname), newNspOid, true) != InvalidOid) {
+ ereport(ERROR,
+ (errmsg("relation name is already used by an existing synonym in schema \"%s\"",
+ get_namespace_name(newNspOid))));
+ }
/* check for duplicate name (more friendly than unique-index failure) */
if (get_relname_relid(NameStr(classForm->relname), newNspOid) != InvalidOid)
ereport(ERROR,
@@ -19570,31 +21789,19 @@ static void CheckListPartitionKeyType(FormData_pg_attribute* attrs, List* pos)
if (pos == NULL || attrs == NULL) {
ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("invalid list partiiton table definition")));
}
- Oid typeOid = InvalidOid;
- ListCell* lhead = pos->head;
- int location = lfirst_int(lhead);
- typeOid = attrs[location].atttypid;
-
- /* 1. Check datatype for head of partitionkey list */
- if (!CheckListPartitionKeyType(typeOid)) {
- list_free_ext(pos);
- ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("column %s cannot serve as a list partitioning column because of its datatype",
- NameStr(attrs[location].attname))));
- }
-
- /* 2. Check if datatype of partition keys are same */
-
+ int location;
ListCell* cell = NULL;
+
foreach (cell, pos) {
location = lfirst_int(cell);
- if (!can_coerce_type(1, &(attrs[location].atttypid), &typeOid, COERCION_IMPLICIT)) {
+ if (!CheckListPartitionKeyType(attrs[location].atttypid)) {
list_free_ext(pos);
ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("column %s cannot serve as a list partitioning column because of its datatype",
- NameStr(attrs[location].attname))));
+ errmsg("column %s cannot serve as a list partitioning column because of its datatype",
+ NameStr(attrs[location].attname))));
+
}
- }
+ }
}
static void CheckHashPartitionKeyType(FormData_pg_attribute* attrs, List* pos)
@@ -19870,44 +22077,71 @@ Oid GetPartkeyExprType(Oid* target_oid, int* target_mod)
return typcollation;
}
-static Const* GetListPartitionValue(Form_pg_attribute attrs, List* value, bool partkeyIsFunc)
+/*
+ * Append the coerced key values of one multi-key boundary (RowExpr) to
+ * result[boundId].  'keyPos' gives the attribute index of each partition
+ * key; each RowExpr member is coerced to its key's datatype and tagged with
+ * the key's collation.  On a non-const value, all lists built so far
+ * (indexes 0..boundId) and the result array are freed before erroring out.
+ */
+static void FillListPartitionValueList(List** result, RowExpr* row, const List* keyPos, FormData_pg_attribute* attrs,
+    int boundId)
+{
+    Const* targetExpr = NULL;
+    ListCell* keyCell = NULL;
+    ListCell* posCell = NULL;
+
+    forboth (keyCell, row->args, posCell, keyPos) {
+        /* transform the const to target datatype */
+        targetExpr = (Const*)GetTargetValue(&attrs[lfirst_int(posCell)], (Const*)lfirst(keyCell), false, false);
+        if (targetExpr == NULL) {
+            /* free each built list once (was result[boundId] repeatedly:
+             * double-free of one list and a leak of the others) */
+            for (int i = 0; i <= boundId; i++) {
+                list_free_ext(result[i]);
+            }
+            pfree_ext(result);
+            ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+                errmsg("partition key value must be const or const-evaluable expression")));
+        }
+        targetExpr->constcollid = attrs[lfirst_int(posCell)].attcollation;
+        result[boundId] = lappend(result[boundId], targetExpr);
+    }
+}
+
+/*
+ * Build one value-List per list-partition boundary (visible hunk portion).
+ * Single-key Consts, maxvalue (default partition) markers and multi-key
+ * RowExprs are each turned into a List of coerced Consts in result[i].
+ */
+static List** GetListPartitionValueLists(const List* keyPos, FormData_pg_attribute* attrs, List* value, bool partkeyIsFunc)
 {
-    Const* result = NULL;
-    Const* cell = NULL;
+    Node* cell = NULL;
     ListCell* valueCell = NULL;
     int count = 0;
     Const* targetExpr = NULL;
-    result = (Const*)palloc0(value->length * sizeof(Const));
+    List** result = (List**)palloc0(value->length * sizeof(List*));
     foreach(valueCell, value) {
-        cell = (Const*)lfirst(valueCell);
-
+        cell = (Node*)lfirst(valueCell);
+        if (IsA(cell, RowExpr)) { /* Multi-keys partition boundary values */
+            FillListPartitionValueList(result, (RowExpr*)cell, keyPos, attrs, count++);
+            continue;
+        }
         /* del with maxvalue */
-        if (cell->ismaxvalue) {
-            result[count].xpr.type = T_Const;
-            result[count].ismaxvalue = cell->ismaxvalue;
-
+        if (((Const*)cell)->ismaxvalue) {
+            Const* max = makeNode(Const);
+            max->xpr.type = T_Const;
+            max->ismaxvalue = true;
+            result[count] = lappend(result[count], max);
             count++;
             continue;
         }
         /* transform the const to target datatype */
-        targetExpr = (Const*)GetTargetValue(attrs, cell, false, partkeyIsFunc);
+        targetExpr = (Const*)GetTargetValue(&attrs[lfirst_int(keyPos->head)], (Const*)cell, false, partkeyIsFunc);
         if (targetExpr == NULL) {
+            /* free each built list once (was result[count] repeatedly:
+             * double-free of one list and a leak of the others) */
+            for (int i = 0; i <= count; i++) {
+                list_free_ext(result[i]);
+            }
             pfree_ext(result);
             ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
                 errmsg("partition key value must be const or const-evaluable expression")));
         }
-
-        result[count] = *targetExpr;
         if (partkeyIsFunc) {
             Oid target_oid = InvalidOid;
             int target_mod = -1;
-            result[count].constcollid = GetPartkeyExprType(&target_oid, &target_mod);
+            targetExpr->constcollid = GetPartkeyExprType(&target_oid, &target_mod);
         } else {
-            result[count].constcollid = attrs->attcollation;
+            targetExpr->constcollid = attrs[lfirst_int(keyPos->head)].attcollation;
         }
-
+        result[count] = lappend(result[count], targetExpr);
         count++;
     }
@@ -20036,6 +22270,13 @@ Node* GetTargetValue(Form_pg_attribute attrs, Const* src, bool isinterval, bool
}
} break;
+ case T_RelabelType: {
+ expr = (Node*)(((RelabelType*)expr)->arg);
+ if (T_Const == nodeTag((Node*)expr)) {
+ target_expr = expr;
+ }
+ } break;
+
default:
target_expr = NULL;
break;
@@ -20077,10 +22318,14 @@ static bool ConfirmTypeInfo(Oid* target_oid, int* target_mod, Const* src, Form_p
return true;
}
-static void ReportListPartitionIntersect(const List* partitionList, Const* value[], int idxFirst, int idxSecond)
+static void ReportListPartitionIntersect(const List* partitionList, List** values[], int partValueLen[],
+ int idxFirst, int idxSecond)
{
for (int e = 0; e < partitionList->length; ++e) {
- pfree_ext(value[e]);
+ for (int f = 0; f < partValueLen[e]; ++f) {
+ list_free_ext(values[e][f]);
+ }
+ pfree_ext(values[e]);
}
int i = 0;
ListCell* cell;
@@ -20112,81 +22357,168 @@ static void ReportListPartitionIntersect(const List* partitionList, Const* value
}
}
+/* Each Const in RowExpr corresponds to a partition key column. Check length and datatype.
+ *
+ * Errors out if the RowExpr arity differs from the number of partition keys,
+ * or if any member cannot be implicitly coerced to its key's datatype.
+ * NOTE(review): part_def_list is freed before the datatype ereport but not
+ * before the arity one - presumably harmless (ereport aborts the memory
+ * context), but confirm the asymmetry is intentional. */
+static void sqlcmd_check_list_partition_rowexpr_bound(RowExpr *bound, const List *key_pos, FormData_pg_attribute *attrs,
+    List *part_def_list, char* part_name)
+{
+    if (list_length(bound->args) != list_length(key_pos)) {
+        ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+            errmsg("Invalid partition values"),
+            errdetail("list partition values in \"%s\" does not match the number of partition keys", part_name)));
+    }
+
+    ListCell* keyvalue_cell = NULL;
+    ListCell* keypos_cell = NULL;
+    /* check multi-keys partition boundary values datatype */
+    forboth (keyvalue_cell, bound->args, keypos_cell, key_pos) {
+        Const* key_value = (Const*)lfirst(keyvalue_cell);
+        FormData_pg_attribute key_pg_attr = attrs[lfirst_int(keypos_cell)];
+        if (!can_coerce_type(1, &key_value->consttype, &key_pg_attr.atttypid, COERCION_IMPLICIT)) {
+            list_free_ext(part_def_list);
+            ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH),
+                errmsg("list partition value in \"%s\" does not match datatype of partition key \"%s\"",
+                    part_name, NameStr(key_pg_attr.attname))));
+        }
+    }
+}
+
+/* Check the boundary length and datatype of each list partition.
+ *
+ * Walks every boundary value of 'part_def':
+ *  - RowExpr bounds are delegated to sqlcmd_check_list_partition_rowexpr_bound;
+ *  - a maxvalue Const marks the DEFAULT partition: it must be the only bound
+ *    of its partition, and only one default is allowed table-wide
+ *    (*has_default is the cross-partition flag, owned by the caller);
+ *  - plain Consts are only legal for single-key partitioning and must be
+ *    implicitly coercible to the first (only) key's datatype.
+ */
+static void sqlcmd_check_list_partition_boundary(ListPartitionDefState *part_def, const List *key_pos,
+    FormData_pg_attribute *attrs, List *part_def_list, bool* has_default)
+{
+    List *boundaries = part_def->boundary;
+    char* part_name = part_def->partitionName;
+    Const* key_value_const = NULL;
+    Oid first_key_type = attrs[linitial_int(key_pos)].atttypid;
+
+    foreach_cell(bound_cell, boundaries) {
+        Node* bound = (Node*)lfirst(bound_cell);
+        /* check multi-keys partition boundary values */
+        if (IsA(bound, RowExpr)) {
+            sqlcmd_check_list_partition_rowexpr_bound((RowExpr*)bound, key_pos, attrs, part_def_list, part_name);
+            continue;
+        }
+
+        key_value_const = (Const *)bound;
+        if (key_value_const->ismaxvalue) {
+            /* default partition boundary can only have one max Const */
+            if (list_length(boundaries) != 1) {
+                ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+                    errmsg("The default partition can have only one boundary value.")));
+            }
+            /* Cannot have two default partition */
+            if (*has_default) {
+                ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+                    errmsg("Partition table has multiple default partitions")));
+            }
+            *has_default = true;
+            continue;
+        }
+        /* check single-key partition */
+        if (list_length(key_pos) != 1) {
+            ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+                errmsg("Invalid partition values"),
+                errdetail("list partition values in \"%s\" does not match the number of partition keys", part_name)));
+        }
+        /* check single-key partition boundary value datatype */
+        if (!can_coerce_type(1, &key_value_const->consttype, &first_key_type, COERCION_IMPLICIT)) {
+            list_free_ext(part_def_list);
+            ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH),
+                errmsg("list partition value in \"%s\" does not match datatype of partition key \"%s\"",
+                    part_name, NameStr(attrs[linitial_int(key_pos)].attname))));
+        }
+    }
+}
+
+/*
+ * Detect duplicate boundary value-sets within one partition: compares bound
+ * 'bound_idx' of partition 'part_idx' against every earlier bound of the
+ * same partition and errors (via ReportListPartitionIntersect, which also
+ * frees all value lists) when one matches element-for-element.
+ * NOTE(review): forboth also leaves c1 == NULL when the two lists have equal
+ * prefixes but the compared lists run out together; bounds of differing
+ * length are assumed not to reach here - confirm with the callers.
+ */
+static void sqlcmd_check_list_partition_have_duplicate_values(List** key_values_array[], int part_idx, int bound_idx,
+    List *partition_list, int part_value_len[])
+{
+    ListCell* c1 = NULL;
+    ListCell* c2 = NULL;
+    for (int k = 0; k < bound_idx; ++k) {
+        forboth (c1, key_values_array[part_idx][bound_idx], c2, key_values_array[part_idx][k]) {
+            if (ConstCompareWithNull((Const*)lfirst(c1), (Const*)lfirst(c2)) != 0) {
+                break;
+            }
+        }
+        /* All key values are equal, the key value set is duplicate. */
+        if (c1 == NULL) {
+            ReportListPartitionIntersect(
+                partition_list, key_values_array, part_value_len, part_idx, part_idx);
+        }
+    }
+}
+
+/*
+ * Compare one boundary value-set of partition p1 against one of partition
+ * p2; if every corresponding Const compares equal the partitions overlap and
+ * ReportListPartitionIntersect errors out (freeing all value lists).  A
+ * maxvalue on either side (the DEFAULT partition) never overlaps - the
+ * Assert documents that both sides cannot be maxvalue, since only one
+ * default partition is allowed.
+ */
+static void sqlcmd_check_two_list_partition_values_overlapped(List** key_values_array[], int p1_idx, int b1_idx,
+    int p2_idx, int b2_idx, List *partition_list, int part_value_len[])
+{
+    ListCell* c1 = NULL;
+    ListCell* c2 = NULL;
+    Const* con1 = NULL;
+    Const* con2 = NULL;
+    forboth (c1, key_values_array[p1_idx][b1_idx], c2, key_values_array[p2_idx][b2_idx]) {
+        con1 = (Const*)lfirst(c1);
+        con2 = (Const*)lfirst(c2);
+        if (con1->ismaxvalue || con2->ismaxvalue) {
+            Assert(!(con1->ismaxvalue && con2->ismaxvalue));
+            break;
+        }
+        if (ConstCompareWithNull(con1, con2) != 0) {
+            break;
+        }
+    }
+    /* All key values are equal, the key value set is duplicate. */
+    if (c1 == NULL) {
+        ReportListPartitionIntersect(partition_list, key_values_array, part_value_len, p2_idx, p1_idx);
+    }
+}
+
+/*
+ * Check boundary 'bound_idx' of partition 'part_idx' against every boundary
+ * of every earlier partition (indexes < part_idx); errors on the first
+ * overlap found.  part_value_len[m] is the number of boundaries partition m
+ * has.
+ */
+static void sqlcmd_check_list_value_overlapped_with_others(List** key_values_array[], int part_idx, int bound_idx,
+    List *partition_list, int part_value_len[])
+{
+    for (int m = 0; m < part_idx; ++m) {
+        for (int n = 0; n < part_value_len[m]; ++n) {
+            sqlcmd_check_two_list_partition_values_overlapped(
+                key_values_array, part_idx, bound_idx, m, n, partition_list, part_value_len);
+        }
+    }
+}
+
void CompareListValue(const List* pos, FormData_pg_attribute* attrs, List *partitionList, bool partkeyIsFunc)
{
    if (pos == NULL || attrs == NULL) {
-        ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("invalid list partiiton table definition")));
+        ereport(ERROR,
+            (errcode(ERRCODE_INVALID_OPERATION), errmsg("invalid list partition table definition")));
    }
-    Oid typeOid = InvalidOid;
-    ListCell* lhead = pos->head;
-    int location = lfirst_int(lhead);
-    typeOid = attrs[location].atttypid;
-
-    List* partValue = NIL;
-    ListCell* valueCell = NULL;
-    ListCell* cell = NULL;
-    Const* valueArray[partitionList->length]; // save all the list partion values.
-    int partValueLen[partitionList->length]; // save list length of each partitionList
-    int partListIdx = 0;
-    int partListDefaultPartNum = 0;
+    List** values_array[partitionList->length]; // save all the list partition values.
+    int part_value_len[partitionList->length]; // save list length of each partitionList
+    int part_list_idx = 0;
+    bool has_default = false;
    /* Check if datatype of values ars consistent with partition keys' */
-    foreach (cell, partitionList) {
-        partValue = ((ListPartitionDefState*)lfirst(cell))->boundary;
-        foreach (valueCell, partValue) {
-            if (((Const *)lfirst(valueCell))->ismaxvalue) {
-                if (partValue->length != 1) {
-                    ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
-                        errmsg("The default partition can have only one boundary value.")));
-                }
-                partListDefaultPartNum++;
-                if (partListDefaultPartNum > 1) {
-                    ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
-                        errmsg("Partition table has multiple default partitions")));
-                }
-                continue;
-            }
-            if (!can_coerce_type(1, &(((Const*)lfirst(valueCell))->consttype), &typeOid, COERCION_IMPLICIT)) {
-                list_free_ext(partitionList);
-                ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH),
-                    errmsg("cannot serve %s as a list partitioning column because of its datatype",
-                        ((ListPartitionDefState*)lfirst(cell))->partitionName)));
-            }
-        }
-        partValueLen[partListIdx] = partValue->length;
-        valueArray[partListIdx] = GetListPartitionValue(&attrs[location], partValue, partkeyIsFunc);
-        ++partListIdx;
+    foreach_cell(cell, partitionList) {
+        ListPartitionDefState *part_def = (ListPartitionDefState*)lfirst(cell);
+        sqlcmd_check_list_partition_boundary(part_def, pos, attrs, partitionList, &has_default);
+        part_value_len[part_list_idx] = list_length(part_def->boundary);
+        values_array[part_list_idx] = GetListPartitionValueLists(pos, attrs, part_def->boundary, partkeyIsFunc);
+        ++part_list_idx;
    }
    /* Check if list partition has intersect values */
    /* XXX need a better way to reduce time complexity */
-    int compare = 0;
    for (int i = 0; i < partitionList->length; ++i) {
-        for (int j = 0; j < partValueLen[i]; ++j) {
+        for (int j = 0; j < part_value_len[i]; ++j) {
            /* Check if value overlapped in same list */
-            for (int k = 0; k < j; ++k) {
-                constCompare(valueArray[i] + j, valueArray[i] + k, compare);
-                if (compare == 0) {
-                    ReportListPartitionIntersect(partitionList, valueArray, i, i);
-                }
-            }
+            sqlcmd_check_list_partition_have_duplicate_values(values_array, i, j, partitionList, part_value_len);
            /* Check if value overlapped in different list */
-            for (int m = 0; m < i; ++m) {
-                for (int n = 0; n < partValueLen[m]; ++n) {
-                    Assert(!((valueArray[i] + j)->ismaxvalue && (valueArray[m] + n)->ismaxvalue));
-                    if ((valueArray[i] + j)->ismaxvalue || (valueArray[m] + n)->ismaxvalue) {
-                        continue;
-                    }
-                    constCompare(valueArray[i] + j, valueArray[m] + n, compare);
-                    if (compare == 0) {
-                        ReportListPartitionIntersect(partitionList, valueArray, m, i);
-                    }
-                }
-            }
+            sqlcmd_check_list_value_overlapped_with_others(values_array, i, j, partitionList, part_value_len);
        }
    }
+    /* free the per-boundary value lists, then each partition's list array */
    for (int e = 0; e < partitionList->length; ++e) {
-        pfree_ext(valueArray[e]);
+        for (int f = 0; f < part_value_len[e]; ++f) {
+            list_free_ext(values_array[e][f]);
+        }
+        pfree_ext(values_array[e]);
    }
}
@@ -20321,6 +22653,10 @@ static void ATPrepDropPartition(Relation rel)
errcause("DROP PARTITION works on a NON-PARTITIONED table"),
erraction("Please check DDL syntax for \"DROP PARTITION\"")));
}
+
+ if (rel->partMap->type == PART_TYPE_HASH) {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Droping hash partition is unsupported.")));
+ }
}
static void ATPrepDropSubPartition(Relation rel)
@@ -20332,6 +22668,15 @@ static void ATPrepDropSubPartition(Relation rel)
errcause("DROP SUBPARTITION works on a NON-SUBPARTITIONED table"),
erraction("Please check DDL syntax for \"DROP SUBPARTITION\"")));
}
+
+ char subparttype = PART_STRATEGY_INVALID;
+ RelationGetSubpartitionInfo(rel, &subparttype, NULL, NULL);
+ if (subparttype == PART_STRATEGY_HASH) {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Un-support feature"),
+ errdetail("The syntax is unsupported for hash subpartition"),
+ errcause("Try DROP SUBPARTITION on a hash-subpartitioned table"),
+ erraction("Please check DDL syntax for \"DROP SUBPARTITION\"")));
+ }
}
static void ATPrepUnusableIndexPartition(Relation rel)
@@ -20500,29 +22845,53 @@ static void ATPrepSplitSubPartition(Relation rel)
}
}
+/*
+ * Precheck for ALTER TABLE ... RESET PARTITION: the target relation must be
+ * a partitioned table, otherwise raise ERRCODE_WRONG_OBJECT_TYPE.
+ */
+static void ATPrepResetPartitionno(Relation rel)
+{
+    if (!RELATION_IS_PARTITIONED(rel)) {
+        ereport(ERROR,
+            (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not reset partitionno against NON-PARTITIONED table")));
+    }
+}
+
+/*
+ * ALTER TABLE ... ADD PARTITION for range/list partitioned tables.
+ * Assigns a fresh partitionno to each new partition (and sequential numbers
+ * to its subpartition definitions), verifies that the definition node kind
+ * matches the table's partition strategy, creates the catalog entries, and
+ * finally advances the stored partitionno counter.
+ */
static void ATExecAddPartition(Relation rel, AddPartitionState *partState)
{
    Assert(RELATION_IS_PARTITIONED(rel));
+    /* NOTE(review): the stored partitionno appears to be kept negated -- it is
+     * negated on read here and negated again on write below; confirm convention. */
+    int partitionno = -GetCurrentPartitionNo(RelOidGetPartitionTupleid(rel->rd_id));
+    Assert(PARTITIONNO_IS_VALID(partitionno));
+
+    /* number the new partitions after the current maximum; subpartitions restart at 1 */
+    ListCell* cell = NULL;
+    ListCell* subcell = NULL;
+    foreach (cell, partState->partitionList) {
+        partitionno++;
+        PartitionDefState* partitionDefState = (PartitionDefState*)lfirst(cell);
+        partitionDefState->partitionno = partitionno;
+        int subpartitionno = 0;
+        foreach(subcell, partitionDefState->subPartitionDefState) {
+            subpartitionno++;
+            PartitionDefState* subpartitionDefState = (PartitionDefState*)lfirst(subcell);
+            subpartitionDefState->partitionno = subpartitionno;
+        }
+    }
+
    if (rel->partMap->type == PART_TYPE_LIST) {
-        if (IsA(linitial(partState->partitionList), ListPartitionDefState)) {
-            ATExecAddListPartition(rel, partState);
-        } else {
-            ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                errmsg("can not add none-list partition to list partition table")));
+        if (!IsA(linitial(partState->partitionList), ListPartitionDefState)) {
+            ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                errmsg("can not add none-list partition to list partition table")));
        }
    } else if (rel->partMap->type == PART_TYPE_HASH) {
        ereport(ERROR,
            (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not add hash partition")));
    } else {
-        if (IsA(linitial(partState->partitionList), RangePartitionDefState)) {
-            ATExecAddRangePartition(rel, partState);
-        } else {
-            ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                errmsg("can not add none-range partition to range partition table")));
+        if (!IsA(linitial(partState->partitionList), RangePartitionDefState)) {
+            ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                errmsg("can not add none-range partition to range partition table")));
        }
-
    }
+
+    ATExecAddPartitionInternal(rel, partState);
+    /* in-place update on the partitioned table: the wait_clean_gpi info is
+     * itself updated in place and must not be overwritten by a normal update */
+    UpdateCurrentPartitionNo(RelOidGetPartitionTupleid(rel->rd_id), -partitionno, true);
}
/* check tablespace permission for add partition/subpartition */
@@ -20530,40 +22899,11 @@ static void CheckTablespaceForAddPartition(Relation rel, List *partDefStateList)
{
ListCell *cell = NULL;
foreach (cell, partDefStateList) {
- switch (nodeTag(lfirst(cell))) {
- case T_RangePartitionDefState:
- {
- RangePartitionDefState *partDef = (RangePartitionDefState*)lfirst(cell);
- if (PointerIsValid(partDef->tablespacename)) {
- CheckPartitionTablespace(partDef->tablespacename, rel->rd_rel->relowner);
- }
- CheckTablespaceForAddPartition(rel, partDef->subPartitionDefState);
- break;
- }
- case T_ListPartitionDefState:
- {
- ListPartitionDefState *partDef = (ListPartitionDefState*)lfirst(cell);
- if (PointerIsValid(partDef->tablespacename)) {
- CheckPartitionTablespace(partDef->tablespacename, rel->rd_rel->relowner);
- }
- CheckTablespaceForAddPartition(rel, partDef->subPartitionDefState);
- break;
- }
- case T_HashPartitionDefState:
- {
- HashPartitionDefState *partDef = (HashPartitionDefState*)lfirst(cell);
- if (PointerIsValid(partDef->tablespacename)) {
- CheckPartitionTablespace(partDef->tablespacename, rel->rd_rel->relowner);
- }
- CheckTablespaceForAddPartition(rel, partDef->subPartitionDefState);
- break;
- }
- default:
- ereport(ERROR, (errmodule(MOD_COMMAND), errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Unknown PartitionDefState for ADD PARTITION"),
- errdetail("N/A"), errcause("The partition type is incorrect."),
- erraction("Use the correct partition type.")));
+ PartitionDefState *partDef = (PartitionDefState*)lfirst(cell);
+ if (PointerIsValid(partDef->tablespacename)) {
+ CheckPartitionTablespace(partDef->tablespacename, rel->rd_rel->relowner);
}
+ CheckTablespaceForAddPartition(rel, partDef->subPartitionDefState);
}
}
@@ -20608,15 +22948,59 @@ static void CheckPartitionNameConflictForAddPartition(List *newPartitionNameList
}
}
+/*
+ * This function is used to find an existing list partition by the new boundary.
+ * Used for adding a list partition syntax, for example:
+ * 'ADD PARTITION VALUES (listValueList)' or 'SPLIT PARTITION VALUES (expr_list)'
+ * Returns the matching partition's Oid, or InvalidOid when nothing matches.
+ */
+static Oid FindPartOidByListBoundary(Relation rel, ListPartitionMap *partMap, Node* boundKey)
+{
+    List *partKeyValueList = NIL;
+    Oid res;
+    if (IsA(boundKey, RowExpr)) { /* Multi-keys partition boundary values */
+        partKeyValueList = transformConstIntoTargetType(
+            rel->rd_att->attrs, partMap->partitionKey, ((RowExpr*)boundKey)->args);
+        res = PartitionValuesGetPartitionOid(rel, partKeyValueList, AccessShareLock, false, true, false);
+        list_free_ext(partKeyValueList);
+        return res;
+    }
+
+    /* single-key case: the boundary is a single Const */
+    Const* con = (Const*)boundKey;
+    FormData_pg_attribute attr = rel->rd_att->attrs[partMap->partitionKey->values[0] - 1];
+
+    if (con->ismaxvalue) {
+        /*
+         * DEFAULT boundary of a list partition has only one Const.
+         * So it cannot be used to PartitionValuesGetPartitionOid for multi-keys partition.
+         * Just return default partition Oid.
+         */
+        for (int i = 0; i < partMap->listElementsNum; i++) {
+            ListPartElement *list = &partMap->listElements[i];
+            if (list->boundary[0].values[0]->ismaxvalue) {
+                return list->partitionOid;
+            }
+        }
+        return InvalidOid;
+    }
+    /* a NULL result means the value could not be reduced to a Const */
+    con = (Const*)GetTargetValue(&attr, con, false);
+    if (!PointerIsValid(con)) {
+        ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+            errmsg("partition key value must be const or const-evaluable expression")));
+    }
+    /* inherit the partition key column's collation when the value has none */
+    if (!OidIsValid(con->constcollid) && OidIsValid(attr.attcollation)) {
+        con->constcollid = attr.attcollation;
+    }
+    partKeyValueList = list_make1(con);
+    res = PartitionValuesGetPartitionOid(rel, partKeyValueList, AccessShareLock, false, true, false);
+    list_free_ext(partKeyValueList);
+    return res;
+}
+
static void CheckPartitionValueConflictForAddPartition(Relation rel, Node *partDefState)
{
Assert(IsA(partDefState, RangePartitionDefState) || IsA(partDefState, ListPartitionDefState));
- int i;
- int j;
ListCell *cell = NULL;
- Const *curBound = NULL;
- Const *val = NULL;
List *partKeyValueList = NIL;
Oid existingPartOid = InvalidOid;
@@ -20626,42 +23010,31 @@ static void CheckPartitionValueConflictForAddPartition(Relation rel, Node *partD
if (nodeTag(partDefState) == T_RangePartitionDefState) {
RangePartitionDefState *partDef = (RangePartitionDefState *)partDefState;
RangePartitionMap *partMap = (RangePartitionMap *)rel->partMap;
- curBound = (Const *)copyObject(partMap->rangeElements[partNum - 1].boundary[0]);
- val = partDef->curStartVal;
+ Const *curBound = (Const *)copyObject(partMap->rangeElements[partNum - 1].boundary[0]);
+ Const *val = partDef->curStartVal;
if (!curBound->ismaxvalue && val != NULL && partitonKeyCompare(&val, &curBound, 1) != 0) {
ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
errmsg("start value of partition \"%s\" NOT EQUAL up-boundary of last partition.",
partDef->partitionInitName ? partDef->partitionInitName : partDef->partitionName)));
}
- partKeyValueList = transformConstIntoTargetType(rel->rd_att->attrs, partMap->partitionKey, partDef->boundary);
+ partKeyValueList = transformConstIntoTargetType(rel->rd_att->attrs,
+ partMap->partitionKey, partDef->boundary);
pfree_ext(curBound);
- } else {
- ListPartitionDefState *partDef = (ListPartitionDefState *)partDefState;
- ListPartitionMap *partMap = (ListPartitionMap *)rel->partMap;
- for (i = 0; i < partNum; i++) {
- for (j = 0; j < partMap->listElements[i].len; j++) {
- curBound = partMap->listElements[i].boundary[j];
- foreach (cell, partDef->boundary) {
- val = (Const *)lfirst(cell);
- if (partitonKeyCompare(&curBound, &val, 1) == 0) {
- ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("list boundary of adding partition MUST NOT overlap with existing partition")));
- }
- }
- }
- }
- partKeyValueList =
- transformIntoTargetType(rel->rd_att->attrs, partMap->partitionKey->values[0], partDef->boundary);
- }
-
- existingPartOid = partitionValuesGetPartitionOid(rel, partKeyValueList, AccessShareLock, false, true, false);
- if (OidIsValid(existingPartOid)) {
- if (rel->partMap->type == PART_TYPE_RANGE) {
+ existingPartOid = PartitionValuesGetPartitionOid(rel, partKeyValueList, AccessShareLock, false, true, false);
+ list_free_ext(partKeyValueList);
+ if (OidIsValid(existingPartOid)) {
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("upper boundary of adding partition MUST overtop last existing partition")));
- } else {
- ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("list boundary of adding partition MUST NOT overlap with existing partition")));
+ }
+ } else {
+ ListPartitionDefState *partDef = (ListPartitionDefState *)partDefState;
+
+ foreach (cell, partDef->boundary) {
+ existingPartOid = FindPartOidByListBoundary(rel, (ListPartitionMap *)rel->partMap, (Node*)lfirst(cell));
+ if (OidIsValid(existingPartOid)) {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("list boundary of adding partition MUST NOT overlap with existing partition")));
+ }
}
}
@@ -20847,174 +23220,6 @@ static void CheckForAddSubPartition(Relation rel, Relation partrel, List *subpar
}
}
-static void ATExecAddListPartition(Relation rel, AddPartitionState *partState)
-{
- Relation pgPartRel = NULL;
- Oid newPartOid = InvalidOid;
- List *newSubpartOidList = NIL;
- Datum new_reloptions;
- Datum rel_reloptions;
- HeapTuple tuple;
- bool isnull = false;
- List* old_reloptions = NIL;
- ListCell* cell = NULL;
- Oid bucketOid;
- Relation parentrel = NULL;
- char subparttype = PART_STRATEGY_INVALID;
- int2vector *subpartitionKey = NULL;
-
- /* if the relation is a partrel of a subpartition, here we get the relation first */
- if (RelationIsPartitionOfSubPartitionTable(rel)) {
- /* the lock of parentrel has been obtained already, seen in ATExecAddSubPartition */
- parentrel = heap_open(rel->parentId, NoLock);
- if (!RelationIsValid(parentrel)) {
- ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("missing relation for partition \"%s\"", rel->rd_rel->relname.data),
- errdetail("N/A"),
- errcause("Maybe the partition table is dropped"),
- erraction("Check system table 'pg_class' for more information")));
- }
- }
-
- ListPartitionDefState* partDef = NULL;
-
- /* step 1: Check before the actual work */
- if (RelationIsPartitionOfSubPartitionTable(rel)) {
- CheckForAddSubPartition(parentrel, rel, partState->partitionList);
- } else {
- CheckForAddPartition(rel, partState->partitionList);
- }
-
- bool* isTimestamptz = CheckPartkeyHasTimestampwithzone(rel);
- bool *isTimestamptzForSubPartKey = NULL;
- if (RelationIsSubPartitioned(rel)) {
- List *subpartKeyPosList = NIL;
- RelationGetSubpartitionInfo(rel, &subparttype, &subpartKeyPosList, &subpartitionKey);
- isTimestamptzForSubPartKey = CheckSubPartkeyHasTimestampwithzone(rel, subpartKeyPosList);
- list_free_ext(subpartKeyPosList);
- }
-
- pgPartRel = relation_open(PartitionRelationId, RowExclusiveLock);
-
- /* step 2: add new partition entry in pg_partition */
- /* TRANSFORM into target first */
- Oid relOid =
- RelationIsPartitionOfSubPartitionTable(rel) ? ObjectIdGetDatum(rel->parentId) : ObjectIdGetDatum(rel->rd_id);
- tuple = SearchSysCache1(RELOID, relOid);
- if (!HeapTupleIsValid(tuple))
- ereport(ERROR, (errmodule(MOD_COMMAND), errcode(ERRCODE_CACHE_LOOKUP_FAILED),
- errmsg("cache lookup failed for relation %u", relOid), errdetail("N/A"),
- errcause("System error."), erraction("Contact engineer to support.")));
- rel_reloptions = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, &isnull);
-
- old_reloptions = untransformRelOptions(rel_reloptions);
- RemoveRedisRelOptionsFromList(&old_reloptions);
- new_reloptions = transformRelOptions((Datum)0, old_reloptions, NULL, NULL, false, false);
- ReleaseSysCache(tuple);
-
- if (old_reloptions != NIL)
- list_free_ext(old_reloptions);
-
- bucketOid = RelationGetBucketOid(rel);
-
- List *partitionNameList =
- list_concat(GetPartitionNameList(partState->partitionList), RelationGetPartitionNameList(rel));
- foreach (cell, partState->partitionList) {
- partDef = (ListPartitionDefState*)lfirst(cell);
-
- PartitionState *partitionState = makeNode(PartitionState);
- partitionState->partitionStrategy = PART_STRATEGY_LIST;
- partitionState->partitionNameList = partitionNameList;
- if (RelationIsSubPartitioned(rel)) {
- partitionState->subPartitionState = makeNode(PartitionState);
- partitionState->subPartitionState->partitionStrategy = subparttype;
- if (partDef->subPartitionDefState == NIL) {
- Node *subPartitionDefState = MakeDefaultSubpartition(partitionState, (Node *)partDef);
- partDef->subPartitionDefState =
- lappend(partDef->subPartitionDefState, subPartitionDefState);
- }
- }
-
- newPartOid = HeapAddListPartition(pgPartRel,
- rel->rd_id,
- rel->rd_rel->reltablespace,
- bucketOid,
- partDef,
- rel->rd_rel->relowner,
- (Datum)new_reloptions,
- isTimestamptz,
- RelationGetStorageType(rel),
- subpartitionKey,
- RelationIsPartitionOfSubPartitionTable(rel));
-
- Oid partTablespaceOid =
- GetPartTablespaceOidForSubpartition(rel->rd_rel->reltablespace, partDef->tablespacename);
- newSubpartOidList = addNewSubPartitionTuplesForPartition(pgPartRel,
- newPartOid,
- partTablespaceOid,
- bucketOid,
- rel->rd_rel->relowner,
- (Datum)new_reloptions,
- isTimestamptzForSubPartKey,
- RelationGetStorageType(rel),
- partitionState,
- (Node *)partDef,
- AccessExclusiveLock);
-
- /* step 3: no need to update number of partitions in pg_partition */
- /*
- * We must bump the command counter to make the newly-created partition
- * tuple visible for opening.
- */
- CommandCounterIncrement();
-
- if (RelationIsColStore(rel)) {
- addCudescTableForNewPartition(rel, newPartOid);
- addDeltaTableForNewPartition(rel, newPartOid);
- }
-
- if (RelationIsPartitionOfSubPartitionTable(rel)) {
- addIndexForPartition(parentrel, newPartOid);
- addToastTableForNewPartition(rel, newPartOid, true);
- } else if (RelationIsSubPartitioned(rel)) {
- Assert(newSubpartOidList != NIL);
- Partition part = partitionOpen(rel, newPartOid, AccessExclusiveLock);
- Relation partrel = partitionGetRelation(rel, part);
- ListCell* lc = NULL;
- foreach (lc, newSubpartOidList) {
- Oid subpartOid = lfirst_oid(lc);
- addIndexForPartition(rel, subpartOid);
- addToastTableForNewPartition(partrel, subpartOid, true);
- }
- releaseDummyRelation(&partrel);
- partitionClose(rel, part, NoLock);
- } else {
- addIndexForPartition(rel, newPartOid);
- addToastTableForNewPartition(rel, newPartOid);
- }
-
- /* step 4: invalidate relation */
- if (RelationIsPartitionOfSubPartitionTable(rel)) {
- CacheInvalidateRelcache(parentrel);
- CacheInvalidatePartcacheByPartid(rel->rd_id);
- } else {
- CacheInvalidateRelcache(rel);
- }
- pfree_ext(partitionState->subPartitionState);
- pfree_ext(partitionState);
- }
-
- /* close relation, done */
- relation_close(pgPartRel, NoLock);
- pfree_ext(isTimestamptz);
- pfree_ext(isTimestamptzForSubPartKey);
- list_free_ext(partitionNameList);
-
- if (RelationIsPartitionOfSubPartitionTable(rel)) {
- heap_close(parentrel, NoLock);
- }
-}
-
/*
* @@GaussDB@@
* Target : data partition
@@ -21022,7 +23227,7 @@ static void ATExecAddListPartition(Relation rel, AddPartitionState *partState)
* Description :
* Notes :
*/
-static void ATExecAddRangePartition(Relation rel, AddPartitionState *partState)
+static void ATExecAddPartitionInternal(Relation rel, AddPartitionState *partState)
{
Relation pgPartRel = NULL;
Oid newPartOid = InvalidOid;
@@ -21037,22 +23242,14 @@ static void ATExecAddRangePartition(Relation rel, AddPartitionState *partState)
Relation parentrel = NULL;
char subparttype = PART_STRATEGY_INVALID;
int2vector *subpartitionKey = NULL;
+ PartitionDefState* partDef = NULL;
/* if the relation is a partrel of a subpartition, here we get the relation first */
if (RelationIsPartitionOfSubPartitionTable(rel)) {
/* the lock of parentrel has been obtained already, seen in ATExecAddSubPartition */
parentrel = heap_open(rel->parentId, NoLock);
- if (!RelationIsValid(parentrel)) {
- ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("missing relation for partition \"%s\"", rel->rd_rel->relname.data),
- errdetail("N/A"),
- errcause("Maybe the partition table is dropped"),
- erraction("Check system table 'pg_class' for more information")));
- }
}
- RangePartitionDefState* partDef = NULL;
-
/* step 1: Check before the actual work */
if (RelationIsPartitionOfSubPartitionTable(rel)) {
CheckForAddSubPartition(parentrel, rel, partState->partitionList);
@@ -21095,32 +23292,46 @@ static void ATExecAddRangePartition(Relation rel, AddPartitionState *partState)
List *partitionNameList =
list_concat(GetPartitionNameList(partState->partitionList), RelationGetPartitionNameList(rel));
foreach (cell, partState->partitionList) {
- partDef = (RangePartitionDefState*)lfirst(cell);
+ partDef = (PartitionDefState*)lfirst(cell);
PartitionState *partitionState = makeNode(PartitionState);
- partitionState->partitionStrategy = PART_STRATEGY_RANGE;
+ partitionState->partitionStrategy = PartitionMapIsRange(rel) ? PART_STRATEGY_RANGE : PART_STRATEGY_LIST;
partitionState->partitionNameList = partitionNameList;
if (RelationIsSubPartitioned(rel)) {
partitionState->subPartitionState = makeNode(PartitionState);
partitionState->subPartitionState->partitionStrategy = subparttype;
if (partDef->subPartitionDefState == NIL) {
- Node *subPartitionDefState = MakeDefaultSubpartition(partitionState, (Node *)partDef);
+ Node *subPartitionDefState = MakeDefaultSubpartition(partitionState, (PartitionDefState *)partDef);
partDef->subPartitionDefState = lappend(partDef->subPartitionDefState, subPartitionDefState);
}
}
- newPartOid = heapAddRangePartition(pgPartRel,
- rel->rd_id,
- rel->rd_rel->reltablespace,
- bucketOid,
- partDef,
- rel->rd_rel->relowner,
- (Datum)new_reloptions,
- isTimestamptz,
- RelationGetStorageType(rel),
- AccessExclusiveLock,
- subpartitionKey,
- RelationIsPartitionOfSubPartitionTable(rel));
+ if (rel->partMap->type == PART_TYPE_LIST) {
+ newPartOid = HeapAddListPartition(pgPartRel,
+ rel->rd_id,
+ rel->rd_rel->reltablespace,
+ bucketOid,
+ (ListPartitionDefState *)partDef,
+ rel->rd_rel->relowner,
+ (Datum)new_reloptions,
+ isTimestamptz,
+ RelationGetStorageType(rel),
+ subpartitionKey,
+ RelationIsPartitionOfSubPartitionTable(rel));
+ } else {
+ newPartOid = heapAddRangePartition(pgPartRel,
+ rel->rd_id,
+ rel->rd_rel->reltablespace,
+ bucketOid,
+ (RangePartitionDefState *)partDef,
+ rel->rd_rel->relowner,
+ (Datum)new_reloptions,
+ isTimestamptz,
+ RelationGetStorageType(rel),
+ AccessExclusiveLock,
+ subpartitionKey,
+ RelationIsPartitionOfSubPartitionTable(rel));
+ }
Oid partTablespaceOid =
GetPartTablespaceOidForSubpartition(rel->rd_rel->reltablespace, partDef->tablespacename);
@@ -21195,21 +23406,47 @@ static void ATExecAddSubPartition(Relation rel, AddSubPartitionState *subpartSta
Assert(PointerIsValid(subpartState->partitionName));
Assert(RelationIsSubPartitioned(rel));
- Oid partOid = partitionNameGetPartitionOid(rel->rd_id,
+ /* get partoid and lock partition */
+ Oid partOid = PartitionNameGetPartitionOid(rel->rd_id,
subpartState->partitionName,
PART_OBJ_TYPE_TABLE_PARTITION,
- AccessExclusiveLock,
+ ShareUpdateExclusiveLock,
false,
false,
NULL,
NULL,
NoLock);
- Partition part = partitionOpen(rel, partOid, AccessExclusiveLock);
+ Partition part = partitionOpen(rel, partOid, NoLock);
Relation partrel = partitionGetRelation(rel, part);
AddPartitionState* partState = makeNode(AddPartitionState);
partState->partitionList = subpartState->subPartitionList;
- ATExecAddPartition(partrel, partState);
+
+ int subpartitionno = -GetCurrentSubPartitionNo(partOid);
+ Assert(PARTITIONNO_IS_VALID(subpartitionno));
+ ListCell* cell = NULL;
+ foreach (cell, partState->partitionList) {
+ subpartitionno++;
+ PartitionDefState* partitionDefState = (PartitionDefState*)lfirst(cell);
+ partitionDefState->partitionno = subpartitionno;
+ }
+
+ if (partrel->partMap->type == PART_TYPE_LIST) {
+ if (!IsA(linitial(partState->partitionList), ListPartitionDefState)) {
+ ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("can not add none-list subpartition to list subpartition table")));
+ }
+ } else if (partrel->partMap->type == PART_TYPE_HASH) {
+ ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not add hash subpartition")));
+ } else {
+ if (!IsA(linitial(partState->partitionList), RangePartitionDefState)) {
+ ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("can not add none-range subpartition to range subpartition table")));
+ }
+ }
+
+ ATExecAddPartitionInternal(partrel, partState);
+ UpdateCurrentSubPartitionNo(partOid, -subpartitionno);
releaseDummyRelation(&partrel);
partitionClose(rel, part, NoLock);
@@ -21326,22 +23563,28 @@ static void ATExecDropPartition(Relation rel, AlterTableCmd *cmd)
Partition part = NULL;
Relation partrel = NULL;
- if (rel->partMap->type == PART_TYPE_HASH) {
- ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Droping hash partition is unsupported.")));
- }
- /* getting the dropping partition's oid */
+ /* getting the dropping partition's oid, and lock partition */
partOid = GetPartOidByATcmd(rel, cmd, "DROP PARTITION");
+ /* add INTERVAL_PARTITION_LOCK_SDEQUENCE here to avoid ADD INTERVAL PARTITION */
+ if (RELATION_IS_INTERVAL_PARTITIONED(rel)) {
+ LockPartitionObject(rel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK);
+ }
+
/* check 1: check validity of partition oid */
if (!OidIsValid(partOid)) {
ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("The partition number is invalid or out-of-range")));
}
- /* get subpartOidList if is subpartition */
+ /* get subpartOidList if is subpartition, and lock subpartition */
if (RelationIsSubPartitioned(rel)) {
- part = partitionOpen(rel, partOid, AccessExclusiveLock);
+ part = partitionOpen(rel, partOid, NoLock);
partrel = partitionGetRelation(rel, part);
subpartOidList = relationGetPartitionOidList(partrel);
+ foreach (cell, subpartOidList) {
+ subpartOid = lfirst_oid(cell);
+ LockPartitionOid(partOid, subpartOid, AccessExclusiveLock);
+ }
}
/* check 2: can not drop the last existing partition */
@@ -21395,16 +23638,7 @@ static void ATExecDropSubPartition(Relation rel, AlterTableCmd *cmd)
Oid partOid = InvalidOid;
Oid subpartOid = InvalidOid;
- char subparttype = PART_STRATEGY_INVALID;
- RelationGetSubpartitionInfo(rel, &subparttype, NULL, NULL);
- if (subparttype == PART_STRATEGY_HASH) {
- ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Un-support feature"),
- errdetail("The syntax is unsupported for hash subpartition"),
- errcause("Try DROP SUBPARTITION on a hash-subpartitioned table"),
- erraction("Please check DDL syntax for \"DROP SUBPARTITION\"")));
- }
-
- /* getting the dropping subpartition's oid */
+ /* getting the dropping subpartition's oid, and lock subpartition */
subpartOid = GetSubpartOidByATcmd(rel, cmd, &partOid, "DROP SUBPARTITION");
/* check 1: check validity of partition oid */
@@ -21423,7 +23657,7 @@ static void ATExecDropSubPartition(Relation rel, AlterTableCmd *cmd)
erraction("Please check DDL syntax for \"DROP SUBPARTITION\"")));
}
- Partition part = partitionOpen(rel, partOid, AccessExclusiveLock);
+ Partition part = partitionOpen(rel, partOid, NoLock);
Relation partrel = partitionGetRelation(rel, part);
/* check 2: can not drop the last existing subpartition */
@@ -21452,7 +23686,7 @@ static Oid GetPartOidByATcmd(Relation rel, AlterTableCmd *cmd, const char *comma
/* FIRST IS the PARTITION (partname) branch */
if (PointerIsValid(cmd->name)) {
- partOid = partitionNameGetPartitionOid(rel->rd_id,
+ partOid = PartitionNameGetPartitionOid(rel->rd_id,
cmd->name,
PART_OBJ_TYPE_TABLE_PARTITION,
AccessExclusiveLock,
@@ -21489,7 +23723,7 @@ static Oid GetPartOidByATcmd(Relation rel, AlterTableCmd *cmd, const char *comma
errcause("Only range/list/hash/interval partitioned table is supported for %s", command),
erraction("Please check DDL syntax for \"%s\"", command)));
}
- partOid = partitionValuesGetPartitionOid(rel,
+ partOid = PartitionValuesGetPartitionOid(rel,
rangePartDef->boundary,
AccessExclusiveLock,
true,
@@ -21504,10 +23738,10 @@ static Oid GetSubpartOidByATcmd(Relation rel, AlterTableCmd *cmd, Oid *partOid,
/* FIRST IS the SUBPARTITION (subpartname) branch */
if (PointerIsValid(cmd->name)) {
- subpartOid = partitionNameGetPartitionOid(rel->rd_id,
+ subpartOid = SubPartitionNameGetSubPartitionOid(rel->rd_id,
cmd->name,
- PART_OBJ_TYPE_TABLE_SUB_PARTITION,
- AccessExclusiveLock,
+ ShareUpdateExclusiveLock, /* partition lock */
+ AccessExclusiveLock, /* subpartition lock */
false,
false,
NULL,
@@ -21564,10 +23798,11 @@ static Oid GetSubpartOidByATcmd(Relation rel, AlterTableCmd *cmd, Oid *partOid,
subpartitionKey,
subpartBoundary);
- subpartOid = subpartitionValuesGetSubpartitionOid(rel,
+ subpartOid = SubPartitionValuesGetSubPartitionOid(rel,
partBoundary,
subpartBoundary,
- AccessExclusiveLock,
+ ShareUpdateExclusiveLock, /* partition lock */
+ AccessExclusiveLock, /* subpartition lock */
true,
true, /* will check validity of partition oid next step */
false,
@@ -21700,7 +23935,7 @@ static void ATExecUnusableIndexPartition(Relation rel, const char* partition_nam
/* the AccessShareLock lock on heap relation is held by AlterTableLookupRelation(). */
/* getting the partition's oid, lock it the same time */
- indexPartOid = partitionNameGetPartitionOid(rel->rd_id,
+ indexPartOid = PartitionNameGetPartitionOid(rel->rd_id,
partition_name,
PART_OBJ_TYPE_INDEX_PARTITION,
AccessExclusiveLock, // lock on index partition
@@ -21783,7 +24018,7 @@ static void ATExecUnusableAllIndexOnPartition(Relation rel, const char* partitio
}
/* getting the partition's oid, lock it the same time */
- partOid = partitionNameGetPartitionOid(rel->rd_id,
+ partOid = PartitionNameGetPartitionOid(rel->rd_id,
partition_name,
PART_OBJ_TYPE_TABLE_PARTITION,
AccessExclusiveLock,
@@ -21994,6 +24229,16 @@ static void ATExecModifyRowMovement(Relation rel, bool rowMovement)
CommandCounterIncrement();
}
+/*
+ * ALTER TABLE ... RESET PARTITION: renumber the partitionno sequence of a
+ * partitioned table via RelationResetPartitionno.  While the cluster is still
+ * running with a pre-PARTITION_ENHANCE working version, only
+ * ShareUpdateExclusiveLock is taken; otherwise AccessExclusiveLock.
+ */
+static void ATExecResetPartitionno(Relation rel)
+{
+    Assert(RELATION_IS_PARTITIONED(rel));
+
+    /* NOTE(review): weaker lock during the upgrade window -- presumably to
+     * avoid blocking old-version backends; confirm the rationale */
+    bool isupgrade = (t_thrd.proc->workingVersionNum < PARTITION_ENHANCE_VERSION_NUM);
+    LOCKMODE relationlock = isupgrade ? ShareUpdateExclusiveLock : AccessExclusiveLock;
+
+    RelationResetPartitionno(rel->rd_id, relationlock);
+}
+
List* GetPartitionBoundary(Relation partTableRel, Node *PartDef)
{
List *boundary = NIL;
@@ -22091,6 +24336,15 @@ static Oid heap_truncate_one_part_new(const AlterTableCmd* cmd, Relation partRel
char* destPartitionName = NULL;
Oid destPartOid = AddTemporaryPartitionForAlterPartitions(cmd, partRel, srcPartOid, &renameTargetPart);
+ if (RelationIsPartitionOfSubPartitionTable(partRel)) {
+ int subpartitionno = GetCurrentSubPartitionNo(srcPartOid);
+ PARTITIONNO_VALID_ASSERT(subpartitionno);
+ UpdateCurrentSubPartitionNo(destPartOid, subpartitionno);
+ } else {
+ int partitionno = GetCurrentPartitionNo(srcPartOid);
+ PARTITIONNO_VALID_ASSERT(partitionno);
+ UpdateCurrentPartitionNo(destPartOid, partitionno, false);
+ }
List* indexList = NULL;
if (RelationIsPartitionOfSubPartitionTable(partRel) && RelationIsValid(rel)) {
@@ -22130,12 +24384,15 @@ static void ATExecTruncatePartitionForSubpartitionTable(Relation rel, Oid partOi
List *subPartOidList = relationGetPartitionOidList(partRel);
ListCell *subPartOidCell = NULL;
+ Oid subPartOid = InvalidOid;
+ foreach (subPartOidCell, subPartOidList) {
+ subPartOid = lfirst_oid(subPartOidCell);
+ LockPartitionOid(partOid, subPartOid, AccessExclusiveLock);
+ }
if (!cmd->alterGPI) {
// Unusable Global Index
ATUnusableGlobalIndex(rel);
- } else {
- AlterPartitionedSetWaitCleanGPI(cmd->alterGPI, rel, partOid);
}
foreach (subPartOidCell, subPartOidList) {
Oid subPartOid = lfirst_oid(subPartOidCell);
@@ -22144,7 +24401,7 @@ static void ATExecTruncatePartitionForSubpartitionTable(Relation rel, Oid partOi
AlterSubPartitionedSetWaitCleanGPI(cmd->alterGPI, rel, partOid, subPartOid);
}
- if (!hasGPI || RelationIsColStore(rel)) {
+ if (!cmd->alterGPI || !hasGPI || RelationIsColStore(rel)) {
heap_truncate_one_part(partRel, subPartOid);
} else {
heap_truncate_one_part_new(cmd, partRel, subPartOid, rel);
@@ -22214,7 +24471,7 @@ static void ATExecTruncatePartition(Relation rel, AlterTableCmd* cmd)
* 2. Get partition oid values clause
*/
if (PointerIsValid(cmd->name)) {
- partOid = partitionNameGetPartitionOid(rel->rd_id,
+ partOid = PartitionNameGetPartitionOid(rel->rd_id,
cmd->name,
PART_OBJ_TYPE_TABLE_PARTITION,
AccessExclusiveLock,
@@ -22224,7 +24481,7 @@ static void ATExecTruncatePartition(Relation rel, AlterTableCmd* cmd)
NULL,
NoLock);
if (newTableRel) {
- newPartOid = partitionNameGetPartitionOid(newTableRel->rd_id,
+ newPartOid = PartitionNameGetPartitionOid(newTableRel->rd_id,
cmd->name,
PART_OBJ_TYPE_TABLE_PARTITION,
AccessExclusiveLock,
@@ -22236,14 +24493,14 @@ static void ATExecTruncatePartition(Relation rel, AlterTableCmd* cmd)
}
} else {
List *boundary = GetPartitionBoundary(rel, cmd->def);
- partOid = partitionValuesGetPartitionOid(rel,
+ partOid = PartitionValuesGetPartitionOid(rel,
boundary,
AccessExclusiveLock,
true,
true, /* will check validity of partition oid next step */
false);
if (newTableRel) {
- newPartOid = partitionValuesGetPartitionOid(newTableRel,
+ newPartOid = PartitionValuesGetPartitionOid(newTableRel,
boundary,
AccessExclusiveLock,
true,
@@ -22256,6 +24513,11 @@ static void ATExecTruncatePartition(Relation rel, AlterTableCmd* cmd)
ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("The partition number is invalid or out-of-range")));
}
+    /* take INTERVAL_PARTITION_LOCK_SDEQUENCE here to block concurrent ADD INTERVAL PARTITION */
+ if (RELATION_IS_INTERVAL_PARTITIONED(rel)) {
+ LockPartitionObject(rel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK);
+ }
+
if (RelationIsSubPartitioned(rel)) {
ATExecTruncatePartitionForSubpartitionTable(rel, partOid, cmd, hasGPI);
return;
@@ -22268,7 +24530,7 @@ static void ATExecTruncatePartition(Relation rel, AlterTableCmd* cmd)
AlterPartitionedSetWaitCleanGPI(cmd->alterGPI, rel, partOid);
}
- if (!hasGPI || RelationIsColStore(rel)) {
+ if (!cmd->alterGPI || !hasGPI || RelationIsColStore(rel)) {
heap_truncate_one_part(rel, partOid);
} else {
destPartOid = heap_truncate_one_part_new(cmd, rel, partOid);
@@ -22329,9 +24591,9 @@ static void ATExecTruncateSubPartition(Relation rel, AlterTableCmd* cmd)
*/
Oid partOid = InvalidOid;
if (PointerIsValid(cmd->name)) {
- subPartOid = partitionNameGetPartitionOid(rel->rd_id,
+ subPartOid = SubPartitionNameGetSubPartitionOid(rel->rd_id,
cmd->name,
- PART_OBJ_TYPE_TABLE_SUB_PARTITION,
+ ShareUpdateExclusiveLock,
AccessExclusiveLock,
false,
false,
@@ -22347,18 +24609,17 @@ static void ATExecTruncateSubPartition(Relation rel, AlterTableCmd* cmd)
ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("The subpartition name is invalid")));
}
- Partition part = partitionOpen(rel, partOid, AccessExclusiveLock);
+ Partition part = partitionOpen(rel, partOid, NoLock);
Relation partRel = partitionGetRelation(rel, part);
if (!cmd->alterGPI) {
// Unusable Global Index
ATUnusableGlobalIndex(rel);
} else {
- AlterPartitionedSetWaitCleanGPI(cmd->alterGPI, rel, partOid);
AlterSubPartitionedSetWaitCleanGPI(cmd->alterGPI, rel, partOid, subPartOid);
}
- if (!hasGPI || RelationIsColStore(rel)) {
+ if (!cmd->alterGPI || !hasGPI || RelationIsColStore(rel)) {
heap_truncate_one_part(partRel, subPartOid);
} else {
heap_truncate_one_part_new(cmd, partRel, subPartOid, rel);
@@ -22366,7 +24627,7 @@ static void ATExecTruncateSubPartition(Relation rel, AlterTableCmd* cmd)
pgstat_report_truncate(subPartOid, rel->rd_id, rel->rd_rel->relisshared);
releaseDummyRelation(&partRel);
- partitionClose(rel, part, AccessExclusiveLock);
+ partitionClose(rel, part, NoLock);
#ifdef ENABLE_MULTIPLE_NODES
if (unlikely(RelationIsTsStore(rel) && OidIsValid(RelationGetDeltaRelId(rel))) && IS_PGXC_DATANODE) {
@@ -22834,11 +25095,6 @@ static void mergePartitionHeapData(Relation partTableRel, Relation tempTableRel,
mergeHeapBlocks += srcPartHeapBlocks;
}
- if (RelationIsUstoreFormat(tempTableRel)) {
- /* for ustore tables, all the tuples in dest rel are frozen above in mergeHeapBlock */
- FreezeXid = GetCurrentTransactionId();
- }
-
pfree_ext(srcPartsHasVM);
if (freezexid != NULL)
@@ -22982,10 +25238,10 @@ static void ATExecMergePartition(Relation partTableRel, AlterTableCmd* cmd)
partName = strVal(lfirst(cell));
/* from name to partition oid */
- srcPartOid = partitionNameGetPartitionOid(partTableRel->rd_id,
+ srcPartOid = PartitionNameGetPartitionOid(partTableRel->rd_id,
partName,
PART_OBJ_TYPE_TABLE_PARTITION,
- ExclusiveLock, // get ExclusiveLock lock on src partitions
+ AccessExclusiveLock, // get AccessExclusiveLock lock on src partitions
false, // no missing
false, // wait
NULL,
@@ -23054,9 +25310,16 @@ static void ATExecMergePartition(Relation partTableRel, AlterTableCmd* cmd)
renameTargetPart = true;
}
+    /* take INTERVAL_PARTITION_LOCK_SDEQUENCE here to block concurrent ADD INTERVAL PARTITION */
+ if (RELATION_IS_INTERVAL_PARTITIONED(partTableRel)) {
+ LockPartitionObject(partTableRel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK);
+ }
+
if (cmd->alterGPI) {
destPartOid = AddTemporaryRangePartitionForAlterPartitions(cmd, partTableRel, curPartIndex, &renameTargetPart);
- lockMode = ExclusiveLock;
+ int partitionno = GetPartitionnoFromSequence(partTableRel->partMap, curPartIndex);
+ UpdateCurrentPartitionNo(destPartOid, partitionno, false);
+ lockMode = AccessExclusiveLock;
}
/*
@@ -23078,7 +25341,7 @@ static void ATExecMergePartition(Relation partTableRel, AlterTableCmd* cmd)
/*
* open the dest partition.
- * If it's not alterGPI, destPart was already locked by partitionNameGetPartitionOid() call.
+ * If it's not alterGPI, destPart was already locked by PartitionNameGetPartitionOid() call.
*/
destPart = partitionOpen(partTableRel, destPartOid, lockMode);
destPartRel = partitionGetRelation(partTableRel, destPart);
@@ -23102,7 +25365,7 @@ static void ATExecMergePartition(Relation partTableRel, AlterTableCmd* cmd)
object.objectSubId = 0;
ReleaseSysCache(tuple);
- partitionClose(partTableRel, destPart, lockMode);
+ partitionClose(partTableRel, destPart, NoLock);
releaseDummyRelation(&destPartRel);
/* open temp relation */
@@ -23490,6 +25753,11 @@ static void ATExecExchangePartition(Relation partTableRel, AlterTableCmd* cmd)
ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("Specified partition does not exist")));
}
+    /* take INTERVAL_PARTITION_LOCK_SDEQUENCE here to block concurrent ADD INTERVAL PARTITION */
+ if (RELATION_IS_INTERVAL_PARTITIONED(partTableRel)) {
+ LockPartitionObject(partTableRel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK);
+ }
+
Assert(OidIsValid(ordTableOid));
ordTableRel = heap_open(ordTableOid, NoLock);
@@ -24388,7 +26656,7 @@ static void checkValidationForExchangeTable(Relation partTableRel, Relation ordT
tupleDesc = ordTableRel->rd_att;
if (exchangeVerbose) {
- indexList = RelationGetSpecificKindIndexList(partTableRel, false);
+ indexList = RelationGetIndexList(partTableRel, false);
foreach (cell, indexList) {
Oid indexOid = lfirst_oid(cell);
@@ -24414,7 +26682,7 @@ static void checkValidationForExchangeTable(Relation partTableRel, Relation ordT
estate->es_num_result_relations = 1;
estate->es_result_relation_info = resultRelInfo;
- indexslot = MakeSingleTupleTableSlot(RelationGetDescr(partTableRel));
+ indexslot = MakeSingleTupleTableSlot(RelationGetDescr(partTableRel), false, partTableRel->rd_tam_ops);
}
}
@@ -24434,10 +26702,10 @@ static void checkValidationForExchangeTable(Relation partTableRel, Relation ordT
int2 bucketId = InvalidBktId;
// get right partition oid for the tuple
- targetPartOid = heapTupleGetPartitionId(partTableRel, (HeapTuple)tuple, true);
+ targetPartOid = heapTupleGetPartitionId(partTableRel, (HeapTuple)tuple, NULL, true);
- searchFakeReationForPartitionOid(
- partRelHTAB, CurrentMemoryContext, partTableRel, targetPartOid, partRel, part, RowExclusiveLock);
+ searchFakeReationForPartitionOid(partRelHTAB, CurrentMemoryContext, partTableRel, targetPartOid,
+ INVALID_PARTITION_NO, partRel, part, RowExclusiveLock);
if (RELATION_HAS_BUCKET(partTableRel)) {
// Get the target bucket.
@@ -24472,19 +26740,24 @@ static void checkValidationForExchangeTable(Relation partTableRel, Relation ordT
bool isNull[tupleDesc->natts];
bool estateIsNotNull = false;
- partIndexOid = getPartitionIndexOid(indexOid, targetPartOid);
- searchFakeReationForPartitionOid(partRelHTAB,
- CurrentMemoryContext,
- indexRel,
- partIndexOid,
- partIndexRel,
- partIndex,
- RowExclusiveLock);
+ bool isglobal = RelationIsGlobalIndex(indexRel);
+ if (!isglobal) {
+ partIndexOid = getPartitionIndexOid(indexOid, targetPartOid);
+ searchFakeReationForPartitionOid(partRelHTAB,
+ CurrentMemoryContext,
+ indexRel,
+ partIndexOid,
+ INVALID_PARTITION_NO,
+ partIndexRel,
+ partIndex,
+ RowExclusiveLock);
+ indexRel = partIndexRel;
+ }
if (RELATION_HAS_BUCKET(indexRel) && !(RelationAmIsBtree(indexRel) &&
RELOPTIONS_CROSSBUCKET(indexRel->rd_options))) {
searchHBucketFakeRelation(
- partRelHTAB, CurrentMemoryContext, partIndexRel, bucketId, partIndexRel);
+ partRelHTAB, CurrentMemoryContext, indexRel, bucketId, indexRel);
}
if (indexInfo->ii_Expressions != NIL || indexInfo->ii_ExclusionOps != NULL) {
@@ -24495,7 +26768,7 @@ static void checkValidationForExchangeTable(Relation partTableRel, Relation ordT
FormIndexDatum(indexInfo, indexslot, estateIsNotNull ? estate : NULL, values, isNull);
- (void)index_insert(partIndexRel,
+ (void)index_insert(indexRel,
values,
isNull,
&((HeapTuple)copyTuple)->t_self,
@@ -24864,7 +27137,7 @@ static Oid getPartitionOid(Relation partTableRel, const char *partName, Node *Pa
Oid partOid = InvalidOid;
if (PointerIsValid(partName)) {
- partOid = partitionNameGetPartitionOid(RelationGetRelid(partTableRel),
+ partOid = PartitionNameGetPartitionOid(RelationGetRelid(partTableRel),
partName,
PART_OBJ_TYPE_TABLE_PARTITION,
AccessExclusiveLock,
@@ -24875,7 +27148,7 @@ static Oid getPartitionOid(Relation partTableRel, const char *partName, Node *Pa
NoLock);
} else {
List* boundary = GetPartitionBoundary(partTableRel, PartDef);
- partOid = partitionValuesGetPartitionOid(
+ partOid = PartitionValuesGetPartitionOid(
partTableRel, boundary, AccessExclusiveLock, true, true, false);
}
@@ -24925,18 +27198,9 @@ static void ATExecSplitPartition(Relation partTableRel, AlterTableCmd* cmd)
partKeyNum = partMap->partitionKey->dim1;
partTableOid = RelationGetRelid(partTableRel);
- // check final partition num
- targetPartNum = getNumberOfPartitions(partTableRel) + list_length(destPartDefList) - 1;
- if (targetPartNum > MAX_PARTITION_NUM) {
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("too many partitions for partitioned table"),
- errhint("Number of partitions can not be more than %d", MAX_PARTITION_NUM)));
- }
-
// get src partition oid
if (PointerIsValid(splitPart->src_partition_name)) {
- srcPartOid = partitionNameGetPartitionOid(RelationGetRelid(partTableRel),
+ srcPartOid = PartitionNameGetPartitionOid(RelationGetRelid(partTableRel),
splitPart->src_partition_name,
PART_OBJ_TYPE_TABLE_PARTITION,
AccessExclusiveLock,
@@ -24948,10 +27212,24 @@ static void ATExecSplitPartition(Relation partTableRel, AlterTableCmd* cmd)
} else {
splitPart->partition_for_values = transformConstIntoTargetType(
partTableRel->rd_att->attrs, partMap->partitionKey, splitPart->partition_for_values);
- srcPartOid = partitionValuesGetPartitionOid(
+ srcPartOid = PartitionValuesGetPartitionOid(
partTableRel, splitPart->partition_for_values, AccessExclusiveLock, true, true, false);
}
+    /* take INTERVAL_PARTITION_LOCK_SDEQUENCE here to block concurrent ADD INTERVAL PARTITION */
+ if (RELATION_IS_INTERVAL_PARTITIONED(partTableRel)) {
+ LockPartitionObject(partTableRel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK);
+ }
+
+ // check final partition num
+ targetPartNum = getNumberOfPartitions(partTableRel) + list_length(destPartDefList) - 1;
+ if (targetPartNum > MAX_PARTITION_NUM) {
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
+ errmsg("too many partitions for partitioned table"),
+ errhint("Number of partitions can not be more than %d", MAX_PARTITION_NUM)));
+ }
+
/* check src partition exists */
if (!OidIsValid(srcPartOid)) {
ereport(ERROR,
@@ -25077,7 +27355,17 @@ static void ATExecSplitPartition(Relation partTableRel, AlterTableCmd* cmd)
}
// add dest partitions
+ int partitionno = -GetCurrentPartitionNo(RelOidGetPartitionTupleid(partTableRel->rd_id));
+ Assert(PARTITIONNO_IS_VALID(partitionno));
+ foreach (cell, destPartDefList) {
+ partitionno++;
+ PartitionDefState* partDef = (PartitionDefState*)lfirst(cell);
+ partDef->partitionno = partitionno;
+ }
fastAddPartition(partTableRel, destPartDefList, &newPartOidList);
+    /* in-place update on the partitioned table, because we must not overwrite the wait_clean_gpi info, which is updated in place */
+ UpdateCurrentPartitionNo(RelOidGetPartitionTupleid(partTableRel->rd_id), -partitionno, true);
+
freeDestPartBoundaryList(destPartBoundaryList, listForFree);
if (isPrevInterval) {
// modify all previous *interval* partitions to range partitions, *possibly* no such partitions
@@ -25152,9 +27440,9 @@ static void ATExecSplitPartition(Relation partTableRel, AlterTableCmd* cmd)
void CheckSrcListSubPartitionForSplit(Relation rel, Oid partOid, Oid subPartOid)
{
- Partition part = partitionOpen(rel, partOid, AccessExclusiveLock);
+ Partition part = partitionOpen(rel, partOid, NoLock);
Relation partRel = partitionGetRelation(rel, part);
- Partition subPart = partitionOpen(partRel, subPartOid, AccessExclusiveLock);
+ Partition subPart = partitionOpen(partRel, subPartOid, NoLock);
if (subPart->pd_part->partstrategy == PART_STRATEGY_HASH) {
ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Hash subpartition does not support split."),
errdetail("N/A"), errcause("Hash subpartition does not support split."),
@@ -25167,7 +27455,7 @@ void CheckSrcListSubPartitionForSplit(Relation rel, Oid partOid, Oid subPartOid)
errdetail("SPLIT SUBPARTITION NAME VALUES shouldn't be used, it's for list subpartitions."),
errcause("Wrong split subpartition syntax used."), erraction("Use proper split subpartition syntax.")));
}
- partitionClose(partRel, subPart, AccessExclusiveLock);
+ partitionClose(partRel, subPart, NoLock);
int srcSubPartIndex = partOidGetPartSequence(partRel, subPartOid) - 1;
List* boundary = getListPartitionBoundaryList(partRel, srcSubPartIndex);
@@ -25178,7 +27466,7 @@ void CheckSrcListSubPartitionForSplit(Relation rel, Oid partOid, Oid subPartOid)
}
releaseDummyRelation(&partRel);
- partitionClose(rel, part, AccessExclusiveLock);
+ partitionClose(rel, part, NoLock);
list_free_deep(boundary);
}
@@ -25270,9 +27558,9 @@ static void CheckDestRangeSubPartitionNameForSplit(Relation rel, List* destPartD
static void ChecksrcRangeSubPartitionNameForSplit(Relation rel, Oid partOid, Oid subPartOid)
{
- Partition part = partitionOpen(rel, partOid, AccessExclusiveLock);
+ Partition part = partitionOpen(rel, partOid, NoLock);
Relation partRel = partitionGetRelation(rel, part);
- Partition subPart = partitionOpen(partRel, subPartOid, AccessExclusiveLock);
+ Partition subPart = partitionOpen(partRel, subPartOid, NoLock);
if (subPart->pd_part->partstrategy == PART_STRATEGY_HASH) {
ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Hash subpartition does not support split."),
errdetail("N/A"), errcause("Hash subpartition does not support split."),
@@ -25285,60 +27573,43 @@ static void ChecksrcRangeSubPartitionNameForSplit(Relation rel, Oid partOid, Oid
errdetail("SPLIT SUBPARTITION NAME AT shouldn't be used, it's for range subpartitions."),
errcause("Wrong split subpartition syntax used."), erraction("Use proper split subpartition syntax.")));
}
- partitionClose(partRel, subPart, AccessExclusiveLock);
+ partitionClose(partRel, subPart, NoLock);
releaseDummyRelation(&partRel);
- partitionClose(rel, part, AccessExclusiveLock);
+ partitionClose(rel, part, NoLock);
}
void CheckDestListSubPartitionBoundaryForSplit(Relation rel, Oid partOid, SplitPartitionState* splitSubPart)
{
ListCell *cell = NULL;
- ParseState* pstate = NULL;
- ListPartitionMap* partMap = NULL;
- int partKeyNum = 0;
- int listElementsNum = 0;
- Partition part = partitionOpen(rel, partOid, AccessExclusiveLock);
+ Oid existingSubPartOid;
+ Oid defaultSubPartOid = InvalidOid;
+ Partition part = partitionOpen(rel, partOid, NoLock);
Relation partRel = partitionGetRelation(rel, part);
+ ListPartitionMap* partMap = (ListPartitionMap*)partRel->partMap;
+ ParseState* pstate = make_parsestate(NULL);
- // get partition key number
- partMap = (ListPartitionMap*)partRel->partMap;
- partKeyNum = partMap->partitionKey->dim1;
- listElementsNum = partMap->listElementsNum;
- Assert(partKeyNum == 1);
-
- pstate = make_parsestate(NULL);
splitSubPart->newListSubPartitionBoundry =
transformListPartitionValue(pstate, splitSubPart->newListSubPartitionBoundry, true, true);
pfree_ext(pstate);
- List *tmp = splitSubPart->newListSubPartitionBoundry;
- splitSubPart->newListSubPartitionBoundry = transformIntoTargetType(
- partRel->rd_att->attrs, partMap->partitionKey->values[0], splitSubPart->newListSubPartitionBoundry);
- list_free_ext(tmp);
+ for (int i = 0; i < partMap->listElementsNum; i++) {
+ ListPartElement *list = &partMap->listElements[i];
+ if (list->boundary[0].values[0]->ismaxvalue) {
+ defaultSubPartOid = list->partitionOid;
+ break;
+ }
+ }
foreach (cell, splitSubPart->newListSubPartitionBoundry) {
- Const *newBoundary = (Const *)lfirst(cell);
- for (int i = 0; i < listElementsNum; i++) {
- Const** boundarys = partMap->listElements[i].boundary;
- int boundarysLen = partMap->listElements[i].len;
- for (int j = 0; j < boundarysLen; j++) {
- Const *boundary = boundarys[j];
- if (boundary->ismaxvalue) {
- continue;
- }
- int compare = 0;
- constCompare(boundary, newBoundary, compare);
- if (compare == 0) {
- ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
- errmsg("list subpartition %s has overlapped value",
- getPartitionName(partMap->listElements[i].partitionOid, false))));
- }
- }
+ existingSubPartOid = FindPartOidByListBoundary(partRel, partMap, (Node*)lfirst(cell));
+ if (OidIsValid(existingSubPartOid) && existingSubPartOid != defaultSubPartOid) {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+ errmsg("list subpartition %s has overlapped value", getPartitionName(existingSubPartOid, false))));
}
}
releaseDummyRelation(&partRel);
- partitionClose(rel, part, AccessExclusiveLock);
+ partitionClose(rel, part, NoLock);
}
int GetNumberOfSubPartitions(Relation rel)
@@ -25348,11 +27619,11 @@ int GetNumberOfSubPartitions(Relation rel)
ListCell *cell = NULL;
foreach (cell, partOidList) {
Oid partOid = lfirst_oid(cell);
- Partition part = partitionOpen(rel, partOid, AccessExclusiveLock);
+ Partition part = partitionOpen(rel, partOid, NoLock);
Relation partRel = partitionGetRelation(rel, part);
subPartitionNum += GetNumberOfSubPartitions(partRel);
releaseDummyRelation(&partRel);
- partitionClose(rel, part, AccessExclusiveLock);
+ partitionClose(rel, part, NoLock);
}
return subPartitionNum;
}
@@ -25366,6 +27637,7 @@ static void ATExecSplitSubPartition(Relation partTableRel, AlterTableCmd* cmd)
int currentPartNum = 0;
Oid partOid = InvalidOid;
Oid srcSubPartOid = InvalidOid;
+ int subpartitionno = INVALID_PARTITION_NO;
splitSubPart = (SplitPartitionState*)cmd->def;
destPartDefList = splitSubPart->dest_partition_define_list;
@@ -25379,10 +27651,10 @@ static void ATExecSplitSubPartition(Relation partTableRel, AlterTableCmd* cmd)
errhint("Number of subpartitions can not be more than %d", MAX_PARTITION_NUM)));
}
if (splitSubPart->splitType == LISTSUBPARTITIION) {
- srcSubPartOid = partitionNameGetPartitionOid(RelationGetRelid(partTableRel),
+ srcSubPartOid = SubPartitionNameGetSubPartitionOid(RelationGetRelid(partTableRel),
splitSubPart->src_partition_name,
- PART_OBJ_TYPE_TABLE_SUB_PARTITION,
- AccessExclusiveLock,
+ ShareUpdateExclusiveLock, /* partition lock */
+ AccessExclusiveLock, /* subpartition lock */
true,
false,
NULL,
@@ -25413,9 +27685,12 @@ static void ATExecSplitSubPartition(Relation partTableRel, AlterTableCmd* cmd)
}
// check dest partition tablespace
+ subpartitionno = -GetCurrentSubPartitionNo(partOid);
+ Assert(PARTITIONNO_IS_VALID(subpartitionno));
foreach (cell, destPartDefList) {
ListPartitionDefState *listSubPartDef = (ListPartitionDefState *)lfirst(cell);
-
+ subpartitionno++;
+ listSubPartDef->partitionno = subpartitionno;
CheckPartitionTablespace(listSubPartDef->tablespacename, partTableRel->rd_rel->relowner);
}
@@ -25433,10 +27708,10 @@ static void ATExecSplitSubPartition(Relation partTableRel, AlterTableCmd* cmd)
boundaryDefault->location = -1;
listPartDef->boundary = list_make1(boundaryDefault);
} else if (splitSubPart->splitType == RANGESUBPARTITIION) {
- srcSubPartOid = partitionNameGetPartitionOid(RelationGetRelid(partTableRel),
+ srcSubPartOid = SubPartitionNameGetSubPartitionOid(RelationGetRelid(partTableRel),
splitSubPart->src_partition_name,
- PART_OBJ_TYPE_TABLE_SUB_PARTITION,
- AccessExclusiveLock,
+ ShareUpdateExclusiveLock, /* partition lock */
+ AccessExclusiveLock, /* subpartition lock */
true,
false,
NULL,
@@ -25463,12 +27738,15 @@ static void ATExecSplitSubPartition(Relation partTableRel, AlterTableCmd* cmd)
}
// check dest partition tablespace
+ subpartitionno = -GetCurrentSubPartitionNo(partOid);
+ Assert(PARTITIONNO_IS_VALID(subpartitionno));
foreach (cell, destPartDefList) {
RangePartitionDefState *listSubPartDef = (RangePartitionDefState *)lfirst(cell);
-
+ subpartitionno++;
+ listSubPartDef->partitionno = subpartitionno;
CheckPartitionTablespace(listSubPartDef->tablespacename, partTableRel->rd_rel->relowner);
}
- Partition part = partitionOpen(partTableRel, partOid, AccessShareLock);
+ Partition part = partitionOpen(partTableRel, partOid, NoLock);
Relation partRel = partitionGetRelation(partTableRel, part);
// get src partition sequence
@@ -25505,11 +27783,11 @@ static void ATExecSplitSubPartition(Relation partTableRel, AlterTableCmd* cmd)
FastAddRangeSubPartition(partTableRel, destPartDefList, partOid, &newSubPartOidList);
releaseDummyRelation(&partRel);
- partitionClose(partTableRel, part, AccessShareLock);
+ partitionClose(partTableRel, part, NoLock);
}
- Partition part = partitionOpen(partTableRel, partOid, AccessExclusiveLock);
+ Partition part = partitionOpen(partTableRel, partOid, NoLock);
Relation partRel = partitionGetRelation(partTableRel, part);
- Partition subPart = partitionOpen(partRel, srcSubPartOid, AccessExclusiveLock);
+ Partition subPart = partitionOpen(partRel, srcSubPartOid, NoLock);
// creat temp table and swap relfilenode with src partition
tempTableOid = createTempTableForPartition(partTableRel, subPart);
@@ -25530,7 +27808,7 @@ static void ATExecSplitSubPartition(Relation partTableRel, AlterTableCmd* cmd)
CacheInvalidatePartcache(part);
releaseDummyRelation(&partRel);
- partitionClose(partTableRel, part, AccessExclusiveLock);
+ partitionClose(partTableRel, part, NoLock);
if (splitSubPart->splitType == LISTSUBPARTITIION) {
/*
@@ -25541,6 +27819,8 @@ static void ATExecSplitSubPartition(Relation partTableRel, AlterTableCmd* cmd)
FastAddListSubPartition(partTableRel, destPartDefList, partOid, &newSubPartOidList);
}
+ UpdateCurrentSubPartitionNo(partOid, -subpartitionno);
+
Relation tempTableRel = relation_open(tempTableOid, AccessExclusiveLock);
// read temp table tuples and insert into partitioned table
@@ -25897,10 +28177,9 @@ static void AlterPartitionedSetWaitCleanGPI(bool alterGPI, Relation partTableRel
Relation partRel = NULL;
Oid parentOid = partid_get_parentid(targetPartOid);
if (parentOid != partTableRel->rd_id) {
- part = partitionOpen(partTableRel, parentOid, AccessExclusiveLock);
+ part = partitionOpen(partTableRel, parentOid, AccessShareLock);
partRel = partitionGetRelation(partTableRel, part);
targetPart = partitionOpen(partRel, targetPartOid, AccessExclusiveLock);
- partitionClose(partTableRel, part, AccessExclusiveLock);
} else {
targetPart = partitionOpen(partTableRel, targetPartOid, AccessExclusiveLock);
}
@@ -25915,6 +28194,7 @@ static void AlterPartitionedSetWaitCleanGPI(bool alterGPI, Relation partTableRel
if (partRel != NULL) {
partitionClose(partRel, targetPart, NoLock);
releaseDummyRelation(&partRel);
+ partitionClose(partTableRel, part, NoLock);
} else {
partitionClose(partTableRel, targetPart, NoLock);
}
@@ -25980,6 +28260,7 @@ static Oid AddTemporaryRangePartitionForAlterPartitions(const AlterTableCmd* cmd
}
partDef->boundary = getRangePartitionBoundaryList(partTableRel, sequence);
partDef->tablespacename = pstrdup(cmd->target_partition_tablespace);
+ partDef->partitionno = GetPartitionnoFromSequence(partTableRel->partMap, sequence);
partDef->curStartVal = NULL;
partDef->partitionInitName = NULL;
newPartOid = AddTemporaryPartition(partTableRel, (Node*)partDef);
@@ -26020,6 +28301,7 @@ static Oid AddTemporaryListPartitionForAlterPartitions(const AlterTableCmd* cmd,
}
partDef->boundary = getListPartitionBoundaryList(partTableRel, sequence);
partDef->tablespacename = pstrdup(cmd->target_partition_tablespace);
+ partDef->partitionno = GetPartitionnoFromSequence(partTableRel->partMap, sequence);
newPartOid = AddTemporaryPartition(partTableRel, (Node*)partDef);
pfree_ext(partDef->partitionName);
pfree_ext(partDef->tablespacename);
@@ -26058,6 +28340,7 @@ static Oid AddTemporaryHashPartitionForAlterPartitions(const AlterTableCmd* cmd,
}
partDef->boundary = getHashPartitionBoundaryList(partTableRel, sequence);
partDef->tablespacename = pstrdup(cmd->target_partition_tablespace);
+ partDef->partitionno = GetPartitionnoFromSequence(partTableRel->partMap, sequence);
newPartOid = AddTemporaryPartition(partTableRel, (Node*)partDef);
pfree_ext(partDef->partitionName);
pfree_ext(partDef->tablespacename);
@@ -26091,7 +28374,27 @@ static Oid AddTemporaryPartitionForAlterPartitions(const AlterTableCmd* cmd, Rel
}
case PART_TYPE_RANGE:
case PART_TYPE_INTERVAL: {
+ RangePartitionMap *partmap = (RangePartitionMap *)partTableRel->partMap;
+ bool isinterval = partmap->rangeElements[partSeq].isInterval;
destPartOid = AddTemporaryRangePartitionForAlterPartitions(cmd, partTableRel, partSeq, renameTargetPart);
+
+ if (!isinterval) {
+ break;
+ }
+
+ /* if srcPartOid is a interval partition oid, we need transform destPart to interval */
+ Relation pg_partition = relation_open(PartitionRelationId, RowExclusiveLock);
+ HeapTuple parttup = SearchSysCacheCopy1(PARTRELID, ObjectIdGetDatum(destPartOid));
+ Assert(HeapTupleIsValid(parttup));
+
+ Form_pg_partition partform = (Form_pg_partition)GETSTRUCT(parttup);
+ partform->partstrategy = PART_STRATEGY_INTERVAL;
+ simple_heap_update(pg_partition, &parttup->t_self, parttup);
+ CatalogUpdateIndexes(pg_partition, parttup);
+
+ tableam_tops_free_tuple(parttup);
+ relation_close(pg_partition, RowExclusiveLock);
+ CommandCounterIncrement();
break;
}
default:
@@ -26127,6 +28430,9 @@ static void ExchangePartitionWithGPI(const AlterTableCmd* cmd, Relation partTabl
char* destPartitionName = NULL;
Oid destPartOid = AddTemporaryPartitionForAlterPartitions(cmd, partTableRel, srcPartOid, &renameTargetPart);
+ int partitionno = GetCurrentPartitionNo(srcPartOid);
+ PARTITIONNO_VALID_ASSERT(partitionno);
+ UpdateCurrentPartitionNo(destPartOid, partitionno, false);
srcPart = partitionOpen(partTableRel, srcPartOid, AccessExclusiveLock);
destPartitionName = pstrdup(PartitionGetPartitionName(srcPart));
@@ -26245,7 +28551,7 @@ static void FastAddListSubPartition(Relation rel, List* destPartDefList, Oid par
ListCell* cell = NULL;
Oid bucketOid;
- Partition part = partitionOpen(rel, partOid, AccessExclusiveLock);
+ Partition part = partitionOpen(rel, partOid, ShareUpdateExclusiveLock);
Relation partRel = partitionGetRelation(rel, part);
bool* isTimestamptz = CheckPartkeyHasTimestampwithzone(partRel, true);
@@ -26287,7 +28593,7 @@ static void FastAddListSubPartition(Relation rel, List* destPartDefList, Oid par
pfree_ext(isTimestamptz);
relation_close(pgPartRel, NoLock);
releaseDummyRelation(&partRel);
- partitionClose(rel, part, AccessExclusiveLock);
+ partitionClose(rel, part, NoLock);
}
static void FastAddRangeSubPartition(Relation rel, List* destPartDefList, Oid partOid, List** newPartOidList)
@@ -26297,7 +28603,7 @@ static void FastAddRangeSubPartition(Relation rel, List* destPartDefList, Oid pa
ListCell* cell = NULL;
Oid bucketOid;
- Partition part = partitionOpen(rel, partOid, AccessExclusiveLock);
+ Partition part = partitionOpen(rel, partOid, ShareUpdateExclusiveLock);
Relation partRel = partitionGetRelation(rel, part);
bool* isTimestamptz = CheckPartkeyHasTimestampwithzone(partRel, true);
@@ -26340,7 +28646,7 @@ static void FastAddRangeSubPartition(Relation rel, List* destPartDefList, Oid pa
pfree_ext(isTimestamptz);
relation_close(pgPartRel, NoLock);
releaseDummyRelation(&partRel);
- partitionClose(rel, part, AccessExclusiveLock);
+ partitionClose(rel, part, NoLock);
}
static Oid createTempTableForPartition(Relation partTableRel, Partition part)
@@ -26389,6 +28695,9 @@ static void readTuplesAndInsertInternal(Relation tempTableRel, Relation partTabl
while ((tuple = tableam_scan_getnexttuple(scan, ForwardScanDirection)) != NULL) {
Oid targetPartOid = InvalidOid;
+ int partitionno = INVALID_PARTITION_NO;
+ Oid targetSubPartOid = InvalidOid;
+ int subpartitionno = INVALID_PARTITION_NO;
Relation partRel = NULL;
Partition part = NULL;
Relation subPartRel = NULL;
@@ -26397,20 +28706,24 @@ static void readTuplesAndInsertInternal(Relation tempTableRel, Relation partTabl
/* tableam_tops_copy_tuple is not ready so we add UStore hack path */
copyTuple = tableam_tops_copy_tuple(tuple);
- targetPartOid = heapTupleGetPartitionId(partTableRel, (void *)tuple, true);
- searchFakeReationForPartitionOid(
- partRelHTAB, CurrentMemoryContext, partTableRel, targetPartOid, partRel, part, RowExclusiveLock);
+ targetPartOid = heapTupleGetPartitionId(partTableRel, (void *)tuple, &partitionno, true);
+ searchFakeReationForPartitionOid(partRelHTAB, CurrentMemoryContext, partTableRel, targetPartOid, partitionno,
+ partRel, part, RowExclusiveLock);
if (RelationIsSubPartitioned(partTableRel)) {
- targetPartOid = heapTupleGetPartitionId(partRel, (void *)tuple, true);
- searchFakeReationForPartitionOid(partRelHTAB, CurrentMemoryContext, partRel, targetPartOid, subPartRel,
- subPart, RowExclusiveLock);
+ targetSubPartOid = heapTupleGetPartitionId(partRel, (void *)tuple, &subpartitionno, true);
+ searchFakeReationForPartitionOid(partRelHTAB, CurrentMemoryContext, partRel, targetSubPartOid,
+ subpartitionno, subPartRel, subPart, RowExclusiveLock);
partRel = subPartRel;
}
if (bucketId != InvalidBktId) {
searchHBucketFakeRelation(partRelHTAB, CurrentMemoryContext, partRel, bucketId, partRel);
}
- AlterPartitionedSetWaitCleanGPI(true, partTableRel, targetPartOid);
+ if (RelationIsSubPartitioned(partTableRel)) {
+ AlterSubPartitionedSetWaitCleanGPI(true, partTableRel, targetPartOid, targetSubPartOid);
+ } else {
+ AlterPartitionedSetWaitCleanGPI(true, partTableRel, targetPartOid);
+ }
if (relisustore) {
Oid reloid = RelationGetRelid(partRel);
@@ -28419,6 +30732,7 @@ static void at_timeseries_check(Relation rel, AlterTableCmd* cmd)
switch (cmd->subtype) {
case AT_AddPartition:
case AT_DropPartition:
+ case AT_ResetPartitionno:
case AT_SetRelOptions:
case AT_DropColumn:
case AT_TruncatePartition:
@@ -28688,20 +31002,41 @@ void ShrinkRealtionChunk(ShrinkStmt* shrink)
}
}
-static Datum GetAutoIncrementDatum(Relation rel, TupleDesc desc)
+static int128 EvaluateAutoIncrement(Relation rel, TupleDesc desc, AttrNumber attnum, Datum* value, bool* is_null)
{
- int128 autoinc;
ConstrAutoInc* cons_autoinc = desc->constr->cons_autoinc;
-
- if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP) {
- autoinc = tmptable_autoinc_nextval(rel->rd_rel->relfilenode, cons_autoinc->next);
+ int128 autoinc;
+ bool modify_value = false;
+
+ if (*is_null) {
+ autoinc = 0;
+ modify_value = desc->attrs[attnum - 1].attnotnull;
} else {
- autoinc = nextval_internal(cons_autoinc->seqoid);
+ autoinc = datum2autoinc(cons_autoinc, *value);
+ modify_value = (autoinc == 0);
}
- if (cons_autoinc->autoinc2datum_func != NULL) {
- return DirectFunctionCall1((PGFunction)(uintptr_t)cons_autoinc->autoinc2datum_func, Int128GetDatum(autoinc));
+ /* When the datum is NULL/0, generate the next auto-increment value */
+ if (autoinc == 0) {
+ if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP) {
+ autoinc = tmptable_autoinc_nextval(rel->rd_rel->relfilenode, cons_autoinc->next);
+ } else {
+ autoinc = nextval_internal(cons_autoinc->seqoid);
+ }
+ if (modify_value) {
+ *is_null = false;
+ *value = autoinc2datum(cons_autoinc, autoinc);
+ }
+ }
+ return autoinc;
+}
+
+static void SetRelAutoIncrement(Relation rel, TupleDesc desc, int128 autoinc)
+{
+ if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP) {
+ tmptable_autoinc_setval(rel->rd_rel->relfilenode, desc->constr->cons_autoinc->next, autoinc, true);
+ } else {
+ autoinc_setval(desc->constr->cons_autoinc->seqoid, autoinc, true);
}
- return Int128GetDatum(autoinc);
}
static void CopyTempAutoIncrement(Relation oldrel, Relation newrel)
@@ -28717,6 +31052,38 @@ static void CopyTempAutoIncrement(Relation oldrel, Relation newrel)
}
}
+static void ATAlterCheckModifiyColumnRepeatedly(const AlterTableCmd* cmd, const List* tab_cmds)
+{
+ ListCell* tcmd = NULL;
+ foreach (tcmd, tab_cmds) {
+ AlterTableCmd* acmd = (AlterTableCmd*)lfirst(tcmd);
+ if (acmd->name != NULL && strcmp(acmd->name, cmd->name) == 0) {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+ errmsg("Invalid modify column operation"),
+ errdetail("cannot modify or change column \"%s\" twice", cmd->name)));
+ }
+ }
+}
+
+void CheckAutoIncrementDatatype(Oid typid, const char* colname)
+{
+ switch (typid) {
+ case BOOLOID:
+ case INT1OID:
+ case INT2OID:
+ case INT4OID:
+ case INT8OID:
+ case INT16OID:
+ case FLOAT4OID:
+ case FLOAT8OID:
+ break;
+ default:
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("The datatype of column '%s' does not support auto_increment", colname)));
+ break;
+ }
+}
+
void CheckRelAutoIncrementIndex(Oid relid, LOCKMODE lockmode)
{
List* idxoidlist = NULL;
@@ -28730,8 +31097,8 @@ void CheckRelAutoIncrementIndex(Oid relid, LOCKMODE lockmode)
}
if (!rel->rd_rel->relhasindex) {
- ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- (errmsg("Incorrect table definition, auto_increment column must be defined as a key"))));
+ ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
+ (errmsg("auto_increment column must be defined as a unique or primary key"))));
}
idxoidlist = RelationGetIndexList(rel);
@@ -28752,7 +31119,753 @@ void CheckRelAutoIncrementIndex(Oid relid, LOCKMODE lockmode)
list_free(idxoidlist);
if (!found) {
- ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- (errmsg("Incorrect table definition, auto_increment column must be defined as a key"))));
+ ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
+ (errmsg("auto_increment column must be defined as a unique or primary key"))));
}
}
+
+/* Deparse an expression and reparse it. Returns the new expression node. */
+static Node* ReparseSingleRelExpr(Relation rel, Node* src_expr)
+{
+ Node* dst_expr = NULL;
+ List* parsetree = NULL;
+ SelectStmt* stmt = NULL;
+ StringInfoData query_string;
+ List* deparse_context = deparse_context_for(RelationGetRelationName(rel), RelationGetRelid(rel));
+ char* expr_string = deparse_expression(src_expr, deparse_context, false, false);
+ /* construct sql */
+ initStringInfo(&query_string);
+ appendStringInfo(&query_string, "SELECT %s ;", expr_string);
+ /* parse sql */
+ parsetree = raw_parser(query_string.data, NULL);
+ /* get SelectStmt from parsetree */
+ Assert(list_length(parsetree) == 1);
+ dst_expr = (Node*)linitial(parsetree);
+ Assert(IsA(dst_expr, SelectStmt));
+ stmt = (SelectStmt*)dst_expr;
+ /* get ResTarget from SelectStmt */
+ Assert(list_length(stmt->targetList) == 1);
+ dst_expr = (Node*)linitial(stmt->targetList);
+ /* get reparsed expr from ResTarget */
+ Assert(IsA(dst_expr, ResTarget));
+ dst_expr = ((ResTarget*)dst_expr)->val;
+ list_free(parsetree);
+ pfree(query_string.data);
+ pfree(expr_string);
+ list_free_deep(deparse_context);
+ return dst_expr;
+}
+
+/* Rebuild the generated expression because the data type of the column referenced in it has changed. */
+static Node* RebuildGeneratedColumnExpr(Relation rel, AttrNumber gen_attnum)
+{
+ ParseState* pstate = NULL;
+ RangeTblEntry *rte = NULL;
+ FormData_pg_attribute pgattr = rel->rd_att->attrs[gen_attnum - 1];
+ Node* gen_expr = build_column_default(rel, gen_attnum);
+
+ Assert(gen_expr);
+ /* reparse generated column expression */
+ gen_expr = ReparseSingleRelExpr(rel, gen_expr);
+ /* cook generated expression */
+ pstate = make_parsestate(NULL);
+ rte = addRangeTableEntryForRelation(pstate, rel, NULL, false, true);
+ addRTEtoQuery(pstate, rte, false, true, true);
+ gen_expr = cookDefault(
+ pstate, gen_expr, pgattr.atttypid, pgattr.atttypmod, NameStr(pgattr.attname), ATTRIBUTE_GENERATED_STORED);
+ /* re-add the pg_attrdef entry */
+ RemoveAttrDefault(RelationGetRelid(rel), gen_attnum, DROP_RESTRICT, true, true);
+ StoreAttrDefault(rel, gen_attnum, gen_expr, ATTRIBUTE_GENERATED_STORED, NULL, true);
+ pfree(pstate);
+ return gen_expr;
+}
+
+static void ATPrepAlterModifyColumn(List** wqueue, AlteredTableInfo* tab, Relation rel, bool recurse,
+ bool recursing, AlterTableCmd* cmd, LOCKMODE lockmode)
+{
+ ColumnDef* def = (ColumnDef*)cmd->def;
+ Node* tmp_expr = def->raw_default;
+ char* tmp_name = cmd->name;
+ if (def->generatedCol != ATTRIBUTE_GENERATED_STORED && (tmp_expr == NULL || !IsA(tmp_expr, AutoIncrement))) {
+ ATPrepCheckDefault(tmp_expr);
+ }
+
+ def->raw_default = NULL;
+ cmd->name = def->colname;
+ /* For ATPrepAlterColumnType, raw_default is used to convert the original data to the target type. */
+ ATPrepAlterColumnType(wqueue, tab, rel, recurse, recursing, cmd, lockmode);
+ cmd->name = tmp_name;
+ def->raw_default = tmp_expr;
+}
+
+static char* GetCreateViewCommand(Relation rel, HeapTuple tup, Form_pg_class reltup, Oid pg_rewrite_oid, Oid view_oid)
+{
+ StringInfoData buf;
+ ViewInfoForAdd* view_info = NULL;
+ char* view_options = NULL;
+ bool isnull = true;
+ const char* ns_name = quote_identifier(get_namespace_name(reltup->relnamespace));
+
+ initStringInfo(&buf);
+ appendStringInfo(&buf, "CREATE OR REPLACE ");
+ if (reltup->relpersistence == RELPERSISTENCE_TEMP) {
+ appendStringInfo(&buf, "TEMPORARY ");
+ }
+ if (ns_name) {
+ appendStringInfo(&buf, "VIEW %s.%s(", ns_name, quote_identifier(NameStr(reltup->relname)));
+ } else {
+ appendStringInfo(&buf, "VIEW %s(", quote_identifier(NameStr(reltup->relname)));
+ }
+ for (AttrNumber i = 1; i <= reltup->relnatts; i++) {
+ char* attname = get_relid_attribute_name(view_oid, i);
+ if (i == reltup->relnatts) {
+ appendStringInfo(&buf, "%s) ", quote_identifier(attname));
+ } else {
+ appendStringInfo(&buf, "%s, ", quote_identifier(attname));
+ }
+ }
+ Datum reloptions = SysCacheGetAttr(RELOID, tup, Anum_pg_class_reloptions, &isnull);
+ if (!isnull) {
+ Datum sep = CStringGetTextDatum(", ");
+ Datum txt = OidFunctionCall2(F_ARRAY_TO_TEXT, reloptions, sep);
+ view_options = TextDatumGetCString(txt);
+ }
+ if (view_options && strlen(view_options) > 0) {
+ appendStringInfo(&buf, "WITH (%s) ", view_options);
+ }
+ pfree_ext(view_options);
+ /* concat CREATE VIEW command with query */
+ view_info = GetViewInfoFirstAfter(rel, pg_rewrite_oid, true);
+ if (view_info == NULL) {
+ pfree_ext(buf.data);
+ return NULL; /* should not happen */
+ }
+ appendStringInfo(&buf, "AS %s", view_info->query_string);
+ pfree_ext(view_info->query_string);
+ pfree_ext(view_info);
+ return buf.data;
+}
+
+static void ATAlterRecordRebuildView(AlteredTableInfo* tab, Relation rel, Oid pg_rewrite_oid, bool type_changed)
+{
+ HeapTuple tup;
+ char* view_def = NULL;
+ Oid view_oid = get_rewrite_relid(pg_rewrite_oid, true);
+ /* the view has been recorded */
+ if (list_member_oid(tab->changedViewOids, view_oid) || !type_changed) {
+ return;
+ }
+ /* get pg_class tuple by view oid */
+ tup = SearchSysCache1(RELOID, ObjectIdGetDatum(view_oid));
+ if (!HeapTupleIsValid(tup)) {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+ errmsg("Invalid modify column operation"),
+ errdetail("modify or change a column used by materialized view or rule is not supported")));
+ }
+ Form_pg_class reltup = (Form_pg_class)GETSTRUCT(tup);
+ if (reltup->relkind != RELKIND_VIEW) {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+ errmsg("Invalid modify column operation"),
+ errdetail("modify or change a column used by materialized view or rule is not supported")));
+ }
+ /* print CREATE VIEW command */
+ view_def = GetCreateViewCommand(rel, tup, reltup, pg_rewrite_oid, view_oid);
+ ReleaseSysCache(tup);
+ if (view_def) {
+ /* record it */
+ tab->changedViewOids = lappend_oid(tab->changedViewOids, view_oid);
+ tab->changedViewDefs = lappend(tab->changedViewDefs, view_def);
+ }
+}
+
+static Node* CookRlspolicyQual(Relation rel, Node* src_qual)
+{
+ ParseState* pstate = make_parsestate(NULL);
+ RangeTblEntry* rte = addRangeTableEntryForRelation(pstate, rel, NULL, false, false);
+ addRTEtoQuery(pstate, rte, false, true, true);
+ /* Transform expr clause */
+ Node *cooked_qual = transformWhereClause(pstate, src_qual, "POLICY");
+ /* Take care of collations */
+ assign_expr_collations(pstate, cooked_qual);
+ pfree(pstate);
+ return cooked_qual;
+}
+
+/*
+ * Find a row-level security policy by OID and rebuild its qual expression tree (replacing Var nodes).
+ */
+static void ATAlterModifyRebuildRlspolicyExpr(Relation rel, Oid pg_rlspolicy_oid)
+{
+ Relation rlsp_rel;
+ ScanKeyData scankey;
+ SysScanDesc scan;
+ HeapTuple tuple;
+ Datum values[Natts_pg_rlspolicy] = {0};
+ bool nulls[Natts_pg_rlspolicy] = {0};
+ bool replaces[Natts_pg_rlspolicy] = {0};
+ Datum datum;
+ bool isnull = false;
+ char* polqual = NULL;
+ Node *expr = NULL;
+
+ ScanKeyInit(&scankey, ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(pg_rlspolicy_oid));
+ rlsp_rel = heap_open(RlsPolicyRelationId, RowExclusiveLock);
+ scan = systable_beginscan(rlsp_rel, PgRlspolicyOidIndex, true, NULL, 1, &scankey);
+ tuple = systable_getnext(scan);
+ if (HeapTupleIsValid(tuple)) {
+ datum = heap_getattr(tuple, Anum_pg_rlspolicy_polqual, RelationGetDescr(rlsp_rel), &isnull);
+ if (!isnull) {
+ polqual = TextDatumGetCString(datum);
+ /* rebuild polqual expression */
+ expr = (Node*)stringToNode_skip_extern_fields(polqual);
+ expr = ReparseSingleRelExpr(rel, expr);
+ expr = CookRlspolicyQual(rel, expr);
+ pfree_ext(polqual);
+ /* update polqual */
+ polqual = nodeToString(expr);
+ values[Anum_pg_rlspolicy_polqual - 1] = CStringGetTextDatum(polqual);
+ replaces[Anum_pg_rlspolicy_polqual - 1] = true;
+ tuple = heap_modify_tuple(tuple, RelationGetDescr(rlsp_rel), values, nulls, replaces);
+ simple_heap_update(rlsp_rel, &tuple->t_self, tuple);
+ CatalogUpdateIndexes(rlsp_rel, tuple);
+ }
+ }
+
+ systable_endscan(scan);
+ heap_close(rlsp_rel, RowExclusiveLock);
+ pfree(DatumGetPointer(values[Anum_pg_rlspolicy_polqual - 1]));
+ pfree_ext(polqual);
+}
+
+static void ATHandleClassObjectDependOnModifiedColumn(AlteredTableInfo* tab, Relation dep_rel,
+ ObjectAddress* object)
+{
+ char relKind = get_rel_relkind(object->objectId);
+
+ if (relKind == RELKIND_INDEX || relKind == RELKIND_GLOBAL_INDEX) {
+ Assert(object->objectSubId == 0);
+ Oid refobjid;
+ if (!list_member_oid(tab->changedConstraintOids, object->objectId) &&
+ CheckIndexIsConstraint(dep_rel, object->objectId, &refobjid)) {
+ tab->changedConstraintOids = lappend_oid(tab->changedConstraintOids, refobjid);
+ tab->changedConstraintDefs =
+ lappend(tab->changedConstraintDefs, pg_get_constraintdef_string(refobjid));
+ } else if (!list_member_oid(tab->changedIndexOids, object->objectId)) {
+ /*
+ * Question: alter table set datatype and table index execute concurrently, data inconsistency
+ * occurs. The index file is deleted and metadata is left. Because the data type is not locked
+ * after modification, which ultimately leads to could not open file. Alter table column set
+ * datatype maybe trigger index operation but index is not locked. When the index data is
+ * inconsistent, we can use "reindex index" to repair the index.
+ * Solution: we should lock index at the beginning. The ACCESS_EXCLUSIVE_LOCK for index is used
+ * because we think ACCESS_EXCLUSIVE_LOCK for data table will block any operation and index
+ * will be not used to query data. This operation will block individual index operations,
+ * such as reindex index\set index tablespace.
+ * Testcase: alter table row_table alter column col_varchar set data type text,alter column
+ * col_smallint set data type bigint + alter index idx set tablespace.
+ */
+ LockRelationOid(object->objectId, AccessExclusiveLock);
+ tab->changedIndexOids = lappend_oid(tab->changedIndexOids, object->objectId);
+ tab->changedIndexDefs = lappend(tab->changedIndexDefs, pg_get_indexdef_string(object->objectId));
+ }
+ } else if (RELKIND_IS_SEQUENCE(relKind)) {
+ /*
+ * This must be a SERIAL or AUTO_INCREMENT column's sequence. We need not do anything to it.
+ */
+ Assert(object->objectSubId == 0);
+ } else if (relKind == RELKIND_RELATION && object->objectSubId != 0 &&
+ GetGenerated(object->objectId, object->objectSubId)) {
+ if (!list_member_int(tab->changedGeneratedCols, object->objectSubId)) {
+ tab->changedGeneratedCols = lappend_int(tab->changedGeneratedCols, object->objectSubId);
+ }
+ } else {
+ /* Not expecting any other direct dependencies... */
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("unexpected object depending on column: %s", getObjectDescription((object)))));
+ }
+}
+
+static void ATAlterRecordRebuildConstraint(AlteredTableInfo* tab, Oid constraint_oid, Form_pg_depend found_dep)
+{
+ if (!list_member_oid(tab->changedConstraintOids, constraint_oid)) {
+ char* defstring = pg_get_constraintdef_string(constraint_oid);
+ /*
+ * Put NORMAL dependencies at the front of the list and
+ * AUTO dependencies at the back. This makes sure that
+ * foreign-key constraints depending on this column will
+ * be dropped before unique or primary-key constraints of
+ * the column; which we must have because the FK
+ * constraints depend on the indexes belonging to the
+ * unique constraints.
+ */
+ if (found_dep->deptype == DEPENDENCY_NORMAL) {
+ tab->changedConstraintOids = lcons_oid(constraint_oid, tab->changedConstraintOids);
+ tab->changedConstraintDefs = lcons(defstring, tab->changedConstraintDefs);
+ } else {
+ tab->changedConstraintOids = lappend_oid(tab->changedConstraintOids, constraint_oid);
+ tab->changedConstraintDefs = lappend(tab->changedConstraintDefs, defstring);
+ }
+ }
+}
+
+static void ATAlterRecordRebuildTrigger(AlteredTableInfo* tab, Oid trigger_oid, bool type_changed)
+{
+ if (!list_member_oid(tab->changedTriggerOids, trigger_oid) && type_changed) {
+ char* defstring = pg_get_triggerdef_string(trigger_oid);
+ tab->changedTriggerOids = lappend_oid(tab->changedTriggerOids, trigger_oid);
+ tab->changedTriggerDefs = lappend(tab->changedTriggerDefs, defstring);
+ }
+}
+
+static void ATAlterRecordRebuildRlsp(AlteredTableInfo* tab, Oid rlsp_oid, bool type_changed)
+{
+ if (!list_member_oid(tab->changedRLSPolicies, rlsp_oid) && type_changed) {
+ tab->changedRLSPolicies = lappend_oid(tab->changedRLSPolicies, rlsp_oid);
+ }
+}
+
+static void ATHandleObjectsDependOnModifiedColumn(AlteredTableInfo* tab, Relation rel,
+ Form_pg_attribute pg_attr, AttrNumber attnum, bool type_changed)
+{
+ ScanKeyData key[3];
+ SysScanDesc scan;
+ HeapTuple dep_tup;
+ Relation dep_rel = heap_open(DependRelationId, RowExclusiveLock);
+
+ ScanKeyInit(
+ &key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationRelationId));
+ ScanKeyInit(
+ &key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(rel)));
+ ScanKeyInit(&key[2], Anum_pg_depend_refobjsubid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum((int32)attnum));
+
+ scan = systable_beginscan(dep_rel, DependReferenceIndexId, true, NULL, 3, key);
+
+ while (HeapTupleIsValid(dep_tup = systable_getnext(scan))) {
+ Form_pg_depend found_dep = (Form_pg_depend)GETSTRUCT(dep_tup);
+ ObjectAddress found_object;
+
+ /* We don't expect any PIN dependencies on columns */
+ if (found_dep->deptype == DEPENDENCY_PIN) {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+ errmsg("Invalid modify column operation"),
+ errdetail("cannot modify or change a pinned column")));
+ }
+
+ found_object.classId = found_dep->classid;
+ found_object.objectId = found_dep->objid;
+ found_object.objectSubId = found_dep->objsubid;
+ switch (getObjectClass(&found_object)) {
+ case OCLASS_CLASS:
+ ATHandleClassObjectDependOnModifiedColumn(tab, dep_rel, &found_object);
+ break;
+
+ case OCLASS_CONSTRAINT:
+ Assert(found_object.objectSubId == 0);
+ ATAlterRecordRebuildConstraint(tab, found_object.objectId, found_dep);
+ break;
+
+ case OCLASS_REWRITE:
+ ATAlterRecordRebuildView(tab, rel, found_object.objectId, type_changed);
+ break;
+
+ case OCLASS_TRIGGER:
+ Assert(found_object.objectSubId == 0);
+ ATAlterRecordRebuildTrigger(tab, found_object.objectId, type_changed);
+ break;
+
+ case OCLASS_RLSPOLICY:
+ Assert(found_object.objectSubId == 0);
+ ATAlterRecordRebuildRlsp(tab, found_object.objectId, type_changed);
+ break;
+
+ case OCLASS_DEFAULT:
+ break;
+
+ case OCLASS_CL_CACHED_COLUMN:
+ ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+ errmsg("Invalid modify column operation"),
+ errdetail("modify or change encrypted column is not supported")));
+ break;
+
+ case OCLASS_PROC:
+ case OCLASS_TYPE:
+ case OCLASS_CAST:
+ case OCLASS_COLLATION:
+ case OCLASS_CONVERSION:
+ case OCLASS_LANGUAGE:
+ case OCLASS_LARGEOBJECT:
+ case OCLASS_OPERATOR:
+ case OCLASS_OPCLASS:
+ case OCLASS_OPFAMILY:
+ case OCLASS_AMOP:
+ case OCLASS_AMPROC:
+ case OCLASS_SCHEMA:
+ case OCLASS_TSPARSER:
+ case OCLASS_TSDICT:
+ case OCLASS_TSTEMPLATE:
+ case OCLASS_TSCONFIG:
+ case OCLASS_ROLE:
+ case OCLASS_DATABASE:
+ case OCLASS_TBLSPACE:
+ case OCLASS_FDW:
+ case OCLASS_FOREIGN_SERVER:
+ case OCLASS_USER_MAPPING:
+ case OCLASS_DEFACL:
+ case OCLASS_EXTENSION:
+ case OCLASS_DATA_SOURCE:
+ case OCLASS_GLOBAL_SETTING_ARGS:
+ case OCLASS_GS_CL_PROC:
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("unexpected object depending on column: %s", getObjectDescription(&found_object))));
+ break;
+ default:
+ ereport(ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE),
+ errmsg("unrecognized object class: %u", found_object.classId)));
+ }
+ }
+
+ systable_endscan(scan);
+ DelDependencONDataType(rel, dep_rel, pg_attr);
+ heap_close(dep_rel, RowExclusiveLock);
+}
+
+static NewColumnValue* findNewColumnValue(AlteredTableInfo* tab, const char* col_name)
+{
+ NewColumnValue* result = NULL;
+
+ foreach_cell(vcell, tab->newvals) {
+ result = (NewColumnValue*)lfirst(vcell);
+ if (result->col_name != NULL && strcmp(col_name, result->col_name) == 0) {
+ return result;
+ }
+ }
+ return NULL;
+}
+
+static int128 getAutoIncrementValue(Relation rel, ColumnDef* def, AttrNumber attnum)
+{
+ AttrNumber autoinc_attnum = RelAutoIncAttrNum(rel);
+ if (autoinc_attnum > 0) {
+ if (autoinc_attnum == attnum) {
+ if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP) {
+ return *rel->rd_att->constr->cons_autoinc->next;
+ } else {
+ return autoinc_get_nextval(RelAutoIncSeqOid(rel));
+ }
+ } else if (def->raw_default && IsA(def->raw_default, AutoIncrement)) {
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ (errmsg("Incorrect table definition, there can be only one auto_increment column"))));
+ }
+ }
+ return 0;
+}
+
+static void ATAlterModifyAutoinColumn(AlteredTableInfo* tab, Relation rel, ColumnDef* def,
+ AttrNumber attnum, int128 autoinc)
+{
+ if (autoinc > 0) { /* Column is auto_increment before modified, keep auto_increment value. */
+ if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP) {
+ tmptable_autoinc_setval(
+ rel->rd_rel->relfilenode, rel->rd_att->constr->cons_autoinc->next, autoinc, false);
+ } else {
+ autoinc_setval(RelAutoIncSeqOid(rel), autoinc, false);
+ }
+ } else { /* Column is not auto_increment before modified, need rewrite table later. */
+ Expr* defval = NULL;
+ NewColumnValue* newval = findNewColumnValue(tab, def->colname);
+ if (newval != NULL) {
+ newval->is_autoinc = true;
+ tab->rewrite = true;
+ } else {
+ defval = (Expr*)build_column_default(rel, attnum);
+ Assert(defval != NULL);
+ ATExecAppendDefValExpr(attnum, defval, tab, def, true, false);
+ }
+ }
+}
+
+static void ATAlterModifyColumnDefault(AlteredTableInfo* tab, Relation rel, ColumnDef* def,
+ AttrNumber attnum, int128 autoinc)
+{
+ RawColumnDefault raw_col_def;
+
+ raw_col_def.attnum = attnum;
+ raw_col_def.raw_default = (Node*)copyObject(def->raw_default);
+ raw_col_def.generatedCol = def->generatedCol;
+ raw_col_def.update_expr = (Node*)copyObject(def->update_default);
+ (void)AddRelationNewConstraints(rel, list_make1(&raw_col_def), NIL, false, true);
+ CommandCounterIncrement();
+ /* AUTO_INCREMENT and GENERATED COLUMN need rewrite table */
+ if (RelAutoIncAttrNum(rel) == attnum) {
+ ATAlterModifyAutoinColumn(tab, rel, def, attnum, autoinc);
+ if (list_length(tab->changedGeneratedCols) > 0) {
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ (errmsg("generated column cannot refer to auto_increment column"))));
+ }
+ } else if (def->generatedCol == ATTRIBUTE_GENERATED_STORED) {
+ if (list_length(tab->changedGeneratedCols) > 0) {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+ errmsg("Invalid modify column operation"),
+ errdetail("A generated column cannot reference another generated column.")));
+ }
+
+ NewColumnValue* newval = findNewColumnValue(tab, def->colname);
+ Expr* defval = (Expr*)build_column_default(rel, attnum);
+ Assert(defval != NULL);
+ if (newval != NULL) {
+ newval->expr = expression_planner(defval);
+ newval->is_generated = true;
+ tab->rewrite = true;
+ } else {
+ ATExecAppendDefValExpr(attnum, defval, tab, def, false, false);
+ }
+ }
+}
+
+/*
+ * Rebulid generated column expression and update pg_attrdef.
+ * Set rewrite if need.
+ */
+static void ATRewriteChangedGeneratedColumn(AlteredTableInfo* tab, Relation rel, AttrNumber gen_attnum)
+{
+ char *gen_col_name = NameStr(rel->rd_att->attrs[gen_attnum - 1].attname);
+ NewColumnValue* newval = findNewColumnValue(tab, gen_col_name);
+ Expr* defval = (Expr*)RebuildGeneratedColumnExpr(rel, gen_attnum);
+ Assert(defval != NULL);
+ /*
+ * Rebuilt generated column expression does not affect the data of the generated column.
+ * Replace the expression only when the generated column needs to be rewritten.
+ */
+ if (newval == NULL) {
+ newval = (NewColumnValue*)palloc0(sizeof(NewColumnValue));
+ newval->attnum = gen_attnum;
+ newval->expr = expression_planner(defval);
+ newval->is_generated = true;
+ newval->is_autoinc = false;
+ newval->generate_attnum = 0;
+ newval->col_name = pstrdup(gen_col_name);
+ tab->newvals = lappend(tab->newvals, newval);
+ tab->rewrite = true;
+ } else if (newval->is_generated) {
+ newval->expr = expression_planner(defval);
+ tab->rewrite = true;
+ }
+}
+
+static bool ModifiedColumnIsPrimaryKey(AlteredTableInfo* tab, AttrNumber attrnum)
+{
+ foreach_cell(cell, tab->changedConstraintOids) {
+ Datum* keys = NULL;
+ Datum conkey_datum;
+ int key_count;
+ bool isnull = false;
+ Oid constraint_oid = lfirst_oid(cell);
+ HeapTuple tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(constraint_oid));
+ if (!HeapTupleIsValid(tuple)) {
+ ereport(ERROR,
+ (errcode(ERRCODE_CACHE_LOOKUP_FAILED),
+ errmsg("cache lookup failed for constraint %u", constraint_oid)));
+ }
+
+ if (((Form_pg_constraint)GETSTRUCT(tuple))->contype != CONSTRAINT_PRIMARY) {
+ ReleaseSysCache(tuple);
+ continue;
+ }
+
+ conkey_datum = SysCacheGetAttr(CONSTROID, tuple, Anum_pg_constraint_conkey, &isnull);
+ if (isnull) {
+ ReleaseSysCache(tuple);
+ continue;
+ }
+
+ deconstruct_array(DatumGetArrayTypeP(conkey_datum), INT2OID, sizeof(int16), true, 's', &keys, NULL, &key_count);
+ for (int i = 0; i < key_count; i++) {
+ if (DatumGetInt16(keys[i]) == attrnum) {
+ pfree_ext(keys);
+ ReleaseSysCache(tuple);
+ return true;
+ }
+ }
+
+ pfree_ext(keys);
+ ReleaseSysCache(tuple);
+ }
+
+ return false;
+}
+
+static void ATExecAlterModifyColumn(AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd)
+{
+ ColumnDef* def = (ColumnDef*)cmd->def;
+ AttrNumber attnum;
+ HeapTuple attr_tuple;
+ HeapTuple type_tuple;
+ Form_pg_attribute pg_attr;
+ Form_pg_type pg_type;
+ Relation att_rel;
+ Oid typid;
+ int32 typmod = -1;
+ Oid collid = InvalidOid;
+ AclResult aclresult;
+ int128 autoinc = 0;
+ char* col_name = def->colname;
+ bool type_changed = false;
+ bool is_first_after = cmd->is_first || cmd->after_name != NULL;
+
+ att_rel = heap_open(AttributeRelationId, RowExclusiveLock);
+ attnum = get_attnum(RelationGetRelid(rel), col_name);
+ if (attnum == InvalidAttrNumber) {
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_COLUMN),
+ errmsg("column \"%s\" of relation \"%s\" does not exist", col_name, RelationGetRelationName(rel))));
+ }
+
+ /* Check and get new type and collation */
+ type_tuple = typenameType(NULL, def->typname, &typmod);
+ pg_type = (Form_pg_type)GETSTRUCT(type_tuple);
+ typid = HeapTupleGetOid(type_tuple);
+ aclresult = pg_type_aclcheck(typid, GetUserId(), ACL_USAGE);
+ if (aclresult != ACLCHECK_OK) {
+ aclcheck_error_type(aclresult, typid);
+ }
+ collid = GetColumnDefCollation(NULL, def, typid);
+ CheckAttributeType(col_name, typid, collid, list_make1_oid(rel->rd_rel->reltype), false);
+
+ /* Check and save AUTO_INCREMENT */
+ autoinc = getAutoIncrementValue(rel, def, attnum);
+
+ /* drop old default */
+ RemoveAttrDefault(RelationGetRelid(rel), attnum, DROP_RESTRICT, false, true);
+
+ /* Look up the target column */
+ attr_tuple = SearchSysCacheCopyAttName(RelationGetRelid(rel), col_name);
+ if (!HeapTupleIsValid(attr_tuple)) {
+ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN),
+ errmsg("column \"%s\" of relation \"%s\" does not exist", col_name, RelationGetRelationName(rel))));
+ }
+ pg_attr = (Form_pg_attribute)GETSTRUCT(attr_tuple);
+ type_changed = (pg_attr->atttypid != typid || pg_attr->atttypmod != typmod || pg_attr->attcollation != collid);
+ /* Check column partkey */
+ if (is_partition_column(rel, attnum)) {
+ if (type_changed) {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+ errmsg("Invalid modify column operation"),
+ errdetail("modify or change partition key column is not supported")));
+ } else if (def->generatedCol) {
+ ereport(ERROR,
+ (errmodule(MOD_GEN_COL), errcode(ERRCODE_INVALID_OPERATION),
+ errmsg("Invalid modify column operation"),
+ errdetail("cannot modify or change a partition key column as a generated column")));
+ }
+ }
+
+ /* Working with objects that depend on the column being modified. */
+ ATHandleObjectsDependOnModifiedColumn(tab, rel, pg_attr, attnum, type_changed);
+ /* Primary key column must be not null. */
+ def->is_not_null = def->is_not_null ? def->is_not_null : ModifiedColumnIsPrimaryKey(tab, attnum);
+ if (!pg_attr->attnotnull && def->is_not_null) {
+ tab->new_notnull = true;
+ }
+
+ if (is_first_after) {
+ UpdateNewvalsAttnum(tab, rel, cmd, col_name);
+ }
+
+ pg_attr->atttypid = typid;
+ pg_attr->attlen = pg_type->typlen;
+ pg_attr->atttypmod = typmod;
+ pg_attr->attbyval = pg_type->typbyval;
+ pg_attr->attndims = list_length(def->typname->arrayBounds);
+ pg_attr->attstorage = pg_type->typstorage;
+ pg_attr->attalign = pg_type->typalign;
+ pg_attr->attcollation = collid;
+ pg_attr->attnotnull = def->is_not_null;
+ pg_attr->attislocal = def->is_local;
+ pg_attr->attkvtype = def->kvtype;
+ pg_attr->attinhcount = def->inhcount;
+ pg_attr->atthasdef = false;
+ ReleaseSysCache(type_tuple);
+ simple_heap_update(att_rel, &attr_tuple->t_self, attr_tuple);
+ CatalogUpdateIndexes(att_rel, attr_tuple);
+ heap_close(att_rel, RowExclusiveLock);
+
+ /* Install dependencies on new datatype and collation */
+ add_column_datatype_dependency(RelationGetRelid(rel), attnum, typid);
+ add_column_collation_dependency(RelationGetRelid(rel), attnum, collid);
+
+ /* Drop any pg_statistic entry for the column, since it's now wrong type */
+ if (RELATION_IS_GLOBAL_TEMP(rel)) {
+ remove_gtt_att_statistic(RelationGetRelid(rel), attnum);
+ } else {
+ RemoveStatistics<'c'>(RelationGetRelid(rel), attnum);
+ }
+
+ if (def->raw_default || def->update_default) {
+ CommandCounterIncrement();
+ ATAlterModifyColumnDefault(tab, rel, def, attnum, autoinc);
+ }
+
+ foreach_cell(attcell, tab->changedGeneratedCols) {
+ CommandCounterIncrement();
+ ATRewriteChangedGeneratedColumn(tab, rel, (AttrNumber)lfirst_int(attcell));
+ }
+ list_free_ext(tab->changedGeneratedCols);
+
+ foreach_cell(rlspcell, tab->changedRLSPolicies) {
+ CommandCounterIncrement();
+ ATAlterModifyRebuildRlspolicyExpr(rel, lfirst_oid(rlspcell));
+ }
+ list_free_ext(tab->changedRLSPolicies);
+
+ /* recreate views */
+ foreach_cell(view_def_cell, tab->changedViewDefs) {
+ CommandCounterIncrement();
+ char* cmd_str = (char*)lfirst(view_def_cell);
+ List* raw_parsetree_list = raw_parser(cmd_str);
+ Node* stmt = (Node*)linitial(raw_parsetree_list);
+ Assert(IsA(stmt, ViewStmt));
+ DefineView((ViewStmt*)stmt, cmd_str);
+ }
+ list_free_ext(tab->changedViewOids);
+ list_free_ext(tab->changedViewDefs);
+
+ if (cmd->is_first || cmd->after_name != NULL) {
+ tab->is_first_after = true;
+ CommandCounterIncrement();
+
+ AlterColumnToFirstAfter(tab, rel, cmd, attnum);
+ }
+}
+
+static Node* RecookAutoincAttrDefault(Relation rel, int attrno, Oid targettype, int targettypmod)
+{
+ TupleDesc rd_att = rel->rd_att;
+ AutoIncrement* aexpr = NULL;
+ Node* expr = NULL;
+ AttrDefault* defval = rd_att->constr->defval;
+ int ndef = rd_att->constr->num_defval;
+
+ CheckAutoIncrementDatatype(targettype, rd_att->attrs[attrno - 1].attname.data);
+ while (--ndef >= 0) {
+ if (attrno == defval[ndef].adnum) {
+ expr = (Node*)stringToNode_skip_extern_fields(defval[ndef].adbin);
+ break;
+ }
+ }
+ Assert(expr != NULL);
+ Assert(IsA(expr, AutoIncrement));
+ aexpr = (AutoIncrement*)expr;
+ (void)find_coercion_pathway(INT16OID, targettype, COERCION_ASSIGNMENT, &aexpr->autoincin_funcid);
+ (void)find_coercion_pathway(targettype, INT16OID, COERCION_ASSIGNMENT, &aexpr->autoincout_funcid);
+ aexpr->expr = strip_implicit_coercions(aexpr->expr);
+ aexpr->expr = coerce_to_target_type(NULL, /* no UNKNOWN params */
+ aexpr->expr,
+ exprType(aexpr->expr),
+ targettype,
+ targettypmod,
+ COERCION_ASSIGNMENT,
+ COERCE_IMPLICIT_CAST,
+ -1);
+ return (Node*)aexpr;
+}
diff --git a/src/gausskernel/optimizer/commands/typecmds.cpp b/src/gausskernel/optimizer/commands/typecmds.cpp
index ec06b7969..3bcac419a 100644
--- a/src/gausskernel/optimizer/commands/typecmds.cpp
+++ b/src/gausskernel/optimizer/commands/typecmds.cpp
@@ -2177,6 +2177,7 @@ Oid DefineCompositeType(RangeVar* typevar, List* coldeflist)
createStmt->oncommit = ONCOMMIT_NOOP;
createStmt->tablespacename = NULL;
createStmt->if_not_exists = false;
+ createStmt->charset = PG_INVALID_ENCODING;
/*
* Check for collision with an existing type name. If there is one and
diff --git a/src/gausskernel/optimizer/commands/user.cpp b/src/gausskernel/optimizer/commands/user.cpp
index fa384bbac..25e1b98ad 100755
--- a/src/gausskernel/optimizer/commands/user.cpp
+++ b/src/gausskernel/optimizer/commands/user.cpp
@@ -159,7 +159,7 @@ static inline void check_iteration_count(int iteration_count)
}
}
/* Check if current user has createrole privileges */
-static bool have_createrole_privilege(void)
+bool have_createrole_privilege(void)
{
return has_createrole_privilege(GetUserId());
}
@@ -543,7 +543,7 @@ static inline void clean_role_password(const DefElem* dpassword)
* True iff role name starts with the gs_role_ prefix.
* The prefix gs_role_ is reserverd for the predefined role names.
*/
-static bool IsReservedRoleName(const char* name)
+bool IsReservedRoleName(const char* name)
{
if (strncmp(name, "gs_role_", strlen("gs_role_")) == 0) {
return true;
diff --git a/src/gausskernel/optimizer/commands/vacuum.cpp b/src/gausskernel/optimizer/commands/vacuum.cpp
index b78f21113..60ed8b888 100644
--- a/src/gausskernel/optimizer/commands/vacuum.cpp
+++ b/src/gausskernel/optimizer/commands/vacuum.cpp
@@ -578,9 +578,9 @@ static vacuum_object *GetVacuumObjectOfSubpartition(VacuumStmt* vacstmt, Oid rel
MemoryContext oldcontext = NULL;
vacuum_object* vacObj = NULL;
- subpartitionid = partitionNameGetPartitionOid(relationid,
+ subpartitionid = SubPartitionNameGetSubPartitionOid(relationid,
vacstmt->relation->subpartitionname,
- PART_OBJ_TYPE_TABLE_SUB_PARTITION,
+ AccessShareLock,
AccessShareLock,
true,
false,
@@ -732,7 +732,7 @@ List* get_rel_oids(Oid relid, VacuumStmt* vacstmt)
/* 1.a partition */
if (PointerIsValid(vacstmt->relation->partitionname)) {
- partitionid = partitionNameGetPartitionOid(relationid,
+ partitionid = PartitionNameGetPartitionOid(relationid,
vacstmt->relation->partitionname,
PART_OBJ_TYPE_TABLE_PARTITION,
AccessShareLock,
@@ -2743,6 +2743,7 @@ static bool vacuum_rel(Oid relid, VacuumStmt* vacstmt, bool do_toast)
pgstat_report_waitstatus_relname(STATE_VACUUM_FULL, get_nsp_relname(relid));
GpiVacuumFullMainPartiton(relid);
CBIVacuumFullMainPartiton(relid);
+ RelationResetPartitionno(relid, AccessExclusiveLock);
pgstat_report_vacuum(relid, InvalidOid, false, 0);
/* Record changecsn when VACUUM FULL occur */
@@ -3199,6 +3200,101 @@ void vac_close_part_indexes(
pfree_ext(Irel);
}
+static void CalculateSubPartitionedRelStats(_in_ Relation partitionRel, _in_ Relation partRel,
+ _out_ BlockNumber *totalPages, _out_ BlockNumber *totalVisiblePages, _out_ double *totalTuples,
+ _out_ TransactionId *minFrozenXid, _out_ MultiXactId *minMultiXid)
+{
+ Assert(RelationIsSubPartitioned(partitionRel));
+
+ BlockNumber pages = 0;
+ BlockNumber allVisiblePages = 0;
+ double tuples = 0;
+ ScanKeyData partKey[2];
+ SysScanDesc partScan = NULL;
+ HeapTuple partTuple = NULL;
+ ScanKeyData subpartKey[2];
+ SysScanDesc subpartScan = NULL;
+ HeapTuple subpartTuple = NULL;
+ Form_pg_partition partForm;
+ /* we set xid to max as an initial value, then we find the min xid in all subpartitions */
+ TransactionId frozenXid = MaxTransactionId;
+ MultiXactId multiXid = MaxMultiXactId;
+ bool isNull = false;
+
+ ScanKeyInit(&partKey[0], Anum_pg_partition_parttype, BTEqualStrategyNumber, F_CHAREQ,
+ CharGetDatum(PART_OBJ_TYPE_TABLE_PARTITION));
+ ScanKeyInit(&partKey[1], Anum_pg_partition_parentid, BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(RelationGetRelid(partitionRel)));
+ partScan = systable_beginscan(partRel, PartitionParentOidIndexId, true, NULL, 2, partKey);
+
+ while (HeapTupleIsValid(partTuple = systable_getnext(partScan))) {
+ ScanKeyInit(&subpartKey[0], Anum_pg_partition_parttype, BTEqualStrategyNumber, F_CHAREQ,
+ CharGetDatum(PART_OBJ_TYPE_TABLE_SUB_PARTITION));
+ ScanKeyInit(&subpartKey[1], Anum_pg_partition_parentid, BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(HeapTupleGetOid(partTuple)));
+ subpartScan = systable_beginscan(partRel, PartitionParentOidIndexId, true, NULL, 2, subpartKey);
+
+ while (HeapTupleIsValid(subpartTuple = systable_getnext(subpartScan))) {
+ partForm = (Form_pg_partition)GETSTRUCT(subpartTuple);
+ Datum xid64datum = tableam_tops_tuple_getattr(subpartTuple, Anum_pg_partition_relfrozenxid64,
+ RelationGetDescr(partRel), &isNull);
+ TransactionId relfrozenxid;
+ if (isNull) {
+ relfrozenxid = partForm->relfrozenxid;
+ if (TransactionIdPrecedes(t_thrd.xact_cxt.ShmemVariableCache->nextXid, relfrozenxid) ||
+ !TransactionIdIsNormal(relfrozenxid))
+ relfrozenxid = FirstNormalTransactionId;
+ } else {
+ relfrozenxid = DatumGetTransactionId(xid64datum);
+ }
+ if (TransactionIdPrecedes(relfrozenxid, frozenXid)) {
+ frozenXid = relfrozenxid;
+ }
+
+#ifndef ENABLE_MULTIPLE_NODES
+ Datum mxid64datum = tableam_tops_tuple_getattr(subpartTuple, Anum_pg_partition_relminmxid,
+ RelationGetDescr(partRel), &isNull);
+ MultiXactId relminmxid = isNull ? FirstMultiXactId : DatumGetTransactionId(mxid64datum);
+ if (TransactionIdPrecedes(relminmxid, multiXid)) {
+ multiXid = relminmxid;
+ }
+#endif
+
+            /* calculate pages and tuples from all the subpartitions. */
+ pages += (uint32) partForm->relpages;
+ allVisiblePages += partForm->relallvisible;
+ tuples += partForm->reltuples;
+
+ }
+ systable_endscan(subpartScan);
+ }
+ systable_endscan(partScan);
+
+    /* if xid is still the max value, no subpartitions were found, so we set it invalid */
+ if (frozenXid == MaxTransactionId) {
+ frozenXid = InvalidTransactionId;
+ }
+ if (multiXid == MaxMultiXactId) {
+ multiXid = InvalidMultiXactId;
+ }
+
+ if (totalPages != NULL) {
+ *totalPages = pages;
+ }
+ if (totalVisiblePages != NULL) {
+ *totalVisiblePages = allVisiblePages;
+ }
+ if (totalTuples != NULL) {
+ *totalTuples = tuples;
+ }
+ if (minFrozenXid != NULL) {
+ *minFrozenXid = frozenXid;
+ }
+ if (minMultiXid != NULL) {
+ *minMultiXid = multiXid;
+ }
+}
+
/* Scan pg_partition to get all the partitions of the partitioned table,
* calculate all the pages, tuples, and the min frozenXid, multiXid
*/
@@ -3217,13 +3313,13 @@ void CalculatePartitionedRelStats(_in_ Relation partitionRel, _in_ Relation part
Assert(partitionRel->rd_rel->parttype == PARTTYPE_PARTITIONED_RELATION ||
partitionRel->rd_rel->parttype == PARTTYPE_SUBPARTITIONED_RELATION);
- if (partitionRel->rd_rel->parttype == PARTTYPE_SUBPARTITIONED_RELATION) {
- ScanKeyInit(&partKey[0],
- Anum_pg_partition_parttype,
- BTEqualStrategyNumber,
- F_CHAREQ,
- CharGetDatum(PART_OBJ_TYPE_TABLE_SUB_PARTITION));
- } else if (partitionRel->rd_rel->relkind == RELKIND_RELATION) {
+ if (RelationIsSubPartitioned(partitionRel)) {
+ CalculateSubPartitionedRelStats(partitionRel, partRel, totalPages, totalVisiblePages, totalTuples, minFrozenXid,
+ minMultiXid);
+ return;
+ }
+
+ if (partitionRel->rd_rel->relkind == RELKIND_RELATION) {
ScanKeyInit(&partKey[0],
Anum_pg_partition_parttype,
BTEqualStrategyNumber,
@@ -4031,6 +4127,7 @@ static void UstoreVacuumMainPartitionGPIs(Relation onerel, const VacuumStmt* vac
{
OidRBTree* invisibleParts = CreateOidRBTree();
Oid parentOid = RelationGetRelid(onerel);
+ bool lockInterval = false;
if (vacstmt->options & VACOPT_VERBOSE) {
elevel = VERBOSEMESSAGE;
@@ -4039,11 +4136,13 @@ static void UstoreVacuumMainPartitionGPIs(Relation onerel, const VacuumStmt* vac
}
/* Get invisable parts */
- if (ConditionalLockPartition(parentOid, ADD_PARTITION_ACTION, AccessShareLock, PARTITION_SEQUENCE_LOCK)) {
+ if (!RELATION_IS_INTERVAL_PARTITIONED(onerel)) {
PartitionGetAllInvisibleParts(parentOid, &invisibleParts);
- UnlockPartition(parentOid, ADD_PARTITION_ACTION, AccessShareLock, PARTITION_SEQUENCE_LOCK);
+ } else if (ConditionalLockPartitionObject(onerel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_SHARE_LOCK)) {
+ PartitionGetAllInvisibleParts(parentOid, &invisibleParts);
+ lockInterval = true;
}
-
+
/* In rbtree, rb_leftmost will return NULL if rbtree is empty. */
if (rb_leftmost(invisibleParts) == NULL) {
DestroyOidRBTree(&invisibleParts);
@@ -4076,17 +4175,19 @@ static void UstoreVacuumMainPartitionGPIs(Relation onerel, const VacuumStmt* vac
heap_close(classRel, RowExclusiveLock);
/*
- * Before clearing the global partition index of a partition table,
- * acquire a AccessShareLock on ADD_PARTITION_ACTION, and make sure that the interval partition
- * creation process will not be performed concurrently.
+     * Before clearing the global partition index of a partition table, acquire an AccessShareLock on
+ * INTERVAL_PARTITION_LOCK_SDEQUENCE, and make sure that the interval partition creation process will not be
+ * performed concurrently.
*/
OidRBTree* cleanedParts = CreateOidRBTree();
OidRBTreeUnionOids(cleanedParts, invisibleParts);
- if (ConditionalLockPartition(parentOid, ADD_PARTITION_ACTION, AccessShareLock, PARTITION_SEQUENCE_LOCK)) {
+ if (!RELATION_IS_INTERVAL_PARTITIONED(onerel)) {
PartitionSetEnabledClean(parentOid, cleanedParts, invisibleParts, true);
- UnlockPartition(parentOid, ADD_PARTITION_ACTION, AccessShareLock, PARTITION_SEQUENCE_LOCK);
+ } else if (lockInterval) {
+ PartitionSetEnabledClean(parentOid, cleanedParts, invisibleParts, true);
+ UnlockPartitionObject(onerel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_SHARE_LOCK);
} else {
- /* Updates reloptions of cleanedParts in pg_partition after GPI vacuum is executed */
+ /* Updates reloptions of cleanedParts in pg_partition after gpi lazy_vacuum is executed */
PartitionSetEnabledClean(parentOid, cleanedParts, invisibleParts, false);
}
@@ -4104,6 +4205,7 @@ static void GPIVacuumMainPartition(
OidRBTree* cleanedParts = CreateOidRBTree();
OidRBTree* invisibleParts = CreateOidRBTree();
Oid parentOid = RelationGetRelid(onerel);
+ bool lockInterval = false;
if (vacstmt->options & VACOPT_VERBOSE) {
elevel = VERBOSEMESSAGE;
@@ -4112,13 +4214,15 @@ static void GPIVacuumMainPartition(
}
/*
- * Before clearing the global partition index of a partition table,
- * acquire a AccessShareLock on ADD_PARTITION_ACTION, and make sure that the interval partition
- * creation process will not be performed concurrently.
+     * Before clearing the global partition index of a partition table, acquire an AccessShareLock on
+ * INTERVAL_PARTITION_LOCK_SDEQUENCE, and make sure that the interval partition creation process will not be
+ * performed concurrently.
*/
- if (ConditionalLockPartition(parentOid, ADD_PARTITION_ACTION, AccessShareLock, PARTITION_SEQUENCE_LOCK)) {
+ if (!RELATION_IS_INTERVAL_PARTITIONED(onerel)) {
PartitionGetAllInvisibleParts(parentOid, &invisibleParts);
- UnlockPartition(parentOid, ADD_PARTITION_ACTION, AccessShareLock, PARTITION_SEQUENCE_LOCK);
+ } else if (ConditionalLockPartitionObject(onerel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_SHARE_LOCK)) {
+ PartitionGetAllInvisibleParts(parentOid, &invisibleParts);
+ lockInterval = true;
}
vac_strategy = bstrategy;
@@ -4144,9 +4248,11 @@ static void GPIVacuumMainPartition(
* acquire a AccessShareLock on ADD_PARTITION_ACTION, and make sure that the interval partition
* creation process will not be performed concurrently.
*/
- if (ConditionalLockPartition(parentOid, ADD_PARTITION_ACTION, AccessShareLock, PARTITION_SEQUENCE_LOCK)) {
+ if (!RELATION_IS_INTERVAL_PARTITIONED(onerel)) {
PartitionSetEnabledClean(parentOid, cleanedParts, invisibleParts, true);
- UnlockPartition(parentOid, ADD_PARTITION_ACTION, AccessShareLock, PARTITION_SEQUENCE_LOCK);
+ } else if (lockInterval) {
+ PartitionSetEnabledClean(parentOid, cleanedParts, invisibleParts, true);
+ UnlockPartitionObject(onerel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_SHARE_LOCK);
} else {
/* Updates reloptions of cleanedParts in pg_partition after gpi lazy_vacuum is executed */
PartitionSetEnabledClean(parentOid, cleanedParts, invisibleParts, false);
diff --git a/src/gausskernel/optimizer/commands/vacuumlazy.cpp b/src/gausskernel/optimizer/commands/vacuumlazy.cpp
index b3f17898e..bd8156454 100644
--- a/src/gausskernel/optimizer/commands/vacuumlazy.cpp
+++ b/src/gausskernel/optimizer/commands/vacuumlazy.cpp
@@ -1139,8 +1139,10 @@ static IndexBulkDeleteResult** lazy_scan_heap(
vacuum_log_cleanup_info(onerel, vacrelstats);
/* Remove index entries */
- for (i = 0; i < nindexes; i++)
+ for (i = 0; i < nindexes; i++) {
+ vacuum_log_cleanup_info(Irel[i], vacrelstats);
lazy_vacuum_index(Irel[i], &indstats[i], vacrelstats, vac_strategy);
+ }
/* Remove tuples from heap */
lazy_vacuum_all_heap(onerel, vacrelstats);
@@ -1631,6 +1633,7 @@ static IndexBulkDeleteResult** lazy_scan_heap(
/* Remove index entries */
for (i = 0; i < nindexes; i++) {
if (!RelationIsCrossBucketIndex(Irel[i])) {
+ vacuum_log_cleanup_info(Irel[i], vacrelstats);
lazy_vacuum_index(Irel[i], &indstats[i], vacrelstats, vac_strategy);
}
}
diff --git a/src/gausskernel/optimizer/commands/verify.cpp b/src/gausskernel/optimizer/commands/verify.cpp
index 6dcc1a189..4a0c99622 100644
--- a/src/gausskernel/optimizer/commands/verify.cpp
+++ b/src/gausskernel/optimizer/commands/verify.cpp
@@ -763,7 +763,7 @@ static void DoGlobalVerifyRowRel(VacuumStmt* stmt, Oid relid, bool isDatabase)
if (!isDatabase) {
if (stmt->relation->partitionname != NULL) {
/* check the partition relation */
- Oid partOid = partitionNameGetPartitionOid(relid,
+ Oid partOid = PartitionNameGetPartitionOid(relid,
stmt->relation->partitionname,
PART_OBJ_TYPE_TABLE_PARTITION,
AccessShareLock,
@@ -845,7 +845,7 @@ static void DoGlobalVerifyColRel(VacuumStmt* stmt, Oid relid, bool isDatabase)
if (!isDatabase) {
if (stmt->relation->partitionname != NULL) {
/* check the partition relation */
- Oid partOid = partitionNameGetPartitionOid(relid,
+ Oid partOid = PartitionNameGetPartitionOid(relid,
stmt->relation->partitionname,
PART_OBJ_TYPE_TABLE_PARTITION,
AccessShareLock,
diff --git a/src/gausskernel/optimizer/commands/view.cpp b/src/gausskernel/optimizer/commands/view.cpp
index a390efee8..34b5b8338 100644
--- a/src/gausskernel/optimizer/commands/view.cpp
+++ b/src/gausskernel/optimizer/commands/view.cpp
@@ -335,6 +335,7 @@ static Oid DefineVirtualRelation(RangeVar* relation, List* tlist, bool replace,
createStmt->oncommit = ONCOMMIT_NOOP;
createStmt->tablespacename = NULL;
createStmt->if_not_exists = false;
+ createStmt->charset = PG_INVALID_ENCODING;
Oid ownerOid = InvalidOid;
/*
diff --git a/src/gausskernel/optimizer/gplanmgr/gplanmgr.cpp b/src/gausskernel/optimizer/gplanmgr/gplanmgr.cpp
index 46e23a16a..df713547a 100644
--- a/src/gausskernel/optimizer/gplanmgr/gplanmgr.cpp
+++ b/src/gausskernel/optimizer/gplanmgr/gplanmgr.cpp
@@ -36,6 +36,7 @@ typedef struct indexUsageWalkerCxt {
MethodPlanWalkerContext mpwc;
List *usage_list; /* The list of IdxQual */
Index varelid;
+ List* paramValList;
} indexUsageWalkerCxt;
typedef struct PlanIndexUasge {
@@ -61,19 +62,34 @@ const int8 MIN_EVAL_TIMES = 3;
static void SetPlanMemoryContext(CachedPlanSource *plansource, CachedPlan *plan);
static void AcquireManagerLock(PMGRAction *action, LWLockMode mode);
static void ReleaseManagerLock(PMGRAction *action);
-static void UpdatePlanCIs(CachedPlanInfo *pinfo, List *relCis, List *indexCis);
-static bool FallIntoCI(CondInterval *ci, double selectivity);
-static List *GetPlanIndexesUsages(List *stmt_list);
+static List *GenerateIndexCIs(indexUsageWalkerCxt context, bool *hasPartial);
static void StoreStmtRoot(const char *stmt_name, PlannerInfo *root, const char *qstr, MemoryContext cxt);
static StatementRoot *FetchStmtRoot(const char *stmt_name);
static PlannerInfo *InitGenericRoot(PlannerInfo *pinfo, ParamListInfo boundParams);
-static bool SetClausesVarNo(Node *node, void *context);
+static Node *CacheIndexQual(Node *node, indexUsageWalkerCxt *context);
static List *GetBaseRelSelectivity(PlannerInfo *root);
static List *GenerateRelCIs(List *selectivities);
-static List *GenerateIndexCIs(List *indexes_info, bool *hasPartial);
-
static inline void NextAction(PMGRAction *action, PMGRActionType acttype, PMGRStatCollectType statstype);
static void ClearGenericRootCache(PlannerInfo *pinfo, bool onlyPossionCache);
+static bool ExtractPlanIndexesUsages(Node *plan, void *context);
+
+/*
+ * ReleaseCustomPlan: release a CachedPlanSource's custom plan, if any.
+ */
+void
+ReleaseCustomPlan(CachedPlanSource *plansource)
+{
+ /* Be paranoid about the possibility that ReleaseCachedPlan fails */
+ if (plansource->cplan) {
+ CachedPlan *plan = plansource->cplan;
+
+ Assert(plan->magic == CACHEDPLAN_MAGIC && !plan->is_candidate);
+ plansource->cplan = NULL;
+
+ /* release the plan */
+ ReleaseCachedPlan(plan, false);
+ }
+}
GplanSelectionMethod
GetHintSelectionMethod(const CachedPlanSource* plansource)
@@ -131,7 +147,8 @@ ChooseAdaptivePlan(CachedPlanSource *plansource, ParamListInfo boundParams)
return false;
}
- if (boundParams == NULL) {
+ // limit number of params to avoid the huge memory consumption
+ if (boundParams == NULL || boundParams->numParams >= 50) {
return false;
}
@@ -143,7 +160,8 @@ ChooseAdaptivePlan(CachedPlanSource *plansource, ParamListInfo boundParams)
return false;
}
- if(plansource->hasSubQuery || !selec_gplan_by_hint(plansource)){
+ // subquery is not supported by our plan selection method.
+ if (plansource->hasSubQuery || !selec_gplan_by_hint(plansource)) {
PMGR_ReleasePlanManager(plansource);
return false;
}
@@ -225,6 +243,8 @@ GetDefaultGenericPlan(CachedPlanSource *plansource,
*/
*mode = true;
*qlist = NIL;
+ ReleaseGenericPlan(plansource);
+
return NULL;
}
}
@@ -233,14 +253,59 @@ GetDefaultGenericPlan(CachedPlanSource *plansource,
return plan;
}
-static uint32
-GenerateIndexHashKey(List *quals)
+/*
+ * UpdateCI - expand the condition interval so that it embraces val
+ */
+static void
+UpdateCI(CondInterval *tar, double val, double dampfactor)
{
- uint32 val = 0;
- char *key = NULL;
- key = nodeToString(quals);
- val = DatumGetUInt32(hash_any((const unsigned char *)key, strlen(key) + 1));
- return val;
+ if(tar == NULL){
+ return;
+ }
+
+    /* Expand the lowerbound of CI. */
+ if (val < tar->lowerbound) {
+        /* The learning rate is used to avoid aggressive expansion. */
+ tar->lowerbound -= Min(dampfactor, (tar->lowerbound - val) * 1.1);
+ }
+
+ /* Expand the upperbound of CI.*/
+ if (val > tar->upperbound) {
+ tar->upperbound += Min(dampfactor, (val - tar->upperbound) * 1.1);
+ }
+}
+
+static void
+UpdateOffsetCI(CachedPlanInfo *pinfo, int64 offset)
+{
+ if (pinfo->offsetCi == NULL) {
+ return;
+ }
+
+ CondInterval *ci = (CondInterval *)pinfo->offsetCi;
+
+    /* Expand the lowerbound of CI. */
+ if (ci->lowerbound > offset) {
+        /* Here, the learning rate is used to avoid aggressive expansion. */
+ ci->lowerbound -= (ci->lowerbound - offset);
+ }
+ /* Expand the upperbound of CI. */
+ if (ci->upperbound < offset) {
+ ci->upperbound += (offset - ci->upperbound);
+ }
+}
+
+/*
+ * FallIntoCI: does the given selectivity fall into ci?
+ */
+static bool
+FallIntoCI(CondInterval *ci, double selectivity)
+{
+ if (ci->lowerbound <= selectivity && \
+ ci->upperbound >= selectivity) {
+ return true;
+ }
+ return false;
}
/*
@@ -379,12 +444,15 @@ PMGR_ExplorePlan(CachedPlanSource *plansource,
boundParams->params_lazy_bind = true;
}
+ u_sess->pcache_cxt.is_plan_exploration = true;
/* explore the query plan by planner. */
plan = BuildCachedPlan(plansource, *qlist, boundParams, false);
- /* close the lazy bind labels to avoid the binding failure in executor. */
- boundParams->uParamInfo = DEFUALT_INFO;
boundParams->params_lazy_bind = false;
+ u_sess->pcache_cxt.is_plan_exploration = false;
+
+ plan->cost = cached_plan_cost(plan);
+
ereport(DEBUG2, (errmodule(MOD_OPT),
errmsg("Explore a plan; ThreadId: %d, query: \"%s\"", gettid(), plansource->query_string)));
@@ -435,52 +503,72 @@ InsertPlan(CachedPlanSource *plansource,
CachedPlan *plan,
uint32 planHashkey)
{
- CachedPlanInfo *cpinfo = NULL;
+ if(!plansource->is_saved){
+ ereport(DEBUG2, (errmodule(MOD_OPT),
+ errmsg("skip to cache plan into an unsaved plansource; ThreadId: %d, query: \"%s\"",
+ gettid(), plansource->query_string)));
+ return;
+ }
+ CachedPlanInfo *cpinfo = NULL;
PlanManager *planMgr = plansource->planManager;
PlannerInfo *expRoot = (PlannerInfo *)u_sess->pcache_cxt.explored_plan_info;
Query *query = (Query *)linitial(plansource->query_list);
- if (plansource->is_saved) {
- MemoryContext oldcxt = MemoryContextSwitchTo(plan->context);
- int64 offset = -1;
+ PlannedStmt *stmt = (PlannedStmt *)linitial(plan->stmt_list);
+ indexUsageWalkerCxt used_indexes_cxt;
- cpinfo = makeNode(CachedPlanInfo);
- cpinfo->learningRate = LREANINGRATE;
- cpinfo->plan = plan;
- cpinfo->planHashkey = planHashkey;
- cpinfo->sample_exec_costs = 0;
- cpinfo->sample_times = 0;
- cpinfo->verification_times = 1;
- cpinfo->status = ADPT_PLAN_UNCHECKED;
- plan->is_candidate = true;
- /* collect the plan selectivities and generate CI for selection. */
+ errno_t rc = 0;
+ size_t cxt_size = sizeof(indexUsageWalkerCxt);
+ rc = memset_s(&used_indexes_cxt, cxt_size, 0, cxt_size);
+ securec_check(rc, "\0", "\0");
- offset = GetLimitValue(query, boundParams);
- if (offset > 0) {
- CondInterval *offsetCi = makeNode(CondInterval);
- offsetCi->relid = 0;
- offsetCi->lowerbound = offset;
- offsetCi->upperbound = offset;
- cpinfo->offsetCi = (void *)offsetCi;
- }
- cpinfo->relCis = GenerateRelCIs(GetBaseRelSelectivity(expRoot));
- cpinfo->indexCis = GenerateIndexCIs(GetPlanIndexesUsages(plan->stmt_list), &cpinfo->usePartIdx);
- if (plansource->gpc.status.InShareTable()) {
- pg_atomic_fetch_add_u32((volatile uint32 *)&plan->global_refcount, 1);
- plan->is_share = true;
- Assert(plan->context->is_shared);
- MemoryContextSeal(plan->context);
- } else {
- plan->refcount++;
- }
- plan->is_saved = true;
- plan->cpi = cpinfo;
+ exec_init_plan_tree_base(&used_indexes_cxt.mpwc.base, stmt);
- (void)MemoryContextSwitchTo(planMgr->context);
- planMgr->candidatePlans = lappend(planMgr->candidatePlans, plan);
- (void)MemoryContextSwitchTo(oldcxt);
+ /*
+ * Get the usages of indexes by traversing the plan tree.
+ */
+ ExtractPlanIndexesUsages((Node *)stmt->planTree, &used_indexes_cxt);
+
+ /* collect the plan selectivities and generate CI for selection. */
+ MemoryContext oldcxt = MemoryContextSwitchTo(plan->context);
+ int64 offset = -1;
+
+ cpinfo = makeNode(CachedPlanInfo);
+ cpinfo->learningRate = LREANINGRATE;
+ cpinfo->plan = plan;
+ cpinfo->planHashkey = planHashkey;
+ cpinfo->sample_exec_costs = 0;
+ cpinfo->sample_times = 0;
+ cpinfo->verification_times = 1;
+ cpinfo->status = ADPT_PLAN_UNCHECKED;
+ plan->is_candidate = true;
+
+ offset = GetLimitValue(query, boundParams);
+ if (offset > 0) {
+ CondInterval *offsetCi = makeNode(CondInterval);
+ offsetCi->relid = 0;
+ offsetCi->lowerbound = offset;
+ offsetCi->upperbound = offset;
+ cpinfo->offsetCi = (void *)offsetCi;
}
+ cpinfo->relCis = GenerateRelCIs(GetBaseRelSelectivity(expRoot));
+ cpinfo->indexCis = GenerateIndexCIs(used_indexes_cxt, &cpinfo->usePartIdx);
+ if (plansource->gpc.status.InShareTable()) {
+ pg_atomic_fetch_add_u32((volatile uint32 *)&plan->global_refcount, 1);
+ plan->is_share = true;
+ Assert(plan->context->is_shared);
+ MemoryContextSeal(plan->context);
+ } else {
+ plan->refcount++;
+ }
+ plan->is_saved = true;
+ plan->cpi = cpinfo;
+
+ (void)MemoryContextSwitchTo(planMgr->context);
+ planMgr->candidatePlans = lappend(planMgr->candidatePlans, plan);
+ (void)MemoryContextSwitchTo(oldcxt);
+
}
CachedPlan*
@@ -506,6 +594,8 @@ MakeValuedRestrictinfos(PlannerInfo *queryRoot, List *quals)
ListCell *cell;
List *rst = NIL;
+ queryRoot->glob->boundParams->params_lazy_bind =false;
+
foreach(cell, quals){
Node *qual = (Node *)lfirst(cell);
qual = eval_const_expressions(queryRoot, qual);
@@ -562,7 +652,8 @@ IsMatchedPlan(PlannerInfo *queryRoot, List *query_rel_sels,
return false;
}
- Assert(cpinfo->relCis->length == query_rel_sels->length);
+ Assert((cpinfo->relCis == NIL && query_rel_sels == NIL) ||
+ cpinfo->relCis->length == query_rel_sels->length);
/* check whether query can fall into baserel CIs of any candidate plan. */
ListCell *c1;
@@ -610,11 +701,11 @@ IsMatchedPlan(PlannerInfo *queryRoot, List *query_rel_sels,
* These invaild information would lead to an incorrect result,
* if we do not clear them up before a new selectivity computation.
*/
- ClearGenericRootCache(queryRoot, true);
+ ClearGenericRootCache(queryRoot, false);
/* get index Selectivity */
double query_idx_selec = clauselist_selectivity(queryRoot,
clauses,
- 0,
+ idxci->ci.relid,
JOIN_INNER,
NULL,
false);
@@ -627,11 +718,65 @@ IsMatchedPlan(PlannerInfo *queryRoot, List *query_rel_sels,
return true;
}
+bool
+MakePlanMatchQuery(PlannerInfo *queryRoot, List *query_rel_sels, CachedPlanInfo *cpinfo)
+{
+ int64 queryOffset = -1;
+
+ /* check whether offset values are matched */
+ queryOffset = GetLimitValue(queryRoot->parse, queryRoot->glob->boundParams);
+ UpdateOffsetCI(cpinfo, queryOffset);
+
+ /* Update baserel CI for matching. */
+ ListCell *c1;
+ ListCell *c2;
+ forboth(c1, cpinfo->relCis, c2, query_rel_sels) {
+ RelCI *relCI = (RelCI *)lfirst(c1);
+ RelSelec *qRelSelec = (RelSelec *)lfirst(c2);
+
+ UpdateCI(&relCI->ci, qRelSelec->selectivity, cpinfo->learningRate);
+ }
+
+ /* Update index CI for matching. */
+ ListCell *cell;
+ List *clauses = NIL;
+ foreach (cell, cpinfo->indexCis) {
+ IndexCI *idxci = (IndexCI *)lfirst(cell);
+
+ /*
+         * bind parameters and organize evaluated indexquals as a RestrictInfo
+ * list. Note that, 'clauses' just is a copy of cached 'indexquals'.
+ */
+ clauses = MakeValuedRestrictinfos(queryRoot, idxci->indexquals);
+
+ /*
+ * At each invocation, 'clauselist_selectivity' would cache one-shot
+ * selectivity information, 'varratio, varEqRatio', into 'queryRoot'.
+         * This invalid information would lead to an incorrect result,
+ * if we do not clear them up before a new selectivity computation.
+ */
+ ClearGenericRootCache(queryRoot, false);
+
+ /* get index Selectivity */
+ double query_idx_selec = clauselist_selectivity(queryRoot,
+ clauses,
+ idxci->ci.relid,
+ JOIN_INNER,
+ NULL,
+ false);
+
+ UpdateCI(&idxci->ci, query_idx_selec, cpinfo->learningRate);
+ }
+
+ return true;
+}
+
static CachedPlan*
-FindMatchedPlan(PlanManager *manager, PlannerInfo *queryRoot)
+FindMatchedPlan(PlanManager *manager, PMGRAction *action)
{
ListCell *cl;
bool usePartIdx = false;
+ PlannerInfo *queryRoot = action->genericRoot;
if (queryRoot->glob->boundParams == NULL) {
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -639,15 +784,17 @@ FindMatchedPlan(PlanManager *manager, PlannerInfo *queryRoot)
}
queryRoot->glob->boundParams->uParamInfo = PARAM_VAL_SELECTIVITY_INFO;
+ Assert(queryRoot->glob->boundParams->params_lazy_bind == false);
+
/* Do a quick baserel-selectivity estimation. */
set_base_rel_sizes(queryRoot, true);
- List *query_rel_sels = GetBaseRelSelectivity(queryRoot);
+ action->qRelSelec = GetBaseRelSelectivity(queryRoot);
usePartIdx = MatchPartIdxQuery(queryRoot);
/* A plan is matched if both types of CIs can dominate */
foreach (cl, manager->candidatePlans) {
CachedPlan *plan = (CachedPlan *)lfirst(cl);
- if (IsMatchedPlan(queryRoot, query_rel_sels, plan->cpi, usePartIdx)) {
+ if (IsMatchedPlan(queryRoot, action->qRelSelec, plan->cpi, usePartIdx)) {
return plan;
}
}
@@ -659,16 +806,16 @@ FindMatchedPlan(PlanManager *manager, PlannerInfo *queryRoot)
return NULL;
}
-bool ContainSubQuery(PlannerInfo* root)
+bool
+ContainSubQuery(PlannerInfo *root)
{
if (root->glob->subroots != NIL) {
return true;
}
- /* 0 for other purpose, so start from 1 */
- for (int i = 1; i < root->simple_rel_array_size; ++i) {
- RelOptInfo* rel = root->simple_rel_array[i];
- if (!rel) {
+ for (int rti = 1; rti < root->simple_rel_array_size; rti++) {
+ RelOptInfo *rel = root->simple_rel_array[rti];
+ if (rel == NULL) {
continue;
}
if (rel->rtekind == RTE_SUBQUERY) {
@@ -722,38 +869,18 @@ CacheGenericRoot(CachedPlanSource *plansource, char *psrc_key)
u_sess->pcache_cxt.explored_plan_info = NULL;
}
-static void
-UpdateOffsetCI(CachedPlanInfo *pinfo, int64 offset)
-{
- if (pinfo->offsetCi == NULL) {
- return;
- }
-
- CondInterval *ci = (CondInterval *)pinfo->offsetCi;
-
- /* Expand the lowerboud of CI. */
- if (ci->lowerbound > offset) {
- /* Here, the leanring rate is used to avoid the aggressive expandsion. */
- ci->lowerbound -= (ci->lowerbound - offset);
- }
- /* Expand the upperbound of CI. */
- if (ci->upperbound < offset) {
- ci->upperbound += (offset - ci->upperbound);
- }
-}
-
CachedPlan*
PMGR_InsertPlan(CachedPlanSource *plansource,
ParamListInfo boundParams,
CachedPlan *new_plan,
- uint32 new_plan_key)
+ uint32 new_plan_key,
+ PMGRAction *action)
{
PlanManager *PlanMgr = plansource->planManager;
int num_candidates = list_length(PlanMgr->candidatePlans);
CachedPlan *cachedplan = SearchCandidatePlan(PlanMgr->candidatePlans, new_plan_key);
bool alreadyCached = cachedplan != NULL ? true : false;
- PlannerInfo *expRoot = (PlannerInfo *)u_sess->pcache_cxt.explored_plan_info;
/*
* if explored plan has been cached, update the stats; otherwise, add it
@@ -761,20 +888,13 @@ PMGR_InsertPlan(CachedPlanSource *plansource,
*/
if (alreadyCached) {
ereport(DEBUG2, (errmodule(MOD_OPT),
- errmsg("Explored plan already has been cached; ThreadId: %d, query: \"%s\"",
+ errmsg("Explored plan already has been cached, try to update planCIs; ThreadId: %d, query: \"%s\"",
gettid(), plansource->query_string)));
pg_atomic_fetch_add_u32((volatile uint32 *)&cachedplan->cpi->verification_times, 1);
-
- /* Update PlanCI (RelCIs and IndexCIs) */
- List *relCis = GenerateRelCIs(GetBaseRelSelectivity(expRoot));
- List *indexCis = GenerateIndexCIs(GetPlanIndexesUsages(new_plan->stmt_list), NULL);
- UpdatePlanCIs(cachedplan->cpi, relCis, indexCis);
-
- /* Update OffsetCI */
- Query *query = (Query *)linitial(plansource->query_list);
- int64 offset = GetLimitValue(query, boundParams);
- UpdateOffsetCI(cachedplan->cpi, offset);
+
+ /* Update PlanCI */
+ MakePlanMatchQuery(action->genericRoot, action->qRelSelec, cachedplan->cpi);
/* release new_plan after updates are completed. */
MemoryContextDelete(new_plan->context);
@@ -829,6 +949,7 @@ CreateAction(CachedPlanSource *plansource)
action->type = PMGR_START;
action->psrc = plansource;
action->selected_plan = NULL;
+ action->qRelSelec = NIL;
action->valid_plan = false;
action->statType = PMGR_GET_NONE_STATS;
action->is_shared = plansource->gpc.status.InShareTable();
@@ -836,6 +957,7 @@ CreateAction(CachedPlanSource *plansource)
action->is_lock = false;
action->lockmode = LW_SHARED;
action->needGenericRoot = false;
+ action->genericRoot = NULL;
action->step = 0;
MemoryContextSwitchTo(oldcxt);
@@ -888,9 +1010,12 @@ ActionHandle(CachedPlanSource *plansource,
/* acquire an exclusive lock to update plan manager. */
ManagerLockSwitchTo(action, LW_EXCLUSIVE);
- if (ContainSubQuery((PlannerInfo*)u_sess->pcache_cxt.explored_plan_info)) {
+ if (ContainSubQuery((PlannerInfo *)u_sess->pcache_cxt.explored_plan_info)) {
manager->is_valid = false;
- plansource->hasSubQuery = true;
+ plansource->hasSubQuery = true;
+ ereport(LOG, (errmodule(MOD_OPT),
+ errmsg("plan selection can not handle with SQL with subqueries, go to the default mode in the next round; ThreadId: %d, query: \"%s\"",
+ gettid(), plansource->query_string)));
}
/*
@@ -902,8 +1027,8 @@ ActionHandle(CachedPlanSource *plansource,
* temporary slots. However, it should not be cost too much as the temporary
* slots are narrow.
*/
- if (!TransactionIdIsValid(plan->saved_xmin)) {
- plan = PMGR_InsertPlan(plansource, boundParams, plan, plan_key);
+ if (!TransactionIdIsValid(plan->saved_xmin) && manager->is_valid) {
+ plan = PMGR_InsertPlan(plansource, boundParams, plan, plan_key, action);
if (action->needGenericRoot) {
CacheGenericRoot(plansource, psrc_key);
action->needGenericRoot = false;
@@ -923,15 +1048,19 @@ ActionHandle(CachedPlanSource *plansource,
*/
if (plansource->gplan == NULL) {
plansource->gplan = plan;
- if (!plan->is_saved) {
+ if (plan->refcount == 0) {
plan->refcount++;
}
- Assert(plansource->gplan->refcount > 0);
plansource->generic_cost = cached_plan_cost(plan);
}
+ Assert(plansource->gplan->refcount > 0);
action->selected_plan = plan;
action->valid_plan = true;
+
+ /* close the lazy-bind labels to avoid binding failures in the executor. */
+ boundParams->uParamInfo = DEFUALT_INFO;
+ boundParams->params_lazy_bind = false;
NextAction(action, PMGR_FINISH, action->statType);
} break;
case PMGR_CHOOSE_BEST_METHOD: {
@@ -970,12 +1099,26 @@ ActionHandle(CachedPlanSource *plansource,
Assert(strcmp(plansource->query_string, entry->query_string) == 0);
/* initialize root's statistics cache and set boundParams */
- PlannerInfo *groot = InitGenericRoot(entry->generic_root, boundParams);
- action->selected_plan = FindMatchedPlan(plansource->planManager, groot);
+ action->genericRoot = InitGenericRoot(entry->generic_root, boundParams);
+ action->selected_plan = FindMatchedPlan(plansource->planManager, action);
NextAction(action, PMGR_CHECK_PLAN, PMGR_GET_NONE_STATS);
}
} break;
case PMGR_CHECK_PLAN: {
+
+ /*
+ * plansource may be invalid, as a table update could happen ahead of lock
+ * fetching in the plan matching process. Therefore, revalidate plansource,
+ * and invalidate the current plan manager and the selected plan.
+ */
+ if (!plansource->is_valid) {
+ (void)RevalidateCachedQuery(plansource);
+ Assert(!plansource->planManager->is_valid);
+ if(action->selected_plan != NULL) {
+ action->selected_plan->is_valid = false;
+ }
+ }
+
if (CheckCachedPlan(plansource, action->selected_plan)) {
Assert(action->selected_plan->magic == CACHEDPLAN_MAGIC);
/* Update soft parse counter for Unique SQL */
@@ -996,7 +1139,7 @@ ActionHandle(CachedPlanSource *plansource,
// selected_plan is invaild, reset the pointer as NULL
action->selected_plan = NULL;
ereport(DEBUG2, (errmodule(MOD_OPT),
- errmsg("selected plan is invaild; ThreadId: %d, query: \"%s\"",
+ errmsg("selected plan is invalid; ThreadId: %d, query: \"%s\"",
gettid(), plansource->query_string)));
}
@@ -1029,14 +1172,17 @@ ActionHandle(CachedPlanSource *plansource,
CachedPlan*
GetAdaptGenericPlan(CachedPlanSource *plansource,
ParamListInfo boundParams,
- List **qlist)
+ List **qlist,
+ bool *mode)
{
PMGRAction *action;
GplanSelectionMethod select_method;
+ double avg_custom_cost;
/* plan management only supports named stmts. */
Assert(plansource->stmt_name);
+ ReleaseCustomPlan(plansource);
select_method = GetHintSelectionMethod(plansource);
Assert(select_method == CHOOSE_ADAPTIVE_GPLAN);
@@ -1060,18 +1206,21 @@ GetAdaptGenericPlan(CachedPlanSource *plansource,
plansource->query_string)));
}
-
-
- /* prepare the selection by initializing the selected-action context. */
- action = CreateAction(plansource);
u_sess->pcache_cxt.explored_plan_info = NULL;
+ u_sess->pcache_cxt.is_plan_exploration = false;
if (u_sess->pcache_cxt.action != NULL) {
pfree(u_sess->pcache_cxt.action);
}
+ /* prepare the selection by initializing the selected-action context. */
+ action = CreateAction(plansource);
u_sess->pcache_cxt.action = (void *)action;
+ ereport(DEBUG2, (errmodule(MOD_OPT),
+ errmsg("begin plan selection. ThreadId: %d, query: \"%s\"",
+ gettid(), plansource->query_string)));
+
/* Lock plansource when a selection is beginning. */
AcquireManagerLock(action, LW_SHARED);
@@ -1118,9 +1267,47 @@ GetAdaptGenericPlan(CachedPlanSource *plansource,
Assert(action->step < 10);
} while (action->type != PMGR_FINISH);
+ if (boundParams != NULL) {
+ boundParams->uParamInfo = DEFUALT_INFO;
+ boundParams->params_lazy_bind = false;
+ }
+
/* Selection is finished; unlock plansource */
ReleaseManagerLock(action);
+ ereport(DEBUG2, (errmodule(MOD_OPT),
+ errmsg("End plan selection. ThreadId: %d, query: \"%s\"",
+ gettid(), plansource->query_string)));
+
+ avg_custom_cost = plansource->total_custom_cost / plansource->num_custom_plans;
+
+ /*
+ * Prefer the selected plan if it's less expensive than the average custom plan;
+ * otherwise, return null so that a custom plan is picked again.
+ *
+ * Note that a knocked-out plan would not be in the candidate list. If so, we release
+ * it to avoid a memory leak.
+ */
+ if (action->selected_plan->cost > 1.1 * avg_custom_cost) {
+ Assert(action->selected_plan->cost >= 0);
+ if (!action->selected_plan->is_candidate) {
+ Assert(action->selected_plan->refcount == 0);
+
+ /*
+ * guarantee that refcount equals to 1, and thus we can use
+ * 'ReleaseCachedPlan' to release the plan.
+ */
+ action->selected_plan->refcount = 1;
+ if (plansource->gplan == action->selected_plan) {
+ plansource->gplan = NULL;
+ }
+ ReleaseCachedPlan(action->selected_plan, false);
+ }
+ action->selected_plan = NULL;
+ *mode = true;
+ }
+
+
return action->selected_plan;
}
@@ -1134,7 +1321,7 @@ GetCustomPlan(CachedPlanSource *plansource,
{
CachedPlan *plan = NULL;
/* Whenever plan is rebuild, we need to drop the old one */
- ReleaseGenericPlan(plansource);
+ ReleaseCustomPlan(plansource);
/* Build a custom plan */
plan = BuildCachedPlan(plansource, *qlist, boundParams, true);
@@ -1272,6 +1459,8 @@ GenerateRelCIs(List *selectivities)
* by current rel's selectivity.
*/
relCI->ci.relid = rselec->relid;
+
+ /* expand CI by 10% */
relCI->ci.lowerbound = rselec->selectivity;
relCI->ci.upperbound = rselec->selectivity;
relCI->init_selec = rselec->selectivity;
@@ -1282,140 +1471,47 @@ GenerateRelCIs(List *selectivities)
}
/*
- * GenerateIndexCIs: convert each element of 'indexes_info' to an IndexCI
- * structure.
- */
-static List*
-GenerateIndexCIs(List *indexes_usages, bool *hasPartial)
-{
- List *result = NIL;
- bool partial = false;
-
- ListCell *lc;
- foreach (lc, indexes_usages) {
- PlanIndexUasge *usage = (PlanIndexUasge *)lfirst(lc);
-
- IndexCI *idxci = makeNode(IndexCI);
- idxci->ci.relid = usage->scanrelid;
- idxci->ci.lowerbound = usage->selec;
- idxci->ci.upperbound = usage->selec;
- idxci->init_selec = usage->selec;
- idxci->indexoid = usage->indexoid;
- idxci->indexquals = (List *)copyObject(usage->indexquals);
-
- /*
- * replace pseudo-VarNo (i.e., 65002) by the real rel id. Then,
- * generate the hashkey of reset-quals as the index key.
- */
- (void)SetClausesVarNo((Node *)idxci->indexquals, (void *)&usage->scanrelid);
- idxci->indexkey = GenerateIndexHashKey(idxci->indexquals);
- idxci->is_partial = usage->is_partial;
- partial = usage->is_partial ? usage->is_partial : partial;
- result = lappend(result, idxci);
- }
-
- if(hasPartial != NULL){
- (*hasPartial) = partial;
- }
-
- return result;
-}
-
-
-/*
- * UpdateCI - make a embrace X
- */
-static void
-UpdateCI(CondInterval *a, CondInterval *x, double dampfactor)
-{
- if(a == NULL){
- Assert(x == NULL);
- return;
- }
-
- /* Expand the lowerboud of CI.*/
- if (x->lowerbound < a->lowerbound) {
- /* leanring rate is used to avoid the aggressive expandsion. */
- a->lowerbound -= Min(dampfactor, (a->lowerbound - x->lowerbound) * 1.1);
- }
- /* Expand the upperbound of CI.*/
- if (x->upperbound > a->upperbound) {
- a->upperbound += Min(dampfactor, (x->upperbound - a->upperbound) * 1.1);
- }
-}
-
-static void
-UpdatePlanCIs(CachedPlanInfo *pinfo, List *relCis, List *indexCis)
-{
- if (pinfo->relCis == NIL) {
- ereport(ERROR, (errcode(ERRCODE_INVALID_STATUS),
- errmsg("no selectivity information is found.")));
- }
-
- Assert(pinfo->relCis->length == relCis->length);
-
- ListCell *c1;
- ListCell *c2;
- forboth(c1, pinfo->relCis, c2, relCis)
- {
- RelCI *pRelCI = (RelCI *)lfirst(c1);
- RelCI *qRelCI = (RelCI *)lfirst(c2);
- Assert(pRelCI->relid == qRelCI->relid);
- /* update baserel ci*/
- UpdateCI(&pRelCI->ci, &qRelCI->ci, pinfo->learningRate);
- }
-
- ListCell *cl1;
- ListCell *cl2;
- forboth(cl1, pinfo->indexCis, cl2, indexCis)
- {
- IndexCI *pIdxCi = (IndexCI *)lfirst(cl1);
- IndexCI *qIdxCi = (IndexCI *)lfirst(cl2);
- Assert(pIdxCi->indexkey == qIdxCi->indexkey);
- /* update index ci*/
- UpdateCI(&pIdxCi->ci, &qIdxCi->ci, pinfo->learningRate);
- }
-}
-
-/*
- * FallIntoCI: is the given selectivity fall in ci?
- */
-static bool
-FallIntoCI(CondInterval *ci, double selectivity)
-{
- Assert(selectivity > 0);
- if (ci->lowerbound <= selectivity && \
- ci->upperbound >= selectivity) {
- return true;
- }
- return false;
-}
-
-/*
- * SetClausesVarNo - replace the var number of index scan nodes by real
- * scan IDs.
+ * CacheIndexQual - copy indexqual from plan and replace the var number of index
+ * scan nodes by real scan IDs.
*
* i.e., varno: 65000 => varno: 1
*/
-static bool
-SetClausesVarNo(Node *node, void *context)
+static Node*
+CacheIndexQual(Node *node, indexUsageWalkerCxt *context)
{
+ Var *newVal = NULL;
+
if (node == NULL) {
- return false;
+ return NULL;
}
- Assert(!IsA(node, IndexScan));
+ Assert(!IsA(node, Plan) && !IsA(node, RestrictInfo));
+
if (IsA(node, Var)) {
- Var *var = (Var *)node;
- var->varno = *((int *)context);
+ Var *newVal = (Var *)copyObject(node);
+ newVal->varno = newVal->varnoold;
+ newVal->varattno = newVal->varoattno;
+
+ return (Node *)newVal;
}
- if(IsA(node, RestrictInfo)){
- RestrictInfo *rinfo = (RestrictInfo *)node;
- node = (Node *)rinfo->clause;
+ if (IsA(node, Param)) {
+ Param *param = (Param *)node;
+ if (param->paramkind == PARAM_EXEC) {
+ ListCell *lc;
+ foreach (lc, context->paramValList) {
+ NestLoopParam *nlp = (NestLoopParam *)lfirst(lc);
+ if (param->paramid == nlp->paramno) {
+ newVal = (Var *)copyObject(nlp->paramval);
+ newVal->varno = newVal->varnoold;
+ newVal->varattno = newVal->varoattno;
+ return (Node *)newVal;
+ }
+ }
+ }
}
- return expression_tree_walker(node,
- (bool (*)())SetClausesVarNo,
- context);
+ return expression_tree_mutator(node,
+ (Node* (*)(Node*, void*))CacheIndexQual,
+ context, true);
}
PlanIndexUasge*
@@ -1446,7 +1542,7 @@ MakePlanIndexUasge(int scanrelid,
*
* Returns false when the current plan node are processed end.
*/
-bool
+static bool
ExtractPlanIndexesUsages(Node *plan, void *context)
{
indexUsageWalkerCxt *idxCxt = ((indexUsageWalkerCxt *)context);
@@ -1494,32 +1590,53 @@ ExtractPlanIndexesUsages(Node *plan, void *context)
return false;
}
+
+ if (IsA(plan, NestLoop)) {
+ NestLoop *nl = (NestLoop *)plan;
+ List *tmpList = (List *)copyObject(nl->nestParams);
+ idxCxt->paramValList = list_concat(idxCxt->paramValList, tmpList);
+ }
+
return plan_tree_walker(plan, (MethodWalker)ExtractPlanIndexesUsages, context);
}
/*
- * GetPlanIndexUsage: get indexquals and their selectivities from the plan
- * tree.
+ * GenerateIndexCIs: get indexquals and their selectivities from the plan
+ * tree and generate index CI.
*/
List*
-GetPlanIndexesUsages(List *stmt_list)
+GenerateIndexCIs(indexUsageWalkerCxt context, bool *hasPartial)
{
- PlannedStmt *stmt = (PlannedStmt *)linitial(stmt_list);
- indexUsageWalkerCxt context;
+ List *result = NIL;
+ bool partial = false;
- errno_t rc = 0;
- size_t cxt_size = sizeof(indexUsageWalkerCxt);
- rc = memset_s(&context, cxt_size, 0, cxt_size);
- securec_check(rc, "\0", "\0");
+ ListCell *lc;
+ foreach (lc, context.usage_list) {
+ PlanIndexUasge *usage = (PlanIndexUasge *)lfirst(lc);
- exec_init_plan_tree_base(&context.mpwc.base, stmt);
+ IndexCI *idxci = makeNode(IndexCI);
+ idxci->ci.relid = usage->scanrelid;
+ idxci->ci.lowerbound = usage->selec;
+ idxci->ci.upperbound = usage->selec;
+ idxci->init_selec = usage->selec;
+ idxci->indexoid = usage->indexoid;
- /*
- * Get the usages of indexes by traversing the plan tree.
- */
- ExtractPlanIndexesUsages((Node *)stmt->planTree, &context);
+ /*
+ * replace pseudo-VarNo (i.e., 65002) by the real rel id. Then,
+ * generate the hashkey of reset-quals as the index key.
+ */
+ context.varelid = usage->scanrelid;
+ idxci->indexquals = (List *)CacheIndexQual((Node *)usage->indexquals, &context);
+ idxci->is_partial = usage->is_partial;
+ partial = usage->is_partial ? usage->is_partial : partial;
+ result = lappend(result, idxci);
+ }
- return context.usage_list;
+ if(hasPartial != NULL){
+ (*hasPartial) = partial;
+ }
+
+ return result;
}
/*
diff --git a/src/gausskernel/optimizer/path/allpaths.cpp b/src/gausskernel/optimizer/path/allpaths.cpp
index bb4970857..0c99e7aa1 100755
--- a/src/gausskernel/optimizer/path/allpaths.cpp
+++ b/src/gausskernel/optimizer/path/allpaths.cpp
@@ -904,15 +904,18 @@ static void SetPlainReSizeWithPruningRatio(RelOptInfo *rel, double pruningRatio,
static bool IsPbeSinglePartition(Relation rel, RelOptInfo* relInfo)
{
if (relInfo->pruning_result->paramArg == NULL || relInfo->pruning_result->paramArg->paramkind != PARAM_EXTERN) {
+ relInfo->pruning_result->isPbeSinlePartition = false;
return false;
}
if (RelationIsSubPartitioned(rel)) {
+ relInfo->pruning_result->isPbeSinlePartition = false;
return false;
}
if (rel->partMap->type == PART_TYPE_RANGE || rel->partMap->type == PART_TYPE_INTERVAL) {
RangePartitionMap *partMap = (RangePartitionMap *)rel->partMap;
int partKeyNum = partMap->partitionKey->dim1;
if (partKeyNum > 1) {
+ relInfo->pruning_result->isPbeSinlePartition = false;
return false;
}
}
@@ -934,19 +937,14 @@ static void set_plain_rel_size(PlannerInfo* root, RelOptInfo* rel, RangeTblEntry
Relation relation = heap_open(rte->relid, NoLock);
double pruningRatio = 1.0;
- /* Before static pruning, we save the partmap in pruning_result, which will used in dynamic pruning and
- * executor, seen in GetPartitionInfo and getPartitionOidFromSequence */
- PartitionMap *partmap = CopyPartitionMap(relation->partMap);
-
/* get pruning result */
if (rte->partitionOidList == NIL) {
- rel->pruning_result = partitionPruningForRestrictInfo(root, rte, relation, rel->baserestrictinfo, partmap);
+ rel->pruning_result = partitionPruningForRestrictInfo(root, rte, relation, rel->baserestrictinfo);
} else {
rel->pruning_result = PartitionPruningForPartitionList(rte, relation);
}
Assert(rel->pruning_result);
- rel->pruning_result->partMap = partmap;
if (IsPbeSinglePartition(relation, rel)) {
rel->partItrs = 1;
@@ -2223,6 +2221,15 @@ static bool has_multiple_baserels(PlannerInfo* root)
return false;
}
+static bool has_rownum(Query* query)
+{
+ if (query == NULL) {
+ return false;
+ }
+
+ return expression_contains_rownum((Node*)query->targetList);
+}
+
static bool can_push_qual_into_subquery(PlannerInfo* root,
RestrictInfo* rinfo,
RangeTblEntry* rte,
@@ -2248,6 +2255,10 @@ static bool can_push_qual_into_subquery(PlannerInfo* root,
return false;
}
+ if (has_rownum(subquery)) {
+ return false;
+ }
+
return true;
}
diff --git a/src/gausskernel/optimizer/path/clausesel.cpp b/src/gausskernel/optimizer/path/clausesel.cpp
index cea89e695..f9f40091f 100755
--- a/src/gausskernel/optimizer/path/clausesel.cpp
+++ b/src/gausskernel/optimizer/path/clausesel.cpp
@@ -143,6 +143,13 @@ Selectivity clauselist_selectivity(
ES_SELECTIVITY* es = NULL;
MemoryContext ExtendedStat = NULL;
MemoryContext oldcontext;
+ bool use_muti_stats = true;
+
+ if (ENABLE_CACHEDPLAN_MGR && root->glob->boundParams != NULL) {
+ root->glob->boundParams->params_lazy_bind = false;
+ use_muti_stats = (root->glob->boundParams->uParamInfo != DEFUALT_INFO) ? false : true;
+ }
+
/*
* If there's exactly one clause, then no use in trying to match up pairs,
* so just go directly to clause_selectivity().
@@ -151,7 +158,7 @@ Selectivity clauselist_selectivity(
return clause_selectivity(root, (Node*)linitial(clauses), varRelid, jointype, sjinfo, varratio_cached, false, use_poisson);
/* initialize es_selectivity class, list_length(clauses) can be 0 when called by set_baserel_size_estimates */
- if (list_length(clauses) >= 2 &&
+ if (list_length(clauses) >= 2 && use_muti_stats &&
(jointype == JOIN_INNER || jointype == JOIN_FULL || jointype == JOIN_LEFT || jointype == JOIN_ANTI ||
jointype == JOIN_SEMI || jointype == JOIN_LEFT_ANTI_FULL)) {
ExtendedStat = AllocSetContextCreate(CurrentMemoryContext,
@@ -333,6 +340,10 @@ Selectivity clauselist_selectivity(
MemoryContextDelete(ExtendedStat);
}
+ if (ENABLE_CACHEDPLAN_MGR && root->glob->boundParams != NULL && root->glob->boundParams->uParamInfo != DEFUALT_INFO) {
+ root->glob->boundParams->params_lazy_bind = true;
+ }
+
return s1;
}
diff --git a/src/gausskernel/optimizer/path/equivclass.cpp b/src/gausskernel/optimizer/path/equivclass.cpp
index 35f237698..23c699ae0 100644
--- a/src/gausskernel/optimizer/path/equivclass.cpp
+++ b/src/gausskernel/optimizer/path/equivclass.cpp
@@ -450,6 +450,27 @@ static EquivalenceMember* add_eq_member(
return em;
}
+static bool restrict_contains_bplike_walker(Node *node, void *context)
+{
+ if (node == NULL) {
+ return false;
+ }
+
+ if (IsA(node, OpExpr)) {
+ OpExpr *opExpr = (OpExpr *)node;
+ if (opExpr->opno == OID_BPCHAR_LIKE_OP || opExpr->opno == OID_BPCHAR_NOT_LIKE_OP ||
+ opExpr->opno == OID_BPCHAR_ICLIKE_OP || opExpr->opno == OID_BPCHAR_IC_NOT_LIKE_OP) {
+ return true;
+ }
+ }
+
+ return expression_tree_walker(node, (bool (*)())restrict_contains_bplike_walker, context);
+}
+static bool restrict_contains_bplike(RestrictInfo *rinfo)
+{
+ return restrict_contains_bplike_walker((Node *)rinfo->clause, NULL);
+}
+
/*
* get_eclass_for_sort_expr
* Given an expression and opfamily/collation info, find an existing
@@ -657,6 +678,23 @@ void generate_base_implied_qualities(PlannerInfo* root)
if (BMS_SINGLETON != bms_membership(rinfo->clause_relids))
continue;
+ /*
+ * Even if two vars are in the same EquivalenceClass, some expressions may get different results on the two
+ * vars. The type bpchar (blank-padded character) is an example: its equal function and like function are not
+ * consistent.
+ * equal function---bpchareq: ignores the padded blank characters at the end of the Var.
+ * like function --bpliketext: does NOT ignore the padded blank characters!
+ * For example:
+ * create table A(a char(10)); create table B(a char(2));
+ * insert into A values('33'); insert into B values('33');
+ * Then A.a = B.a, but B.a like '33' is true while A.a like '33' is false!
+ *
+ * This is the only inconsistent behavior among EquivalenceClass members found so far, so just skip it!
+ */
+ if (restrict_contains_bplike(rinfo)) {
+ continue;
+ }
+
generate_base_implied_quality_clause(root, rel, rinfo);
}
}
diff --git a/src/gausskernel/optimizer/path/es_selectivity.cpp b/src/gausskernel/optimizer/path/es_selectivity.cpp
index 9ab91448c..fa1c5df63 100644
--- a/src/gausskernel/optimizer/path/es_selectivity.cpp
+++ b/src/gausskernel/optimizer/path/es_selectivity.cpp
@@ -143,6 +143,8 @@ Selectivity ES_SELECTIVITY::calculate_selectivity(PlannerInfo* root_input, List*
prob = cal_eqsel_ai(temp);
if (prob >= 0) {
result *= prob;
+ } else {
+ result *= cal_eqsel(temp);
}
} else {
result *= cal_eqsel(temp);
diff --git a/src/gausskernel/optimizer/path/indxpath.cpp b/src/gausskernel/optimizer/path/indxpath.cpp
index 053070cc7..78bb36897 100755
--- a/src/gausskernel/optimizer/path/indxpath.cpp
+++ b/src/gausskernel/optimizer/path/indxpath.cpp
@@ -45,6 +45,8 @@
#include "utils/pg_locale.h"
#include "utils/selfuncs.h"
#include "optimizer/gplanmgr.h"
+#include "instruments/instr_statement.h"
+#include "utils/expr_distinct.h"
#define IsBooleanOpfamily(opfamily) ((opfamily) == BOOL_BTREE_FAM_OID || \
(opfamily) == BOOL_HASH_FAM_OID || (opfamily) == BOOL_UBTREE_FAM_OID)
@@ -154,6 +156,8 @@ static int get_index_column_prefix_lenth(IndexOptInfo *index, int indexcol);
static Const* prefix_const_node(Const* con, int prefix_len, Oid datatype);
static RestrictInfo* rewrite_opclause_for_prefixkey(
RestrictInfo *rinfo, IndexOptInfo* index, Oid opfamily, int prefix_len);
+void check_report_cause_type(FuncExpr *funcExpr, int indkey);
+Node* match_first_var_to_indkey(Node* node, int indkey);
/*
* create_index_paths
@@ -3106,6 +3110,12 @@ bool match_index_to_operand(Node* operand, int indexcol, IndexOptInfo* index, bo
return true;
}
+ /*
+ * if FuncExpr, check whether there are risks caused by type conversion.
+ */
+ if (IsA(operand, FuncExpr))
+ check_report_cause_type((FuncExpr*)operand, indkey);
+
return false;
}
@@ -4309,5 +4319,51 @@ static RestrictInfo* rewrite_opclause_for_prefixkey(RestrictInfo *rinfo, IndexOp
return make_simple_restrictinfo(newop);
}
+/*
+ * Check whether there are risks caused by type conversion.
+ * If yes, report cause_type.
+ */
+void check_report_cause_type(FuncExpr* funcExpr, int indkey)
+{
+ Node* varNode = NULL;
+ ListCell* argsCell = NULL;
+ if (list_length(funcExpr->args) != 1) {
+ return;
+ }
+
+ argsCell = list_head(funcExpr->args);
+ Node* node = (Node*)lfirst(argsCell);
+ if (IsA(node, Var)) {
+ varNode = node;
+ } else if (IsA(node, FuncExpr)) {
+ varNode = match_first_var_to_indkey(node, indkey);
+ }
+ /* Type conversion in g_typeCastFuncOids with only one parameter is supported. */
+ if (IsFunctionTransferNumDistinct(funcExpr) && varNode != NULL && IsA(varNode, Var) &&
+ indkey == ((Var*)varNode)->varattno) {
+ instr_stmt_report_cause_type(NUM_F_TYPECASTING);
+ }
+}
+/*
+ * return the first var that matches the index column
+ * return NULL if none exists
+ */
+Node* match_first_var_to_indkey(Node* node, int indkey)
+{
+ Node* lastNode = NULL;
+
+ List* varList = pull_var_clause(node, PVC_RECURSE_AGGREGATES, PVC_RECURSE_PLACEHOLDERS, PVC_RECURSE_SPECIAL_EXPR);
+ if (varList != NULL) {
+ ListCell* var_cell = NULL;
+ foreach (var_cell, varList) {
+ Node* var = (Node*)lfirst(var_cell);
+ if (indkey == ((Var*)var)->varattno) {
+ lastNode = var;
+ break;
+ }
+ }
+ }
+ return lastNode;
+}
diff --git a/src/gausskernel/optimizer/plan/createplan.cpp b/src/gausskernel/optimizer/plan/createplan.cpp
index 6c3a8eb8f..ea5a056f8 100755
--- a/src/gausskernel/optimizer/plan/createplan.cpp
+++ b/src/gausskernel/optimizer/plan/createplan.cpp
@@ -165,7 +165,7 @@ static BitmapOr* make_bitmap_or(List* bitmapplans);
static NestLoop* make_nestloop(List* tlist, List* joinclauses, List* otherclauses, List* nestParams, Plan* lefttree,
Plan* righttree, JoinType jointype);
static HashJoin* make_hashjoin(List* tlist, List* joinclauses, List* otherclauses, List* hashclauses, Plan* lefttree,
- Plan* righttree, JoinType jointype);
+ Plan* righttree, JoinType jointype, List *hashcollations);
static Hash* make_hash(
Plan* lefttree, Oid skewTable, AttrNumber skewColumn, bool skewInherit, Oid skewColType, int32 skewColTypmod);
static MergeJoin* make_mergejoin(List* tlist, List* joinclauses, List* otherclauses, List* mergeclauses,
@@ -1507,6 +1507,17 @@ static Plan* create_unique_plan(PlannerInfo* root, UniquePath* best_path)
AttrNumber* groupColIdx = NULL;
int groupColPos;
ListCell* l = NULL;
+ Oid* groupCollations;
+
+ /* A unique plan may push expressions down to its subplan. If the subplan is a cstorescan
+ * and the unique plan contains expressions the vector engine does not support, this may cause an error.
+ */
+ if (best_path->umethod != UNIQUE_PATH_NOOP && best_path->subpath->pathtype == T_CStoreScan &&
+ vector_engine_unsupport_expression_walker((Node*)best_path->uniq_exprs)) {
+ Path* resPath = (Path*)create_result_path(root, best_path->subpath->parent, NULL, best_path->subpath);
+ ((ResultPath*)resPath)->ispulledupqual = true;
+ best_path->subpath = resPath;
+ }
subplan = create_plan_recurse(root, best_path->subpath);
@@ -1643,6 +1654,26 @@ static Plan* create_unique_plan(PlannerInfo* root, UniquePath* best_path)
groupOperators[groupColPos++] = eq_oper;
}
+ newtlist = subplan->targetlist;
+ numGroupCols = list_length(uniq_exprs);
+ groupCollations = (Oid*)palloc(numGroupCols * sizeof(Oid));
+
+ groupColPos = 0;
+ foreach(l, uniq_exprs)
+ {
+ Node* uniqexpr = (Node*)lfirst(l);
+ TargetEntry *tle = NULL;
+
+ tle = tlist_member(uniqexpr, newtlist);
+ if (tle == NULL) /* shouldn't happen */
+ ereport(ERROR, (errmodule(MOD_OPT),
+ errcode(ERRCODE_OPTIMIZER_INCONSISTENT_STATE),
+ (errmsg("failed to find unique expression in subplan tlist"))));
+
+ groupCollations[groupColPos] = exprCollation((Node*) tle->expr);
+ groupColPos++;
+ }
+
/*
* Since the Agg node is going to project anyway, we can give it the
* minimum output tlist, without any stuff we might have added to the
@@ -1665,6 +1696,7 @@ static Plan* create_unique_plan(PlannerInfo* root, UniquePath* best_path)
numGroupCols,
groupColIdx,
groupOperators,
+ groupCollations,
numGroups[0],
subplan,
NULL,
@@ -4599,6 +4631,7 @@ static HashJoin* create_hashjoin_plan(PlannerInfo* root, HashPath* best_path, Pl
HashJoin* join_plan = NULL;
Hash* hash_plan = NULL;
Relids left_relids = NULL;
+ List *hashcollations = NIL;
/* Sort join qual clauses into best execution order */
joinclauses = order_qual_clauses(root, best_path->jpath.joinrestrictinfo);
@@ -4674,12 +4707,19 @@ static HashJoin* create_hashjoin_plan(PlannerInfo* root, HashPath* best_path, Pl
}
}
+ ListCell *lc;
+ foreach(lc, hashclauses)
+ {
+ OpExpr *hclause = lfirst_node(OpExpr, lc);
+ hashcollations = lappend_oid(hashcollations, hclause->inputcollid);
+ }
+
/*
* Build the hash node and hash join node.
*/
hash_plan = make_hash(inner_plan, skewTable, skewColumn, skewInherit, skewColType, skewColTypmod);
- join_plan = make_hashjoin(
- tlist, joinclauses, otherclauses, hashclauses, outer_plan, (Plan*)hash_plan, best_path->jpath.jointype);
+ join_plan = make_hashjoin(tlist, joinclauses, otherclauses, hashclauses, outer_plan, (Plan*)hash_plan,
+ best_path->jpath.jointype, hashcollations);
/*
* @hdfs
@@ -6385,7 +6425,7 @@ HashJoin* create_direct_hashjoin(
}
hash_plan = (Plan*)make_hash(innerPlan, skewTable, skewColumn, skewInherit, skewColType, skewColTypmod);
- join_plan = make_hashjoin(tlist, joinClauses, NIL, hashclauses, outerPlan, hash_plan, joinType);
+ join_plan = make_hashjoin(tlist, joinClauses, NIL, hashclauses, outerPlan, hash_plan, joinType, NULL);
/* estimate the mem_info for join_plan, refered to the function initial_cost_hashjoin */
estimate_directHashjoin_Cost(root, hashclauses, outerPlan, hash_plan, join_plan);
@@ -6597,7 +6637,7 @@ Plan* create_direct_righttree(
}
static HashJoin* make_hashjoin(List* tlist, List* joinclauses, List* otherclauses, List* hashclauses, Plan* lefttree,
- Plan* righttree, JoinType jointype)
+ Plan* righttree, JoinType jointype, List *hashcollations)
{
HashJoin* node = makeNode(HashJoin);
Plan* plan = &node->join.plan;
@@ -6610,6 +6650,7 @@ static HashJoin* make_hashjoin(List* tlist, List* joinclauses, List* otherclause
node->hashclauses = hashclauses;
node->join.jointype = jointype;
node->join.joinqual = joinclauses;
+ node->hash_collations = hashcollations;
return node;
}
@@ -7308,9 +7349,9 @@ void adjust_all_pathkeys_by_agg_tlist(PlannerInfo* root, List* tlist, WindowList
}
Agg* make_agg(PlannerInfo* root, List* tlist, List* qual, AggStrategy aggstrategy, const AggClauseCosts* aggcosts,
- int numGroupCols, AttrNumber* grpColIdx, Oid* grpOperators, long numGroups, Plan* lefttree, WindowLists* wflists,
- bool need_stream, bool trans_agg, List* groupingSets, Size hash_entry_size, bool add_width,
- AggOrientation agg_orientation, bool unique_check)
+ int numGroupCols, AttrNumber* grpColIdx, Oid* grpOperators, Oid* grpCollations, long numGroups,
+ Plan* lefttree, WindowLists* wflists, bool need_stream, bool trans_agg, List* groupingSets,
+ Size hash_entry_size, bool add_width, AggOrientation agg_orientation, bool unique_check)
{
Agg* node = makeNode(Agg);
Plan* plan = &node->plan;
@@ -7376,6 +7417,7 @@ Agg* make_agg(PlannerInfo* root, List* tlist, List* qual, AggStrategy aggstrateg
node->numGroups = numGroups;
node->skew_optimize = SKEW_RES_NONE;
node->unique_check = unique_check && root->parse->unique_check;
+ node->grp_collations = grpCollations;
#ifdef STREAMPLAN
inherit_plan_locator_info((Plan*)node, lefttree);
@@ -7465,7 +7507,7 @@ Agg* make_agg(PlannerInfo* root, List* tlist, List* qual, AggStrategy aggstrateg
WindowAgg* make_windowagg(PlannerInfo* root, List* tlist, List* windowFuncs, Index winref, int partNumCols,
AttrNumber* partColIdx, Oid* partOperators, int ordNumCols, AttrNumber* ordColIdx, Oid* ordOperators,
- int frameOptions, Node* startOffset, Node* endOffset, Plan* lefttree)
+ int frameOptions, Node* startOffset, Node* endOffset, Plan* lefttree, Oid *partCollations, Oid *ordCollations)
{
WindowAgg* node = makeNode(WindowAgg);
Plan* plan = &node->plan;
@@ -7481,6 +7523,8 @@ WindowAgg* make_windowagg(PlannerInfo* root, List* tlist, List* windowFuncs, Ind
node->frameOptions = frameOptions;
node->startOffset = startOffset;
node->endOffset = endOffset;
+ node->part_collations = partCollations;
+ node->ord_collations = ordCollations;
#ifdef STREAMPLAN
inherit_plan_locator_info((Plan*)node, lefttree);
@@ -7517,7 +7561,7 @@ WindowAgg* make_windowagg(PlannerInfo* root, List* tlist, List* windowFuncs, Ind
}
Group* make_group(PlannerInfo* root, List* tlist, List* qual, int numGroupCols, AttrNumber* grpColIdx,
- Oid* grpOperators, double numGroups, Plan* lefttree)
+ Oid* grpOperators, double numGroups, Plan* lefttree, Oid* grpCollations)
{
Group* node = makeNode(Group);
Plan* plan = &node->plan;
@@ -7531,6 +7575,7 @@ Group* make_group(PlannerInfo* root, List* tlist, List* qual, int numGroupCols,
node->numCols = numGroupCols;
node->grpColIdx = grpColIdx;
node->grpOperators = grpOperators;
+ node->grp_collations = grpCollations;
#ifdef STREAMPLAN
inherit_plan_locator_info((Plan*)node, lefttree);
@@ -7594,6 +7639,7 @@ Unique* make_unique(Plan* lefttree, List* distinctList)
int keyno = 0;
AttrNumber* uniqColIdx = NULL;
Oid* uniqOperators = NULL;
+ Oid* uniqCollations = NULL;
ListCell* slitem = NULL;
#ifdef STREAMPLAN
@@ -7627,6 +7673,7 @@ Unique* make_unique(Plan* lefttree, List* distinctList)
Assert(numCols > 0);
uniqColIdx = (AttrNumber*)palloc(sizeof(AttrNumber) * numCols);
uniqOperators = (Oid*)palloc(sizeof(Oid) * numCols);
+ uniqCollations = (Oid*)palloc(sizeof(Oid) * numCols);
foreach (slitem, distinctList) {
SortGroupClause* sortcl = (SortGroupClause*)lfirst(slitem);
@@ -7634,6 +7681,7 @@ Unique* make_unique(Plan* lefttree, List* distinctList)
uniqColIdx[keyno] = tle->resno;
uniqOperators[keyno] = sortcl->eqop;
+ uniqCollations[keyno] = exprCollation((Node *) tle->expr);
Assert(OidIsValid(uniqOperators[keyno]));
keyno++;
}
@@ -7641,6 +7689,7 @@ Unique* make_unique(Plan* lefttree, List* distinctList)
node->numCols = numCols;
node->uniqColIdx = uniqColIdx;
node->uniqOperators = uniqOperators;
+ node->uniq_collations = uniqCollations;
return node;
}
@@ -7659,6 +7708,7 @@ SetOp* make_setop(SetOpCmd cmd, SetOpStrategy strategy, Plan* lefttree, List* di
int keyno = 0;
AttrNumber* dupColIdx = NULL;
Oid* dupOperators = NULL;
+ Oid* dupCollations = NULL;
ListCell* slitem = NULL;
#ifdef STREAMPLAN
@@ -7688,6 +7738,7 @@ SetOp* make_setop(SetOpCmd cmd, SetOpStrategy strategy, Plan* lefttree, List* di
Assert(numCols > 0);
dupColIdx = (AttrNumber*)palloc(sizeof(AttrNumber) * numCols);
dupOperators = (Oid*)palloc(sizeof(Oid) * numCols);
+ dupCollations = (Oid*)palloc(sizeof(Oid) * numCols);
foreach (slitem, distinctList) {
SortGroupClause* sortcl = (SortGroupClause*)lfirst(slitem);
@@ -7695,6 +7746,7 @@ SetOp* make_setop(SetOpCmd cmd, SetOpStrategy strategy, Plan* lefttree, List* di
dupColIdx[keyno] = tle->resno;
dupOperators[keyno] = sortcl->eqop;
+ dupCollations[keyno] = exprCollation((Node*) tle->expr);
Assert(OidIsValid(dupOperators[keyno]));
keyno++;
}
@@ -7704,6 +7756,7 @@ SetOp* make_setop(SetOpCmd cmd, SetOpStrategy strategy, Plan* lefttree, List* di
node->numCols = numCols;
node->dupColIdx = dupColIdx;
node->dupOperators = dupOperators;
+ node->dup_collations = dupCollations;
node->flagColIdx = flagColIdx;
node->firstFlag = firstFlag;
node->numGroups = numGroups;
diff --git a/src/gausskernel/optimizer/plan/planner.cpp b/src/gausskernel/optimizer/plan/planner.cpp
index 404351989..f281f0a86 100755
--- a/src/gausskernel/optimizer/plan/planner.cpp
+++ b/src/gausskernel/optimizer/plan/planner.cpp
@@ -88,6 +88,7 @@
#include "optimizer/stream_remove.h"
#include "executor/node/nodeModifyTable.h"
#include "optimizer/gplanmgr.h"
+#include "instruments/instr_statement.h"
#ifndef MIN
#define MIN(A, B) ((B) < (A) ? (B) : (A))
@@ -148,13 +149,14 @@ static bool choose_hashed_distinct(PlannerInfo* root, double tuple_fraction, dou
int path_width, Cost cheapest_startup_cost, Cost cheapest_total_cost, Distribution* cheapest_distribution,
Cost sorted_startup_cost, Cost sorted_total_cost, Distribution* sorted_distribution, List* sorted_pathkeys,
double dNumDistinctRows, Size hashentrysize);
-static List* make_subplanTargetList(PlannerInfo* root, List* tlist, AttrNumber** groupColIdx, bool* need_tlist_eval);
+static List* make_subplanTargetList(PlannerInfo* root, List* tlist, AttrNumber** groupColIdx, bool* need_tlist_eval,
+ Oid** gruopCollations);
static void locate_grouping_columns(PlannerInfo* root, List* tlist, List* sub_tlist, AttrNumber* groupColIdx);
static List* postprocess_setop_tlist(List* new_tlist, List* orig_tlist);
static List* make_windowInputTargetList(PlannerInfo* root, List* tlist, List* activeWindows);
static void get_column_info_for_window(PlannerInfo* root, WindowClause* wc, List* tlist, int numSortCols,
AttrNumber* sortColIdx, int* partNumCols, AttrNumber** partColIdx, Oid** partOperators, int* ordNumCols,
- AttrNumber** ordColIdx, Oid** ordOperators);
+ AttrNumber** ordColIdx, Oid** ordOperators, Oid** partCollations, Oid** ordCollations);
static List* add_groupingIdExpr_to_tlist(List* tlist);
static List* get_group_expr(List* sortrefList, List* tlist);
static void build_grouping_itst_keys(PlannerInfo* root, List* active_windows);
@@ -891,7 +893,11 @@ PlannedStmt* standard_planner(Query* parse, int cursorOptions, ParamListInfo bou
result->noanalyze_rellist = (List*)copyObject(t_thrd.postgres_cxt.g_NoAnalyzeRelNameList);
result->hasIgnore = parse->hasIgnore;
- if (ENABLE_CACHEDPLAN_MGR) {
+ if (ENABLE_CACHEDPLAN_MGR && u_sess->pcache_cxt.is_plan_exploration) {
+ ereport(DEBUG2,
+ (errmodule(MOD_OPT),
+ errmsg(" ThreadId: %d: append PlannerInfo to session. PlannerInfo's memorycxt is \"%s\" and parent is \"%s\".",
+ gettid(), root->planner_cxt->name, root->planner_cxt->parent->name)));
u_sess->pcache_cxt.explored_plan_info = root;
}
@@ -909,6 +915,8 @@ PlannedStmt* standard_planner(Query* parse, int cursorOptions, ParamListInfo bou
result->query_string = NULL;
result->MaxBloomFilterNum = root->glob->bloomfilter.bloomfilter_index + 1;
+ if (instr_stmt_plan_need_report_cause_type())
+ result->cause_type = instr_stmt_plan_get_cause_type();
/* record which suplan belongs to which thread */
#ifdef ENABLE_MULTIPLE_NODES
if (IS_STREAM_PLAN) {
@@ -1698,8 +1706,11 @@ Plan* subquery_planner(PlannerGlobal* glob, Query* parse, PlannerInfo* parent_ro
* Note that both havingQual and parse->jointree->quals are in
* implicitly-ANDed-list form at this point, even though they are declared
* as Node *.
+     * Also, HAVING quals should not be transferred into WHERE clauses, since
+     * the rownum is expected to be assigned to tuples before being filtered by
+     * other HAVING quals.
*/
- if (!parse->unique_check) {
+ if (!parse->unique_check && !expression_contains_rownum((Node*)parse->havingQual)) {
newHaving = NIL;
foreach(l, (List *) parse->havingQual) {
Node *havingclause = (Node *)lfirst(l);
@@ -2742,6 +2753,7 @@ static Plan* grouping_planner(PlannerInfo* root, double tuple_fraction)
List* sub_tlist = NIL;
double sub_limit_tuples;
AttrNumber* groupColIdx = NULL;
+ Oid* groupCollation = NULL;
bool need_try_fdw_plan = false;
bool need_tlist_eval = true;
Path* cheapest_path = NULL;
@@ -2892,7 +2904,7 @@ static Plan* grouping_planner(PlannerInfo* root, double tuple_fraction)
* Generate appropriate target list for subplan; may be different from
* tlist if grouping or aggregation is needed.
*/
- sub_tlist = make_subplanTargetList(root, tlist, &groupColIdx, &need_tlist_eval);
+ sub_tlist = make_subplanTargetList(root, tlist, &groupColIdx, &need_tlist_eval, &groupCollation);
/* Set matching and superset key for planner info of current query level */
if (IS_STREAM_PLAN) {
@@ -3452,6 +3464,7 @@ static Plan* grouping_planner(PlannerInfo* root, double tuple_fraction)
numGroupCols,
groupColIdx,
extract_grouping_ops(parse->groupClause),
+ groupCollation,
localNumGroup,
result_plan,
wflists,
@@ -3595,6 +3608,7 @@ static Plan* grouping_planner(PlannerInfo* root, double tuple_fraction)
numGroupCols,
groupColIdx,
extract_grouping_ops(parse->groupClause),
+ extract_grouping_collations(parse->groupClause, tlist),
localNumGroup,
result_plan,
wflists,
@@ -3725,6 +3739,7 @@ static Plan* grouping_planner(PlannerInfo* root, double tuple_fraction)
numGroupCols,
groupColIdx,
extract_grouping_ops(parse->groupClause),
+ extract_grouping_collations(parse->groupClause, tlist),
localNumGroup,
result_plan,
wflists,
@@ -3811,7 +3826,9 @@ static Plan* grouping_planner(PlannerInfo* root, double tuple_fraction)
groupColIdx,
extract_grouping_ops(parse->groupClause),
dNumGroups[0],
- result_plan);
+ result_plan,
+ extract_grouping_collations(parse->groupClause,
+ needs_stream && need_tlist_eval ? result_plan->targetlist : tlist));
next_is_second_level_group = true;
}
@@ -3962,6 +3979,8 @@ static Plan* grouping_planner(PlannerInfo* root, double tuple_fraction)
int ordNumCols;
AttrNumber* ordColIdx = NULL;
Oid* ordOperators = NULL;
+ Oid* partCollations = NULL;
+ Oid* ordCollations = NULL;
window_pathkeys = make_pathkeys_for_window(root, wc, tlist, true);
@@ -4001,7 +4020,9 @@ static Plan* grouping_planner(PlannerInfo* root, double tuple_fraction)
&partOperators,
&ordNumCols,
&ordColIdx,
- &ordOperators);
+ &ordOperators,
+ &partCollations,
+ &ordCollations);
} else {
/* empty window specification, nothing to sort */
current_pathkeys = NIL;
@@ -4035,7 +4056,9 @@ static Plan* grouping_planner(PlannerInfo* root, double tuple_fraction)
wc->frameOptions,
wc->startOffset,
wc->endOffset,
- result_plan);
+ result_plan,
+ partCollations,
+ ordCollations);
#ifdef STREAMPLAN
if (IS_STREAM_PLAN && is_execute_on_datanodes(result_plan) && !is_replicated_plan(result_plan)) {
result_plan = (Plan*)mark_windowagg_stream(root, result_plan, tlist, wc, current_pathkeys, wflists);
@@ -4156,6 +4179,7 @@ static Plan* grouping_planner(PlannerInfo* root, double tuple_fraction)
list_length(parse->distinctClause),
extract_grouping_cols(parse->distinctClause, result_plan->targetlist),
extract_grouping_ops(parse->distinctClause),
+ extract_grouping_collations(parse->distinctClause, result_plan->targetlist),
(long)Min(numDistinctRows[0], (double)LONG_MAX),
result_plan,
NULL,
@@ -4616,6 +4640,7 @@ static Plan* build_grouping_chain(PlannerInfo* root, Query* parse, List** tlist,
list_length((List*)linitial(gsets)),
new_grpColIdx,
extract_grouping_ops(groupClause),
+ extract_grouping_collations(parse->groupClause, *tlist),
numGroups,
sort_plan,
wflists,
@@ -4657,6 +4682,7 @@ static Plan* build_grouping_chain(PlannerInfo* root, Query* parse, List** tlist,
numGroupCols,
top_grpColIdx,
extract_grouping_ops(groupClause),
+ extract_grouping_collations(parse->groupClause, newTlist),
numGroups,
result_plan,
wflists,
@@ -6862,7 +6888,8 @@ static bool choose_hashed_distinct(PlannerInfo* root, double tuple_fraction, dou
*
* The result is the targetlist to be passed to query_planner.
*/
-static List* make_subplanTargetList(PlannerInfo* root, List* tlist, AttrNumber** groupColIdx, bool* need_tlist_eval)
+static List* make_subplanTargetList(PlannerInfo* root, List* tlist, AttrNumber** groupColIdx,
+ bool* need_tlist_eval, Oid** gruopCollations)
{
Query* parse = root->parse;
List* sub_tlist = NIL;
@@ -6871,6 +6898,7 @@ static List* make_subplanTargetList(PlannerInfo* root, List* tlist, AttrNumber**
int numCols;
*groupColIdx = NULL;
+ *gruopCollations = NULL;
/*
* If we're not grouping or aggregating, there's nothing to do here;
@@ -6912,6 +6940,14 @@ static List* make_subplanTargetList(PlannerInfo* root, List* tlist, AttrNumber**
grpColIdx = (AttrNumber*)palloc0(sizeof(AttrNumber) * numCols);
*groupColIdx = grpColIdx;
+ Oid* grpCollations = NULL;
+ if (parse->groupingSets) {
+ grpCollations = (Oid *)palloc0(sizeof(Oid) * (numCols + 1));
+ } else {
+ grpCollations = (Oid *)palloc0(sizeof(Oid) * numCols);
+ }
+ *gruopCollations = grpCollations;
+
foreach (tl, sub_tlist) {
TargetEntry* tle = (TargetEntry*)lfirst(tl);
int colno;
@@ -6936,6 +6972,7 @@ static List* make_subplanTargetList(PlannerInfo* root, List* tlist, AttrNumber**
"invalid grpColIdx item when adding a grouping column to the result tlist."); /* no dups expected */
grpColIdx[colno] = newtle->resno;
+ grpCollations[colno] = exprCollation((Node *) newtle->expr);
if (!(newtle->expr && IsA(newtle->expr, Var)))
*need_tlist_eval = true; /* tlist contains non Vars */
}
@@ -7403,7 +7440,7 @@ List* make_pathkeys_for_window(PlannerInfo* root, WindowClause* wc, List* tlist,
*/
static void get_column_info_for_window(PlannerInfo* root, WindowClause* wc, List* tlist, int numSortCols,
AttrNumber* sortColIdx, int* partNumCols, AttrNumber** partColIdx, Oid** partOperators, int* ordNumCols,
- AttrNumber** ordColIdx, Oid** ordOperators)
+ AttrNumber** ordColIdx, Oid** ordOperators, Oid** partCollations, Oid** ordCollations)
{
int numPart = list_length(wc->partitionClause);
int numOrder = list_length(wc->orderClause);
@@ -7416,6 +7453,8 @@ static void get_column_info_for_window(PlannerInfo* root, WindowClause* wc, List
*ordNumCols = numOrder;
*ordColIdx = sortColIdx + numPart;
*ordOperators = extract_grouping_ops(wc->orderClause);
+ *partCollations = extract_grouping_collations(wc->partitionClause, tlist);
+ *ordCollations = extract_grouping_collations(wc->orderClause, tlist);
} else {
List* sortclauses = NIL;
List* pathkeys = NIL;
@@ -7429,12 +7468,15 @@ static void get_column_info_for_window(PlannerInfo* root, WindowClause* wc, List
*ordNumCols = 0;
*ordColIdx = (AttrNumber*)palloc(numOrder * sizeof(AttrNumber));
*ordOperators = (Oid*)palloc(numOrder * sizeof(Oid));
+ *partCollations = (Oid*)palloc(numPart * sizeof(Oid));
+ *ordCollations = (Oid*)palloc(numOrder * sizeof(Oid));
sortclauses = NIL;
pathkeys = NIL;
scidx = 0;
foreach (lc, wc->partitionClause) {
SortGroupClause* sgc = (SortGroupClause*)lfirst(lc);
List* new_pathkeys = NIL;
+ TargetEntry *tle = get_sortgroupclause_tle(sgc, tlist);
sortclauses = lappend(sortclauses, sgc);
new_pathkeys = make_pathkeys_for_sortclauses(root, sortclauses, tlist, true);
@@ -7442,6 +7484,7 @@ static void get_column_info_for_window(PlannerInfo* root, WindowClause* wc, List
/* this sort clause is actually significant */
(*partColIdx)[*partNumCols] = sortColIdx[scidx++];
(*partOperators)[*partNumCols] = sgc->eqop;
+ (*partCollations)[*partNumCols] = exprCollation((Node*)tle->expr);
(*partNumCols)++;
pathkeys = new_pathkeys;
}
@@ -7449,6 +7492,7 @@ static void get_column_info_for_window(PlannerInfo* root, WindowClause* wc, List
foreach (lc, wc->orderClause) {
SortGroupClause* sgc = (SortGroupClause*)lfirst(lc);
List* new_pathkeys = NIL;
+ TargetEntry *tle = get_sortgroupclause_tle(sgc, tlist);
sortclauses = lappend(sortclauses, sgc);
new_pathkeys = make_pathkeys_for_sortclauses(root, sortclauses, tlist, true);
@@ -7456,6 +7500,7 @@ static void get_column_info_for_window(PlannerInfo* root, WindowClause* wc, List
/* this sort clause is actually significant */
(*ordColIdx)[*ordNumCols] = sortColIdx[scidx++];
(*ordOperators)[*ordNumCols] = sgc->eqop;
+ (*ordCollations)[*ordNumCols] = exprCollation((Node*) tle->expr);
(*ordNumCols)++;
pathkeys = new_pathkeys;
}
@@ -11095,6 +11140,7 @@ static Plan* generate_hashagg_plan(PlannerInfo* root, Plan* plan, List* final_li
numGroupCols,
local_groupColIdx,
groupColOps,
+ NULL,
final_groups,
plan,
wflists,
@@ -11471,6 +11517,7 @@ static Plan* generate_hashagg_plan(PlannerInfo* root, Plan* plan, List* final_li
numGroupCols,
local_groupColIdx,
groupColOps,
+ NULL,
final_groups,
plan,
wflists,
@@ -11511,6 +11558,7 @@ static Plan* generate_hashagg_plan(PlannerInfo* root, Plan* plan, List* final_li
numGroupCols,
local_groupColIdx,
groupColOps,
+ NULL,
(long)Min(local_distinct, (double)LONG_MAX),
plan,
wflists,
@@ -11586,6 +11634,7 @@ static Plan* generate_hashagg_plan(PlannerInfo* root, Plan* plan, List* final_li
numGroupCols,
local_groupColIdx,
groupColOps,
+ NULL,
rows,
plan,
wflists,
@@ -12002,6 +12051,7 @@ static Plan* get_count_distinct_partial_plan(PlannerInfo* root, Plan* result_pla
0,
NULL,
NULL,
+ NULL,
(long)Min(numGroups[0], (double)LONG_MAX),
result_plan,
wflists,
@@ -13219,7 +13269,7 @@ static void init_optimizer_context(PlannerGlobal* glob)
static void deinit_optimizer_context(PlannerGlobal* glob)
{
- if (IS_NEED_FREE_MEMORY_CONTEXT(glob->plannerContext->plannerMemContext)) {
+ if (IS_NEED_FREE_MEMORY_CONTEXT(glob->plannerContext->plannerMemContext) && !u_sess->pcache_cxt.is_plan_exploration) {
MemoryContextDelete(glob->plannerContext->plannerMemContext);
glob->plannerContext->plannerMemContext = NULL;
glob->plannerContext->dataSkewMemContext = NULL;
diff --git a/src/gausskernel/optimizer/plan/streamwalker.cpp b/src/gausskernel/optimizer/plan/streamwalker.cpp
old mode 100644
new mode 100755
index 5766ed0c1..ee5c8be2e
--- a/src/gausskernel/optimizer/plan/streamwalker.cpp
+++ b/src/gausskernel/optimizer/plan/streamwalker.cpp
@@ -369,7 +369,8 @@ static void stream_walker_query_jointree(Query* query, shipping_context *cxt)
expression_tree_walker((Node*)query->jointree->fromlist, (bool (*)())stream_walker, (void *)cxt)) {
cxt->current_shippable = false;
}
- if (query->jointree != NULL && stream_walker((Node*)query->jointree->quals, (void *)cxt)) {
+ if (query->jointree != NULL &&
+ contain_unsupport_expression((Node*)query->jointree->quals, (void *)cxt)) {
cxt->current_shippable = false;
}
}
@@ -915,6 +916,18 @@ static bool contain_unsupport_expression(Node* expr, void* context)
}
}
} break;
+ case T_OpExpr: {
+ OpExpr* op = (OpExpr*)expr;
+ if (contain_unsupport_expression((Node*)op->args, context)) {
+ cxt->current_shippable = false;
+ }
+ } break;
+ case T_BoolExpr: {
+ BoolExpr* be = (BoolExpr*)expr;
+ if (contain_unsupport_expression((Node*)be->args, context)) {
+ cxt->current_shippable = false;
+ }
+ } break;
case T_FuncExpr: {
FuncExpr* func = (FuncExpr*)expr;
if (pgxc_is_shippable_func_contain_any(func->funcid)) {
diff --git a/src/gausskernel/optimizer/plan/subselect.cpp b/src/gausskernel/optimizer/plan/subselect.cpp
index ceb69fe27..bc92b3673 100644
--- a/src/gausskernel/optimizer/plan/subselect.cpp
+++ b/src/gausskernel/optimizer/plan/subselect.cpp
@@ -5996,6 +5996,33 @@ static void add_targetlist_to_group(Query* query)
}
}
+
+/**
+ * @Description Remove the target entries in the target list whose resjunk is true.
+ *
+ * @in query - the query whose targetList is filtered in place.
+ */
+static void remove_target_not_in_final(Query *query)
+{
+
+ List *old_targetList = NIL;
+ List *new_targetList = NIL;
+
+ old_targetList = query->targetList;
+ query->targetList = NIL;
+ ListCell *lc = NULL;
+
+
+ foreach (lc, old_targetList) {
+ TargetEntry *tg = (TargetEntry *)lfirst(lc);
+ if (tg->resjunk)
+ continue;
+ new_targetList = lappend(new_targetList, tg);
+ }
+ query->targetList = new_targetList;
+}
+
+
/*
* @Description: Convert this any sublink to left join.
* @in root - Per-query information for planning/optimization.
@@ -6047,6 +6074,9 @@ void convert_ORANY_to_join(
/* Judge this quals if only include 'and' and 'equal' oper. */
if (equal_expr(test_quals)) {
+        /* Remove the target entries that will not appear in the final target list. */
+ remove_target_not_in_final(sub_select);
+
/* add all targetlist to query's group clsuse. */
add_targetlist_to_group(sub_select);
diff --git a/src/gausskernel/optimizer/prep/prepjointree.cpp b/src/gausskernel/optimizer/prep/prepjointree.cpp
index 82d978dfc..35375e81f 100755
--- a/src/gausskernel/optimizer/prep/prepjointree.cpp
+++ b/src/gausskernel/optimizer/prep/prepjointree.cpp
@@ -1894,6 +1894,8 @@ static bool is_safe_append_member(Query* subquery)
}
if (!IsA(jtnode, RangeTblRef))
return false;
+ if (subquery->hintState)
+ return false;
return true;
}
diff --git a/src/gausskernel/optimizer/prep/preprownum.cpp b/src/gausskernel/optimizer/prep/preprownum.cpp
index 1af3bc7e8..bcb28bc01 100755
--- a/src/gausskernel/optimizer/prep/preprownum.cpp
+++ b/src/gausskernel/optimizer/prep/preprownum.cpp
@@ -53,8 +53,8 @@ void preprocess_rownum(PlannerInfo *root, Query *parse)
if (quals == NULL) {
return;
}
- /* If it includes {aggregation function} or {order by} or {group by}, can not be rewrited */
- if (parse->hasAggs || (parse->sortClause != NULL) || (parse->groupClause != NULL)) {
+    /* If it includes {aggregation function} or {order by} or {group by} or {offset}, it cannot be rewritten */
+ if (parse->hasAggs || (parse->sortClause != NULL) || (parse->groupClause != NULL) || (parse->limitOffset != NULL)) {
return;
}
if (parse->limitCount != NULL) {
diff --git a/src/gausskernel/optimizer/prep/prepunion.cpp b/src/gausskernel/optimizer/prep/prepunion.cpp
index bc01f3bfd..9a255c5b0 100644
--- a/src/gausskernel/optimizer/prep/prepunion.cpp
+++ b/src/gausskernel/optimizer/prep/prepunion.cpp
@@ -969,6 +969,7 @@ static Plan* make_union_unique(
list_length(groupList),
extract_grouping_cols(groupList, plan->targetlist),
extract_grouping_ops(groupList),
+ extract_grouping_collations(groupList, plan->targetlist),
numGroups[0],
plan,
NULL,
diff --git a/src/gausskernel/optimizer/rewrite/rewriteDefine.cpp b/src/gausskernel/optimizer/rewrite/rewriteDefine.cpp
index b58949057..e156930d5 100644
--- a/src/gausskernel/optimizer/rewrite/rewriteDefine.cpp
+++ b/src/gausskernel/optimizer/rewrite/rewriteDefine.cpp
@@ -609,6 +609,9 @@ void DefineQueryRewrite(
replaces[Anum_pg_class_relfrozenxid64 - 1] = true;
values[Anum_pg_class_relfrozenxid64 - 1] = TransactionIdGetDatum(InvalidTransactionId);
+ replaces[Anum_pg_class_reloptions - 1] = true;
+ nulls[Anum_pg_class_reloptions - 1] = true;
+
nctup = (HeapTuple) tableam_tops_modify_tuple(classTup, RelationGetDescr(relationRelation), values, nulls, replaces);
simple_heap_update(relationRelation, &nctup->t_self, nctup);
diff --git a/src/gausskernel/optimizer/rewrite/rewriteHandler.cpp b/src/gausskernel/optimizer/rewrite/rewriteHandler.cpp
index cc5026c36..795c7155a 100644
--- a/src/gausskernel/optimizer/rewrite/rewriteHandler.cpp
+++ b/src/gausskernel/optimizer/rewrite/rewriteHandler.cpp
@@ -34,6 +34,7 @@
#include "parser/parse_coerce.h"
#include "parser/parsetree.h"
#include "parser/parse_merge.h"
+#include "parser/parse_hint.h"
#include "rewrite/rewriteDefine.h"
#include "rewrite/rewriteHandler.h"
#include "rewrite/rewriteManip.h"
@@ -4410,6 +4411,7 @@ char* GetCreateTableStmt(Query* parsetree, CreateTableAsStmt* stmt)
/* Start building a CreateStmt for creating the target table */
CreateStmt* create_stmt = makeNode(CreateStmt);
create_stmt->relation = stmt->into->rel;
+ create_stmt->charset = PG_INVALID_ENCODING;
IntoClause* into = stmt->into;
List* tableElts = NIL;
@@ -4475,6 +4477,7 @@ char* GetCreateTableStmt(Query* parsetree, CreateTableAsStmt* stmt)
coldef->cooked_default = NULL;
coldef->update_default = NULL;
coldef->constraints = NIL;
+ coldef->collOid = exprCollation((Node*)tle->expr);
/*
* Set typeOid and typemod. The name of the type is derived while
@@ -4482,6 +4485,7 @@ char* GetCreateTableStmt(Query* parsetree, CreateTableAsStmt* stmt)
*/
tpname->typeOid = exprType((Node*)tle->expr);
tpname->typemod = exprTypmod((Node*)tle->expr);
+ tpname->charset = exprCharset((Node*)tle->expr);
/*
* If the column of source relation is encrypted
@@ -4570,6 +4574,31 @@ static bool selectNeedRecovery(Query* query)
return strcasestr(query->sql_statement, "CONNECT") != NULL;
}
+/*
+ * @Description: shallow-copy the hints which need to be displayed at the top level
+ * (e.g. pull "select HINT..." up into "INSERT HINT INTO xxx SELECT HINT...").
+ * @in src: source hint state.
+ * @out dest: destination hint state.
+ */
+static void _copy_top_HintState(HintState *dest, HintState *src)
+{
+ if (dest == NULL || src == NULL) {
+ return;
+ }
+
+ dest->stream_hint = src->stream_hint;
+ dest->gather_hint = src->gather_hint;
+ dest->cache_plan_hint = src->cache_plan_hint;
+ dest->set_hint = src->set_hint;
+ dest->no_gpc_hint = src->no_gpc_hint;
+ dest->multi_node_hint = src->multi_node_hint;
+ dest->skew_hint = src->skew_hint;
+ dest->predpush_hint = src->predpush_hint;
+ dest->predpush_same_level_hint = src->predpush_same_level_hint;
+ dest->rewrite_hint = src->rewrite_hint;
+ dest->no_expand_hint = src->no_expand_hint;
+}
+
char* GetInsertIntoStmt(CreateTableAsStmt* stmt)
{
/* Get the SELECT query string */
@@ -4602,9 +4631,11 @@ char* GetInsertIntoStmt(CreateTableAsStmt* stmt)
appendStringInfo(cquery, "INSERT ");
- HintState *hintState = ((Query*)stmt->query)->hintState;
- get_hint_string(hintState, cquery);
-
+ HintState *top_hintState = HintStateCreate();
+ _copy_top_HintState(top_hintState, ((Query *)stmt->query)->hintState);
+ get_hint_string(top_hintState, cquery);
+ if (top_hintState)
+ pfree((void *)top_hintState);
if (relation->schemaname)
appendStringInfo(
cquery, " INTO %s.%s", quote_identifier(relation->schemaname), quote_identifier(relation->relname));
@@ -4681,7 +4712,8 @@ List *query_rewrite_set_stmt(Query *query)
List* querytree_list = NIL;
VariableSetStmt *stmt = (VariableSetStmt *)query->utilityStmt;
- if(DB_IS_CMPT(B_FORMAT) && stmt->kind == VAR_SET_VALUE && u_sess->attr.attr_common.enable_set_variable_b_format) {
+ if(DB_IS_CMPT(B_FORMAT) && stmt->kind == VAR_SET_VALUE &&
+ (u_sess->attr.attr_common.enable_set_variable_b_format || ENABLE_SET_VARIABLES)) {
List *resultlist = NIL;
ListCell *l = NULL;
@@ -5041,11 +5073,7 @@ List* QueryRewriteSelectIntoVarList(Node *node)
List *resList = NIL;
int res_len = parsetree->targetList->length;
- StringInfo select_sql = makeStringInfo();
- deparse_query(parsetree, select_sql, NIL, false, false);
-
- StmtResult *result = execute_stmt(select_sql->data, true);
- DestroyStringInfo(select_sql);
+ StmtResult *result = execute_select_into_varlist(parsetree);
if (result->tuples == NULL) {
for (int i = 0; i < res_len; i++) {
diff --git a/src/gausskernel/optimizer/rewrite/rewriteSupport.cpp b/src/gausskernel/optimizer/rewrite/rewriteSupport.cpp
index 46db8ef86..0e18746ec 100644
--- a/src/gausskernel/optimizer/rewrite/rewriteSupport.cpp
+++ b/src/gausskernel/optimizer/rewrite/rewriteSupport.cpp
@@ -215,3 +215,31 @@ Oid get_rewrite_oid_without_relid(const char* rulename, Oid* reloid, bool missin
return ruleoid;
}
+
+Oid get_rewrite_relid(Oid ruleid, bool missing_ok)
+{
+ Oid relid;
+ ScanKeyData entry;
+ SysScanDesc scan;
+ HeapTuple rewrite_tup;
+ Relation rewrite_rel = heap_open(RewriteRelationId, AccessShareLock);
+
+ ScanKeyInit(&entry, ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(ruleid));
+ scan = systable_beginscan(rewrite_rel, RewriteOidIndexId, true, NULL, 1, &entry);
+ rewrite_tup = systable_getnext(scan);
+ if (!HeapTupleIsValid(rewrite_tup)) {
+ if (missing_ok) {
+ systable_endscan(scan);
+ heap_close(rewrite_rel, AccessShareLock);
+ return InvalidOid;
+ }
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("rule \"%u\" does not exist", ruleid)));
+ }
+ Form_pg_rewrite pg_rewrite = (Form_pg_rewrite)GETSTRUCT(rewrite_tup);
+ relid = pg_rewrite->ev_class;
+ systable_endscan(scan);
+ heap_close(rewrite_rel, AccessShareLock);
+ return relid;
+}
\ No newline at end of file
diff --git a/src/gausskernel/optimizer/util/clauses.cpp b/src/gausskernel/optimizer/util/clauses.cpp
index fcd60db22..af9a836a0 100644
--- a/src/gausskernel/optimizer/util/clauses.cpp
+++ b/src/gausskernel/optimizer/util/clauses.cpp
@@ -137,6 +137,7 @@ static bool is_exec_external_param_const(PlannerInfo* root, Node* node);
static bool is_operator_pushdown(Oid opno);
static bool contain_var_unsubstitutable_functions_walker(Node* node, void* context);
static bool is_accurate_estimatable_func(Oid funcId);
+static void optbase_eval_user_var_in_opexpr(List *args);
/*****************************************************************************
* OPERATOR clause functions
@@ -2392,7 +2393,7 @@ Node* estimate_expression_value(PlannerInfo* root, Node* node, EState* estate)
}
/* --------------------
- * simplify_subselect_expression
+ * simplify_select_into_expression
*
* Only for select ... into varlist statement.
*
@@ -2401,7 +2402,7 @@ Node* estimate_expression_value(PlannerInfo* root, Node* node, EState* estate)
* for the following process to calculate the value.
* --------------------
*/
-Node* simplify_subselect_expression(Node* node, ParamListInfo boundParams)
+Node* simplify_select_into_expression(Node* node, ParamListInfo boundParams)
{
eval_const_expressions_context context;
@@ -2431,19 +2432,39 @@ Node* simplify_subselect_expression(Node* node, ParamListInfo boundParams)
foreach (lc, qt->targetList) {
TargetEntry *te = (TargetEntry *)lfirst(lc);
Node *tn = (Node *)te->expr;
- if(IsA(tn, Param) || IsA(tn, OpExpr)) {
- tn = eval_const_expressions_mutator(tn, &context);
- te = makeTargetEntry((Expr *)tn, attno++, te->resname, false);
- newTargetList = lappend(newTargetList, te);
+ if (IsA(tn, OpExpr)) {
+            /* If the user-defined variable has already been defined,
+             * look up its existing value.
+             */
+ optbase_eval_user_var_in_opexpr(((OpExpr *)tn)->args);
}
- }
- if (newTargetList != NIL) {
- qt->targetList = newTargetList;
+ tn = eval_const_expressions_mutator(tn, &context);
+ te = makeTargetEntry((Expr *)tn, attno++, te->resname, false);
+ newTargetList = lappend(newTargetList, te);
}
+ qt->targetList = newTargetList;
return node;
}
+static void optbase_eval_user_var_in_opexpr(List *args)
+{
+ ListCell *argcell = NULL;
+ foreach (argcell, args) {
+ if (IsA(lfirst(argcell), UserVar)) {
+ bool found = false;
+ GucUserParamsEntry *entry = (GucUserParamsEntry *)hash_search(
+ u_sess->utils_cxt.set_user_params_htab,
+ ((UserVar *)lfirst(argcell))->name,
+ HASH_ENTER,
+ &found);
+ if (found) {
+ ((UserVar *)lfirst(argcell))->value = (Expr *)copyObject(entry->value);
+ }
+ }
+ }
+}
+
Node* eval_const_expressions_mutator(Node* node, eval_const_expressions_context* context)
{
if (node == NULL)
diff --git a/src/gausskernel/optimizer/util/plancat.cpp b/src/gausskernel/optimizer/util/plancat.cpp
index 995d8e109..89201b938 100755
--- a/src/gausskernel/optimizer/util/plancat.cpp
+++ b/src/gausskernel/optimizer/util/plancat.cpp
@@ -346,21 +346,31 @@ static RelPageType EstimatePartitionIndexPages(Relation relation, Relation index
BlockNumber partIndexPages = 0;
BlockNumber indexrelPartPages = 0; /* analyzed pages, stored in pg_partition->relpages */
int partitionNum = getNumberOfPartitions(relation);
+ int samplePartitions = 0;
foreach (cell, sampledPartitionIds) {
Oid partOid = lfirst_oid(cell);
Oid partIndexOid = getPartitionIndexOid(indexRelation->rd_id, partOid);
- Partition partIndex = partitionOpen(indexRelation, partIndexOid, AccessShareLock);
+ if (!ConditionalLockPartition(indexRelation->rd_id, partIndexOid, AccessShareLock, PARTITION_LOCK)) {
+ continue;
+ }
+ samplePartitions++;
+ Partition partIndex = partitionOpen(indexRelation, partIndexOid, NoLock);
partIndexPages += PartitionGetNumberOfBlocks(indexRelation, partIndex);
indexrelPartPages += partIndex->pd_part->relpages;
partitionClose(indexRelation, partIndex, AccessShareLock);
}
- indexPages = partIndexPages * (partitionNum / sampledPartitionIds->length);
+
+ /* if all partition is locked, just use relpages in index relation */
+ if (samplePartitions == 0) {
+ return indexRelation->rd_rel->relpages;
+ }
+
+ indexPages = partIndexPages * (partitionNum / samplePartitions);
if (!RelationIsSubPartitioned(relation)) {
- indexPages = partIndexPages * (partitionNum / sampledPartitionIds->length);
if (indexrelPartPages > 0 && partitionNum > ESTIMATE_PARTITION_NUMBER &&
partIndexPages < indexRelation->rd_rel->relpages / ESTIMATE_PARTPAGES_THRESHOLD) {
- if (sampledPartitionIds->length > ESTIMATE_PARTITION_NUMBER_THRESHOLD) {
+ if (samplePartitions > ESTIMATE_PARTITION_NUMBER_THRESHOLD) {
indexPages = partIndexPages * (indexRelation->rd_rel->relpages / (double)indexrelPartPages);
} else {
indexPages = indexRelation->rd_rel->relpages;
diff --git a/src/gausskernel/optimizer/util/pruning.cpp b/src/gausskernel/optimizer/util/pruning.cpp
index ffd03e3bb..79c9580b8 100644
--- a/src/gausskernel/optimizer/util/pruning.cpp
+++ b/src/gausskernel/optimizer/util/pruning.cpp
@@ -40,6 +40,7 @@
#include "utils/rel_gs.h"
#include "utils/syscache.h"
#include "utils/relcache.h"
+#include "utils/partitionkey.h"
#include "catalog/pg_partition_fn.h"
static PruningResult* partitionPruningFromBoolExpr(const BoolExpr* expr, PruningContext* context);
@@ -57,6 +58,8 @@ static void cleanPruningBottom(PruningContext* context, PartitionIdentifier* bot
static void cleanPruningTop(PruningContext* context, PartitionIdentifier* topSeq, Const* value);
static void destroyPruningResultList(List* resultList);
static Node* EvalExprValueWhenPruning(PruningContext* context, Node* node);
+static bool PartitionPruningForPartialListBoundary(ListPartitionMap* listMap, PruningResult* pruningResult,
+ Const** keyValues);
static PruningResult* recordEqualFromOpExpr(PartitionType partType,
const OpExpr* expr, PruningContext* context);
@@ -91,17 +94,22 @@ static void CollectSubpartitionPruningResults(PruningResult* resPartition, Relat
}
int partSeq;
- ListCell *cell = NULL;
+ int partitionno;
+ ListCell *cell1 = NULL;
+ ListCell *cell2 = NULL;
Oid partitionOid = InvalidOid;
- foreach (cell, resPartition->ls_rangeSelectedPartitions) {
- partSeq = lfirst_int(cell);
- partitionOid = getPartitionOidFromSequence(current_relation, partSeq, resPartition->partMap);
+ Assert(list_length(resPartition->ls_rangeSelectedPartitions) == list_length(resPartition->ls_selectedPartitionnos));
+ forboth (cell1, resPartition->ls_rangeSelectedPartitions, cell2, resPartition->ls_selectedPartitionnos) {
+ partSeq = lfirst_int(cell1);
+ partitionno = lfirst_int(cell2);
+ partitionOid = getPartitionOidFromSequence(current_relation, partSeq, partitionno);
SubPartitionPruningResult *subPartPruningRes =
PreGetSubPartitionFullPruningResult(current_relation, partitionOid);
if (subPartPruningRes == NULL) {
continue;
}
subPartPruningRes->partSeq = partSeq;
+ subPartPruningRes->partitionno = partitionno;
resPartition->ls_selectedSubPartitions = lappend(resPartition->ls_selectedSubPartitions, subPartPruningRes);
}
}
@@ -123,9 +131,6 @@ PruningResult* GetPartitionInfo(PruningResult* result, EState* estate, Relation
context.rte = NULL;
context.estate = estate;
context.relation = current_relation;
- /* if the partmap which copied before static pruning exists, it will replace the rel->partMap, seen in
- * GetPartitionMap */
- context.partmap = result->partMap;
if (current_relation->partMap->type == PART_TYPE_LIST || current_relation->partMap->type == PART_TYPE_HASH) {
resPartition = partitionEqualPruningWalker(current_relation->partMap->type, result->expr, &context);
@@ -141,7 +146,7 @@ PruningResult* GetPartitionInfo(PruningResult* result, EState* estate, Relation
return resPartition;
}
if (PointerIsValid(resPartition) && !PruningResultIsFull(resPartition))
- generateListFromPruningBM(resPartition);
+ generateListFromPruningBM(resPartition, current_relation->partMap);
CollectSubpartitionPruningResults(resPartition, current_relation);
@@ -168,7 +173,8 @@ PruningResult* copyPruningResult(PruningResult* srcPruningResult)
newpruningInfo->paramArg = (Param *)copyObject(srcPruningResult->paramArg);
newpruningInfo->expr = (Expr *)copyObject(srcPruningResult->expr);
newpruningInfo->exprPart = (OpExpr *)copyObject(srcPruningResult->exprPart);
- newpruningInfo->partMap = (PartitionMap *)CopyPartitionMap(srcPruningResult->partMap);
+ newpruningInfo->isPbeSinlePartition = srcPruningResult->isPbeSinlePartition;
+ newpruningInfo->ls_selectedPartitionnos = (List*)copyObject(srcPruningResult->ls_selectedPartitionnos);
return newpruningInfo;
} else {
@@ -176,13 +182,15 @@ PruningResult* copyPruningResult(PruningResult* srcPruningResult)
}
}
-void generateListFromPruningBM(PruningResult* result)
+void generateListFromPruningBM(PruningResult* result, PartitionMap *partmap)
{
int partitions = 0;
int i = 0;
int tmpcheck = 0;
+ int partitionno = INVALID_PARTITION_NO;
Bitmapset* tmpset = NULL;
result->ls_rangeSelectedPartitions = NULL;
+ result->ls_selectedPartitionnos = NULL;
tmpset = bms_copy(result->bm_rangeSelectedPartitions);
partitions = bms_num_members(result->bm_rangeSelectedPartitions);
@@ -192,6 +200,12 @@ void generateListFromPruningBM(PruningResult* result)
AssertEreport(-1 != tmpcheck, MOD_OPT, "");
if (-1 != tmpcheck) {
result->ls_rangeSelectedPartitions = lappend_int(result->ls_rangeSelectedPartitions, tmpcheck);
+
+ if (PointerIsValid(partmap)) {
+ partitionno = GetPartitionnoFromSequence(partmap, tmpcheck);
+ PARTITIONNO_VALID_ASSERT(partitionno);
+ }
+ result->ls_selectedPartitionnos = lappend_int(result->ls_selectedPartitionnos, partitionno);
}
}
bms_free_ext(tmpset);
@@ -220,11 +234,13 @@ List* restrictInfoListToExprList(List* restrictInfoList)
* return value: non-eliminated partitions.
*/
PruningResult* partitionPruningForRestrictInfo(
- PlannerInfo* root, RangeTblEntry* rte, Relation rel, List* restrictInfoList, PartitionMap *partmap)
+ PlannerInfo* root, RangeTblEntry* rte, Relation rel, List* restrictInfoList)
{
PruningResult* result = NULL;
Expr* expr = NULL;
+ incre_partmap_refcount(rel->partMap);
+
if (0 == list_length(restrictInfoList)) {
result = getFullPruningResult(rel);
} else {
@@ -246,28 +262,25 @@ PruningResult* partitionPruningForRestrictInfo(
}
if (PointerIsValid(result) && !PruningResultIsFull(result)) {
- generateListFromPruningBM(result);
+ generateListFromPruningBM(result, rel->partMap);
}
if (RelationIsSubPartitioned(rel) && PointerIsValid(result)) {
Bitmapset *partIdx = NULL;
List* part_seqs = result->ls_rangeSelectedPartitions;
- ListCell *cell = NULL;
+ List* partitionnos = result->ls_selectedPartitionnos;
+ ListCell *cell1 = NULL;
+ ListCell *cell2 = NULL;
RangeTblEntry *partRte = (RangeTblEntry *)copyObject(rte);
- foreach (cell, part_seqs)
+ forboth(cell1, part_seqs, cell2, partitionnos)
{
- int part_seq = lfirst_int(cell);
- Oid partOid = getPartitionOidFromSequence(rel, part_seq, partmap);
- Partition partTable = tryPartitionOpen(rel, partOid, AccessShareLock);
- if (!partTable) {
- PartStatus currStatus = PartitionGetMetadataStatus(partOid, false);
- if (currStatus != PART_METADATA_INVISIBLE) {
- ReportPartitionOpenError(rel, partOid);
- }
- continue;
- }
+ int part_seq = lfirst_int(cell1);
+ int partitionno = lfirst_int(cell2);
+ Oid partOid = getPartitionOidFromSequence(rel, part_seq, partitionno);
+ Partition partTable = PartitionOpenWithPartitionno(rel, partOid, partitionno, NoLock);
Relation partRel = partitionGetRelation(rel, partTable);
+ incre_partmap_refcount(partRel->partMap);
PruningResult *subResult = NULL;
partRte->relid = partOid;
@@ -278,29 +291,33 @@ PruningResult* partitionPruningForRestrictInfo(
}
if (PointerIsValid(subResult) && !PruningResultIsEmpty(subResult)) {
- generateListFromPruningBM(subResult);
+ generateListFromPruningBM(subResult, partRel->partMap);
if (bms_num_members(subResult->bm_rangeSelectedPartitions) > 0) {
SubPartitionPruningResult *subPruning = makeNode(SubPartitionPruningResult);
subPruning->partSeq = part_seq;
+ subPruning->partitionno = partitionno;
subPruning->bm_selectedSubPartitions = subResult->bm_rangeSelectedPartitions;
subPruning->ls_selectedSubPartitions = subResult->ls_rangeSelectedPartitions;
+ subPruning->ls_selectedSubPartitionnos = subResult->ls_selectedPartitionnos;
result->ls_selectedSubPartitions = lappend(result->ls_selectedSubPartitions,
subPruning);
partIdx = bms_add_member(partIdx, part_seq);
}
}
+ decre_partmap_refcount(partRel->partMap);
releaseDummyRelation(&partRel);
- partitionClose(rel, partTable, AccessShareLock);
+ partitionClose(rel, partTable, NoLock);
}
// adjust
if (!bms_equal(result->bm_rangeSelectedPartitions, partIdx)) {
result->bm_rangeSelectedPartitions = partIdx;
- generateListFromPruningBM(result);
+ generateListFromPruningBM(result, rel->partMap);
}
}
+ decre_partmap_refcount(rel->partMap);
return result;
}
@@ -338,6 +355,8 @@ PruningResult* singlePartitionPruningForRestrictInfo(Oid partitionOid, Relation
return NULL;
}
+ incre_partmap_refcount(rel->partMap);
+
pruningRes = makeNode(PruningResult);
pruningRes->state = PRUNING_RESULT_SUBSET;
@@ -402,15 +421,18 @@ PruningResult* singlePartitionPruningForRestrictInfo(Oid partitionOid, Relation
pruningRes->bm_rangeSelectedPartitions = bms_make_singleton(partitionSeq);
- generateListFromPruningBM(pruningRes);
+ generateListFromPruningBM(pruningRes, rel->partMap);
if (RelationIsSubPartitioned(rel)) {
SubPartitionPruningResult *subPartPruningRes = PreGetSubPartitionFullPruningResult(rel, partitionOid);
if (subPartPruningRes == NULL) {
+ decre_partmap_refcount(rel->partMap);
return pruningRes;
}
subPartPruningRes->partSeq = partitionSeq;
+ subPartPruningRes->partitionno = GetPartitionnoFromSequence(rel->partMap, partitionSeq);
pruningRes->ls_selectedSubPartitions = lappend(pruningRes->ls_selectedSubPartitions, subPartPruningRes);
}
+ decre_partmap_refcount(rel->partMap);
return pruningRes;
}
@@ -425,6 +447,8 @@ PruningResult* SingleSubPartitionPruningForRestrictInfo(Oid subPartitionOid, Rel
{
int partitionSeq = 0;
int subPartitionSeq = 0;
+ int partitionno = INVALID_PARTITION_NO;
+ int subpartitionno = INVALID_PARTITION_NO;
PruningResult* pruningRes = NULL;
if (!PointerIsValid(rel) || !OidIsValid(subPartitionOid) || !OidIsValid(partOid)) {
@@ -440,7 +464,9 @@ PruningResult* SingleSubPartitionPruningForRestrictInfo(Oid subPartitionOid, Rel
pruningRes = makeNode(PruningResult);
pruningRes->state = PRUNING_RESULT_SUBSET;
+ incre_partmap_refcount(rel->partMap);
partitionSeq = getPartitionElementsIndexByOid(rel, partOid);
+ partitionno = GetPartitionnoFromSequence(rel->partMap, partitionSeq);
/* In normal condition, it should never happen.
* But if the Query is from a view/rule contains a subpartition, this case may happen if the parent partition of
* this subpartition is dropped by DDL operation, such as DROP/TRUNCATE (UPDATE GLOBAL INDEX) */
@@ -455,7 +481,9 @@ PruningResult* SingleSubPartitionPruningForRestrictInfo(Oid subPartitionOid, Rel
Partition part = partitionOpen(rel, partOid, NoLock);
Relation partRel = partitionGetRelation(rel, part);
+ incre_partmap_refcount(partRel->partMap);
subPartitionSeq = getPartitionElementsIndexByOid(partRel, subPartitionOid);
+ subpartitionno = GetPartitionnoFromSequence(partRel->partMap, subPartitionSeq);
/* In normal condition, it should never happen.
* But if the Query is from a view/rule contains a subpartition, this case may happen if the subpartition is dropped
* by DDL operation, such as DROP/SPLIT/MERGE/TRUNCATE (UPDATE GLOBAL INDEX)/EXCHANGE (UPDATE GLOBAL INDEX) */
@@ -467,6 +495,7 @@ PruningResult* SingleSubPartitionPruningForRestrictInfo(Oid subPartitionOid, Rel
errhint("Check if this query contains a view that refrences the target subpartition. "
"If so, REBUILD this view.")));
}
+ decre_partmap_refcount(partRel->partMap);
releaseDummyRelation(&partRel);
partitionClose(rel, part, NoLock);
@@ -475,11 +504,16 @@ PruningResult* SingleSubPartitionPruningForRestrictInfo(Oid subPartitionOid, Rel
bms_add_member(subPartPruningRes->bm_selectedSubPartitions, subPartitionSeq);
subPartPruningRes->ls_selectedSubPartitions =
lappend_int(subPartPruningRes->ls_selectedSubPartitions, subPartitionSeq);
+ subPartPruningRes->ls_selectedSubPartitionnos =
+ lappend_int(subPartPruningRes->ls_selectedSubPartitionnos, subpartitionno);
subPartPruningRes->partSeq = partitionSeq;
+ subPartPruningRes->partitionno = GetPartitionnoFromSequence(rel->partMap, partitionSeq);
pruningRes->ls_selectedSubPartitions = lappend(pruningRes->ls_selectedSubPartitions, subPartPruningRes);
pruningRes->ls_rangeSelectedPartitions = lappend_int(pruningRes->ls_rangeSelectedPartitions, partitionSeq);
+ pruningRes->ls_selectedPartitionnos = lappend_int(pruningRes->ls_selectedPartitionnos, partitionno);
pruningRes->bm_rangeSelectedPartitions = bms_make_singleton(partitionSeq);
+ decre_partmap_refcount(rel->partMap);
return pruningRes;
}
@@ -692,6 +726,9 @@ static PruningResult* partitionPruningFromBoolExpr(const BoolExpr* expr, Pruning
iterator = partitionPruningWalker(arg, context);
if (iterator->paramArg != NULL || iterator->exprPart != NULL) {
+ if (expr->boolop == OR_EXPR) {
+ iterator->isPbeSinlePartition = false;
+ }
return iterator;
}
resultList = lappend(resultList, iterator);
@@ -740,6 +777,9 @@ static PruningResult* partitionPruningFromBoolExpr(PartitionType partType, const
iterator = partitionEqualPruningWalker(partType, arg, context);
if (iterator->paramArg != NULL || iterator->exprPart != NULL) {
+ if (expr->boolop == OR_EXPR) {
+ iterator->isPbeSinlePartition = false;
+ }
return iterator;
}
resultList = lappend(resultList, iterator);
@@ -825,6 +865,83 @@ static PruningResult* partitionPruningFromNullTest(NullTest* expr, PruningContex
return result;
}
+static PruningResult* ListPartitionPruningFromIsNotNull(ListPartitionMap* listPartMap, PruningResult* pruning,
+ int attrOffset)
+{
+ if (listPartMap->partitionKey->dim1 == 1) {
+ pruning->state = PRUNING_RESULT_FULL;
+ pruning->isPbeSinlePartition = false;
+ return pruning;
+ }
+
+ int count;
+ for (int i = 0; i < listPartMap->listElementsNum; i++) {
+ ListPartElement* part = &listPartMap->listElements[i];
+ /* The default partition should be selected, whether single-key or multi-keys partition. */
+ if (part->boundary[0].values[0]->ismaxvalue) {
+ pruning->bm_rangeSelectedPartitions = bms_add_member(pruning->bm_rangeSelectedPartitions, i);
+ continue;
+ }
+ for (int j = 0; j < part->len; j++) {
+ if (!part->boundary[j].values[attrOffset]->constisnull) {
+ pruning->bm_rangeSelectedPartitions = bms_add_member(pruning->bm_rangeSelectedPartitions, i);
+ break;
+ }
+ }
+ }
+ count = bms_num_members(pruning->bm_rangeSelectedPartitions);
+ if (count > 0) {
+ pruning->state = PRUNING_RESULT_SUBSET;
+ } else {
+ pruning->state = PRUNING_RESULT_EMPTY;
+ }
+ pruning->isPbeSinlePartition = (count == 1);
+ return pruning;
+}
+
+static PruningResult* ListPartitionPruningFromIsNull(ListPartitionMap* listPartMap, PruningResult* pruning,
+ int attrOffset)
+{
+ if (listPartMap->partitionKey->dim1 == 1) {
+ int defaultPartitionIndex = -1;
+ for (int i = 0; i < listPartMap->listElementsNum; i++) {
+ ListPartElement *list = &listPartMap->listElements[i];
+ if (list->boundary[0].values[0]->ismaxvalue) {
+ defaultPartitionIndex = i;
+ break;
+ }
+ }
+ if (defaultPartitionIndex >= 0) {
+ pruning->state = PRUNING_RESULT_SUBSET;
+ pruning->isPbeSinlePartition = true;
+ pruning->bm_rangeSelectedPartitions = bms_make_singleton(defaultPartitionIndex);
+ } else {
+ pruning->state = PRUNING_RESULT_EMPTY;
+ pruning->isPbeSinlePartition = false;
+ }
+ return pruning;
+ }
+ /*
+ * The partition key value of the multi-keys list partition can be NULL.
+ * NullTest should be recorded in the boundary for pruning with other OpExpr or NullTest.
+ */
+ PruningBoundary* boundary = makePruningBoundary(listPartMap->partitionKey->dim1);
+ Oid typid, typcoll;
+ int typmod;
+
+ get_atttypetypmodcoll(listPartMap->relid, listPartMap->partitionKey->values[attrOffset],
+ &typid, &typmod, &typcoll);
+ boundary->minClose[attrOffset] = true;
+ boundary->min[attrOffset] = PointerGetDatum(makeNullConst(typid, typmod, typcoll));
+ boundary->maxClose[attrOffset] = true;
+ boundary->max[attrOffset] = PointerGetDatum(makeNullConst(typid, typmod, typcoll));
+ boundary->state = PRUNING_RESULT_SUBSET;
+ pruning->boundary = boundary;
+ pruning->state = PRUNING_RESULT_SUBSET;
+ pruning->isPbeSinlePartition = false;
+ return pruning;
+}
+
static PruningResult* partitionPruningFromNullTest(PartitionType partType, NullTest* expr, PruningContext* context)
{
PruningResult* result = NULL;
@@ -855,36 +972,23 @@ static PruningResult* partitionPruningFromNullTest(PartitionType partType, NullT
ListPartitionMap* listPartMap = (ListPartitionMap*)(context->relation->partMap);
partKeyNum = listPartMap->partitionKey->dim1;
attrOffset = varIsInPartitionKey(var->varattno, listPartMap->partitionKey, partKeyNum);
- } else {
- HashPartitionMap* hashPartMap = (HashPartitionMap*)(context->relation->partMap);
- partKeyNum = hashPartMap->partitionKey->dim1;
- attrOffset = varIsInPartitionKey(var->varattno, hashPartMap->partitionKey, partKeyNum);
+ if (attrOffset < 0 || attrOffset >= partKeyNum) {
+ result->state = PRUNING_RESULT_FULL;
+ return result;
+ }
+ if (expr->nulltesttype == IS_NULL) {
+ return ListPartitionPruningFromIsNull((ListPartitionMap*)context->relation->partMap, result, attrOffset);
+ }
+ return ListPartitionPruningFromIsNotNull((ListPartitionMap*)context->relation->partMap, result, attrOffset);
}
+ /* HASH partition */
+ HashPartitionMap* hashPartMap = (HashPartitionMap*)(context->relation->partMap);
+ partKeyNum = hashPartMap->partitionKey->dim1;
+ attrOffset = varIsInPartitionKey(var->varattno, hashPartMap->partitionKey, partKeyNum);
if (attrOffset != 0) {
result->state = PRUNING_RESULT_FULL;
return result;
}
-
- if (expr->nulltesttype == IS_NULL && partType == PART_TYPE_LIST) {
- ListPartitionMap* listPartMap = (ListPartitionMap*)(context->relation->partMap);
- bool hasDefault = false;
- int defaultPartitionIndex = -1;
- for (int i = 0; i < listPartMap->listElementsNum; i++) {
- ListPartElement list = listPartMap->listElements[i];
- if (list.boundary[0]->ismaxvalue) {
- hasDefault = true;
- defaultPartitionIndex = i;
- break;
- }
- }
- if (hasDefault) {
- result->state = PRUNING_RESULT_SUBSET;
- result->isPbeSinlePartition = true;
- result->bm_rangeSelectedPartitions = bms_make_singleton(defaultPartitionIndex);
- return result;
- }
- }
-
if (expr->nulltesttype != IS_NULL) {
result->state = PRUNING_RESULT_FULL;
} else {
@@ -1571,12 +1675,18 @@ static PruningResult* RecordEqualFromOpExprPart(const PartitionType partType, co
result->isPbeSinlePartition = false;
return result;
} else if (paramArg != NULL) {
- result->paramArg = paramArg;
- result->state = PRUNING_RESULT_SUBSET;
- if (0 == strcmp("=", opName)) {
- result->isPbeSinlePartition = true;
+ if (paramArg->paramkind != PARAM_EXTERN) {
+ result->state = PRUNING_RESULT_FULL;
+ result->isPbeSinlePartition = false;
+ return result;
+ } else {
+ result->paramArg = paramArg;
+ result->state = PRUNING_RESULT_SUBSET;
+ if (0 == strcmp("=", opName)) {
+ result->isPbeSinlePartition = (partKeyNum == 1);
+ }
+ return result;
}
- return result;
}
if (constArg->constisnull) {
@@ -1604,7 +1714,7 @@ static PruningResult* RecordEqualFromOpExprPart(const PartitionType partType, co
boundary->state = PRUNING_RESULT_SUBSET;
result->state = PRUNING_RESULT_SUBSET;
- result->isPbeSinlePartition = true;
+ result->isPbeSinlePartition = (partKeyNum == 1);
} else {
boundary->state = PRUNING_RESULT_FULL;
result->state = PRUNING_RESULT_FULL;
@@ -1807,7 +1917,8 @@ void partitionPruningFromBoundary(PruningContext *context, PruningResult* prunin
}
}
- compare = partitonKeyCompare(bottomValue, topValue, pruningResult->boundary->partitionKeyNum);
+ compare = partitonKeyCompare(bottomValue, topValue, pruningResult->boundary->partitionKeyNum,
+ ((GetPartitionMap(context))->type == PART_TYPE_LIST));
if (compare > 0) {
pruningResult->state = PRUNING_RESULT_EMPTY;
return;
@@ -1818,6 +1929,11 @@ void partitionPruningFromBoundary(PruningContext *context, PruningResult* prunin
if ((context->pruningType == PruningPartition) && (
(GetPartitionMap(context))->type == PART_TYPE_LIST ||
(GetPartitionMap(context))->type == PART_TYPE_HASH)) {
+ /* If the boundary does not contain all key values, prune in PartitionPruningForPartialListBoundary. */
+ if ((GetPartitionMap(context))->type == PART_TYPE_LIST && PartitionPruningForPartialListBoundary(
+ (ListPartitionMap*)GetPartitionMap(context), pruningResult, bottomValue)) {
+ return;
+ }
partitionRoutingForValueEqual(
context->relation, bottomValue, pruningResult->boundary->partitionKeyNum, true, u_sess->opt_cxt.bottom_seq);
u_sess->opt_cxt.top_seq->partArea = u_sess->opt_cxt.bottom_seq->partArea;
@@ -2156,11 +2272,11 @@ static PruningBoundary* mergeBoundary(PruningBoundary* leftBoundary, PruningBoun
}
if (!PointerIsValid(leftBoundary)) {
- return copyBoundary(leftBoundary);
+ return copyBoundary(rightBoundary);
}
if (!PointerIsValid(rightBoundary)) {
- return copyBoundary(rightBoundary);
+ return copyBoundary(leftBoundary);
}
AssertEreport(leftBoundary->partitionKeyNum == rightBoundary->partitionKeyNum,
@@ -2197,7 +2313,7 @@ static PruningBoundary* mergeBoundary(PruningBoundary* leftBoundary, PruningBoun
result->min[i] = PointerGetDatum(copyObject((void*)rightValue));
result->minClose[i] = rightBoundary->minClose[i];
} else if (PointerIsValid(leftValue) && PointerIsValid(rightValue)) {
- compare = partitonKeyCompare(&leftValue, &rightValue, 1);
+ compare = partitonKeyCompare(&leftValue, &rightValue, 1, true);
if (compare > 0) {
result->min[i] = PointerGetDatum(copyObject((void*)leftValue));
result->minClose[i] = leftBoundary->minClose[i];
@@ -2224,7 +2340,7 @@ static PruningBoundary* mergeBoundary(PruningBoundary* leftBoundary, PruningBoun
result->max[i] = PointerGetDatum(copyObject((void*)rightValue));
result->maxClose[i] = rightBoundary->maxClose[i];
} else if (PointerIsValid(leftValue) && PointerIsValid(rightValue)) {
- compare = partitonKeyCompare(&leftValue, &rightValue, 1);
+ compare = partitonKeyCompare(&leftValue, &rightValue, 1, true);
if (compare > 0) {
result->max[i] = PointerGetDatum(copyObject((void*)rightValue));
result->maxClose[i] = rightBoundary->maxClose[i];
@@ -2244,7 +2360,7 @@ static PruningBoundary* mergeBoundary(PruningBoundary* leftBoundary, PruningBoun
leftValue = (Const*)DatumGetPointer(result->min[i]);
rightValue = (Const*)DatumGetPointer(result->max[i]);
if (leftValue != NULL && rightValue != NULL) {
- compare = partitonKeyCompare(&leftValue, &rightValue, 1);
+ compare = partitonKeyCompare(&leftValue, &rightValue, 1, true);
if (compare > 0 || (compare == 0 && !(result->minClose[i] && result->maxClose[i]))) {
result->state = PRUNING_RESULT_EMPTY;
break;
@@ -2346,6 +2462,10 @@ void destroyPruningResult(PruningResult* pruningResult)
list_free_ext(pruningResult->ls_rangeSelectedPartitions);
pruningResult->ls_rangeSelectedPartitions = NIL;
}
+ if (PointerIsValid(pruningResult->ls_selectedPartitionnos)) {
+ list_free_ext(pruningResult->ls_selectedPartitionnos);
+ pruningResult->ls_selectedPartitionnos = NIL;
+ }
if (PointerIsValid(pruningResult->expr)) {
pfree(pruningResult->expr);
pruningResult->expr = NULL;
@@ -2358,10 +2478,6 @@ void destroyPruningResult(PruningResult* pruningResult)
pfree(pruningResult->paramArg);
pruningResult->paramArg = NULL;
}
- if (PointerIsValid(pruningResult->partMap)) {
- DestroyPartitionMap(pruningResult->partMap);
- pruningResult->partMap = NULL;
- }
pfree_ext(pruningResult);
}
@@ -2383,6 +2499,50 @@ static void destroyPruningResultList(List* resultList)
return;
}
+static Oid GetPartitionOidFromPartitionno(Relation relation, int partitionno)
+{
+ int totalnum;
+ int i;
+ Oid result = InvalidOid;
+ int resultno;
+ PartitionMap *partmap = relation->partMap;
+
+ if (partitionno <= 0) {
+ return InvalidOid;
+ }
+
+ if (partmap->type == PART_TYPE_RANGE || partmap->type == PART_TYPE_INTERVAL) {
+ totalnum = ((RangePartitionMap*)partmap)->rangeElementsNum;
+ for (i = 0; i < totalnum; i++) {
+ resultno = ((RangePartitionMap*)partmap)->rangeElements[i].partitionno;
+ if (partitionno == resultno) {
+ result = ((RangePartitionMap*)partmap)->rangeElements[i].partitionOid;
+ break;
+ }
+ }
+ } else if (partmap->type == PART_TYPE_LIST) {
+ totalnum = ((ListPartitionMap*)partmap)->listElementsNum;
+ for (i = 0; i < totalnum; i++) {
+ resultno = ((ListPartitionMap*)partmap)->listElements[i].partitionno;
+ if (partitionno == resultno) {
+ result = ((ListPartitionMap*)partmap)->listElements[i].partitionOid;
+ break;
+ }
+ }
+ } else if (partmap->type == PART_TYPE_HASH) {
+ totalnum = ((HashPartitionMap*)partmap)->hashElementsNum;
+ for (i = 0; i < totalnum; i++) {
+ resultno = ((HashPartitionMap*)partmap)->hashElements[i].partitionno;
+ if (partitionno == resultno) {
+ result = ((HashPartitionMap*)partmap)->hashElements[i].partitionOid;
+ break;
+ }
+ }
+ }
+
+ return result;
+}
+
/*
* @@GaussDB@@
* Target : data partition
@@ -2390,73 +2550,63 @@ static void destroyPruningResultList(List* resultList)
* Description :
* Notes : start with 0
*/
-Oid getPartitionOidFromSequence(Relation relation, int partSeq, PartitionMap *oldmap)
+Oid getPartitionOidFromSequence(Relation relation, int partSeq, int partitionno)
{
- /* if the partmap which copied before static pruning exists, it will replace the rel->partMap */
- PartitionMap *partmap = oldmap ? oldmap : relation->partMap;
+#define ReportPartseqOutRange() \
+ do { \
+ PARTITION_LOG( \
+ "partSeq: %d out range of current relation partMap element num: %d. partitionno %d will be used", \
+ partSeq, elementsNum, partitionno); \
+ } while (0)
Oid result = InvalidOid;
+ int resultno = 0;
+ int elementsNum;
AssertEreport(PointerIsValid(relation), MOD_OPT, "Unexpected NULL pointer for relation.");
- AssertEreport(PointerIsValid(partmap), MOD_OPT, "Unexpected NULL pointer for relation->partMap.");
- if (partmap->type == PART_TYPE_RANGE || partmap->type == PART_TYPE_INTERVAL) {
- int rangeElementsNum = ((RangePartitionMap*)partmap)->rangeElementsNum;
- if (partSeq < rangeElementsNum) {
- result = ((RangePartitionMap*)partmap)->rangeElements[partSeq].partitionOid;
+ AssertEreport(PointerIsValid(relation->partMap), MOD_OPT, "Unexpected NULL pointer for relation->partMap.");
+ if (relation->partMap->type == PART_TYPE_RANGE || relation->partMap->type == PART_TYPE_INTERVAL) {
+ elementsNum = ((RangePartitionMap*)relation->partMap)->rangeElementsNum;
+ if (partSeq < elementsNum) {
+ result = ((RangePartitionMap*)relation->partMap)->rangeElements[partSeq].partitionOid;
+ resultno = ((RangePartitionMap*)relation->partMap)->rangeElements[partSeq].partitionno;
} else {
- ereport(ERROR,
- (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("partSeq: %d out range of current relation partMap element num: %d.",
- partSeq,
- rangeElementsNum)));
+ ReportPartseqOutRange();
}
- /* do simple check, as rangeElements already be sorted */
- if (partSeq > 0 &&
- result == ((RangePartitionMap*)partmap)->rangeElements[partSeq - 1].partitionOid) {
- ereport(ERROR,
- (errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("Duplicate range partition map oids: %u, please try again.", result)));
- }
- } else if (PART_TYPE_LIST == partmap->type) {
- int listElementsNum = ((ListPartitionMap*)partmap)->listElementsNum;
- if (partSeq < listElementsNum) {
- result = ((ListPartitionMap*)partmap)->listElements[partSeq].partitionOid;
+ } else if (relation->partMap->type == PART_TYPE_LIST) {
+ elementsNum = ((ListPartitionMap*)relation->partMap)->listElementsNum;
+ if (partSeq < elementsNum) {
+ result = ((ListPartitionMap*)relation->partMap)->listElements[partSeq].partitionOid;
+ resultno = ((ListPartitionMap*)relation->partMap)->listElements[partSeq].partitionno;
} else {
- ereport(ERROR,
- (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("partSeq: %d out range of current relation partMap element num: %d.",
- partSeq,
- listElementsNum)));
+ ReportPartseqOutRange();
}
- /* do simple check, as rangeElements already be sorted */
- if (partSeq > 0 &&
- result == ((ListPartitionMap*)partmap)->listElements[partSeq - 1].partitionOid) {
- ereport(ERROR,
- (errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("Duplicate range partition map oids: %u, please try again.", result)));
- }
- } else if (PART_TYPE_HASH == partmap->type) {
- int hashElementsNum = ((HashPartitionMap*)partmap)->hashElementsNum;
- if (partSeq < hashElementsNum) {
- result = ((HashPartitionMap*)partmap)->hashElements[partSeq].partitionOid;
+ } else if (relation->partMap->type == PART_TYPE_HASH) {
+ elementsNum = ((HashPartitionMap*)relation->partMap)->hashElementsNum;
+ if (partSeq < elementsNum) {
+ result = ((HashPartitionMap*)relation->partMap)->hashElements[partSeq].partitionOid;
+ resultno = ((HashPartitionMap*)relation->partMap)->hashElements[partSeq].partitionno;
} else {
- ereport(ERROR,
- (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("partSeq: %d out range of current relation partMap element num: %d.",
- partSeq,
- hashElementsNum)));
+ ReportPartseqOutRange();
}
- /* do simple check, as rangeElements already be sorted */
- if (partSeq > 0 &&
- result == ((HashPartitionMap*)partmap)->hashElements[partSeq - 1].partitionOid) {
- ereport(ERROR,
- (errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("Duplicate range partition map oids: %u, please try again.", result)));
- }
- } else {
- ereport(ERROR,
- (errmodule(MOD_OPT),
- errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Unupport partition strategy \"%d\"", partmap->type)));
+ }
+
+ /* if partSeq is out of range, or partitionno does not match */
+ if (!OidIsValid(result) && partitionno > 0) {
+ result = GetPartitionOidFromPartitionno(relation, partitionno);
+ } else if (partitionno > 0 && partitionno != resultno) {
+ PARTITION_LOG("partitionno does not match, src is %d, dest is %u. src partitionno will be used",
+ partitionno, resultno);
+ result = GetPartitionOidFromPartitionno(relation, partitionno);
+ }
+
+ if (!OidIsValid(result) && partitionno > 0) {
+ bool issubpartition = RelationIsPartitionOfSubPartitionTable(relation);
+ ereport(ERROR, (errmodule(MOD_OPT), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("could not find %s oid from %s %d for relation \"%s\"",
+ issubpartition ? "subpartition" : "partition",
+ issubpartition ? "subpartitionno" : "partitionno",
+ partitionno,
+ RelationGetRelationName(relation))));
}
return result;
@@ -2496,12 +2646,17 @@ void ConstructConstFromValues(Datum* datums, const bool* nulls, Oid* attrs, cons
return;
}
-SubPartitionPruningResult* GetSubPartitionPruningResult(List* selectedSubPartitions, int partSeq)
+SubPartitionPruningResult* GetSubPartitionPruningResult(List* selectedSubPartitions, int partSeq, int partitionno)
{
ListCell* cell = NULL;
foreach (cell, selectedSubPartitions) {
SubPartitionPruningResult* subPartPruningResult = (SubPartitionPruningResult*)lfirst(cell);
if (subPartPruningResult->partSeq == partSeq) {
+ if (subPartPruningResult->partitionno != partitionno) {
+ ereport(ERROR,
+ (errcode(ERRCODE_INTERNAL_ERROR), errmsg("the partitionno does not match, src is %d, dest is %u",
+ partitionno, subPartPruningResult->partitionno)));
+ }
return subPartPruningResult;
}
}
@@ -2540,4 +2695,74 @@ PruningResult* PartitionPruningForPartitionList(RangeTblEntry* rte, Relation rel
pruningRes->state = PRUNING_RESULT_SUBSET;
MergePartitionListsForPruning(rte, rel, pruningRes);
return pruningRes;
-}
\ No newline at end of file
+}
+
+static bool PartialListBoundaryMatched(ListPartElement* part, List* keyPos, Const** keyValue)
+{
+ if (part->boundary[0].values[0]->ismaxvalue) {
+ return true;
+ }
+
+ for (int i = 0; i < part->len; i++) {
+ PartitionKey* bound = &part->boundary[i];
+ ListCell* keyCell = NULL;
+ foreach(keyCell, keyPos) {
+ int id = lfirst_int(keyCell);
+ if (ConstCompareWithNull(keyValue[id], bound->values[id]) != 0) {
+ break;
+ }
+ }
+ if (keyCell == NULL) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * @@GaussDB@@
+ * Brief : Determine the number of boundary key values and prune partitions if need.
+ * Description : For multi-keys list partition table, pruning result may include many partitions
+ * if the boundary does not contain all the key values.
+ * In this case, partitionRoutingForValueEqual cannot be used to prune partitions.
+ * return value : Whether the boundary does not contain all key values and the pruning ends.
+ */
+static bool PartitionPruningForPartialListBoundary(ListPartitionMap* listMap, PruningResult* pruningResult,
+ Const** keyValues)
+{
+ List* keyPos = NULL;
+ Bitmapset* listBms = NULL;
+
+ for (int i = 0; i < pruningResult->boundary->partitionKeyNum; i++) {
+ if (keyValues[i] != NULL) {
+ keyPos = lappend_int(keyPos, i);
+ }
+ }
+ /* If all key values exist, return false. */
+ if (list_length(keyPos) == pruningResult->boundary->partitionKeyNum) {
+ list_free_ext(keyPos);
+ return false;
+ }
+
+ incre_partmap_refcount(&listMap->type);
+ for (int i = 0; i < listMap->listElementsNum; i++) {
+ if (PartialListBoundaryMatched(&listMap->listElements[i], keyPos, keyValues)) {
+ listBms = bms_add_member(listBms, i);
+ }
+ }
+ decre_partmap_refcount(&listMap->type);
+ list_free_ext(keyPos);
+ if (PointerIsValid(pruningResult->bm_rangeSelectedPartitions)) {
+ Bitmapset* tempBms = bms_intersect(pruningResult->bm_rangeSelectedPartitions, listBms);
+ bms_free_ext(pruningResult->bm_rangeSelectedPartitions);
+ bms_free_ext(listBms);
+ pruningResult->bm_rangeSelectedPartitions = tempBms;
+ } else {
+ pruningResult->bm_rangeSelectedPartitions = listBms;
+ }
+ if (pruningResult->boundary) {
+ destroyPruningBoundary(pruningResult->boundary);
+ pruningResult->boundary = NULL;
+ }
+ return true;
+}
diff --git a/src/gausskernel/optimizer/util/subpartitionpruning.cpp b/src/gausskernel/optimizer/util/subpartitionpruning.cpp
index 57b164ee7..095504e18 100644
--- a/src/gausskernel/optimizer/util/subpartitionpruning.cpp
+++ b/src/gausskernel/optimizer/util/subpartitionpruning.cpp
@@ -46,46 +46,67 @@ SubPartitionPruningResult* getSubPartitionFullPruningResult(Relation relation)
relation->partMap->type == PART_TYPE_HASH ||
relation->partMap->type == PART_TYPE_INTERVAL);
+ incre_partmap_refcount(relation->partMap);
+
SubPartitionPruningResult* subPartPruningRes = makeNode(SubPartitionPruningResult);
if (relation->partMap->type == PART_TYPE_RANGE || relation->partMap->type == PART_TYPE_INTERVAL) {
rangePartitionMap = (RangePartitionMap *)relation->partMap;
/* construct range bitmap */
for (i = 0; i < rangePartitionMap->rangeElementsNum; i++) {
+ int partitionno = rangePartitionMap->rangeElements[i].partitionno;
+ if (t_thrd.proc->workingVersionNum >= PARTITION_ENHANCE_VERSION_NUM) {
+ Assert(partitionno > 0);
+ }
subPartPruningRes->bm_selectedSubPartitions =
bms_add_member(subPartPruningRes->bm_selectedSubPartitions, i);
subPartPruningRes->ls_selectedSubPartitions = lappend_int(subPartPruningRes->ls_selectedSubPartitions, i);
+ subPartPruningRes->ls_selectedSubPartitionnos =
+ lappend_int(subPartPruningRes->ls_selectedSubPartitionnos, partitionno);
}
} else if (relation->partMap->type == PART_TYPE_LIST) {
listPartitionMap = (ListPartitionMap *)relation->partMap;
for (i = 0; i < listPartitionMap->listElementsNum; i++) {
+ int partitionno = listPartitionMap->listElements[i].partitionno;
+ if (t_thrd.proc->workingVersionNum >= PARTITION_ENHANCE_VERSION_NUM) {
+ Assert(partitionno > 0);
+ }
subPartPruningRes->bm_selectedSubPartitions =
bms_add_member(subPartPruningRes->bm_selectedSubPartitions, i);
subPartPruningRes->ls_selectedSubPartitions = lappend_int(subPartPruningRes->ls_selectedSubPartitions, i);
+ subPartPruningRes->ls_selectedSubPartitionnos =
+ lappend_int(subPartPruningRes->ls_selectedSubPartitionnos, partitionno);
}
} else if (relation->partMap->type == PART_TYPE_HASH) {
hashPartitionMap = (HashPartitionMap *)relation->partMap;
for (i = 0; i < hashPartitionMap->hashElementsNum; i++) {
+ int partitionno = hashPartitionMap->hashElements[i].partitionno;
+ if (t_thrd.proc->workingVersionNum >= PARTITION_ENHANCE_VERSION_NUM) {
+ Assert(partitionno > 0);
+ }
subPartPruningRes->bm_selectedSubPartitions =
bms_add_member(subPartPruningRes->bm_selectedSubPartitions, i);
subPartPruningRes->ls_selectedSubPartitions = lappend_int(subPartPruningRes->ls_selectedSubPartitions, i);
+ subPartPruningRes->ls_selectedSubPartitionnos =
+ lappend_int(subPartPruningRes->ls_selectedSubPartitionnos, partitionno);
}
}
+ decre_partmap_refcount(relation->partMap);
return subPartPruningRes;
}
SubPartitionPruningResult* PreGetSubPartitionFullPruningResult(Relation relation, Oid partitionid)
{
- Partition part = partitionOpen(relation, partitionid, AccessShareLock);
+ Partition part = partitionOpen(relation, partitionid, NoLock);
Relation partRelation = partitionGetRelation(relation, part);
SubPartitionPruningResult *subPartPruningRes = getSubPartitionFullPruningResult(partRelation);
releaseDummyRelation(&partRelation);
- partitionClose(relation, part, AccessShareLock);
+ partitionClose(relation, part, NoLock);
return subPartPruningRes;
}
@@ -116,8 +137,13 @@ PruningResult* getFullPruningResult(Relation relation)
/* construct range bitmap */
for (i = 0; i < rangePartitionMap->rangeElementsNum; i++) {
+ int partitionno = rangePartitionMap->rangeElements[i].partitionno;
+ if (t_thrd.proc->workingVersionNum >= PARTITION_ENHANCE_VERSION_NUM) {
+ Assert(partitionno > 0);
+ }
pruningRes->bm_rangeSelectedPartitions = bms_add_member(pruningRes->bm_rangeSelectedPartitions, i);
pruningRes->ls_rangeSelectedPartitions = lappend_int(pruningRes->ls_rangeSelectedPartitions, i);
+ pruningRes->ls_selectedPartitionnos = lappend_int(pruningRes->ls_selectedPartitionnos, partitionno);
}
if (relation->partMap->type != PART_TYPE_INTERVAL) {
pruningRes->intervalOffset = 0;
@@ -126,14 +152,24 @@ PruningResult* getFullPruningResult(Relation relation)
} else if (relation->partMap->type == PART_TYPE_LIST) {
listPartitionMap = (ListPartitionMap*)relation->partMap;
for (i = 0; i < listPartitionMap->listElementsNum; i++) {
+ int partitionno = listPartitionMap->listElements[i].partitionno;
+ if (t_thrd.proc->workingVersionNum >= PARTITION_ENHANCE_VERSION_NUM) {
+ Assert(partitionno > 0);
+ }
pruningRes->bm_rangeSelectedPartitions = bms_add_member(pruningRes->bm_rangeSelectedPartitions, i);
pruningRes->ls_rangeSelectedPartitions = lappend_int(pruningRes->ls_rangeSelectedPartitions, i);
+ pruningRes->ls_selectedPartitionnos = lappend_int(pruningRes->ls_selectedPartitionnos, partitionno);
}
} else if (relation->partMap->type == PART_TYPE_HASH) {
hashPartitionMap = (HashPartitionMap*)relation->partMap;
for (i = 0; i < hashPartitionMap->hashElementsNum; i++) {
+ int partitionno = hashPartitionMap->hashElements[i].partitionno;
+ if (t_thrd.proc->workingVersionNum >= PARTITION_ENHANCE_VERSION_NUM) {
+ Assert(partitionno > 0);
+ }
pruningRes->bm_rangeSelectedPartitions = bms_add_member(pruningRes->bm_rangeSelectedPartitions, i);
pruningRes->ls_rangeSelectedPartitions = lappend_int(pruningRes->ls_rangeSelectedPartitions, i);
+ pruningRes->ls_selectedPartitionnos = lappend_int(pruningRes->ls_selectedPartitionnos, partitionno);
}
}
@@ -149,8 +185,11 @@ bool checkPartitionIndexUnusable(Oid indexOid, int partItrs, PruningResult* prun
Oid heapRelOid;
Relation indexRel, heapRel;
bool partitionIndexUnusable = true;
- ListCell* cell = NULL;
+ ListCell* cell1 = NULL;
+ ListCell* cell2 = NULL;
List* part_seqs = pruning_result->ls_rangeSelectedPartitions;
+ List* partitionnos = pruning_result->ls_selectedPartitionnos;
+ Assert(list_length(part_seqs) == list_length(partitionnos));
if (pruning_result->expr == NULL) {
if (PointerIsValid(part_seqs))
@@ -163,6 +202,9 @@ bool checkPartitionIndexUnusable(Oid indexOid, int partItrs, PruningResult* prun
errmsg("invalid index oid to check for unusability")));
}
+    /* cannot take a heap lock here in case of deadlock, so we need to process invalidation messages here */
+ AcceptInvalidationMessages();
+
heapRelOid = IndexGetRelation(indexOid, false);
heapRel = relation_open(heapRelOid, NoLock);
indexRel = relation_open(indexOid, NoLock);
@@ -183,38 +225,37 @@ bool checkPartitionIndexUnusable(Oid indexOid, int partItrs, PruningResult* prun
errmsg("relation %s is not partitioned when check partition index", RelationGetRelationName(heapRel))));
}
- foreach (cell, part_seqs) {
+ forboth (cell1, part_seqs, cell2, partitionnos) {
Oid tablepartitionid = InvalidOid;
Oid indexpartitionid = InvalidOid;
Partition tablepart = NULL;
Partition indexpartition = NULL;
List* partitionIndexOidList = NIL;
- int partSeq = lfirst_int(cell);
+ int partSeq = lfirst_int(cell1);
+ int partitionno = lfirst_int(cell2);
Relation tablepartrel = NULL;
- tablepartitionid = getPartitionOidFromSequence(heapRel, partSeq, pruning_result->partMap);
- tablepart = tryPartitionOpen(heapRel, tablepartitionid, AccessShareLock);
- if (!tablepart) {
- PartStatus currStatus = PartitionGetMetadataStatus(tablepartitionid, false);
- if (currStatus != PART_METADATA_INVISIBLE) {
- ReportPartitionOpenError(heapRel, tablepartitionid);
- }
- continue;
- }
+ tablepartitionid = getPartitionOidFromSequence(heapRel, partSeq, partitionno);
+ tablepart = PartitionOpenWithPartitionno(heapRel, tablepartitionid, partitionno, NoLock);
/* get index partition and add it to a list for following scan */
if (RelationIsSubPartitioned(heapRel)) {
- ListCell *lc = NULL;
+ ListCell *lc1 = NULL;
+ ListCell *lc2 = NULL;
tablepartrel = partitionGetRelation(heapRel, tablepart);
SubPartitionPruningResult *subPartPruning =
- GetSubPartitionPruningResult(pruning_result->ls_selectedSubPartitions, partSeq);
+ GetSubPartitionPruningResult(pruning_result->ls_selectedSubPartitions, partSeq, partitionno);
List *subpartList = subPartPruning->ls_selectedSubPartitions;
+ List* subpartitionnos = subPartPruning->ls_selectedSubPartitionnos;
+ Assert(list_length(subpartList) == list_length(subpartitionnos));
- foreach (lc, subpartList)
+ forboth (lc1, subpartList, lc2, subpartitionnos)
{
- int subpartSeq = lfirst_int(lc);
- Oid subpartitionid = getPartitionOidFromSequence(tablepartrel, subpartSeq);
- Partition subpart = partitionOpen(tablepartrel, subpartitionid, AccessShareLock);
+ int subpartSeq = lfirst_int(lc1);
+ int subpartitionno = lfirst_int(lc2);
+ Oid subpartitionid = getPartitionOidFromSequence(tablepartrel, subpartSeq, subpartitionno);
+ Partition subpart =
+ PartitionOpenWithPartitionno(tablepartrel, subpartitionid, subpartitionno, NoLock);
partitionIndexOidList = PartitionGetPartIndexList(subpart);
if (!PointerIsValid(partitionIndexOidList)) {
@@ -223,24 +264,24 @@ bool checkPartitionIndexUnusable(Oid indexOid, int partItrs, PruningResult* prun
errmsg("no local indexes found for partition %s", PartitionGetPartitionName(tablepart))));
}
indexpartitionid = searchPartitionIndexOid(indexOid, partitionIndexOidList);
- indexpartition = partitionOpen(indexRel, indexpartitionid, AccessShareLock);
+ indexpartition = partitionOpen(indexRel, indexpartitionid, NoLock);
list_free_ext(partitionIndexOidList);
// found a unusable index partition
if (!indexpartition->pd_part->indisusable) {
partitionIndexUnusable = false;
- partitionClose(indexRel, indexpartition, AccessShareLock);
- partitionClose(tablepartrel, subpart, AccessShareLock);
+ partitionClose(indexRel, indexpartition, NoLock);
+ partitionClose(tablepartrel, subpart, NoLock);
break;
}
- partitionClose(indexRel, indexpartition, AccessShareLock);
- partitionClose(tablepartrel, subpart, AccessShareLock);
+ partitionClose(indexRel, indexpartition, NoLock);
+ partitionClose(tablepartrel, subpart, NoLock);
}
releaseDummyRelation(&tablepartrel);
- partitionClose(heapRel, tablepart, AccessShareLock);
+ partitionClose(heapRel, tablepart, NoLock);
if (!partitionIndexUnusable)
break;
} else {
@@ -252,17 +293,17 @@ bool checkPartitionIndexUnusable(Oid indexOid, int partItrs, PruningResult* prun
}
indexpartitionid = searchPartitionIndexOid(indexOid, partitionIndexOidList);
list_free_ext(partitionIndexOidList);
- indexpartition = partitionOpen(indexRel, indexpartitionid, AccessShareLock);
+ indexpartition = partitionOpen(indexRel, indexpartitionid, NoLock);
// found a unusable index partition
if (!indexpartition->pd_part->indisusable) {
partitionIndexUnusable = false;
- partitionClose(indexRel, indexpartition, AccessShareLock);
- partitionClose(heapRel, tablepart, AccessShareLock);
+ partitionClose(indexRel, indexpartition, NoLock);
+ partitionClose(heapRel, tablepart, NoLock);
break;
}
- partitionClose(indexRel, indexpartition, AccessShareLock);
- partitionClose(heapRel, tablepart, AccessShareLock);
+ partitionClose(indexRel, indexpartition, NoLock);
+ partitionClose(heapRel, tablepart, NoLock);
}
}
@@ -297,7 +338,10 @@ static IndexesUsableType eliminate_subpartition_index_unusable(Relation heapRel,
Oid indexOid = RelationGetRelid(indexRel);
List* part_seqs = inputPruningResult->ls_rangeSelectedPartitions;
- ListCell* cell = NULL;
+ List* partitionnos = inputPruningResult->ls_selectedPartitionnos;
+ Assert(list_length(part_seqs) == list_length(partitionnos));
+ ListCell* cell1 = NULL;
+ ListCell* cell2 = NULL;
bool unusable = false;
// first copy out 2 copies
@@ -311,31 +355,40 @@ static IndexesUsableType eliminate_subpartition_index_unusable(Relation heapRel,
bms_free_ext(outIndexUnusable_pr->bm_rangeSelectedPartitions);
outIndexUnusable_pr->bm_rangeSelectedPartitions = NULL;
+    /* cannot take a heap lock here in case of deadlock, so we need to process invalidation messages here */
+ AcceptInvalidationMessages();
+
// this is the scaning loop for selected partitions
- foreach (cell, part_seqs) {
+ forboth (cell1, part_seqs, cell2, partitionnos) {
Oid tablepartitionid = InvalidOid;
Oid indexpartitionid = InvalidOid;
Partition tablepart = NULL;
Partition indexpartition = NULL;
Relation tablepartrel = NULL;
List* partitionIndexOidList = NIL;
- int partSeq = lfirst_int(cell);
+ int partSeq = lfirst_int(cell1);
+ int partitionno = lfirst_int(cell2);
- tablepartitionid = getPartitionOidFromSequence(heapRel, partSeq, inputPruningResult->partMap);
- tablepart = partitionOpen(heapRel, tablepartitionid, AccessShareLock);
+ tablepartitionid = getPartitionOidFromSequence(heapRel, partSeq, partitionno);
+ tablepart = PartitionOpenWithPartitionno(heapRel, tablepartitionid, partitionno, NoLock);
tablepartrel = partitionGetRelation(heapRel, tablepart);
/* get index partition and add it to a list for following scan */
- ListCell *lc = NULL;
+ ListCell *lc1 = NULL;
+ ListCell *lc2 = NULL;
SubPartitionPruningResult *subPartPruning =
- GetSubPartitionPruningResult(inputPruningResult->ls_selectedSubPartitions, partSeq);
+ GetSubPartitionPruningResult(inputPruningResult->ls_selectedSubPartitions, partSeq, partitionno);
List *subPartList = subPartPruning->ls_selectedSubPartitions;
+ List *subpartitionnos = subPartPruning->ls_selectedSubPartitionnos;
+ Assert(list_length(subPartList) == list_length(subpartitionnos));
- foreach (lc, subPartList)
+ forboth (lc1, subPartList, lc2, subpartitionnos)
{
- int subPartSeq = lfirst_int(lc);
- Oid subpartitionid = getPartitionOidFromSequence(tablepartrel, subPartSeq);
- Partition subpart = partitionOpen(tablepartrel, subpartitionid, AccessShareLock);
+ int subPartSeq = lfirst_int(lc1);
+ int subpartitionno = lfirst_int(lc2);
+ Oid subpartitionid = getPartitionOidFromSequence(tablepartrel, subPartSeq, subpartitionno);
+ Partition subpart =
+ PartitionOpenWithPartitionno(tablepartrel, subpartitionid, subpartitionno, NoLock);
partitionIndexOidList = PartitionGetPartIndexList(subpart);
if (!PointerIsValid(partitionIndexOidList)) {
@@ -344,19 +397,19 @@ static IndexesUsableType eliminate_subpartition_index_unusable(Relation heapRel,
errmsg("no local indexes found for partition %s", PartitionGetPartitionName(tablepart))));
}
indexpartitionid = searchPartitionIndexOid(indexOid, partitionIndexOidList);
- indexpartition = partitionOpen(indexRel, indexpartitionid, AccessShareLock);
+ indexpartition = partitionOpen(indexRel, indexpartitionid, NoLock);
// found a unusable index partition
if (!indexpartition->pd_part->indisusable) {
unusable = true;
- partitionClose(indexRel, indexpartition, AccessShareLock);
- partitionClose(tablepartrel, subpart, AccessShareLock);
+ partitionClose(indexRel, indexpartition, NoLock);
+ partitionClose(tablepartrel, subpart, NoLock);
list_free_ext(partitionIndexOidList);
break;
}
list_free_ext(partitionIndexOidList);
- partitionClose(indexRel, indexpartition, AccessShareLock);
- partitionClose(tablepartrel, subpart, AccessShareLock);
+ partitionClose(indexRel, indexpartition, NoLock);
+ partitionClose(tablepartrel, subpart, NoLock);
}
releaseDummyRelation(&tablepartrel);
@@ -400,7 +453,10 @@ IndexesUsableType eliminate_partition_index_unusable(Relation heapRel, Relation
PruningResult* outIndexUnusable_pr = NULL;
int iterators = bms_num_members(inputPruningResult->bm_rangeSelectedPartitions);
List* part_seqs = inputPruningResult->ls_rangeSelectedPartitions;
- ListCell* cell = NULL;
+ List* partitionnos = inputPruningResult->ls_selectedPartitionnos;
+ Assert(list_length(part_seqs) == list_length(partitionnos));
+ ListCell* cell1 = NULL;
+ ListCell* cell2 = NULL;
Oid indexOid = RelationGetRelid(indexRel);
// first copy out 2 copies
@@ -414,24 +470,21 @@ IndexesUsableType eliminate_partition_index_unusable(Relation heapRel, Relation
bms_free_ext(outIndexUnusable_pr->bm_rangeSelectedPartitions);
outIndexUnusable_pr->bm_rangeSelectedPartitions = NULL;
+    /* cannot take a heap lock here in case of deadlock, so we need to process invalidation messages here */
+ AcceptInvalidationMessages();
+
// this is the scaning loop for selected partitions
- foreach (cell, part_seqs) {
+ forboth (cell1, part_seqs, cell2, partitionnos) {
Oid tablepartitionid = InvalidOid;
Oid indexpartitionid = InvalidOid;
Partition tablepart = NULL;
Partition indexpartition = NULL;
List* partitionIndexOidList = NIL;
- int partSeq = lfirst_int(cell);
+ int partSeq = lfirst_int(cell1);
+ int partitionno = lfirst_int(cell2);
- tablepartitionid = getPartitionOidFromSequence(heapRel, partSeq, inputPruningResult->partMap);
- tablepart = tryPartitionOpen(heapRel, tablepartitionid, AccessShareLock);
- if (!tablepart) {
- PartStatus currStatus = PartitionGetMetadataStatus(tablepartitionid, false);
- if (currStatus != PART_METADATA_INVISIBLE) {
- ReportPartitionOpenError(heapRel, tablepartitionid);
- }
- continue;
- }
+ tablepartitionid = getPartitionOidFromSequence(heapRel, partSeq, partitionno);
+ tablepart = PartitionOpenWithPartitionno(heapRel, tablepartitionid, partitionno, NoLock);
/* get index partition and add it to a list for following scan */
partitionIndexOidList = PartitionGetPartIndexList(tablepart);
@@ -442,7 +495,7 @@ IndexesUsableType eliminate_partition_index_unusable(Relation heapRel, Relation
}
indexpartitionid = searchPartitionIndexOid(indexOid, partitionIndexOidList);
list_free_ext(partitionIndexOidList);
- indexpartition = partitionOpen(indexRel, indexpartitionid, AccessShareLock);
+ indexpartition = partitionOpen(indexRel, indexpartitionid, NoLock);
// found a unusable index partition
if (!indexpartition->pd_part->indisusable) {
// delete partSeq from usable and add into unusable
@@ -459,8 +512,8 @@ IndexesUsableType eliminate_partition_index_unusable(Relation heapRel, Relation
/*
* Already hold parent table lock, it's safe to release lock.
*/
- partitionClose(indexRel, indexpartition, AccessShareLock);
- partitionClose(heapRel, tablepart, AccessShareLock);
+ partitionClose(indexRel, indexpartition, NoLock);
+ partitionClose(heapRel, tablepart, NoLock);
}
// result check
@@ -479,7 +532,7 @@ IndexesUsableType eliminate_partition_index_unusable(Relation heapRel, Relation
// set back the bit map
if (usable_partition_num > 0) {
outIndexUsable_pr->bm_rangeSelectedPartitions = outIndexUsable_bm;
- generateListFromPruningBM(outIndexUsable_pr);
+ generateListFromPruningBM(outIndexUsable_pr, heapRel->partMap);
// set the output
if (indexUsablePruningResult != NULL) {
*indexUsablePruningResult = outIndexUsable_pr;
@@ -488,7 +541,7 @@ IndexesUsableType eliminate_partition_index_unusable(Relation heapRel, Relation
// set back the bit map
if (unusable_partition_num > 0) {
outIndexUnusable_pr->bm_rangeSelectedPartitions = outIndexUnusable_bm;
- generateListFromPruningBM(outIndexUnusable_pr);
+ generateListFromPruningBM(outIndexUnusable_pr, heapRel->partMap);
// set the output
if (indexUnusablePruningResult != NULL) {
*indexUnusablePruningResult = outIndexUnusable_pr;
@@ -528,6 +581,9 @@ IndexesUsableType eliminate_partition_index_unusable(Oid indexOid, PruningResult
}
heapRelOid = IndexGetRelation(indexOid, false);
+    /* cannot take a heap lock here in case of deadlock, so we need to process invalidation messages here */
+ AcceptInvalidationMessages();
+
heapRel = relation_open(heapRelOid, NoLock);
indexRel = relation_open(indexOid, NoLock);
/* Global partition index Just return FULL or NONE */
@@ -662,6 +718,56 @@ static List* list_insert_ordered_sppr(List* list, SubPartitionPruningResult* spp
return list;
}
+static void GeneratePartitionnoFromPruningResult(Relation rel, PruningResult *pruningRes)
+{
+ int partseq;
+ int subpartseq;
+ int partitionno;
+ int subpartitionno;
+ int partOid;
+ Partition part;
+ Relation partRel;
+ SubPartitionPruningResult *subResult;
+
+ ListCell *cell1 = NULL;
+ ListCell *cell2 = NULL;
+ ListCell *cell3 = NULL;
+
+ list_free_ext(pruningRes->ls_selectedPartitionnos);
+ pruningRes->ls_selectedPartitionnos = NIL;
+ if (!RelationIsSubPartitioned(rel)) {
+ foreach (cell1, pruningRes->ls_rangeSelectedPartitions) {
+ partseq = lfirst_int(cell1);
+ partitionno = GetPartitionnoFromSequence(rel->partMap, partseq);
+ pruningRes->ls_selectedPartitionnos = lappend_int(pruningRes->ls_selectedPartitionnos, partitionno);
+ }
+ } else {
+ forboth(cell1, pruningRes->ls_rangeSelectedPartitions, cell2, pruningRes->ls_selectedSubPartitions) {
+ partseq = lfirst_int(cell1);
+ partitionno = GetPartitionnoFromSequence(rel->partMap, partseq);
+ pruningRes->ls_selectedPartitionnos = lappend_int(pruningRes->ls_selectedPartitionnos, partitionno);
+
+ subResult = (SubPartitionPruningResult *)lfirst(cell2);
+ subResult->partitionno = partitionno;
+
+ list_free_ext(subResult->ls_selectedSubPartitionnos);
+ subResult->ls_selectedSubPartitionnos = NIL;
+
+ partOid = getPartitionOidFromSequence(rel, partseq, partitionno);
+ part = partitionOpen(rel, partOid, NoLock);
+ partRel = partitionGetRelation(rel, part);
+ foreach (cell3, subResult->ls_selectedSubPartitions) {
+ subpartseq = lfirst_int(cell3);
+ subpartitionno = GetPartitionnoFromSequence(partRel->partMap, subpartseq);
+ subResult->ls_selectedSubPartitionnos =
+ lappend_int(subResult->ls_selectedSubPartitionnos, subpartitionno);
+ }
+ releaseDummyRelation(&partRel);
+ partitionClose(rel, part, NoLock);
+ }
+ }
+}
+
void MergePartitionListsForPruning(RangeTblEntry* rte, Relation rel, PruningResult* pruningRes)
{
int partitionSeq = 0;
@@ -672,6 +778,7 @@ void MergePartitionListsForPruning(RangeTblEntry* rte, Relation rel, PruningResu
ListCell* subpartCell = NULL;
SubPartitionPruningResult* sppr = NULL;
+ incre_partmap_refcount(rel->partMap);
forboth(partCell, rte->partitionOidList, subpartCell, rte->subpartitionOidList) {
partOid = lfirst_oid(partCell);
subpartOid = lfirst_oid(subpartCell);
@@ -709,4 +816,7 @@ void MergePartitionListsForPruning(RangeTblEntry* rte, Relation rel, PruningResu
/* add SubPartitionPruningResult in order */
pruningRes->ls_selectedSubPartitions = list_insert_ordered_sppr(pruningRes->ls_selectedSubPartitions, sppr);
}
+ GeneratePartitionnoFromPruningResult(rel, pruningRes);
+
+ decre_partmap_refcount(rel->partMap);
}
\ No newline at end of file
diff --git a/src/gausskernel/optimizer/util/tlist.cpp b/src/gausskernel/optimizer/util/tlist.cpp
index 25d5d1c42..482c51734 100644
--- a/src/gausskernel/optimizer/util/tlist.cpp
+++ b/src/gausskernel/optimizer/util/tlist.cpp
@@ -405,6 +405,30 @@ Oid* extract_grouping_ops(List* groupClause)
return groupOperators;
}
+/*
+ * extract_grouping_collations - make an array of the grouping column collations
+ * for a SortGroupClause list
+ */
+Oid* extract_grouping_collations(List* group_clause, List* tlist)
+{
+ int num_cols = list_length(group_clause);
+ int colno = 0;
+ Oid* grp_collations;
+ ListCell* glitem;
+
+ grp_collations = (Oid *)palloc(sizeof(Oid) * num_cols);
+
+ foreach(glitem, group_clause)
+ {
+ SortGroupClause* groupcl = (SortGroupClause *)lfirst(glitem);
+ TargetEntry* tle = get_sortgroupclause_tle(groupcl, tlist);
+
+ grp_collations[colno++] = exprCollation((Node *) tle->expr);
+ }
+
+ return grp_collations;
+}
+
/*
* get_sortgroupref_clause
* Find the SortGroupClause matching the given SortGroupRef index,
diff --git a/src/gausskernel/process/job/job_scheduler.cpp b/src/gausskernel/process/job/job_scheduler.cpp
index 3bbb4bf71..b823edec4 100755
--- a/src/gausskernel/process/job/job_scheduler.cpp
+++ b/src/gausskernel/process/job/job_scheduler.cpp
@@ -739,7 +739,8 @@ static void ScanExpireJobs()
continue;
}
- if (false == DatumGetBool(DirectFunctionCall2(timestamp_gt, curtime, values[Anum_pg_job_next_run_date - 1]))) {
+ Datum cur_job_start_time = DirectFunctionCall1(timestamp_timestamptz, values[Anum_pg_job_next_run_date - 1]);
+ if (false == DatumGetBool(DirectFunctionCall2(timestamp_gt, curtime, cur_job_start_time))) {
/* skip since it doesnot reach book time */
continue;
}
diff --git a/src/gausskernel/process/postmaster/bgwriter.cpp b/src/gausskernel/process/postmaster/bgwriter.cpp
index b7e85f24c..ede88432f 100755
--- a/src/gausskernel/process/postmaster/bgwriter.cpp
+++ b/src/gausskernel/process/postmaster/bgwriter.cpp
@@ -62,6 +62,7 @@
#include "utils/timestamp.h"
#include "gssignal/gs_signal.h"
#include "replication/slot.h"
+#include "access/hash.h"
/*
* Multiplier to apply to BgWriterDelay when we decide to hibernate.
@@ -706,6 +707,10 @@ void drop_rel_all_forks_buffers()
LWLockAcquire(g_instance.bgwriter_cxt.rel_hashtbl_lock, LW_SHARED);
hash_seq_init(&status, unlink_rel_hashtbl);
while ((temp_entry = (DelFileTag *)hash_seq_search(&status)) != NULL) {
+ if (temp_entry->fileUnlink == false) {
+ continue;
+ }
+
entry = (DelFileTag*)hash_search(rel_bak, (void *)&temp_entry->rnode, HASH_ENTER, &found);
if (!found) {
entry->rnode = temp_entry->rnode;
diff --git a/src/gausskernel/process/postmaster/cbmwriter.cpp b/src/gausskernel/process/postmaster/cbmwriter.cpp
index f77ea0ab5..a38a57330 100755
--- a/src/gausskernel/process/postmaster/cbmwriter.cpp
+++ b/src/gausskernel/process/postmaster/cbmwriter.cpp
@@ -217,6 +217,10 @@ void CBMWriterMain(void)
if (t_thrd.cbm_cxt.shutdown_requested) {
g_instance.proc_base->cbmwriterLatch = NULL;
+
+ /* clean cbm track resources */
+ ResetXlogCbmSys();
+
/* Normal exit from the walwriter is here */
proc_exit(0); /* done */
}
diff --git a/src/gausskernel/process/postmaster/pgaudit.cpp b/src/gausskernel/process/postmaster/pgaudit.cpp
index f415f005c..4b1d05e77 100755
--- a/src/gausskernel/process/postmaster/pgaudit.cpp
+++ b/src/gausskernel/process/postmaster/pgaudit.cpp
@@ -49,6 +49,7 @@
#include "utils/timestamp.h"
#include "utils/builtins.h"
#include "utils/acl.h"
+#include "utils/elog.h"
#include "auditfuncs.h"
#include "gssignal/gs_signal.h"
@@ -263,6 +264,7 @@ static const char* AuditTypeDescs[] = {"unknown",
"dml_action_select",
"internal_event",
"function_exec",
+ "system_function_exec",
"copy_to",
"copy_from",
"set_parameter",
@@ -276,7 +278,9 @@ static const char* AuditTypeDescs[] = {"unknown",
"ddl_globalconfig",
"ddl_publication_subscription",
"ddl_foreign_data_wrapper",
- "ddl_sql_patch"};
+ "ddl_sql_patch",
+ "ddl_event"
+};
static const int AuditTypeNum = sizeof(AuditTypeDescs) / sizeof(char*);
@@ -1072,6 +1076,23 @@ static void pgaudit_write_file(char* buffer, int count)
securec_check(errorno, "\0", "\0");
errno = 0;
+
+ /* if record time is earlier than current file's create time,
+ * create a new audit file to avoid the confusion caused by system clock change */
+ FILE* fh = NULL;
+ if (g_instance.audit_cxt.audit_indextbl) {
+ AuditIndexItem *cur_item =
+ g_instance.audit_cxt.audit_indextbl->data +
+ g_instance.audit_cxt.audit_indextbl->curidx[t_thrd.audit.cur_thread_idx];
+ if (curtime < cur_item->ctime) {
+ auditfile_close(SYSAUDITFILE_TYPE);
+ fh = auditfile_open((pg_time_t)time(NULL), "a", true);
+ if (fh != NULL) {
+ t_thrd.audit.sysauditFile = fh;
+ }
+ }
+ }
+
retry1:
rc = fwrite(buffer, 1, count, t_thrd.audit.sysauditFile);
@@ -1718,6 +1739,9 @@ static bool audit_type_validcheck(AuditType type)
case AUDIT_DDL_VIEW:
type_status = CHECK_AUDIT_DDL(DDL_VIEW);
break;
+ case AUDIT_DDL_EVENT:
+ type_status = CHECK_AUDIT_DDL(DDL_EVENT);
+ break;
case AUDIT_DDL_TRIGGER:
type_status = CHECK_AUDIT_DDL(DDL_TRIGGER);
break;
@@ -1781,6 +1805,9 @@ static bool audit_type_validcheck(AuditType type)
case AUDIT_FUNCTION_EXEC:
type_status = (unsigned int)u_sess->attr.attr_security.Audit_Exec;
break;
+ case AUDIT_SYSTEM_FUNCTION_EXEC:
+ type_status = (unsigned int)u_sess->attr.attr_security.audit_system_function_exec;
+ break;
case AUDIT_POLICY_EVENT:
case MASKING_POLICY_EVENT:
case SECURITY_EVENT:
@@ -1803,6 +1830,9 @@ static bool audit_type_validcheck(AuditType type)
ereport(WARNING, (errmsg("unknown audit type, discard it.")));
break;
}
+ if (audit_check_full_audit_user() && type != AUDIT_UNKNOWN_TYPE) {
+ type_status = 1;
+ }
return type_status > 0;
}
@@ -1950,6 +1980,10 @@ void audit_report(AuditType type, AuditResult result, const char *object_name, c
if (!audit_get_clientinfo(type, object_name, event_info)) {
return;
}
+ /* judge if the remote_host info in the blacklist */
+ if (audit_check_client_blacklist(event_info.client_info)) {
+ return;
+ }
char *userid = event_info.userid;
const char* username = event_info.username;
const char* dbname = event_info.dbname;
@@ -2944,7 +2978,7 @@ static bool pgaudit_check_system(TimestampTz begtime, TimestampTz endtime, uint3
curr_filetime = time_t_to_timestamptz(item->ctime);
/* check whether the item is the last item */
if ((index >= earliest_idx && index < t_thrd.audit.audit_indextbl->latest_idx)) {
- if (curr_filetime <= begtime || curr_filetime <= endtime) {
+ if (curr_filetime <= endtime) {
satisfied = true;
}
} else {
@@ -2996,7 +3030,7 @@ static TimestampTz pgaudit_headertime(uint32 fnum, const char *audit_directory)
securec_check_intval(rc, , time_t_to_timestamptz(0));
/* Open the audit file to scan the audit record. */
- fd = open(t_thrd.audit.pgaudit_filepath, O_RDWR, pgaudit_filemode);
+ fd = open(pgaudit_filepath, O_RDWR, pgaudit_filemode);
if (fd < 0) {
ereport(LOG,
(errcode_for_file_access(), errmsg("could not open audit file \"%s\": %m", pgaudit_filepath)));
diff --git a/src/gausskernel/process/postmaster/postmaster.cpp b/src/gausskernel/process/postmaster/postmaster.cpp
index e1293e486..0c1269d0f 100644
--- a/src/gausskernel/process/postmaster/postmaster.cpp
+++ b/src/gausskernel/process/postmaster/postmaster.cpp
@@ -413,7 +413,8 @@ static void CreateHaListenSocket(void);
static void RemoteHostInitilize(Port* port);
static int StartupPacketInitialize(Port* port);
static void PsDisplayInitialize(Port* port);
-static void SetListenSocket(ReplConnInfo **replConnArray, bool *listen_addr_saved);
+static void SetListenSocket(ReplConnInfo **replConnArray, bool *listen_addr_saved, char** first_saved_listen_addr,
+ bool only_refresh_file);
static void UpdateArchiveSlotStatus();
static ServerMode get_cur_mode(void);
@@ -505,6 +506,8 @@ typedef struct {
LWLock* mainLWLockArray;
PMSignalData* PMSignalState;
+ char LocalAddrList[MAXLISTEN][IP_LEN];
+ int LocalIpNum;
HaShmemData* HaShmData;
TimestampTz PgStartTime;
@@ -1075,7 +1078,39 @@ static void print_port_info()
return;
}
-static void SetListenSocket(ReplConnInfo **replConnArray, bool *listen_addr_saved)
+/*
+ * If host is internal IP, then write it into postmaster.pid
+ */
+static void refresh_datadir_lock_file(char* host, bool* listen_addr_saved, char** first_saved_listen_addr)
+{
+ errno_t rc;
+ struct sockaddr_in cur_host_addr;
+ rc = memset_s(&cur_host_addr, sizeof(cur_host_addr), 0, sizeof(cur_host_addr));
+ securec_check(rc, "\0", "\0");
+ cur_host_addr.sin_family = AF_INET;
+ cur_host_addr.sin_addr.s_addr = inet_addr(host);
+ if (is_cluster_internal_IP(*(struct sockaddr*)&cur_host_addr)) {
+ AddToDataDirLockFile(LOCK_FILE_LINE_LISTEN_ADDR, host);
+ *listen_addr_saved = true;
+ ereport(DEBUG5, (errmodule(MOD_COMM_FRAMEWORK),
+ errmsg("[reload listen IP]refresh_datadir_lock_file write %s into postmaster.pid", host)));
+ }
+ if (*first_saved_listen_addr == NULL) {
+ *first_saved_listen_addr = pstrdup(host);
+ }
+}
+
+bool is_not_wildcard(void* val1, void* val2)
+{
+ ListCell* cell = (ListCell*)val1;
+ char* nodename = (char*)val2;
+
+ char* curhost = (char*)lfirst(cell);
+ return (strcmp(curhost, nodename) == 0) ? false : true;
+}
+
+static void SetListenSocket(ReplConnInfo **replConnArray, bool *listen_addr_saved, char** first_saved_listen_addr,
+ bool only_refresh_file = false)
{
int i = 0;
int success = 0;
@@ -1084,9 +1119,12 @@ static void SetListenSocket(ReplConnInfo **replConnArray, bool *listen_addr_save
for (i = 1; i < MAX_REPLNODE_NUM; i++) {
if (replConnArray[i] != NULL) {
if (!(*listen_addr_saved) &&
- !IsInplicitIp(replConnArray[i]->localhost)) {
- AddToDataDirLockFile(LOCK_FILE_LINE_LISTEN_ADDR, replConnArray[i]->localhost);
- *listen_addr_saved = true;
+ !IsInplicitIp(replConnArray[i]->localhost) &&
+ replConnArray[i]->localport == g_instance.attr.attr_network.PoolerPort) {
+ refresh_datadir_lock_file(replConnArray[i]->localhost, listen_addr_saved, first_saved_listen_addr);
+ }
+ if (only_refresh_file) {
+ continue;
}
if (IsAlreadyListen(replConnArray[i]->localhost,
replConnArray[i]->localport)) {
@@ -1102,7 +1140,8 @@ static void SetListenSocket(ReplConnInfo **replConnArray, bool *listen_addr_save
MAXLISTEN,
false,
false,
- false);
+ false,
+ REPL_LISTEN_CHANEL);
if (status == STATUS_OK) {
success++;
} else {
@@ -1116,19 +1155,109 @@ static void SetListenSocket(ReplConnInfo **replConnArray, bool *listen_addr_save
}
}
- if (success == 0) {
+ if (!only_refresh_file && success == 0) {
ReportAlarmAbnormalDataHAInstListeningSocket();
ereport(WARNING, (errmsg("could not create any HA TCP/IP sockets")));
}
}
-bool isNotWildcard(void* val1, void* val2)
+/*
+ * 1. Listen Repl IP if necessary.
+ * 2. Record the first successful host addr which does not mean 'localhost' in lockfile.
+ *    Inner maintenance tools, such as cm_agent and gs_ctl, will use that host for connecting to the dn.
+ * Only accept internal IP which method is trust/gss
+ */
+static void ha_listen_and_refresh_conf(bool only_refresh_file)
{
- ListCell* cell = (ListCell*)val1;
- char* nodename = (char*)val2;
+ bool listen_addr_saved = false;
+ int use_pooler_port = -1;
+ char *first_saved_listen_addr = NULL;
+ char *first_inplicit_addr = NULL;
+ List* elemlist = NULL;
+ ListCell* l = NULL;
- char* curhost = (char*)lfirst(cell);
- return (strcmp(curhost, nodename) == 0) ? false : true;
+#ifdef ENABLE_MULTIPLE_NODES
+ char *rawstring = pstrdup(g_instance.attr.attr_network.ListenAddresses);
+#else
+ char *rawstring = pstrdup(u_sess->attr.attr_network.ListenAddresses);
+#endif
+ /* Parse string into list of identifiers */
+ if (!SplitIdentifierString(rawstring, ',', &elemlist)) {
+ /* syntax error in list */
+ ereport(WARNING, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("ha_listen_and_refresh_conf invalid list syntax for \"listen_addresses\": %s", rawstring)));
+ list_free_ext(elemlist);
+ }
+
+ bool haswildcard = false;
+ foreach(l, elemlist) {
+ char* curhost = (char*)lfirst(l);
+ if (strcmp(curhost, "*") == 0) {
+ haswildcard = true;
+ break;
+ }
+ }
+
+ if (haswildcard == true) {
+ char *wildcard = "*";
+ elemlist = list_cell_clear(elemlist, (void *)wildcard, is_not_wildcard);
+ }
+ foreach(l, elemlist) {
+ char* curhost = (char*)lfirst(l);
+ /*
+ * If IP has been listen successfully, then can write it to postmaster.pid
+ * Inner maintanence tools will connect to PoolerPort, so we can only compare this port
+ */
+ if (!IsAlreadyListen(curhost, g_instance.attr.attr_network.PoolerPort)) {
+ continue;
+ }
+ use_pooler_port = NeedPoolerPort(curhost);
+ if (t_thrd.xlog_cxt.server_mode == NORMAL_MODE || use_pooler_port == -1) {
+ if (!listen_addr_saved && !IsInplicitIp(curhost)) {
+ refresh_datadir_lock_file(curhost, &listen_addr_saved, &first_saved_listen_addr);
+ }
+ }
+ if (first_inplicit_addr == NULL && IsInplicitIp(curhost)) {
+ first_inplicit_addr = pstrdup(curhost);
+ }
+ }
+ list_free_ext(elemlist);
+ pfree(rawstring);
+
+ /*
+     * Then we use ReplConnArray, because this list does not change frequently and usually uses the trust/gss method
+ */
+ if (t_thrd.xlog_cxt.server_mode != NORMAL_MODE) {
+ SetListenSocket(t_thrd.postmaster_cxt.ReplConnArray, &listen_addr_saved, &first_saved_listen_addr,
+ only_refresh_file);
+ ReportResumeAbnormalDataHAInstListeningSocket();
+ }
+ SetListenSocket(t_thrd.postmaster_cxt.CrossClusterReplConnArray, &listen_addr_saved,
+ &first_saved_listen_addr, only_refresh_file);
+ ReportResumeAbnormalDataHAInstListeningSocket();
+
+ /*
+ * If no valid TCP ports, write an empty line for listen address,
+ * indicating the Unix socket must be used. Note that this line is not
+ * added to the lock file until there is a socket backing it.
+ */
+ if (!listen_addr_saved) {
+ if (first_inplicit_addr != NULL) {
+ AddToDataDirLockFile(LOCK_FILE_LINE_LISTEN_ADDR, first_inplicit_addr);
+ ereport(DEBUG5, (errmodule(MOD_COMM_FRAMEWORK),
+ errmsg("[reload listen IP]refresh inplicit_addr %s into postmaster.pid", first_inplicit_addr)));
+ } else if (first_saved_listen_addr == NULL) {
+ AddToDataDirLockFile(LOCK_FILE_LINE_LISTEN_ADDR, "");
+ ereport(WARNING, (
+ errmsg("No explicit IP is configured for listen_addresses GUC.")));
+ } else {
+ AddToDataDirLockFile(LOCK_FILE_LINE_LISTEN_ADDR, first_saved_listen_addr);
+ ereport(DEBUG5, (errmodule(MOD_COMM_FRAMEWORK),
+ errmsg("[reload listen IP]refresh saved_addr %s into postmaster.pid", first_saved_listen_addr)));
+ }
+ }
+ pfree_ext(first_inplicit_addr);
+ pfree_ext(first_saved_listen_addr);
}
void initKnlRTOContext(void)
@@ -1184,6 +1313,495 @@ void check_short_optOfVoid(char *optstring, int argc, char *const *argv)
}
}
+#ifndef ENABLE_MULTIPLE_NODES
+/*
+ * get_addr_from_socket
+ *    Fetch the local address bound to 'sock' via comm_getsockname().
+ *
+ * Returns false if 'sock' is PGINVALID_SOCKET or getsockname() fails;
+ * on success *saddr holds the (possibly truncated) local address.
+ *
+ * NOTE(review): the output buffer is a bare 'struct sockaddr' (16 bytes),
+ * which cannot hold a full sockaddr_in6 (28 bytes). For AF_INET6 sockets
+ * getsockname() will truncate the address, and downstream casts to
+ * sockaddr_in6 read past the buffer. sockaddr_storage would be the safe
+ * type -- TODO confirm whether IPv6 listen reload is exercised here.
+ */
+bool get_addr_from_socket(int sock, struct sockaddr *saddr)
+{
+    if (sock == PGINVALID_SOCKET) {
+        return false;
+    }
+
+    socklen_t slen;
+    /* zero the caller's buffer so unused tail bytes are deterministic */
+    errno_t rc = memset_s(saddr, sizeof(sockaddr), 0, sizeof(sockaddr));
+    securec_check(rc, "\0", "\0");
+    slen = sizeof(sockaddr);
+    if (comm_getsockname(sock, saddr, (socklen_t*)&slen) < 0) {
+        return false;
+    }
+    return true;
+}
+
+/*
+ * get_ip_port_from_addr
+ *    Decode a sockaddr into a printable IP string ('sock_ip', at least
+ *    IP_LEN bytes) and a host-order port number (*port).
+ *
+ * Returns the detected address family (AF_INET, AF_INET6 or AF_UNIX;
+ * for AF_UNIX neither sock_ip nor *port is filled in), or -1 when
+ * sock_ip is NULL or the family is unrecognized.
+ *
+ * NOTE(review): 'saddr' is passed BY VALUE, so only sizeof(struct sockaddr)
+ * (16) bytes are copied; combined with get_addr_from_socket() this means
+ * sin6_addr/sin6_port of an IPv6 peer are read from truncated storage --
+ * TODO confirm with an IPv6 listen_addresses configuration.
+ */
+int get_ip_port_from_addr(char* sock_ip, int* port, struct sockaddr saddr)
+{
+    if (sock_ip == NULL) {
+        return -1;
+    }
+
+    char* result = NULL;
+    if (AF_INET6 == ((struct sockaddr *) &saddr)->sa_family) {
+        /* 128 = full IPv6 prefix length for inet_net_ntop */
+        result = inet_net_ntop(AF_INET6, &((struct sockaddr_in6 *) &saddr)->sin6_addr, 128, sock_ip, IP_LEN);
+        if (NULL == result) {
+            ereport(WARNING, (errmsg("inet_net_ntop failed, error: %d", EAFNOSUPPORT)));
+        }
+        *port = ntohs(((struct sockaddr_in6 *) &saddr)->sin6_port);
+        return AF_INET6;
+    } else if (AF_INET == ((struct sockaddr *) &saddr)->sa_family) {
+        /* 32 = full IPv4 prefix length for inet_net_ntop */
+        result = inet_net_ntop(AF_INET, &((struct sockaddr_in *) &saddr)->sin_addr, 32, sock_ip, IP_LEN);
+        if (NULL == result) {
+            ereport(WARNING, (errmsg("inet_net_ntop failed, error: %d", EAFNOSUPPORT)));
+        }
+        *port = ntohs(((struct sockaddr_in *) &saddr)->sin_port);
+        return AF_INET;
+    } else if (AF_UNIX == ((struct sockaddr *) &saddr)->sa_family) {
+        return AF_UNIX;
+    }
+    return -1;
+}
+
+/*
+ * cmp_ip_with_replication
+ *    Return true if (sock_ip, port) matches the local endpoint of any
+ *    entry in 'replConnArray' for the given address family.
+ *
+ * A match is either an exact IP+port compare, or a loopback equivalence:
+ * when the configured localhost is LOCAL_HOST, the loopback literal of
+ * the family (LOOP_IPV6_IP / LOOP_IP_STRING) is also accepted. IPv6
+ * entries are compared with any '%zone' suffix stripped. Families other
+ * than AF_INET/AF_INET6 never match.
+ */
+static bool cmp_ip_with_replication(char* sock_ip, int port, ReplConnInfo** replConnArray, int family)
+{
+    if (sock_ip == NULL || replConnArray == NULL) {
+        return false;
+    }
+    for (int i = 0; i < MAX_REPLNODE_NUM; i++) {
+        if (replConnArray[i] == NULL || replConnArray[i]->localhost == NULL) {
+            continue;
+        }
+        switch (family) {
+            case AF_INET6: {
+                char* ip_no_zone = NULL;
+                char ip_no_zone_data[IP_LEN] = {0};
+                /* remove any '%zone' part from an IPv6 address string */
+                ip_no_zone = remove_ipv6_zone((char *)replConnArray[i]->localhost, ip_no_zone_data, IP_LEN);
+                if (strcmp(sock_ip, ip_no_zone) == 0 && port == replConnArray[i]->localport) {
+                    return true;
+                }
+                /* 'localhost' in config also matches the ::1 loopback socket */
+                if (strncmp(ip_no_zone, LOCAL_HOST, MAX_IP_STR_LEN) == 0 &&
+                    (strncmp(sock_ip, LOOP_IPV6_IP, MAX_IP_STR_LEN) == 0 ||
+                    strncmp(sock_ip, ip_no_zone, MAX_IP_STR_LEN) == 0) &&
+                    port == replConnArray[i]->localport) {
+                    return true;
+                }
+                break;
+            } case AF_INET: {
+                if (strcmp(sock_ip, replConnArray[i]->localhost) == 0 && port == replConnArray[i]->localport) {
+                    return true;
+                }
+                /* 'localhost' in config also matches the 127.0.0.1 loopback socket */
+                if (strncmp(replConnArray[i]->localhost, LOCAL_HOST, MAX_IP_STR_LEN) == 0 &&
+                    (strncmp(sock_ip, LOOP_IP_STRING, MAX_IP_STR_LEN) == 0 ||
+                    strncmp(sock_ip, replConnArray[i]->localhost, MAX_IP_STR_LEN) ==0) &&
+                    port == replConnArray[i]->localport) {
+                    return true;
+                }
+                break;
+            } default: {
+                break;
+            }
+        }
+    }
+    return false;
+}
+
+/*
+ * swap_pointer_context
+ *    Swap the contents of two equal-length memory regions using a
+ *    temporary palloc'd buffer.
+ *
+ * Raises ERROR (does not return) if the two lengths differ. Used below
+ * to keep the parallel listen-socket arrays (address, port, socket,
+ * type, flags) in sync while compacting them.
+ */
+static void swap_pointer_context(void* p1, int p1_length, void* p2, int p2_length)
+{
+    if (p1_length != p2_length) {
+        ereport(ERROR, (
+            errmsg("swap_pointer_context error, p1_length %d, p2_length %d", p1_length, p2_length)));
+    }
+
+    errno_t rc = EOK;
+    void *tmp = palloc0(p1_length);
+    rc = memcpy_s(tmp, p1_length, p1, p1_length);
+    securec_check(rc, "", "");
+    rc = memcpy_s(p1, p1_length, p2, p1_length);
+    securec_check(rc, "", "");
+    rc = memcpy_s(p2, p1_length, tmp, p1_length);
+    securec_check(rc, "", "");
+    pfree(tmp);
+}
+
+/*
+ * Print listen_addresses info, include socket/IP/port/could close. Just for debug
+ */
+/*
+ * Print listen_addresses info for every active slot: socket fd, IP, port,
+ * whether the slot may be closed, and its channel type. DEBUG5 only;
+ * used purely as a tracing aid around socket-list rebuilds.
+ */
+static void reload_listen_addresses_err_info()
+{
+    for (int i = 0; i < MAXLISTEN; i++) {
+        if (g_instance.listen_cxt.ListenSocket[i] == PGINVALID_SOCKET) {
+            continue;
+        }
+        ereport(DEBUG5, (errmodule(MOD_COMM_FRAMEWORK),
+            errmsg("[reload listen IP]print_debuginfo num %d, sock %d, ip %s, port %d, has_listened %s, "
+                "channel type %d",
+                i, g_instance.listen_cxt.ListenSocket[i],
+                g_instance.listen_cxt.all_listen_addr_list[i],
+                g_instance.listen_cxt.all_listen_port_list[i],
+                t_thrd.postmaster_cxt.all_listen_addr_can_stop[i] ? "true" : "false",
+                g_instance.listen_cxt.listen_chanel_type[i])));
+    }
+}
+
+/* Helper predicates over the loop variable 'i'; only meaningful inside
+ * rebuild_listen_address_socket() below. */
+#define rebuild_post_port_listen \
+    (new_listened_list_type[i] == BOTH_PORT_SOCKETS || new_listened_list_type[i] == POST_PORT_SOCKET)
+#define rebuild_pooler_port_listen \
+    (new_listened_list_type[i] == BOTH_PORT_SOCKETS || new_listened_list_type[i] == POOLER_PORT_SOCKET)
+#define is_ext_listen_addresses (g_instance.listen_cxt.listen_chanel_type[i] == EXT_LISTEN_CHANEL)
+#define is_dolphin_listen_addresses (g_instance.listen_cxt.listen_chanel_type[i] == DOLPHIN_LISTEN_CHANEL)
+/*
+ * rebuild_listen_address_socket
+ *    React to an online change of the listen_addresses GUC: decide which
+ *    existing listen sockets must stay open, close the rest, open sockets
+ *    for newly configured addresses, and refresh postmaster.pid.
+ *
+ * Phases (run on the postmaster main thread during ServerLoop):
+ *  1. Parse the new listen_addresses list ('*' wildcard collapses it).
+ *  2. Mark every slot closable, then clear the flag for sockets that
+ *     already serve a configured IP on PostPortNumber / PoolerPort;
+ *     collect addresses that still need sockets in new_listened_list.
+ *  3. Protect sockets that must never be closed here: ext channels,
+ *     AF_UNIX sockets, foreign ports, and (unless '*' is configured)
+ *     replication endpoints.
+ *  4. Recompute LocalAddrList survival flags from the surviving sockets.
+ *  5. Compact all parallel arrays in place (swap_pointer_context),
+ *     close the tail of stale sockets, and listen on the new IPs.
+ *  6. ha_listen_and_refresh_conf(true) rewrites postmaster.pid.
+ */
+static void rebuild_listen_address_socket()
+{
+    if (u_sess->attr.attr_network.ListenAddresses == NULL || dummyStandbyMode) {
+        return;
+    }
+
+    char* rawstring = NULL;
+    List* elemlist = NULL;
+    ListCell* l = NULL;
+    ListCell* elem = NULL;
+    errno_t rc = EOK;
+    int i = 0, j = 0, k = 0;
+    /* Need a modifiable copy of u_sess->attr.attr_network.ListenAddresses */
+    rawstring = pstrdup(u_sess->attr.attr_network.ListenAddresses);
+    /* Parse string into list of identifiers */
+    if (!SplitIdentifierString(rawstring, ',', &elemlist)) {
+        /* syntax error in list */
+        ereport(WARNING, (errmodule(MOD_COMM_FRAMEWORK),
+            errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            errmsg("[reload listen IP]rebuild_listen_address_socket invalid list syntax for \"listen_addresses\" %s",
+                u_sess->attr.attr_network.ListenAddresses)));
+        list_free_ext(elemlist);
+        pfree(rawstring);
+        return;
+    }
+
+    /* '*' means listen on everything; drop all other entries from the list */
+    bool haswildcard = false;
+    foreach(l, elemlist) {
+        char* curhost = (char*)lfirst(l);
+        if (strcmp(curhost, "*") == 0) {
+            haswildcard = true;
+            break;
+        }
+    }
+
+    if (haswildcard == true) {
+        char *wildcard = "*";
+        elemlist = list_cell_clear(elemlist, (void *)wildcard, is_not_wildcard);
+    }
+
+    /* Start from "everything may be closed"; later phases whitelist survivors */
+    for (i = 0; i < MAXLISTEN; i++) {
+        t_thrd.postmaster_cxt.all_listen_addr_can_stop[i] = true;
+        t_thrd.postmaster_cxt.local_listen_addr_can_stop[i] = true;
+    }
+
+    int checked_num = 0;
+    int status = STATUS_OK;
+    bool has_checked = false;
+    int new_listened_num = 0;
+    char new_listened_list[MAXLISTEN][IP_LEN] = {'\0'};
+    int new_listened_list_type[MAXLISTEN] = {BOTH_PORT_SOCKETS};
+
+    /* loop new listen_addresses IP */
+    foreach(l, elemlist) {
+        char* curhost = (char*)lfirst(l);
+
+        /* Deduplicate listen IPs: skip curhost if it already appeared earlier in the list */
+        int check = 0;
+        foreach(elem, elemlist) {
+            if (check >= checked_num) {
+                break;
+            }
+            if (strcmp(curhost, (char*)lfirst(elem)) == 0) {
+                has_checked = true;
+                break;
+            }
+            check++;
+        }
+        checked_num++;
+        if (has_checked) {
+            has_checked = false;
+            continue;
+        }
+
+        bool samed_ip_post_port = false; /* normal port */
+        bool samed_ip_pooler_port = false; /* port + 1 */
+        /* scan existing sockets: any that already serve curhost survive the reload */
+        for (i = 0; i < MAXLISTEN; i++) {
+            int sock = g_instance.listen_cxt.ListenSocket[i];
+            if (sock == PGINVALID_SOCKET) {
+                continue;
+            }
+            struct sockaddr saddr;
+            if (!get_addr_from_socket(sock, &saddr)) {
+                continue;
+            }
+
+            char sock_ip[IP_LEN] = {0};
+            int port = 0;
+
+            int family = get_ip_port_from_addr(sock_ip, &port, saddr);
+            if (family == -1) {
+                /* unknown family: abort the whole rebuild, freeing parse state */
+                ereport(WARNING, (errmodule(MOD_COMM_FRAMEWORK),
+                    errmsg("[reload listen IP]inet_net_ntop get invalid socket family, sock %d", sock)));
+                list_free_ext(elemlist);
+                pfree(rawstring);
+                return;
+            } else if (family == AF_UNIX) {
+                /* unix-domain sockets are never governed by listen_addresses */
+                t_thrd.postmaster_cxt.all_listen_addr_can_stop[i] = false;
+                continue;
+            }
+
+            if (CheckSockAddr(&saddr, curhost, g_instance.attr.attr_network.PostPortNumber)) {
+                t_thrd.postmaster_cxt.all_listen_addr_can_stop[i] = false;
+                ereport(WARNING, (errmodule(MOD_COMM_FRAMEWORK),
+                    errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                    errmsg("[reload listen IP]\"listen_addresses\" already listen IP %s, port %d",
+                        curhost, g_instance.attr.attr_network.PostPortNumber)));
+                samed_ip_post_port = true;
+            }
+
+            if (CheckSockAddr(&saddr, curhost, g_instance.attr.attr_network.PoolerPort)) {
+                t_thrd.postmaster_cxt.all_listen_addr_can_stop[i] = false;
+                ereport(WARNING, (errmodule(MOD_COMM_FRAMEWORK),
+                    errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                    errmsg("[reload listen IP]\"listen_addresses\" already listen IP %s, port %d",
+                        curhost, g_instance.attr.attr_network.PoolerPort)));
+                samed_ip_pooler_port = true;
+            }
+        }
+        if (samed_ip_post_port && samed_ip_pooler_port) {
+            continue;
+        }
+
+        /* add new ip to listen; record which of the two ports still need a socket */
+        rc = strcpy_s(new_listened_list[new_listened_num], IP_LEN, curhost);
+        securec_check(rc, "", "");
+        if (!samed_ip_post_port && !samed_ip_pooler_port) {
+            new_listened_list_type[new_listened_num] = BOTH_PORT_SOCKETS;
+        } else {
+            if (!samed_ip_post_port) {
+                new_listened_list_type[new_listened_num] = POST_PORT_SOCKET; // normal port
+            } else if (!samed_ip_pooler_port) {
+                new_listened_list_type[new_listened_num] = POOLER_PORT_SOCKET; // port + 1
+            }
+        }
+        new_listened_num++;
+    }
+    list_free_ext(elemlist);
+    pfree(rawstring);
+
+    /* Then we check socket according to rules and mark it to 'false', which means it should not be closed */
+    for (i = 0; i < MAXLISTEN; i++) {
+        int sock = g_instance.listen_cxt.ListenSocket[i];
+        if (sock == PGINVALID_SOCKET) {
+            continue;
+        }
+
+        if (!t_thrd.postmaster_cxt.all_listen_addr_can_stop[i]) {
+            continue;
+        }
+        /* If socket is ext_listen_addresses, should not close it */
+        if (is_ext_listen_addresses) {
+            t_thrd.postmaster_cxt.all_listen_addr_can_stop[i] = false;
+            continue;
+        }
+        /*
+         * But if the socket is a Dolphin listen address we DO close it:
+         * Dolphin sockets are created from listen_addresses as well, so they
+         * must be rebuilt whenever listen_addresses changes.
+         * NOTE(review): the original comment here was truncated mid-sentence;
+         * wording reconstructed from the code -- confirm intent.
+         */
+        if (is_dolphin_listen_addresses) {
+            t_thrd.postmaster_cxt.all_listen_addr_can_stop[i] = true;
+            continue;
+        }
+        struct sockaddr saddr;
+        if (!get_addr_from_socket(sock, &saddr)) {
+            continue;
+        }
+        char sock_ip[IP_LEN] = {0};
+        int port = 0;
+
+        int family = get_ip_port_from_addr(sock_ip, &port, saddr);
+        if (family == -1) {
+            ereport(ERROR, (errmodule(MOD_COMM_FRAMEWORK),
+                errmsg("[reload listen IP]inet_net_ntop get invalid socket family, sock %d", sock)));
+        } else if (family == AF_UNIX) {
+            continue;
+        }
+        /* listen_addresses only listen port and port + 1. Other port number should not be closed */
+        if (port != g_instance.attr.attr_network.PostPortNumber && port != g_instance.attr.attr_network.PoolerPort) {
+            t_thrd.postmaster_cxt.all_listen_addr_can_stop[i] = false;
+            continue;
+        }
+        /* If listen_addresses includes '*', we will close repl socket and rebuild */
+        if (!haswildcard && (cmp_ip_with_replication(sock_ip, port, t_thrd.postmaster_cxt.ReplConnArray, family) ||
+            cmp_ip_with_replication(sock_ip, port, t_thrd.postmaster_cxt.CrossClusterReplConnArray, family))) {
+            t_thrd.postmaster_cxt.all_listen_addr_can_stop[i] = false;
+        }
+    }
+
+    /* finally, we should refresh LocalAddrList: keep entries backed by a surviving socket */
+    for (i = 0; i < t_thrd.postmaster_cxt.LocalIpNum; i++) {
+        for (j = 0; j < MAXLISTEN; j++) {
+            int sock = g_instance.listen_cxt.ListenSocket[j];
+            if (sock == PGINVALID_SOCKET || t_thrd.postmaster_cxt.all_listen_addr_can_stop[j]) {
+                continue;
+            }
+            struct sockaddr saddr;
+            if (!get_addr_from_socket(sock, &saddr)) {
+                continue;
+            }
+            char sock_ip[IP_LEN] = {0};
+            int port = 0;
+
+            int family = get_ip_port_from_addr(sock_ip, &port, saddr);
+            if (family == -1) {
+                ereport(ERROR, (errmodule(MOD_COMM_FRAMEWORK),
+                    errmsg("[reload listen IP]inet_net_ntop get invalid socket family, sock %d", sock)));
+            } else if (family == AF_UNIX) {
+                continue;
+            }
+            if (strcmp(sock_ip, t_thrd.postmaster_cxt.LocalAddrList[i]) == 0) {
+                t_thrd.postmaster_cxt.local_listen_addr_can_stop[i] = false;
+            }
+        }
+    }
+    reload_listen_addresses_err_info();
+    /* resort global and local listen_addr_list */
+    for (i = 0, j = 0, k = 0; i < MAXLISTEN; i++) {
+        if (g_instance.listen_cxt.ListenSocket[i] == PGINVALID_SOCKET) {
+            continue;
+        }
+        /*
+         * [j] indicates the first position of all socket list which can be closed.
+         * If listen addr could not stop, we should reserve it and swap it with [j], and move j backward.
+         */
+        if (!t_thrd.postmaster_cxt.all_listen_addr_can_stop[i]) {
+            if (i > j) {
+                /* all parallel arrays must move together to stay index-consistent */
+                swap_pointer_context(g_instance.listen_cxt.all_listen_addr_list[i], IP_LEN,
+                    g_instance.listen_cxt.all_listen_addr_list[j], IP_LEN);
+                swap_pointer_context(&g_instance.listen_cxt.all_listen_port_list[i], sizeof(int),
+                    &g_instance.listen_cxt.all_listen_port_list[j], sizeof(int));
+                swap_pointer_context(&g_instance.listen_cxt.ListenSocket[i], sizeof(int),
+                    &g_instance.listen_cxt.ListenSocket[j], sizeof(int));
+                swap_pointer_context(&g_instance.listen_cxt.listen_sock_type[i], sizeof(int),
+                    &g_instance.listen_cxt.listen_sock_type[j], sizeof(int));
+                swap_pointer_context(&t_thrd.postmaster_cxt.all_listen_addr_can_stop[i], sizeof(bool),
+                    &t_thrd.postmaster_cxt.all_listen_addr_can_stop[j], sizeof(bool));
+                swap_pointer_context(&g_instance.listen_cxt.listen_chanel_type[i], sizeof(int),
+                    &g_instance.listen_cxt.listen_chanel_type[j], sizeof(int));
+            }
+            j++;
+        }
+        /*
+         * [k] indicates the first position of local addr list which can be removed.
+         * If listen addr could not stop, we should reserve it and swap it with [k], and move k backward.
+         */
+        if (!t_thrd.postmaster_cxt.local_listen_addr_can_stop[i]) {
+            if (i > k) {
+                swap_pointer_context(t_thrd.postmaster_cxt.LocalAddrList[i], IP_LEN,
+                    t_thrd.postmaster_cxt.LocalAddrList[k], IP_LEN);
+                swap_pointer_context(&t_thrd.postmaster_cxt.local_listen_addr_can_stop[i], sizeof(bool),
+                    &t_thrd.postmaster_cxt.local_listen_addr_can_stop[k], sizeof(bool));
+            }
+            k++;
+        }
+    }
+
+    /* entries [k, MAXLISTEN) of LocalAddrList are now stale; blank them */
+    t_thrd.postmaster_cxt.LocalIpNum = k;
+    for (i = k; i < MAXLISTEN; i++) {
+        if (g_instance.listen_cxt.ListenSocket[i] == PGINVALID_SOCKET) {
+            continue;
+        }
+        rc = memset_s(t_thrd.postmaster_cxt.LocalAddrList[i], IP_LEN, '\0', IP_LEN);
+        securec_check(rc, "", "");
+    }
+
+    /* shutdown socket which is not in listen_addresses */
+    for (i = j; i < MAXLISTEN; i++) {
+        if (g_instance.listen_cxt.ListenSocket[i] == PGINVALID_SOCKET) {
+            continue;
+        }
+        rc = memset_s(g_instance.listen_cxt.all_listen_addr_list[i], IP_LEN, '\0', IP_LEN);
+        securec_check(rc, "", "");
+        StreamClose(g_instance.listen_cxt.ListenSocket[i]);
+        g_instance.listen_cxt.ListenSocket[i] = PGINVALID_SOCKET;
+        g_instance.listen_cxt.listen_sock_type[i] = UNUSED_LISTEN_SOCKET;
+        g_instance.listen_cxt.all_listen_port_list[i] = -1;
+    }
+
+    /* listen socket for new IP: PostPortNumber and/or PoolerPort per recorded type */
+    for (i = 0; i < new_listened_num; i++) {
+        char* curhost = new_listened_list[i];
+        ereport(DEBUG5, (errmodule(MOD_COMM_FRAMEWORK),
+            errmsg("[reload listen IP]rebuild listen IP: %s, listen type %d",
+                curhost,
+                new_listened_list_type[i])));
+        if (rebuild_post_port_listen) {
+            /* "*" maps to a NULL host, i.e. bind the wildcard address */
+            if (strcmp(curhost, "*") == 0)
+                status = StreamServerPort(AF_UNSPEC,
+                    NULL,
+                    (unsigned short)g_instance.attr.attr_network.PostPortNumber,
+                    g_instance.attr.attr_network.UnixSocketDir,
+                    g_instance.listen_cxt.ListenSocket,
+                    MAXLISTEN,
+                    true,
+                    true,
+                    false,
+                    NORMAL_LISTEN_CHANEL);
+            else
+                status = StreamServerPort(AF_UNSPEC,
+                    curhost,
+                    (unsigned short)g_instance.attr.attr_network.PostPortNumber,
+                    g_instance.attr.attr_network.UnixSocketDir,
+                    g_instance.listen_cxt.ListenSocket,
+                    MAXLISTEN,
+                    true,
+                    true,
+                    false,
+                    NORMAL_LISTEN_CHANEL);
+            if (status != STATUS_OK) {
+                /* non-fatal: log and carry on with the remaining addresses */
+                print_port_info();
+                ereport(WARNING, (errmodule(MOD_COMM_FRAMEWORK),
+                    errmsg("[reload listen IP]could not create listen socket for \"%s:%d\"",
+                        curhost,
+                        g_instance.attr.attr_network.PostPortNumber)));
+            }
+        }
+
+        if (rebuild_pooler_port_listen) {
+            if (strcmp(curhost, "*") == 0) {
+                status = StreamServerPort(AF_UNSPEC,
+                    NULL,
+                    (unsigned short)g_instance.attr.attr_network.PoolerPort,
+                    g_instance.attr.attr_network.UnixSocketDir,
+                    g_instance.listen_cxt.ListenSocket,
+                    MAXLISTEN,
+                    false,
+                    false,
+                    false,
+                    NORMAL_LISTEN_CHANEL);
+            } else {
+                status = StreamServerPort(AF_UNSPEC,
+                    curhost,
+                    (unsigned short)g_instance.attr.attr_network.PoolerPort,
+                    g_instance.attr.attr_network.UnixSocketDir,
+                    g_instance.listen_cxt.ListenSocket,
+                    MAXLISTEN,
+                    false,
+                    false,
+                    false,
+                    NORMAL_LISTEN_CHANEL);
+            }
+
+            if (status != STATUS_OK)
+                ereport(WARNING, (errmodule(MOD_COMM_FRAMEWORK),
+                    errmsg("[reload listen IP]could not create ha listen socket for \"%s:%d\"",
+                        curhost,
+                        g_instance.attr.attr_network.PoolerPort)));
+        }
+    }
+    reload_listen_addresses_err_info();
+
+    /* Add ip to postmaster.pid */
+    ha_listen_and_refresh_conf(true);
+}
+#endif
+
/*
* Postmaster main entry point
*/
@@ -1193,7 +1811,6 @@ int PostmasterMain(int argc, char* argv[])
int status = STATUS_OK;
char* output_config_variable = NULL;
char* userDoption = NULL;
- bool listen_addr_saved = false;
int use_pooler_port = -1;
int i;
OptParseContext optCtxt;
@@ -1793,26 +2410,54 @@ int PostmasterMain(int argc, char* argv[])
process_shared_preload_libraries();
/*
- * Establish input sockets.
- */
- for (i = 0; i < MAXLISTEN; i++)
- g_instance.listen_cxt.ListenSocket[i] = PGINVALID_SOCKET;
+ * Load configuration files for client authentication.
+ * Load pg_hba.conf before communication thread.
+ * We will check whether listen_addresses IP is internal of not. If it is, then write this IP to postmaster.pid.
+ * So load_hba() should be before than AddToDataDirLockFile().
+ */
+ int loadhbaCount = 0;
+ while (!load_hba()) {
+ check_old_hba(true);
+ loadhbaCount++;
+ if (loadhbaCount >= 3) {
+ /*
+ * It makes no sense to continue if we fail to load the HBA file,
+ * since there is no way to connect to the database in this case.
+ */
+ ereport(FATAL, (errmsg("could not load pg_hba.conf")));
+ }
+ pg_usleep(200000L); /* sleep 200ms for reload next time */
+ }
- if (g_instance.attr.attr_network.ListenAddresses && !dummyStandbyMode) {
+ /*
+ * Establish input sockets.
+ */
+ for (i = 0; i < MAXLISTEN; i++) {
+ g_instance.listen_cxt.ListenSocket[i] = PGINVALID_SOCKET;
+ t_thrd.postmaster_cxt.all_listen_addr_can_stop[i] = false;
+ }
+
+ char *listen_addresses =
+#ifdef ENABLE_MULTIPLE_NODES
+ g_instance.attr.attr_network.ListenAddresses;
+#else
+ u_sess->attr.attr_network.ListenAddresses;
+#endif
+ if (listen_addresses && !dummyStandbyMode) {
char* rawstring = NULL;
List* elemlist = NULL;
ListCell* l = NULL;
int success = 0;
/*
- * start commproxy if needed
- */
+ * start commproxy if needed
+ */
if (CommProxyNeedSetup()) {
CommProxyStartUp();
}
- /* Need a modifiable copy of g_instance.attr.attr_network.ListenAddresses */
- rawstring = pstrdup(g_instance.attr.attr_network.ListenAddresses);
+ /* Need a modifiable copy of listen_addresses */
+ rawstring = pstrdup(listen_addresses);
/* Parse string into list of identifiers */
if (!SplitIdentifierString(rawstring, ',', &elemlist)) {
@@ -1832,7 +2477,7 @@ int PostmasterMain(int argc, char* argv[])
if (haswildcard == true) {
char *wildcard = "*";
- elemlist = list_cell_clear(elemlist, (void *)wildcard, isNotWildcard);
+ elemlist = list_cell_clear(elemlist, (void *)wildcard, is_not_wildcard);
}
foreach (l, elemlist) {
@@ -1847,7 +2492,8 @@ int PostmasterMain(int argc, char* argv[])
MAXLISTEN,
true,
true,
- false);
+ false,
+ NORMAL_LISTEN_CHANEL);
else
status = StreamServerPort(AF_UNSPEC,
curhost,
@@ -1857,7 +2503,8 @@ int PostmasterMain(int argc, char* argv[])
MAXLISTEN,
true,
true,
- false);
+ false,
+ NORMAL_LISTEN_CHANEL);
if (status == STATUS_OK)
success++;
@@ -1886,7 +2533,8 @@ int PostmasterMain(int argc, char* argv[])
MAXLISTEN,
false,
false,
- false);
+ false,
+ NORMAL_LISTEN_CHANEL);
} else {
status = StreamServerPort(AF_UNSPEC,
curhost,
@@ -1896,7 +2544,8 @@ int PostmasterMain(int argc, char* argv[])
MAXLISTEN,
false,
false,
- false);
+ false,
+ NORMAL_LISTEN_CHANEL);
}
if (status != STATUS_OK)
@@ -1904,15 +2553,6 @@ int PostmasterMain(int argc, char* argv[])
(errmsg("could not create ha listen socket for \"%s:%d\"",
curhost,
g_instance.attr.attr_network.PoolerPort)));
-
- /*
- * Record the first successful host addr which does not mean 'localhost' in lockfile.
- * Inner maintanence tools, such as cm_agent and gs_ctl, will use that host for connecting cn.
- */
- if (!listen_addr_saved && !IsInplicitIp(curhost)) {
- AddToDataDirLockFile(LOCK_FILE_LINE_LISTEN_ADDR, curhost);
- listen_addr_saved = true;
- }
}
}
@@ -1923,12 +2563,8 @@ int PostmasterMain(int argc, char* argv[])
pfree(rawstring);
}
- if (t_thrd.xlog_cxt.server_mode != NORMAL_MODE) {
- SetListenSocket(t_thrd.postmaster_cxt.ReplConnArray, &listen_addr_saved);
- ReportResumeAbnormalDataHAInstListeningSocket();
- }
- SetListenSocket(t_thrd.postmaster_cxt.CrossClusterReplConnArray, &listen_addr_saved);
- ReportResumeAbnormalDataHAInstListeningSocket();
+ /* Listen ha IP and refresh postmaster.pid */
+ ha_listen_and_refresh_conf(false);
#ifdef USE_BONJOUR
/* Register for Bonjour only if we opened TCP socket(s) */
@@ -1979,7 +2615,8 @@ int PostmasterMain(int argc, char* argv[])
MAXLISTEN,
false,
true,
- false);
+ false,
+ NORMAL_LISTEN_CHANEL);
if (status != STATUS_OK)
ereport(FATAL,
@@ -1996,7 +2633,8 @@ int PostmasterMain(int argc, char* argv[])
MAXLISTEN,
false,
false,
- false);
+ false,
+ NORMAL_LISTEN_CHANEL);
if (status != STATUS_OK)
ereport(FATAL,
@@ -2020,7 +2658,8 @@ int PostmasterMain(int argc, char* argv[])
MAXLISTEN,
false,
true,
- true);
+ true,
+ NORMAL_LISTEN_CHANEL);
if (status != STATUS_OK)
ereport(WARNING, (errmsg("could not create Unix-domain for comm socket")));
@@ -2043,16 +2682,6 @@ int PostmasterMain(int argc, char* argv[])
*/
on_proc_exit(CloseServerPorts, 0);
- /*
- * If no valid TCP ports, write an empty line for listen address,
- * indicating the Unix socket must be used. Note that this line is not
- * added to the lock file until there is a socket backing it.
- */
- if (!listen_addr_saved) {
- AddToDataDirLockFile(LOCK_FILE_LINE_LISTEN_ADDR, "");
- ereport(WARNING, (errmsg("No explicit IP is configured for listen_addresses GUC.")));
- }
-
if (g_instance.attr.attr_common.enable_thread_pool) {
/* No need to start thread pool for dummy standby node. */
if (!dummyStandbyMode) {
@@ -2309,24 +2938,6 @@ int PostmasterMain(int argc, char* argv[])
/* init the usedDnSpace hash table */
InitDnHashTable();
- /*
- * Load configuration files for client authentication.
- * Load pg_hba.conf before communication thread.
- */
- int loadhbaCount = 0;
- while (!load_hba()) {
- check_old_hba(true);
- loadhbaCount++;
- if (loadhbaCount >= 3) {
- /*
- * It makes no sense to continue if we fail to load the HBA file,
- * since there is no way to connect to the database in this case.
- */
- ereport(FATAL, (errmsg("could not load pg_hba.conf")));
- }
- pg_usleep(200000L); /* sleep 200ms for reload next time */
- }
-
if (ENABLE_THREAD_POOL_DN_LOGICCONN) {
InitCommLogicResource();
}
@@ -3009,6 +3620,8 @@ static int ServerLoop(void)
}
ereport(LOG, (errmsg("create thread end!")));
+ /* Only after postmaster_main thread starting completed, can reload listen_addresses */
+ t_thrd.postmaster_cxt.can_listen_addresses_reload = true;
for (;;) {
fd_set rmask;
int selres;
@@ -3023,14 +3636,29 @@ static int ServerLoop(void)
#endif
}
- if (g_instance.listen_cxt.reload_fds) {
+#ifndef ENABLE_MULTIPLE_NODES
+ uint32 reload_begin = 0;
+ uint32 reload_end = 1;
+ if (g_instance.listen_cxt.reload_fds || t_thrd.postmaster_cxt.is_listen_addresses_reload) {
+ if (pg_atomic_compare_exchange_u32(&g_instance.listen_cxt.is_reloading_listen_socket, &reload_begin, 1)) {
+ if (t_thrd.postmaster_cxt.is_listen_addresses_reload) {
+ t_thrd.postmaster_cxt.is_listen_addresses_reload = false;
+ rebuild_listen_address_socket();
+ }
+
+ /* rebuild poll fd with new ListenSocket list */
#ifdef HAVE_POLL
- nSockets = initPollfd(ufds);
+ nSockets = initPollfd(ufds);
#else
- nSockets = initMasks(&readmask);
+ nSockets = initMasks(&readmask);
#endif
- g_instance.listen_cxt.reload_fds = false;
+ g_instance.listen_cxt.reload_fds = false;
+ (void)pg_atomic_compare_exchange_u32(&g_instance.listen_cxt.is_reloading_listen_socket, &reload_end, 0);
+ } else {
+ ereport(WARNING, (errmsg("PostMaster Main is reloading listen socket, please try again")));
+ }
}
+#endif
/*
* Wait for a connection request to arrive.
@@ -4054,6 +4682,7 @@ int ProcessStartupPacket(Port* port, bool SSLdone)
} else if (strcmp(valptr, "cm_agent") == 0) {
/* mark remote as cm_agent */
clientIsCmAgent = true;
+ u_sess->proc_cxt.clientIsCMAgent = true;
u_sess->libpq_cxt.IsConnFromCmAgent = true;
ereport(DEBUG5, (errmsg("cm_agent connected")));
} else if (strcmp(valptr, "gs_clean") == 0) {
@@ -4084,6 +4713,7 @@ int ProcessStartupPacket(Port* port, bool SSLdone)
} else if (strcmp(valptr, "gsql") == 0) {
/* mark remote as gsql */
clientIsGsql = true;
+ u_sess->proc_cxt.clientIsGsql = true;
ereport(DEBUG5, (errmsg("gsql connected")));
} else if (strcmp(valptr, "OM") == 0) {
clientIsOM = true;
@@ -8469,17 +9099,17 @@ void PortInitialize(Port* port, knl_thread_arg* arg)
#endif
}
-void CheckClientIp(Port* port)
+/* Check whether the client ip is configured in pg_hba.conf */
+bool CheckClientIp(Port* port)
{
- /* Check whether the client ip is configured in pg_hba.conf */
char ip[IP_LEN] = {'\0'};
if (!check_ip_whitelist(port, ip, IP_LEN)) {
- port->protocol_config->fn_init(); /* initialize libpq to talk to client */
- t_thrd.postgres_cxt.whereToSendOutput = DestRemote; /* now safe to ereport to client */
- ereport(FATAL,
+ ereport(LOG,
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("no pg_hba.conf entry for host \"%s\".", ip)));
+ errmsg("no pg_hba.conf entry for host \"%s\".", ip))); /* can not get port->user_name now */
+ return false;
}
+ return true;
}
void initRandomState(TimestampTz start_time, TimestampTz stop_time)
@@ -9201,6 +9831,11 @@ static void sigusr1_handler(SIGNAL_ARGS)
/* promote cascade standby */
if (IsCascadeStandby()) {
t_thrd.xlog_cxt.is_cascade_standby = false;
+ /*
+ * When the server is started in pending mode and notified as cascade standby,
+ * we should set server mode to standby mode before promoting.
+ */
+ t_thrd.xlog_cxt.server_mode = STANDBY_MODE;
if (t_thrd.postmaster_cxt.HaShmData->is_cross_region) {
t_thrd.xlog_cxt.is_hadr_main_standby = true;
}
@@ -10609,6 +11244,10 @@ static bool save_backend_variables(BackendParameters* param, Port* port, HANDLE
param->mainLWLockArray = (LWLock*)t_thrd.shemem_ptr_cxt.mainLWLockArray;
param->PMSignalState = t_thrd.shemem_ptr_cxt.PMSignalState;
+ param->LocalIpNum = t_thrd.postmaster_cxt.LocalIpNum;
+ int rc =
+ memcpy_s(param->LocalAddrList, (MAXLISTEN * IP_LEN), t_thrd.postmaster_cxt.LocalAddrList, (MAXLISTEN * IP_LEN));
+ securec_check(rc, "", "");
param->HaShmData = t_thrd.postmaster_cxt.HaShmData;
param->PgStartTime = t_thrd.time_cxt.pg_start_time;
@@ -10760,6 +11399,10 @@ static void restore_backend_variables(BackendParameters* param, Port* port)
t_thrd.shemem_ptr_cxt.mainLWLockArray = (LWLockPadded*)param->mainLWLockArray;
t_thrd.shemem_ptr_cxt.PMSignalState = param->PMSignalState;
+ t_thrd.postmaster_cxt.LocalIpNum = param->LocalIpNum;
+ rc =
+ memcpy_s(t_thrd.postmaster_cxt.LocalAddrList, (MAXLISTEN * IP_LEN), param->LocalAddrList, (MAXLISTEN * IP_LEN));
+ securec_check(rc, "", "");
t_thrd.postmaster_cxt.HaShmData = param->HaShmData;
t_thrd.time_cxt.pg_start_time = param->PgStartTime;
t_thrd.time_cxt.pg_reload_time = param->PgReloadTime;
@@ -11082,10 +11725,10 @@ bool IsLocalAddr(Port* port)
if (AF_UNIX == laddr->sa_family) {
return true;
}
- for (i = 0; i != g_instance.listen_cxt.LocalIpNum; ++i) {
- if (0 == strcmp(local_ip, g_instance.listen_cxt.LocalAddrList[i]) ||
- (AF_INET == laddr->sa_family && 0 == strcmp("0.0.0.0", g_instance.listen_cxt.LocalAddrList[i])) ||
- (AF_INET6 == laddr->sa_family && 0 == strcmp("::", g_instance.listen_cxt.LocalAddrList[i]))) {
+ for (i = 0; i != t_thrd.postmaster_cxt.LocalIpNum; ++i) {
+ if (0 == strcmp(local_ip, t_thrd.postmaster_cxt.LocalAddrList[i]) ||
+ (AF_INET == laddr->sa_family && 0 == strcmp("0.0.0.0", t_thrd.postmaster_cxt.LocalAddrList[i])) ||
+ (AF_INET6 == laddr->sa_family && 0 == strcmp("::", t_thrd.postmaster_cxt.LocalAddrList[i]))) {
return true;
}
@@ -11093,10 +11736,15 @@ bool IsLocalAddr(Port* port)
if (NULL == result && laddr->sa_family != AF_UNSPEC) {
ereport(WARNING, (errmsg("inet_net_ntop failed, error: %d", EAFNOSUPPORT)));
}
+ for (i = 0; i != t_thrd.postmaster_cxt.LocalIpNum; ++i) {
+ ereport(DEBUG1, (errmodule(MOD_COMM_FRAMEWORK),
+ errmsg("LocalAddrIP %s, local_ip %s", t_thrd.postmaster_cxt.LocalAddrList[i], local_ip)));
+ }
return false;
#else
- for (i = 0; i != g_instance.listen_cxt.LocalIpNum; ++i) {
- ereport(DEBUG1, (errmsg("LocalAddrIP %s\n", g_instance.listen_cxt.LocalAddrList[i])));
+ for (i = 0; i != t_thrd.postmaster_cxt.LocalIpNum; ++i) {
+ ereport(DEBUG1, (errmodule(MOD_COMM_FRAMEWORK),
+ errmsg("LocalAddrIP %s, local_ip %s", t_thrd.postmaster_cxt.LocalAddrList[i], local_ip)));
}
return true;
#endif
@@ -11279,6 +11927,7 @@ static bool IsLocalPort(Port* port)
if (sockport == g_instance.attr.attr_network.PostPortNumber) {
return true;
}
+ ereport(LOG, (errmsg("LocalAddrPort %d", sockport)));
return false;
}
@@ -11379,6 +12028,11 @@ static bool IsAlreadyListen(const char* ip, int port)
return true;
}
+ if (strcmp(ip, LOCAL_HOST) == 0 &&
+ (strcmp(sock_ip, LOOP_IPV6_IP) == 0 || strcmp(ip, sock_ip) == 0)) {
+ return true;
+ }
+
if ((strcmp(sock_ip, "::") == 0) && (port == ntohs(((struct sockaddr_in6 *) &saddr)->sin6_port))) {
return true;
}
@@ -11387,6 +12041,11 @@ static bool IsAlreadyListen(const char* ip, int port)
return true;
}
+ if (strcmp(ip, LOCAL_HOST) == 0 &&
+ (strcmp(sock_ip, LOOP_IP_STRING) == 0 || strcmp(ip, sock_ip) == 0)) {
+ return true;
+ }
+
if ((strcmp(sock_ip, "0.0.0.0") == 0) && (port == ntohs(((struct sockaddr_in *) &saddr)->sin_port))) {
return true;
}
@@ -11481,7 +12140,8 @@ bool CheckSockAddr(struct sockaddr* sock_addr, const char* szIP, int port)
* According to createmode, create the listen socket
*/
void CreateServerSocket(
- char* ipaddr, int portNumber, int enCreatemode, int* success, bool add_localaddr_flag, bool is_create_psql_sock)
+ char* ipaddr, int portNumber, int enCreatemode, int* success, bool add_localaddr_flag, bool is_create_psql_sock,
+ ListenChanelType channel_type)
{
int status = 0;
int successCount = 0;
@@ -11505,7 +12165,8 @@ void CreateServerSocket(
MAXLISTEN,
add_localaddr_flag,
is_create_psql_sock,
- false);
+ false,
+ channel_type);
} else {
status = StreamServerPort(AF_UNSPEC,
ipaddr,
@@ -11515,7 +12176,8 @@ void CreateServerSocket(
MAXLISTEN,
add_localaddr_flag,
is_create_psql_sock,
- false);
+ false,
+ channel_type);
}
if (status == STATUS_OK) {
successCount++;
@@ -11642,7 +12304,8 @@ static void CreateHaListenSocket(void)
(int)newListenAddrs.lsnArray[i].createmodel,
&success,
false,
- false);
+ false,
+ REPL_LISTEN_CHANEL);
}
if (0 == success) {
@@ -12863,7 +13526,10 @@ int GaussDbThreadMain(knl_thread_arg* arg)
proc_exit(StreamMain());
} break;
case WORKER:
- CheckClientIp(&port); /* For THREADPOOL_WORKER check in InitPort */
+ /* For THREADPOOL_WORKER check in InitPort */
+ if (!CheckClientIp(&port)) {
+ proc_exit(0);
+ }
/* fall through */
case THREADPOOL_WORKER: {
/* Module load callback */
@@ -13467,8 +14133,8 @@ static ThreadMetaData GaussdbThreadGate[] = {
{ GaussDbThreadMain, COMM_RECEIVER, "COMMrecloop", "communicator receiver loop" },
{ GaussDbThreadMain, COMM_AUXILIARY, "COMMaux", "communicator auxiliary" },
{ GaussDbThreadMain, COMM_POOLER_CLEAN, "COMMpoolcleaner", "communicator pooler auto cleaner" },
- { GaussDbThreadMain, LOGICAL_READ_RECORD, "LogicalRead", "LogicalRead pooler auto cleaner" },
- { GaussDbThreadMain, PARALLEL_DECODE, "COMMpoolcleaner", "communicator pooler auto cleaner" },
+ { GaussDbThreadMain, LOGICAL_READ_RECORD, "LogicalReader", "logical reader" },
+ { GaussDbThreadMain, PARALLEL_DECODE, "LogicalDecoder", "logical decoder" },
{ GaussDbThreadMain, UNDO_RECYCLER, "undorecycler", "undo recycler" },
{ GaussDbThreadMain, UNDO_LAUNCHER, "asyncundolaunch", "async undo launcher" },
{ GaussDbThreadMain, UNDO_WORKER, "asyncundoworker", "async undo worker" },
diff --git a/src/gausskernel/process/stream/streamCore.cpp b/src/gausskernel/process/stream/streamCore.cpp
index 9d3fd2278..9736c8b7d 100755
--- a/src/gausskernel/process/stream/streamCore.cpp
+++ b/src/gausskernel/process/stream/streamCore.cpp
@@ -919,7 +919,7 @@ void StreamNodeGroup::destroy(StreamObjStatus status)
if (u_sess->stream_cxt.global_obj != NULL) {
#ifndef ENABLE_MULTIPLE_NODES
if (u_sess->stream_cxt.global_obj->m_portal != NULL) {
- u_sess->stream_cxt.global_obj->m_portal->streamInfo.streamGroup = NULL;
+ u_sess->stream_cxt.global_obj->m_portal->streamInfo.Reset();
}
#endif
u_sess->stream_cxt.global_obj->deInit(status);
@@ -952,6 +952,15 @@ void StreamNodeGroup::syncQuit(StreamObjStatus status)
u_sess->stream_cxt.enter_sync_point == true)
return;
+ /* add trace info while smp not correct. */
+ if (t_thrd.log_cxt.errordata_stack_depth == (ERRORDATA_STACK_SIZE - 1) && StreamTopConsumerAmI()) {
+ if (u_sess->stream_cxt.global_obj != NULL) {
+ ereport(LOG, (errmsg("[StreamSyncQuit] global_obj: %lu, runtime_mem_cxt: %lu",
+ (uint64)u_sess->stream_cxt.global_obj, (uint64)u_sess->stream_cxt.stream_runtime_mem_cxt)));
+ return;
+ }
+ }
+
/* We must relase all pthread mutex by my thread, Or it will dead lock. But it is not a good solution. */
// lock the same thread mutex can't be conflict in one thread.
ResourceOwnerReleaseAllXactPthreadMutex();
@@ -1012,14 +1021,14 @@ void StreamNodeGroup::syncQuit(StreamObjStatus status)
pgstat_report_waitstatus(oldStatus);
}
-void StreamNodeGroup::ReleaseStreamGroup(bool resetSession)
+void StreamNodeGroup::ReleaseStreamGroup(bool resetSession, StreamObjStatus status)
{
if (u_sess->stream_cxt.global_obj != NULL) {
StreamTopConsumerIam();
/* Set sync point for waiting all stream threads complete. */
- StreamNodeGroup::syncQuit(STREAM_COMPLETE);
+ StreamNodeGroup::syncQuit(status);
UnRegisterStreamSnapshots();
- StreamNodeGroup::destroy(STREAM_COMPLETE);
+ StreamNodeGroup::destroy(status);
if (!resetSession) {
/* reset some flag related to stream */
ResetStreamEnv();
diff --git a/src/gausskernel/process/stream/streamProducer.cpp b/src/gausskernel/process/stream/streamProducer.cpp
old mode 100644
new mode 100755
index 2c4006924..2e4fbd7cb
--- a/src/gausskernel/process/stream/streamProducer.cpp
+++ b/src/gausskernel/process/stream/streamProducer.cpp
@@ -222,7 +222,16 @@ void StreamProducer::init(TupleDesc desc, StreamTxnContext txnCxt, ParamListInfo
m_sliceBoundary = m_consumerNodes->boundaries;
m_parentPlanNodeId = parentPlanNodeId;
m_desc = CreateTupleDescCopyConstr(desc);
+#ifdef ENABLE_MULTIPLE_NODES
m_params = params;
+#else
+ /* smp in producer should copy paramlist to avoid main thread exit that memory not available. */
+ if (u_sess->SPI_cxt._connected >= 0) {
+ m_params = copyParamList(params);
+ } else {
+ m_params = params;
+ }
+#endif
m_streamTxnCxt = txnCxt;
m_streamTxnCxt.CurrentTransactionState =
diff --git a/src/gausskernel/process/tcop/auditfuncs.cpp b/src/gausskernel/process/tcop/auditfuncs.cpp
index 14c016833..ab1366c1a 100644
--- a/src/gausskernel/process/tcop/auditfuncs.cpp
+++ b/src/gausskernel/process/tcop/auditfuncs.cpp
@@ -33,6 +33,24 @@
#include "auditfuncs.h"
#include "utils/elog.h"
#include "libpq/libpq-be.h"
+#include "utils/builtins.h"
+#include "miscadmin.h"
+#include "utils/rangetypes.h"
+#include "utils/inet.h"
+#include "fmgr.h"
+#include "utils/nabstime.h"
+#include "access/tupmacs.h"
+#include "utils/fmgrtab.h"
+#include "lib/stringinfo.h"
+#include "utils/cash.h"
+#include "utils/lsyscache.h"
+#include "catalog/namespace.h"
+#include "utils/uuid.h"
+#include "utils/builtins.h"
+#include "utils/date.h"
+#include "utils/numeric.h"
+#include "utils/numeric_gs.h"
+#include "workload/cpwlm.h"
#define AUDIT_BUFFERSIZ 512
@@ -68,6 +86,7 @@ static void pgaudit_ddl_trigger(const char* objectname, const char* cmdtext);
static void pgaudit_ddl_user(const char* objectname, const char* cmdtext);
static void pgaudit_ddl_view(const char* objectname, const char* cmdtext);
static void pgaudit_ddl_matview(const char* objectname, const char* cmdtext);
+static void pgaudit_ddl_event(const char* objectname, const char* cmdtext);
static void pgaudit_ddl_function(const char* objectname, const char* cmdtext);
static void pgaudit_ddl_package(const char* objectname, const char* cmdtext);
static void pgaudit_ddl_resourcepool(const char* objectname, const char* cmdtext);
@@ -92,6 +111,12 @@ static void pgaudit_ddl_synonym(const char* objectName, const char* cmdText);
static void pgaudit_ddl_textsearch(const char* objectname, const char* cmdtext);
static void pgaudit_ddl_publication_subscription(const char* objectname, const char* cmdtext);
static void pgaudit_ddl_fdw(const char* objectname, const char* cmdtext);
+static char* audit_get_func_args(FunctionCallInfo fcinfo);
+static char* audit_get_text_array_value(ArrayType* array, int* numitems);
+
+#define BUF_LENGTH 64
+#define PG_GETARG_COMMANDID(n) DatumGetCommandId(PG_GETARG_DATUM(n))
+#define SYSTEM_FUNC_DEFAULT_VALUE "NULL"
static const AuditFuncMap g_auditFuncMap[] = {
{OBJECT_SCHEMA, pgaudit_ddl_schema},
@@ -120,7 +145,8 @@ static const AuditFuncMap g_auditFuncMap[] = {
{OBJECT_TSCONFIGURATION, pgaudit_ddl_textsearch},
{OBJECT_PUBLICATION, pgaudit_ddl_publication_subscription},
{OBJECT_SUBSCRIPTION, pgaudit_ddl_publication_subscription},
- {OBJECT_FDW, pgaudit_ddl_fdw}
+ {OBJECT_FDW, pgaudit_ddl_fdw},
+ {OBJECT_EVENT, pgaudit_ddl_event}
};
static const int g_auditFuncMapNum = sizeof(g_auditFuncMap) / sizeof(AuditFuncMap);
@@ -360,6 +386,7 @@ static void pgaudit_ddl_database_object(
case AUDIT_DDL_TRIGGER:
case AUDIT_DDL_USER:
case AUDIT_DDL_VIEW:
+ case AUDIT_DDL_EVENT:
case AUDIT_DDL_RESOURCEPOOL:
case AUDIT_DDL_GLOBALCONFIG:
case AUDIT_DDL_WORKLOAD:
@@ -400,8 +427,10 @@ void pgaudit_dml_table(const char* objectname, const char* cmdtext)
AuditResult audit_result = AUDIT_OK;
char* mask_string = NULL;
Assert(cmdtext);
- if (u_sess->attr.attr_security.Audit_DML == 0)
+ bool is_full_audit_user = audit_check_full_audit_user();
+ if (u_sess->attr.attr_security.Audit_DML == 0 && !is_full_audit_user) {
return;
+ }
mask_string = maskPassword(cmdtext);
if (mask_string == NULL)
@@ -420,9 +449,10 @@ void pgaudit_dml_table_select(const char* objectname, const char* cmdtext)
AuditType audit_type = AUDIT_DML_ACTION_SELECT;
AuditResult audit_result = AUDIT_OK;
char* mask_string = NULL;
- Assert(cmdtext);
- if (u_sess->attr.attr_security.Audit_DML_SELECT == 0)
+ bool is_full_audit_user = audit_check_full_audit_user();
+ if (u_sess->attr.attr_security.Audit_DML_SELECT == 0 && !is_full_audit_user) {
return;
+ }
mask_string = maskPassword(cmdtext);
if (mask_string == NULL)
@@ -532,6 +562,20 @@ static void pgaudit_ddl_view(const char* objectname, const char* cmdtext)
return;
}
+static void pgaudit_ddl_event(const char* objectname, const char* cmdtext)
+{
+ AuditType audit_type = AUDIT_DDL_EVENT;
+ AuditResult audit_result = AUDIT_OK;
+
+ Assert(cmdtext != NULL);
+ if (!CHECK_AUDIT_DDL(DDL_EVENT)) {
+ return;
+ }
+
+ pgaudit_ddl_database_object(audit_type, audit_result, objectname, cmdtext);
+ return;
+}
+
/*
* Brief : pgaudit_ddl_matview(char* objectname, const char* cmdtext)
* Description : Audit the operations of matview
@@ -633,7 +677,7 @@ static void pgaudit_grant_or_revoke_role(bool isgrant, const char* objectname, c
char* mask_string = NULL;
Assert(cmdtext != NULL);
- if (u_sess->attr.attr_security.Audit_PrivilegeAdmin == 0) {
+ if (u_sess->attr.attr_security.Audit_PrivilegeAdmin == 0 && !audit_check_full_audit_user()) {
return;
}
mask_string = maskPassword(cmdtext);
@@ -1034,8 +1078,9 @@ static void pgaudit_process_set_parameter(const char* objectname, const char* cm
char* mask_string = NULL;
Assert(cmdtext != NULL);
- if (u_sess->attr.attr_security.Audit_Set == 0)
+ if (u_sess->attr.attr_security.Audit_Set == 0 && !audit_check_full_audit_user()) {
return;
+ }
/* make the cmdtext which may contain senstive info like password. */
mask_string = maskPassword(cmdtext);
@@ -1077,6 +1122,11 @@ static void pgaudit_process_drop_objects(Node* node, const char* querystring)
objectname = rel->relname;
pgaudit_ddl_view(objectname, querystring);
} break;
+        case OBJECT_EVENT: {
+            rel = makeRangeVarFromNameList(names);
+            objectname = rel->relname;
+            pgaudit_ddl_event(objectname, querystring);
+        } break;
case OBJECT_MATVIEW: {
rel = makeRangeVarFromNameList(names);
objectname = rel->relname;
@@ -1474,6 +1524,18 @@ static void pgaudit_ProcessUtility(processutility_context* processutility_cxt,
ViewStmt* viewstmt = (ViewStmt*)(parsetree);
pgaudit_ddl_view(viewstmt->view->relname, queryString);
} break;
+ case T_CreateEventStmt: {
+ CreateEventStmt* eventstmt = (CreateEventStmt*)(parsetree);
+ pgaudit_ddl_event(eventstmt->event_name->relname, queryString);
+ } break;
+ case T_AlterEventStmt: {
+ AlterEventStmt* eventstmt = (AlterEventStmt*)(parsetree);
+ pgaudit_ddl_event(eventstmt->event_name->relname, queryString);
+ } break;
+ case T_DropEventStmt: {
+ DropEventStmt* eventstmt = (DropEventStmt*)(parsetree);
+ pgaudit_ddl_event(eventstmt->event_name->relname, queryString);
+ } break;
case T_CreateEnumStmt: {
CreateEnumStmt* enumstmt = (CreateEnumStmt*)(parsetree);
@@ -1852,18 +1914,19 @@ static void pgaudit_ExecutorEnd(QueryDesc* queryDesc)
void light_pgaudit_ExecutorEnd(Query* query)
{
char* object_name = NULL;
+ bool is_full_audit_user = audit_check_full_audit_user();
switch (query->commandType) {
case CMD_INSERT:
case CMD_DELETE:
case CMD_UPDATE:
- if (u_sess->attr.attr_security.Audit_DML != 0) {
+ if (u_sess->attr.attr_security.Audit_DML != 0 || is_full_audit_user) {
object_name = pgaudit_get_relation_name(query->rtable);
pgaudit_dml_table(object_name, query->sql_statement);
}
break;
case CMD_SELECT:
- if (u_sess->attr.attr_security.Audit_DML_SELECT != 0) {
+ if (u_sess->attr.attr_security.Audit_DML_SELECT != 0 || is_full_audit_user) {
object_name = pgaudit_get_relation_name(query->rtable);
pgaudit_dml_table_select(object_name, query->sql_statement);
}
@@ -1873,3 +1936,505 @@ void light_pgaudit_ExecutorEnd(Query* query)
break;
}
}
+
+/*
+ * Brief : check if src_str includes target_str
+ * Description : split src_str by delimiter and search for target_str
+ */
+bool audit_search_str(char* src_str, char* target_str)
+{
+ bool is_found = false;
+ char* ptok = NULL;
+ char* sub_str = NULL;
+ char* src_str_tmp = NULL;
+ char* src_str_trim = NULL;
+ src_str_tmp = pstrdup(src_str);
+    if (src_str_tmp == NULL) {
+        /* pstrdup failed: nothing was allocated, so there is nothing to free */
+        return is_found;
+    }
+ sub_str = (char*)strchr(src_str, ',');
+ if (sub_str == NULL) {
+ src_str_trim = trim(src_str_tmp);
+ if (strcmp(target_str, src_str_trim) == 0) {
+ pfree(src_str_tmp);
+ return true;
+ }
+ } else {
+ sub_str = strtok_s(src_str_tmp, ",", &ptok);
+ while (sub_str != NULL) {
+ src_str_trim = trim(sub_str);
+ if (strcmp(target_str, src_str_trim) == 0) {
+ is_found = true;
+ break;
+ }
+ sub_str = strtok_s(NULL, ",", &ptok);
+ }
+ }
+ pfree(src_str_tmp);
+ return is_found;
+}
+
+
+/*
+ * Brief : check if the client_info is in the blackist, consists of clients
+ * whoes audit records should not be sent to audit pipe
+ * Description : search No_Audit_Client for current clientconn_info, if exists, skip audit_report
+ */
+bool audit_check_client_blacklist(char* client_info)
+{
+ bool is_blacklist = false;
+ /* check if apply audit blacklist */
+ if (u_sess->attr.attr_security.no_audit_client == NULL || strlen(u_sess->attr.attr_security.no_audit_client) == 0) {
+ return is_blacklist;
+ }
+ is_blacklist = audit_search_str(u_sess->attr.attr_security.no_audit_client, client_info);
+ return is_blacklist;
+}
+
+/*
+ * Brief : check if current user is under comprehensive audit
+ * Description : search Full_Audit_Users for current username, if exists, open all the audit options
+ */
+bool audit_check_full_audit_user()
+{
+ bool is_full_audit = false;
+ char* username = NULL;
+ if (u_sess->proc_cxt.MyProcPort == NULL) {
+ return is_full_audit;
+ }
+ if (u_sess->misc_cxt.CurrentUserName != NULL) {
+ username = (char*)u_sess->misc_cxt.CurrentUserName;
+ } else {
+ username = u_sess->proc_cxt.MyProcPort->user_name;
+ }
+    if (u_sess->attr.attr_security.full_audit_users == NULL ||
+        strlen(u_sess->attr.attr_security.full_audit_users) == 0 || username == NULL) {
+ return is_full_audit;
+ }
+ is_full_audit = audit_search_str(u_sess->attr.attr_security.full_audit_users, username);
+ return is_full_audit;
+}
+
+
+/*
+ * Brief : check if the system function should be audited
+ * Description : search list g_audit_system_funcs for func_name, if exists, do audit
+ */
+static bool audit_is_system_func(char *func_name)
+{
+ bool is_audit = false;
+ int i;
+ for (i = 0; g_audit_system_funcs[i] != NULL; i++) {
+ if (strcmp((const char*)func_name, g_audit_system_funcs[i]) == 0) {
+ is_audit = true;
+ }
+ }
+ if (!is_audit) {
+ List* search_path = fetch_search_path(false);
+ char* nspname = NULL;
+
+ if (search_path == NIL) {
+ list_free_ext(search_path);
+ return is_audit;
+ }
+ nspname = get_namespace_name(linitial_oid(search_path));
+ func_name = quote_qualified_identifier(nspname, func_name);
+ list_free_ext(search_path);
+ for (i = 0; g_audit_system_funcs[i] != NULL; i++) {
+ if (strcmp((const char*)func_name, g_audit_system_funcs[i]) == 0) {
+ is_audit = true;
+ }
+ }
+ }
+ return is_audit;
+}
+
+/*
+ * Brief : check if the system function includes crypt information
+ * Description : search list g_audit_crypt_funcs for func_name
+ */
+static bool audit_is_crypt_func(char *func_name)
+{
+ bool is_crypt = false;
+ for (int i = 0; g_audit_crypt_funcs[i] != NULL; i++) {
+ if (strcmp((const char*)func_name, g_audit_crypt_funcs[i]) == 0) {
+ is_crypt = true;
+ }
+ }
+ return is_crypt;
+}
+
+/*
+ * Brief : generate audit record of system function in white list, and do audit_report
+ * Description : extract funtion name, oid and parameters of system function
+ */
+void audit_system_function(FunctionCallInfo fcinfo, const AuditResult result)
+{
+ Oid fn_oid = fcinfo->flinfo->fn_oid;
+ char* fn_signature = format_procedure(fn_oid);
+ /* extract function name */
+ char* func_name = NULL;
+ char* next_token = NULL;
+ const char* token = "(";
+    func_name = strtok_s(pstrdup(fn_signature), token, &next_token);
+ if (func_name == NULL) {
+ return;
+ }
+ /* check if current function is system function and included the white list */
+ if (fn_oid >= FirstBootstrapObjectId || !audit_is_system_func(func_name)) {
+ return;
+ }
+ /* if function includes crypt infomation, skip extract arguments */
+ bool is_crypt = audit_is_crypt_func(func_name);
+ char details[PGAUDIT_MAXLENGTH];
+ errno_t rcs = EOK;
+ if (is_crypt) {
+ rcs = snprintf_s(details, PGAUDIT_MAXLENGTH, PGAUDIT_MAXLENGTH - 1,
+ "Execute system function(oid = %u). args = %s",
+ fn_oid, CRYPT_FUNC_ARG);
+ } else {
+ char* fn_args = audit_get_func_args(fcinfo);
+ rcs = snprintf_s(details, PGAUDIT_MAXLENGTH, PGAUDIT_MAXLENGTH - 1,
+ "Execute system function(oid = %u). args = %s",
+ fn_oid, fn_args);
+ pfree(fn_args);
+ }
+ securec_check_ss(rcs, "\0", "\0");
+ audit_report(AUDIT_SYSTEM_FUNCTION_EXEC, result, fn_signature, details);
+}
+
+/*
+ * Brief : generate the detail_info by extract arguments of system function
+ * Description : traverse fcinfo to get arguments value and form string
+ */
+static char* audit_get_func_args(FunctionCallInfo fcinfo)
+{
+ char* arg;
+ StringInfoData buf;
+ initStringInfo(&buf);
+ appendStringInfo(&buf, "%s(", "");
+
+ for (int i = 0; i < fcinfo->nargs; i++) {
+ if (i > 0) {
+ appendStringInfoString(&buf, ",");
+ }
+ arg = audit_get_value_bytype(fcinfo, i);
+ appendStringInfo(&buf, "%s", arg);
+ }
+ appendStringInfoString(&buf, ")");
+ char* result = buf.data;
+ return result;
+}
+
+/*
+ * Brief : extract args from the fcinfo
+ * Description : get value from fcinfo by different arguments type and sequence number
+ */
+char* audit_get_value_bytype(FunctionCallInfo fcinfo, int n_arg)
+{
+ char* value = (char*)palloc(BUF_LENGTH);
+ errno_t nRet = EOK;
+ /* paramater can be NULL if it has default value */
+ if (PG_ARGISNULL(n_arg)) {
+ nRet = strncpy_s(value, BUF_LENGTH, SYSTEM_FUNC_DEFAULT_VALUE, BUF_LENGTH - 1);
+ securec_check(nRet, "\0", "\0");
+ return value;
+ }
+ Oid typeOid = fcinfo->argTypes[n_arg];
+ switch (typeOid) {
+ case TEXTARRAYOID: {
+ int option_nitems;
+ ArrayType* option_array = PG_GETARG_ARRAYTYPE_P(n_arg);
+ pfree(value);
+ value = audit_get_text_array_value(option_array, &option_nitems);
+ break;
+ }
+ case BOOLOID: {
+ bool arg = PG_GETARG_BOOL(n_arg);
+ nRet = strncpy_s(value, BUF_LENGTH, (arg) ? "true" : "false", BUF_LENGTH - 1);
+ securec_check(nRet, "\0", "\0");
+ break;
+ }
+ case BYTEAOID: {
+ bytea* v = PG_GETARG_BYTEA_P(n_arg);
+ pfree(value);
+            value = text_to_cstring((text*)v); /* VARDATA_ANY is not NUL-terminated */
+ break;
+ }
+ case CHAROID: {
+ char v = PG_GETARG_CHAR(n_arg);
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%c", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case NAMEOID: {
+ pfree(value);
+ value = NameStr(*PG_GETARG_NAME(n_arg));
+ break;
+ }
+ case INT8OID: {
+ int64 v = PG_GETARG_INT64(n_arg);
+            nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, INT64_FORMAT, v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case INT2OID: {
+ int2 v = PG_GETARG_INT16(n_arg);
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%d", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case INT4OID: {
+ int4 v = PG_GETARG_INT32(n_arg);
+            nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%d", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case REGPROCOID: {
+ RegProcedure v = PG_GETARG_OID(n_arg);
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%u", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case TEXTOID: {
+ pfree(value);
+ value = text_to_cstring(PG_GETARG_TEXT_P(n_arg));
+ break;
+ }
+        case OIDOID: {
+            Oid v = PG_GETARG_OID(n_arg);
+            /* always format, even for InvalidOid (0): the palloc'd buffer is
+             * uninitialized, so skipping would return garbage to the caller */
+            nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%u", v);
+            securec_check_ss(nRet, "\0", "\0");
+            break;
+        }
+ case TIDOID: {
+ ThreadId v = (unsigned long)PG_GETARG_INT64(n_arg);
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%lld", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case XIDOID: {
+ TransactionId v = PG_GETARG_TRANSACTIONID(n_arg);
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%llu", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case CIDOID: {
+ CommandId v = PG_GETARG_COMMANDID(n_arg);
+            nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%u", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case SHORTXIDOID: {
+ ShortTransactionId v = PG_GETARG_SHORTTRANSACTIONID(n_arg);
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%lu", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case FLOAT4OID: {
+ float4 v = PG_GETARG_FLOAT4(n_arg);
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%.4f", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case FLOAT8OID: {
+ float8 v = PG_GETARG_FLOAT8(n_arg);
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%.8f", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case ABSTIMEOID: {
+ AbsoluteTime v = PG_GETARG_ABSOLUTETIME(n_arg);
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%ld", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case RELTIMEOID: {
+ RelativeTime v = PG_GETARG_RELATIVETIME(n_arg);
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%ld", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case INTERVALOID: {
+ Interval* v = PG_GETARG_INTERVAL_P(n_arg);
+ TmToChar tmtc;
+ struct pg_tm* tm = NULL;
+ ZERO_tmtc(&tmtc);
+ tm = tmtcTm(&tmtc);
+ if (interval2tm(*v, tm, &tmtcFsec(&tmtc)) != 0) {
+ nRet = strncpy_s(value, BUF_LENGTH, SYSTEM_FUNC_DEFAULT_VALUE, BUF_LENGTH - 1);
+ } else {
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1,
+ "%d year %d mon %d day %02d:%02d:%02d", tm->tm_year,
+ tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec);
+ }
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case TINTERVALOID: {
+ TimeInterval v = PG_GETARG_TIMEINTERVAL(n_arg);
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%d", v->data);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case CASHOID: {
+ Cash v = PG_GETARG_CASH(n_arg);
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%lld", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case INETOID:case CIDROID: {
+ inet* v = PG_GETARG_INET_PP(n_arg);
+ pfree(value);
+ value = (char*)VARDATA_ANY(v);
+ break;
+ }
+ case BPCHAROID: {
+ BpChar* v = PG_GETARG_BPCHAR_PP(n_arg);
+ pfree(value);
+            value = text_to_cstring((text*)v); /* VARDATA_ANY is not NUL-terminated */
+ break;
+ }
+ case DATEOID: {
+ DateADT v = PG_GETARG_DATEADT(n_arg);
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%ld", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case TIMEOID: {
+ TimeADT v = PG_GETARG_TIMEADT(n_arg);
+#ifdef HAVE_INT64_TIMESTAMP
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%lld", v);
+#else
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%lf", v);
+#endif
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case TIMESTAMPOID: {
+ Timestamp v = PG_GETARG_TIMESTAMP(n_arg);
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%lld", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case TIMESTAMPTZOID: {
+ TimestampTz v = PG_GETARG_TIMESTAMPTZ(n_arg);
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%lld", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case CSTRINGOID:
+ case NVARCHAR2OID:
+ case VARCHAROID:
+ case RAWOID:
+ case SMALLDATETIMEOID: {
+ pfree(value);
+ value = PG_GETARG_CSTRING(n_arg);
+ break;
+ }
+ case TIMETZOID: {
+ TimeTzADT* v = PG_GETARG_TIMETZADT_P(n_arg);
+#ifdef HAVE_INT64_TIMESTAMP
+ int64 time = v->time;
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "time %ld zone %lu", time, v->zone);
+#else
+ float8 time = v->time;
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "time %f zone %lu", time, v->zone);
+#endif
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case INT1OID: {
+ int1 v = PG_GETARG_INT8(n_arg);
+            nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%d", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case UUIDOID: {
+ pg_uuid_t* v_uuid = PG_GETARG_UUID_P(n_arg);
+ char* v = (char*)v_uuid->data;
+ nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%s", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case ANYENUMOID: {
+ uint32 v = (uint32)PG_GETARG_OID(n_arg);
+            nRet = snprintf_s(value, BUF_LENGTH, BUF_LENGTH - 1, "%u", v);
+ securec_check_ss(nRet, "\0", "\0");
+ break;
+ }
+ case INT2VECTOROID: {
+ int2vector *key = (int2vector *)PG_GETARG_POINTER(n_arg);
+ pfree(value);
+ value = (char *)key->values;
+ break;
+ }
+ case OIDVECTOROID: {
+ oidvector *key = (oidvector *)PG_GETARG_POINTER(n_arg);
+ pfree(value);
+ value = (char *)key->values;
+ break;
+ }
+ default: {
+            ereport(LOG, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                errmsg("UNKNOWN PARAMETER TYPE FOR SYSTEM FUNCTION AUDIT.")));
+ nRet = strncpy_s(value, BUF_LENGTH, SYSTEM_FUNC_DEFAULT_VALUE, BUF_LENGTH - 1);
+ securec_check(nRet, "\0", "\0");
+ break;
+ }
+ }
+ return value;
+}
+
+/*
+ * Brief : get values of optional arguments in form of text array
+ * Description : deconstruct a text[] into C-strings
+ */
+static char* audit_get_text_array_value(ArrayType* array, int* numitems)
+{
+ int ndim = ARR_NDIM(array);
+ int* dims = ARR_DIMS(array);
+ int nitems;
+ int16 typlen;
+ bool typbyval = false;
+ char typalign;
+ char* ptr = NULL;
+ bits8* bitmap = NULL;
+ uint32 bitmask;
+ int i;
+ char* val = NULL;
+
+ StringInfoData buf;
+ initStringInfo(&buf);
+ Assert(ARR_ELEMTYPE(array) == TEXTOID);
+ *numitems = nitems = ArrayGetNItems(ndim, dims);
+ get_typlenbyvalalign(ARR_ELEMTYPE(array), &typlen, &typbyval, &typalign);
+
+ ptr = ARR_DATA_PTR(array);
+ bitmap = ARR_NULLBITMAP(array);
+ bitmask = 1;
+ for (i = 0; i < nitems; i++) {
+ if (i > 0)
+ appendStringInfo(&buf, "%s", ",");
+ if (bitmap && (*bitmap & bitmask) == 0) {
+ val = NULL;
+ } else {
+ val = TextDatumGetCString(PointerGetDatum(ptr));
+ ptr = att_addlength_pointer(ptr, typlen, ptr);
+ ptr = (char*)att_align_nominal(ptr, typalign);
+ }
+        appendStringInfo(&buf, "%s", (val != NULL) ? val : "NULL");
+ /* advance bitmap pointer if any */
+ if (bitmap) {
+ bitmask <<= 1;
+ if (bitmask == 0x100) {
+ bitmap++;
+ bitmask = 1;
+ }
+ }
+ }
+ char* values = buf.data;
+ return values;
+}
diff --git a/src/gausskernel/process/tcop/postgres.cpp b/src/gausskernel/process/tcop/postgres.cpp
index 288d91a92..025c25f20 100755
--- a/src/gausskernel/process/tcop/postgres.cpp
+++ b/src/gausskernel/process/tcop/postgres.cpp
@@ -274,7 +274,6 @@ static XLogRecPtr xlogCopyStart = InvalidXLogRecPtr;
static int InteractiveBackend(StringInfo inBuf);
static int interactive_getc(void);
static int ReadCommand(StringInfo inBuf);
-static List* pg_rewrite_query(Query* query);
bool check_log_statement(List* stmt_list);
static int errdetail_execute(List* raw_parsetree_list);
int errdetail_params(ParamListInfo params);
@@ -1106,7 +1105,7 @@ List* pg_analyze_and_rewrite_params(
* Note: query must just have come from the parser, because we do not do
* AcquireRewriteLocks() on it.
*/
-static List* pg_rewrite_query(Query* query)
+List* pg_rewrite_query(Query* query)
{
List* querytree_list = NIL;
PGSTAT_INIT_TIME_RECORD();
@@ -3446,34 +3445,36 @@ static void exec_parse_message(const char* query_string, /* string to execute */
MemoryContextSwitchTo(oldcxt);
}
#endif
- CachedPlanSource * plansource = g_instance.plan_cache->Fetch(query_string, strlen(query_string),
+ psrc = g_instance.plan_cache->Fetch(query_string, strlen(query_string),
numParams, paramTypes, NULL);
- if (plansource != NULL) {
+ if (psrc != NULL) {
bool hasGetLock = false;
if (is_named) {
if (ENABLE_CN_GPC)
- StorePreparedStatementCNGPC(stmt_name, plansource, false, true);
+ StorePreparedStatementCNGPC(stmt_name, psrc, false, true);
else {
- u_sess->pcache_cxt.cur_stmt_psrc = plansource;
- if (g_instance.plan_cache->CheckRecreateCachePlan(plansource, &hasGetLock))
- g_instance.plan_cache->RecreateCachePlan(plansource, stmt_name, NULL, NULL, NULL, hasGetLock);
+ u_sess->pcache_cxt.cur_stmt_psrc = psrc;
+ if (g_instance.plan_cache->CheckRecreateCachePlan(psrc, &hasGetLock))
+ g_instance.plan_cache->RecreateCachePlan(psrc, stmt_name, NULL, NULL, NULL, hasGetLock);
}
goto pass_parsing;
} else {
drop_unnamed_stmt();
- u_sess->pcache_cxt.unnamed_stmt_psrc = plansource;
+ u_sess->pcache_cxt.unnamed_stmt_psrc = psrc;
if (ENABLE_DN_GPC)
u_sess->pcache_cxt.private_refcount--;
/* don't share unnamed invalid or lightproxy plansource */
- if (!g_instance.plan_cache->CheckRecreateCachePlan(plansource, &hasGetLock) && plansource->gplan) {
+ if (!g_instance.plan_cache->CheckRecreateCachePlan(psrc, &hasGetLock) && psrc->gplan) {
goto pass_parsing;
} else {
if (hasGetLock) {
- AcquirePlannerLocks(plansource->query_list, false);
- if (plansource->gplan) {
- AcquireExecutorLocks(plansource->gplan->stmt_list, false);
+ AcquirePlannerLocks(psrc->query_list, false);
+ if (psrc->gplan) {
+ AcquireExecutorLocks(psrc->gplan->stmt_list, false);
}
}
+ /* not pass parsing, set psrc to NULL to prevent accidental modification. */
+ psrc = NULL;
}
}
}
@@ -3794,14 +3795,15 @@ pass_parsing:
/*
* Send ParseComplete.
*/
- bool need_redirect = libpqsw_process_parse_message(commandTag, psrc->query_list);
- if (need_redirect) {
- libpqsw_trace("we find pbe new transfer cmdtag:%s, sql:%s", commandTag, psrc == NULL ? "" : psrc->query_string);
- } else {
- libpqsw_trace("we find pbe new select cmdtag:%s, sql:%s", commandTag, psrc == NULL ? "" : psrc->query_string);
- if (t_thrd.postgres_cxt.whereToSendOutput == DestRemote)
- pq_putemptymessage('1');
+ bool need_redirect = libpqsw_process_parse_message(psrc->commandTag, psrc->query_list);
+ libpqsw_trace("we find pbe new %s cmdtag:%s, sql:%s",
+ need_redirect ? "transfer" : "select",
+ psrc->commandTag == NULL ? "" : psrc->commandTag,
+ psrc->query_string);
+ if (!need_redirect && t_thrd.postgres_cxt.whereToSendOutput == DestRemote) {
+ pq_putemptymessage('1');
}
+
/*
* Emit duration logging if appropriate.
*/
@@ -6171,7 +6173,6 @@ void RecoveryConflictInterrupt(ProcSignalReason reason)
case PROCSIG_RECOVERY_CONFLICT_LOCK:
case PROCSIG_RECOVERY_CONFLICT_TABLESPACE:
case PROCSIG_RECOVERY_CONFLICT_SNAPSHOT:
-
/*
* If we aren't in a transaction any longer then ignore.
*/
@@ -6375,6 +6376,9 @@ void ProcessInterrupts(void)
errdetail_recovery_conflict()));
} else if (t_thrd.postgres_cxt.RecoveryConflictPending) {
/* Currently there is only one non-retryable recovery conflict */
+ if (LWLockHeldByMe(ProcArrayLock)) {
+ LWLockRelease(ProcArrayLock);
+ }
Assert(t_thrd.postgres_cxt.RecoveryConflictReason == PROCSIG_RECOVERY_CONFLICT_DATABASE);
pgstat_report_recovery_conflict(t_thrd.postgres_cxt.RecoveryConflictReason);
ereport(FATAL,
@@ -8375,6 +8379,10 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam
t_thrd.log_cxt.msgbuf->cursor = 0;
t_thrd.log_cxt.msgbuf->len = 0;
lc_replan_nodegroup = InvalidOid;
+ /* reset xmin before ReadCommand, in case blocking redo */
+ if (RecoveryInProgress()) {
+ t_thrd.pgxact->xmin = InvalidTransactionId;
+ }
/*
* (1) If we've reached idle state, tell the frontend we're ready for
diff --git a/src/gausskernel/process/tcop/utility.cpp b/src/gausskernel/process/tcop/utility.cpp
index 72b1a88f3..60b1c7d77 100755
--- a/src/gausskernel/process/tcop/utility.cpp
+++ b/src/gausskernel/process/tcop/utility.cpp
@@ -40,6 +40,8 @@
#include "catalog/gs_global_config.h"
#include "catalog/gs_matview_dependency.h"
#include "catalog/gs_matview.h"
+#include "catalog/pg_job.h"
+#include "catalog/gs_job_attribute.h"
#include "commands/alter.h"
#include "commands/async.h"
#include "commands/cluster.h"
@@ -427,6 +429,10 @@ static void check_xact_readonly(Node* parse_tree)
case T_CreatedbStmt:
case T_CreateDomainStmt:
case T_CreateFunctionStmt:
+ case T_CreateEventStmt:
+ case T_AlterEventStmt:
+ case T_DropEventStmt:
+ case T_ShowEventStmt:
case T_CreateRoleStmt:
case T_IndexStmt:
case T_CreatePLangStmt:
@@ -4794,6 +4800,19 @@ void standard_ProcessUtility(processutility_context* processutility_cxt,
#endif
} break;
+ case T_CreateEventStmt: /* CREATE EVENT */
+ CreateEventCommand((CreateEventStmt*)parse_tree);
+ break;
+        case T_AlterEventStmt: /* ALTER EVENT */
+ AlterEventCommand((AlterEventStmt*)parse_tree);
+ break;
+ case T_DropEventStmt: /* DROP EVENT */
+ DropEventCommand((DropEventStmt*)parse_tree);
+ break;
+ case T_ShowEventStmt: /* SHOW EVENTS */
+ ShowEventCommand((ShowEventStmt*)parse_tree, dest);
+ break;
+
case T_CreatePackageStmt: /* CREATE PACKAGE SPECIFICATION*/
{
#ifdef ENABLE_MULTIPLE_NODES
@@ -7306,6 +7325,10 @@ static bool is_stmt_allowed_in_locked_mode(Node* parse_tree, const char* query_s
case T_RemoteQuery:
case T_CleanConnStmt:
case T_CreateFunctionStmt: // @Temp Table. create function's lock check is moved in CreateFunction
+ case T_CreateEventStmt:
+ case T_AlterEventStmt:
+ case T_DropEventStmt:
+ case T_ShowEventStmt:
return ALLOW;
default:
@@ -7727,6 +7750,7 @@ bool UtilityReturnsTuples(Node* parse_tree)
return true;
case T_VariableShowStmt:
+ case T_ShowEventStmt:
return true;
default:
@@ -7775,6 +7799,9 @@ TupleDesc UtilityTupleDescriptor(Node* parse_tree)
return GetPGVariableResultDesc(n->name);
}
+
+ case T_ShowEventStmt:
+ return GetEventResultDesc();
default:
return NULL;
@@ -8574,6 +8601,22 @@ const char* CreateCommandTag(Node* parse_tree)
tag = "CREATE PACKAGE BODY";
break;
}
+ case T_CreateEventStmt: {
+ tag = "CREATE EVENT";
+ break;
+ }
+ case T_AlterEventStmt: {
+ tag = "ALTER EVENT";
+ break;
+ }
+ case T_DropEventStmt: {
+ tag = "DROP EVENT";
+ break;
+ }
+ case T_ShowEventStmt: {
+ tag = "SHOW";
+ break;
+ }
case T_IndexStmt:
tag = "CREATE INDEX";
break;
@@ -9358,6 +9401,12 @@ const char* CreateAlterTableCommandTag(const AlterTableType subtype)
case AT_ReAddConstraint:
tag = "RE ADD CONSTRAINT";
break;
+ case AT_ResetPartitionno:
+ tag = "RESET PARTITIONNO";
+ break;
+ case AT_ModifyColumn:
+ tag = "MODIFY COLUMN";
+ break;
default:
tag = "?\?\?";
@@ -9579,9 +9628,16 @@ LogStmtLevel GetCommandLogLevel(Node* parse_tree)
break;
case T_AlterFunctionStmt:
+ case T_CreateEventStmt:
+ case T_AlterEventStmt:
+ case T_DropEventStmt:
lev = LOGSTMT_DDL;
break;
+ case T_ShowEventStmt:
+ lev = LOGSTMT_ALL;
+ break;
+
case T_IndexStmt:
lev = LOGSTMT_DDL;
break;
diff --git a/src/gausskernel/process/threadpool/knl_instance.cpp b/src/gausskernel/process/threadpool/knl_instance.cpp
index 9a539f9a8..f38b898a7 100755
--- a/src/gausskernel/process/threadpool/knl_instance.cpp
+++ b/src/gausskernel/process/threadpool/knl_instance.cpp
@@ -414,6 +414,12 @@ static void knl_g_conn_init(knl_g_conn_context* conn_cxt)
SpinLockInit(&conn_cxt->ConnCountLock);
}
+static void knl_g_listen_sock_init(knl_g_listen_context* listen_sock_cxt)
+{
+ listen_sock_cxt->reload_fds = false;
+ listen_sock_cxt->is_reloading_listen_socket = 0;
+}
+
static void knl_g_executor_init(knl_g_executor_context* exec_cxt)
{
exec_cxt->function_id_hashtbl = NULL;
@@ -982,6 +988,7 @@ void knl_instance_init()
#endif
knl_g_datadir_init(&g_instance.datadir_cxt);
+ knl_g_listen_sock_init(&g_instance.listen_cxt);
}
void add_numa_alloc_info(void* numaAddr, size_t length)
diff --git a/src/gausskernel/process/threadpool/knl_session.cpp b/src/gausskernel/process/threadpool/knl_session.cpp
index e2e23bb54..6d4d7f6aa 100755
--- a/src/gausskernel/process/threadpool/knl_session.cpp
+++ b/src/gausskernel/process/threadpool/knl_session.cpp
@@ -521,6 +521,7 @@ static void knl_u_plancache_init(knl_u_plancache_context* pcache_cxt)
pcache_cxt->gpc_in_batch = false;
pcache_cxt->action = NULL;
pcache_cxt->explored_plan_info = NULL;
+ pcache_cxt->is_plan_exploration = false;
pcache_cxt->generic_roots = NULL;
}
@@ -587,6 +588,7 @@ static void knl_u_proc_init(knl_u_proc_context* proc_cxt)
proc_cxt->clientIsGsdump = false;
proc_cxt->clientIsGsCtl = false;
proc_cxt->clientIsGsroach = false;
+ proc_cxt->clientIsCMAgent = false;
proc_cxt->IsBinaryUpgrade = false;
proc_cxt->IsWLMWhiteList = false;
proc_cxt->sessionBackupState = SESSION_BACKUP_NONE;
@@ -595,6 +597,8 @@ static void knl_u_proc_init(knl_u_proc_context* proc_cxt)
proc_cxt->registerAbortBackupHandlerdone = false;
proc_cxt->gsRewindAddCount = false;
proc_cxt->PassConnLimit = false;
+ proc_cxt->clientIsGsql = false;
+ proc_cxt->gsqlRemainCopyNum = 0;
proc_cxt->sessionBackupState = SESSION_BACKUP_NONE;
proc_cxt->registerExclusiveHandlerdone = false;
}
@@ -962,6 +966,8 @@ static void knl_u_storage_init(knl_u_storage_context* storage_cxt)
storage_cxt->num_bufs_in_block = 0;
storage_cxt->total_bufs_allocated = 0;
storage_cxt->LocalBufferContext = NULL;
+ storage_cxt->partition_dml_oids = NIL;
+ storage_cxt->partition_ddl_oids = NIL;
}
static void knl_u_libpq_init(knl_u_libpq_context* libpq_cxt)
diff --git a/src/gausskernel/process/threadpool/knl_thread.cpp b/src/gausskernel/process/threadpool/knl_thread.cpp
index 538aa893a..f28583207 100755
--- a/src/gausskernel/process/threadpool/knl_thread.cpp
+++ b/src/gausskernel/process/threadpool/knl_thread.cpp
@@ -897,9 +897,9 @@ static void knl_t_utils_init(knl_t_utils_context* utils_cxt)
utils_cxt->partId = (PartitionIdentifier*)palloc0(sizeof(PartitionIdentifier));
utils_cxt->gValueCompareContext = NULL;
utils_cxt->ContextUsedCount = 0;
-#define RANGE_PARTKEYMAXNUM 4
+
int rc = memset_s(
- utils_cxt->valueItemArr, RANGE_PARTKEYMAXNUM * sizeof(Const*), 0, RANGE_PARTKEYMAXNUM * sizeof(Const*));
+ utils_cxt->valueItemArr, PARTITION_PARTKEYMAXNUM * sizeof(Const*), 0, PARTITION_PARTKEYMAXNUM * sizeof(Const*));
securec_check(rc, "\0", "\0");
utils_cxt->CurrentResourceOwner = NULL;
utils_cxt->STPSavedResourceOwner = NULL;
@@ -1532,6 +1532,8 @@ static void knl_t_postmaster_init(knl_t_postmaster_context* postmaster_cxt)
securec_check(rc, "\0", "\0");
postmaster_cxt->HaShmData = NULL;
+ postmaster_cxt->can_listen_addresses_reload = false;
+ postmaster_cxt->is_listen_addresses_reload = false;
postmaster_cxt->IsRPCWorkerThread = false;
postmaster_cxt->audit_primary_start = true;
postmaster_cxt->audit_primary_failover = false;
@@ -1701,7 +1703,13 @@ static void knl_t_dms_context_init(knl_t_dms_context *dms_cxt)
dms_cxt->size = 0;
dms_cxt->file_size = 0;
}
-
+static void knl_t_rc_init(knl_t_rc_context* rc_cxt)
+{
+ errno_t rc = EOK;
+ rc = memset_s(rc_cxt, sizeof(knl_t_rc_context), 0, sizeof(knl_t_rc_context));
+ securec_check(rc, "\0", "\0");
+ return;
+}
#ifdef ENABLE_MOT
static void knl_t_mot_init(knl_t_mot_context* mot_cxt)
{
@@ -1899,6 +1907,7 @@ void knl_thread_init(knl_thread_role role)
KnlDcfContextInit(&t_thrd.dcf_cxt);
knl_t_page_compression_init(&t_thrd.page_compression_cxt);
knl_t_libsw_init(&t_thrd.libsw_cxt);
+ knl_t_rc_init(&t_thrd.rc_cxt);
}
__attribute__ ((__used__)) knl_thrd_context *GetCurrentThread()
diff --git a/src/gausskernel/process/threadpool/threadpool_worker.cpp b/src/gausskernel/process/threadpool/threadpool_worker.cpp
index e330fc3bc..0afdacf68 100644
--- a/src/gausskernel/process/threadpool/threadpool_worker.cpp
+++ b/src/gausskernel/process/threadpool/threadpool_worker.cpp
@@ -919,7 +919,9 @@ static bool InitPort(Port* port)
PortInitialize(port, NULL);
- CheckClientIp(port);
+ if (!CheckClientIp(port)) {
+ return false;
+ }
PreClientAuthorize();
diff --git a/src/gausskernel/runtime/executor/execGrouping.cpp b/src/gausskernel/runtime/executor/execGrouping.cpp
index a4204bb01..415799f87 100644
--- a/src/gausskernel/runtime/executor/execGrouping.cpp
+++ b/src/gausskernel/runtime/executor/execGrouping.cpp
@@ -49,7 +49,7 @@ static int TupleHashTableMatch(const void* key1, const void* key2, Size keysize)
* NB: evalContext is reset each time!
*/
bool execTuplesMatch(TupleTableSlot* slot1, TupleTableSlot* slot2, int numCols, AttrNumber* matchColIdx,
- FmgrInfo* eqfunctions, MemoryContext evalContext)
+ FmgrInfo* eqfunctions, MemoryContext evalContext, Oid *collations)
{
MemoryContext oldContext;
bool result = false;
@@ -87,9 +87,16 @@ bool execTuplesMatch(TupleTableSlot* slot1, TupleTableSlot* slot2, int numCols,
}
/* Apply the type-specific equality function */
- if (!DatumGetBool(FunctionCall2(&eqfunctions[i], attr1, attr2))) {
- result = false; /* they aren't equal */
- break;
+ if (DB_IS_CMPT(B_FORMAT) && collations != NULL) {
+ if (!DatumGetBool(FunctionCall2Coll(&eqfunctions[i], collations[i], attr1, attr2))) {
+ result = false; /* they aren't equal */
+ break;
+ }
+ } else {
+ if (!DatumGetBool(FunctionCall2(&eqfunctions[i], attr1, attr2))) {
+ result = false; /* representing not equal */
+ break;
+ }
}
}
@@ -109,7 +116,7 @@ bool execTuplesMatch(TupleTableSlot* slot1, TupleTableSlot* slot2, int numCols,
* Parameters are identical to execTuplesMatch.
*/
bool execTuplesUnequal(TupleTableSlot* slot1, TupleTableSlot* slot2, int numCols, AttrNumber* matchColIdx,
- FmgrInfo* eqfunctions, MemoryContext evalContext)
+ FmgrInfo* eqfunctions, MemoryContext evalContext, Oid *collations)
{
MemoryContext oldContext;
bool result = false;
@@ -148,9 +155,17 @@ bool execTuplesUnequal(TupleTableSlot* slot1, TupleTableSlot* slot2, int numCols
}
/* Apply the type-specific equality function */
- if (!DatumGetBool(FunctionCall2(&eqfunctions[i], attr1, attr2))) {
- result = true; /* they are unequal */
- break;
+ if (DB_IS_CMPT(B_FORMAT) && collations != NULL) {
+ if (!DatumGetBool(FunctionCall2Coll(&eqfunctions[i], collations[i], attr1, attr2))) {
+ result = true; /* they aren't equal */
+ break;
+ }
+ } else {
+ if (!DatumGetBool(FunctionCall2(&eqfunctions[i], attr1, attr2))) {
+ /* representing not equal */
+ result = true;
+ break;
+ }
}
}
@@ -249,7 +264,7 @@ void execTuplesHashPrepare(int numCols, Oid* eqOperators, FmgrInfo** eqFunctions
* storage that will live as long as the hashtable does.
*/
TupleHashTable BuildTupleHashTable(int numCols, AttrNumber* keyColIdx, FmgrInfo* eqfunctions, FmgrInfo* hashfunctions,
- long nbuckets, Size entrysize, MemoryContext tablecxt, MemoryContext tempcxt, int workMem)
+ long nbuckets, Size entrysize, MemoryContext tablecxt, MemoryContext tempcxt, int workMem, Oid *collations)
{
TupleHashTable hashtable;
HASHCTL hash_ctl;
@@ -288,6 +303,7 @@ TupleHashTable BuildTupleHashTable(int numCols, AttrNumber* keyColIdx, FmgrInfo*
hash_ctl.hcxt = tablecxt;
hashtable->hashtab =
hash_create("TupleHashTable", nbuckets, &hash_ctl, HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+ hashtable->tab_collations = collations;
return hashtable;
}
@@ -489,7 +505,11 @@ static uint32 TupleHashTableHash(const void* key, Size keysize)
/* treat nulls as having hash key 0 */
if (!isNull) {
uint32 hkey;
- hkey = DatumGetUInt32(FunctionCall1(&hashfunctions[i], attr));
+ if (DB_IS_CMPT(B_FORMAT) && hashtable->tab_collations != NULL) {
+ hkey = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[i], hashtable->tab_collations[i], attr));
+ } else {
+ hkey = DatumGetUInt32(FunctionCall1(&hashfunctions[i], attr));
+ }
hashkey ^= hkey;
}
}
@@ -534,9 +554,10 @@ static int TupleHashTableMatch(const void* key1, const void* key2, Size keysize)
slot2 = hashtable->inputslot;
/* For crosstype comparisons, the inputslot must be first */
- if (execTuplesMatch(
- slot2, slot1, hashtable->numCols, hashtable->keyColIdx, hashtable->cur_eq_funcs, hashtable->tempcxt))
+ if (execTuplesMatch(slot2, slot1, hashtable->numCols, hashtable->keyColIdx, hashtable->cur_eq_funcs,
+ hashtable->tempcxt, hashtable->tab_collations)) {
return 0;
- else
+ } else {
return 1;
+ }
}
diff --git a/src/gausskernel/runtime/executor/execMain.cpp b/src/gausskernel/runtime/executor/execMain.cpp
index a25e031e3..704262047 100755
--- a/src/gausskernel/runtime/executor/execMain.cpp
+++ b/src/gausskernel/runtime/executor/execMain.cpp
@@ -49,6 +49,7 @@
#include "access/transam.h"
#include "access/xact.h"
#include "access/ustore/knl_uheap.h"
+#include "catalog/pg_partition_fn.h"
#include "catalog/pg_statistic.h"
#include "catalog/pg_statistic_ext.h"
#include "catalog/namespace.h"
@@ -512,6 +513,7 @@ void ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, long count)
}
print_duration(queryDesc);
instr_stmt_report_query_plan(queryDesc);
+ instr_stmt_report_cause_type(queryDesc->plannedstmt->cause_type);
/* sql active feature, opeartor history statistics */
if (can_operator_history_statistics) {
@@ -1204,6 +1206,19 @@ void InitPlan(QueryDesc *queryDesc, int eflags)
bool check = false;
gstrace_entry(GS_TRC_ID_InitPlan);
+
+ /* We release the partition object lock in InitPlan, here the snapshot is already obtained, so instantaneous
+ * inconsistency will never happen. See pg_partition_fn.h for more detail. Distribute mode doesn't support
+ * partition DDL/DML parallel work, so this action is not needed. */
+#ifndef ENABLE_MULTIPLE_NODES
+ ListCell *cell;
+ foreach(cell, u_sess->storage_cxt.partition_dml_oids) {
+ UnlockPartitionObject(lfirst_oid(cell), PARTITION_OBJECT_LOCK_SDEQUENCE, PARTITION_SHARE_LOCK);
+ }
+ list_free_ext(u_sess->storage_cxt.partition_dml_oids);
+ u_sess->storage_cxt.partition_dml_oids = NIL;
+#endif
+
/*
* Do permissions checks
*/
diff --git a/src/gausskernel/runtime/executor/execQual.cpp b/src/gausskernel/runtime/executor/execQual.cpp
index 4751c9737..e35189a83 100644
--- a/src/gausskernel/runtime/executor/execQual.cpp
+++ b/src/gausskernel/runtime/executor/execQual.cpp
@@ -74,6 +74,7 @@
#include "catalog/pg_proc_fn.h"
#include "access/tuptoaster.h"
#include "parser/parse_expr.h"
+#include "auditfuncs.h"
/* static function decls */
static bool isAssignmentIndirectionExpr(ExprState* exprstate);
@@ -2153,6 +2154,9 @@ restart:
fcinfo->isnull = false;
rsinfo.isDone = ExprSingleResult;
result = FunctionCallInvoke(fcinfo);
+ if (AUDIT_SYSTEM_EXEC_ENABLED) {
+ audit_system_function(fcinfo, AUDIT_OK);
+ }
*isNull = fcinfo->isnull;
*isDone = rsinfo.isDone;
@@ -2577,6 +2581,9 @@ static Datum ExecMakeFunctionResultNoSets(
result = FunctionCallInvoke(fcinfo);
}
*isNull = fcinfo->isnull;
+ if (AUDIT_SYSTEM_EXEC_ENABLED) {
+ audit_system_function(fcinfo, AUDIT_OK);
+ }
if (has_refcursor && econtext->plpgsql_estate != NULL) {
PLpgSQL_execstate* estate = econtext->plpgsql_estate;
@@ -2931,6 +2938,9 @@ Tuplestorestate* ExecMakeTableFunctionResult(
fcinfo.isnull = false;
rsinfo.isDone = ExprSingleResult;
result = FunctionCallInvoke(&fcinfo);
+ if (AUDIT_SYSTEM_EXEC_ENABLED) {
+ audit_system_function(&fcinfo, AUDIT_OK);
+ }
if (econtext->plpgsql_estate != NULL) {
PLpgSQL_execstate* estate = econtext->plpgsql_estate;
@@ -5981,6 +5991,21 @@ ExprState* ExecInitExpr(Expr* node, PlanState* parent)
return state;
}
+/*
+ * ExecInitExprList: call ExecInitExpr on an expression list, return a list of ExprStates.
+ */
+List* ExecInitExprList(List* nodes, PlanState *parent)
+{
+ List* result = NIL;
+ ListCell* lc = NULL;
+
+ foreach (lc, nodes) {
+ Expr* expression = (Expr*)lfirst(lc);
+ result = lappend(result, ExecInitExpr(expression, parent));
+ }
+ return result;
+}
+
/*
* ExecPrepareExpr --- initialize for expression execution outside a normal
* Plan tree context.
diff --git a/src/gausskernel/runtime/executor/execReplication.cpp b/src/gausskernel/runtime/executor/execReplication.cpp
index 14a07cbd8..0f53e3221 100644
--- a/src/gausskernel/runtime/executor/execReplication.cpp
+++ b/src/gausskernel/runtime/executor/execReplication.cpp
@@ -842,14 +842,15 @@ void GetFakeRelAndPart(EState *estate, Relation rel, TupleTableSlot *slot, FakeR
Relation partRelation = NULL;
Partition partition = NULL;
Oid partitionOid;
+ int partitionno = INVALID_PARTITION_NO;
Tuple tuple = tableam_tslot_get_tuple_from_slot(rel, slot);
switch (rel->rd_rel->parttype) {
case PARTTYPE_NON_PARTITIONED_RELATION:
case PARTTYPE_VALUE_PARTITIONED_RELATION:
break;
case PARTTYPE_PARTITIONED_RELATION:
- partitionOid = heapTupleGetPartitionId(rel, tuple);
- searchFakeReationForPartitionOid(estate->esfRelations, estate->es_query_cxt, rel, partitionOid,
+ partitionOid = heapTupleGetPartitionId(rel, tuple, &partitionno);
+ searchFakeReationForPartitionOid(estate->esfRelations, estate->es_query_cxt, rel, partitionOid, partitionno,
partRelation, partition, RowExclusiveLock);
relAndPart->partRel = partRelation;
relAndPart->part = partition;
@@ -859,12 +860,13 @@ void GetFakeRelAndPart(EState *estate, Relation rel, TupleTableSlot *slot, FakeR
Relation subPartRel = NULL;
Partition subPart = NULL;
Oid subPartOid;
- partitionOid = heapTupleGetPartitionId(rel, tuple);
- searchFakeReationForPartitionOid(estate->esfRelations, estate->es_query_cxt, rel, partitionOid,
+ int subpartitionno = INVALID_PARTITION_NO;
+ partitionOid = heapTupleGetPartitionId(rel, tuple, &partitionno);
+ searchFakeReationForPartitionOid(estate->esfRelations, estate->es_query_cxt, rel, partitionOid, partitionno,
partRelation, partition, RowExclusiveLock);
- subPartOid = heapTupleGetPartitionId(partRelation, tuple);
+ subPartOid = heapTupleGetPartitionId(partRelation, tuple, &subpartitionno);
searchFakeReationForPartitionOid(estate->esfRelations, estate->es_query_cxt, partRelation, subPartOid,
- subPartRel, subPart, RowExclusiveLock);
+ subpartitionno, subPartRel, subPart, RowExclusiveLock);
relAndPart->partRel = subPartRel;
relAndPart->part = subPart;
diff --git a/src/gausskernel/runtime/executor/execScan.cpp b/src/gausskernel/runtime/executor/execScan.cpp
old mode 100644
new mode 100755
index a2303d41b..a1f7bb597
--- a/src/gausskernel/runtime/executor/execScan.cpp
+++ b/src/gausskernel/runtime/executor/execScan.cpp
@@ -180,10 +180,12 @@ TupleTableSlot* ExecScan(ScanState* node, ExecScanAccessMtd access_mtd, /* funct
* tupleDesc.
*/
if (TupIsNull(slot) || unlikely(executorEarlyStop())) {
- if (proj_info != NULL)
+ if (proj_info != NULL) {
return ExecClearTuple(proj_info->pi_slot);
- else
- return slot;
+ } else {
+ /* slot is not used while early free happens */
+ return NULL;
+ }
}
/*
diff --git a/src/gausskernel/runtime/executor/execUtils.cpp b/src/gausskernel/runtime/executor/execUtils.cpp
index 2810cb465..22eb185c5 100644
--- a/src/gausskernel/runtime/executor/execUtils.cpp
+++ b/src/gausskernel/runtime/executor/execUtils.cpp
@@ -1338,6 +1338,8 @@ void ExecDeleteIndexTuples(TupleTableSlot* slot, ItemPointer tupleid, EState* es
if (!RelationIsUstoreFormat(heapRelation))
return;
+ AcceptInvalidationMessages();
+
/*
* for each index, form and insert the index tuple
*/
@@ -1361,6 +1363,10 @@ void ExecDeleteIndexTuples(TupleTableSlot* slot, ItemPointer tupleid, EState* es
continue;
}
+ if (!IndexIsUsable(indexRelation->rd_index)) {
+ continue;
+ }
+
/* modifiedIdxAttrs != NULL means updating, not every index are affected */
if (inplaceUpdated && modifiedIdxAttrs != NULL) {
/* Collect attribute Bitmapset of this index, and compare with modifiedIdxAttrs */
@@ -1390,6 +1396,7 @@ void ExecDeleteIndexTuples(TupleTableSlot* slot, ItemPointer tupleid, EState* es
estate->es_query_cxt,
indexRelation,
indexpartitionid,
+ INVALID_PARTITION_NO,
actualindex,
indexpartition,
RowExclusiveLock);
@@ -1460,22 +1467,6 @@ void ExecUHeapDeleteIndexTuplesGuts(
}
}
-static inline int128 datum2autoinc(ConstrAutoInc *cons_autoinc, Datum datum)
-{
- if (cons_autoinc->datum2autoinc_func != NULL) {
- return DatumGetInt128(DirectFunctionCall1((PGFunction)(uintptr_t)cons_autoinc->datum2autoinc_func, datum));
- }
- return DatumGetInt128(datum);
-}
-
-static inline Datum autoinc2datum(ConstrAutoInc *cons_autoinc, int128 autoinc)
-{
- if (cons_autoinc->autoinc2datum_func != NULL) {
- return DirectFunctionCall1((PGFunction)(uintptr_t)cons_autoinc->autoinc2datum_func, Int128GetDatum(autoinc));
- }
- return Int128GetDatum(autoinc);
-}
-
static Tuple autoinc_modify_tuple(TupleDesc desc, EState* estate, TupleTableSlot* slot, Tuple tuple, int128 autoinc)
{
uint32 natts = (uint32)desc->natts;
@@ -1734,6 +1725,7 @@ bool ExecCheckIndexConstraints(TupleTableSlot *slot, EState *estate, Relation ta
estate->es_query_cxt,
indexRelation,
indexpartitionid,
+ INVALID_PARTITION_NO,
actualIndex,
indexpartition,
RowExclusiveLock);
@@ -1917,6 +1909,7 @@ List* ExecInsertIndexTuples(TupleTableSlot* slot, ItemPointer tupleid, EState* e
estate->es_query_cxt,
indexRelation,
indexpartitionid,
+ INVALID_PARTITION_NO,
actualindex,
indexpartition,
RowExclusiveLock);
diff --git a/src/gausskernel/runtime/executor/functions.cpp b/src/gausskernel/runtime/executor/functions.cpp
index 2e38c3ea9..2d052ec9d 100644
--- a/src/gausskernel/runtime/executor/functions.cpp
+++ b/src/gausskernel/runtime/executor/functions.cpp
@@ -1530,6 +1530,28 @@ static void ShutdownSQLFunction(Datum arg)
fcache->shutdown_reg = false;
}
+/*
+ * check_if_exist_client_logic_type()
+ * check if return value of a list exist client encryption type. if exist, report error.
+ */
+void check_if_exist_client_logic_type(List *tlist, Oid ret_type)
+{
+ if (ret_type != RECORDOID) {
+ return;
+ }
+ ListCell* lc = NULL;
+ foreach (lc, tlist) {
+ TargetEntry* tle = (TargetEntry*)lfirst(lc);
+ Oid tle_type = exprType((Node*)tle->expr);
+ if (IsClientLogicType(tle_type)) {
+ ereport(ERROR, (errcode(ERRCODE_OPERATE_NOT_SUPPORTED),
+ errmsg("Un-support to RETURN RECORD or RETURN SETOF RECORD when return client encryption columns."),
+ errhint("You possibly can use RETURN table(column_name column_type[,...]) instead of RETURN RECORD.")));
+ }
+ }
+ return;
+}
+
/*
* check_sql_fn_retval() -- check return value of a list of sql parse trees.
*
@@ -1737,6 +1759,7 @@ bool check_sql_fn_retval(Oid func_id, Oid ret_type, List* query_tree_list, bool*
/* Is the rowtype fixed, or determined only at runtime? */
if (get_func_result_type(func_id, NULL, &tup_desc) != TYPEFUNC_COMPOSITE) {
+ check_if_exist_client_logic_type(tlist, ret_type);
/*
* Assume we are returning the whole tuple. Crosschecking against
* what the caller expects will happen at runtime.
diff --git a/src/gausskernel/runtime/executor/lightProxy.cpp b/src/gausskernel/runtime/executor/lightProxy.cpp
index 237668730..1b279b7b4 100644
--- a/src/gausskernel/runtime/executor/lightProxy.cpp
+++ b/src/gausskernel/runtime/executor/lightProxy.cpp
@@ -36,6 +36,7 @@
#include "utils/snapmgr.h"
#include "pgstat.h"
#include "pgaudit.h"
+#include "auditfuncs.h"
#include "pgxc/route.h"
#include "libpq/pqformat.h"
#include "gs_policy/policy_common.h"
@@ -1019,8 +1020,9 @@ void lightProxy::runSimpleQuery(StringInfo exec_message)
handleResponse();
/* pgaudit */
- if ((u_sess->attr.attr_security.Audit_DML_SELECT != 0 || u_sess->attr.attr_security.Audit_DML != 0) &&
- u_sess->attr.attr_security.Audit_enabled && IsPostmasterEnvironment) {
+ bool is_full_audit_user = audit_check_full_audit_user();
+ if ((u_sess->attr.attr_security.Audit_DML_SELECT != 0 || u_sess->attr.attr_security.Audit_DML != 0 ||
+ is_full_audit_user) && u_sess->attr.attr_security.Audit_enabled && IsPostmasterEnvironment) {
light_pgaudit_ExecutorEnd(m_query);
}
/* unified auditing policy */
@@ -1097,8 +1099,9 @@ int lightProxy::runBatchMsg(StringInfo batch_message, bool sendDMsg, int batch_c
CommandCounterIncrement();
/* pgaudit */
- if ((u_sess->attr.attr_security.Audit_DML_SELECT != 0 || u_sess->attr.attr_security.Audit_DML != 0) &&
- u_sess->attr.attr_security.Audit_enabled && IsPostmasterEnvironment) {
+ bool is_full_audit_user = audit_check_full_audit_user();
+ if ((u_sess->attr.attr_security.Audit_DML_SELECT != 0 || u_sess->attr.attr_security.Audit_DML != 0 ||
+ is_full_audit_user) && u_sess->attr.attr_security.Audit_enabled && IsPostmasterEnvironment) {
for (int i = 0; i < batch_count; i++)
light_pgaudit_ExecutorEnd((Query*)linitial(m_cplan->query_list));
}
@@ -1213,8 +1216,9 @@ void lightProxy::runMsg(StringInfo exec_message)
}
/* pgaudit */
- if ((u_sess->attr.attr_security.Audit_DML_SELECT != 0 || u_sess->attr.attr_security.Audit_DML != 0) &&
- u_sess->attr.attr_security.Audit_enabled && IsPostmasterEnvironment) {
+ bool is_full_audit_user = audit_check_full_audit_user();
+ if ((u_sess->attr.attr_security.Audit_DML_SELECT != 0 || u_sess->attr.attr_security.Audit_DML != 0 ||
+ is_full_audit_user) && u_sess->attr.attr_security.Audit_enabled && IsPostmasterEnvironment) {
light_pgaudit_ExecutorEnd((Query*)linitial(m_cplan->query_list));
}
/* unified auditing policy */
diff --git a/src/gausskernel/runtime/executor/nodeAgg.cpp b/src/gausskernel/runtime/executor/nodeAgg.cpp
index 8a3b5c135..4677a20b5 100644
--- a/src/gausskernel/runtime/executor/nodeAgg.cpp
+++ b/src/gausskernel/runtime/executor/nodeAgg.cpp
@@ -811,6 +811,7 @@ static void process_ordered_aggregate_multi(
TupleTableSlot* slot2 = peraggstate->uniqslot;
int numTransInputs = peraggstate->numTransInputs;
int numDistinctCols = peraggstate->numDistinctCols;
+ Oid* sortCollations = peraggstate->sortCollations;
Datum newAbbrevVal = (Datum)0;
Datum oldAbbrevVal = (Datum)0;
bool haveOldValue = false;
@@ -830,8 +831,8 @@ static void process_ordered_aggregate_multi(
tableam_tslot_getsomeattrs(slot1, numTransInputs);
if (numDistinctCols == 0 || !haveOldValue || newAbbrevVal != oldAbbrevVal ||
- !execTuplesMatch(
- slot1, slot2, numDistinctCols, peraggstate->sortColIdx, peraggstate->equalfns, workcontext)) {
+ !execTuplesMatch(slot1, slot2, numDistinctCols, peraggstate->sortColIdx,
+ peraggstate->equalfns, workcontext, sortCollations)) {
/* Init FunctionCallInfoData for transition function before loading argument values. */
InitFunctionCallInfoData(fcinfo,
&(peraggstate->transfn),
@@ -1176,7 +1177,8 @@ static void build_hash_table(AggState* aggstate)
entrysize,
aggstate->aggcontexts[0],
tmpmem,
- workMem);
+ workMem,
+ node->grp_collations);
}
/*
@@ -1637,7 +1639,8 @@ static TupleTableSlot* agg_retrieve_direct(AggState* aggstate)
nextSetSize,
node->grpColIdx,
aggstate->phase->eqfunctions,
- tmpcontext->ecxt_per_tuple_memory))) {
+ tmpcontext->ecxt_per_tuple_memory,
+ node->grp_collations))) {
aggstate->projected_set += 1;
Assert(aggstate->projected_set < numGroupingSets);
@@ -1748,7 +1751,7 @@ static TupleTableSlot* agg_retrieve_direct(AggState* aggstate)
*/
if (node->aggstrategy == AGG_SORTED) {
if (!execTuplesMatch(firstSlot, outerslot, node->numCols, node->grpColIdx,
- aggstate->phase->eqfunctions, tmpcontext->ecxt_per_tuple_memory)) {
+ aggstate->phase->eqfunctions, tmpcontext->ecxt_per_tuple_memory, node->grp_collations)) {
aggstate->grp_firstTuple = ExecCopySlotTuple(outerslot);
break;
}
diff --git a/src/gausskernel/runtime/executor/nodeBitmapHeapscan.cpp b/src/gausskernel/runtime/executor/nodeBitmapHeapscan.cpp
index 1b7e6885d..cf028dbd2 100644
--- a/src/gausskernel/runtime/executor/nodeBitmapHeapscan.cpp
+++ b/src/gausskernel/runtime/executor/nodeBitmapHeapscan.cpp
@@ -1031,36 +1031,45 @@ static void ExecInitPartitionForBitmapHeapScan(BitmapHeapScanState* scanstate, E
scanstate->ss.part_id = 0;
}
- ListCell* cell = NULL;
+ ListCell* cell1 = NULL;
+ ListCell* cell2 = NULL;
List* part_seqs = resultPlan->ls_rangeSelectedPartitions;
+ List* partitionnos = resultPlan->ls_selectedPartitionnos;
+ Assert(list_length(part_seqs) == list_length(partitionnos));
relistarget = ExecRelationIsTargetRelation(estate, plan->scan.scanrelid);
lock = (relistarget ? RowExclusiveLock : AccessShareLock);
scanstate->ss.lockMode = lock;
- foreach (cell, part_seqs) {
+ forboth (cell1, part_seqs, cell2, partitionnos) {
Oid tablepartitionid = InvalidOid;
- int partSeq = lfirst_int(cell);
+ int partSeq = lfirst_int(cell1);
+ int partitionno = lfirst_int(cell2);
/* add table partition to list */
- tablepartitionid = getPartitionOidFromSequence(currentRelation, partSeq, plan->scan.pruningInfo->partMap);
- tablepartition = partitionOpen(currentRelation, tablepartitionid, lock);
+ tablepartitionid = getPartitionOidFromSequence(currentRelation, partSeq, partitionno);
+ tablepartition = PartitionOpenWithPartitionno(currentRelation, tablepartitionid, partitionno, lock);
scanstate->ss.partitions = lappend(scanstate->ss.partitions, tablepartition);
if (resultPlan->ls_selectedSubPartitions != NIL) {
Relation partRelation = partitionGetRelation(currentRelation, tablepartition);
SubPartitionPruningResult* subPartPruningResult =
- GetSubPartitionPruningResult(resultPlan->ls_selectedSubPartitions, partSeq);
+ GetSubPartitionPruningResult(resultPlan->ls_selectedSubPartitions, partSeq, partitionno);
if (subPartPruningResult == NULL) {
continue;
}
List *subpart_seqs = subPartPruningResult->ls_selectedSubPartitions;
+ List *subpartitionnos = subPartPruningResult->ls_selectedSubPartitionnos;
+ Assert(list_length(subpart_seqs) == list_length(subpartitionnos));
List *subpartition = NULL;
- ListCell *lc = NULL;
- foreach (lc, subpart_seqs) {
+ ListCell *lc1 = NULL;
+ ListCell *lc2 = NULL;
+ forboth (lc1, subpart_seqs, lc2, subpartitionnos) {
Oid subpartitionid = InvalidOid;
- int subpartSeq = lfirst_int(lc);
+ int subpartSeq = lfirst_int(lc1);
+ int subpartitionno = lfirst_int(lc2);
- subpartitionid = getPartitionOidFromSequence(partRelation, subpartSeq);
- Partition subpart = partitionOpen(partRelation, subpartitionid, lock);
+ subpartitionid = getPartitionOidFromSequence(partRelation, subpartSeq, subpartitionno);
+ Partition subpart =
+ PartitionOpenWithPartitionno(partRelation, subpartitionid, subpartitionno, lock);
subpartition = lappend(subpartition, subpart);
}
releaseDummyRelation(&(partRelation));
diff --git a/src/gausskernel/runtime/executor/nodeBitmapIndexscan.cpp b/src/gausskernel/runtime/executor/nodeBitmapIndexscan.cpp
index e27e9ce91..4136698fc 100644
--- a/src/gausskernel/runtime/executor/nodeBitmapIndexscan.cpp
+++ b/src/gausskernel/runtime/executor/nodeBitmapIndexscan.cpp
@@ -556,36 +556,50 @@ void ExecInitPartitionForBitmapIndexScan(BitmapIndexScanState* indexstate, EStat
indexstate->ss.part_id = 0;
}
- ListCell* cell = NULL;
+ ListCell* cell1 = NULL;
+ ListCell* cell2 = NULL;
List* part_seqs = resultPlan->ls_rangeSelectedPartitions;
+ List* partitionnos = resultPlan->ls_selectedPartitionnos;
+ Assert(list_length(part_seqs) == list_length(partitionnos));
+ StringInfo partNameInfo = makeStringInfo();
+ StringInfo partOidInfo = makeStringInfo();
- foreach (cell, part_seqs) {
+ forboth (cell1, part_seqs, cell2, partitionnos) {
Oid tablepartitionid = InvalidOid;
- int partSeq = lfirst_int(cell);
+ int partSeq = lfirst_int(cell1);
+ int partitionno = lfirst_int(cell2);
Oid indexpartitionid = InvalidOid;
Partition tablePartition = NULL;
List* partitionIndexOidList = NIL;
/* get index partition list for the special index */
- tablepartitionid = getPartitionOidFromSequence(rel, partSeq, plan->scan.pruningInfo->partMap);
- tablePartition = partitionOpen(rel, tablepartitionid, lock);
+ tablepartitionid = getPartitionOidFromSequence(rel, partSeq, partitionno);
+ tablePartition = PartitionOpenWithPartitionno(rel, tablepartitionid, partitionno, lock);
+
+ appendStringInfo(partNameInfo, "%s ", tablePartition->pd_part->relname.data);
+ appendStringInfo(partOidInfo, "%u ", tablepartitionid);
if (RelationIsSubPartitioned(rel)) {
- ListCell *lc = NULL;
+ ListCell *lc1 = NULL;
+ ListCell *lc2 = NULL;
SubPartitionPruningResult *subPartPruningResult =
- GetSubPartitionPruningResult(resultPlan->ls_selectedSubPartitions, partSeq);
+ GetSubPartitionPruningResult(resultPlan->ls_selectedSubPartitions, partSeq, partitionno);
if (subPartPruningResult == NULL) {
continue;
}
List *subpartList = subPartPruningResult->ls_selectedSubPartitions;
+ List *subpartitionnos = subPartPruningResult->ls_selectedSubPartitionnos;
+ Assert(list_length(subpartList) == list_length(subpartitionnos));
List *subIndexList = NULL;
- foreach (lc, subpartList)
+ forboth (lc1, subpartList, lc2, subpartitionnos)
{
- int subpartSeq = lfirst_int(lc);
+ int subpartSeq = lfirst_int(lc1);
+ int subpartitionno = lfirst_int(lc2);
Relation tablepartrel = partitionGetRelation(rel, tablePartition);
- Oid subpartitionid = getPartitionOidFromSequence(tablepartrel, subpartSeq);
- Partition subpart = partitionOpen(tablepartrel, subpartitionid, AccessShareLock);
+ Oid subpartitionid = getPartitionOidFromSequence(tablepartrel, subpartSeq, subpartitionno);
+ Partition subpart =
+ PartitionOpenWithPartitionno(tablepartrel, subpartitionid, subpartitionno, AccessShareLock);
partitionIndexOidList = PartitionGetPartIndexList(subpart);
diff --git a/src/gausskernel/runtime/executor/nodeGroup.cpp b/src/gausskernel/runtime/executor/nodeGroup.cpp
index fcb0fbe44..eb7d6c974 100644
--- a/src/gausskernel/runtime/executor/nodeGroup.cpp
+++ b/src/gausskernel/runtime/executor/nodeGroup.cpp
@@ -41,6 +41,7 @@ static TupleTableSlot* ExecGroup(PlanState* state)
AttrNumber* grpColIdx = NULL;
TupleTableSlot* firsttupleslot = NULL;
TupleTableSlot* outerslot = NULL;
+ Oid* grpCollations = NULL;
CHECK_FOR_INTERRUPTS();
@@ -52,6 +53,7 @@ static TupleTableSlot* ExecGroup(PlanState* state)
econtext = node->ss.ps.ps_ExprContext;
numCols = ((Group*)node->ss.ps.plan)->numCols;
grpColIdx = ((Group*)node->ss.ps.plan)->grpColIdx;
+ grpCollations = ((Group*)node->ss.ps.plan)->grp_collations;
/*
* Check to see if we're still projecting out tuples from a previous group
@@ -140,9 +142,10 @@ static TupleTableSlot* ExecGroup(PlanState* state)
* Compare with first tuple and see if this tuple is of the same
* group. If so, ignore it and keep scanning.
*/
- if (!execTuplesMatch(
- firsttupleslot, outerslot, numCols, grpColIdx, node->eqfunctions, econtext->ecxt_per_tuple_memory))
+ if (!execTuplesMatch(firsttupleslot, outerslot, numCols, grpColIdx, node->eqfunctions,
+ econtext->ecxt_per_tuple_memory, grpCollations)) {
break;
+ }
}
/*
diff --git a/src/gausskernel/runtime/executor/nodeHash.cpp b/src/gausskernel/runtime/executor/nodeHash.cpp
index 447e4adf4..2c9fd5523 100644
--- a/src/gausskernel/runtime/executor/nodeHash.cpp
+++ b/src/gausskernel/runtime/executor/nodeHash.cpp
@@ -284,7 +284,7 @@ void ExecEndHash(HashState* node)
* create an empty hashtable data structure for hashjoin.
* ----------------------------------------------------------------
*/
-HashJoinTable ExecHashTableCreate(Hash* node, List* hashOperators, bool keepNulls)
+HashJoinTable ExecHashTableCreate(Hash* node, List* hashOperators, bool keepNulls, List *hash_collations)
{
HashJoinTable hashtable;
Plan* outerNode = NULL;
@@ -297,6 +297,7 @@ HashJoinTable ExecHashTableCreate(Hash* node, List* hashOperators, bool keepNull
int64 local_work_mem = SET_NODEMEM(node->plan.operatorMemKB[0], node->plan.dop);
int64 max_mem = (node->plan.operatorMaxMem > 0) ? SET_NODEMEM(node->plan.operatorMaxMem, node->plan.dop) : 0;
ListCell* ho = NULL;
+ ListCell* hc = NULL;
MemoryContext oldcxt;
/*
@@ -384,8 +385,9 @@ HashJoinTable ExecHashTableCreate(Hash* node, List* hashOperators, bool keepNull
hashtable->outer_hashfunctions = (FmgrInfo*)palloc(nkeys * sizeof(FmgrInfo));
hashtable->inner_hashfunctions = (FmgrInfo*)palloc(nkeys * sizeof(FmgrInfo));
hashtable->hashStrict = (bool*)palloc(nkeys * sizeof(bool));
+ hashtable->collations = (Oid *)palloc(nkeys * sizeof(Oid));
i = 0;
- foreach (ho, hashOperators) {
+ forboth (ho, hashOperators, hc, hash_collations) {
Oid hashop = lfirst_oid(ho);
Oid left_hashfn;
Oid right_hashfn;
@@ -398,6 +400,7 @@ HashJoinTable ExecHashTableCreate(Hash* node, List* hashOperators, bool keepNull
fmgr_info(left_hashfn, &hashtable->outer_hashfunctions[i]);
fmgr_info(right_hashfn, &hashtable->inner_hashfunctions[i]);
hashtable->hashStrict[i] = op_strict(hashop);
+ hashtable->collations[i] = lfirst_oid(hc);
i++;
}
@@ -1399,7 +1402,7 @@ bool ExecHashGetHashValue(HashJoinTable hashtable, ExprContext* econtext, List*
/* Compute the hash function */
uint32 hkey;
- hkey = DatumGetUInt32(FunctionCall1(&hashfunctions[i], keyval));
+ hkey = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[i], hashtable->collations[i], keyval));
hashkey ^= hkey;
}
@@ -1787,7 +1790,7 @@ static void ExecHashBuildSkewHash(HashJoinTable hashtable, Hash* node, int mcvsT
uint32 hashvalue;
int bucket;
- hashvalue = DatumGetUInt32(FunctionCall1(&hashfunctions[0], values[i]));
+ hashvalue = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[0], hashtable->collations[0], values[i]));
/*
* While we have not hit a hole in the hashtable and have not hit
diff --git a/src/gausskernel/runtime/executor/nodeHashjoin.cpp b/src/gausskernel/runtime/executor/nodeHashjoin.cpp
index 01b83edc4..02dc75bd2 100755
--- a/src/gausskernel/runtime/executor/nodeHashjoin.cpp
+++ b/src/gausskernel/runtime/executor/nodeHashjoin.cpp
@@ -186,7 +186,7 @@ static TupleTableSlot* ExecHashJoin(PlanState* state)
}
hashtable = ExecHashTableCreate((Hash*)hashNode->ps.plan, node->hj_HashOperators,
- HJ_FILL_INNER(node) || node->js.nulleqqual != NIL);
+ HJ_FILL_INNER(node) || node->js.nulleqqual != NIL, node->hj_hash_collations);
if (oldcxt) {
/*enable_memory_limit*/
@@ -670,6 +670,7 @@ HashJoinState* ExecInitHashJoin(HashJoin* node, EState* estate, int eflags)
hjstate->hj_OuterHashKeys = lclauses;
hjstate->hj_InnerHashKeys = rclauses;
hjstate->hj_HashOperators = hoperators;
+ hjstate->hj_hash_collations = node->hash_collations;
/* child Hash node needs to evaluate inner hash keys, too */
((HashState*)innerPlanState(hjstate))->hashkeys = rclauses;
diff --git a/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp b/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp
index ee69070a0..6ba2ff4c0 100644
--- a/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp
+++ b/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp
@@ -959,33 +959,49 @@ void ExecInitPartitionForIndexOnlyScan(IndexOnlyScanState* indexstate, EState* e
indexstate->ss.part_id = 0;
}
- ListCell* cell = NULL;
+ ListCell* cell1 = NULL;
+ ListCell* cell2 = NULL;
List* part_seqs = resultPlan->ls_rangeSelectedPartitions;
- foreach (cell, part_seqs) {
+ List* partitionnos = resultPlan->ls_selectedPartitionnos;
+ Assert(list_length(part_seqs) == list_length(partitionnos));
+ StringInfo partNameInfo = makeStringInfo();
+ StringInfo partOidInfo = makeStringInfo();
+
+ forboth (cell1, part_seqs, cell2, partitionnos) {
Oid indexpartitionid = InvalidOid;
List* partitionIndexOidList = NIL;
Oid tablepartitionid = InvalidOid;
- int partSeq = lfirst_int(cell);
+ int partSeq = lfirst_int(cell1);
+ int partitionno = lfirst_int(cell2);
/* get table partition and add it to a list for following scan */
- tablepartitionid = getPartitionOidFromSequence(currentRelation, partSeq, plan->scan.pruningInfo->partMap);
- tablepartition = partitionOpen(currentRelation, tablepartitionid, lock);
+ tablepartitionid = getPartitionOidFromSequence(currentRelation, partSeq, partitionno);
+ tablepartition = PartitionOpenWithPartitionno(currentRelation, tablepartitionid, partitionno, lock);
indexstate->ss.partitions = lappend(indexstate->ss.partitions, tablepartition);
+
+ appendStringInfo(partNameInfo, "%s ", tablepartition->pd_part->relname.data);
+ appendStringInfo(partOidInfo, "%u ", tablepartitionid);
+
if (RelationIsSubPartitioned(currentRelation)) {
- ListCell *lc = NULL;
+ ListCell *lc1 = NULL;
+ ListCell *lc2 = NULL;
SubPartitionPruningResult *subPartPruningResult =
- GetSubPartitionPruningResult(resultPlan->ls_selectedSubPartitions, partSeq);
+ GetSubPartitionPruningResult(resultPlan->ls_selectedSubPartitions, partSeq, partitionno);
if (subPartPruningResult == NULL) {
continue;
}
List *subpartList = subPartPruningResult->ls_selectedSubPartitions;
+ List *subpartitionnos = subPartPruningResult->ls_selectedSubPartitionnos;
+ Assert(list_length(subpartList) == list_length(subpartitionnos));
List *subIndexList = NIL;
List *subPartList = NIL;
Relation tablepartrel = partitionGetRelation(currentRelation, tablepartition);
- foreach (lc, subpartList) {
- int subpartSeq = lfirst_int(lc);
- Oid subpartitionid = getPartitionOidFromSequence(tablepartrel, subpartSeq);
- Partition subpart = partitionOpen(tablepartrel, subpartitionid, AccessShareLock);
+ forboth (lc1, subpartList, lc2, subpartitionnos) {
+ int subpartSeq = lfirst_int(lc1);
+ int subpartitionno = lfirst_int(lc2);
+ Oid subpartitionid = getPartitionOidFromSequence(tablepartrel, subpartSeq, subpartitionno);
+ Partition subpart =
+ PartitionOpenWithPartitionno(tablepartrel, subpartitionid, subpartitionno, AccessShareLock);
subPartList = lappend(subPartList, subpart);
partitionIndexOidList = PartitionGetPartIndexList(subpart);
diff --git a/src/gausskernel/runtime/executor/nodeIndexscan.cpp b/src/gausskernel/runtime/executor/nodeIndexscan.cpp
index 6d752de3f..c49020bac 100644
--- a/src/gausskernel/runtime/executor/nodeIndexscan.cpp
+++ b/src/gausskernel/runtime/executor/nodeIndexscan.cpp
@@ -1456,36 +1456,51 @@ void ExecInitPartitionForIndexScan(IndexScanState* index_state, EState* estate)
index_state->ss.part_id = 0;
}
- ListCell* cell = NULL;
+ ListCell* cell1 = NULL;
+ ListCell* cell2 = NULL;
List* part_seqs = resultPlan->ls_rangeSelectedPartitions;
- foreach (cell, part_seqs) {
+ List* partitionnos = resultPlan->ls_selectedPartitionnos;
+ Assert(list_length(part_seqs) == list_length(partitionnos));
+ StringInfo partNameInfo = makeStringInfo();
+ StringInfo partOidInfo = makeStringInfo();
+
+ forboth (cell1, part_seqs, cell2, partitionnos) {
Oid tablepartitionid = InvalidOid;
Oid indexpartitionid = InvalidOid;
List* partitionIndexOidList = NIL;
- int partSeq = lfirst_int(cell);
+ int partSeq = lfirst_int(cell1);
+ int partitionno = lfirst_int(cell2);
/* get table partition and add it to a list for following scan */
- tablepartitionid = getPartitionOidFromSequence(current_relation, partSeq, plan->scan.pruningInfo->partMap);
- table_partition = partitionOpen(current_relation, tablepartitionid, lock);
+ tablepartitionid = getPartitionOidFromSequence(current_relation, partSeq, partitionno);
+ table_partition = PartitionOpenWithPartitionno(current_relation, tablepartitionid, partitionno, lock);
index_state->ss.partitions = lappend(index_state->ss.partitions, table_partition);
+ appendStringInfo(partNameInfo, "%s ", table_partition->pd_part->relname.data);
+ appendStringInfo(partOidInfo, "%u ", tablepartitionid);
+
if (RelationIsSubPartitioned(current_relation)) {
- ListCell *lc = NULL;
+ ListCell *lc1 = NULL;
+ ListCell *lc2 = NULL;
SubPartitionPruningResult* subPartPruningResult =
- GetSubPartitionPruningResult(resultPlan->ls_selectedSubPartitions, partSeq);
+ GetSubPartitionPruningResult(resultPlan->ls_selectedSubPartitions, partSeq, partitionno);
if (subPartPruningResult == NULL) {
continue;
}
List *subpartList = subPartPruningResult->ls_selectedSubPartitions;
+ List *subpartitionnos = subPartPruningResult->ls_selectedSubPartitionnos;
+ Assert(list_length(subpartList) == list_length(subpartitionnos));
List *subIndexList = NULL;
List *subRelationList = NULL;
- foreach (lc, subpartList)
+ forboth (lc1, subpartList, lc2, subpartitionnos)
{
- int subpartSeq = lfirst_int(lc);
+ int subpartSeq = lfirst_int(lc1);
+ int subpartitionno = lfirst_int(lc2);
Relation tablepartrel = partitionGetRelation(current_relation, table_partition);
- Oid subpartitionid = getPartitionOidFromSequence(tablepartrel, subpartSeq);
- Partition subpart = partitionOpen(tablepartrel, subpartitionid, AccessShareLock);
+ Oid subpartitionid = getPartitionOidFromSequence(tablepartrel, subpartSeq, subpartitionno);
+ Partition subpart =
+ PartitionOpenWithPartitionno(tablepartrel, subpartitionid, subpartitionno, AccessShareLock);
partitionIndexOidList = PartitionGetPartIndexList(subpart);
diff --git a/src/gausskernel/runtime/executor/nodeLimit.cpp b/src/gausskernel/runtime/executor/nodeLimit.cpp
index f0f34417e..131d95814 100644
--- a/src/gausskernel/runtime/executor/nodeLimit.cpp
+++ b/src/gausskernel/runtime/executor/nodeLimit.cpp
@@ -24,6 +24,9 @@
#include "executor/executor.h"
#include "executor/node/nodeLimit.h"
#include "nodes/nodeFuncs.h"
+#include "instruments/instr_statement.h"
+
+#define REPORT_LIMIT_THRESHOLD 5000 /* report cause_type's threshold for limit */
static TupleTableSlot* ExecLimit(PlanState* state);
static void pass_down_bound(LimitState* node, PlanState* child_node);
@@ -270,6 +273,12 @@ void recompute_limits(LimitState* node)
node->noCount = true;
}
+ /*
+ * Check whether there are risks caused by limiting too many rows.
+ */
+ if (!node->noCount && node->count >= REPORT_LIMIT_THRESHOLD)
+ instr_stmt_report_cause_type(NUM_F_LIMIT);
+
/* Reset position to start-of-scan */
node->position = 0;
node->subSlot = NULL;
diff --git a/src/gausskernel/runtime/executor/nodeLockRows.cpp b/src/gausskernel/runtime/executor/nodeLockRows.cpp
index cb6458b89..328b0c625 100755
--- a/src/gausskernel/runtime/executor/nodeLockRows.cpp
+++ b/src/gausskernel/runtime/executor/nodeLockRows.cpp
@@ -23,6 +23,7 @@
#include "access/xact.h"
#include "access/ustore/knl_uheap.h"
+#include "catalog/pg_partition_fn.h"
#include "executor/executor.h"
#include "executor/node/nodeLockRows.h"
#ifdef PGXC
@@ -161,7 +162,7 @@ lnext:
/* if it is a partition */
if (tblid != erm->relation->rd_id) {
searchFakeReationForPartitionOid(estate->esfRelations,
- estate->es_query_cxt, erm->relation, tblid, target_rel,
+ estate->es_query_cxt, erm->relation, tblid, INVALID_PARTITION_NO, target_rel,
target_part, RowShareLock);
Assert(tblid == target_rel->rd_id);
}
@@ -362,6 +363,7 @@ lnext:
estate->es_query_cxt,
erm->relation,
tblid,
+ INVALID_PARTITION_NO,
target_rel,
target_part,
RowShareLock);
diff --git a/src/gausskernel/runtime/executor/nodeModifyTable.cpp b/src/gausskernel/runtime/executor/nodeModifyTable.cpp
index 581e8e2da..95a145abf 100644
--- a/src/gausskernel/runtime/executor/nodeModifyTable.cpp
+++ b/src/gausskernel/runtime/executor/nodeModifyTable.cpp
@@ -886,11 +886,13 @@ static Oid ExecUpsert(ModifyTableState* state, TupleTableSlot* slot, TupleTableS
RangeTblEntry *rte = exec_rt_fetch(resultRelInfo->ri_RangeTableIndex, estate);
if (RelationIsPartitioned(resultRelationDesc)) {
- partitionid = heapTupleGetPartitionId(resultRelationDesc, tuple);
+ int partitionno = INVALID_PARTITION_NO;
+ partitionid = heapTupleGetPartitionId(resultRelationDesc, tuple, &partitionno);
bool res = trySearchFakeReationForPartitionOid(&estate->esfRelations,
estate->es_query_cxt,
resultRelationDesc,
partitionid,
+ partitionno,
&heaprel,
&partition,
RowExclusiveLock);
@@ -900,11 +902,13 @@ static Oid ExecUpsert(ModifyTableState* state, TupleTableSlot* slot, TupleTableS
CheckPartitionOidForSpecifiedPartition(rte, partitionid);
if (RelationIsSubPartitioned(resultRelationDesc)) {
- subPartitionId = heapTupleGetPartitionId(heaprel, tuple);
+ int subpartitionno = INVALID_PARTITION_NO;
+ subPartitionId = heapTupleGetPartitionId(heaprel, tuple, &subpartitionno);
searchFakeReationForPartitionOid(estate->esfRelations,
estate->es_query_cxt,
heaprel,
subPartitionId,
+ subpartitionno,
subPartRel,
subPart,
RowExclusiveLock);
@@ -1228,7 +1232,7 @@ TupleTableSlot* ExecInsertT(ModifyTableState* state, TupleTableSlot* slot, Tuple
if (RelationIsSubPartitioned(result_relation_desc)) {
targetOid = heapTupleGetSubPartitionId(result_relation_desc, tuple);
} else {
- targetOid = heapTupleGetPartitionId(result_relation_desc, tuple);
+ targetOid = heapTupleGetPartitionId(result_relation_desc, tuple, NULL);
}
} else {
targetOid = RelationGetRelid(result_relation_desc);
@@ -1358,10 +1362,13 @@ TupleTableSlot* ExecInsertT(ModifyTableState* state, TupleTableSlot* slot, Tuple
case PARTTYPE_PARTITIONED_RELATION: {
/* get partititon oid for insert the record */
Datum newval = ComputePartKeyExprTuple(result_relation_desc, estate, slot, NULL);
+ int partitionno = INVALID_PARTITION_NO;
if (newval)
- partition_id = heapTupleGetPartitionId(result_relation_desc, (void*)newval, false, estate->es_plannedstmt->hasIgnore);
+ partition_id = heapTupleGetPartitionId(result_relation_desc, (void *)newval, &partitionno,
+ false, estate->es_plannedstmt->hasIgnore);
else
- partition_id = heapTupleGetPartitionId(result_relation_desc, tuple, false, estate->es_plannedstmt->hasIgnore);
+ partition_id = heapTupleGetPartitionId(result_relation_desc, tuple, &partitionno, false,
+ estate->es_plannedstmt->hasIgnore);
/* if cannot find valid partition oid and sql has keyword ignore, return and don't insert */
if (estate->es_plannedstmt->hasIgnore && partition_id == InvalidOid) {
return NULL;
@@ -1373,7 +1380,7 @@ TupleTableSlot* ExecInsertT(ModifyTableState* state, TupleTableSlot* slot, Tuple
}
bool res = trySearchFakeReationForPartitionOid(&estate->esfRelations, estate->es_query_cxt,
- result_relation_desc, partition_id, &heap_rel, &partition, RowExclusiveLock);
+ result_relation_desc, partition_id, partitionno, &heap_rel, &partition, RowExclusiveLock);
if (!res) {
return NULL;
}
@@ -1412,6 +1419,8 @@ TupleTableSlot* ExecInsertT(ModifyTableState* state, TupleTableSlot* slot, Tuple
case PARTTYPE_SUBPARTITIONED_RELATION: {
Oid partitionId = InvalidOid;
Oid subPartitionId = InvalidOid;
+ int partitionno = INVALID_PARTITION_NO;
+ int subpartitionno = INVALID_PARTITION_NO;
Relation partRel = NULL;
Partition part = NULL;
Relation subPartRel = NULL;
@@ -1420,9 +1429,11 @@ TupleTableSlot* ExecInsertT(ModifyTableState* state, TupleTableSlot* slot, Tuple
/* get partititon oid for insert the record */
Datum newval = ComputePartKeyExprTuple(result_relation_desc, estate, slot, NULL);
if (newval)
- partitionId = heapTupleGetPartitionId(result_relation_desc, (void*)newval, false, estate->es_plannedstmt->hasIgnore);
+ partitionId = heapTupleGetPartitionId(result_relation_desc, (void *)newval, &partitionno,
+ false, estate->es_plannedstmt->hasIgnore);
else
- partitionId = heapTupleGetPartitionId(result_relation_desc, tuple, false, estate->es_plannedstmt->hasIgnore);
+ partitionId = heapTupleGetPartitionId(result_relation_desc, tuple, &partitionno, false,
+ estate->es_plannedstmt->hasIgnore);
if (estate->es_plannedstmt->hasIgnore && partitionId == InvalidOid) {
return NULL;
}
@@ -1433,7 +1444,7 @@ TupleTableSlot* ExecInsertT(ModifyTableState* state, TupleTableSlot* slot, Tuple
}
bool res = trySearchFakeReationForPartitionOid(&estate->esfRelations, estate->es_query_cxt,
- result_relation_desc, partitionId, &partRel, &part, RowExclusiveLock);
+ result_relation_desc, partitionId, partitionno, &partRel, &part, RowExclusiveLock);
if (!res) {
return NULL;
}
@@ -1441,9 +1452,11 @@ TupleTableSlot* ExecInsertT(ModifyTableState* state, TupleTableSlot* slot, Tuple
/* get subpartititon oid for insert the record */
Datum newsubval = ComputePartKeyExprTuple(result_relation_desc, estate, slot, partRel);
if (newsubval)
- subPartitionId = heapTupleGetPartitionId(partRel, (void*)newsubval, false, estate->es_plannedstmt->hasIgnore);
+ subPartitionId = heapTupleGetPartitionId(partRel, (void *)newsubval, &subpartitionno, false,
+ estate->es_plannedstmt->hasIgnore);
else
- subPartitionId = heapTupleGetPartitionId(partRel, tuple, false, estate->es_plannedstmt->hasIgnore);
+ subPartitionId = heapTupleGetPartitionId(partRel, tuple, &subpartitionno, false,
+ estate->es_plannedstmt->hasIgnore);
if (estate->es_plannedstmt->hasIgnore && subPartitionId == InvalidOid) {
return NULL;
}
@@ -1454,7 +1467,7 @@ TupleTableSlot* ExecInsertT(ModifyTableState* state, TupleTableSlot* slot, Tuple
}
res = trySearchFakeReationForPartitionOid(&estate->esfRelations, estate->es_query_cxt,
- partRel, subPartitionId, &subPartRel, &subPart, RowExclusiveLock);
+ partRel, subPartitionId, subpartitionno, &subPartRel, &subPart, RowExclusiveLock);
if (!res) {
partitionClose(result_relation_desc, part, RowExclusiveLock);
return NULL;
@@ -1725,6 +1738,7 @@ ldelete:
estate->es_query_cxt,
result_relation_desc,
deletePartitionOid,
+ INVALID_PARTITION_NO,
part_relation,
partition,
RowExclusiveLock);
@@ -2019,6 +2033,7 @@ TupleTableSlot* ExecUpdate(ItemPointer tupleid,
Relation fake_part_rel = NULL;
Relation parent_relation = NULL;
Oid new_partId = InvalidOid;
+ int partitionno = INVALID_PARTITION_NO;
uint64 res_hash;
uint64 hash_del = 0;
bool is_record = false;
@@ -2445,8 +2460,10 @@ lreplace:
if (u_sess->exec_cxt.route->fileExist) {
new_partId = u_sess->exec_cxt.route->partitionId;
+ partitionno = GetCurrentPartitionNo(new_partId);
if (RelationIsSubPartitioned(result_relation_desc)) {
- Partition part = partitionOpen(result_relation_desc, new_partId, RowExclusiveLock);
+ Partition part = PartitionOpenWithPartitionno(result_relation_desc, new_partId,
+ partitionno, RowExclusiveLock);
Relation partRel = partitionGetRelation(result_relation_desc, part);
Datum newsubval = ComputePartKeyExprTuple(result_relation_desc, estate, slot, partRel);
if (newsubval) {
@@ -2457,6 +2474,7 @@ lreplace:
if (u_sess->exec_cxt.route->fileExist) {
new_partId = u_sess->exec_cxt.route->partitionId;
+ partitionno = GetCurrentSubPartitionNo(new_partId);
} else {
int level = can_ignore ? WARNING : ERROR;
ereport(level, (errmodule(MOD_EXECUTOR),
@@ -2527,6 +2545,7 @@ lreplace:
estate->es_query_cxt,
result_relation_desc,
new_partId,
+ partitionno,
fake_part_rel,
partition,
RowExclusiveLock);
@@ -2735,6 +2754,7 @@ lreplace:
estate->es_query_cxt,
result_relation_desc,
oldPartitionOid,
+ partitionno,
old_fake_relation,
old_partition,
RowExclusiveLock);
@@ -2748,13 +2768,14 @@ lreplace:
Relation fake_insert_relation = NULL;
if (need_create_file) {
- new_partId = AddNewIntervalPartition(result_relation_desc, tuple);
+ new_partId = AddNewIntervalPartition(result_relation_desc, tuple, &partitionno);
}
searchFakeReationForPartitionOid(estate->esfRelations,
estate->es_query_cxt,
result_relation_desc,
new_partId,
+ partitionno,
fake_part_rel,
insert_partition,
RowExclusiveLock);
@@ -2965,15 +2986,15 @@ ldelete:
{
Partition insert_partition = NULL;
Relation fake_insert_relation = NULL;
-
if (need_create_file) {
- new_partId = AddNewIntervalPartition(result_relation_desc, tuple);
+ new_partId = AddNewIntervalPartition(result_relation_desc, tuple, &partitionno);
}
bool res = trySearchFakeReationForPartitionOid(&estate->esfRelations,
estate->es_query_cxt,
result_relation_desc,
new_partId,
+ partitionno,
&fake_part_rel,
&insert_partition,
RowExclusiveLock);
@@ -3262,7 +3283,9 @@ static TupleTableSlot* ExecReplace(EState* estate, ModifyTableState* node, Tuple
int2 conflictBucketid = InvalidBktId;
bool isgpi = false;
Oid partitionid = InvalidOid;
+ int partitionno = INVALID_PARTITION_NO;
Oid subPartitionId = InvalidOid;
+ int subpartitionno = INVALID_PARTITION_NO;
Relation targetrel = NULL;
Relation heaprel = NULL;
Relation subPartRel = NULL;
@@ -3277,21 +3300,23 @@ static TupleTableSlot* ExecReplace(EState* estate, ModifyTableState* node, Tuple
heaprel = targetrel;
tuple = tableam_tslot_get_tuple_from_slot(targetrel, slot);
if (RelationIsPartitioned(targetrel)) {
- partitionid = heapTupleGetPartitionId(targetrel, tuple);
+ partitionid = heapTupleGetPartitionId(targetrel, tuple, &partitionno);
searchFakeReationForPartitionOid(estate->esfRelations,
estate->es_query_cxt,
targetrel,
partitionid,
+ partitionno,
heaprel,
partition,
RowExclusiveLock);
if (RelationIsSubPartitioned(targetrel)) {
- subPartitionId = heapTupleGetPartitionId(heaprel, tuple);
+ subPartitionId = heapTupleGetPartitionId(heaprel, tuple, &subpartitionno);
searchFakeReationForPartitionOid(estate->esfRelations,
estate->es_query_cxt,
heaprel,
subPartitionId,
+ subpartitionno,
subPartRel,
subPart,
RowExclusiveLock);
diff --git a/src/gausskernel/runtime/executor/nodeRecursiveunion.cpp b/src/gausskernel/runtime/executor/nodeRecursiveunion.cpp
index c52e05b80..b096051a9 100644
--- a/src/gausskernel/runtime/executor/nodeRecursiveunion.cpp
+++ b/src/gausskernel/runtime/executor/nodeRecursiveunion.cpp
@@ -102,7 +102,8 @@ static void build_hash_table(RecursiveUnionState* rustate)
sizeof(RUHashEntryData),
rustate->tableContext,
rustate->tempContext,
- u_sess->attr.attr_memory.work_mem);
+ u_sess->attr.attr_memory.work_mem,
+ NULL);
}
/*
diff --git a/src/gausskernel/runtime/executor/nodeSeqscan.cpp b/src/gausskernel/runtime/executor/nodeSeqscan.cpp
index 40b4e7bfc..e594fb20b 100644
--- a/src/gausskernel/runtime/executor/nodeSeqscan.cpp
+++ b/src/gausskernel/runtime/executor/nodeSeqscan.cpp
@@ -605,32 +605,41 @@ void InitScanRelation(SeqScanState* node, EState* estate, int eflags)
Partition part = NULL;
PruningResult* resultPlan = GetPartitionPruningResultInInitScanRelation(plan, estate, current_relation);
- ListCell* cell = NULL;
+ ListCell* cell1 = NULL;
+ ListCell* cell2 = NULL;
List* part_seqs = resultPlan->ls_rangeSelectedPartitions;
+ List* partitionnos = resultPlan->ls_selectedPartitionnos;
+ Assert(list_length(part_seqs) == list_length(partitionnos));
- foreach (cell, part_seqs) {
+ forboth (cell1, part_seqs, cell2, partitionnos) {
Oid tablepartitionid = InvalidOid;
- int partSeq = lfirst_int(cell);
+ int partSeq = lfirst_int(cell1);
+ int partitionno = lfirst_int(cell2);
List* subpartition = NIL;
- tablepartitionid = getPartitionOidFromSequence(current_relation, partSeq, plan->pruningInfo->partMap);
- part = partitionOpen(current_relation, tablepartitionid, lockmode);
+ tablepartitionid = getPartitionOidFromSequence(current_relation, partSeq, partitionno);
+ part = PartitionOpenWithPartitionno(current_relation, tablepartitionid, partitionno, lockmode);
node->partitions = lappend(node->partitions, part);
if (resultPlan->ls_selectedSubPartitions != NIL) {
Relation partRelation = partitionGetRelation(current_relation, part);
SubPartitionPruningResult *subPartPruningResult =
- GetSubPartitionPruningResult(resultPlan->ls_selectedSubPartitions, partSeq);
+ GetSubPartitionPruningResult(resultPlan->ls_selectedSubPartitions, partSeq, partitionno);
if (subPartPruningResult == NULL) {
continue;
}
List *subpart_seqs = subPartPruningResult->ls_selectedSubPartitions;
- ListCell *lc = NULL;
- foreach (lc, subpart_seqs) {
+ List *subpartitionnos = subPartPruningResult->ls_selectedSubPartitionnos;
+ Assert(list_length(subpart_seqs) == list_length(subpartitionnos));
+ ListCell *lc1 = NULL;
+ ListCell *lc2 = NULL;
+ forboth (lc1, subpart_seqs, lc2, subpartitionnos) {
Oid subpartitionid = InvalidOid;
- int subpartSeq = lfirst_int(lc);
+ int subpartSeq = lfirst_int(lc1);
+ int subpartitionno = lfirst_int(lc2);
- subpartitionid = getPartitionOidFromSequence(partRelation, subpartSeq);
- Partition subpart = partitionOpen(partRelation, subpartitionid, lockmode);
+ subpartitionid = getPartitionOidFromSequence(partRelation, subpartSeq, subpartitionno);
+ Partition subpart =
+ PartitionOpenWithPartitionno(partRelation, subpartitionid, subpartitionno, lockmode);
subpartition = lappend(subpartition, subpart);
}
releaseDummyRelation(&(partRelation));
diff --git a/src/gausskernel/runtime/executor/nodeSetOp.cpp b/src/gausskernel/runtime/executor/nodeSetOp.cpp
index 04182367c..8f57ef497 100644
--- a/src/gausskernel/runtime/executor/nodeSetOp.cpp
+++ b/src/gausskernel/runtime/executor/nodeSetOp.cpp
@@ -144,7 +144,8 @@ static void build_hash_table(SetOpState* setopstate)
sizeof(SetOpHashEntryData),
setopstate->tableContext,
setopstate->tempContext,
- work_mem);
+ work_mem,
+ node->dup_collations);
}
/*
@@ -281,7 +282,7 @@ static TupleTableSlot* setop_retrieve_direct(SetOpState* setopstate)
* Check whether we've crossed a group boundary.
*/
if (!execTuplesMatch(result_tuple_slot, outer_slot, node->numCols, node->dupColIdx, setopstate->eqfunctions,
- setopstate->tempContext)) {
+ setopstate->tempContext, node->dup_collations)) {
/*
* Save the first input tuple of the next group.
*/
diff --git a/src/gausskernel/runtime/executor/nodeStartWithOp.cpp b/src/gausskernel/runtime/executor/nodeStartWithOp.cpp
index cb9bb0826..9988cfe7a 100644
--- a/src/gausskernel/runtime/executor/nodeStartWithOp.cpp
+++ b/src/gausskernel/runtime/executor/nodeStartWithOp.cpp
@@ -230,6 +230,7 @@ static bool unsupported_filter_walker(Node *node, Node *context_node)
* - ExecStartWithOp()
* - ExecEndStartWithOp()
* - ExecReScanStartWithOp()
+ * - ResetRecursiveInner()
*/
StartWithOpState* ExecInitStartWithOp(StartWithOp* node, EState* estate, int eflags)
{
@@ -418,6 +419,30 @@ bool CheckCycleExeception(StartWithOpState *node, TupleTableSlot *slot)
return incycle;
}
+/*
+ * This function is called during executing recursive part(inner plan) of recursive union,
+ * so it would be enough to rescan(reset) only inner_plan of RecursiveUnionState to refresh
+ * last working state including working table.
+ */
+void ResetRecursiveInner(RecursiveUnionState *node)
+{
+ PlanState *outerPlanState = outerPlanState(node);
+ PlanState *innerPlanState = innerPlanState(node);
+ RecursiveUnion *ruPlan = (RecursiveUnion*)node->ps.plan;
+
+ /*
+ * Set recursive term's chgParam to tell it that we'll modify the working
+ * table and therefore it has to rescan.
+ */
+ innerPlanState->chgParam = bms_add_member(innerPlanState->chgParam, ruPlan->wtParam);
+ if (outerPlanState->chgParam == NULL) {
+ ExecReScan(innerPlanState);
+ }
+
+ node->intermediate_empty = true;
+ tuplestore_clear(node->working_table);
+ tuplestore_clear(node->intermediate_table);
+}
/*
* Peeking a tuple's connectable descendants for exactly one level.
@@ -435,8 +460,8 @@ static List* peekNextLevel(TupleTableSlot* startSlot, PlanState* outerNode, int
List* queue = NULL;
RecursiveUnionState* rus = (RecursiveUnionState*) outerNode;
StartWithOpState *swnode = rus->swstate;
- /* clean up RU's old working table */
- ExecReScan(outerNode);
+ /* Reset RU's inner plan, including re-scanning the inner plan and freeing its working table */
+ ResetRecursiveInner(rus);
/* pushing the depth-first tuple into RU's working table */
rus->recursing = true;
tuplestore_puttupleslot(rus->working_table, startSlot);
diff --git a/src/gausskernel/runtime/executor/nodeSubplan.cpp b/src/gausskernel/runtime/executor/nodeSubplan.cpp
index ff8145a76..5fe955f52 100644
--- a/src/gausskernel/runtime/executor/nodeSubplan.cpp
+++ b/src/gausskernel/runtime/executor/nodeSubplan.cpp
@@ -462,7 +462,8 @@ void buildSubPlanHash(SubPlanState* node, ExprContext* econtext)
sizeof(TupleHashEntryData),
node->hashtablecxt,
node->hashtempcxt,
- u_sess->attr.attr_memory.work_mem);
+ u_sess->attr.attr_memory.work_mem,
+ node->tab_collations);
if (!subplan->unknownEqFalse) {
if (ncols == 1) {
@@ -481,7 +482,8 @@ void buildSubPlanHash(SubPlanState* node, ExprContext* econtext)
sizeof(TupleHashEntryData),
node->hashtablecxt,
node->hashtempcxt,
- u_sess->attr.attr_memory.work_mem);
+ u_sess->attr.attr_memory.work_mem,
+ node->tab_collations);
}
/*
@@ -577,7 +579,8 @@ bool findPartialMatch(TupleHashTable hashtable, TupleTableSlot* slot, FmgrInfo*
CHECK_FOR_INTERRUPTS();
ExecStoreMinimalTuple(entry->firstTuple, hashtable->tableslot, false);
- if (!execTuplesUnequal(slot, hashtable->tableslot, num_cols, key_col_idx, eqfunctions, hashtable->tempcxt)) {
+ if (!execTuplesUnequal(slot, hashtable->tableslot, num_cols, key_col_idx, eqfunctions,
+ hashtable->tempcxt, hashtable->tab_collations)) {
TermTupleHashIterator(&hashiter);
return true;
}
diff --git a/src/gausskernel/runtime/executor/nodeTidscan.cpp b/src/gausskernel/runtime/executor/nodeTidscan.cpp
index cebfc50f0..8f8721735 100644
--- a/src/gausskernel/runtime/executor/nodeTidscan.cpp
+++ b/src/gausskernel/runtime/executor/nodeTidscan.cpp
@@ -655,7 +655,8 @@ TidScanState* ExecInitTidScan(TidScan* node, EState* estate, int eflags)
scan_handler_tbl_begin_tidscan(currentSubPartitionRel, (ScanState *)tidstate);
} else {
tidstate->ss.ss_currentPartition = partitiontrel;
- tidstate->ss.ss_currentScanDesc = scan_handler_tbl_begin_tidscan(partitiontrel, (ScanState *)tidstate);
+ tidstate->ss.ss_currentScanDesc =
+ scan_handler_tbl_begin_tidscan(partitiontrel, (ScanState *)tidstate);
}
}
}
@@ -768,53 +769,59 @@ static void ExecInitPartitionForTidScan(TidScanState* tidstate, EState* estate)
LOCKMODE lock = NoLock;
Partition table_partition = NULL;
bool relistarget = false;
- ListCell* cell = NULL;
- PruningResult* resultPlan = NULL;
- List* part_seqs = NULL;
-
- /* PBE: Recalculate partitions to be scanned according to parameters */
- if (plan->scan.pruningInfo->expr != NULL) {
- resultPlan = GetPartitionInfo(plan->scan.pruningInfo, estate, current_relation);
- } else {
- resultPlan = plan->scan.pruningInfo;
- }
- part_seqs = resultPlan->ls_rangeSelectedPartitions;
relistarget = ExecRelationIsTargetRelation(estate, plan->scan.scanrelid);
lock = (relistarget ? RowExclusiveLock : AccessShareLock);
tidstate->ss.lockMode = lock;
- if (resultPlan->ls_rangeSelectedPartitions != NULL) {
- plan->scan.itrs = resultPlan->ls_rangeSelectedPartitions->length;
+ PruningResult* pruningResult = NULL;
+ if (plan->scan.pruningInfo->expr != NULL) {
+ pruningResult = GetPartitionInfo(plan->scan.pruningInfo, estate, current_relation);
} else {
- plan->scan.itrs = 0;
+ pruningResult = plan->scan.pruningInfo;
+ }
+ if (pruningResult->ls_rangeSelectedPartitions != NULL) {
+ tidstate->ss.part_id = pruningResult->ls_rangeSelectedPartitions->length;
+ } else {
+ tidstate->ss.part_id = 0;
}
- tidstate->ss.part_id = plan->scan.itrs;
- foreach (cell, part_seqs) {
+ ListCell* cell1 = NULL;
+ ListCell* cell2 = NULL;
+ List* part_seqs = pruningResult->ls_rangeSelectedPartitions;
+ List* partitionnos = pruningResult->ls_selectedPartitionnos;
+ Assert(list_length(part_seqs) == list_length(partitionnos));
+
+ forboth (cell1, part_seqs, cell2, partitionnos) {
Oid table_partitionid = InvalidOid;
- int part_seq = lfirst_int(cell);
+ int part_seq = lfirst_int(cell1);
+ int partitionno = lfirst_int(cell2);
/* add table partition to list */
table_partitionid =
- getPartitionOidFromSequence(current_relation, part_seq, plan->scan.pruningInfo->partMap);
- table_partition = partitionOpen(current_relation, table_partitionid, lock);
+ getPartitionOidFromSequence(current_relation, part_seq, partitionno);
+ table_partition = PartitionOpenWithPartitionno(current_relation, table_partitionid, partitionno, lock);
tidstate->ss.partitions = lappend(tidstate->ss.partitions, table_partition);
- if (resultPlan->ls_selectedSubPartitions != NIL) {
+ if (pruningResult->ls_selectedSubPartitions != NIL) {
Relation partRelation = partitionGetRelation(current_relation, table_partition);
SubPartitionPruningResult* subPartPruningResult =
- GetSubPartitionPruningResult(resultPlan->ls_selectedSubPartitions, part_seq);
+ GetSubPartitionPruningResult(pruningResult->ls_selectedSubPartitions, part_seq, partitionno);
if (subPartPruningResult == NULL) {
continue;
}
List *subpartSeqs = subPartPruningResult->ls_selectedSubPartitions;
+ List *subpartitionnos = subPartPruningResult->ls_selectedSubPartitionnos;
+ Assert(list_length(subpartSeqs) == list_length(subpartitionnos));
List *subpartition = NIL;
- ListCell *lc = NULL;
- foreach (lc, subpartSeqs) {
+ ListCell *lc1 = NULL;
+ ListCell *lc2 = NULL;
+ forboth (lc1, subpartSeqs, lc2, subpartitionnos) {
Oid subpartitionid = InvalidOid;
- int subpartSeq = lfirst_int(lc);
+ int subpartSeq = lfirst_int(lc1);
+ int subpartitionno = lfirst_int(lc2);
- subpartitionid = getPartitionOidFromSequence(partRelation, subpartSeq);
- Partition subpart = partitionOpen(partRelation, subpartitionid, lock);
+ subpartitionid = getPartitionOidFromSequence(partRelation, subpartSeq, subpartitionno);
+ Partition subpart =
+ PartitionOpenWithPartitionno(partRelation, subpartitionid, subpartitionno, lock);
subpartition = lappend(subpartition, subpart);
}
releaseDummyRelation(&(partRelation));
diff --git a/src/gausskernel/runtime/executor/nodeUnique.cpp b/src/gausskernel/runtime/executor/nodeUnique.cpp
index b08f2e3b5..08b234d57 100644
--- a/src/gausskernel/runtime/executor/nodeUnique.cpp
+++ b/src/gausskernel/runtime/executor/nodeUnique.cpp
@@ -81,9 +81,10 @@ static TupleTableSlot* ExecUnique(PlanState* state) /* return: a tuple or NULL *
* If so then we loop back and fetch another new tuple from the
* subplan.
*/
- if (!execTuplesMatch(
- slot, result_tuple_slot, plan_node->numCols, plan_node->uniqColIdx, node->eqfunctions, node->tempContext))
+ if (!execTuplesMatch(slot, result_tuple_slot, plan_node->numCols, plan_node->uniqColIdx, node->eqfunctions,
+ node->tempContext, plan_node->uniq_collations)) {
break;
+ }
}
/*
diff --git a/src/gausskernel/runtime/executor/nodeValuesscan.cpp b/src/gausskernel/runtime/executor/nodeValuesscan.cpp
index e995b7dab..a1676bab4 100644
--- a/src/gausskernel/runtime/executor/nodeValuesscan.cpp
+++ b/src/gausskernel/runtime/executor/nodeValuesscan.cpp
@@ -27,6 +27,7 @@
#include "executor/executor.h"
#include "executor/node/nodeValuesscan.h"
#include "parser/parsetree.h"
+#include "optimizer/clauses.h"
static TupleTableSlot* ExecValuesScan(PlanState* state);
static TupleTableSlot* ValuesNext(ValuesScanState* node);
@@ -43,7 +44,7 @@ static TupleTableSlot* ValuesNext(ValuesScanState* node);
*/
static TupleTableSlot* ValuesNext(ValuesScanState* node)
{
- List* expr_list = NIL;
+ int curr_idx = 0;
/*
* get information from the estate and scan state
@@ -59,17 +60,9 @@ static TupleTableSlot* ValuesNext(ValuesScanState* node)
if (ScanDirectionIsForward(direction)) {
if (node->curr_idx < node->array_len)
node->curr_idx++;
- if (node->curr_idx < node->array_len)
- expr_list = node->exprlists[node->curr_idx];
- else
- expr_list = NIL;
} else {
if (node->curr_idx >= 0)
node->curr_idx--;
- if (node->curr_idx >= 0)
- expr_list = node->exprlists[node->curr_idx];
- else
- expr_list = NIL;
}
/*
@@ -80,9 +73,11 @@ static TupleTableSlot* ValuesNext(ValuesScanState* node)
*/
(void)ExecClearTuple(slot);
- if (expr_list != NULL) {
+ curr_idx = node->curr_idx;
+ if (curr_idx >= 0 && curr_idx < node->array_len) {
MemoryContext old_context;
- List* expr_state_list = NIL;
+ List* expr_state_list = node->exprstatelists[curr_idx];
+ List* exprlist = node->exprlists[curr_idx];
Datum* values = NULL;
bool* is_null = NULL;
ListCell* lc = NULL;
@@ -100,6 +95,7 @@ static TupleTableSlot* ValuesNext(ValuesScanState* node)
* This is a tad unusual, but we want to delete the eval state again
* when we move to the next row, to avoid growth of memory
* requirements over a long values list.
+ * Do per-value-row work in the per-tuple context.
*/
old_context = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
@@ -109,7 +105,9 @@ static TupleTableSlot* ValuesNext(ValuesScanState* node)
* is a SubPlan, and there shouldn't be any (any subselects in the
* VALUES list should be InitPlans).
*/
- expr_state_list = (List*)ExecInitExpr((Expr*)expr_list, NULL);
+ if (expr_state_list == NIL) {
+ expr_state_list = (List*)ExecInitExpr((Expr*)exprlist, NULL);
+ }
/* parser should have checked all sublists are the same length */
Assert(list_length(expr_state_list) == slot->tts_tupleDescriptor->natts);
@@ -252,11 +250,36 @@ ValuesScanState* ExecInitValuesScan(ValuesScan* node, EState* estate, int eflags
scan_state->curr_idx = -1;
scan_state->array_len = list_length(node->values_lists);
- /* convert list of sublists into array of sublists for easy addressing */
+ /* Convert the list of expression sublists into an array for easier
+ * addressing at runtime. Also, detect whether any sublists contain
+ * SubPlans; for just those sublists, go ahead and do expression
+ * initialization. (This avoids problems with SubPlans wanting to connect
+ * themselves up to the outer plan tree. Notably, EXPLAIN won't see the
+ * subplans otherwise; also we will have troubles with dangling pointers
+ * and/or leaked resources if we try to handle SubPlans the same as
+ * simpler expressions.)
+ */
scan_state->exprlists = (List**)palloc(scan_state->array_len * sizeof(List*));
+ scan_state->exprstatelists = (List**)palloc0(scan_state->array_len * sizeof(List*));
i = 0;
foreach (vtl, node->values_lists) {
- scan_state->exprlists[i++] = (List*)lfirst(vtl);
+ List* exprs = castNode(List, lfirst(vtl));
+ scan_state->exprlists[i] = exprs;
+
+ /*
+ * Avoid the cost of a contain_subplans() scan in the simple
+ * case where there are no SubPlans anywhere.
+ */
+ if (estate->es_subplanstates && contain_subplans((Node*)exprs)) {
+ /*
+ * Initializing these eagerly is worthwhile because it's common
+ * to insert significant amounts of data via VALUES(). Note that
+ * each subplan is initialized separately; this just affects the
+ * upper-level subexpressions.
+ */
+ scan_state->exprstatelists[i] = ExecInitExprList(exprs, &scan_state->ss.ps);
+ }
+ i++;
}
scan_state->ss.ps.ps_TupFromTlist = false;
diff --git a/src/gausskernel/runtime/executor/nodeWindowAgg.cpp b/src/gausskernel/runtime/executor/nodeWindowAgg.cpp
index 80e37ba74..d534761cf 100644
--- a/src/gausskernel/runtime/executor/nodeWindowAgg.cpp
+++ b/src/gausskernel/runtime/executor/nodeWindowAgg.cpp
@@ -628,7 +628,7 @@ static void spool_tuples(WindowAggState* winstate, int64 pos)
if (node->partNumCols > 0) {
/* Check if this tuple still belongs to the current partition */
if (!execTuplesMatch(winstate->first_part_slot, outer_slot, node->partNumCols, node->partColIdx,
- winstate->partEqfunctions, winstate->tmpcontext->ecxt_per_tuple_memory)) {
+ winstate->partEqfunctions, winstate->tmpcontext->ecxt_per_tuple_memory, node->ord_collations)) {
/*
* end of partition; copy the tuple for the next cycle.
*/
@@ -1591,7 +1591,8 @@ static bool are_peers(WindowAggState* winstate, TupleTableSlot* slot1, TupleTabl
node->ordNumCols,
node->ordColIdx,
winstate->ordEqfunctions,
- winstate->tmpcontext->ecxt_per_tuple_memory);
+ winstate->tmpcontext->ecxt_per_tuple_memory,
+ node->ord_collations);
}
/*
diff --git a/src/gausskernel/runtime/opfusion/opfusion.cpp b/src/gausskernel/runtime/opfusion/opfusion.cpp
index baadfaa09..f4d4e6c65 100644
--- a/src/gausskernel/runtime/opfusion/opfusion.cpp
+++ b/src/gausskernel/runtime/opfusion/opfusion.cpp
@@ -349,22 +349,23 @@ void OpFusion::executeInit()
void OpFusion::auditRecord()
{
- if ((u_sess->attr.attr_security.Audit_DML_SELECT != 0 || u_sess->attr.attr_security.Audit_DML != 0) &&
- u_sess->attr.attr_security.Audit_enabled && IsPostmasterEnvironment) {
+ bool is_full_audit_user = audit_check_full_audit_user();
+ if ((u_sess->attr.attr_security.Audit_DML_SELECT != 0 || u_sess->attr.attr_security.Audit_DML != 0 ||
+ is_full_audit_user) && u_sess->attr.attr_security.Audit_enabled && IsPostmasterEnvironment) {
char *object_name = NULL;
switch (m_global->m_planstmt->commandType) {
case CMD_INSERT:
case CMD_DELETE:
case CMD_UPDATE:
- if (u_sess->attr.attr_security.Audit_DML != 0) {
+ if (u_sess->attr.attr_security.Audit_DML != 0 || is_full_audit_user) {
object_name = pgaudit_get_relation_name(m_global->m_planstmt->rtable);
pgaudit_dml_table(object_name, m_global->m_is_pbe_query ? m_global->m_psrc->query_string :
t_thrd.postgres_cxt.debug_query_string);
}
break;
case CMD_SELECT:
- if (u_sess->attr.attr_security.Audit_DML_SELECT != 0) {
+ if (u_sess->attr.attr_security.Audit_DML_SELECT != 0 || is_full_audit_user) {
object_name = pgaudit_get_relation_name(m_global->m_planstmt->rtable);
pgaudit_dml_table_select(object_name, m_global->m_is_pbe_query ?
m_global->m_psrc->query_string :
diff --git a/src/gausskernel/runtime/opfusion/opfusion_insert.cpp b/src/gausskernel/runtime/opfusion/opfusion_insert.cpp
index 129254865..fbe68dec7 100644
--- a/src/gausskernel/runtime/opfusion/opfusion_insert.cpp
+++ b/src/gausskernel/runtime/opfusion/opfusion_insert.cpp
@@ -25,6 +25,7 @@
#include "opfusion/opfusion_insert.h"
#include "access/tableam.h"
+#include "catalog/pg_partition_fn.h"
#include "catalog/storage_gtt.h"
#include "commands/matview.h"
#include "commands/sequence.h"
@@ -232,7 +233,7 @@ Datum ComputePartKeyExprTuple(Relation rel, EState *estate, TupleTableSlot *slot
if (tmpRel->partMap->type == PART_TYPE_RANGE)
boundary = ((RangePartitionMap*)(tmpRel->partMap))->rangeElements[0].boundary;
else if (tmpRel->partMap->type == PART_TYPE_LIST)
- boundary = ((ListPartitionMap*)(tmpRel->partMap))->listElements[0].boundary;
+ boundary = ((ListPartitionMap*)(tmpRel->partMap))->listElements[0].boundary[0].values;
else if (tmpRel->partMap->type == PART_TYPE_HASH)
boundary = ((HashPartitionMap*)(tmpRel->partMap))->hashElements[0].boundary;
else
@@ -286,13 +287,15 @@ unsigned long InsertFusion::ExecInsert(Relation rel, ResultRelInfo* result_rel_i
Assert(tuple != NULL);
if (RELATION_IS_PARTITIONED(rel)) {
m_c_local.m_estate->esfRelations = NULL;
- partOid = heapTupleGetPartitionId(rel, tuple, false, m_c_local.m_estate->es_plannedstmt->hasIgnore);
+ int partitionno = INVALID_PARTITION_NO;
+ partOid =
+ heapTupleGetPartitionId(rel, tuple, &partitionno, false, m_c_local.m_estate->es_plannedstmt->hasIgnore);
if (m_c_local.m_estate->es_plannedstmt->hasIgnore && partOid == InvalidOid) {
ExecReleaseResource(tuple, m_local.m_reslot, result_rel_info, m_c_local.m_estate, bucket_rel, rel, part,
partRel);
return 0;
}
- part = partitionOpen(rel, partOid, RowExclusiveLock);
+ part = PartitionOpenWithPartitionno(rel, partOid, partitionno, RowExclusiveLock);
partRel = partitionGetRelation(rel, part);
}
diff --git a/src/gausskernel/runtime/opfusion/opfusion_util.cpp b/src/gausskernel/runtime/opfusion/opfusion_util.cpp
index 74602e4ef..321b80a8a 100644
--- a/src/gausskernel/runtime/opfusion/opfusion_util.cpp
+++ b/src/gausskernel/runtime/opfusion/opfusion_util.cpp
@@ -1243,12 +1243,12 @@ Oid GetRelOidForPartitionTable(Scan scan, const Relation rel, ParamListInfo para
Oid relOid = InvalidOid;
if (params != NULL) {
Param* paramArg = scan.pruningInfo->paramArg;
- PartitionMap *partmap = scan.pruningInfo->partMap ? scan.pruningInfo->partMap : rel->partMap;
- relOid = GetPartitionOidByParam(partmap, paramArg, &(params->params[paramArg->paramid - 1]));
+ relOid = GetPartitionOidByParam(rel->partMap, paramArg, &(params->params[paramArg->paramid - 1]));
} else {
Assert((list_length(scan.pruningInfo->ls_rangeSelectedPartitions) != 0));
- int partId = lfirst_int(list_head(scan.pruningInfo->ls_rangeSelectedPartitions));
- relOid = getPartitionOidFromSequence(rel, partId, scan.pruningInfo->partMap);
+ int partId = linitial_int(scan.pruningInfo->ls_rangeSelectedPartitions);
+ int partitionno = linitial_int(scan.pruningInfo->ls_selectedPartitionnos);
+ relOid = getPartitionOidFromSequence(rel, partId, partitionno);
}
return relOid;
}
diff --git a/src/gausskernel/runtime/vecexecutor/vecnode/veccstore.cpp b/src/gausskernel/runtime/vecexecutor/vecnode/veccstore.cpp
index 5ea616bec..6297612aa 100644
--- a/src/gausskernel/runtime/vecexecutor/vecnode/veccstore.cpp
+++ b/src/gausskernel/runtime/vecexecutor/vecnode/veccstore.cpp
@@ -482,7 +482,7 @@ void InitCStoreRelation(CStoreScanState* node, EState* estate, bool idx_flag, Re
Oid tbl_part_id = InvalidOid;
int part_seq = lfirst_int(cell);
- tbl_part_id = getPartitionOidFromSequence(curr_rel, part_seq, plan->pruningInfo->partMap);
+ tbl_part_id = getPartitionOidFromSequence(curr_rel, part_seq, INVALID_PARTITION_NO);
part = partitionOpen(curr_rel, tbl_part_id, lock_mode);
node->partitions = lappend(node->partitions, part);
}
@@ -533,7 +533,7 @@ void InitCStoreRelation(CStoreScanState* node, EState* estate, bool idx_flag, Re
Oid tbl_part_id = InvalidOid;
int part_seq = lfirst_int(cell);
- tbl_part_id = getPartitionOidFromSequence(parent_rel, part_seq, plan->pruningInfo->partMap);
+ tbl_part_id = getPartitionOidFromSequence(parent_rel, part_seq, INVALID_PARTITION_NO);
part = partitionOpen(parent_rel, tbl_part_id, lock_mode);
Oid part_idx_oid = getPartitionIndexOid(plan->scanrelid, part->pd_id);
Assert(OidIsValid(part_idx_oid));
diff --git a/src/gausskernel/runtime/vecexecutor/vecnode/vecstream.cpp b/src/gausskernel/runtime/vecexecutor/vecnode/vecstream.cpp
index 488689913..9e161fa5d 100755
--- a/src/gausskernel/runtime/vecexecutor/vecnode/vecstream.cpp
+++ b/src/gausskernel/runtime/vecexecutor/vecnode/vecstream.cpp
@@ -437,8 +437,11 @@ VecStreamState* ExecInitVecStream(Stream* node, EState* estate, int eflags)
state->vector_output = true;
state->StreamScan = ScanStreamByLibcomm;
- if (STREAM_IS_LOCAL_NODE(node->smpDesc.distriType))
+ if (STREAM_IS_LOCAL_NODE(node->smpDesc.distriType)) {
state->StreamScan = ScanMemoryStream;
+ /* local stream do not support merge sort */
+ ((Stream*)(state->ss.ps.plan))->sort = NULL;
+ }
state->StreamDeserialize = get_batch_from_conn_buffer;
diff --git a/src/gausskernel/runtime/vecexecutor/vecnode/vectsstorescan.cpp b/src/gausskernel/runtime/vecexecutor/vecnode/vectsstorescan.cpp
index 5af737720..b8237e98f 100644
--- a/src/gausskernel/runtime/vecexecutor/vecnode/vectsstorescan.cpp
+++ b/src/gausskernel/runtime/vecexecutor/vecnode/vectsstorescan.cpp
@@ -114,15 +114,19 @@ static void init_tsstore_relation(TsStoreScanState* node, EState* estate)
resultPlan = plan->pruningInfo;
}
- ListCell* cell = NULL;
+ ListCell* cell1 = NULL;
+ ListCell* cell2 = NULL;
List* part_seqs = resultPlan->ls_rangeSelectedPartitions;
+ List* partitionnos = resultPlan->ls_selectedPartitionnos;
+ Assert(list_length(part_seqs) == list_length(partitionnos));
/* partitions info is initialized */
- foreach (cell, part_seqs) {
+ forboth (cell1, part_seqs, cell2, partitionnos) {
Oid tablepartitionid = InvalidOid;
- int partSeq = lfirst_int(cell);
+ int partSeq = lfirst_int(cell1);
+ int partitionno = lfirst_int(cell2);
- tablepartitionid = getPartitionOidFromSequence(currentRelation, partSeq, plan->pruningInfo->partMap);
- part = partitionOpen(currentRelation, tablepartitionid, lockmode);
+ tablepartitionid = getPartitionOidFromSequence(currentRelation, partSeq, partitionno);
+ part = PartitionOpenWithPartitionno(currentRelation, tablepartitionid, partitionno, lockmode);
node->partitions = lappend(node->partitions, part);
}
diff --git a/src/gausskernel/storage/access/common/reloptions.cpp b/src/gausskernel/storage/access/common/reloptions.cpp
index 4c9abd30e..46c8611ef 100644
--- a/src/gausskernel/storage/access/common/reloptions.cpp
+++ b/src/gausskernel/storage/access/common/reloptions.cpp
@@ -249,6 +249,7 @@ static relopt_int intRelOpts[] = {
0,
0,
7},
+ {{ "collate", "set relation default collation", RELOPT_KIND_HEAP }, 0, 0, 2000000000 },
/* list terminator */
{{NULL}}
};
@@ -1984,7 +1985,8 @@ bytea *default_reloptions(Datum reloptions, bool validate, relopt_kind kind)
offsetof(StdRdOptions, compress) + offsetof(PageCompressOpts, compressByteConvert)},
{ "compress_diff_convert", RELOPT_TYPE_BOOL,
offsetof(StdRdOptions, compress) + offsetof(PageCompressOpts, compressDiffConvert)},
- { "check_option", RELOPT_TYPE_STRING, offsetof(StdRdOptions, check_option_offset)}
+ { "check_option", RELOPT_TYPE_STRING, offsetof(StdRdOptions, check_option_offset)},
+ { "collate", RELOPT_TYPE_INT, offsetof(StdRdOptions, collate)}
};
options = parseRelOptions(reloptions, validate, kind, &numoptions);
@@ -2649,6 +2651,33 @@ void ForbidUserToSetCompressedOptions(List *options)
}
}
+void check_collate_in_options(List *user_options)
+{
+ ListCell *opt = NULL;
+ HeapTuple tp;
+
+ foreach(opt, user_options) {
+ DefElem *def = (DefElem *)lfirst(opt);
+
+ if (pg_strcasecmp(def->defname, "collate") == 0) {
+ Oid collate = intVal(def->arg);
+ if (!DB_IS_CMPT(B_FORMAT))
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ (errmsg("Un-support feature"),
+ errdetail("Forbid to set or change \"%s\" in non-B format", "collate"))));
+
+ if (!COLLATION_IN_B_FORMAT(collate))
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("this collation cannot be specified here")));
+ tp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collate));
+ if (!HeapTupleIsValid(tp))
+ ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED),
+ errmsg("cache lookup failed for collation %u", collate)));
+ ReleaseSysCache(tp);
+ }
+ }
+}
+
/*
* @Description: forbid to change inner option
* inner options only can be used by system itself.
@@ -2669,6 +2698,7 @@ void ForbidOutUsersToSetInnerOptions(List *userOptions)
errdetail("Forbid to set or change inner option \"%s\"", innnerOpts[firstInvalidOpt])));
}
}
+ check_collate_in_options(userOptions);
}
void ForbidUserToSetDefinedIndexOptions(List *options)
diff --git a/src/gausskernel/storage/access/common/tupdesc.cpp b/src/gausskernel/storage/access/common/tupdesc.cpp
index 120ca4f36..c0c62b849 100644
--- a/src/gausskernel/storage/access/common/tupdesc.cpp
+++ b/src/gausskernel/storage/access/common/tupdesc.cpp
@@ -34,6 +34,9 @@
#include "utils/syscache.h"
#include "pgxc/pgxc.h"
#include "utils/lsyscache.h"
+#include "mb/pg_wchar.h"
+#include "parser/parse_utilcmd.h"
+#include "catalog/gs_utf8_collation.h"
/*
* CreateTemplateTupleDesc
@@ -328,6 +331,7 @@ void FreeTupleDesc(TupleDesc tupdesc, bool need_check)
pfree(check);
}
pfree_ext(tupdesc->constr->clusterKeys);
+ pfree_ext(tupdesc->constr->cons_autoinc);
pfree(tupdesc->constr);
tupdesc->constr = NULL;
}
@@ -514,6 +518,11 @@ bool equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
if (strcmp(NameStr(attr1->attname), NameStr(attr2->attname)) != 0) {
return false;
}
+
+ if (attr1->attnum != attr2->attnum) {
+ return false;
+ }
+
const bool cl_skip = IsClientLogicType(attr1->atttypid) && (Oid)attr1->atttypmod == attr2->atttypid;
if (attr1->atttypid != attr2->atttypid && !cl_skip) {
return false;
@@ -940,7 +949,7 @@ static void BlockColumnRelOption(const char *tableFormat, const Oid atttypid, co
* TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
* later on.
*/
-TupleDesc BuildDescForRelation(List *schema, Node *orientedFrom, char relkind)
+TupleDesc BuildDescForRelation(List *schema, Node *orientedFrom, char relkind, Oid rel_coll_oid)
{
int natts;
AttrNumber attnum = 0;
@@ -982,6 +991,10 @@ TupleDesc BuildDescForRelation(List *schema, Node *orientedFrom, char relkind)
attname = entry->colname;
typenameTypeIdAndMod(NULL, entry->typname, &atttypid, &atttypmod);
+ attcollation = GetColumnDefCollation(NULL, entry, atttypid, rel_coll_oid);
+ if (DB_IS_CMPT(B_FORMAT)) {
+ atttypid = binary_need_transform_typeid(atttypid, &attcollation);
+ }
#ifndef ENABLE_MULTIPLE_NODES
/* don't allow package or procedure type as column type */
if (u_sess->plsql_cxt.curr_compile_context == NULL && IsPackageDependType(atttypid, InvalidOid)) {
@@ -1027,7 +1040,6 @@ TupleDesc BuildDescForRelation(List *schema, Node *orientedFrom, char relkind)
if (aclresult != ACLCHECK_OK)
aclcheck_error_type(aclresult, atttypid);
- attcollation = GetColumnDefCollation(NULL, entry, atttypid);
attdim = UpgradeAdaptAttr(atttypid, entry);
BlockColumnRelOption(tableFormat, atttypid, atttypmod);
diff --git a/src/gausskernel/storage/access/hash/hashfunc.cpp b/src/gausskernel/storage/access/hash/hashfunc.cpp
index 7d6221d93..a33973c69 100644
--- a/src/gausskernel/storage/access/hash/hashfunc.cpp
+++ b/src/gausskernel/storage/access/hash/hashfunc.cpp
@@ -29,6 +29,7 @@
#include "knl/knl_variable.h"
#include "access/hash.h"
+#include "catalog/gs_utf8_collation.h"
#ifdef PGXC
#include "catalog/pg_type.h"
@@ -164,9 +165,15 @@ Datum hashtext(PG_FUNCTION_ARGS)
{
text *key = PG_GETARG_TEXT_PP(0);
Datum result;
+ Oid collid = PG_GET_COLLATION();
FUNC_CHECK_HUGE_POINTER(false, key, "hashtext()");
+ if (is_b_format_collation(collid)) {
+ result = hash_text_by_builtin_colltions((unsigned char *)VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key), collid);
+ PG_FREE_IF_COPY(key, 0);
+ return result;
+ }
#ifdef PGXC
if (g_instance.attr.attr_sql.string_hash_compatible) {
result = hash_any((unsigned char *)VARDATA_ANY(key), bcTruelen(key));
diff --git a/src/gausskernel/storage/access/heap/heapam.cpp b/src/gausskernel/storage/access/heap/heapam.cpp
index 95b868800..bff00bf6e 100755
--- a/src/gausskernel/storage/access/heap/heapam.cpp
+++ b/src/gausskernel/storage/access/heap/heapam.cpp
@@ -9656,7 +9656,10 @@ Partition partitionOpenWithRetry(Relation relation, Oid partition_id, LOCKMODE l
Oid parentid = partid_get_parentid(partition_id);
if (!OidIsValid(parentid)) {
- ReportPartitionOpenError(relation, partition_id);
+ ereport(ERROR, (errcode(ERRCODE_PARTITION_ERROR),
+ errmsg("partition %u does not exist on relation \"%s\" when finding parent oid in retry mode",
+ partition_id, RelationGetRelationName(relation)),
+ errdetail("this partition may have already been dropped")));
}
if (RelationIsSubPartitioned(relation) && relation->rd_id != parentid) {
@@ -9741,26 +9744,32 @@ Partition partitionOpenWithRetry(Relation relation, Oid partition_id, LOCKMODE l
* : on the partiiton already.)
* Notes :
*/
-Partition partitionOpen(Relation relation, Oid partition_id, LOCKMODE lockmode, int2 bucket_id)
+Partition partitionOpen(Relation relation, Oid partitionOid, LOCKMODE lockmode, int2 bucket_id)
{
Partition p;
- if (!OidIsValid(partition_id)) {
- ereport(ERROR, (errcode(ERRCODE_RELATION_OPEN_ERROR), errmsg("partition %u is invalid", partition_id)));
+ if (!OidIsValid(partitionOid)) {
+ ereport(ERROR, (errcode(ERRCODE_PARTITION_ERROR),
+ errmsg("Partition oid %u is invalid when opening partition", partitionOid),
+ errdetail("There is a partition may have already been dropped on relation/partition \"%s\"",
+ RelationGetRelationName(relation))));
}
Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
Assert(PointerIsValid(relation));
Assert(bucket_id < SegmentBktId);
- Oid parentid = partid_get_parentid(partition_id);
- if (!OidIsValid(parentid)) {
- ReportPartitionOpenError(relation, partition_id);
+ Oid parentOid = partid_get_parentid(partitionOid);
+ if (!OidIsValid(parentOid)) {
+ ereport(ERROR, (errcode(ERRCODE_PARTITION_ERROR),
+ errmsg("partition %u does not exist on relation \"%s\" when find parent oid", partitionOid,
+ RelationGetRelationName(relation)),
+ errdetail("this partition may have already been dropped")));
}
- if (RelationIsSubPartitioned(relation) && relation->rd_id != parentid) {
- /* partition_id is subpartition oid */
- p = SubPartitionOidGetPartition(relation, partition_id, lockmode);
+ if (RelationIsSubPartitioned(relation) && relation->rd_id != parentOid) {
+ /* partitionOid is subpartition oid */
+ p = SubPartitionOidGetPartition(relation, partitionOid, lockmode);
Assert(relation->rd_id == partid_get_parentid(p->pd_part->parentid));
return p;
}
@@ -9778,27 +9787,27 @@ Partition partitionOpen(Relation relation, Oid partition_id, LOCKMODE lockmode,
* assume the partition is in PART_AREA_RANGE, if we support interval partition,
* we have to find a quick way to find the area it belongs to.
*/
- LockPartition(relation->rd_id, partition_id, lockmode, PARTITION_LOCK);
+ LockPartition(relation->rd_id, partitionOid, lockmode, PARTITION_LOCK);
} else if (relation->rd_rel->relkind == RELKIND_INDEX) {
- LockPartition(relation->rd_id, partition_id, lockmode, PARTITION_LOCK);
+ LockPartition(relation->rd_id, partitionOid, lockmode, PARTITION_LOCK);
} else {
ereport(ERROR,
(errcode(ERRCODE_RELATION_OPEN_ERROR),
errmsg("openning partition %u, but relation %s %u is neither table nor index",
- partition_id,
+ partitionOid,
RelationGetRelationName(relation),
RelationGetRelid(relation))));
}
}
/* The partcache does all the real work... */
- p = PartitionIdGetPartition(partition_id, RelationGetStorageType(relation));
+ p = PartitionIdGetPartition(partitionOid, RelationGetStorageType(relation));
if (!PartitionIsValid(p)) {
- ereport(ERROR,
- (errcode(ERRCODE_RELATION_OPEN_ERROR),
- errmsg("partition %u does not exist", partition_id),
- errdetail("this partition may have already been dropped")));
+ ereport(ERROR, (errcode(ERRCODE_PARTITION_ERROR),
+ errmsg("partition %u does not exist on relation \"%s\" when get the partcache", partitionOid,
+ RelationGetRelationName(relation)),
+ errdetail("this partition may have already been dropped")));
}
if (p->xmin_csn != InvalidCommitSeqNo && ActiveSnapshotSet()) {
@@ -9806,7 +9815,7 @@ Partition partitionOpen(Relation relation, Oid partition_id, LOCKMODE lockmode,
if (p->xmin_csn > snapshot->snapshotcsn) {
ereport(ERROR,
(errcode(ERRCODE_SNAPSHOT_INVALID),
- errmsg("current snapshot is invalid for this partition : %u.", partition_id)));
+ errmsg("current snapshot is invalid for this partition : %u.", partitionOid)));
}
}
@@ -9983,6 +9992,41 @@ Partition tryPartitionOpen(Relation relation, Oid partition_id, LOCKMODE lockmod
return p;
}
+/*
+ * Open a partition by OID, retrying the search with partitionno if needed.
+ * If the partition entry has already been removed by concurrent DDL operations, we use partitionno to re-search the new entry.
+ * Must make sure the partitionno belongs to the old partition entry, otherwise a wrong entry may be found!
+ * If the partitionno is invalid, this function degenerates to a plain partitionOpen.
+ */
+Partition PartitionOpenWithPartitionno(Relation relation, Oid partition_id, int partitionno, LOCKMODE lockmode)
+{
+ Partition part = NULL;
+ bool issubpartition = false;
+ char parttype;
+ Oid newpartOid = InvalidOid;
+
+ /* first try open the partition */
+ part = tryPartitionOpen(relation, partition_id, lockmode);
+ if (likely(PartitionIsValid(part))) {
+ return part;
+ }
+
+ if (!PARTITIONNO_IS_VALID(partitionno)) {
+ ReportPartitionOpenError(relation, partition_id);
+ }
+
+ PARTITION_LOG(
+ "partition %u does not exist on relation \"%s\", we will try to use partitionno %d to search the new partition",
+ partition_id, RelationGetRelationName(relation), partitionno);
+
+ /* if not found, search the new partition with partitionno */
+ issubpartition = RelationIsPartitionOfSubPartitionTable(relation);
+ parttype = issubpartition ? PART_OBJ_TYPE_TABLE_SUB_PARTITION : PART_OBJ_TYPE_TABLE_PARTITION;
+ newpartOid = GetPartOidWithPartitionno(RelationGetRelid(relation), partitionno, parttype);
+
+ return partitionOpen(relation, newpartOid, lockmode);
+}
+
/*
* @brief: close the partiiton
* If lockmode is not "NoLock", we then release the specified lock.
diff --git a/src/gausskernel/storage/access/heap/hio.cpp b/src/gausskernel/storage/access/heap/hio.cpp
index 87007e489..fb71d5a7e 100644
--- a/src/gausskernel/storage/access/heap/hio.cpp
+++ b/src/gausskernel/storage/access/heap/hio.cpp
@@ -384,7 +384,7 @@ Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer other_buffe
* Blocks that extended one by one are different from bulk-extend blocks, and
* are not recorded into FSM. As its creator session close this realtion, they
* can not be used by any other body. It is especially obvious for partition
- * bulk insert. Here, if no avaiable found in FSM, we check the last block to
+ * bulk insert. Here, if no available found in FSM, we check the last block to
* reuse the 'leaked free space' mentioned earlier.
*/
bool test_last_block = false;
diff --git a/src/gausskernel/storage/access/index/genam.cpp b/src/gausskernel/storage/access/index/genam.cpp
index 3ce68686b..6d51be68f 100644
--- a/src/gausskernel/storage/access/index/genam.cpp
+++ b/src/gausskernel/storage/access/index/genam.cpp
@@ -23,6 +23,7 @@
#include "access/relscan.h"
#include "access/transam.h"
#include "catalog/index.h"
+#include "catalog/pg_partition_fn.h"
#include "miscadmin.h"
#include "storage/buf/bufmgr.h"
#include "utils/acl.h"
@@ -598,6 +599,14 @@ static bool GPIInsertFakeParentRelCacheForSubpartition(GPIScanDesc gpiScan, Memo
HTAB* fakeRels = gpiScan->fakeRelationTable;
Relation parentRel = gpiScan->parentRelation;
Oid parentPartOid = partid_get_parentid(gpiScan->currPartOid);
+ if (!OidIsValid(parentPartOid)) {
+ if (PartitionGetMetadataStatus(gpiScan->currPartOid, false) != PART_METADATA_INVISIBLE) {
+ return false;
+ }
+ Oid newpartOid = InvisiblePartidGetNewPartid(gpiScan->currPartOid);
+ parentPartOid = partid_get_parentid(newpartOid);
+ }
+
if (OidIsValid(parentPartOid) && parentPartOid != parentRel->rd_id) {
PartRelIdCacheKey fakeRelKey = {parentPartOid, InvalidBktId};
Partition parentPartition = NULL;
@@ -606,8 +615,8 @@ static bool GPIInsertFakeParentRelCacheForSubpartition(GPIScanDesc gpiScan, Memo
/* add current parentRel into fakeRelationTable */
Oid baseRelOid = partid_get_parentid(parentPartOid);
Relation baseRel = relation_open(baseRelOid, lmode);
- res = trySearchFakeReationForPartitionOid(&fakeRels, cxt, baseRel, parentPartOid, &parentRel,
- &parentPartition, lmode);
+ res = trySearchFakeReationForPartitionOid(&fakeRels, cxt, baseRel, parentPartOid, INVALID_PARTITION_NO,
+ &parentRel, &parentPartition, lmode);
relation_close(baseRel, NoLock);
}
if (res) {
@@ -631,15 +640,17 @@ static bool GPIInsertFakeRelCache(GPIScanDesc gpiScan, MemoryContext cxt, LOCKMO
Relation parentRel = gpiScan->parentRelation;
/* Save search fake relation in gpiScan->fakeRelation */
- res = trySearchFakeReationForPartitionOid(&fakeRels, cxt, parentRel, currPartOid, &gpiScan->fakePartRelation,
- &partition, lmode);
+ res = trySearchFakeReationForPartitionOid(&fakeRels, cxt, parentRel, currPartOid, INVALID_PARTITION_NO,
+ &gpiScan->fakePartRelation, &partition, lmode);
- if (res) {
- /* save partition */
- gpiScan->partition = partition;
- } else {
- ReportPartitionOpenError(parentRel, currPartOid);
+ if (!res) {
+ ereport(ERROR, (errcode(ERRCODE_PARTITION_ERROR),
+ errmsg("partition %u does not exist on relation \"%s\" when search the fake relation for GPI",
+ currPartOid, RelationGetRelationName(parentRel)),
+ errdetail("this partition may have already been dropped")));
}
+ /* save partition */
+ gpiScan->partition = partition;
return res;
}
diff --git a/src/gausskernel/storage/access/nbtree/nbtinsert.cpp b/src/gausskernel/storage/access/nbtree/nbtinsert.cpp
index ee629b7c1..b31ccfccc 100644
--- a/src/gausskernel/storage/access/nbtree/nbtinsert.cpp
+++ b/src/gausskernel/storage/access/nbtree/nbtinsert.cpp
@@ -2378,8 +2378,12 @@ static void _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel)
if (ItemIdIsDead(itemId))
deletable[ndeletable++] = offnum;
}
- if (ndeletable > 0)
+ if (ndeletable > 0) {
+ if (RelationNeedsWAL(rel) && XLogIsNeeded() && TransactionIdIsValid(u_sess->utils_cxt.RecentGlobalXmin)) {
+ (void)log_heap_cleanup_info(&(rel->rd_node), u_sess->utils_cxt.RecentGlobalXmin);
+ }
_bt_delitems_delete(rel, buffer, deletable, ndeletable, heapRel);
+ }
/*
* Note: if we didn't find any LP_DEAD items, then the page's
* BTP_HAS_GARBAGE hint bit is falsely set. We do not bother expending a
diff --git a/src/gausskernel/storage/access/redo/redo_ubtxlog.cpp b/src/gausskernel/storage/access/redo/redo_ubtxlog.cpp
index 574240f3e..25d89da50 100644
--- a/src/gausskernel/storage/access/redo/redo_ubtxlog.cpp
+++ b/src/gausskernel/storage/access/redo/redo_ubtxlog.cpp
@@ -1286,7 +1286,8 @@ void UBTreeXlogPrunePageOperatorPage(RedoBufferInfo* buffer, void* recorddata)
Page page = buffer->pageinfo.page;
/* Set up flags and try to repair page fragmentation */
- UBTreePagePruneExecute(page, (OffsetNumber *)(((char *)xlrec) + SizeOfUBTreePrunePage), xlrec->count, NULL);
+ UBTreePagePruneExecute(page, (OffsetNumber *)(((char *)xlrec) + SizeOfUBTreePrunePage), xlrec->count, NULL,
+ InvalidTransactionId);
UBTreePageRepairFragmentation(NULL, buffer->blockinfo.blkno, page);
diff --git a/src/gausskernel/storage/access/rmgrdesc/smgrdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/smgrdesc.cpp
index 338199c07..f045cbc5a 100644
--- a/src/gausskernel/storage/access/rmgrdesc/smgrdesc.cpp
+++ b/src/gausskernel/storage/access/rmgrdesc/smgrdesc.cpp
@@ -37,6 +37,7 @@ void smgr_desc(StringInfo buf, XLogReaderState *record)
{
char *rec = XLogRecGetData(record);
uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
+ bool compress = (bool)(XLogRecGetInfo(record) & XLR_REL_COMPRESS);
if (info == XLOG_SMGR_CREATE) {
xl_smgr_create *xlrec = (xl_smgr_create *)rec;
RelFileNode rnode;
@@ -44,7 +45,7 @@ void smgr_desc(StringInfo buf, XLogReaderState *record)
rnode.opt = GetCreateXlogFileNodeOpt(record);
char *path = relpathperm(rnode, xlrec->forkNum);
- appendStringInfo(buf, "file create: %s", path);
+ appendStringInfo(buf, "file create: %s%s", path, compress ? COMPRESS_STR : "");
#ifdef FRONTEND
free(path);
path = NULL;
@@ -58,7 +59,7 @@ void smgr_desc(StringInfo buf, XLogReaderState *record)
rnode.opt = GetTruncateXlogFileNodeOpt(record);
char *path = relpathperm(rnode, MAIN_FORKNUM);
- appendStringInfo(buf, "file truncate: %s to %u blocks", path, xlrec->blkno);
+ appendStringInfo(buf, "file truncate: %s%s to %u blocks", path, compress ? COMPRESS_STR : "", xlrec->blkno);
#ifdef FRONTEND
free(path);
path = NULL;
diff --git a/src/gausskernel/storage/access/transam/cbmparsexlog.cpp b/src/gausskernel/storage/access/transam/cbmparsexlog.cpp
index 3dd5b7279..f6422148d 100644
--- a/src/gausskernel/storage/access/transam/cbmparsexlog.cpp
+++ b/src/gausskernel/storage/access/transam/cbmparsexlog.cpp
@@ -70,7 +70,6 @@ static const char *const bmp_file_name_template = "%s%s%lu_%08X%08X_%08X%08X.cbm
static const char *const merged_bmp_file_name_template = "%s%s%08X%08X_%08X%08X_%ld-%d.cbm";
static void CBMFileHomeInitialize(void);
-static void ResetXlogCbmSys(void);
static bool IsCBMFile(const char *fileName, uint64 *seqNum, XLogRecPtr *startLSN, XLogRecPtr *endLSN);
static void ValidateCBMFile(const char *filename, XLogRecPtr *trackedLSN, uint64 *lastfileSize, bool truncErrPage);
static bool ReadCBMPage(BitmapFile *cbmFile, char *page, bool *checksum_ok);
@@ -214,7 +213,7 @@ static void CBMFileHomeInitialize(void)
ereport(FATAL, (errmsg("Length of absolute CBM file path would exceed MAXPGPATH!")));
}
-static void ResetXlogCbmSys(void)
+extern void ResetXlogCbmSys(void)
{
int rc = 0;
@@ -428,9 +427,13 @@ static void ValidateCBMFile(const char *filename, XLogRecPtr *trackedLSN, uint64
*lastfileSize = (off_t)0;
else {
if (cbmFile.offset < st.st_size && truncErrPage) {
- if (ftruncate(cbmFile.fd, cbmFile.offset))
+ if (ftruncate(cbmFile.fd, cbmFile.offset)) {
+ if (close(cbmFile.fd))
+ ereport(WARNING, (errcode_for_file_access(), errmsg("could not close CBM file \"%s\": %m", filePath)));
+
ereport(ERROR, (errcode_for_file_access(),
errmsg("Failed to truncate CBM file \"%s\" to length %ld", filePath, cbmFile.offset)));
+ }
}
*lastfileSize = cbmFile.offset;
@@ -605,6 +608,7 @@ static void StartNextCBMFile(XLogRecPtr startLSN)
ereport(ERROR, (errcode_for_file_access(),
errmsg("could not create new CBM file \"%s\": %m", t_thrd.cbm_cxt.XlogCbmSys->out.name)));
+ Assert(t_thrd.cbm_cxt.XlogCbmSys->out.fd == -1);
t_thrd.cbm_cxt.XlogCbmSys->out.fd = fd;
t_thrd.cbm_cxt.XlogCbmSys->out.size = 0;
t_thrd.cbm_cxt.XlogCbmSys->out.offset = (off_t)0;
@@ -619,6 +623,7 @@ static void StartExistCBMFile(uint64 lastfileSize)
ereport(ERROR, (errcode_for_file_access(),
errmsg("could not open CBM file \"%s\": %m", t_thrd.cbm_cxt.XlogCbmSys->out.name)));
+ Assert(t_thrd.cbm_cxt.XlogCbmSys->out.fd == -1);
t_thrd.cbm_cxt.XlogCbmSys->out.fd = fd;
t_thrd.cbm_cxt.XlogCbmSys->out.size = lastfileSize;
t_thrd.cbm_cxt.XlogCbmSys->out.offset = (off_t)lastfileSize;
@@ -2004,6 +2009,7 @@ static void RotateCBMFile(void)
if (close(t_thrd.cbm_cxt.XlogCbmSys->out.fd) != 0)
ereport(ERROR, (errcode_for_file_access(),
errmsg("close CBM file \"%s\" failed during rotate", t_thrd.cbm_cxt.XlogCbmSys->out.name)));
+ t_thrd.cbm_cxt.XlogCbmSys->out.fd = -1;
if (strncmp(t_thrd.cbm_cxt.XlogCbmSys->out.name, t_thrd.cbm_cxt.XlogCbmSys->cbmFileHome,
strlen(t_thrd.cbm_cxt.XlogCbmSys->cbmFileHome)) ||
diff --git a/src/gausskernel/storage/access/transam/extreme_rto/page_redo.cpp b/src/gausskernel/storage/access/transam/extreme_rto/page_redo.cpp
index 0df2493a1..1262eb4c6 100755
--- a/src/gausskernel/storage/access/transam/extreme_rto/page_redo.cpp
+++ b/src/gausskernel/storage/access/transam/extreme_rto/page_redo.cpp
@@ -570,12 +570,12 @@ bool BatchRedoDistributeItems(void **eleArry, uint32 eleNum)
RedoItem *item = (RedoItem *)eleArry[i];
UpdateRecordGlobals(item, g_redoWorker->standbyState);
CountAndGetRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_3],
- g_redoWorker->timeCostList[TIME_COST_STEP_6]);
+ g_redoWorker->timeCostList[TIME_COST_STEP_4]);
do {
parsecomplete = BatchRedoParseItemAndDispatch(item);
RedoInterruptCallBack();
} while (parsecomplete);
- CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]);
+ CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_4]);
DereferenceRedoItem(item);
}
}
@@ -1894,29 +1894,35 @@ void XLogForceFinish(XLogReaderState *xlogreader, TermFileData *term_file)
(errcode(ERRCODE_LOG), errmsg("[ForceFinish]ArchiveXlogForForceFinishRedo in extremeRTO is over")));
}
+static void DoCleanUpReadPageWorkerQueue(SPSCBlockingQueue *queue)
+{
+ while (!SPSCBlockingQueueIsEmpty(queue)) {
+ XLogReaderState *xlogreader = reinterpret_cast<XLogReaderState *>(SPSCBlockingQueueTake(queue));
+ if (xlogreader == reinterpret_cast<XLogReaderState *>(&(g_redoEndMark.record)) ||
+ xlogreader == reinterpret_cast<XLogReaderState *>(&(g_GlobalLsnForwarder.record)) ||
+ xlogreader == reinterpret_cast<XLogReaderState *>(&(g_cleanupMark.record))) {
+ if (xlogreader == reinterpret_cast<XLogReaderState *>(&(g_GlobalLsnForwarder.record))) {
+ pg_atomic_write_u32(&g_GlobalLsnForwarder.record.refcount, 0);
+ }
+ continue;
+ }
+
+ RedoItem *item = GetRedoItemPtr(xlogreader);
+ FreeRedoItem(item);
+ }
+}
+
void CleanUpReadPageWorkerQueue()
{
SPSCBlockingQueue *queue = g_dispatcher->readLine.readPageThd->queue;
uint32 state;
do {
- while (!SPSCBlockingQueueIsEmpty(queue)) {
- XLogReaderState *xlogreader = reinterpret_cast<XLogReaderState *>(SPSCBlockingQueueTake(queue));
- if (xlogreader == reinterpret_cast<XLogReaderState *>(&(g_redoEndMark.record)) ||
- xlogreader == reinterpret_cast<XLogReaderState *>(&(g_GlobalLsnForwarder.record)) ||
- xlogreader == reinterpret_cast<XLogReaderState *>(&(g_cleanupMark.record))) {
- if (xlogreader == reinterpret_cast<XLogReaderState *>(&(g_GlobalLsnForwarder.record))) {
- pg_atomic_write_u32(&g_GlobalLsnForwarder.record.refcount, 0);
- }
- continue;
- }
-
- RedoItem *item = GetRedoItemPtr(xlogreader);
- FreeRedoItem(item);
- }
-
+ DoCleanUpReadPageWorkerQueue(queue);
RedoInterruptCallBack();
state = pg_atomic_read_u32(&extreme_rto::g_dispatcher->rtoXlogBufState.readPageWorkerState);
} while (state != WORKER_STATE_EXIT);
+ /* Processing the state change after the queue is cleared */
+ DoCleanUpReadPageWorkerQueue(queue);
}
void ExtremeRtoStopHere()
@@ -2588,6 +2594,7 @@ static void InitGlobals()
t_thrd.xlog_cxt.InRecovery = g_redoWorker->InRecovery;
t_thrd.xlog_cxt.ArchiveRestoreRequested = g_redoWorker->ArchiveRestoreRequested;
t_thrd.xlog_cxt.minRecoveryPoint = g_redoWorker->minRecoveryPoint;
+ t_thrd.xlog_cxt.curFileTLI = t_thrd.xlog_cxt.ThisTimeLineID;
}
void WaitRedoWorkersQueueEmpty()
diff --git a/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp b/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp
index 2806d1ffb..5464e7ccb 100755
--- a/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp
+++ b/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp
@@ -1304,7 +1304,11 @@ static bool DispatchHeap2VacuumRecord(XLogReaderState *record, List *expectedTLI
bool isNeedFullSync = false;
uint8 info = ((XLogRecGetInfo(record) & (~XLR_INFO_MASK)) & XLOG_HEAP_OPMASK);
if (info == XLOG_HEAP2_CLEANUP_INFO) {
- DispatchTxnRecord(record, expectedTLIs, recordXTime, false);
+ xl_heap_cleanup_info* xlrec = (xl_heap_cleanup_info*)XLogRecGetData(record);
+ RelFileNode tmp_node;
+ RelFileNodeCopy(tmp_node, xlrec->node, (int2)XLogRecGetBucketId(record));
+
+ DispatchToOnePageWorker(record, tmp_node, expectedTLIs);
} else {
DispatchRecordWithPages(record, expectedTLIs, SUPPORT_FPAGE_DISPATCH);
}
@@ -1474,6 +1478,11 @@ static bool StandbyWillChangeStandbyState(XLogReaderState *record)
return true;
}
+ if ((XLogRecGetRmid(record) == RM_STANDBY_ID) &&
+ ((XLogRecGetInfo(record) & (~XLR_INFO_MASK)) == XLOG_STANDBY_LOCK)) {
+ return true;
+ }
+
return false;
}
diff --git a/src/gausskernel/storage/access/transam/single_double_write.cpp b/src/gausskernel/storage/access/transam/single_double_write.cpp
index 7e6b0a9b1..886cfa074 100644
--- a/src/gausskernel/storage/access/transam/single_double_write.cpp
+++ b/src/gausskernel/storage/access/transam/single_double_write.cpp
@@ -238,6 +238,11 @@ static void dw_recovery_first_version_page()
rc = memcpy_s(&flush_item, sizeof(dw_first_flush_item), dw_block + pghr->pd_lower, sizeof(dw_first_flush_item));
securec_check(rc, "\0", "\0");
+ if (unlikely((long)(t_thrd.proc->workingVersionNum < PAGE_COMPRESSION_VERSION))) {
+ BufferTagSecondVer *old_buf_tag = (BufferTagSecondVer *)(void *)&flush_item.buf_tag;
+ flush_item.buf_tag.rnode.bucketNode = (int2)old_buf_tag->rnode.bucketNode;
+ flush_item.buf_tag.rnode.opt = 0;
+ }
if (!dw_verify_pg_checksum((PageHeader)dw_block, flush_item.buf_tag.blockNum, true)) {
if (PageIsNew(dw_block)) {
@@ -689,6 +694,11 @@ uint16 first_version_dw_single_flush(BufferDesc *buf_desc)
item.dwn = file_head->head.dwn;
item.buf_tag = phy_tag;
+ if (unlikely((long)(t_thrd.proc->workingVersionNum < PAGE_COMPRESSION_VERSION))) {
+ BufferTagSecondVer *old_buf_tag = (BufferTagSecondVer *)(void *)&item.buf_tag;
+ old_buf_tag->rnode.bucketNode = phy_tag.rnode.bucketNode;
+ }
+
pghr = (PageHeader)buf;
rc = memcpy_s(buf + pghr->pd_lower, sizeof(dw_first_flush_item), &item, sizeof(dw_first_flush_item));
diff --git a/src/gausskernel/storage/access/transam/twophase.cpp b/src/gausskernel/storage/access/transam/twophase.cpp
index de1e6abfd..8f2b68aea 100644
--- a/src/gausskernel/storage/access/transam/twophase.cpp
+++ b/src/gausskernel/storage/access/transam/twophase.cpp
@@ -721,6 +721,8 @@ static void MarkAsPreparingGuts(GTM_TransactionHandle handle, GlobalTransaction
pgxact->handle = handle;
pgxact->xid = xid;
pgxact->xmin = InvalidTransactionId;
+ proc->snapXmax = InvalidTransactionId;
+ proc->snapCSN = InvalidCommitSeqNo;
pgxact->csn_min = InvalidCommitSeqNo;
pgxact->csn_dr = InvalidCommitSeqNo;
pgxact->delayChkpt = false;
diff --git a/src/gausskernel/storage/access/transam/xact.cpp b/src/gausskernel/storage/access/transam/xact.cpp
index 6060c10e2..de1c40490 100755
--- a/src/gausskernel/storage/access/transam/xact.cpp
+++ b/src/gausskernel/storage/access/transam/xact.cpp
@@ -1402,6 +1402,16 @@ void XLogInsertStandbyCSNCommitting(TransactionId xid, CommitSeqNo csn, Transact
XLogInsert(RM_STANDBY_ID, XLOG_STANDBY_CSN_COMMITTING);
}
+#ifndef ENABLE_MULTIPLE_NODES
+static inline void ResetPartitionLockInfo()
+{
+ list_free_ext(u_sess->storage_cxt.partition_dml_oids);
+ u_sess->storage_cxt.partition_dml_oids = NIL;
+ list_free_ext(u_sess->storage_cxt.partition_ddl_oids);
+ u_sess->storage_cxt.partition_ddl_oids = NIL;
+}
+#endif
+
/* ----------------------------------------------------------------
* CommitTransaction stuff
* ----------------------------------------------------------------
@@ -2511,6 +2521,10 @@ static void StartTransaction(bool begin_on_gtm)
#endif
ResetBCMArray();
+#ifndef ENABLE_MULTIPLE_NODES
+ ResetPartitionLockInfo();
+#endif
+
/*
* Get node group status and save in cache,
* if we are doing two phase commit, skip init cache.
@@ -2582,6 +2596,11 @@ static void CommitTransaction(bool STP_commit)
ShowTransactionState("CommitTransaction");
+#ifndef ENABLE_MULTIPLE_NODES
+ LockPartitionDDLOperation();
+ ResetPartitionLockInfo();
+#endif
+
/* Check relcache init flag */
if (needNewLocalCacheFile) {
ereport(WARNING, (errcode(ERRCODE_WARNING), errmsg("Wrong flag of relcache init flag at commit transaction.")));
@@ -3605,6 +3624,10 @@ static void AbortTransaction(bool PerfectRollback, bool STP_rollback)
TransactionId latestXid;
t_thrd.xact_cxt.bInAbortTransaction = true;
+#ifndef ENABLE_MULTIPLE_NODES
+ ResetPartitionLockInfo();
+#endif
+
/* clean stream snapshot register info */
ForgetRegisterStreamSnapshots();
@@ -7098,6 +7121,7 @@ void push_unlink_rel_to_hashtbl(ColFileNode *xnodes, int nrels)
entry->rnode.bucketNode = colFileNode.filenode.bucketNode;
entry->rnode.opt = colFileNode.filenode.opt;
entry->maxSegNo = -1;
+ entry->fileUnlink = false;
del_rel_num++;
}
BatchClearBadBlock(colFileNode.filenode, colFileNode.forknum, 0);
diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp
index 29b50b683..01bf8d383 100755
--- a/src/gausskernel/storage/access/transam/xlog.cpp
+++ b/src/gausskernel/storage/access/transam/xlog.cpp
@@ -13370,7 +13370,8 @@ static void KeepLogSeg(XLogRecPtr recptr, XLogSegNo *logSegNo, XLogRecPtr curIns
}
LWLockRelease(XlogRemoveSegLock);
}
- if (t_thrd.xlog_cxt.server_mode == STANDBY_MODE && t_thrd.xlog_cxt.is_hadr_main_standby) {
+ if (t_thrd.xlog_cxt.server_mode == STANDBY_MODE && t_thrd.xlog_cxt.is_hadr_main_standby &&
+ !g_instance.attr.attr_storage.dcf_attr.enable_dcf) {
XLogSegNo mainStandbySegNo = CalcRecycleSegNoForHadrMainStandby(recptr, segno, repl_slot_state.min_required);
if (mainStandbySegNo < segno && mainStandbySegNo > 0) {
segno = mainStandbySegNo;
@@ -13659,6 +13660,7 @@ void xlog_redo(XLogReaderState *record)
if (TransactionIdPrecedes(t_thrd.xact_cxt.ShmemVariableCache->nextXid, checkPoint.nextXid)) {
t_thrd.xact_cxt.ShmemVariableCache->nextXid = checkPoint.nextXid;
}
+ ExtendCSNLOG(checkPoint.nextXid);
LWLockRelease(XidGenLock);
LWLockAcquire(OidGenLock, LW_EXCLUSIVE);
t_thrd.xact_cxt.ShmemVariableCache->nextOid = checkPoint.nextOid;
@@ -13784,6 +13786,7 @@ void xlog_redo(XLogReaderState *record)
if (TransactionIdPrecedes(t_thrd.xact_cxt.ShmemVariableCache->nextXid, checkPoint.nextXid)) {
t_thrd.xact_cxt.ShmemVariableCache->nextXid = checkPoint.nextXid;
}
+ ExtendCSNLOG(checkPoint.nextXid);
LWLockRelease(XidGenLock);
/* ... but still treat OID counter as exact */
LWLockAcquire(OidGenLock, LW_EXCLUSIVE);
diff --git a/src/gausskernel/storage/access/ubtree/ubtinsert.cpp b/src/gausskernel/storage/access/ubtree/ubtinsert.cpp
index a94c72d81..386c1a959 100644
--- a/src/gausskernel/storage/access/ubtree/ubtinsert.cpp
+++ b/src/gausskernel/storage/access/ubtree/ubtinsert.cpp
@@ -222,8 +222,8 @@ bool UBTreePagePrune(Relation rel, Buffer buf, TransactionId oldestXmin, OidRBTr
if (npreviousDead > 0 || prstate.ndead > 0) {
/* Set up flags and try to repair page fragmentation */
WaitState oldStatus = pgstat_report_waitstatus(STATE_PRUNE_INDEX);
- UBTreePagePruneExecute(page, prstate.previousdead, npreviousDead, &prstate);
- UBTreePagePruneExecute(page, prstate.nowdead, prstate.ndead, &prstate);
+ UBTreePagePruneExecute(page, prstate.previousdead, npreviousDead, &prstate, oldestXmin);
+ UBTreePagePruneExecute(page, prstate.nowdead, prstate.ndead, &prstate, oldestXmin);
UBTreePageRepairFragmentation(rel, BufferGetBlockNumber(buf), page);
@@ -321,7 +321,8 @@ bool UBTreePruneItem(Page page, OffsetNumber offnum, TransactionId oldestXmin, I
/*
* UBTreePagePruneExecute() -- Traverse the nowdead array, set the corresponding tuples as dead.
*/
-void UBTreePagePruneExecute(Page page, OffsetNumber *nowdead, int ndead, IndexPruneState *prstate)
+void UBTreePagePruneExecute(Page page, OffsetNumber *nowdead, int ndead, IndexPruneState *prstate,
+ TransactionId oldest_xmin)
{
OffsetNumber *offnum = NULL;
int i;
@@ -339,8 +340,9 @@ void UBTreePagePruneExecute(Page page, OffsetNumber *nowdead, int ndead, IndexPr
TransactionId xid = ShortTransactionIdToNormal(opaque->xid_base, uxid->xmax);
if (TransactionIdIsNormal(xid)) {
- if (!TransactionIdIsValid(prstate->latestRemovedXid) ||
- TransactionIdPrecedes(prstate->latestRemovedXid, xid)) {
+ if ((!TransactionIdIsValid(prstate->latestRemovedXid) ||
+ TransactionIdPrecedes(prstate->latestRemovedXid, xid)) &&
+ TransactionIdPrecedes(xid, oldest_xmin)) {
/* update latestRemovedXid */
prstate->latestRemovedXid = xid;
}
diff --git a/src/gausskernel/storage/access/ubtree/ubtutils.cpp b/src/gausskernel/storage/access/ubtree/ubtutils.cpp
index af1702b96..8fbf0c786 100644
--- a/src/gausskernel/storage/access/ubtree/ubtutils.cpp
+++ b/src/gausskernel/storage/access/ubtree/ubtutils.cpp
@@ -33,8 +33,8 @@
static bool UBTreeVisibilityCheckWrap(IndexScanDesc scan, Page page, OffsetNumber offnum, bool *needRecheck);
static bool UBTreeVisibilityCheckXid(TransactionId xmin, TransactionId xmax, bool xminCommitted, bool xmaxCommitted,
- Snapshot snapshot, bool isUpsert = false);
-static bool UBTreeXidSatisfiesMVCC(TransactionId xid, bool committed, Snapshot snapshot);
+ Snapshot snapshot, Buffer buffer, bool isUpsert = false);
+static bool UBTreeXidSatisfiesMVCC(TransactionId xid, bool committed, Snapshot snapshot, Buffer buffer);
static int UBTreeKeepNatts(Relation rel, IndexTuple lastleft, IndexTuple firstright, BTScanInsert itupKey);
static bool UBTreeVisibilityCheckCid(IndexScanDesc scan, IndexTuple itup, bool *needRecheck);
static bool UBTreeItupEquals(IndexTuple itup1, IndexTuple itup2);
@@ -365,8 +365,10 @@ static bool UBTreeVisibilityCheckWrap(IndexScanDesc scan, Page page, OffsetNumbe
IndexTuple tuple = (IndexTuple)PageGetItem(page, iid);
isVisible = UBTreeVisibilityCheckCid(scan, tuple, needRecheck); /* need check cid */
} else {
+ BTScanOpaque so = (BTScanOpaque)scan->opaque;
+ Buffer buffer = so->currPos.buf;
isVisible = UBTreeVisibilityCheckXid(xmin, xmax, xminCommitted, xmaxCommitted,
- scan->xs_snapshot, scan->isUpsert);
+ scan->xs_snapshot, buffer, scan->isUpsert);
}
}
@@ -710,7 +712,7 @@ static bool UBTreeVisibilityCheckCid(IndexScanDesc scan, IndexTuple itup, bool *
* xminCommitted && xmaxCommitted are just hint: true means committed, but false may also be committed.
*/
static bool UBTreeVisibilityCheckXid(TransactionId xmin, TransactionId xmax, bool xminCommitted, bool xmaxCommitted,
- Snapshot snapshot, bool isUpsert)
+ Snapshot snapshot, Buffer buffer, bool isUpsert)
{
if (snapshot->satisfies == SNAPSHOT_DIRTY && isUpsert) {
bool xmaxVisible = xmaxCommitted || TransactionIdIsCurrentTransactionId(xmax);
@@ -730,10 +732,10 @@ static bool UBTreeVisibilityCheckXid(TransactionId xmin, TransactionId xmax, boo
/* handle snapshot MVCC */
if (snapshot->satisfies == SNAPSHOT_VERSION_MVCC || snapshot->satisfies == SNAPSHOT_MVCC) {
- if (UBTreeXidSatisfiesMVCC(xmax, xmaxCommitted, snapshot)) {
+ if (UBTreeXidSatisfiesMVCC(xmax, xmaxCommitted, snapshot, buffer)) {
return false; /* already deleted */
}
- if (!UBTreeXidSatisfiesMVCC(xmin, xminCommitted, snapshot)) {
+ if (!UBTreeXidSatisfiesMVCC(xmin, xminCommitted, snapshot, buffer)) {
return false; /* have not inserted yet */
}
}
@@ -749,7 +751,7 @@ static bool UBTreeVisibilityCheckXid(TransactionId xmin, TransactionId xmax, boo
/*
* BtXidSatisfiesMvcc() -- Check whether the xid is visible for the given MVCC snapshot.
*/
-static bool UBTreeXidSatisfiesMVCC(TransactionId xid, bool committed, Snapshot snapshot)
+static bool UBTreeXidSatisfiesMVCC(TransactionId xid, bool committed, Snapshot snapshot, Buffer buffer)
{
TransactionIdStatus ignore;
@@ -765,7 +767,7 @@ static bool UBTreeXidSatisfiesMVCC(TransactionId xid, bool committed, Snapshot s
*/
/* we can't tell visibility by snapshot's xmin/xmax alone, check snapshot */
- return XidVisibleInSnapshot(xid, snapshot, &ignore, InvalidBuffer, NULL);
+ return XidVisibleInSnapshot(xid, snapshot, &ignore, (RecoveryInProgress() ? buffer : InvalidBuffer), NULL);
}
/*
diff --git a/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp b/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp
index 4ebfe69c6..3a6334268 100644
--- a/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp
+++ b/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp
@@ -489,7 +489,7 @@ static char *ReachXlUndoHeaderEnd(XlUndoHeader *xlundohdr)
{
char *currLogPtr = ((char *)xlundohdr + SizeOfXLUndoHeader);
if ((xlundohdr->flag & XLOG_UNDO_HEADER_HAS_SUB_XACT) != 0) {
- currLogPtr += sizeof(bool);
+ currLogPtr += sizeof(bool);
}
if ((xlundohdr->flag & XLOG_UNDO_HEADER_HAS_BLK_PREV) != 0) {
currLogPtr += sizeof(UndoRecPtr);
@@ -579,6 +579,7 @@ void UHeapXlogDeleteOperatorPage(RedoBufferInfo *buffer, void *recorddata, Size
RowPtr *rp;
XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapDelete);
char *currLogPtr = ReachXlUndoHeaderEnd(xlundohdr);
+ Size shiftSize = currLogPtr - (char *)xlrec;
XlogUndoMeta *xlundometa = (XlogUndoMeta *)((char *)currLogPtr);
UndoRecPtr urecptr = xlundohdr->urecptr;
@@ -589,8 +590,8 @@ void UHeapXlogDeleteOperatorPage(RedoBufferInfo *buffer, void *recorddata, Size
* If the WAL stream contains undo tuple, then replace it with the
* explicitly stored tuple.
*/
- Size datalen = recordlen - SizeOfXLUndoHeader - SizeOfUHeapDelete - undoMetaSize - SizeOfUHeapHeader;
- char *data = (char *)xlrec + SizeOfUHeapDelete + SizeOfXLUndoHeader + undoMetaSize;
+ Size datalen = recordlen - shiftSize - undoMetaSize - SizeOfUHeapHeader;
+ char *data = currLogPtr + undoMetaSize;
/*
* If the WAL stream contains undo tuple, then replace it with the
diff --git a/src/gausskernel/storage/access/ustore/knl_uhio.cpp b/src/gausskernel/storage/access/ustore/knl_uhio.cpp
index 5794959d1..aef3c5d8e 100644
--- a/src/gausskernel/storage/access/ustore/knl_uhio.cpp
+++ b/src/gausskernel/storage/access/ustore/knl_uhio.cpp
@@ -56,7 +56,7 @@ Buffer RelationGetBufferForUTuple(Relation relation, Size len, Buffer otherBuffe
* Blocks that extended one by one are different from bulk-extend blocks, and
* are not recorded into FSM. As its creator session close this realtion, they
* can not be used by any other body. It is especially obvious for partition
- * bulk insert. Here, if no avaiable found in FSM, we check the last block to
+ * bulk insert. Here, if no available found in FSM, we check the last block to
* reuse the 'leaked free space' mentioned earlier.
*/
bool test_last_block = false;
diff --git a/src/gausskernel/storage/cstore/compression/cstore_compress.cpp b/src/gausskernel/storage/cstore/compression/cstore_compress.cpp
index 469b9d3c8..9d4627b62 100644
--- a/src/gausskernel/storage/cstore/compression/cstore_compress.cpp
+++ b/src/gausskernel/storage/cstore/compression/cstore_compress.cpp
@@ -894,7 +894,7 @@ static void DeltaPlusRLEv2_Check(char* rawData, char* cmprData, int cmprSize, in
#endif
/// get the bound size needed.
-/// the returned value is avaiable for both delta and rle methods.
+/// the returned value is available for both delta and rle methods.
int DeltaPlusRLEv2::GetBound()
{
if (DeltaCanBeApplied(&m_deltaBytes, m_minVal, m_maxVal, m_valueBytes)) {
@@ -1112,7 +1112,7 @@ static int ParseBitmap(char* buf, T* values, int nValues, T valueIfSet, T valueI
/// whole/complete bytes used for this bitmap
const uint32 bpWholeBytes = (uint32)nValues >> 3;
- /// avaiable bits in the last part-byte for this bitmap
+ /// available bits in the last part-byte for this bitmap
/// it may be 0 or others less than 8.
const uint32 bpRemainBits = (uint32)nValues & 0x07;
diff --git a/src/gausskernel/storage/cstore/cstore_delete.cpp b/src/gausskernel/storage/cstore/cstore_delete.cpp
index 1a5d045bd..fc704c493 100644
--- a/src/gausskernel/storage/cstore/cstore_delete.cpp
+++ b/src/gausskernel/storage/cstore/cstore_delete.cpp
@@ -30,6 +30,7 @@
#include "access/xact.h"
#include "catalog/catalog.h"
#include "catalog/indexing.h"
+#include "catalog/pg_partition_fn.h"
#include "catalog/pg_type.h"
#include "executor/executor.h"
#include "storage/cucache_mgr.h"
@@ -604,6 +605,7 @@ uint64 CStoreDelete::ExecDeleteForPartition()
m_estate->es_query_cxt,
m_relation,
curPartID,
+ INVALID_PARTITION_NO,
partFakeRel,
partition,
RowExclusiveLock);
@@ -627,6 +629,7 @@ uint64 CStoreDelete::ExecDeleteForPartition()
m_estate->es_query_cxt,
m_relation,
curPartID,
+ INVALID_PARTITION_NO,
partFakeRel,
partition,
RowExclusiveLock);
diff --git a/src/gausskernel/storage/cstore/cstore_rewrite.cpp b/src/gausskernel/storage/cstore/cstore_rewrite.cpp
index 2ca26339f..3f7378183 100644
--- a/src/gausskernel/storage/cstore/cstore_rewrite.cpp
+++ b/src/gausskernel/storage/cstore/cstore_rewrite.cpp
@@ -1851,7 +1851,7 @@ bool ATTraverseSrcPartitions(Relation relation, List* srcPartitions, char* destP
partName = strVal(lfirst(cell));
/* from name to partition oid */
- srcPartOid = partitionNameGetPartitionOid(relation->rd_id,
+ srcPartOid = PartitionNameGetPartitionOid(relation->rd_id,
partName,
PART_OBJ_TYPE_TABLE_PARTITION,
ExclusiveLock, // get ExclusiveLock lock on src partitions
@@ -1924,7 +1924,7 @@ void ATCreateTempTableForMerge(
partedTableRelOptions = (Datum)0;
}
- /* open the dest partition, it was already locked by partitionNameGetPartitionOid() call */
+ /* open the dest partition, it was already locked by PartitionNameGetPartitionOid() call */
destPart = partitionOpen(partTableRel, destPartOid, NoLock);
destPartRel = partitionGetRelation(partTableRel, destPart);
diff --git a/src/gausskernel/storage/file/fd.cpp b/src/gausskernel/storage/file/fd.cpp
old mode 100644
new mode 100755
index 632ec4407..7864942f4
--- a/src/gausskernel/storage/file/fd.cpp
+++ b/src/gausskernel/storage/file/fd.cpp
@@ -2026,6 +2026,7 @@ void FileWriteback(File file, off_t offset, off_t nbytes)
int FilePRead(File file, char* buffer, int amount, off_t offset, uint32 wait_event_info)
{
int returnCode;
+ int count = 0;
Assert(FileIsValid(file));
vfd *vfdcache = GetVfdCache();
@@ -2081,6 +2082,18 @@ retry:
/* OK to retry if interrupted */
if (errno == EINTR)
goto retry;
+ if (errno == EIO) {
+ if (count < EIO_RETRY_TIMES) {
+ count++;
+ ereport(WARNING, (errmsg("FilePRead: %d (%s) " INT64_FORMAT " %d \
+ failed, then retry: Input/Output ERROR",
+ file,
+ vfdcache[file].fileName,
+ (int64)vfdcache[file].seekPos,
+ amount)));
+ goto retry;
+ }
+ }
/* Trouble, so assume we don't know the file position anymore */
vfdcache[file].seekPos = FileUnknownPos;
@@ -2156,6 +2169,7 @@ int FileWrite(File file, const char* buffer, int amount, off_t offset, int fastE
int FilePWrite(File file, const char* buffer, int amount, off_t offset, uint32 wait_event_info, int fastExtendSize)
{
int returnCode;
+ int count = 0;
Assert(FileIsValid(file));
vfd *vfdcache = GetVfdCache();
@@ -2248,6 +2262,18 @@ retry:
/* OK to retry if interrupted */
if (errno == EINTR)
goto retry;
+ if (errno == EIO) {
+ if (count < EIO_RETRY_TIMES) {
+ count++;
+ ereport(WARNING, (errmsg("FilePWrite: %d (%s) " INT64_FORMAT " %d \
+ failed, then retry: Input/Output ERROR",
+ file,
+ vfdcache[file].fileName,
+ (int64)vfdcache[file].seekPos,
+ amount)));
+ goto retry;
+ }
+ }
/* Trouble, so assume we don't know the file position anymore */
vfdcache[file].seekPos = FileUnknownPos;
diff --git a/src/gausskernel/storage/ipc/procarray.cpp b/src/gausskernel/storage/ipc/procarray.cpp
index eb978c547..b742fb305 100755
--- a/src/gausskernel/storage/ipc/procarray.cpp
+++ b/src/gausskernel/storage/ipc/procarray.cpp
@@ -327,6 +327,7 @@ void CreateSharedProcArray(void)
g_instance.proc_array_idx->numProcs = 0;
g_instance.proc_array_idx->maxProcs = PROCARRAY_MAXPROCS;
g_instance.proc_array_idx->replication_slot_xmin = InvalidTransactionId;
+ g_instance.proc_array_idx->replication_slot_catalog_xmin = InvalidTransactionId;
}
g_instance.proc_base_all_procs = g_instance.proc_base->allProcs;
@@ -534,6 +535,8 @@ void ProcArrayEndTransaction(PGPROC* proc, TransactionId latestXid, bool isCommi
proc->lxid = InvalidLocalTransactionId;
pgxact->next_xid = InvalidTransactionId;
pgxact->xmin = InvalidTransactionId;
+ proc->snapXmax = InvalidTransactionId;
+ proc->snapCSN = InvalidCommitSeqNo;
pgxact->csn_min = InvalidCommitSeqNo;
pgxact->csn_dr = InvalidCommitSeqNo;
/* must be cleared with xid/xmin: */
@@ -576,6 +579,8 @@ static inline void ProcArrayEndTransactionInternal(PGPROC* proc, PGXACT* pgxact,
pgxact->next_xid = InvalidTransactionId;
proc->lxid = InvalidLocalTransactionId;
pgxact->xmin = InvalidTransactionId;
+ proc->snapXmax = InvalidTransactionId;
+ proc->snapCSN = InvalidCommitSeqNo;
pgxact->csn_min = InvalidCommitSeqNo;
pgxact->csn_dr = InvalidCommitSeqNo;
/* must be cleared with xid/xmin: */
@@ -747,6 +752,8 @@ void ProcArrayClearTransaction(PGPROC* proc)
pgxact->next_xid = InvalidTransactionId;
proc->lxid = InvalidLocalTransactionId;
pgxact->xmin = InvalidTransactionId;
+ proc->snapXmax = InvalidTransactionId;
+ proc->snapCSN = InvalidCommitSeqNo;
pgxact->csn_min = InvalidCommitSeqNo;
pgxact->csn_dr = InvalidCommitSeqNo;
proc->recoveryConflictPending = false;
@@ -1315,109 +1322,111 @@ bool TransactionIdIsInProgress(TransactionId xid, uint32* needSync, bool shortcu
return true;
}
- LWLockAcquire(ProcArrayLock, LW_SHARED);
-
- /*
- * Now that we have the lock, we can check latestCompletedXid; if the
- * target Xid is after that, it's surely still running.
- */
- if (checkLatestCompletedXid && TransactionIdPrecedes(t_thrd.xact_cxt.ShmemVariableCache->latestCompletedXid, xid)) {
- LWLockRelease(ProcArrayLock);
- xc_by_latest_xid_inc();
+ if (!RecoveryInProgress()) {
+ LWLockAcquire(ProcArrayLock, LW_SHARED);
/*
- * If xid < RecentXmin, xid should smaller than latestCompletedXid,
- * So shortCutCheckRes should be false. But for data replication,
- * page maybe faster than xlog, and tuple xid will be more than
- * latestCompletedXid after standby promote to primary. So the assert cannot
- * be always true, we will remove the assert. And it will not affect MVCC,
- * the xid should be aborted. Assert(shortCutCheckRes == true);
+ * Now that we have the lock, we can check latestCompletedXid; if the
+ * target Xid is after that, it's surely still running.
*/
- return true;
- }
-
- if (isTopXact && !bCareNextxid) {
- int procId = ProcXactHashTableLookup(xid);
-
- volatile PGXACT *pgxact = &g_instance.proc_base_all_xacts[procId];
-
- if (procId != InvalidProcessId) {
- if (needSync != NULL) {
- *needSync = pgxact->needToSyncXid;
- }
+ if (checkLatestCompletedXid &&
+ TransactionIdPrecedes(t_thrd.xact_cxt.ShmemVariableCache->latestCompletedXid, xid)) {
LWLockRelease(ProcArrayLock);
+ xc_by_latest_xid_inc();
+
+ /*
+ * If xid < RecentXmin, xid should smaller than latestCompletedXid,
+ * So shortCutCheckRes should be false. But for data replication,
+ * page maybe faster than xlog, and tuple xid will be more than
+ * latestCompletedXid after standby promote to primary. So the assert cannot
+ * be always true, we will remove the assert. And it will not affect MVCC,
+ * the xid should be aborted. Assert(shortCutCheckRes == true);
+ */
return true;
}
- LWLockRelease(ProcArrayLock);
- } else {
- /* No shortcuts, gotta grovel through the array */
- for (i = 0; i < arrayP->numProcs; i++) {
- int pgprocno = arrayP->pgprocnos[i];
- volatile PGPROC* proc = g_instance.proc_base_all_procs[pgprocno];
- volatile PGXACT* pgxact = &g_instance.proc_base_all_xacts[pgprocno];
- TransactionId pxid;
+ if (isTopXact && !bCareNextxid) {
+ int procId = ProcXactHashTableLookup(xid);
- /* Ignore my own proc --- dealt with it above */
- if (proc == t_thrd.proc)
- continue;
+ volatile PGXACT *pgxact = &g_instance.proc_base_all_xacts[procId];
- /* Fetch xid just once - see GetNewTransactionId */
- pxid = pgxact->xid;
-
- if (!TransactionIdIsValid(pxid)) {
- if (bCareNextxid && TransactionIdIsValid(pgxact->next_xid))
- pxid = pgxact->next_xid;
- else
- continue;
- }
-
- /*
- * Step 1: check the main Xid
- */
- if (TransactionIdEquals(pxid, xid)) {
- if (needSync != NULL)
+ if (procId != InvalidProcessId) {
+ if (needSync != NULL) {
*needSync = pgxact->needToSyncXid;
+ }
LWLockRelease(ProcArrayLock);
- xc_by_main_xid_inc();
- Assert(shortCutCheckRes == true);
return true;
}
- /*
- * We can ignore main Xids that are younger than the target Xid, since
- * the target could not possibly be their child.
- */
- if (TransactionIdPrecedes(xid, pxid))
- continue;
+ LWLockRelease(ProcArrayLock);
+ } else {
+ /* No shortcuts, gotta grovel through the array */
+ for (i = 0; i < arrayP->numProcs; i++) {
+ int pgprocno = arrayP->pgprocnos[i];
+ volatile PGPROC* proc = g_instance.proc_base_all_procs[pgprocno];
+ volatile PGXACT* pgxact = &g_instance.proc_base_all_xacts[pgprocno];
+ TransactionId pxid;
- /*
- * Step 2: check the cached child-Xids arrays
- */
- if (pgxact->nxids > 0) {
- /* Use subxidsLock to protect subxids */
- LWLockAcquire(proc->subxidsLock, LW_SHARED);
- for (j = pgxact->nxids - 1; j >= 0; j--) {
- /* Fetch xid just once - see GetNewTransactionId */
- TransactionId cxid = proc->subxids.xids[j];
+ /* Ignore my own proc --- dealt with it above */
+ if (proc == t_thrd.proc)
+ continue;
- if (TransactionIdEquals(cxid, xid)) {
- if (needSync != NULL)
- *needSync = pgxact->needToSyncXid;
- LWLockRelease(proc->subxidsLock);
- LWLockRelease(ProcArrayLock);
- xc_by_child_xid_inc();
- Assert(shortCutCheckRes == true);
- return true;
- }
+ /* Fetch xid just once - see GetNewTransactionId */
+ pxid = pgxact->xid;
+
+ if (!TransactionIdIsValid(pxid)) {
+ if (bCareNextxid && TransactionIdIsValid(pgxact->next_xid))
+ pxid = pgxact->next_xid;
+ else
+ continue;
+ }
+
+ /*
+ * Step 1: check the main Xid
+ */
+ if (TransactionIdEquals(pxid, xid)) {
+ if (needSync != NULL)
+ *needSync = pgxact->needToSyncXid;
+ LWLockRelease(ProcArrayLock);
+ xc_by_main_xid_inc();
+ Assert(shortCutCheckRes == true);
+ return true;
+ }
+
+ /*
+ * We can ignore main Xids that are younger than the target Xid, since
+ * the target could not possibly be their child.
+ */
+ if (TransactionIdPrecedes(xid, pxid))
+ continue;
+
+ /*
+ * Step 2: check the cached child-Xids arrays
+ */
+ if (pgxact->nxids > 0) {
+ /* Use subxidsLock to protect subxids */
+ LWLockAcquire(proc->subxidsLock, LW_SHARED);
+ for (j = pgxact->nxids - 1; j >= 0; j--) {
+ /* Fetch xid just once - see GetNewTransactionId */
+ TransactionId cxid = proc->subxids.xids[j];
+
+ if (TransactionIdEquals(cxid, xid)) {
+ if (needSync != NULL)
+ *needSync = pgxact->needToSyncXid;
+ LWLockRelease(proc->subxidsLock);
+ LWLockRelease(ProcArrayLock);
+ xc_by_child_xid_inc();
+ Assert(shortCutCheckRes == true);
+ return true;
+ }
+ }
+ LWLockRelease(proc->subxidsLock);
}
- LWLockRelease(proc->subxidsLock);
}
+
+ LWLockRelease(ProcArrayLock);
}
-
- LWLockRelease(ProcArrayLock);
}
-
/*
* Step 3: in hot standby mode, check the CSN log.
*/
@@ -1688,12 +1697,17 @@ static void GroupGetSnapshot(PGPROC* proc)
volatile TransactionId replication_slot_catalog_xmin = InvalidTransactionId;
bool clearGroup = false;
+ HOLD_INTERRUPTS();
+
/* Add ourselves to the list of processes needing to get snapshot. */
proc->snapshotGroupMember = true;
while (true) {
nextidx = pg_atomic_read_u32(&g_instance.proc_base->snapshotGroupFirst);
pg_atomic_write_u32(&proc->snapshotGroupNext, nextidx);
+ /* Ensure all previous writes are visible before follower continues. */
+ pg_memory_barrier();
+
if (pg_atomic_compare_exchange_u32(
&g_instance.proc_base->snapshotGroupFirst, &nextidx, (uint32)proc->pgprocno))
break;
@@ -1722,8 +1736,15 @@ static void GroupGetSnapshot(PGPROC* proc)
/* Fix semaphore count for any absorbed wakeups */
while (extraWaits-- > 0)
PGSemaphoreUnlock(&proc->sem);
+
+ /* in case of memory reordering in relaxed memory model like ARM */
+ pg_memory_barrier();
+
+ RESUME_INTERRUPTS();
+
return;
}
+ RESUME_INTERRUPTS();
/* We are the leader. Acquire the lock on behalf of everyone. */
bool retryGet = false;
@@ -1737,7 +1758,6 @@ RETRY_GET:
if (!clearGroup) {
XLogRecPtr redoEndLsn = GetXLogReplayRecPtr(NULL, NULL);
LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
-
bool condition = (t_thrd.xact_cxt.ShmemVariableCache->standbyXmin <=
t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXmin) &&
(t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXminLsn > redoEndLsn);
@@ -1780,6 +1800,7 @@ RETRY_GET:
PGPROC* procMember = g_instance.proc_base_all_procs[nextidx];
PGXACT* pgxact = &g_instance.proc_base_all_xacts[nextidx];
+ pg_memory_barrier();
GroupGetSnapshotInternal(pgxact, procMember->snapshotGroup, &xmin);
procMember->xminGroup = xmin;
@@ -1818,7 +1839,7 @@ RETRY_GET:
pg_atomic_write_u32(&procMember->snapshotGroupNext, INVALID_PGPROCNO);
/* ensure all previous writes are visible before follower continues. */
- pg_write_barrier();
+ pg_memory_barrier();
procMember->snapshotGroupMember = false;
@@ -1829,6 +1850,56 @@ RETRY_GET:
CHECK_FOR_INTERRUPTS();
}
}
+
+void AgentCopySnapshot(TransactionId *xmin, TransactionId *xmax, CommitSeqNo *snapcsn)
+{
+ ProcArrayStruct* arrayP = g_instance.proc_array_idx;
+ int* pgprocnos = arrayP->pgprocnos;
+ int numProcs = arrayP->numProcs;
+ volatile PGXACT* pgxact = NULL;
+ PGPROC* proc = NULL;
+ int pgprocno;
+ int maxPgprocno;
+ TransactionId pgprocXmin;
+ TransactionId maxpgprocXmin;
+
+ maxpgprocXmin = InvalidTransactionId;
+ for (int index = 0; index < numProcs; index++) {
+ pgprocno = pgprocnos[index];
+ pgxact = &g_instance.proc_base_all_xacts[pgprocno];
+ /*
+ * Backend is doing logical decoding which manages snapshot
+ * separately, check below.
+ */
+ if (pgxact->vacuumFlags & PROC_IN_LOGICAL_DECODING) {
+ continue;
+ }
+
+ if (pgxact == t_thrd.pgxact) {
+ continue;
+ }
+
+ pgprocXmin = pgxact->xmin;
+ /* get pgprocno with maximal xmin to reduce recovery conflict. */
+ if (TransactionIdIsNormal(pgprocXmin) && TransactionIdPrecedes(maxpgprocXmin, pgprocXmin)) {
+ maxpgprocXmin = pgprocXmin;
+ maxPgprocno = pgprocno;
+ }
+ }
+
+ if (TransactionIdIsValid(maxpgprocXmin)) {
+ pgxact = &g_instance.proc_base_all_xacts[maxPgprocno];
+ proc = g_instance.proc_base_all_procs[maxPgprocno];
+
+ *xmin = pgxact->xmin;
+ *xmax = proc->snapXmax;
+ *snapcsn = proc->snapCSN;
+ } else {
+ *xmin = InvalidTransactionId;
+ *xmax = InvalidTransactionId;
+ *snapcsn = InvalidCommitSeqNo;
+ }
+}
#endif
/*
@@ -1881,6 +1952,7 @@ Snapshot GetSnapshotData(Snapshot snapshot, bool force_local_snapshot)
volatile TransactionId replication_slot_catalog_xmin = InvalidTransactionId;
bool is_exec_cn = IS_PGXC_COORDINATOR && !IsConnFromCoord();
bool is_exec_dn = IS_PGXC_DATANODE && !IsConnFromCoord() && !IsConnFromDatanode();
+ WaitState oldStatus = STATE_WAIT_UNDEFINED;
Assert(snapshot != NULL);
@@ -1966,16 +2038,67 @@ RETRY:
* going to set MyPgXact->xmin.
*/
snapshot->takenDuringRecovery = RecoveryInProgress();
-#ifndef ENABLE_MULTIPLE_NODES
- bool retry_get = false;
-RETRY_GET:
if (snapshot->takenDuringRecovery) {
+ oldStatus = pgstat_report_waitstatus(STATE_STANDBY_GET_SNAPSHOT);
+ }
+ bool retry_get = false;
+ uint64 retry_count = 0;
+ const static uint64 WAIT_COUNT = 0x7FFFF;
+ /* reset xmin before acquiring lwlock, in case blocking redo */
+ t_thrd.pgxact->xmin = InvalidTransactionId;
+RETRY_GET:
+ if (snapshot->takenDuringRecovery && !StreamThreadAmI() &&
+ !u_sess->proc_cxt.clientIsCMAgent) {
+ if (InterruptPending) {
+ (void)pgstat_report_waitstatus(oldStatus);
+ }
if (retry_get) {
CHECK_FOR_INTERRUPTS();
pg_usleep(100L);
}
XLogRecPtr redoEndLsn = GetXLogReplayRecPtr(NULL, NULL);
- if (LWLockConditionalAcquire(ProcArrayLock, LW_EXCLUSIVE)) {
+ retry_count++;
+ if ((retry_count & WAIT_COUNT) == WAIT_COUNT) {
+ ereport(LOG, (errmsg("standbyRedoCleanupXmin = %ld, "
+ "standbyRedoCleanupXminLsn = %ld, "
+ "standbyXmin = %ld, redoEndLsn = %ld",
+ t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXmin,
+ t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXminLsn,
+ t_thrd.xact_cxt.ShmemVariableCache->standbyXmin,
+ redoEndLsn)));
+ }
+ if ((u_sess->proc_cxt.gsqlRemainCopyNum > 0 && retry_get)) {
+ LWLockAcquire(ProcArrayLock, LW_SHARED);
+ if ((t_thrd.xact_cxt.ShmemVariableCache->standbyXmin
+ <= t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXmin)
+ && (t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXminLsn > redoEndLsn)) {
+ /*
+ * If CM agent cannot get consistency snapshot immediately, try
+ * getting snapshot from other backends.
+ */
+ AgentCopySnapshot(&xmin, &xmax, &snapshot->snapshotcsn);
+ bool obtained = TransactionIdIsValid(xmin) && TransactionIdIsValid(xmax) &&
+ snapshot->snapshotcsn != InvalidCommitSeqNo;
+ if (obtained) {
+ globalxmin = xmin;
+ /* fetch into volatile var while ProcArrayLock is held */
+ replication_slot_xmin = g_instance.proc_array_idx->replication_slot_xmin;
+ replication_slot_catalog_xmin = g_instance.proc_array_idx->replication_slot_catalog_xmin;
+
+ if (!TransactionIdIsValid(t_thrd.pgxact->xmin)) {
+ t_thrd.pgxact->handle = GetCurrentTransactionHandleIfAny();
+ }
+ t_thrd.pgxact->xmin = u_sess->utils_cxt.TransactionXmin = xmin;
+ LWLockRelease(ProcArrayLock);
+ u_sess->proc_cxt.gsqlRemainCopyNum--;
+ /* reuse the groupgetsnapshot logic to set snapshot and thread information. */
+ goto GROUP_GET_SNAPSHOT;
+ }
+ LWLockRelease(ProcArrayLock);
+ retry_get = true;
+ goto RETRY_GET;
+ }
+ } else if (LWLockConditionalAcquire(ProcArrayLock, LW_EXCLUSIVE)) {
if ((t_thrd.xact_cxt.ShmemVariableCache->standbyXmin <=
t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXmin) &&
(t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXminLsn > redoEndLsn)) {
@@ -1983,6 +2106,7 @@ RETRY_GET:
retry_get = true;
goto RETRY_GET;
}
+#ifndef ENABLE_MULTIPLE_NODES
} else if (forHSFeedBack) {
LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
if ((t_thrd.xact_cxt.ShmemVariableCache->standbyXmin
@@ -1992,11 +2116,22 @@ RETRY_GET:
retry_get = true;
goto RETRY_GET;
}
- } else {
+ }
+#endif
+ else {
+ if (!retry_get) {
+ retry_get = true;
+ goto RETRY_GET;
+ }
+
if (!TransactionIdIsValid(t_thrd.pgxact->xmin)) {
t_thrd.pgxact->handle = GetCurrentTransactionHandleIfAny();
}
t_thrd.proc->snapshotGroup = snapshot;
+
+ /* ensure all previous writes are visible before setting snapshotGroup. */
+ pg_memory_barrier();
+
GroupGetSnapshot(t_thrd.proc);
xmin = t_thrd.proc->xminGroup;
@@ -2012,7 +2147,9 @@ RETRY_GET:
t_thrd.proc->globalxminGroup = InvalidTransactionId;
t_thrd.proc->replicationSlotXminGroup = InvalidTransactionId;
t_thrd.proc->replicationSlotCatalogXminGroup = InvalidTransactionId;
+
if (snapshot->snapshotcsn == 0) {
+ retry_get = true;
goto RETRY_GET;
}
goto GROUP_GET_SNAPSHOT;
@@ -2020,9 +2157,6 @@ RETRY_GET:
} else {
LWLockAcquire(ProcArrayLock, LW_SHARED);
}
-#else
- LWLockAcquire(ProcArrayLock, LW_SHARED);
-#endif
/* xmax is always latestCompletedXid + 1 */
xmax = t_thrd.xact_cxt.ShmemVariableCache->latestCompletedXid;
Assert(TransactionIdIsNormal(xmax));
@@ -2132,6 +2266,10 @@ RETRY_GET:
#ifndef ENABLE_MULTIPLE_NODES
GROUP_GET_SNAPSHOT:
#endif
+ /* Save the xmax and csn, so that the CM agent can obtain them. */
+ t_thrd.proc->snapXmax = xmax;
+ t_thrd.proc->snapCSN = snapshot->snapshotcsn;
+
/*
* Update globalxmin to include actual process xids. This is a slightly
* different way of computing it than GetOldestXmin uses, but should give
@@ -2218,6 +2356,10 @@ GROUP_GET_SNAPSHOT:
snapshot->regd_count = 0;
snapshot->copied = false;
+ if (snapshot->takenDuringRecovery) {
+ (void)pgstat_report_waitstatus(oldStatus);
+ }
+
return snapshot;
}
@@ -2817,6 +2959,26 @@ VirtualTransactionId* GetCurrentVirtualXIDs(
return vxids;
}
+void UpdateCleanUpInfo(TransactionId limitXmin, XLogRecPtr lsn)
+{
+ if (t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXmin < limitXmin) {
+ t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXmin = limitXmin;
+ const int xid_gap = 10000000;
+ if (limitXmin > t_thrd.xact_cxt.ShmemVariableCache->standbyXmin + xid_gap) {
+ ereport(LOG, (errmsg("limitXmin = %ld, standbyRedoCleanupXmin = %ld, "
+ "lsn = %ld, standbyRedoCleanupXminLsn = %ld, "
+ "standbyXmin = %ld",
+ limitXmin, t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXmin,
+ lsn, t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXminLsn,
+ t_thrd.xact_cxt.ShmemVariableCache->standbyXmin)));
+ }
+ }
+
+ if (t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXminLsn < lsn) {
+ t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXminLsn = lsn;
+ }
+}
+
/*
* GetConflictingVirtualXIDs -- returns an array of currently active VXIDs.
*
@@ -2916,11 +3078,7 @@ VirtualTransactionId *GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbO
#endif
}
#ifndef ENABLE_MULTIPLE_NODES
- if (t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXmin < limitXmin)
- t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXmin = limitXmin;
-
- if (t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXminLsn < lsn)
- t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXminLsn = lsn;
+ UpdateCleanUpInfo(limitXmin, lsn);
#endif
LWLockRelease(ProcArrayLock);
@@ -3640,8 +3798,13 @@ void ProcArraySetReplicationSlotXmin(TransactionId xmin, TransactionId catalog_x
if (!already_locked)
LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
- g_instance.proc_array_idx->replication_slot_xmin = xmin;
- g_instance.proc_array_idx->replication_slot_catalog_xmin = catalog_xmin;
+ if (xmin == InvalidTransactionId || TransactionIdPrecedes(g_instance.proc_array_idx->replication_slot_xmin, xmin)) {
+ g_instance.proc_array_idx->replication_slot_xmin = xmin;
+ }
+ if (catalog_xmin == InvalidTransactionId ||
+ TransactionIdPrecedes(g_instance.proc_array_idx->replication_slot_catalog_xmin, catalog_xmin)) {
+ g_instance.proc_array_idx->replication_slot_catalog_xmin = catalog_xmin;
+ }
if (!already_locked)
LWLockRelease(ProcArrayLock);
diff --git a/src/gausskernel/storage/ipc/sinval.cpp b/src/gausskernel/storage/ipc/sinval.cpp
index e30e5e551..090b2d4db 100644
--- a/src/gausskernel/storage/ipc/sinval.cpp
+++ b/src/gausskernel/storage/ipc/sinval.cpp
@@ -89,6 +89,36 @@ void SendSharedInvalidMessages(const SharedInvalidationMessage* msgs, int n)
}
}
+static bool SkipRedundantInvalMsg(SharedInvalidationMessage *msg)
+{
+ if (msg->id != SHAREDINVALRELCACHE_ID || unlikely(u_sess->proc_cxt.MyDatabaseId == InvalidOid) ||
+ unlikely(msg->rc.dbId == InvalidOid)) {
+ return false;
+ }
+
+ /* skip if no need to handle for current db */
+ if (u_sess->proc_cxt.MyDatabaseId != msg->rc.dbId) {
+ return true;
+ }
+
+ /* skip if inval msg is duplicate */
+ for (int i = t_thrd.rc_cxt.rcNum - 1; i >= 0; i--) {
+ if (msg->rc.relId == t_thrd.rc_cxt.rcData[i]) {
+ return true;
+ }
+ }
+
+ /* reset msg reservoir if full */
+ if (t_thrd.rc_cxt.rcNum >= RC_MAX_NUM) {
+ t_thrd.rc_cxt.rcNum = 0;
+ }
+
+ /* keep track of 16 deduplicated msg */
+ t_thrd.rc_cxt.rcData[t_thrd.rc_cxt.rcNum] = msg->rc.relId;
+ t_thrd.rc_cxt.rcNum++;
+ return false;
+}
+
/*
* ReceiveSharedInvalidMessages
* Process shared-cache-invalidation messages waiting for this backend
@@ -120,6 +150,10 @@ void ReceiveSharedInvalidMessages(void (*invalFunction)(SharedInvalidationMessag
while (inval_cxt->nextmsg < inval_cxt->nummsgs) {
SharedInvalidationMessage msg = inval_cxt->messages[inval_cxt->nextmsg++];
+ if (SkipRedundantInvalMsg(&msg)) {
+ continue;
+ }
+
inval_cxt->SIMCounter++;
invalFunction(&msg);
}
diff --git a/src/gausskernel/storage/ipc/standby.cpp b/src/gausskernel/storage/ipc/standby.cpp
index 3b5143e25..5f90131e7 100755
--- a/src/gausskernel/storage/ipc/standby.cpp
+++ b/src/gausskernel/storage/ipc/standby.cpp
@@ -37,7 +37,8 @@
#include "pgxc/poolutils.h"
#include "replication/walreceiver.h"
static void ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId* waitlist, TransactionId* xminArray,
- ProcSignalReason reason);
+ ProcSignalReason reason, TimestampTz waitStart,
+ TransactionId limitXmin = InvalidTransactionId);
static void ResolveRecoveryConflictWithLock(Oid dbOid, Oid relOid);
static void LogAccessExclusiveLocks(int nlocks, xl_standby_lock* locks);
static void LogReleaseAccessExclusiveLocks(int nlocks, xl_standby_lock* locks);
@@ -132,19 +133,11 @@ void ShutdownRecoveryTransactionEnvironment(void)
static TimestampTz GetStandbyLimitTime(TimestampTz startTime)
{
TimestampTz rtime = startTime;
- bool fromStream = (t_thrd.xlog_cxt.XLogReceiptSource == XLOG_FROM_STREAM);
- if (fromStream) {
- if (u_sess->attr.attr_storage.max_standby_streaming_delay < 0)
- return 0; /* wait forever */
+ if (u_sess->attr.attr_storage.max_standby_streaming_delay < 0)
+ return 0; /* wait forever */
- return TimestampTzPlusMilliseconds(rtime, u_sess->attr.attr_storage.max_standby_streaming_delay);
- } else {
- if (u_sess->attr.attr_storage.max_standby_archive_delay < 0)
- return 0; /* wait forever */
-
- return TimestampTzPlusMilliseconds(rtime, u_sess->attr.attr_storage.max_standby_archive_delay);
- }
+ return TimestampTzPlusMilliseconds(rtime, u_sess->attr.attr_storage.max_standby_streaming_delay);
}
#define STANDBY_INITIAL_WAIT_US 1000
@@ -187,9 +180,9 @@ static bool WaitExceedsMaxStandbyDelay(TimestampTz startTime)
* then throw the required error as instructed.
*/
static void ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId* waitlist, TransactionId* xminArray,
- ProcSignalReason reason)
+ ProcSignalReason reason, TimestampTz waitStart,
+ TransactionId limitXmin)
{
- TimestampTz waitStart;
char* new_status = NULL;
bool waited = false;
@@ -197,8 +190,7 @@ static void ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId* waitlis
if (!VirtualTransactionIdIsValid(*waitlist))
return;
- waitStart = GetCurrentTimestamp();
-
+ WaitState oldStatus = pgstat_report_waitstatus(STATE_STANDBY_READ_RECOVERY_CONFLICT);
while (VirtualTransactionIdIsValid(*waitlist)) {
/* reset standbyWait_us for each xact we wait for */
t_thrd.storage_cxt.standbyWait_us = STANDBY_INITIAL_WAIT_US;
@@ -207,7 +199,8 @@ static void ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId* waitlis
while (!VirtualXactLock(*waitlist, false)) {
PGPROC* proc = BackendIdGetProc((*waitlist).backendId);
PGXACT* pgxact = &g_instance.proc_base_all_xacts[proc->pgprocno];
- if (xminArray != NULL && pgxact->xmin != *xminArray) {
+ if (xminArray != NULL && pgxact->xmin != *xminArray &&
+ (!TransactionIdIsValid(pgxact->xmin) || TransactionIdFollows(pgxact->xmin, limitXmin))) {
ereport(WARNING, (errmsg("hotstandby:snapshot changed old xmin = %lu, new xmin = %lu",
*xminArray, pgxact->xmin)));
break;
@@ -255,6 +248,7 @@ static void ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId* waitlis
waited = true;
ereport(LOG, (errmsg("hotstandby:snapshot changed, wait pid %lu", pid)));
}
+ break;
}
}
@@ -263,6 +257,7 @@ static void ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId* waitlis
if (xminArray != NULL)
xminArray++;
}
+ (void)pgstat_report_waitstatus(oldStatus);
/* Reset ps display if we changed it */
if (new_status != NULL) {
@@ -280,6 +275,7 @@ static void ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId* waitlis
void ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, const RelFileNode& node, XLogRecPtr lsn)
{
VirtualTransactionId* backends = NULL;
+ TimestampTz waitStart;
/*
* If we get passed InvalidTransactionId then we are a little surprised,
@@ -314,15 +310,24 @@ void ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, const R
if (t_thrd.storage_cxt.xminArray == NULL)
ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory")));
}
- backends = GetConflictingVirtualXIDs(latestRemovedXid, node.dbNode, lsn, limitXminCSN,
- t_thrd.storage_cxt.xminArray);
- ResolveRecoveryConflictWithVirtualXIDs(backends, t_thrd.storage_cxt.xminArray, PROCSIG_RECOVERY_CONFLICT_SNAPSHOT);
+ waitStart = GetCurrentTimestamp();
+ while (true) {
+ backends = GetConflictingVirtualXIDs(latestRemovedXid, node.dbNode, lsn, limitXminCSN,
+ t_thrd.storage_cxt.xminArray);
+ if (!VirtualTransactionIdIsValid(*backends)) {
+ break;
+ }
+
+ ResolveRecoveryConflictWithVirtualXIDs(backends, t_thrd.storage_cxt.xminArray,
+ PROCSIG_RECOVERY_CONFLICT_SNAPSHOT, waitStart, latestRemovedXid);
+ }
}
void ResolveRecoveryConflictWithSnapshotOid(TransactionId latestRemovedXid, Oid dbid, XLogRecPtr lsn)
{
VirtualTransactionId* backends = NULL;
+ TimestampTz waitStart;
/*
* If we get passed InvalidTransactionId then we are a little surprised,
* but it is theoretically possible in normal running. It also happens
@@ -343,14 +348,24 @@ void ResolveRecoveryConflictWithSnapshotOid(TransactionId latestRemovedXid, Oid
ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory")));
}
- backends = GetConflictingVirtualXIDs(latestRemovedXid, dbid, lsn, InvalidCommitSeqNo, t_thrd.storage_cxt.xminArray);
- ResolveRecoveryConflictWithVirtualXIDs(backends, t_thrd.storage_cxt.xminArray, PROCSIG_RECOVERY_CONFLICT_SNAPSHOT);
+ waitStart = GetCurrentTimestamp();
+ while (true) {
+ backends = GetConflictingVirtualXIDs(latestRemovedXid, dbid, lsn, InvalidCommitSeqNo,
+ t_thrd.storage_cxt.xminArray);
+ if (!VirtualTransactionIdIsValid(*backends)) {
+ break;
+ }
+
+ ResolveRecoveryConflictWithVirtualXIDs(backends, t_thrd.storage_cxt.xminArray,
+ PROCSIG_RECOVERY_CONFLICT_SNAPSHOT, waitStart, latestRemovedXid);
+ }
}
void ResolveRecoveryConflictWithTablespace(Oid tsid)
{
VirtualTransactionId* temp_file_users = NULL;
+ TimestampTz waitStart;
/*
* Standby users may be currently using this tablespace for their
@@ -369,8 +384,16 @@ void ResolveRecoveryConflictWithTablespace(Oid tsid)
*
* We don't wait for commit because drop tablespace is non-transactional.
*/
- temp_file_users = GetConflictingVirtualXIDs(InvalidTransactionId, InvalidOid);
- ResolveRecoveryConflictWithVirtualXIDs(temp_file_users, NULL, PROCSIG_RECOVERY_CONFLICT_TABLESPACE);
+ waitStart = GetCurrentTimestamp();
+ while (true) {
+ temp_file_users = GetConflictingVirtualXIDs(InvalidTransactionId, InvalidOid);
+ if (!VirtualTransactionIdIsValid(*temp_file_users)) {
+ break;
+ }
+
+ ResolveRecoveryConflictWithVirtualXIDs(temp_file_users, NULL,
+ PROCSIG_RECOVERY_CONFLICT_TABLESPACE, waitStart);
+ }
}
void ResolveRecoveryConflictWithDatabase(Oid dbid)
@@ -410,6 +433,7 @@ static void ResolveRecoveryConflictWithLock(Oid dbOid, Oid relOid)
bool lock_acquired = false;
int num_attempts = 0;
LOCKTAG locktag;
+ TimestampTz waitStart;
SET_LOCKTAG_RELATION(locktag, dbOid, relOid);
@@ -421,13 +445,14 @@ static void ResolveRecoveryConflictWithLock(Oid dbOid, Oid relOid)
* people crowding for the same table. Recovery must win; the end
* justifies the means.
*/
+ waitStart = GetCurrentTimestamp();
while (!lock_acquired) {
if (++num_attempts < 3)
backends = GetLockConflicts(&locktag, AccessExclusiveLock);
else
backends = GetConflictingVirtualXIDs(InvalidTransactionId, InvalidOid);
- ResolveRecoveryConflictWithVirtualXIDs(backends, NULL, PROCSIG_RECOVERY_CONFLICT_LOCK);
+ ResolveRecoveryConflictWithVirtualXIDs(backends, NULL, PROCSIG_RECOVERY_CONFLICT_LOCK, waitStart);
if (LockAcquireExtended(&locktag, AccessExclusiveLock, true, true, false) != LOCKACQUIRE_NOT_AVAIL)
lock_acquired = true;
@@ -1302,4 +1327,4 @@ void RemoveAllCommittedCsnInfo()
}
list_free_deep(t_thrd.xlog_cxt.committing_csn_list);
t_thrd.xlog_cxt.committing_csn_list = NIL;
-}
\ No newline at end of file
+}
diff --git a/src/gausskernel/storage/lmgr/lock.cpp b/src/gausskernel/storage/lmgr/lock.cpp
index 6644f2357..fa3a36130 100644
--- a/src/gausskernel/storage/lmgr/lock.cpp
+++ b/src/gausskernel/storage/lmgr/lock.cpp
@@ -773,8 +773,9 @@ static LockAcquireResult LockAcquireExtendedXC(const LOCKTAG *locktag, LOCKMODE
*
* First we prepare to log, then after lock acquired we issue log record.
*/
- if (lockmode >= AccessExclusiveLock && locktag->locktag_type == LOCKTAG_RELATION && !RecoveryInProgress() &&
- XLogStandbyInfoActive()) {
+ if (lockmode >= AccessExclusiveLock && (locktag->locktag_type == LOCKTAG_RELATION ||
+ locktag->locktag_type == LOCKTAG_PARTITION || locktag->locktag_type == LOCKTAG_PARTITION_SEQUENCE) &&
+ !RecoveryInProgress() && XLogStandbyInfoActive()) {
LogAccessExclusiveLockPrepare();
log_lock = true;
}
diff --git a/src/gausskernel/storage/lmgr/proc.cpp b/src/gausskernel/storage/lmgr/proc.cpp
index 4886fe0a8..ec614e2ce 100755
--- a/src/gausskernel/storage/lmgr/proc.cpp
+++ b/src/gausskernel/storage/lmgr/proc.cpp
@@ -867,6 +867,8 @@ void InitProcess(void)
t_thrd.pgxact->xid = InvalidTransactionId;
t_thrd.pgxact->next_xid = InvalidTransactionId;
t_thrd.pgxact->xmin = InvalidTransactionId;
+ t_thrd.proc->snapXmax = InvalidTransactionId;
+ t_thrd.proc->snapCSN = InvalidCommitSeqNo;
t_thrd.pgxact->csn_min = InvalidCommitSeqNo;
t_thrd.pgxact->csn_dr = InvalidCommitSeqNo;
t_thrd.pgxact->prepare_xid = InvalidTransactionId;
@@ -1117,6 +1119,8 @@ void InitAuxiliaryProcess(void)
t_thrd.pgxact->xid = InvalidTransactionId;
t_thrd.pgxact->next_xid = InvalidTransactionId;
t_thrd.pgxact->xmin = InvalidTransactionId;
+ t_thrd.proc->snapXmax = InvalidTransactionId;
+ t_thrd.proc->snapCSN = InvalidCommitSeqNo;
t_thrd.pgxact->csn_min = InvalidCommitSeqNo;
t_thrd.pgxact->csn_dr = InvalidCommitSeqNo;
t_thrd.proc->backendId = InvalidBackendId;
@@ -2838,10 +2842,11 @@ bool pause_sig_alarm(bool is_statement_timeout)
bool resume_sig_alarm(bool is_statement_timeout)
{
/*
- * The time counter was not paused before if t_thrd.utils_cxt.timeIsPausing is false.
- * You should not invoke resume_sig_alarm here.
+ * In case of network error or unchanged statistics, pause_sig_alarm may never be called.
*/
- Assert(t_thrd.storage_cxt.timeIsPausing == true);
+ if (!t_thrd.storage_cxt.timeIsPausing) {
+ return true;
+ }
if (enable_sig_alarm(t_thrd.storage_cxt.restimems, is_statement_timeout)) {
t_thrd.storage_cxt.timeIsPausing = false;
diff --git a/src/gausskernel/storage/replication/heartbeat.cpp b/src/gausskernel/storage/replication/heartbeat.cpp
index 038c265a9..43f45713b 100755
--- a/src/gausskernel/storage/replication/heartbeat.cpp
+++ b/src/gausskernel/storage/replication/heartbeat.cpp
@@ -118,7 +118,7 @@ static int deal_with_sigup()
* dynamically modify the ha socket.
*/
for (j = 1; j < MAX_REPLNODE_NUM; j++) {
- if (t_thrd.postmaster_cxt.ReplConnChangeType[j] == OLD_REPL_CHANGE_IP_OR_PORT) {
+ if (t_thrd.postmaster_cxt.ReplConnChangeType[j] != NO_CHANGE) {
break;
}
}
diff --git a/src/gausskernel/storage/replication/libpqsw.cpp b/src/gausskernel/storage/replication/libpqsw.cpp
index 1e1c9d909..07b8aabfc 100644
--- a/src/gausskernel/storage/replication/libpqsw.cpp
+++ b/src/gausskernel/storage/replication/libpqsw.cpp
@@ -293,6 +293,9 @@ static bool libpqsw_before_redirect(const char* commandTag, List* query_list, co
if (!redirect_manager->get_remote_excute()) {
return false;
}
+ if (commandTag == NULL) {
+ commandTag = "";
+ }
bool need_redirect = false;
if (!libpqsw_enable_autocommit()) {
if (strcmp(commandTag, "SET") == 0) {
@@ -314,9 +317,11 @@ static bool libpqsw_before_redirect(const char* commandTag, List* query_list, co
if (strcmp(commandTag, "SHOW") == 0) {
return false;
}
+ if (query_list == NIL) {
+ return false;
+ }
ListCell* remote_lc = NULL;
- Assert(query_list != NULL);
foreach (remote_lc, query_list) {
Query* tmp_query = (Query*)lfirst(remote_lc);
if (!queryIsReadOnly(tmp_query)) {
@@ -606,15 +611,13 @@ bool libpqsw_process_query_message(const char* commandTag, List* query_list, con
// is start transaction command
bool libpqsw_begin_command(const char* commandTag)
{
- return (strcmp(commandTag, "BEGIN") == 0)
- || (strcmp(commandTag, "START TRANSACTION") == 0);
+ return commandTag != NULL && (strcmp(commandTag, "BEGIN") == 0 || strcmp(commandTag, "START TRANSACTION") == 0);
}
// is end transaction command
bool libpqsw_end_command(const char* commandTag)
{
- return (strcmp(commandTag, "COMMIT") == 0)
- || (strcmp(commandTag, "ROLLBACK") == 0);
+ return commandTag != NULL && (strcmp(commandTag, "COMMIT") == 0 || strcmp(commandTag, "ROLLBACK") == 0);
}
// set commandTag
diff --git a/src/gausskernel/storage/replication/libpqwalreceiver.cpp b/src/gausskernel/storage/replication/libpqwalreceiver.cpp
index 26f2f184a..c30df148b 100755
--- a/src/gausskernel/storage/replication/libpqwalreceiver.cpp
+++ b/src/gausskernel/storage/replication/libpqwalreceiver.cpp
@@ -798,7 +798,8 @@ retry:
}
#ifndef ENABLE_MULTIPLE_NODES
- if (t_thrd.xlog_cxt.is_cascade_standby && !t_thrd.postmaster_cxt.HaShmData->is_cross_region) {
+ if (g_instance.attr.attr_storage.enable_availablezone &&
+ t_thrd.xlog_cxt.is_cascade_standby && !t_thrd.postmaster_cxt.HaShmData->is_cross_region) {
IdentifyRemoteAvailableZone();
}
#endif
diff --git a/src/gausskernel/storage/replication/logical/parallel_decode.cpp b/src/gausskernel/storage/replication/logical/parallel_decode.cpp
index a3707fd3e..3634780a0 100644
--- a/src/gausskernel/storage/replication/logical/parallel_decode.cpp
+++ b/src/gausskernel/storage/replication/logical/parallel_decode.cpp
@@ -72,7 +72,7 @@ ParallelReorderBufferTXN *ParallelReorderBufferGetOldestTXN(ParallelReorderBuffe
return txn;
}
-void tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls)
+void tuple_to_stringinfo(Relation relation, StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool isOld)
{
if ((tuple->tupTableType == HEAP_TUPLE) && (HEAP_TUPLE_IS_COMPRESSED(tuple->t_data) ||
(int)HeapTupleHeaderGetNatts(tuple->t_data, tupdesc) > tupdesc->natts)) {
@@ -90,7 +90,7 @@ void tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool
attr = &tupdesc->attrs[natt];
- if (attr->attisdropped || attr->attnum < 0)
+ if (attr->attisdropped || attr->attnum < 0 || (isOld && !IsRelationReplidentKey(relation, attr->attnum)))
continue;
typid = attr->atttypid;
@@ -102,10 +102,6 @@ void tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool
origval = uheap_getattr((UHeapTuple)tuple, natt + 1, tupdesc, &isnull);
}
- if (isnull && skip_nulls) {
- continue;
- }
-
/* print attribute name */
appendStringInfoChar(s, ' ');
appendStringInfoString(s, quote_identifier(NameStr(attr->attname)));
@@ -221,6 +217,7 @@ void parallel_decode_change_to_text(Relation relation, ParallelReorderBufferChan
char *schema = get_namespace_name(class_form->relnamespace);
char *table = NameStr(class_form->relname);
if (FilterWhiteList(schema, table, slotId, old, data->context)) {
+ logChange->type = LOGICAL_LOG_EMPTY;
return;
}
@@ -240,7 +237,7 @@ void parallel_decode_change_to_text(Relation relation, ParallelReorderBufferChan
if (change->data.tp.newtuple == NULL)
appendStringInfoString(logChange->out, " (no-tuple-data)");
else
- tuple_to_stringinfo(logChange->out, tupdesc, &change->data.tp.newtuple->tuple, false);
+ tuple_to_stringinfo(relation, logChange->out, tupdesc, &change->data.tp.newtuple->tuple, false);
break;
case PARALLEL_REORDER_BUFFER_CHANGE_UPDATE:
@@ -248,14 +245,14 @@ void parallel_decode_change_to_text(Relation relation, ParallelReorderBufferChan
appendStringInfoString(logChange->out, " UPDATE:");
if (change->data.tp.oldtuple != NULL) {
appendStringInfoString(logChange->out, " old-key:");
- tuple_to_stringinfo(logChange->out, tupdesc, &change->data.tp.oldtuple->tuple, true);
+ tuple_to_stringinfo(relation, logChange->out, tupdesc, &change->data.tp.oldtuple->tuple, true);
appendStringInfoString(logChange->out, " new-tuple:");
}
if (change->data.tp.newtuple == NULL)
appendStringInfoString(logChange->out, " (no-tuple-data)");
else
- tuple_to_stringinfo(logChange->out, tupdesc, &change->data.tp.newtuple->tuple, false);
+ tuple_to_stringinfo(relation, logChange->out, tupdesc, &change->data.tp.newtuple->tuple, false);
break;
case PARALLEL_REORDER_BUFFER_CHANGE_DELETE:
@@ -267,7 +264,7 @@ void parallel_decode_change_to_text(Relation relation, ParallelReorderBufferChan
appendStringInfoString(logChange->out, " (no-tuple-data)");
/* In DELETE, only the replica identity is present; display that */
else
- tuple_to_stringinfo(logChange->out, tupdesc, &change->data.tp.oldtuple->tuple, true);
+ tuple_to_stringinfo(relation, logChange->out, tupdesc, &change->data.tp.oldtuple->tuple, true);
break;
default:
@@ -283,8 +280,8 @@ void parallel_decode_change_to_text(Relation relation, ParallelReorderBufferChan
MemoryContextReset(data->context);
}
-static void TupleToJsoninfo(
- cJSON* cols_name, cJSON* cols_type, cJSON* cols_val, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls)
+static void TupleToJsoninfo(Relation relation, cJSON* cols_name, cJSON* cols_type, cJSON* cols_val, TupleDesc tupdesc,
+ HeapTuple tuple, bool isOld)
{
if ((tuple->tupTableType == HEAP_TUPLE) && (HEAP_TUPLE_IS_COMPRESSED(tuple->t_data) ||
(int)HeapTupleHeaderGetNatts(tuple->t_data, tupdesc) > tupdesc->natts)) {
@@ -294,7 +291,7 @@ static void TupleToJsoninfo(
/* print all columns individually */
for (int natt = 0; natt < tupdesc->natts; natt++) {
Form_pg_attribute attr = &tupdesc->attrs[natt]; /* the attribute itself */
- if (attr->attisdropped || attr->attnum < 0) {
+ if (attr->attisdropped || attr->attnum < 0 || (isOld && !IsRelationReplidentKey(relation, attr->attnum))) {
continue;
}
@@ -308,9 +305,6 @@ static void TupleToJsoninfo(
} else {
origval = uheap_getattr((UHeapTuple)tuple, natt + 1, tupdesc, &isnull);
}
- if (isnull && skip_nulls) {
- continue;
- }
/* print attribute name */
@@ -375,6 +369,7 @@ void parallel_decode_change_to_json(Relation relation, ParallelReorderBufferChan
char *schema = get_namespace_name(class_form->relnamespace);
char *table = NameStr(class_form->relname);
if (FilterWhiteList(schema, table, slotId, old, data->context)) {
+ logChange->type = LOGICAL_LOG_EMPTY;
return;
}
@@ -409,25 +404,29 @@ void parallel_decode_change_to_json(Relation relation, ParallelReorderBufferChan
case PARALLEL_REORDER_BUFFER_CHANGE_UINSERT:
opType = cJSON_CreateString("INSERT");
if (change->data.tp.newtuple != NULL) {
- TupleToJsoninfo(columnsName, columnsType, columnsVal, tupdesc, &change->data.tp.newtuple->tuple, false);
+ TupleToJsoninfo(relation, columnsName, columnsType, columnsVal, tupdesc,
+ &change->data.tp.newtuple->tuple, false);
}
break;
case PARALLEL_REORDER_BUFFER_CHANGE_UPDATE:
case PARALLEL_REORDER_BUFFER_CHANGE_UUPDATE:
opType = cJSON_CreateString("UPDATE");
if (change->data.tp.oldtuple != NULL) {
- TupleToJsoninfo(oldKeysName, oldKeysType, oldKeysVal, tupdesc, &change->data.tp.oldtuple->tuple, true);
+ TupleToJsoninfo(relation, oldKeysName, oldKeysType, oldKeysVal, tupdesc,
+ &change->data.tp.oldtuple->tuple, true);
}
if (change->data.tp.newtuple != NULL) {
- TupleToJsoninfo(columnsName, columnsType, columnsVal, tupdesc, &change->data.tp.newtuple->tuple, false);
+ TupleToJsoninfo(relation, columnsName, columnsType, columnsVal, tupdesc,
+ &change->data.tp.newtuple->tuple, false);
}
break;
case PARALLEL_REORDER_BUFFER_CHANGE_DELETE:
case PARALLEL_REORDER_BUFFER_CHANGE_UDELETE:
opType = cJSON_CreateString("DELETE");
if (change->data.tp.oldtuple != NULL) {
- TupleToJsoninfo(oldKeysName, oldKeysType, oldKeysVal, tupdesc, &change->data.tp.oldtuple->tuple, true);
+ TupleToJsoninfo(relation, oldKeysName, oldKeysType, oldKeysVal, tupdesc,
+ &change->data.tp.oldtuple->tuple, true);
}
/* if there was no PK, we only know that a delete happened */
break;
@@ -480,7 +479,7 @@ static inline bool AppendInvalidations(StringInfo s, TupleDesc tupdesc, HeapTupl
}
/* decode a tuple into binary style */
-static void AppendTuple(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skipNulls)
+static void AppendTuple(Relation relation, StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool isOld)
{
if (AppendInvalidations(s, tupdesc, tuple)) {
return;
@@ -490,7 +489,7 @@ static void AppendTuple(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool s
pq_sendint16(s, (uint16)(tupdesc->natts));
for (int natt = 0; natt < tupdesc->natts; natt++) {
Form_pg_attribute attr = &tupdesc->attrs[natt];
- if (attr->attisdropped || attr->attnum < 0) {
+ if (attr->attisdropped || attr->attnum < 0 || (isOld && !IsRelationReplidentKey(relation, attr->attnum))) {
continue;
}
@@ -502,9 +501,6 @@ static void AppendTuple(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool s
} else {
origval = uheap_getattr((UHeapTuple)tuple, natt + 1, tupdesc, &isnull);
}
- if (isnull && skipNulls) {
- continue;
- }
attrNum++;
const char *columnName = quote_identifier(NameStr(attr->attname));
pq_sendint16(s, (uint16)strlen(columnName));
@@ -548,6 +544,7 @@ void parallel_decode_change_to_bin(Relation relation, ParallelReorderBufferChang
char *schema = get_namespace_name(class_form->relnamespace);
char *table = NameStr(class_form->relname);
if (FilterWhiteList(schema, table, slotId, old, data->context)) {
+ logChange->type = LOGICAL_LOG_EMPTY;
return;
}
@@ -562,7 +559,7 @@ void parallel_decode_change_to_bin(Relation relation, ParallelReorderBufferChang
AppendRelation(logChange->out, tupdesc, schema, table);
if (change->data.tp.newtuple != NULL) {
appendStringInfoChar(logChange->out, 'N');
- AppendTuple(logChange->out, tupdesc, &change->data.tp.newtuple->tuple, false);
+ AppendTuple(relation, logChange->out, tupdesc, &change->data.tp.newtuple->tuple, false);
}
break;
@@ -573,11 +570,11 @@ void parallel_decode_change_to_bin(Relation relation, ParallelReorderBufferChang
if (change->data.tp.newtuple != NULL) {
appendStringInfoChar(logChange->out, 'N');
- AppendTuple(logChange->out, tupdesc, &change->data.tp.newtuple->tuple, false);
+ AppendTuple(relation, logChange->out, tupdesc, &change->data.tp.newtuple->tuple, false);
}
if (change->data.tp.oldtuple != NULL) {
appendStringInfoChar(logChange->out, 'O');
- AppendTuple(logChange->out, tupdesc, &change->data.tp.oldtuple->tuple, true);
+ AppendTuple(relation, logChange->out, tupdesc, &change->data.tp.oldtuple->tuple, true);
}
break;
@@ -588,7 +585,7 @@ void parallel_decode_change_to_bin(Relation relation, ParallelReorderBufferChang
/* if there was no PK, we only know that a delete happened */
if (change->data.tp.oldtuple != NULL) {
appendStringInfoChar(logChange->out, 'O');
- AppendTuple(logChange->out, tupdesc, &change->data.tp.oldtuple->tuple, true);
+ AppendTuple(relation, logChange->out, tupdesc, &change->data.tp.oldtuple->tuple, true);
}
break;
@@ -773,16 +770,7 @@ logicalLog* getIUDLogicalLog(ParallelReorderBufferChange* change, ParallelLogica
* Catalog tuple without data, emitted while catalog was
* in the process of being rewritten.
*/
- if (change->data.tp.newtuple == NULL && change->data.tp.oldtuple == NULL) {
- /*
- * The parser thread polls and puts tuples into the decoder queue in LSN order.
- * When there is a log that does not need to be parsed, the empty logical log should
- * also be inserted into the queue to ensure that the order is preserved when the slicer
- * polls to obtain the logical log.
- */
- logChange = GetLogicalLog(worker);
- return logChange;
- } else if (reloid == InvalidOid) {
+ if (reloid == InvalidOid) {
/*
* description:
* When we try to decode a table who is already dropped.
diff --git a/src/gausskernel/storage/replication/logical/parallel_decode_worker.cpp b/src/gausskernel/storage/replication/logical/parallel_decode_worker.cpp
index 032c53de1..4555b07c3 100644
--- a/src/gausskernel/storage/replication/logical/parallel_decode_worker.cpp
+++ b/src/gausskernel/storage/replication/logical/parallel_decode_worker.cpp
@@ -705,11 +705,13 @@ bool CheckWhiteList(const List *whiteList, const char *schema, const char *table
foreach(lc, whiteList) {
chosenTable *cTable = (chosenTable *)lfirst(lc);
- if ((cTable->schema == NULL || strncmp(cTable->schema, schema, strlen(schema)) == 0) &&
- (cTable->table == NULL || strncmp(cTable->table, table, strlen(table)) == 0)) {
- return true;
+ if ((cTable->schema == NULL || strcmp(cTable->schema, schema) == 0) &&
+ (cTable->table == NULL || strcmp(cTable->table, table) == 0)) {
+ return true;
}
}
+ ereport(DEBUG1, (errmodule(MOD_LOGICAL_DECODE),
+ errmsg("logical change record of table %s.%s is filtered by white-table-list", schema, table)));
return false;
}
@@ -1142,6 +1144,11 @@ void ParallelDecodeWorkerMain(void* point)
break;
}
+ if (t_thrd.parallel_decode_cxt.got_SIGHUP) {
+ t_thrd.parallel_decode_cxt.got_SIGHUP = false;
+ ProcessConfigFile(PGC_SIGHUP);
+ }
+
LogicalChangeHead = (ParallelReorderBufferChange *)LogicalQueueTop(worker->changeQueue);
if (LogicalChangeHead == NULL) {
continue;
@@ -1293,6 +1300,10 @@ void LogicalReadRecordMain(ParallelDecodeReaderWorker *worker)
break;
}
+ if (t_thrd.parallel_decode_cxt.got_SIGHUP) {
+ t_thrd.parallel_decode_cxt.got_SIGHUP = false;
+ ProcessConfigFile(PGC_SIGHUP);
+ }
char *errm = NULL;
XLogRecord *record = XLogReadRecord(ctx->reader, startptr, &errm);
if (errm != NULL) {
diff --git a/src/gausskernel/storage/replication/logical/reorderbuffer.cpp b/src/gausskernel/storage/replication/logical/reorderbuffer.cpp
index 1ef02d1ec..ea84de910 100644
--- a/src/gausskernel/storage/replication/logical/reorderbuffer.cpp
+++ b/src/gausskernel/storage/replication/logical/reorderbuffer.cpp
@@ -2016,7 +2016,7 @@ static void ReorderBufferCheckSerializeTXN(LogicalDecodingContext *ctx, ReorderB
if (txn->nentries_mem >= (unsigned)g_instance.attr.attr_common.max_changes_in_memory ||
(data != NULL && data->max_txn_in_memory > 0 && txn->size >= (Size)data->max_txn_in_memory * sizeMB) ||
(data != NULL && data->max_reorderbuffer_in_memory > 0 &&
- txn->size >= (Size)data->max_reorderbuffer_in_memory * sizeGB)) {
+ ctx->reorder->size >= (Size)data->max_reorderbuffer_in_memory * sizeGB)) {
ReorderBufferSerializeTXN(ctx->reorder, txn);
Assert(txn->size == 0);
Assert(txn->nentries_mem == 0);
@@ -3020,6 +3020,7 @@ static void ApplyLogicalMappingFile(HTAB *tuplecid_data, Oid relid, const char *
new_ent->combocid = ent->combocid;
}
}
+ (void)CloseTransientFile(fd);
}
/*
diff --git a/src/gausskernel/storage/replication/pgoutput/pgoutput.cpp b/src/gausskernel/storage/replication/pgoutput/pgoutput.cpp
index 1c11618ea..2824d5320 100644
--- a/src/gausskernel/storage/replication/pgoutput/pgoutput.cpp
+++ b/src/gausskernel/storage/replication/pgoutput/pgoutput.cpp
@@ -141,7 +141,8 @@ static void pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *o
PGOutputData *data = (PGOutputData *)palloc0(sizeof(PGOutputData));
/* Create our memory context for private allocations. */
- data->context = AllocSetContextCreate(ctx->context, "logical replication output context", ALLOCSET_DEFAULT_SIZES);
+ data->common.context = AllocSetContextCreate(ctx->context,
+ "logical replication output context", ALLOCSET_DEFAULT_SIZES);
ctx->output_plugin_private = data;
@@ -328,7 +329,7 @@ static void pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
}
/* Avoid leaking memory by using and resetting our own context */
- old = MemoryContextSwitchTo(data->context);
+ old = MemoryContextSwitchTo(data->common.context);
/*
* Write the relation schema if the current schema haven't been sent yet.
@@ -385,7 +386,7 @@ static void pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
/* Cleanup */
MemoryContextSwitchTo(old);
- MemoryContextReset(data->context);
+ MemoryContextReset(data->common.context);
}
/*
diff --git a/src/gausskernel/storage/replication/rto_statistic.cpp b/src/gausskernel/storage/replication/rto_statistic.cpp
index b594e1de1..66384614b 100644
--- a/src/gausskernel/storage/replication/rto_statistic.cpp
+++ b/src/gausskernel/storage/replication/rto_statistic.cpp
@@ -133,7 +133,7 @@ RTOStandbyData *GetRTOStat(uint32 *num)
/* use volatile pointer to prevent code rearrangement */
volatile WalSnd *walsnd = &t_thrd.walsender_cxt.WalSndCtl->walsnds[i];
SpinLockAcquire(&walsnd->mutex);
- if (walsnd->pid != 0) {
+ if (walsnd->pid != 0 && walsnd->sendRole == SNDROLE_PRIMARY_STANDBY) {
char *standby_names = (char *)(result[readWalSnd].id);
rc = strncpy_s(standby_names, IP_LEN, g_instance.rto_cxt.rto_standby_data[i].id,
strlen(g_instance.rto_cxt.rto_standby_data[i].id));
diff --git a/src/gausskernel/storage/replication/slot.cpp b/src/gausskernel/storage/replication/slot.cpp
index 900a833c4..fbe416b99 100755
--- a/src/gausskernel/storage/replication/slot.cpp
+++ b/src/gausskernel/storage/replication/slot.cpp
@@ -226,6 +226,7 @@ void slot_reset_for_backup(ReplicationSlotPersistency persistency, bool isDummyS
SET_SLOT_PERSISTENCY(slot->data, persistency);
slot->data.xmin = InvalidTransactionId;
slot->effective_xmin = InvalidTransactionId;
+ slot->effective_catalog_xmin = InvalidTransactionId;
slot->data.database = databaseId;
slot->data.restart_lsn = restart_lsn;
slot->data.isDummyStandby = isDummyStandby;
@@ -414,6 +415,7 @@ void ReplicationSlotCreate(const char *name, ReplicationSlotPersistency persiste
SET_SLOT_PERSISTENCY(slot->data, persistency);
slot->data.xmin = InvalidTransactionId;
slot->effective_xmin = InvalidTransactionId;
+ slot->effective_catalog_xmin = InvalidTransactionId;
rc = strncpy_s(NameStr(slot->data.name), NAMEDATALEN, name, NAMEDATALEN - 1);
securec_check(rc, "\0", "\0");
NameStr(slot->data.name)[NAMEDATALEN - 1] = '\0';
@@ -964,6 +966,10 @@ void ReplicationSlotsComputeRequiredXmin(bool already_locked)
if (TransactionIdIsValid(effective_xmin) &&
(!TransactionIdIsValid(agg_xmin) || TransactionIdPrecedes(effective_xmin, agg_xmin)))
agg_xmin = effective_xmin;
+
+ if (s->data.database == InvalidOid) {
+ continue;
+ }
/* check the catalog xmin */
if (TransactionIdIsValid(effective_catalog_xmin) &&
(!TransactionIdIsValid(agg_catalog_xmin) ||
diff --git a/src/gausskernel/storage/replication/slotfuncs.cpp b/src/gausskernel/storage/replication/slotfuncs.cpp
index 32f640c87..5119b11c4 100755
--- a/src/gausskernel/storage/replication/slotfuncs.cpp
+++ b/src/gausskernel/storage/replication/slotfuncs.cpp
@@ -1088,10 +1088,9 @@ void slot_redo(XLogReaderState *record)
return;
}
-
/* Backup blocks are not used in xlog records */
Assert(!XLogRecHasAnyBlockRefs(record));
- if (GET_SLOT_EXTRA_DATA_LENGTH(*xlrec) != 0) {
+ if (info != XLOG_SLOT_CHECK && GET_SLOT_EXTRA_DATA_LENGTH(*xlrec) != 0) {
extra_content = (char*)XLogRecGetData(record) + ReplicationSlotPersistentDataConstSize;
Assert(strlen(extra_content) == (uint32)(GET_SLOT_EXTRA_DATA_LENGTH(*xlrec)));
}
diff --git a/src/gausskernel/storage/replication/walreceiverfuncs.cpp b/src/gausskernel/storage/replication/walreceiverfuncs.cpp
index 1fcad05cd..9e47555a5 100755
--- a/src/gausskernel/storage/replication/walreceiverfuncs.cpp
+++ b/src/gausskernel/storage/replication/walreceiverfuncs.cpp
@@ -1425,20 +1425,25 @@ static void GetHadrUserInfo(char *hadr_user_info)
void GetPasswordForHadrStreamingReplication(char user[], char password[])
{
char hadr_user_info[MAXPGPATH] = {0};
- char plain_hadr_user_info[MAXPGPATH] = {0};
+ char *plain_hadr_user_info = NULL;
errno_t rc = EOK;
GetHadrUserInfo(hadr_user_info);
- if (!decryptECString(hadr_user_info, plain_hadr_user_info, MAXPGPATH, HADR_MODE)) {
- rc = memset_s(plain_hadr_user_info, sizeof(plain_hadr_user_info), 0, sizeof(plain_hadr_user_info));
- securec_check(rc, "\0", "\0");
+ if (!decryptECString(hadr_user_info, &plain_hadr_user_info, HADR_MODE)) {
+ if (plain_hadr_user_info != NULL) {
+ rc = memset_s(plain_hadr_user_info, strlen(plain_hadr_user_info), 0, strlen(plain_hadr_user_info));
+ securec_check(rc, "\0", "\0");
+ pfree(plain_hadr_user_info);
+ }
ereport(ERROR, (errmsg("In disaster cluster, decrypt hadr_user_info fail.")));
}
if (sscanf_s(plain_hadr_user_info, "%[^|]|%s", user, MAXPGPATH, password, MAXPGPATH) != 2) {
- rc = memset_s(plain_hadr_user_info, sizeof(plain_hadr_user_info), 0, sizeof(plain_hadr_user_info));
+ rc = memset_s(plain_hadr_user_info, strlen(plain_hadr_user_info), 0, strlen(plain_hadr_user_info));
securec_check(rc, "\0", "\0");
+ pfree(plain_hadr_user_info);
ereport(ERROR, (errmsg("In disaster cluster, parse plain hadr_user_info fail.")));
}
- rc = memset_s(plain_hadr_user_info, sizeof(plain_hadr_user_info), 0, sizeof(plain_hadr_user_info));
+ rc = memset_s(plain_hadr_user_info, strlen(plain_hadr_user_info), 0, strlen(plain_hadr_user_info));
securec_check(rc, "\0", "\0");
+ pfree(plain_hadr_user_info);
}
diff --git a/src/gausskernel/storage/replication/walsender.cpp b/src/gausskernel/storage/replication/walsender.cpp
index 8f0a6a74b..333153e23 100755
--- a/src/gausskernel/storage/replication/walsender.cpp
+++ b/src/gausskernel/storage/replication/walsender.cpp
@@ -2127,6 +2127,9 @@ static bool cmdStringLengthCheck(const char* cmd_string)
char* rm_cmd = NULL;
char* slot_name = NULL;
+ if (cmd_string == NULL) {
+ return true;
+ }
size_t cmd_length = strlen(cmd_string);
if (cmd_length == 0) {
return true;
@@ -2141,7 +2144,7 @@ static bool cmdStringLengthCheck(const char* cmd_string)
strncmp(cmd_string, "START_REPLICATION", strlen("START_REPLICATION")) == 0) {
sub_cmd = strtok_r(comd, " ", &rm_cmd);
sub_cmd = strtok_r(NULL, " ", &rm_cmd);
- if (strlen(sub_cmd) != strlen("SLOT") ||
+ if (sub_cmd == NULL || strlen(sub_cmd) != strlen("SLOT") ||
strncmp(sub_cmd, "SLOT", strlen("SLOT")) != 0) {
return true;
}
@@ -2156,14 +2159,17 @@ static bool cmdStringLengthCheck(const char* cmd_string)
strncmp(cmd_string, "ADVANCE_REPLICATION", strlen("ADVANCE_REPLICATION")) == 0) {
sub_cmd = strtok_r(comd, " ", &rm_cmd);
sub_cmd = strtok_r(NULL, " ", &rm_cmd);
- if (strlen(sub_cmd) != strlen("SLOT") ||
+ if (sub_cmd == NULL || strlen(sub_cmd) != strlen("SLOT") ||
strncmp(sub_cmd, "SLOT", strlen("SLOT")) != 0) {
- return false;
+ return true;
}
} else {
return true;
}
slot_name = strtok_r(NULL, " ", &rm_cmd);
+ if (slot_name == NULL) {
+ return true;
+ }
/* if slot_name contains "", its length should minus 2. */
size_t slot_name_len = strlen(slot_name);
if (slot_name_len != 0 && slot_name[0] == '"' && slot_name[slot_name_len - 1] == '"') {
@@ -5359,6 +5365,24 @@ static void WalSndSigHupHandler(SIGNAL_ARGS)
if (t_thrd.walsender_cxt.MyWalSnd)
SetLatch(&t_thrd.walsender_cxt.MyWalSnd->latch);
+ if (AM_WAL_DB_SENDER && t_thrd.walsender_cxt.LogicalSlot != -1) {
+ int slotId = t_thrd.walsender_cxt.LogicalSlot;
+ int parallelism = g_Logicaldispatcher[slotId].pOptions.parallel_decode_num;
+ knl_g_parallel_decode_context *gDecodeCxt = g_instance.comm_cxt.pdecode_cxt;
+
+ if (gDecodeCxt[slotId].ParallelReaderWorkerStatus.threadState == PARALLEL_DECODE_WORKER_RUN &&
+ g_Logicaldispatcher[slotId].readWorker != NULL && g_Logicaldispatcher[slotId].readWorker->tid != 0) {
+ signal_child(g_Logicaldispatcher[slotId].readWorker->tid, SIGHUP, -1);
+ }
+ for (int i = 0; i < parallelism; i++) {
+ if (gDecodeCxt[slotId].ParallelDecodeWorkerStatusList[i].threadState == PARALLEL_DECODE_WORKER_RUN &&
+ g_Logicaldispatcher[slotId].decodeWorkers != NULL &&
+ g_Logicaldispatcher[slotId].decodeWorkers[i] != NULL &&
+ g_Logicaldispatcher[slotId].decodeWorkers[i]->tid.thid != 0) {
+ signal_child(g_Logicaldispatcher[slotId].decodeWorkers[i]->tid.thid, SIGHUP, -1);
+ }
+ }
+ }
errno = save_errno;
}
diff --git a/src/gausskernel/storage/smgr/cfs/cfs_buffers.cpp b/src/gausskernel/storage/smgr/cfs/cfs_buffers.cpp
index 663ca1f8e..b7ce00682 100644
--- a/src/gausskernel/storage/smgr/cfs/cfs_buffers.cpp
+++ b/src/gausskernel/storage/smgr/cfs/cfs_buffers.cpp
@@ -299,7 +299,7 @@ void pca_buf_load_page(pca_page_ctrl_t *item, const ExtentLocation& location, Cf
location.headerNum * BLCKSZ, (uint32)WAIT_EVENT_DATA_FILE_READ);
if (nbytes != BLCKSZ) {
item->load_status = CTRL_PAGE_LOADED_ERROR;
- ereport(WARNING, (errcode(ERRCODE_DATA_CORRUPTED),
+ ereport(LOG, (errcode(ERRCODE_DATA_CORRUPTED),
errmsg("Failed to pca_buf_load_page %s, headerNum: %u.", FilePathName(location.fd), location.headerNum)));
return;
}
diff --git a/src/gausskernel/storage/smgr/cfs/cfs_md.cpp b/src/gausskernel/storage/smgr/cfs/cfs_md.cpp
index 116e7122c..20cf5c861 100644
--- a/src/gausskernel/storage/smgr/cfs/cfs_md.cpp
+++ b/src/gausskernel/storage/smgr/cfs/cfs_md.cpp
@@ -263,6 +263,9 @@ size_t CfsWritePage(SMgrRelation reln, ForkNumber forknum, BlockNumber logicBloc
if (ctrl->load_status == CTRL_PAGE_LOADED_ERROR) {
pca_buf_free_page(ctrl, location, false);
if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) {
+ ereport(DEBUG1,
+ (errmsg("could not write block %u in file \"%s\" headerNum: %u, this relation has been removed",
+ logicBlockNumber, FilePathName(location.fd), location.headerNum)));
return 0;
}
ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED),
diff --git a/src/gausskernel/storage/smgr/smgr.cpp b/src/gausskernel/storage/smgr/smgr.cpp
old mode 100644
new mode 100755
index 055dba21c..861c34c33
--- a/src/gausskernel/storage/smgr/smgr.cpp
+++ b/src/gausskernel/storage/smgr/smgr.cpp
@@ -567,6 +567,9 @@ void smgrdounlink(SMgrRelation reln, bool isRedo, BlockNumber blockNum)
RelFileNodeBackend rnode = reln->smgr_rnode;
int which = reln->smgr_which;
int forknum;
+ HTAB *unlink_rel_hashtbl = g_instance.bgwriter_cxt.unlink_rel_hashtbl;
+ DelFileTag *entry = NULL;
+ bool found = false;
/* Close the forks at smgr level */
for (forknum = 0; forknum < (int)(reln->md_fdarray_size); forknum++) {
@@ -606,6 +609,13 @@ void smgrdounlink(SMgrRelation reln, bool isRedo, BlockNumber blockNum)
*/
unlink_file:
(*(smgrsw[which].smgr_unlink))(rnode, InvalidForkNumber, isRedo, blockNum);
+
+ (void)LWLockAcquire(g_instance.bgwriter_cxt.rel_hashtbl_lock, LW_EXCLUSIVE);
+ entry = (DelFileTag*)hash_search(unlink_rel_hashtbl, (void *)&rnode, HASH_FIND, &found);
+ if (found) {
+ entry->fileUnlink = true;
+ }
+ LWLockRelease(g_instance.bgwriter_cxt.rel_hashtbl_lock);
}
/*
diff --git a/src/gausskernel/storage/tcap/tcap_drop.cpp b/src/gausskernel/storage/tcap/tcap_drop.cpp
index 6da5d8cee..764194f98 100644
--- a/src/gausskernel/storage/tcap/tcap_drop.cpp
+++ b/src/gausskernel/storage/tcap/tcap_drop.cpp
@@ -64,6 +64,7 @@
#include "catalog/pg_ts_template.h"
#include "catalog/pgxc_class.h"
#include "catalog/storage.h"
+#include "client_logic/client_logic.h"
#include "commands/comment.h"
#include "commands/dbcommands.h"
#include "commands/directory.h"
@@ -1174,6 +1175,17 @@ static void TrTagDependentObjects(Relation depRel, ObjectAddresses *targetObject
return;
}
+static bool NeedTrFullEncryptedRel(Oid relid)
+{
+ Relation rel = relation_open(relid, NoLock);
+ if (is_full_encrypted_rel(rel)) {
+ relation_close(rel, NoLock);
+ return false;
+ }
+ relation_close(rel, NoLock);
+ return true;
+}
+
bool TrCheckRecyclebinDrop(const DropStmt *stmt, ObjectAddresses *objects)
{
Relation depRel;
@@ -1199,6 +1211,9 @@ bool TrCheckRecyclebinDrop(const DropStmt *stmt, ObjectAddresses *objects)
if (!NeedTrComm(objects->refs->objectId)) {
return false;
}
+ if (!NeedTrFullEncryptedRel(objects->refs->objectId)) {
+ return false;
+ }
depRel = heap_open(DependRelationId, AccessShareLock);
rbDrop = !TrNeedPhyDelete(depRel, objects, &objects->refs[0]);
diff --git a/src/gausskernel/storage/tcap/tcap_version.cpp b/src/gausskernel/storage/tcap/tcap_version.cpp
index abd373f17..adff5ef07 100644
--- a/src/gausskernel/storage/tcap/tcap_version.cpp
+++ b/src/gausskernel/storage/tcap/tcap_version.cpp
@@ -29,6 +29,7 @@
#include "catalog/indexing.h"
#include "catalog/pg_partition_fn.h"
#include "catalog/pg_snapshot.h"
+#include "client_logic/client_logic.h"
#include "commands/tablecmds.h"
#include "commands/matview.h"
#include "executor/node/nodeModifyTable.h"
@@ -154,6 +155,8 @@ static bool TvFeatureSupport(Oid relid, char **errstr, bool isTimecapsuleTable)
*errstr = "timecapsule feature does not support in non READ COMMITTED transaction";
} else if (TvForeignKeyCheck(relid) && isTimecapsuleTable) {
*errstr = "timecapsule feature does not support the table included foreign key or referenced by foreign key";
+ } else if (is_full_encrypted_rel(rel)) {
+ *errstr = "timecapsule feature does not support full encrypted table";
} else {
*errstr = NULL;
}
diff --git a/src/include/access/cbmparsexlog.h b/src/include/access/cbmparsexlog.h
index 1ce0565ee..6ca0fd936 100644
--- a/src/include/access/cbmparsexlog.h
+++ b/src/include/access/cbmparsexlog.h
@@ -276,6 +276,7 @@ typedef struct cbmbitmapiterator {
extern void InitXlogCbmSys(void);
extern void CBMTrackInit(bool startupXlog, XLogRecPtr startupCPRedo);
+extern void ResetXlogCbmSys(void);
extern void CBMFollowXlog(void);
extern void CBMGetMergedFile(XLogRecPtr startLSN, XLogRecPtr endLSN, char* mergedFileName);
extern CBMArray* CBMGetMergedArray(XLogRecPtr startLSN, XLogRecPtr endLSN);
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index e30638807..db4bd717b 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -256,6 +256,7 @@ extern Partition partitionOpenWithRetry(Relation relation, Oid partitionId, LOCK
extern Partition partitionOpen(Relation relation, Oid partitionId, LOCKMODE lockmode, int2 bucketId=-1);
extern void partitionClose(Relation relation, Partition partition, LOCKMODE lockmode);
extern Partition tryPartitionOpen(Relation relation, Oid partitionId, LOCKMODE lockmode);
+extern Partition PartitionOpenWithPartitionno(Relation relation, Oid partition_id, int partitionno, LOCKMODE lockmode);
extern Relation try_relation_open(Oid relationId, LOCKMODE lockmode);
extern Relation relation_openrv(const RangeVar* relation, LOCKMODE lockmode);
extern Relation relation_openrv_extended(const RangeVar* relation, LOCKMODE lockmode, bool missing_ok,
@@ -373,6 +374,13 @@ static inline void ReportPartitionOpenError(Relation relation, Oid partition_id)
#endif
}
+static inline void ReportNoExistPartition(PartStatus partStatus, Oid partOid)
+{
+ if (partStatus == PART_METADATA_NOEXIST && module_logging_is_on(MOD_GPI)) {
+ ereport(LOG, (errmodule(MOD_GPI), errmsg("Partition %u does not exist in GPI", partOid)));
+ }
+}
+
extern void heap_redo(XLogReaderState* rptr);
extern void heap_desc(StringInfo buf, XLogReaderState* record);
extern const char* heap_type_name(uint8 subtype);
diff --git a/src/include/access/tupdesc.h b/src/include/access/tupdesc.h
index 5a029877b..b7a066230 100644
--- a/src/include/access/tupdesc.h
+++ b/src/include/access/tupdesc.h
@@ -23,6 +23,7 @@
#include "access/attnum.h"
#include "catalog/pg_attribute.h"
#include "nodes/pg_list.h"
+#include "mb/pg_wchar.h"
/*
* Total number of different Table Access Method types.
@@ -202,7 +203,8 @@ extern void TupleDescInitEntryCollation(TupleDesc desc, AttrNumber attribute_num
extern void VerifyAttrCompressMode(int8 mode, int attlen, const char* attname);
-extern TupleDesc BuildDescForRelation(List* schema, Node* oriented_from = NULL, char relkind = '\0');
+extern TupleDesc BuildDescForRelation(List* schema, Node* oriented_from = NULL, char relkind = '\0',
+ Oid rel_coll_oid = InvalidOid);
extern TupleDesc BuildDescFromLists(List* names, List* types, List* typmods, List* collations);
diff --git a/src/include/access/ubtree.h b/src/include/access/ubtree.h
index eec7d528c..e059299ac 100644
--- a/src/include/access/ubtree.h
+++ b/src/include/access/ubtree.h
@@ -484,7 +484,8 @@ extern bool UBTreeDoDelete(Relation rel, IndexTuple itup, bool isRollbackIndex);
extern bool UBTreePagePruneOpt(Relation rel, Buffer buf, bool tryDelete);
extern bool UBTreePagePrune(Relation rel, Buffer buf, TransactionId oldestXmin, OidRBTree *invisibleParts = NULL);
extern bool UBTreePruneItem(Page page, OffsetNumber offnum, TransactionId oldestXmin, IndexPruneState* prstate);
-extern void UBTreePagePruneExecute(Page page, OffsetNumber* nowdead, int ndead, IndexPruneState* prstate);
+extern void UBTreePagePruneExecute(Page page, OffsetNumber* nowdead, int ndead, IndexPruneState* prstate,
+ TransactionId oldest_xmin);
extern void UBTreePageRepairFragmentation(Relation rel, BlockNumber blkno, Page page);
extern void UBTreeInsertParent(Relation rel, Buffer buf, Buffer rbuf, BTStack stack, bool is_root, bool is_only);
diff --git a/src/include/auditfuncs.h b/src/include/auditfuncs.h
index 59460b3f6..f6536d144 100644
--- a/src/include/auditfuncs.h
+++ b/src/include/auditfuncs.h
@@ -34,6 +34,8 @@
#define SmartShutdown 1
#define FastShutdown 2
#define ImmediateShutdown 3
+#define AUDIT_CLIENT_LEN 100
+#define CRYPT_FUNC_ARG "(********)"
char* pgaudit_get_relation_name(List* relation_name_list);
void pgaudit_dml_table(const char* objectname, const char* cmdtext);
@@ -50,4 +52,92 @@ extern void pgaudit_user_login(bool login_ok, const char* object_name, const cha
extern void pgaudit_user_logout(void);
extern void pgaudit_lock_or_unlock_user(bool islocked, const char* user_name);
extern void pgaudit_ddl_sql_patch(const char* objectName, const char* cmdText);
+
+extern bool audit_check_client_blacklist(char client_info[]);
+extern bool audit_check_full_audit_user();
+extern void audit_system_function(FunctionCallInfo fcinfo, const AuditResult result);
+extern char* audit_get_value_bytype(FunctionCallInfo fcinfo, int n_arg);
+
+const char* const g_audit_system_funcs[] = {
+ "set_working_grand_version_num_manually",
+ "set_config",
+ "pg_cancel_backend",
+ "pg_cancel_session",
+ "pg_cancel_invalid_query",
+ "pg_reload_conf",
+ "pg_rotate_logfile",
+ "pg_terminate_session",
+ "pg_terminate_backend",
+ "pg_create_restore_point",
+ "pg_start_backup",
+ "pg_stop_backup",
+ "pg_switch_xlog",
+ "pg_cbm_get_merged_file",
+ "pg_cbm_recycle_file",
+ "pg_enable_delay_ddl_recycle",
+ "pg_disable_delay_ddl_recycle",
+ "pg_cbm_rotate_file",
+ "gs_roach_stop_backup",
+ "gs_roach_enable_delay_ddl_recycle",
+ "gs_roach_disable_delay_ddl_recycle",
+ "gs_roach_switch_xlog",
+ "pg_last_xlog_receive_location",
+ "pg_xlog_replay_pause",
+ "pg_xlog_replay_resume",
+ "gs_pitr_clean_history_global_barriers",
+ "gs_pitr_archive_slot_force_advance",
+ "pg_create_physical_replication_slot_extern",
+ "gs_set_obs_delete_location",
+ "gs_hadr_do_switchover",
+ "gs_set_obs_delete_location_with_slotname",
+ "gs_streaming_dr_in_switchover",
+ "gs_upload_obs_file",
+ "gs_download_obs_file",
+ "gs_set_obs_file_context",
+ "gs_get_hadr_key_cn",
+ "pg_advisory_lock",
+ "pg_advisory_lock_shared",
+ "pg_advisory_unlock",
+ "pg_advisory_unlock_shared",
+ "pg_advisory_unlock_all",
+ "pg_advisory_xact_lock",
+ "pg_advisory_xact_lock_shared",
+ "pg_try_advisory_lock",
+ "pg_try_advisory_lock_shared",
+ "pg_try_advisory_xact_lock",
+ "pg_try_advisory_xact_lock_shared",
+ "pg_create_logical_replication_slot",
+ "pg_drop_replication_slot",
+ "pg_logical_slot_peek_changes",
+ "pg_logical_slot_get_changes",
+ "pg_logical_slot_get_binary_changes",
+ "pg_replication_slot_advance",
+ "pg_replication_origin_create",
+ "pg_replication_origin_drop",
+ "pg_replication_origin_session_setup",
+ "pg_replication_origin_session_reset",
+ "pg_replication_origin_session_progress",
+ "pg_replication_origin_xact_setup",
+ "pg_replication_origin_xact_reset",
+ "pg_replication_origin_advance",
+ "local_space_shrink",
+ "gs_space_shrink",
+ "global_space_shrink",
+ "pg_free_remain_segment",
+ "gs_fault_inject",
+ "sqladvisor.init",
+ "sqladvisor.set_weight_params",
+ "sqladvisor.set_cost_params",
+ "sqladvisor.assign_table_type",
+ "gs_repair_file",
+ "local_clear_bad_block_info",
+ "gs_repair_page",
+ NULL
+ };
+
+/* refer to funCrypt in elog.cpp */
+const char* const g_audit_crypt_funcs[] = {"gs_encrypt_aes128", "gs_decrypt_aes128",
+ "gs_encrypt", "gs_decrypt",
+ "aes_encrypt", "aes_decrypt", "pg_create_physical_replication_slot_extern",
+ "dblink_connect", NULL};
#endif
diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h
index 0c8295d70..f1b3ae5fc 100644
--- a/src/include/catalog/catversion.h
+++ b/src/include/catalog/catversion.h
@@ -62,6 +62,6 @@
#define NAILED_IN_CATALOG_NUM 8
-#define CATALOG_NUM 107
+#define CATALOG_NUM 108
#endif
diff --git a/src/include/catalog/gs_job_attribute.h b/src/include/catalog/gs_job_attribute.h
index 34a738c37..63e115b98 100644
--- a/src/include/catalog/gs_job_attribute.h
+++ b/src/include/catalog/gs_job_attribute.h
@@ -153,6 +153,14 @@ inline Datum TimeStampTzToText(Datum value)
return res;
}
+inline Datum TimeStampToText(Datum value)
+{
+ char *str = DatumGetCString(DirectFunctionCall1(timestamp_out, value));
+ Datum res = CStringGetTextDatum(str);
+ pfree_ext(str);
+ return res;
+}
+
inline Datum TextToTimeStampTz(Datum value)
{
if (!PointerIsValid(value)) {
@@ -287,6 +295,11 @@ extern void enable_single_force(Datum object_name, Datum enable_value, bool forc
extern void enable_program(Datum program_name, Datum enable_value);
extern void set_job_attribute(const Datum job_name, const Datum attribute_name, const Datum attribute_value);
extern bool execute_backend_scheduler_job(Datum job_name, StringInfoData *buf);
+extern HeapTuple search_from_pg_job_proc(Relation rel, Datum name);
+extern void get_program_info(Datum program_name, Datum *job_type, Datum *job_action, Datum *num_of_args,
+ Datum *enabled);
+extern Datum get_priv_user(Datum program_name, Datum job_intype);
+
/* prefix for inlined object */
#define INLINE_JOB_SCHEDULE_PREFIX "inline_schedule_"
diff --git a/src/include/catalog/gs_utf8_collation.h b/src/include/catalog/gs_utf8_collation.h
new file mode 100644
index 000000000..918b3d914
--- /dev/null
+++ b/src/include/catalog/gs_utf8_collation.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * ---------------------------------------------------------------------------------------
+ *
+ * gs_utf8_collation.h
+ *
+ * IDENTIFICATION
+ * src/include/catalog/gs_utf8_collation.h
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+#ifndef GS_UTF8_COLLATION_H
+#define GS_UTF8_COLLATION_H
+
+int varstr_cmp_by_builtin_collations(char* arg1, int len1, char* arg2, int len2, Oid collid);
+Datum hash_text_by_builtin_colltions(const unsigned char *key, size_t len, Oid collid);
+void check_binary_collation(Oid collation, Oid type_oid);
+bool is_support_b_format_collation(Oid collation);
+Oid binary_need_transform_typeid(Oid typeoid, Oid* collation);
+int matchtext_utf8mb4(unsigned char* t, int tlen, unsigned char* p, int plen);
+bool is_b_format_collation(Oid collation);
+
+#endif /* GS_UTF8_COLLATION_H */
\ No newline at end of file
diff --git a/src/include/catalog/heap.h b/src/include/catalog/heap.h
index 07c84e799..1b4eea2e2 100644
--- a/src/include/catalog/heap.h
+++ b/src/include/catalog/heap.h
@@ -158,7 +158,7 @@ extern Oid HeapAddHashPartition(Relation pgPartRel, Oid partTableOid, Oid partT
Oid bucketOid, HashPartitionDefState *newPartDef, Oid ownerid, Datum reloptions,
const bool* isTimestamptz, StorageType storage_type, int2vector* subpartition_key = NULL, bool isSubPartition = false,
bool partkeyexprIsNull = true, bool partkeyIsFunc = false);
-extern Node *MakeDefaultSubpartition(PartitionState *partitionState, Node *partitionDefState);
+extern Node *MakeDefaultSubpartition(PartitionState *partitionState, PartitionDefState *partitionDefState);
extern List *addNewSubPartitionTuplesForPartition(Relation pgPartRel, Oid partTableOid, Oid partTablespace,
Oid bucketOid, Oid ownerid, Datum reloptions, const bool *isTimestamptz, StorageType storage_type,
PartitionState *partitionState, Node *partitionDefState, LOCKMODE partLockMode);
@@ -170,7 +170,8 @@ extern void addNewPartitionTuple(Relation pg_part_desc, Partition new_part_desc,
Datum interval, Datum maxValues, Datum transitionPoint, Datum reloptions, bool partkeyexprIsNull = true, bool partkeyIsFunc = false);
extern void heap_truncate_one_part(Relation rel , Oid partOid);
-extern Oid heapTupleGetPartitionId(Relation rel, void *tuple, bool isDDL = false, bool canIgnore = false);
+extern Oid heapTupleGetPartitionId(Relation rel, void *tuple, int *partitionno, bool isDDL = false,
+ bool canIgnore = false);
extern Oid heapTupleGetSubPartitionId(Relation rel, void *tuple);
extern void heap_truncate(List *relids);
extern void heap_truncate_one_rel(Relation rel);
@@ -185,7 +186,8 @@ extern void InsertPgClassTuple(Relation pg_class_desc, Relation new_rel_desc, Oi
extern List *AddRelationNewConstraints(Relation rel, List *newColDefaults, List *newConstraints, bool allow_merge, bool is_local);
extern List *AddRelClusterConstraints(Relation rel, List *clusterKeys);
-extern void StoreAttrDefault(Relation rel, AttrNumber attnum, Node *expr, char generatedCol, Node* update_expr);
+extern void StoreAttrDefault(Relation rel, AttrNumber attnum, Node *expr, char generatedCol, Node* update_expr,
+ bool skip_dep = false);
extern Node *cookDefault(ParseState *pstate, Node *raw_default, Oid atttypid, int32 atttypmod, char *attname,
char generatedCol);
extern void DeleteRelationTuple(Oid relid);
@@ -248,7 +250,7 @@ extern char* make_column_map(TupleDesc tuple_desc);
extern bool* CheckPartkeyHasTimestampwithzone(Relation partTableRel, bool isForSubPartition = false);
extern bool *CheckSubPartkeyHasTimestampwithzone(Relation partTableRel, List *subpartKeyPosList);
-extern Oid AddNewIntervalPartition(Relation rel, void* insertTuple, bool isDDL = false);
+extern Oid AddNewIntervalPartition(Relation rel, void* insertTuple, int *partitionno, bool isDDL = false);
extern int GetIndexKeyAttsByTuple(Relation relation, HeapTuple indexTuple);
diff --git a/src/include/catalog/index.h b/src/include/catalog/index.h
index 1909fcd3d..6fc1e3ec7 100644
--- a/src/include/catalog/index.h
+++ b/src/include/catalog/index.h
@@ -78,7 +78,8 @@ typedef enum CheckWaitMode
CHECK_NOWAIT,
} CheckWaitMode;
-extern void index_check_primary_key(Relation heapRel, IndexInfo *indexInfo, bool is_alter_table);
+extern void index_check_primary_key(Relation heapRel, IndexInfo *indexInfo, bool is_alter_table,
+ bool is_modify_primary = false);
/*
* Parameter isPartitionedIndex indicates whether the index is a partition index.
diff --git a/src/include/catalog/indexing.h b/src/include/catalog/indexing.h
index 9d4678414..49b4011d3 100644
--- a/src/include/catalog/indexing.h
+++ b/src/include/catalog/indexing.h
@@ -119,6 +119,8 @@ DECLARE_UNIQUE_INDEX(pg_class_relname_nsp_index, 2663, on pg_class using btree(r
#define ClassNameNspIndexId 2663
DECLARE_INDEX(pg_class_tblspc_relfilenode_index, 9981, on pg_class using btree(reltablespace oid_ops, relfilenode oid_ops));
#define ClassTblspcRelfilenodeIndexId 9981
+DECLARE_UNIQUE_INDEX(pg_collation_enc_def_index, 3147, on pg_collation using btree(collencoding int4_ops, collisdef bool_ops));
+#define CollationEncDefIndexId 3147
DECLARE_UNIQUE_INDEX(pg_collation_name_enc_nsp_index, 3164, on pg_collation using btree(collname name_ops, collencoding int4_ops, collnamespace oid_ops));
#define CollationNameEncNspIndexId 3164
DECLARE_UNIQUE_INDEX(pg_collation_oid_index, 3085, on pg_collation using btree(oid oid_ops));
diff --git a/src/include/catalog/namespace.h b/src/include/catalog/namespace.h
index e1e06a2e1..d2e4a69d7 100644
--- a/src/include/catalog/namespace.h
+++ b/src/include/catalog/namespace.h
@@ -208,6 +208,7 @@ extern void RemoveTmpNspFromSearchPath(Oid tmpnspId);
extern Oid get_collation_oid(List *collname, bool missing_ok);
extern Oid get_conversion_oid(List *conname, bool missing_ok);
extern Oid FindDefaultConversionProc(int4 for_encoding, int4 to_encoding);
+extern Oid get_collation_oid_with_lower_name(const char* collation_name, int charset);
/* initialization & transaction cleanup code */
extern void InitializeSearchPath(void);
diff --git a/src/include/catalog/pg_collation.h b/src/include/catalog/pg_collation.h
index cf94ffb80..3dff57ed2 100644
--- a/src/include/catalog/pg_collation.h
+++ b/src/include/catalog/pg_collation.h
@@ -38,6 +38,10 @@ CATALOG(pg_collation,3456) BKI_SCHEMA_MACRO
int4 collencoding; /* encoding for this collation; -1 = "all" */
NameData collcollate; /* LC_COLLATE setting */
NameData collctype; /* LC_CTYPE setting */
+#ifdef CATALOG_VARLEN
+ text collpadattr; /* collation pad attribute */
+ bool collisdef; /* whether the collation is the default for its collencoding */
+#endif
} FormData_pg_collation;
/* ----------------
@@ -51,27 +55,58 @@ typedef FormData_pg_collation *Form_pg_collation;
* compiler constants for pg_collation
* ----------------
*/
-#define Natts_pg_collation 6
+#define Natts_pg_collation 8
#define Anum_pg_collation_collname 1
#define Anum_pg_collation_collnamespace 2
#define Anum_pg_collation_collowner 3
#define Anum_pg_collation_collencoding 4
#define Anum_pg_collation_collcollate 5
#define Anum_pg_collation_collctype 6
+#define Anum_pg_collation_collpadattr 7
+#define Anum_pg_collation_collisdef 8
/* ----------------
* initial contents of pg_collation
* ----------------
*/
-DATA(insert OID = 100 ( default PGNSP PGUID -1 "" "" ));
+DATA(insert OID = 100 ( default PGNSP PGUID -1 "" "" _null_ _null_));
DESCR("database's default collation");
#define DEFAULT_COLLATION_OID 100
-DATA(insert OID = 950 ( C PGNSP PGUID -1 "C" "C" ));
+DATA(insert OID = 950 ( C PGNSP PGUID -1 "C" "C" _null_ _null_));
DESCR("standard C collation");
#define C_COLLATION_OID 950
-DATA(insert OID = 951 ( POSIX PGNSP PGUID -1 "POSIX" "POSIX" ));
+DATA(insert OID = 951 ( POSIX PGNSP PGUID -1 "POSIX" "POSIX" _null_ _null_));
DESCR("standard POSIX collation");
#define POSIX_COLLATION_OID 951
+#define B_FORMAT_COLLATION_INTERVAL 256
+/* collation in B format start here. */
+#define B_FORMAT_COLLATION_OID_MIN 1024
+
+/* BINARY's start with 1024 */
+DATA(insert OID = 1026 (binary PGNSP PGUID 0 "binary" "binary" "NO PAD" t));
+DESCR("binary collation");
+#define BINARY_COLLATION_OID 1026
+/* GBK's start with 1280 */
+/* UTF8's start with 1536 */
+DATA(insert OID = 1537 (utf8mb4_general_ci PGNSP PGUID 7 "utf8mb4_general_ci" "utf8mb4_general_ci" "PAD SPACE" t));
+DESCR("utf8mb4_general_ci collation");
+#define UTF8MB4_GENERAL_CI_COLLATION_OID 1537
+DATA(insert OID = 1538 (utf8mb4_unicode_ci PGNSP PGUID 7 "utf8mb4_unicode_ci" "utf8mb4_unicode_ci" "PAD SPACE" _null_));
+DESCR("utf8mb4_unicode_ci collation");
+#define UTF8MB4_UNICODE_CI_COLLATION_OID 1538
+DATA(insert OID = 1539 (utf8mb4_bin PGNSP PGUID 7 "utf8mb4_bin" "utf8mb4_bin" "PAD SPACE" _null_));
+DESCR("utf8mb4_bin collation");
+#define UTF8MB4_BIN_COLLATION_OID 1539
+/* GB18030's start with 1792 */
+
+#define B_FORMAT_COLLATION_OID_MAX 10000
+
+#define COLLATION_IN_B_FORMAT(colloid) \
+ ((colloid) > B_FORMAT_COLLATION_OID_MIN && (colloid) < B_FORMAT_COLLATION_OID_MAX)
+
+#define COLLATION_HAS_INVALID_ENCODING(colloid) \
+ ((colloid) < B_FORMAT_COLLATION_OID_MIN)
+
#endif /* PG_COLLATION_H */
diff --git a/src/include/catalog/pg_job.h b/src/include/catalog/pg_job.h
index 3e2d07b7f..968ea6812 100644
--- a/src/include/catalog/pg_job.h
+++ b/src/include/catalog/pg_job.h
@@ -175,6 +175,13 @@ extern bool is_scheduler_job_id(Relation relation, int64 job_id);
/* For run_job_internal */
extern void job_finish(PG_FUNCTION_ARGS);
+extern Datum job_submit(PG_FUNCTION_ARGS);
+extern void create_job_raw(PG_FUNCTION_ARGS);
+extern char *create_inline_program(Datum job_name, Datum job_type, Datum job_action, Datum num_of_args, Datum enabled);
+extern char *get_inline_schedule_name(Datum job_name);
+extern void drop_single_job_internal(PG_FUNCTION_ARGS);
+extern void ShowEventCommand(ShowEventStmt *stmt, DestReceiver* dest);
+extern TupleDesc GetEventResultDesc();
#define JOBID_ALLOC_OK 0 /* alloc jobid ok */
#define JOBID_ALLOC_ERROR 1 /* alloc jobid error */
diff --git a/src/include/catalog/pg_namespace.h b/src/include/catalog/pg_namespace.h
index b36cbe566..1daa82307 100644
--- a/src/include/catalog/pg_namespace.h
+++ b/src/include/catalog/pg_namespace.h
@@ -45,6 +45,7 @@ CATALOG(pg_namespace,2615) BKI_SCHEMA_MACRO
#endif
char in_redistribution;
bool nspblockchain;
+ Oid nspcollation;
} FormData_pg_namespace;
/* ----------------
@@ -59,74 +60,76 @@ typedef FormData_pg_namespace *Form_pg_namespace;
* ----------------
*/
-#define Natts_pg_namespace 6
+#define Natts_pg_namespace 7
#define Anum_pg_namespace_nspname 1
#define Anum_pg_namespace_nspowner 2
#define Anum_pg_namespace_nsptimeline 3
#define Anum_pg_namespace_nspacl 4
#define Anum_pg_namespace_in_redistribution 5
#define Anum_pg_namespace_nspblockchain 6
-
+#define Anum_pg_namespace_nspcollation 7
/* ----------------
* initial contents of pg_namespace
* ---------------
*/
-DATA(insert OID = 11 ( "pg_catalog" PGUID 0 _null_ n f));
+DATA(insert OID = 11 ( "pg_catalog" PGUID 0 _null_ n f _null_));
DESCR("system catalog schema");
#define PG_CATALOG_NAMESPACE 11
-DATA(insert OID = 99 ( "pg_toast" PGUID 0 _null_ n f));
+DATA(insert OID = 99 ( "pg_toast" PGUID 0 _null_ n f _null_));
DESCR("reserved schema for TOAST tables");
#define PG_TOAST_NAMESPACE 99
-DATA(insert OID = 100 ( "cstore" PGUID 0 _null_ n f));
+DATA(insert OID = 100 ( "cstore" PGUID 0 _null_ n f _null_));
DESCR("reserved schema for DELTA tables");
#define CSTORE_NAMESPACE 100
-DATA(insert OID = 3988 ( "pkg_service" PGUID 0 _null_ n f));
+DATA(insert OID = 3988 ( "pkg_service" PGUID 0 _null_ n f _null_));
DESCR("pkg_service schema");
#define PG_PKG_SERVICE_NAMESPACE 3988
-DATA(insert OID = 2200 ( "public" PGUID 0 _null_ n f));
+DATA(insert OID = 2200 ( "public" PGUID 0 _null_ n f _null_));
DESCR("standard public schema");
#define PG_PUBLIC_NAMESPACE 2200
-DATA(insert OID = 4988 ( "dbe_perf" PGUID 0 _null_ n f));
+DATA(insert OID = 4988 ( "dbe_perf" PGUID 0 _null_ n f _null_));
DESCR("dbe_perf schema");
#define PG_DBEPERF_NAMESPACE 4988
-DATA(insert OID = 4989 ( "snapshot" PGUID 0 _null_ n f));
+DATA(insert OID = 4989 ( "snapshot" PGUID 0 _null_ n f _null_));
DESCR("snapshot schema");
#define PG_SNAPSHOT_NAMESPACE 4989
-DATA(insert OID = 4990 ( "blockchain" PGUID 0 _null_ n f));
+DATA(insert OID = 4990 ( "blockchain" PGUID 0 _null_ n f _null_));
DESCR("blockchain schema");
#define PG_BLOCKCHAIN_NAMESPACE 4990
-DATA(insert OID = 4991 ( "db4ai" PGUID 0 _null_ n f));
+DATA(insert OID = 4991 ( "db4ai" PGUID 0 _null_ n f _null_));
DESCR("db4ai schema");
#define PG_DB4AI_NAMESPACE 4991
-DATA(insert OID = 4992 ( "dbe_pldebugger" PGUID 0 _null_ n f));
+DATA(insert OID = 4992 ( "dbe_pldebugger" PGUID 0 _null_ n f _null_));
DESCR("dbe_pldebugger schema");
#define PG_PLDEBUG_NAMESPACE 4992
-DATA(insert OID = 7813 ( "sqladvisor" PGUID 0 _null_ n f));
+DATA(insert OID = 7813 ( "sqladvisor" PGUID 0 _null_ n f _null_));
DESCR("sqladvisor schema");
#define PG_SQLADVISOR_NAMESPACE 7813
#ifndef ENABLE_MULTIPLE_NODES
-DATA(insert OID = 4993 ( "dbe_pldeveloper" PGUID 0 _null_ n f));
+DATA(insert OID = 4993 ( "dbe_pldeveloper" PGUID 0 _null_ n f _null_));
DESCR("dbe_pldeveloper schema");
#define DBE_PLDEVELOPER_NAMESPACE 4993
#endif
-DATA(insert OID = 9049 ( "dbe_sql_util" PGUID 0 _null_ n f));
+DATA(insert OID = 9049 ( "dbe_sql_util" PGUID 0 _null_ n f _null_));
DESCR("sql util schema");
#define DBE_SQL_UTIL_NAMESPACE 9049
/*
* prototypes for functions in pg_namespace.c
*/
-extern Oid NamespaceCreate(const char *nspName, Oid ownerId, bool isTemp, bool hasBlockChain = false);
+extern Oid NamespaceCreate(const char *nspName, Oid ownerId, bool isTemp, bool hasBlockChain = false,
+ Oid colloid = InvalidOid);
extern bool IsLedgerNameSpace(Oid nspOid);
+extern Oid get_nsp_default_collation(Oid nspOid);
#endif /* PG_NAMESPACE_H */
diff --git a/src/include/catalog/pg_operator.h b/src/include/catalog/pg_operator.h
index 3d8efbf9d..66e23adca 100644
--- a/src/include/catalog/pg_operator.h
+++ b/src/include/catalog/pg_operator.h
@@ -240,6 +240,7 @@ typedef FormData_pg_operator *Form_pg_operator;
#define OID_NAME_ICLIKE_OP 1625
#define OID_TEXT_ICLIKE_OP 1627
#define OID_BPCHAR_ICLIKE_OP 1629
+#define OID_BPCHAR_IC_NOT_LIKE_OP 1630
#define NUMEQOID 1752
#define NUMERICEQOID 1752
#define NUMERICNEOID 1753
diff --git a/src/include/catalog/pg_partition.h b/src/include/catalog/pg_partition.h
index 392e16b29..b42740107 100644
--- a/src/include/catalog/pg_partition.h
+++ b/src/include/catalog/pg_partition.h
@@ -61,11 +61,13 @@ CATALOG(pg_partition,9016) BKI_ROWTYPE_OID(3790) BKI_SCHEMA_MACRO
text boundaries[1];
text transit[1];
text reloptions[1]; /* access-method-specific options */
-#endif
TransactionId relfrozenxid64;
TransactionId relminmxid; /* all multixacts in this rel are >= this.
* this is really a MultiXactId */
text partkeyexpr;
+ int4 partitionno; /* A unique identifier of each partition, see pg_partition_fn.h for more detail */
+ int4 subpartitionno; /* A unique identifier of each subpartition */
+#endif
} FormData_pg_partition;
/* Size of fixed part of pg_partition tuples, not counting var-length fields */
#define PARTITION_TUPLE_SIZE \
@@ -96,7 +98,7 @@ typedef FormData_pg_partition *Form_pg_partition;
#define PART_OBJ_TYPE_TABLE_SUB_PARTITION 's'
#define PART_OBJ_TYPE_INDEX_PARTITION 'x'
-#define Natts_pg_partition 30
+#define Natts_pg_partition 32
#define Anum_pg_partition_relname 1
#define Anum_pg_partition_parttype 2
#define Anum_pg_partition_parentid 3
@@ -127,5 +129,7 @@ typedef FormData_pg_partition *Form_pg_partition;
#define Anum_pg_partition_relfrozenxid64 28
#define Anum_pg_partition_relminmxid 29
#define Anum_pg_partition_partkeyexpr 30
+#define Anum_pg_partition_partitionno 31
+#define Anum_pg_partition_subpartitionno 32
#endif/*PG_PARTITION_H*/
diff --git a/src/include/catalog/pg_partition_fn.h b/src/include/catalog/pg_partition_fn.h
index 4dab1f6ef..8b5189c87 100644
--- a/src/include/catalog/pg_partition_fn.h
+++ b/src/include/catalog/pg_partition_fn.h
@@ -42,7 +42,6 @@
#include "access/heapam.h"
#include "storage/lmgr.h"
-#define MAX_PARTITIONKEY_NUM 4
#define MAX_PARTITION_NUM 1048575 /* update LEN_PARTITION_PREFIX as well ! */
#define INTERVAL_PARTITION_NAME_PREFIX "sys_p"
#define INTERVAL_PARTITION_NAME_PREFIX_FMT "sys_p%u"
@@ -57,25 +56,144 @@
*/
#define LEN_PARTITION_PREFIX 55
+/*-----------------------------------------------------------*/
+/* Partition Concurrency Design Part1: PARTITION OBJECT LOCK */
/*
- * A suppositional sequence number for partition.
+ * A suppositional sequence number for partition lock.
+ * We apply PARTITION OBJECT_LOCK to protect partition parallel work.
+ * In this case, the relid will be locked by LockPartitionObject(relid, PARTITION_OBJECT_LOCK_SDEQUENCE, lockmode).
+ * We take these three cases in condition:
+ * 1. If it's a DML operation, the OBJECT_LOCK(ShareLock) will be added in parserOpenTable, and will be released
+ * in InitPlan. Note that the snapshot is obtained before InitPlan.
+ * 2. If it's a DDL operation, the OBJECT_LOCK(ExclusiveLock) will be added before CommitTransaction, which will
+ * be released during CommitTransaction.
+ * 3. If it's an ADD_INTERVAL_PARTITION operation, the operation will be transformed from DML to DDL. Besides the two
+ * actions above, we apply an additional OBJECT_LOCK, INTERVAL_PARTITION_LOCK_SDEQUENCE, on all INTERVAL DDL operations
+ * (including ADD_INTERVAL_PARTITION) to protect the whole process.
*/
-#define ADD_PARTITION_ACTION (MAX_PARTITION_NUM + 1)
+typedef enum PartitionObjectLock {
+ PARTITION_OBJECT_LOCK_SDEQUENCE = MAX_PARTITION_NUM + 1,
+ /* A suppositional sequence number for all partition operations. This lock is designed to avoid committing DDL until
+ * we get the DML executor snapshot before InitPlan.
+ * This is to forbid instantaneous inconsistency. Just Consider the following condition: a SQL scans the destination
+ * partitioned table and uses both Local Partitioned Index and Global Partitioned Index, the LPI scan partition info
+ * is generated in static pruning in optimizer, while the GPI is in executor. Without PARTITION OBJECT_LOCK, if a
+ * DDL operation commits, which creates a new partition and inserts into data, such as ADD_INTERVAL_PARTITION, the
+ * inconsistency may accur. Since the new partition data is invisible for static pruning of LPI scan, and is visible
+ * for GPI scan. */
+ INTERVAL_PARTITION_LOCK_SDEQUENCE
+ /* A suppositional sequence number for DDL operations of all interval partitioned table. This is to forbid
+ * concurrency of ADD_INTERVAL_PARTITION. Note that an ADD_INTERVAL_PARTITION action only holds RowExclusiveLock
+ * on the table, so we need an additional OBJECT_LOCK to protect the whole process. */
+} PartitionObjectLock;
+
+typedef enum PartitionObjectLockType {
+ PARTITION_SHARE_LOCK = AccessShareLock,
+ PARTITION_EXCLUSIVE_LOCK = AccessExclusiveLock
+} PartitionObjectLockType;
+
+extern void LockPartitionObject(Oid relOid, PartitionObjectLock object, PartitionObjectLockType type);
+extern void UnlockPartitionObject(Oid relOid, PartitionObjectLock object, PartitionObjectLockType type);
+extern bool ConditionalLockPartitionObject(Oid relOid, PartitionObjectLock object, PartitionObjectLockType type);
+#ifndef ENABLE_MULTIPLE_NODES
+extern void AddPartitionDMLInfo(Oid relOid);
+extern void AddPartitionDDLInfo(Oid relOid);
+extern void LockPartitionDDLOperation();
+#endif
+
+/*----------------------------------------------------------------*/
+/* Partition Concurrency Design Part2: PARTITIONNO/SUBPARTITIONNO */
/*
- * We add ADD_PARTITION_ACTION sequence lock to prevent parallel complaints.
- */
-extern void LockRelationForAddIntervalPartition(Relation rel);
-extern void LockRelationForAccessIntervalPartitionTab(Relation rel);
-extern void UnlockRelationForAccessIntervalPartTabIfHeld(Relation rel);
-extern void UnlockRelationForAddIntervalPartition(Relation rel);
+ * PARTITIONNO/SUBPARTITIONNO: A unique identifier of each partition in a partitioned/subpartitioned table.
+ * The partitionno/subpartitionno is recorded in catalog PG_PARTITION. See pg_partition.h for more detail.
+ * 1. When create a partitioned table containing n partitions, the partitionno of each partition is set to 1~n, and the
+ * partitioned tuple records the maxvalue, we use a negative value, -n to distinguish from the partition tuple.
+ * Similarly, if a partition contains m subpartitions, the subpartitionno of each subpartition is set to 1~m, and the
+ * partition tuple records the maxvalue -m.
+ * 2. When DDL is done on the partitioned table, think of four base conditions:
+ * a). ADD_PARTITION_ACTION: Firstly we read the record of partitioned tuple, and get the maxvalue of partitionno,
+ * we mark it as -x (the maxvalue is recorded as a negative value). Then the partitionno of the new partition is set
+ * to x+1, and the maxvalue is updated into -(x+1).
+ * b). DROP_PARTITION_ACTION: Do nothing.
+ * c). INPLACE_UPDATE_ACTION: Firstly we read the record of the src partition tuple, we mask it as k. Then the
+ * partitionno of the new partition is set to k. Do nothing on the partitioned tuple.
+ * d). OTHER_PARTITION_ACTION: Do nothing.
+ * In fact, a partition DDL can be a collection of these four action above. For example, a Merge-Partition operation
+ * has a series of DROP_PARTITION_ACTION and an ADD_PARTITION_ACTION, a Truncate-Partition operation with
+ * UPDATE_GLOBAL_INDEX has an OTHER_PARTITION_ACTION and a INPLACE_UPDATE_ACTION.
+ * 3. When DML is done on the partitioned table, think of three stages: pruning -> obtain-partoid -> partition-open.
+ * In the period of pruning -> obtain-partoid, the partseq may be dislocated; in the period of obtain-partoid ->
+ * partition-open, the partoid may be inplace-updated to a new oid with an UPDATE_GLOBAL_INDEX DDL action. So the
+ * partitionno is used to solve these problem.
+ * a). When pruning, we save partitionno list when obtain the partseq list. See the definition of PruningResult.
+ * b). When obtain-partoid, we use partseq to get the partoid, and check the src partitionno with dest one, if not
+ * match, we use partitionno to research. See the definition of getPartitionOidFromSequence.
+ * c). When partition-open, if we have the dest partitionno, firstly we will do tryPartitionOpen, if not found, just
+ * use partitionno to research the new partoid, and retry. See the definition of PartitionOpenWithPartitionno. */
+
+/* If the current partitionno reaches MAX_PARTITION_NO, run RelationResetPartitionno to reset it. */
+#define MAX_PARTITION_NO (INT32_MAX - MAX_PARTITION_NUM)
+#define INVALID_PARTITION_NO 0
+#define PARTITIONNO_IS_VALID(partitionno) ((partitionno) > 0)
+
+extern HeapTuple ScanPgPartition(Oid targetPartId, bool indexOK, Snapshot snapshot);
+extern Oid RelOidGetPartitionTupleid(Oid relOid);
+extern int GetCurrentPartitionNo(Oid partOid);
+extern int GetCurrentSubPartitionNo(Oid partOid);
+extern void UpdateCurrentPartitionNo(Oid partOid, int partitionno, bool inplace);
+extern void UpdateCurrentSubPartitionNo(Oid partOid, int subpartitionno);
+extern Oid GetPartOidWithPartitionno(Oid parentid, int partitionno, char parttype);
+extern Oid InvisiblePartidGetNewPartid(Oid partoid);
+extern void SetPartitionnoForPartitionState(PartitionState *partTableState);
+extern void RelationResetPartitionno(Oid relOid, LOCKMODE relationlock);
+extern int GetPartitionnoFromSequence(PartitionMap *partmap, int partseq);
+
+#define PARTITION_LOG(format, ...) \
+ do { \
+ if (module_logging_is_on(MOD_PARTITION)) { \
+ ereport(DEBUG2, (errmodule(MOD_PARTITION), errmsg(format, ##__VA_ARGS__), ignore_interrupt(true))); \
+ } \
+ } while (0)
+
+extern const uint32 PARTITION_ENHANCE_VERSION_NUM;
+#define PARTITIONNO_IN_UPGRADE (t_thrd.proc->workingVersionNum < PARTITION_ENHANCE_VERSION_NUM)
+#define PARTITIONNO_VALID_ASSERT(partitionno) \
+ Assert(PARTITIONNO_IN_UPGRADE || (!PARTITIONNO_IN_UPGRADE && (partitionno) > 0))
typedef void (*PartitionNameGetPartidCallback) (Oid partitioned_relation, const char *partition_name, Oid partId,
Oid oldPartId, char partition_type, void *callback_arg, LOCKMODE callbackobj_lockMode);
-extern void insertPartitionEntry(Relation pg_partition_desc, Partition new_part_desc, Oid new_part_id,
- int2vector *pkey, const oidvector *inttablespace, Datum interval,
- Datum maxValues, Datum transitionPoint, Datum reloptions, char parttype, bool partkeyexprIsNull = true,
- bool partkeyIsFunc = false);
+
+/* some pg_partition tuple info */
+struct PartitionTupleInfo {
+ int2vector* pkey;
+ oidvector* intablespace;
+ Datum interval;
+ Datum boundaries;
+ Datum transitionPoint;
+ Datum reloptions;
+ bool partkeyexprIsNull;
+ bool partkeyIsFunc;
+ int partitionno;
+ int subpartitionno;
+
+ PartitionTupleInfo()
+ {
+ pkey = NULL;
+ intablespace = NULL;
+ interval = (Datum)0;
+ boundaries = (Datum)0;
+ transitionPoint = (Datum)0;
+ reloptions = (Datum)0;
+ partkeyexprIsNull = true;
+ partkeyIsFunc = false;
+ partitionno = INVALID_PARTITION_NO;
+ subpartitionno = INVALID_PARTITION_NO;
+ }
+};
+
+extern void insertPartitionEntry(Relation pg_partition_desc, Partition new_part_desc, Oid new_part_id,
+ PartitionTupleInfo *partTupleInfo);
extern bool isPartitionedObject(Oid relid, char relkind, bool missing_ok);
extern bool isSubPartitionedObject(Oid relid, char relkind, bool missing_ok);
extern bool isPartitionObject(Oid partid, char partkind, bool missing_ok);
@@ -86,7 +204,7 @@ extern Oid indexPartGetHeapPart(Oid indexPart, bool missing_ok);
extern Oid searchPartitionIndexOid(Oid partitionedIndexid, List *pindex);
extern List *getPartitionObjectIdList(Oid relid, char relkind);
extern List* getSubPartitionObjectIdList(Oid relid);
-extern Oid partitionNameGetPartitionOid (Oid partitionedTableOid,
+extern Oid PartitionNameGetPartitionOid (Oid partitionedTableOid,
const char *partitionName,
char objectType,
LOCKMODE lockMode,
@@ -94,12 +212,21 @@ extern Oid partitionNameGetPartitionOid (Oid partitionedTableOid,
bool noWait,
PartitionNameGetPartidCallback callback,
void *callback_arg,
- LOCKMODE callbackobj_lockMode,
- Oid *partOidForSubPart = NULL);
-extern Oid partitionValuesGetPartitionOid(Relation rel, List *partKeyValueList, LOCKMODE lockMode, bool topClosed,
+ LOCKMODE callbackobj_lockMode);
+extern Oid SubPartitionNameGetSubPartitionOid(Oid partitionedRelationOid,
+ const char* subpartitionName,
+ LOCKMODE partlock,
+ LOCKMODE subpartlock,
+ bool missingOk,
+ bool noWait,
+ PartitionNameGetPartidCallback callback,
+ void* callback_arg,
+ LOCKMODE callbackobj_lockMode,
+ Oid *partOidForSubPart);
+extern Oid PartitionValuesGetPartitionOid(Relation rel, List *partKeyValueList, LOCKMODE lockMode, bool topClosed,
bool missingOk, bool noWait);
-extern Oid subpartitionValuesGetSubpartitionOid(Relation rel, List *partKeyValueList, List *subpartKeyValueList,
- LOCKMODE lockMode, bool topClosed, bool missingOk, bool noWait, Oid *partOidForSubPart);
+extern Oid SubPartitionValuesGetSubPartitionOid(Relation rel, List *partKeyValueList, List *subpartKeyValueList,
+ LOCKMODE partlock, LOCKMODE subpartlock, bool topClosed, bool missingOk, bool noWait, Oid *partOidForSubPart);
extern List *searchPartitionIndexesByblid(Oid blid);
extern List *searchPgPartitionByParentId(char parttype, Oid parentId, ScanDirection direction = ForwardScanDirection);
extern List *searchPgSubPartitionByParentId(char parttype, List *parentOids,
diff --git a/src/include/catalog/pg_type.h b/src/include/catalog/pg_type.h
index 76411e5fb..eef4fd69e 100644
--- a/src/include/catalog/pg_type.h
+++ b/src/include/catalog/pg_type.h
@@ -855,4 +855,13 @@ DATA(insert OID = 3272 ( anyset PGNSP PGUID -1 f s H t t \054 0 0 0 anyset_in
((typid) == BYTEAWITHOUTORDERCOLOID || \
(typid) == BYTEAWITHOUTORDERWITHEQUALCOLOID)
+#define IsBinaryType(typid) \
+ ((typid) == BLOBOID)
+
+#define IsSupportCharsetType(typid) \
+ (((typid) == TEXTOID) || \
+ ((typid) == VARCHAROID) || \
+ ((typid) == BPCHAROID) || \
+ ((typid) == NVARCHAR2OID))
+
#endif /* PG_TYPE_H */
diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_666.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_666.sql
index cc8cbc519..fa4499788 100644
--- a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_666.sql
+++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_666.sql
@@ -1,3 +1,3 @@
DROP AGGREGATE IF EXISTS pg_catalog.group_concat(text, "any") CASCADE;
-DROP FUNCTION IF EXISTS pg_catalog.group_concat_transfn(internal, text, "any") CASCADE;
+DROP FUNCTION IF EXISTS pg_catalog.group_concat_transfn(internal, text, VARIADIC "any") CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.group_concat_finalfn(internal) CASCADE;
diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_799.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_799.sql
index 388781780..fb5ef3df3 100644
--- a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_799.sql
+++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_799.sql
@@ -336,18 +336,7 @@ BEGIN
end if;
END$DO$;
-DO $$
-DECLARE
-query_str text;
-ans bool;
-BEGIN
- select case when count(*)=1 then true else false end as ans from
- (select *from pg_class where relname='snapshot_sequence' and relnamespace = 4991) into ans;
- if ans = true then
- query_str := 'DROP SEQUENCE db4ai.snapshot_sequence;';
- EXECUTE IMMEDIATE query_str;
- end if;
-END$$;DROP FUNCTION IF EXISTS pg_catalog.gs_stack() CASCADE;
+DROP FUNCTION IF EXISTS pg_catalog.gs_stack() CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(INT8) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(pid bigint) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(OUT tid bigint, OUT lwtid bigint, OUT stack text) CASCADE;
diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_844.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_844.sql
new file mode 100644
index 000000000..cb4e042cc
--- /dev/null
+++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_844.sql
@@ -0,0 +1,624 @@
+--------------------------------------------------------------
+-- delete pg_catalog.pg_collation
+--------------------------------------------------------------
+CREATE OR REPLACE FUNCTION pg_catalog.Delete_pg_collation_temp()
+RETURNS void
+AS $$
+DECLARE
+row_name record;
+query_str_nodes text;
+BEGIN
+ query_str_nodes := 'select * from dbe_perf.node_name';
+ FOR row_name IN EXECUTE(query_str_nodes) LOOP
+ delete from pg_catalog.pg_collation where collname in ('utf8mb4_general_ci', 'utf8mb4_unicode_ci', 'utf8mb4_bin', 'binary');
+ END LOOP;
+return;
+END;
+$$ LANGUAGE 'plpgsql';
+
+SELECT pg_catalog.Delete_pg_collation_temp();
+DROP FUNCTION pg_catalog.Delete_pg_collation_temp();
+
+DROP INDEX IF EXISTS pg_catalog.pg_collation_enc_def_index;DROP FUNCTION IF EXISTS pg_catalog.gs_validate_ext_listen_ip() cascade;DO $DO$
+DECLARE
+ ans boolean;
+ check_version boolean;
+BEGIN
+ SELECT case WHEN count(*)=1 THEN true ELSE false END AS ans FROM (SELECT nspname FROM pg_namespace WHERE nspname='dbe_perf' LIMIT 1) INTO ans;
+ IF ans = true THEN
+ DROP VIEW IF EXISTS DBE_PERF.statement_history cascade;
+ DROP FUNCTION IF EXISTS DBE_PERF.standby_statement_history(boolean);
+ DROP FUNCTION IF EXISTS DBE_PERF.standby_statement_history(boolean, timestamp with time zone[]);
+
+        SELECT case WHEN (working_version_num() >= 92606 AND working_version_num() < 92656) OR (working_version_num() >= 92827) THEN true ELSE false END INTO check_version;
+ IF check_version = true THEN
+ SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3118;
+ CREATE OR REPLACE FUNCTION dbe_perf.standby_statement_history(
+ IN only_slow boolean,
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean,
+ OUT trace_id text)
+ RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000
+ LANGUAGE internal AS $function$standby_statement_history_1v$function$;
+
+ SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3119;
+ CREATE OR REPLACE FUNCTION dbe_perf.standby_statement_history(
+ IN only_slow boolean,
+ VARIADIC finish_time timestamp with time zone[],
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean,
+ OUT trace_id text)
+ RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000
+ LANGUAGE internal AS $function$standby_statement_history$function$;
+ END IF;
+
+ check_version = false;
+        SELECT case WHEN (working_version_num() >= 92301 AND working_version_num() < 92420) THEN true ELSE false END INTO check_version;
+ IF check_version = true THEN
+ SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3118;
+ CREATE OR REPLACE FUNCTION dbe_perf.standby_statement_history(
+ IN only_slow boolean,
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean)
+ RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000
+ LANGUAGE internal AS $function$standby_statement_history_1v$function$;
+
+ SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3119;
+ CREATE OR REPLACE FUNCTION dbe_perf.standby_statement_history(
+ IN only_slow boolean,
+ VARIADIC finish_time timestamp with time zone[],
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean)
+ RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000
+ LANGUAGE internal AS $function$standby_statement_history$function$;
+ END IF;
+ END IF;
+END$DO$;
+
+DROP INDEX IF EXISTS pg_catalog.statement_history_time_idx;
+DROP TABLE IF EXISTS pg_catalog.statement_history cascade;
+
+CREATE unlogged table IF NOT EXISTS pg_catalog.statement_history(
+ db_name name,
+ schema_name name,
+ origin_node integer,
+ user_name name,
+ application_name text,
+ client_addr text,
+ client_port integer,
+ unique_query_id bigint,
+ debug_query_id bigint,
+ query text,
+ start_time timestamp with time zone,
+ finish_time timestamp with time zone,
+ slow_sql_threshold bigint,
+ transaction_id bigint,
+ thread_id bigint,
+ session_id bigint,
+ n_soft_parse bigint,
+ n_hard_parse bigint,
+ query_plan text,
+ n_returned_rows bigint,
+ n_tuples_fetched bigint,
+ n_tuples_returned bigint,
+ n_tuples_inserted bigint,
+ n_tuples_updated bigint,
+ n_tuples_deleted bigint,
+ n_blocks_fetched bigint,
+ n_blocks_hit bigint,
+ db_time bigint,
+ cpu_time bigint,
+ execution_time bigint,
+ parse_time bigint,
+ plan_time bigint,
+ rewrite_time bigint,
+ pl_execution_time bigint,
+ pl_compilation_time bigint,
+ data_io_time bigint,
+ net_send_info text,
+ net_recv_info text,
+ net_stream_send_info text,
+ net_stream_recv_info text,
+ lock_count bigint,
+ lock_time bigint,
+ lock_wait_count bigint,
+ lock_wait_time bigint,
+ lock_max_count bigint,
+ lwlock_count bigint,
+ lwlock_wait_count bigint,
+ lwlock_time bigint,
+ lwlock_wait_time bigint,
+ details bytea,
+ is_slow_sql boolean,
+ trace_id text
+);
+REVOKE ALL on table pg_catalog.statement_history FROM public;
+create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql);
+
+DO $DO$
+DECLARE
+ ans boolean;
+ username text;
+ querystr text;
+BEGIN
+ select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
+ IF ans = true then
+ CREATE VIEW DBE_PERF.statement_history AS select * from pg_catalog.statement_history;
+
+ SELECT SESSION_USER INTO username;
+ IF EXISTS (SELECT oid FROM pg_catalog.pg_class WHERE relname='statement_history') THEN
+ querystr := 'REVOKE SELECT on table dbe_perf.statement_history FROM public;';
+ EXECUTE IMMEDIATE querystr;
+ querystr := 'GRANT ALL ON TABLE DBE_PERF.statement_history TO ' || quote_ident(username) || ';';
+ EXECUTE IMMEDIATE querystr;
+ querystr := 'GRANT ALL ON TABLE pg_catalog.statement_history TO ' || quote_ident(username) || ';';
+ EXECUTE IMMEDIATE querystr;
+ GRANT SELECT ON TABLE DBE_PERF.statement_history TO PUBLIC;
+ END IF;
+ END IF;
+END$DO$;
+
+DO $DO$
+DECLARE
+ ans boolean;
+ username text;
+ querystr text;
+BEGIN
+ select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
+ IF ans = true THEN
+ DROP FUNCTION IF EXISTS DBE_PERF.get_global_full_sql_by_timestamp(timestamp with time zone, timestamp with time zone) cascade;
+ DROP FUNCTION IF EXISTS DBE_PERF.get_global_slow_sql_by_timestamp(timestamp with time zone, timestamp with time zone) cascade;
+
+ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp
+ (in start_timestamp timestamp with time zone,
+ in end_timestamp timestamp with time zone,
+ OUT node_name name,
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean,
+ OUT trace_id text)
+ RETURNS setof record
+ AS $$
+ DECLARE
+ row_data pg_catalog.statement_history%rowtype;
+ row_name record;
+ query_str text;
+ query_str_nodes text;
+ BEGIN
+ query_str_nodes := 'select * from dbe_perf.node_name';
+ FOR row_name IN EXECUTE(query_str_nodes) LOOP
+ query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || '''';
+ FOR row_data IN EXECUTE(query_str) LOOP
+ node_name := row_name.node_name;
+ db_name := row_data.db_name;
+ schema_name := row_data.schema_name;
+ origin_node := row_data.origin_node;
+ user_name := row_data.user_name;
+ application_name := row_data.application_name;
+ client_addr := row_data.client_addr;
+ client_port := row_data.client_port;
+ unique_query_id := row_data.unique_query_id;
+ debug_query_id := row_data.debug_query_id;
+ query := row_data.query;
+ start_time := row_data.start_time;
+ finish_time := row_data.finish_time;
+ slow_sql_threshold := row_data.slow_sql_threshold;
+ transaction_id := row_data.transaction_id;
+ thread_id := row_data.thread_id;
+ session_id := row_data.session_id;
+ n_soft_parse := row_data.n_soft_parse;
+ n_hard_parse := row_data.n_hard_parse;
+ query_plan := row_data.query_plan;
+ n_returned_rows := row_data.n_returned_rows;
+ n_tuples_fetched := row_data.n_tuples_fetched;
+ n_tuples_returned := row_data.n_tuples_returned;
+ n_tuples_inserted := row_data.n_tuples_inserted;
+ n_tuples_updated := row_data.n_tuples_updated;
+ n_tuples_deleted := row_data.n_tuples_deleted;
+ n_blocks_fetched := row_data.n_blocks_fetched;
+ n_blocks_hit := row_data.n_blocks_hit;
+ db_time := row_data.db_time;
+ cpu_time := row_data.cpu_time;
+ execution_time := row_data.execution_time;
+ parse_time := row_data.parse_time;
+ plan_time := row_data.plan_time;
+ rewrite_time := row_data.rewrite_time;
+ pl_execution_time := row_data.pl_execution_time;
+ pl_compilation_time := row_data.pl_compilation_time;
+ data_io_time := row_data.data_io_time;
+ net_send_info := row_data.net_send_info;
+ net_recv_info := row_data.net_recv_info;
+ net_stream_send_info := row_data.net_stream_send_info;
+ net_stream_recv_info := row_data.net_stream_recv_info;
+ lock_count := row_data.lock_count;
+ lock_time := row_data.lock_time;
+ lock_wait_count := row_data.lock_wait_count;
+ lock_wait_time := row_data.lock_wait_time;
+ lock_max_count := row_data.lock_max_count;
+ lwlock_count := row_data.lwlock_count;
+ lwlock_wait_count := row_data.lwlock_wait_count;
+ lwlock_time := row_data.lwlock_time;
+ lwlock_wait_time := row_data.lwlock_wait_time;
+ details := row_data.details;
+ is_slow_sql := row_data.is_slow_sql;
+ trace_id := row_data.trace_id;
+ return next;
+ END LOOP;
+ END LOOP;
+ return;
+ END; $$
+ LANGUAGE 'plpgsql' NOT FENCED;
+
+ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp
+ (in start_timestamp timestamp with time zone,
+ in end_timestamp timestamp with time zone,
+ OUT node_name name,
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean,
+ OUT trace_id text)
+ RETURNS setof record
+ AS $$
+ DECLARE
+ row_data pg_catalog.statement_history%rowtype;
+ row_name record;
+ query_str text;
+ query_str_nodes text;
+ BEGIN
+ query_str_nodes := 'select * from dbe_perf.node_name';
+ FOR row_name IN EXECUTE(query_str_nodes) LOOP
+ query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''' and is_slow_sql = true ';
+ FOR row_data IN EXECUTE(query_str) LOOP
+ node_name := row_name.node_name;
+ db_name := row_data.db_name;
+ schema_name := row_data.schema_name;
+ origin_node := row_data.origin_node;
+ user_name := row_data.user_name;
+ application_name := row_data.application_name;
+ client_addr := row_data.client_addr;
+ client_port := row_data.client_port;
+ unique_query_id := row_data.unique_query_id;
+ debug_query_id := row_data.debug_query_id;
+ query := row_data.query;
+ start_time := row_data.start_time;
+ finish_time := row_data.finish_time;
+ slow_sql_threshold := row_data.slow_sql_threshold;
+ transaction_id := row_data.transaction_id;
+ thread_id := row_data.thread_id;
+ session_id := row_data.session_id;
+ n_soft_parse := row_data.n_soft_parse;
+ n_hard_parse := row_data.n_hard_parse;
+ query_plan := row_data.query_plan;
+ n_returned_rows := row_data.n_returned_rows;
+ n_tuples_fetched := row_data.n_tuples_fetched;
+ n_tuples_returned := row_data.n_tuples_returned;
+ n_tuples_inserted := row_data.n_tuples_inserted;
+ n_tuples_updated := row_data.n_tuples_updated;
+ n_tuples_deleted := row_data.n_tuples_deleted;
+ n_blocks_fetched := row_data.n_blocks_fetched;
+ n_blocks_hit := row_data.n_blocks_hit;
+ db_time := row_data.db_time;
+ cpu_time := row_data.cpu_time;
+ execution_time := row_data.execution_time;
+ parse_time := row_data.parse_time;
+ plan_time := row_data.plan_time;
+ rewrite_time := row_data.rewrite_time;
+ pl_execution_time := row_data.pl_execution_time;
+ pl_compilation_time := row_data.pl_compilation_time;
+ data_io_time := row_data.data_io_time;
+ net_send_info := row_data.net_send_info;
+ net_recv_info := row_data.net_recv_info;
+ net_stream_send_info := row_data.net_stream_send_info;
+ net_stream_recv_info := row_data.net_stream_recv_info;
+ lock_count := row_data.lock_count;
+ lock_time := row_data.lock_time;
+ lock_wait_count := row_data.lock_wait_count;
+ lock_wait_time := row_data.lock_wait_time;
+ lock_max_count := row_data.lock_max_count;
+ lwlock_count := row_data.lwlock_count;
+ lwlock_wait_count := row_data.lwlock_wait_count;
+ lwlock_time := row_data.lwlock_time;
+ lwlock_wait_time := row_data.lwlock_wait_time;
+ details := row_data.details;
+ is_slow_sql := row_data.is_slow_sql;
+ trace_id := row_data.trace_id;
+ return next;
+ END LOOP;
+ END LOOP;
+ return;
+ END; $$
+ LANGUAGE 'plpgsql' NOT FENCED;
+ END IF;
+END$DO$;
diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_666.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_666.sql
index cc8cbc519..fa4499788 100644
--- a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_666.sql
+++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_666.sql
@@ -1,3 +1,3 @@
DROP AGGREGATE IF EXISTS pg_catalog.group_concat(text, "any") CASCADE;
-DROP FUNCTION IF EXISTS pg_catalog.group_concat_transfn(internal, text, "any") CASCADE;
+DROP FUNCTION IF EXISTS pg_catalog.group_concat_transfn(internal, text, VARIADIC "any") CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.group_concat_finalfn(internal) CASCADE;
diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_799.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_799.sql
index 388781780..8d1e3e390 100644
--- a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_799.sql
+++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_799.sql
@@ -335,19 +335,7 @@ BEGIN
LANGUAGE 'plpgsql' NOT FENCED;
end if;
END$DO$;
-
-DO $$
-DECLARE
-query_str text;
-ans bool;
-BEGIN
- select case when count(*)=1 then true else false end as ans from
- (select *from pg_class where relname='snapshot_sequence' and relnamespace = 4991) into ans;
- if ans = true then
- query_str := 'DROP SEQUENCE db4ai.snapshot_sequence;';
- EXECUTE IMMEDIATE query_str;
- end if;
-END$$;DROP FUNCTION IF EXISTS pg_catalog.gs_stack() CASCADE;
+DROP FUNCTION IF EXISTS pg_catalog.gs_stack() CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(INT8) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(pid bigint) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(OUT tid bigint, OUT lwtid bigint, OUT stack text) CASCADE;
diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_844.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_844.sql
new file mode 100644
index 000000000..cb4e042cc
--- /dev/null
+++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_844.sql
@@ -0,0 +1,624 @@
+--------------------------------------------------------------
+-- delete pg_catalog.pg_collation
+--------------------------------------------------------------
+CREATE OR REPLACE FUNCTION pg_catalog.Delete_pg_collation_temp()
+RETURNS void
+AS $$
+DECLARE
+row_name record;
+query_str_nodes text;
+BEGIN
+ query_str_nodes := 'select * from dbe_perf.node_name';
+ FOR row_name IN EXECUTE(query_str_nodes) LOOP
+ delete from pg_catalog.pg_collation where collname in ('utf8mb4_general_ci', 'utf8mb4_unicode_ci', 'utf8mb4_bin', 'binary');
+ END LOOP;
+return;
+END;
+$$ LANGUAGE 'plpgsql';
+
+SELECT pg_catalog.Delete_pg_collation_temp();
+DROP FUNCTION pg_catalog.Delete_pg_collation_temp();
+
+DROP INDEX IF EXISTS pg_catalog.pg_collation_enc_def_index;DROP FUNCTION IF EXISTS pg_catalog.gs_validate_ext_listen_ip() cascade;DO $DO$
+DECLARE
+ ans boolean;
+ check_version boolean;
+BEGIN
+ SELECT case WHEN count(*)=1 THEN true ELSE false END AS ans FROM (SELECT nspname FROM pg_namespace WHERE nspname='dbe_perf' LIMIT 1) INTO ans;
+ IF ans = true THEN
+ DROP VIEW IF EXISTS DBE_PERF.statement_history cascade;
+ DROP FUNCTION IF EXISTS DBE_PERF.standby_statement_history(boolean);
+ DROP FUNCTION IF EXISTS DBE_PERF.standby_statement_history(boolean, timestamp with time zone[]);
+
+        SELECT case WHEN (working_version_num() >= 92606 AND working_version_num() < 92656) OR (working_version_num() >= 92827) THEN true ELSE false END INTO check_version;
+ IF check_version = true THEN
+ SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3118;
+ CREATE OR REPLACE FUNCTION dbe_perf.standby_statement_history(
+ IN only_slow boolean,
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean,
+ OUT trace_id text)
+ RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000
+ LANGUAGE internal AS $function$standby_statement_history_1v$function$;
+
+ SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3119;
+ CREATE OR REPLACE FUNCTION dbe_perf.standby_statement_history(
+ IN only_slow boolean,
+ VARIADIC finish_time timestamp with time zone[],
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean,
+ OUT trace_id text)
+ RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000
+ LANGUAGE internal AS $function$standby_statement_history$function$;
+ END IF;
+
+ check_version = false;
+        SELECT case WHEN (working_version_num() >= 92301 AND working_version_num() < 92420) THEN true ELSE false END INTO check_version;
+ IF check_version = true THEN
+ SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3118;
+ CREATE OR REPLACE FUNCTION dbe_perf.standby_statement_history(
+ IN only_slow boolean,
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean)
+ RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000
+ LANGUAGE internal AS $function$standby_statement_history_1v$function$;
+
+ SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3119;
+ CREATE OR REPLACE FUNCTION dbe_perf.standby_statement_history(
+ IN only_slow boolean,
+ VARIADIC finish_time timestamp with time zone[],
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean)
+ RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000
+ LANGUAGE internal AS $function$standby_statement_history$function$;
+ END IF;
+ END IF;
+END$DO$;
+
+DROP INDEX IF EXISTS pg_catalog.statement_history_time_idx;
+DROP TABLE IF EXISTS pg_catalog.statement_history cascade;
+
+CREATE unlogged table IF NOT EXISTS pg_catalog.statement_history(
+ db_name name,
+ schema_name name,
+ origin_node integer,
+ user_name name,
+ application_name text,
+ client_addr text,
+ client_port integer,
+ unique_query_id bigint,
+ debug_query_id bigint,
+ query text,
+ start_time timestamp with time zone,
+ finish_time timestamp with time zone,
+ slow_sql_threshold bigint,
+ transaction_id bigint,
+ thread_id bigint,
+ session_id bigint,
+ n_soft_parse bigint,
+ n_hard_parse bigint,
+ query_plan text,
+ n_returned_rows bigint,
+ n_tuples_fetched bigint,
+ n_tuples_returned bigint,
+ n_tuples_inserted bigint,
+ n_tuples_updated bigint,
+ n_tuples_deleted bigint,
+ n_blocks_fetched bigint,
+ n_blocks_hit bigint,
+ db_time bigint,
+ cpu_time bigint,
+ execution_time bigint,
+ parse_time bigint,
+ plan_time bigint,
+ rewrite_time bigint,
+ pl_execution_time bigint,
+ pl_compilation_time bigint,
+ data_io_time bigint,
+ net_send_info text,
+ net_recv_info text,
+ net_stream_send_info text,
+ net_stream_recv_info text,
+ lock_count bigint,
+ lock_time bigint,
+ lock_wait_count bigint,
+ lock_wait_time bigint,
+ lock_max_count bigint,
+ lwlock_count bigint,
+ lwlock_wait_count bigint,
+ lwlock_time bigint,
+ lwlock_wait_time bigint,
+ details bytea,
+ is_slow_sql boolean,
+ trace_id text
+);
+REVOKE ALL on table pg_catalog.statement_history FROM public;
+create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql);
+
+DO $DO$
+DECLARE
+ ans boolean;
+ username text;
+ querystr text;
+BEGIN
+ select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
+ IF ans = true then
+ CREATE VIEW DBE_PERF.statement_history AS select * from pg_catalog.statement_history;
+
+ SELECT SESSION_USER INTO username;
+ IF EXISTS (SELECT oid FROM pg_catalog.pg_class WHERE relname='statement_history') THEN
+ querystr := 'REVOKE SELECT on table dbe_perf.statement_history FROM public;';
+ EXECUTE IMMEDIATE querystr;
+ querystr := 'GRANT ALL ON TABLE DBE_PERF.statement_history TO ' || quote_ident(username) || ';';
+ EXECUTE IMMEDIATE querystr;
+ querystr := 'GRANT ALL ON TABLE pg_catalog.statement_history TO ' || quote_ident(username) || ';';
+ EXECUTE IMMEDIATE querystr;
+ GRANT SELECT ON TABLE DBE_PERF.statement_history TO PUBLIC;
+ END IF;
+ END IF;
+END$DO$;
+
+DO $DO$
+DECLARE
+ ans boolean;
+ username text;
+ querystr text;
+BEGIN
+ select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
+ IF ans = true THEN
+ DROP FUNCTION IF EXISTS DBE_PERF.get_global_full_sql_by_timestamp(timestamp with time zone, timestamp with time zone) cascade;
+ DROP FUNCTION IF EXISTS DBE_PERF.get_global_slow_sql_by_timestamp(timestamp with time zone, timestamp with time zone) cascade;
+
+ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp
+ (in start_timestamp timestamp with time zone,
+ in end_timestamp timestamp with time zone,
+ OUT node_name name,
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean,
+ OUT trace_id text)
+ RETURNS setof record
+ AS $$
+ DECLARE
+ row_data pg_catalog.statement_history%rowtype;
+ row_name record;
+ query_str text;
+ query_str_nodes text;
+ BEGIN
+ query_str_nodes := 'select * from dbe_perf.node_name';
+ FOR row_name IN EXECUTE(query_str_nodes) LOOP
+ query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || '''';
+ FOR row_data IN EXECUTE(query_str) LOOP
+ node_name := row_name.node_name;
+ db_name := row_data.db_name;
+ schema_name := row_data.schema_name;
+ origin_node := row_data.origin_node;
+ user_name := row_data.user_name;
+ application_name := row_data.application_name;
+ client_addr := row_data.client_addr;
+ client_port := row_data.client_port;
+ unique_query_id := row_data.unique_query_id;
+ debug_query_id := row_data.debug_query_id;
+ query := row_data.query;
+ start_time := row_data.start_time;
+ finish_time := row_data.finish_time;
+ slow_sql_threshold := row_data.slow_sql_threshold;
+ transaction_id := row_data.transaction_id;
+ thread_id := row_data.thread_id;
+ session_id := row_data.session_id;
+ n_soft_parse := row_data.n_soft_parse;
+ n_hard_parse := row_data.n_hard_parse;
+ query_plan := row_data.query_plan;
+ n_returned_rows := row_data.n_returned_rows;
+ n_tuples_fetched := row_data.n_tuples_fetched;
+ n_tuples_returned := row_data.n_tuples_returned;
+ n_tuples_inserted := row_data.n_tuples_inserted;
+ n_tuples_updated := row_data.n_tuples_updated;
+ n_tuples_deleted := row_data.n_tuples_deleted;
+ n_blocks_fetched := row_data.n_blocks_fetched;
+ n_blocks_hit := row_data.n_blocks_hit;
+ db_time := row_data.db_time;
+ cpu_time := row_data.cpu_time;
+ execution_time := row_data.execution_time;
+ parse_time := row_data.parse_time;
+ plan_time := row_data.plan_time;
+ rewrite_time := row_data.rewrite_time;
+ pl_execution_time := row_data.pl_execution_time;
+ pl_compilation_time := row_data.pl_compilation_time;
+ data_io_time := row_data.data_io_time;
+ net_send_info := row_data.net_send_info;
+ net_recv_info := row_data.net_recv_info;
+ net_stream_send_info := row_data.net_stream_send_info;
+ net_stream_recv_info := row_data.net_stream_recv_info;
+ lock_count := row_data.lock_count;
+ lock_time := row_data.lock_time;
+ lock_wait_count := row_data.lock_wait_count;
+ lock_wait_time := row_data.lock_wait_time;
+ lock_max_count := row_data.lock_max_count;
+ lwlock_count := row_data.lwlock_count;
+ lwlock_wait_count := row_data.lwlock_wait_count;
+ lwlock_time := row_data.lwlock_time;
+ lwlock_wait_time := row_data.lwlock_wait_time;
+ details := row_data.details;
+ is_slow_sql := row_data.is_slow_sql;
+ trace_id := row_data.trace_id;
+ return next;
+ END LOOP;
+ END LOOP;
+ return;
+ END; $$
+ LANGUAGE 'plpgsql' NOT FENCED;
+
+ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp
+ (in start_timestamp timestamp with time zone,
+ in end_timestamp timestamp with time zone,
+ OUT node_name name,
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean,
+ OUT trace_id text)
+ RETURNS setof record
+ AS $$
+ DECLARE
+ row_data pg_catalog.statement_history%rowtype;
+ row_name record;
+ query_str text;
+ query_str_nodes text;
+ BEGIN
+ query_str_nodes := 'select * from dbe_perf.node_name';
+ FOR row_name IN EXECUTE(query_str_nodes) LOOP
+ query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''' and is_slow_sql = true ';
+ FOR row_data IN EXECUTE(query_str) LOOP
+ node_name := row_name.node_name;
+ db_name := row_data.db_name;
+ schema_name := row_data.schema_name;
+ origin_node := row_data.origin_node;
+ user_name := row_data.user_name;
+ application_name := row_data.application_name;
+ client_addr := row_data.client_addr;
+ client_port := row_data.client_port;
+ unique_query_id := row_data.unique_query_id;
+ debug_query_id := row_data.debug_query_id;
+ query := row_data.query;
+ start_time := row_data.start_time;
+ finish_time := row_data.finish_time;
+ slow_sql_threshold := row_data.slow_sql_threshold;
+ transaction_id := row_data.transaction_id;
+ thread_id := row_data.thread_id;
+ session_id := row_data.session_id;
+ n_soft_parse := row_data.n_soft_parse;
+ n_hard_parse := row_data.n_hard_parse;
+ query_plan := row_data.query_plan;
+ n_returned_rows := row_data.n_returned_rows;
+ n_tuples_fetched := row_data.n_tuples_fetched;
+ n_tuples_returned := row_data.n_tuples_returned;
+ n_tuples_inserted := row_data.n_tuples_inserted;
+ n_tuples_updated := row_data.n_tuples_updated;
+ n_tuples_deleted := row_data.n_tuples_deleted;
+ n_blocks_fetched := row_data.n_blocks_fetched;
+ n_blocks_hit := row_data.n_blocks_hit;
+ db_time := row_data.db_time;
+ cpu_time := row_data.cpu_time;
+ execution_time := row_data.execution_time;
+ parse_time := row_data.parse_time;
+ plan_time := row_data.plan_time;
+ rewrite_time := row_data.rewrite_time;
+ pl_execution_time := row_data.pl_execution_time;
+ pl_compilation_time := row_data.pl_compilation_time;
+ data_io_time := row_data.data_io_time;
+ net_send_info := row_data.net_send_info;
+ net_recv_info := row_data.net_recv_info;
+ net_stream_send_info := row_data.net_stream_send_info;
+ net_stream_recv_info := row_data.net_stream_recv_info;
+ lock_count := row_data.lock_count;
+ lock_time := row_data.lock_time;
+ lock_wait_count := row_data.lock_wait_count;
+ lock_wait_time := row_data.lock_wait_time;
+ lock_max_count := row_data.lock_max_count;
+ lwlock_count := row_data.lwlock_count;
+ lwlock_wait_count := row_data.lwlock_wait_count;
+ lwlock_time := row_data.lwlock_time;
+ lwlock_wait_time := row_data.lwlock_wait_time;
+ details := row_data.details;
+ is_slow_sql := row_data.is_slow_sql;
+ trace_id := row_data.trace_id;
+ return next;
+ END LOOP;
+ END LOOP;
+ return;
+ END; $$
+ LANGUAGE 'plpgsql' NOT FENCED;
+ END IF;
+END$DO$;
diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_200.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_200.sql
index e6a687dc8..cb2965220 100644
--- a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_200.sql
+++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_200.sql
@@ -5035,7 +5035,7 @@ DECLARE
BEGIN
query_str_nodes := 'select * from dbe_perf.node_name';
FOR row_name IN EXECUTE(query_str_nodes) LOOP
- query_str := 'select * from get_node_stat_reset_time()';
+ query_str := 'select * from pg_catalog.get_node_stat_reset_time()';
FOR row_data IN EXECUTE(query_str) LOOP
node_name := row_name.node_name;
reset_time := row_data.get_node_stat_reset_time;
diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_666.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_666.sql
index 3d2a6dc7d..7474138f8 100644
--- a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_666.sql
+++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_666.sql
@@ -1,15 +1,16 @@
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4098;
-CREATE FUNCTION pg_catalog.group_concat_finalfn(internal)
+CREATE OR REPLACE FUNCTION pg_catalog.group_concat_finalfn(internal)
RETURNS text
LANGUAGE internal
IMMUTABLE NOT FENCED NOT SHIPPABLE
AS 'group_concat_finalfn';
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4099;
-CREATE FUNCTION pg_catalog.group_concat_transfn(internal, text, VARIADIC "any")
+CREATE OR REPLACE FUNCTION pg_catalog.group_concat_transfn(internal, text, VARIADIC "any")
RETURNS internal
LANGUAGE internal
IMMUTABLE NOT FENCED NOT SHIPPABLE AS 'group_concat_transfn';
+UPDATE pg_catalog.pg_proc SET provariadic=0 WHERE oid=4099;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4097;
CREATE AGGREGATE pg_catalog.group_concat(text, "any") (SFUNC=group_concat_transfn, STYPE=internal, FINALFUNC=group_concat_finalfn);
diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_799.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_799.sql
index 5b2a2bf4b..0f8c17db3 100644
--- a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_799.sql
+++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_799.sql
@@ -382,20 +382,7 @@ BEGIN
END$DO$;
-DO $$
-DECLARE
-query_str text;
-ans bool;
-BEGIN
- select case when count(*)=1 then true else false end as ans from (select *from pg_class where relname='snapshot_sequence') into ans;
- if ans = false then
- query_str := 'CREATE SEQUENCE db4ai.snapshot_sequence;';
- EXECUTE IMMEDIATE query_str;
- end if;
- update pg_class set relacl = null where relname = 'snapshot_sequence' and relnamespace = 4991;
- query_str := 'GRANT UPDATE ON db4ai.snapshot_sequence TO PUBLIC;';
- EXECUTE IMMEDIATE query_str;
-END$$;-- gs_stack_int8
+-- gs_stack_int8
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(INT8) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 9997;
CREATE OR REPLACE FUNCTION pg_catalog.gs_stack(INT8)
diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_844.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_844.sql
new file mode 100644
index 000000000..a44bd4c18
--- /dev/null
+++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_844.sql
@@ -0,0 +1,556 @@
+DECLARE
+ SPACE_NAME VARCHAR(64);
+ REL_NAME VARCHAR(64);
+ SQL_COMMAND VARCHAR(200);
+ CURSOR C1 IS
+ SELECT n.nspname, c.relname
+ FROM pg_catalog.pg_namespace n INNER JOIN pg_catalog.pg_class c ON n.oid = c.relnamespace
+ WHERE c.relkind = 'r' AND c.parttype IN ('p', 's');
+BEGIN
+ OPEN C1;
+ LOOP
+ FETCH C1 INTO SPACE_NAME, REL_NAME;
+ EXIT WHEN C1%NOTFOUND;
+ SQL_COMMAND := 'ALTER TABLE "' || SPACE_NAME || '"."' || REL_NAME || '" RESET PARTITION;';
+ EXECUTE SQL_COMMAND;
+ END LOOP;
+ CLOSE C1;
+END;
+/
+-- ----------------------------------------------------------------
+-- upgrade pg_catalog.pg_collation
+-- ----------------------------------------------------------------
+CREATE OR REPLACE FUNCTION pg_catalog.Insert_pg_collation_temp(
+IN collname text,
+IN collnamespace integer,
+IN collowner integer,
+IN collencoding integer,
+IN collcollate text,
+IN collctype text,
+IN collpadattr text,
+IN collisdef bool
+)
+RETURNS void
+AS $$
+DECLARE
+ row_name record;
+ query_str_nodes text;
+BEGIN
+ query_str_nodes := 'select * from dbe_perf.node_name';
+ FOR row_name IN EXECUTE(query_str_nodes) LOOP
+ insert into pg_catalog.pg_collation values (collname, collnamespace, collowner, collencoding, collcollate, collctype, collpadattr, collisdef);
+ END LOOP;
+ return;
+END; $$
+LANGUAGE 'plpgsql';
+
+SET LOCAL inplace_upgrade_next_system_object_oids = IUO_GENERAL, 1026;
+select pg_catalog.Insert_pg_collation_temp('binary', 11, 10, 0, 'binary', 'binary', 'NO PAD', true);
+
+SET LOCAL inplace_upgrade_next_system_object_oids = IUO_GENERAL, 1537;
+select pg_catalog.Insert_pg_collation_temp('utf8mb4_general_ci', 11, 10, 7, 'utf8mb4_general_ci', 'utf8mb4_general_ci', 'PAD SPACE', true);
+
+SET LOCAL inplace_upgrade_next_system_object_oids = IUO_GENERAL, 1538;
+select pg_catalog.Insert_pg_collation_temp('utf8mb4_unicode_ci', 11, 10, 7, 'utf8mb4_unicode_ci', 'utf8mb4_unicode_ci', 'PAD SPACE', null);
+
+SET LOCAL inplace_upgrade_next_system_object_oids = IUO_GENERAL, 1539;
+select pg_catalog.Insert_pg_collation_temp('utf8mb4_bin', 11, 10, 7, 'utf8mb4_bin', 'utf8mb4_bin', 'PAD SPACE', null);
+
+DROP FUNCTION pg_catalog.Insert_pg_collation_temp;
+
+SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 3147;
+CREATE UNIQUE INDEX pg_collation_enc_def_index ON pg_catalog.pg_collation USING BTREE(collencoding INT4_OPS, collisdef BOOL_OPS);
+
+SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5169;
+CREATE OR REPLACE FUNCTION pg_catalog.gs_validate_ext_listen_ip(clear cstring, validate_node_name cstring, validate_ip cstring, OUT pid bigint, OUT node_name text)
+RETURNS SETOF record
+LANGUAGE internal
+STABLE STRICT NOT FENCED NOT SHIPPABLE ROWS 100
+AS 'gs_validate_ext_listen_ip';DO $DO$
+DECLARE
+ ans boolean;
+BEGIN
+ select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
+ if ans = true then
+ DROP FUNCTION IF EXISTS DBE_PERF.get_global_full_sql_by_timestamp(timestamp with time zone, timestamp with time zone) cascade;
+ DROP FUNCTION IF EXISTS DBE_PERF.get_global_slow_sql_by_timestamp(timestamp with time zone, timestamp with time zone) cascade;
+ DROP VIEW IF EXISTS DBE_PERF.statement_history cascade;
+ end if;
+END$DO$;
+
+DROP INDEX IF EXISTS pg_catalog.statement_history_time_idx;
+DROP TABLE IF EXISTS pg_catalog.statement_history cascade;
+
+CREATE unlogged table IF NOT EXISTS pg_catalog.statement_history(
+ db_name name,
+ schema_name name,
+ origin_node integer,
+ user_name name,
+ application_name text,
+ client_addr text,
+ client_port integer,
+ unique_query_id bigint,
+ debug_query_id bigint,
+ query text,
+ start_time timestamp with time zone,
+ finish_time timestamp with time zone,
+ slow_sql_threshold bigint,
+ transaction_id bigint,
+ thread_id bigint,
+ session_id bigint,
+ n_soft_parse bigint,
+ n_hard_parse bigint,
+ query_plan text,
+ n_returned_rows bigint,
+ n_tuples_fetched bigint,
+ n_tuples_returned bigint,
+ n_tuples_inserted bigint,
+ n_tuples_updated bigint,
+ n_tuples_deleted bigint,
+ n_blocks_fetched bigint,
+ n_blocks_hit bigint,
+ db_time bigint,
+ cpu_time bigint,
+ execution_time bigint,
+ parse_time bigint,
+ plan_time bigint,
+ rewrite_time bigint,
+ pl_execution_time bigint,
+ pl_compilation_time bigint,
+ data_io_time bigint,
+ net_send_info text,
+ net_recv_info text,
+ net_stream_send_info text,
+ net_stream_recv_info text,
+ lock_count bigint,
+ lock_time bigint,
+ lock_wait_count bigint,
+ lock_wait_time bigint,
+ lock_max_count bigint,
+ lwlock_count bigint,
+ lwlock_wait_count bigint,
+ lwlock_time bigint,
+ lwlock_wait_time bigint,
+ details bytea,
+ is_slow_sql boolean,
+ trace_id text,
+ advise text
+);
+REVOKE ALL on table pg_catalog.statement_history FROM public;
+create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql);
+
+DO $DO$
+DECLARE
+ ans boolean;
+ username text;
+ querystr text;
+BEGIN
+ select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
+ IF ans = true then
+ CREATE VIEW DBE_PERF.statement_history AS select * from pg_catalog.statement_history;
+
+ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp
+ (in start_timestamp timestamp with time zone,
+ in end_timestamp timestamp with time zone,
+ OUT node_name name,
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean,
+ OUT trace_id text,
+ OUT advise text)
+ RETURNS setof record
+ AS $$
+ DECLARE
+ row_data pg_catalog.statement_history%rowtype;
+ row_name record;
+ query_str text;
+ query_str_nodes text;
+ BEGIN
+ query_str_nodes := 'select * from dbe_perf.node_name';
+ FOR row_name IN EXECUTE(query_str_nodes) LOOP
+ query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || '''';
+ FOR row_data IN EXECUTE(query_str) LOOP
+ node_name := row_name.node_name;
+ db_name := row_data.db_name;
+ schema_name := row_data.schema_name;
+ origin_node := row_data.origin_node;
+ user_name := row_data.user_name;
+ application_name := row_data.application_name;
+ client_addr := row_data.client_addr;
+ client_port := row_data.client_port;
+ unique_query_id := row_data.unique_query_id;
+ debug_query_id := row_data.debug_query_id;
+ query := row_data.query;
+ start_time := row_data.start_time;
+ finish_time := row_data.finish_time;
+ slow_sql_threshold := row_data.slow_sql_threshold;
+ transaction_id := row_data.transaction_id;
+ thread_id := row_data.thread_id;
+ session_id := row_data.session_id;
+ n_soft_parse := row_data.n_soft_parse;
+ n_hard_parse := row_data.n_hard_parse;
+ query_plan := row_data.query_plan;
+ n_returned_rows := row_data.n_returned_rows;
+ n_tuples_fetched := row_data.n_tuples_fetched;
+ n_tuples_returned := row_data.n_tuples_returned;
+ n_tuples_inserted := row_data.n_tuples_inserted;
+ n_tuples_updated := row_data.n_tuples_updated;
+ n_tuples_deleted := row_data.n_tuples_deleted;
+ n_blocks_fetched := row_data.n_blocks_fetched;
+ n_blocks_hit := row_data.n_blocks_hit;
+ db_time := row_data.db_time;
+ cpu_time := row_data.cpu_time;
+ execution_time := row_data.execution_time;
+ parse_time := row_data.parse_time;
+ plan_time := row_data.plan_time;
+ rewrite_time := row_data.rewrite_time;
+ pl_execution_time := row_data.pl_execution_time;
+ pl_compilation_time := row_data.pl_compilation_time;
+ data_io_time := row_data.data_io_time;
+ net_send_info := row_data.net_send_info;
+ net_recv_info := row_data.net_recv_info;
+ net_stream_send_info := row_data.net_stream_send_info;
+ net_stream_recv_info := row_data.net_stream_recv_info;
+ lock_count := row_data.lock_count;
+ lock_time := row_data.lock_time;
+ lock_wait_count := row_data.lock_wait_count;
+ lock_wait_time := row_data.lock_wait_time;
+ lock_max_count := row_data.lock_max_count;
+ lwlock_count := row_data.lwlock_count;
+ lwlock_wait_count := row_data.lwlock_wait_count;
+ lwlock_time := row_data.lwlock_time;
+ lwlock_wait_time := row_data.lwlock_wait_time;
+ details := row_data.details;
+ is_slow_sql := row_data.is_slow_sql;
+ trace_id := row_data.trace_id;
+ advise := row_data.advise;
+ return next;
+ END LOOP;
+ END LOOP;
+ return;
+ END; $$
+ LANGUAGE 'plpgsql' NOT FENCED;
+
+ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp
+ (in start_timestamp timestamp with time zone,
+ in end_timestamp timestamp with time zone,
+ OUT node_name name,
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean,
+ OUT trace_id text,
+ OUT advise text)
+ RETURNS setof record
+ AS $$
+ DECLARE
+ row_data pg_catalog.statement_history%rowtype;
+ row_name record;
+ query_str text;
+ query_str_nodes text;
+ BEGIN
+ query_str_nodes := 'select * from dbe_perf.node_name';
+ FOR row_name IN EXECUTE(query_str_nodes) LOOP
+ query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''' and is_slow_sql = true ';
+ FOR row_data IN EXECUTE(query_str) LOOP
+ node_name := row_name.node_name;
+ db_name := row_data.db_name;
+ schema_name := row_data.schema_name;
+ origin_node := row_data.origin_node;
+ user_name := row_data.user_name;
+ application_name := row_data.application_name;
+ client_addr := row_data.client_addr;
+ client_port := row_data.client_port;
+ unique_query_id := row_data.unique_query_id;
+ debug_query_id := row_data.debug_query_id;
+ query := row_data.query;
+ start_time := row_data.start_time;
+ finish_time := row_data.finish_time;
+ slow_sql_threshold := row_data.slow_sql_threshold;
+ transaction_id := row_data.transaction_id;
+ thread_id := row_data.thread_id;
+ session_id := row_data.session_id;
+ n_soft_parse := row_data.n_soft_parse;
+ n_hard_parse := row_data.n_hard_parse;
+ query_plan := row_data.query_plan;
+ n_returned_rows := row_data.n_returned_rows;
+ n_tuples_fetched := row_data.n_tuples_fetched;
+ n_tuples_returned := row_data.n_tuples_returned;
+ n_tuples_inserted := row_data.n_tuples_inserted;
+ n_tuples_updated := row_data.n_tuples_updated;
+ n_tuples_deleted := row_data.n_tuples_deleted;
+ n_blocks_fetched := row_data.n_blocks_fetched;
+ n_blocks_hit := row_data.n_blocks_hit;
+ db_time := row_data.db_time;
+ cpu_time := row_data.cpu_time;
+ execution_time := row_data.execution_time;
+ parse_time := row_data.parse_time;
+ plan_time := row_data.plan_time;
+ rewrite_time := row_data.rewrite_time;
+ pl_execution_time := row_data.pl_execution_time;
+ pl_compilation_time := row_data.pl_compilation_time;
+ data_io_time := row_data.data_io_time;
+ net_send_info := row_data.net_send_info;
+ net_recv_info := row_data.net_recv_info;
+ net_stream_send_info := row_data.net_stream_send_info;
+ net_stream_recv_info := row_data.net_stream_recv_info;
+ lock_count := row_data.lock_count;
+ lock_time := row_data.lock_time;
+ lock_wait_count := row_data.lock_wait_count;
+ lock_wait_time := row_data.lock_wait_time;
+ lock_max_count := row_data.lock_max_count;
+ lwlock_count := row_data.lwlock_count;
+ lwlock_wait_count := row_data.lwlock_wait_count;
+ lwlock_time := row_data.lwlock_time;
+ lwlock_wait_time := row_data.lwlock_wait_time;
+ details := row_data.details;
+ is_slow_sql := row_data.is_slow_sql;
+ trace_id := row_data.trace_id;
+ advise := row_data.advise;
+ return next;
+ END LOOP;
+ END LOOP;
+ return;
+ END; $$
+ LANGUAGE 'plpgsql' NOT FENCED;
+
+ SELECT SESSION_USER INTO username;
+ IF EXISTS (SELECT oid FROM pg_catalog.pg_class WHERE relname='statement_history') THEN
+ querystr := 'REVOKE ALL ON TABLE dbe_perf.statement_history FROM ' || quote_ident(username) || ';';
+ EXECUTE IMMEDIATE querystr;
+ querystr := 'REVOKE ALL ON TABLE pg_catalog.statement_history FROM ' || quote_ident(username) || ';';
+ EXECUTE IMMEDIATE querystr;
+ querystr := 'REVOKE SELECT on table dbe_perf.statement_history FROM public;';
+ EXECUTE IMMEDIATE querystr;
+ querystr := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE dbe_perf.statement_history TO ' || quote_ident(username) || ';';
+ EXECUTE IMMEDIATE querystr;
+ querystr := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE pg_catalog.statement_history TO ' || quote_ident(username) || ';';
+ EXECUTE IMMEDIATE querystr;
+ GRANT SELECT ON TABLE DBE_PERF.statement_history TO PUBLIC;
+ END IF;
+ end if;
+END$DO$;
+
+DO $DO$
+DECLARE
+ ans boolean;
+BEGIN
+ select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
+ if ans = true then
+ DROP FUNCTION IF EXISTS dbe_perf.standby_statement_history(boolean);
+ SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3118;
+ CREATE OR REPLACE FUNCTION dbe_perf.standby_statement_history(
+ IN only_slow boolean,
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean,
+ OUT trace_id text,
+ OUT advise text)
+ RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000
+ LANGUAGE internal AS $function$standby_statement_history_1v$function$;
+
+ DROP FUNCTION IF EXISTS dbe_perf.standby_statement_history(boolean, timestamp with time zone[]);
+ SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3119;
+ CREATE OR REPLACE FUNCTION dbe_perf.standby_statement_history(
+ IN only_slow boolean,
+ VARIADIC finish_time timestamp with time zone[],
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean,
+ OUT trace_id text,
+ OUT advise text)
+ RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000
+ LANGUAGE internal AS $function$standby_statement_history$function$;
+ end if;
+END$DO$;
diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_366.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_366.sql
index e27b51141..eef1e2f15 100644
--- a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_366.sql
+++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_366.sql
@@ -10,8 +10,8 @@ CREATE TABLE IF NOT EXISTS pg_catalog.gs_model_warehouse
createtime timestamp with time zone NOCOMPRESS NOT NULL,
processedtuples int4 NOCOMPRESS NOT NULL,
discardedtuples int4 NOCOMPRESS NOT NULL,
- pre_process_time float4 NOCOMPRESS NOT NULL,
- exec_time float4 NOCOMPRESS NOT NULL,
+ preprocesstime float4 NOCOMPRESS NOT NULL,
+ exectime float4 NOCOMPRESS NOT NULL,
iterations int4 NOCOMPRESS NOT NULL,
outputtype Oid NOCOMPRESS NOT NULL,
modeltype text,
@@ -27,7 +27,7 @@ CREATE TABLE IF NOT EXISTS pg_catalog.gs_model_warehouse
trainingscoresname text[1],
trainingscoresvalue float4[1],
modeldescribe text[1]
-)WITH OIDS TABLESPACE pg_default;
+)WITH OIDS;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 3992;
CREATE UNIQUE INDEX gs_model_oid_index ON pg_catalog.gs_model_warehouse USING BTREE(oid OID_OPS);
@@ -44,6 +44,22 @@ CREATE SCHEMA db4ai;
COMMENT ON schema db4ai IS 'db4ai schema';
GRANT USAGE ON SCHEMA db4ai TO PUBLIC;
CREATE TYPE db4ai.snapshot_name AS ("schema" NAME, "name" NAME);
+
+DO $$
+DECLARE
+query_str text;
+ans bool;
+BEGIN
+ select case when count(*)=1 then true else false end as ans from (select * from pg_class where relname='snapshot_sequence') into ans;
+ if ans = false then
+ query_str := 'CREATE SEQUENCE db4ai.snapshot_sequence;';
+ EXECUTE IMMEDIATE query_str;
+ end if;
+ update pg_class set relacl = null where relname = 'snapshot_sequence' and relnamespace = 4991;
+ query_str := 'GRANT UPDATE ON db4ai.snapshot_sequence TO PUBLIC;';
+ EXECUTE IMMEDIATE query_str;
+END$$;
+
CREATE TABLE IF NOT EXISTS db4ai.snapshot
(
id BIGINT UNIQUE, -- snapshot id (surrogate key)
diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_200.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_200.sql
index e6a687dc8..cb2965220 100644
--- a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_200.sql
+++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_200.sql
@@ -5035,7 +5035,7 @@ DECLARE
BEGIN
query_str_nodes := 'select * from dbe_perf.node_name';
FOR row_name IN EXECUTE(query_str_nodes) LOOP
- query_str := 'select * from get_node_stat_reset_time()';
+ query_str := 'select * from pg_catalog.get_node_stat_reset_time()';
FOR row_data IN EXECUTE(query_str) LOOP
node_name := row_name.node_name;
reset_time := row_data.get_node_stat_reset_time;
diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_666.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_666.sql
index 3d2a6dc7d..7474138f8 100644
--- a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_666.sql
+++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_666.sql
@@ -1,15 +1,16 @@
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4098;
-CREATE FUNCTION pg_catalog.group_concat_finalfn(internal)
+CREATE OR REPLACE FUNCTION pg_catalog.group_concat_finalfn(internal)
RETURNS text
LANGUAGE internal
IMMUTABLE NOT FENCED NOT SHIPPABLE
AS 'group_concat_finalfn';
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4099;
-CREATE FUNCTION pg_catalog.group_concat_transfn(internal, text, VARIADIC "any")
+CREATE OR REPLACE FUNCTION pg_catalog.group_concat_transfn(internal, text, VARIADIC "any")
RETURNS internal
LANGUAGE internal
IMMUTABLE NOT FENCED NOT SHIPPABLE AS 'group_concat_transfn';
+UPDATE pg_catalog.pg_proc SET provariadic=0 WHERE oid=4099;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4097;
CREATE AGGREGATE pg_catalog.group_concat(text, "any") (SFUNC=group_concat_transfn, STYPE=internal, FINALFUNC=group_concat_finalfn);
diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_799.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_799.sql
index 5b2a2bf4b..0f8c17db3 100644
--- a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_799.sql
+++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_799.sql
@@ -382,20 +382,7 @@ BEGIN
END$DO$;
-DO $$
-DECLARE
-query_str text;
-ans bool;
-BEGIN
- select case when count(*)=1 then true else false end as ans from (select *from pg_class where relname='snapshot_sequence') into ans;
- if ans = false then
- query_str := 'CREATE SEQUENCE db4ai.snapshot_sequence;';
- EXECUTE IMMEDIATE query_str;
- end if;
- update pg_class set relacl = null where relname = 'snapshot_sequence' and relnamespace = 4991;
- query_str := 'GRANT UPDATE ON db4ai.snapshot_sequence TO PUBLIC;';
- EXECUTE IMMEDIATE query_str;
-END$$;-- gs_stack_int8
+-- gs_stack_int8
DROP FUNCTION IF EXISTS pg_catalog.gs_stack(INT8) CASCADE;
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 9997;
CREATE OR REPLACE FUNCTION pg_catalog.gs_stack(INT8)
diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_844.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_844.sql
new file mode 100644
index 000000000..a44bd4c18
--- /dev/null
+++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_844.sql
@@ -0,0 +1,556 @@
+DECLARE
+ SPACE_NAME VARCHAR(64);
+ REL_NAME VARCHAR(64);
+ SQL_COMMAND VARCHAR(200);
+ CURSOR C1 IS
+ SELECT n.nspname, c.relname
+ FROM pg_catalog.pg_namespace n INNER JOIN pg_catalog.pg_class c ON n.oid = c.relnamespace
+ WHERE c.relkind = 'r' AND c.parttype IN ('p', 's');
+BEGIN
+ OPEN C1;
+ LOOP
+ FETCH C1 INTO SPACE_NAME, REL_NAME;
+ EXIT WHEN C1%NOTFOUND;
+ SQL_COMMAND := 'ALTER TABLE "' || SPACE_NAME || '"."' || REL_NAME || '" RESET PARTITION;';
+ EXECUTE SQL_COMMAND;
+ END LOOP;
+ CLOSE C1;
+END;
+/
+-- ----------------------------------------------------------------
+-- upgrade pg_catalog.pg_collation
+-- ----------------------------------------------------------------
+CREATE OR REPLACE FUNCTION pg_catalog.Insert_pg_collation_temp(
+IN collname text,
+IN collnamespace integer,
+IN collowner integer,
+IN collencoding integer,
+IN collcollate text,
+IN collctype text,
+IN collpadattr text,
+IN collisdef bool
+)
+RETURNS void
+AS $$
+DECLARE
+ row_name record;
+ query_str_nodes text;
+BEGIN
+ query_str_nodes := 'select * from dbe_perf.node_name';
+ FOR row_name IN EXECUTE(query_str_nodes) LOOP
+ insert into pg_catalog.pg_collation values (collname, collnamespace, collowner, collencoding, collcollate, collctype, collpadattr, collisdef);
+ END LOOP;
+ return;
+END; $$
+LANGUAGE 'plpgsql';
+
+SET LOCAL inplace_upgrade_next_system_object_oids = IUO_GENERAL, 1026;
+select pg_catalog.Insert_pg_collation_temp('binary', 11, 10, 0, 'binary', 'binary', 'NO PAD', true);
+
+SET LOCAL inplace_upgrade_next_system_object_oids = IUO_GENERAL, 1537;
+select pg_catalog.Insert_pg_collation_temp('utf8mb4_general_ci', 11, 10, 7, 'utf8mb4_general_ci', 'utf8mb4_general_ci', 'PAD SPACE', true);
+
+SET LOCAL inplace_upgrade_next_system_object_oids = IUO_GENERAL, 1538;
+select pg_catalog.Insert_pg_collation_temp('utf8mb4_unicode_ci', 11, 10, 7, 'utf8mb4_unicode_ci', 'utf8mb4_unicode_ci', 'PAD SPACE', null);
+
+SET LOCAL inplace_upgrade_next_system_object_oids = IUO_GENERAL, 1539;
+select pg_catalog.Insert_pg_collation_temp('utf8mb4_bin', 11, 10, 7, 'utf8mb4_bin', 'utf8mb4_bin', 'PAD SPACE', null);
+
+DROP FUNCTION pg_catalog.Insert_pg_collation_temp;
+
+SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 3147;
+CREATE UNIQUE INDEX pg_collation_enc_def_index ON pg_catalog.pg_collation USING BTREE(collencoding INT4_OPS, collisdef BOOL_OPS);
+
+SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5169;
+CREATE OR REPLACE FUNCTION pg_catalog.gs_validate_ext_listen_ip(clear cstring, validate_node_name cstring, validate_ip cstring, OUT pid bigint, OUT node_name text)
+RETURNS SETOF record
+LANGUAGE internal
+STABLE STRICT NOT FENCED NOT SHIPPABLE ROWS 100
+AS 'gs_validate_ext_listen_ip';DO $DO$
+DECLARE
+ ans boolean;
+BEGIN
+ select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
+ if ans = true then
+ DROP FUNCTION IF EXISTS DBE_PERF.get_global_full_sql_by_timestamp(timestamp with time zone, timestamp with time zone) cascade;
+ DROP FUNCTION IF EXISTS DBE_PERF.get_global_slow_sql_by_timestamp(timestamp with time zone, timestamp with time zone) cascade;
+ DROP VIEW IF EXISTS DBE_PERF.statement_history cascade;
+ end if;
+END$DO$;
+
+DROP INDEX IF EXISTS pg_catalog.statement_history_time_idx;
+DROP TABLE IF EXISTS pg_catalog.statement_history cascade;
+
+CREATE unlogged table IF NOT EXISTS pg_catalog.statement_history(
+ db_name name,
+ schema_name name,
+ origin_node integer,
+ user_name name,
+ application_name text,
+ client_addr text,
+ client_port integer,
+ unique_query_id bigint,
+ debug_query_id bigint,
+ query text,
+ start_time timestamp with time zone,
+ finish_time timestamp with time zone,
+ slow_sql_threshold bigint,
+ transaction_id bigint,
+ thread_id bigint,
+ session_id bigint,
+ n_soft_parse bigint,
+ n_hard_parse bigint,
+ query_plan text,
+ n_returned_rows bigint,
+ n_tuples_fetched bigint,
+ n_tuples_returned bigint,
+ n_tuples_inserted bigint,
+ n_tuples_updated bigint,
+ n_tuples_deleted bigint,
+ n_blocks_fetched bigint,
+ n_blocks_hit bigint,
+ db_time bigint,
+ cpu_time bigint,
+ execution_time bigint,
+ parse_time bigint,
+ plan_time bigint,
+ rewrite_time bigint,
+ pl_execution_time bigint,
+ pl_compilation_time bigint,
+ data_io_time bigint,
+ net_send_info text,
+ net_recv_info text,
+ net_stream_send_info text,
+ net_stream_recv_info text,
+ lock_count bigint,
+ lock_time bigint,
+ lock_wait_count bigint,
+ lock_wait_time bigint,
+ lock_max_count bigint,
+ lwlock_count bigint,
+ lwlock_wait_count bigint,
+ lwlock_time bigint,
+ lwlock_wait_time bigint,
+ details bytea,
+ is_slow_sql boolean,
+ trace_id text,
+ advise text
+);
+REVOKE ALL on table pg_catalog.statement_history FROM public;
+create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql);
+
+DO $DO$
+DECLARE
+ ans boolean;
+ username text;
+ querystr text;
+BEGIN
+ select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
+ IF ans = true then
+ CREATE VIEW DBE_PERF.statement_history AS select * from pg_catalog.statement_history;
+
+ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp
+ (in start_timestamp timestamp with time zone,
+ in end_timestamp timestamp with time zone,
+ OUT node_name name,
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean,
+ OUT trace_id text,
+ OUT advise text)
+ RETURNS setof record
+ AS $$
+ DECLARE
+ row_data pg_catalog.statement_history%rowtype;
+ row_name record;
+ query_str text;
+ query_str_nodes text;
+ BEGIN
+ query_str_nodes := 'select * from dbe_perf.node_name';
+ FOR row_name IN EXECUTE(query_str_nodes) LOOP
+ query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || '''';
+ FOR row_data IN EXECUTE(query_str) LOOP
+ node_name := row_name.node_name;
+ db_name := row_data.db_name;
+ schema_name := row_data.schema_name;
+ origin_node := row_data.origin_node;
+ user_name := row_data.user_name;
+ application_name := row_data.application_name;
+ client_addr := row_data.client_addr;
+ client_port := row_data.client_port;
+ unique_query_id := row_data.unique_query_id;
+ debug_query_id := row_data.debug_query_id;
+ query := row_data.query;
+ start_time := row_data.start_time;
+ finish_time := row_data.finish_time;
+ slow_sql_threshold := row_data.slow_sql_threshold;
+ transaction_id := row_data.transaction_id;
+ thread_id := row_data.thread_id;
+ session_id := row_data.session_id;
+ n_soft_parse := row_data.n_soft_parse;
+ n_hard_parse := row_data.n_hard_parse;
+ query_plan := row_data.query_plan;
+ n_returned_rows := row_data.n_returned_rows;
+ n_tuples_fetched := row_data.n_tuples_fetched;
+ n_tuples_returned := row_data.n_tuples_returned;
+ n_tuples_inserted := row_data.n_tuples_inserted;
+ n_tuples_updated := row_data.n_tuples_updated;
+ n_tuples_deleted := row_data.n_tuples_deleted;
+ n_blocks_fetched := row_data.n_blocks_fetched;
+ n_blocks_hit := row_data.n_blocks_hit;
+ db_time := row_data.db_time;
+ cpu_time := row_data.cpu_time;
+ execution_time := row_data.execution_time;
+ parse_time := row_data.parse_time;
+ plan_time := row_data.plan_time;
+ rewrite_time := row_data.rewrite_time;
+ pl_execution_time := row_data.pl_execution_time;
+ pl_compilation_time := row_data.pl_compilation_time;
+ data_io_time := row_data.data_io_time;
+ net_send_info := row_data.net_send_info;
+ net_recv_info := row_data.net_recv_info;
+ net_stream_send_info := row_data.net_stream_send_info;
+ net_stream_recv_info := row_data.net_stream_recv_info;
+ lock_count := row_data.lock_count;
+ lock_time := row_data.lock_time;
+ lock_wait_count := row_data.lock_wait_count;
+ lock_wait_time := row_data.lock_wait_time;
+ lock_max_count := row_data.lock_max_count;
+ lwlock_count := row_data.lwlock_count;
+ lwlock_wait_count := row_data.lwlock_wait_count;
+ lwlock_time := row_data.lwlock_time;
+ lwlock_wait_time := row_data.lwlock_wait_time;
+ details := row_data.details;
+ is_slow_sql := row_data.is_slow_sql;
+ trace_id := row_data.trace_id;
+ advise := row_data.advise;
+ return next;
+ END LOOP;
+ END LOOP;
+ return;
+ END; $$
+ LANGUAGE 'plpgsql' NOT FENCED;
+
+ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp
+ (in start_timestamp timestamp with time zone,
+ in end_timestamp timestamp with time zone,
+ OUT node_name name,
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean,
+ OUT trace_id text,
+ OUT advise text)
+ RETURNS setof record
+ AS $$
+ DECLARE
+ row_data pg_catalog.statement_history%rowtype;
+ row_name record;
+ query_str text;
+ query_str_nodes text;
+ BEGIN
+ query_str_nodes := 'select * from dbe_perf.node_name';
+ FOR row_name IN EXECUTE(query_str_nodes) LOOP
+ query_str := 'SELECT * FROM DBE_PERF.statement_history where start_time >= ''' ||$1|| ''' and start_time <= ''' || $2 || ''' and is_slow_sql = true ';
+ FOR row_data IN EXECUTE(query_str) LOOP
+ node_name := row_name.node_name;
+ db_name := row_data.db_name;
+ schema_name := row_data.schema_name;
+ origin_node := row_data.origin_node;
+ user_name := row_data.user_name;
+ application_name := row_data.application_name;
+ client_addr := row_data.client_addr;
+ client_port := row_data.client_port;
+ unique_query_id := row_data.unique_query_id;
+ debug_query_id := row_data.debug_query_id;
+ query := row_data.query;
+ start_time := row_data.start_time;
+ finish_time := row_data.finish_time;
+ slow_sql_threshold := row_data.slow_sql_threshold;
+ transaction_id := row_data.transaction_id;
+ thread_id := row_data.thread_id;
+ session_id := row_data.session_id;
+ n_soft_parse := row_data.n_soft_parse;
+ n_hard_parse := row_data.n_hard_parse;
+ query_plan := row_data.query_plan;
+ n_returned_rows := row_data.n_returned_rows;
+ n_tuples_fetched := row_data.n_tuples_fetched;
+ n_tuples_returned := row_data.n_tuples_returned;
+ n_tuples_inserted := row_data.n_tuples_inserted;
+ n_tuples_updated := row_data.n_tuples_updated;
+ n_tuples_deleted := row_data.n_tuples_deleted;
+ n_blocks_fetched := row_data.n_blocks_fetched;
+ n_blocks_hit := row_data.n_blocks_hit;
+ db_time := row_data.db_time;
+ cpu_time := row_data.cpu_time;
+ execution_time := row_data.execution_time;
+ parse_time := row_data.parse_time;
+ plan_time := row_data.plan_time;
+ rewrite_time := row_data.rewrite_time;
+ pl_execution_time := row_data.pl_execution_time;
+ pl_compilation_time := row_data.pl_compilation_time;
+ data_io_time := row_data.data_io_time;
+ net_send_info := row_data.net_send_info;
+ net_recv_info := row_data.net_recv_info;
+ net_stream_send_info := row_data.net_stream_send_info;
+ net_stream_recv_info := row_data.net_stream_recv_info;
+ lock_count := row_data.lock_count;
+ lock_time := row_data.lock_time;
+ lock_wait_count := row_data.lock_wait_count;
+ lock_wait_time := row_data.lock_wait_time;
+ lock_max_count := row_data.lock_max_count;
+ lwlock_count := row_data.lwlock_count;
+ lwlock_wait_count := row_data.lwlock_wait_count;
+ lwlock_time := row_data.lwlock_time;
+ lwlock_wait_time := row_data.lwlock_wait_time;
+ details := row_data.details;
+ is_slow_sql := row_data.is_slow_sql;
+ trace_id := row_data.trace_id;
+ advise := row_data.advise;
+ return next;
+ END LOOP;
+ END LOOP;
+ return;
+ END; $$
+ LANGUAGE 'plpgsql' NOT FENCED;
+
+ SELECT SESSION_USER INTO username;
+ IF EXISTS (SELECT oid FROM pg_catalog.pg_class WHERE relname='statement_history') THEN
+ querystr := 'REVOKE ALL ON TABLE dbe_perf.statement_history FROM ' || quote_ident(username) || ';';
+ EXECUTE IMMEDIATE querystr;
+ querystr := 'REVOKE ALL ON TABLE pg_catalog.statement_history FROM ' || quote_ident(username) || ';';
+ EXECUTE IMMEDIATE querystr;
+ querystr := 'REVOKE SELECT on table dbe_perf.statement_history FROM public;';
+ EXECUTE IMMEDIATE querystr;
+ querystr := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE dbe_perf.statement_history TO ' || quote_ident(username) || ';';
+ EXECUTE IMMEDIATE querystr;
+ querystr := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE pg_catalog.statement_history TO ' || quote_ident(username) || ';';
+ EXECUTE IMMEDIATE querystr;
+ GRANT SELECT ON TABLE DBE_PERF.statement_history TO PUBLIC;
+ END IF;
+ end if;
+END$DO$;
+
+DO $DO$
+DECLARE
+ ans boolean;
+BEGIN
+ select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans;
+ if ans = true then
+ DROP FUNCTION IF EXISTS dbe_perf.standby_statement_history(boolean);
+ SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3118;
+ CREATE OR REPLACE FUNCTION dbe_perf.standby_statement_history(
+ IN only_slow boolean,
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean,
+ OUT trace_id text,
+ OUT advise text)
+ RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000
+ LANGUAGE internal AS $function$standby_statement_history_1v$function$;
+
+ DROP FUNCTION IF EXISTS dbe_perf.standby_statement_history(boolean, timestamp with time zone[]);
+ SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3119;
+ CREATE OR REPLACE FUNCTION dbe_perf.standby_statement_history(
+ IN only_slow boolean,
+ VARIADIC finish_time timestamp with time zone[],
+ OUT db_name name,
+ OUT schema_name name,
+ OUT origin_node integer,
+ OUT user_name name,
+ OUT application_name text,
+ OUT client_addr text,
+ OUT client_port integer,
+ OUT unique_query_id bigint,
+ OUT debug_query_id bigint,
+ OUT query text,
+ OUT start_time timestamp with time zone,
+ OUT finish_time timestamp with time zone,
+ OUT slow_sql_threshold bigint,
+ OUT transaction_id bigint,
+ OUT thread_id bigint,
+ OUT session_id bigint,
+ OUT n_soft_parse bigint,
+ OUT n_hard_parse bigint,
+ OUT query_plan text,
+ OUT n_returned_rows bigint,
+ OUT n_tuples_fetched bigint,
+ OUT n_tuples_returned bigint,
+ OUT n_tuples_inserted bigint,
+ OUT n_tuples_updated bigint,
+ OUT n_tuples_deleted bigint,
+ OUT n_blocks_fetched bigint,
+ OUT n_blocks_hit bigint,
+ OUT db_time bigint,
+ OUT cpu_time bigint,
+ OUT execution_time bigint,
+ OUT parse_time bigint,
+ OUT plan_time bigint,
+ OUT rewrite_time bigint,
+ OUT pl_execution_time bigint,
+ OUT pl_compilation_time bigint,
+ OUT data_io_time bigint,
+ OUT net_send_info text,
+ OUT net_recv_info text,
+ OUT net_stream_send_info text,
+ OUT net_stream_recv_info text,
+ OUT lock_count bigint,
+ OUT lock_time bigint,
+ OUT lock_wait_count bigint,
+ OUT lock_wait_time bigint,
+ OUT lock_max_count bigint,
+ OUT lwlock_count bigint,
+ OUT lwlock_wait_count bigint,
+ OUT lwlock_time bigint,
+ OUT lwlock_wait_time bigint,
+ OUT details bytea,
+ OUT is_slow_sql boolean,
+ OUT trace_id text,
+ OUT advise text)
+ RETURNS SETOF record NOT FENCED NOT SHIPPABLE ROWS 10000
+ LANGUAGE internal AS $function$standby_statement_history$function$;
+ end if;
+END$DO$;
diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_366.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_366.sql
index e27b51141..eef1e2f15 100644
--- a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_366.sql
+++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_366.sql
@@ -10,8 +10,8 @@ CREATE TABLE IF NOT EXISTS pg_catalog.gs_model_warehouse
createtime timestamp with time zone NOCOMPRESS NOT NULL,
processedtuples int4 NOCOMPRESS NOT NULL,
discardedtuples int4 NOCOMPRESS NOT NULL,
- pre_process_time float4 NOCOMPRESS NOT NULL,
- exec_time float4 NOCOMPRESS NOT NULL,
+ preprocesstime float4 NOCOMPRESS NOT NULL,
+ exectime float4 NOCOMPRESS NOT NULL,
iterations int4 NOCOMPRESS NOT NULL,
outputtype Oid NOCOMPRESS NOT NULL,
modeltype text,
@@ -27,7 +27,7 @@ CREATE TABLE IF NOT EXISTS pg_catalog.gs_model_warehouse
trainingscoresname text[1],
trainingscoresvalue float4[1],
modeldescribe text[1]
-)WITH OIDS TABLESPACE pg_default;
+)WITH OIDS;
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 3992;
CREATE UNIQUE INDEX gs_model_oid_index ON pg_catalog.gs_model_warehouse USING BTREE(oid OID_OPS);
@@ -44,6 +44,22 @@ CREATE SCHEMA db4ai;
COMMENT ON schema db4ai IS 'db4ai schema';
GRANT USAGE ON SCHEMA db4ai TO PUBLIC;
CREATE TYPE db4ai.snapshot_name AS ("schema" NAME, "name" NAME);
+
+DO $$
+DECLARE
+query_str text;
+ans bool;
+BEGIN
+ select case when count(*)=1 then true else false end as ans from (select * from pg_class where relname='snapshot_sequence') into ans;
+ if ans = false then
+ query_str := 'CREATE SEQUENCE db4ai.snapshot_sequence;';
+ EXECUTE IMMEDIATE query_str;
+ end if;
+ update pg_class set relacl = null where relname = 'snapshot_sequence' and relnamespace = 4991;
+ query_str := 'GRANT UPDATE ON db4ai.snapshot_sequence TO PUBLIC;';
+ EXECUTE IMMEDIATE query_str;
+END$$;
+
CREATE TABLE IF NOT EXISTS db4ai.snapshot
(
id BIGINT UNIQUE, -- snapshot id (surrogate key)
diff --git a/src/include/client_logic/client_logic.h b/src/include/client_logic/client_logic.h
index 04c6ab629..3a9f142be 100644
--- a/src/include/client_logic/client_logic.h
+++ b/src/include/client_logic/client_logic.h
@@ -54,11 +54,16 @@ void insert_gs_sec_encrypted_column_tuple(CeHeapInfo *ce_heap_info, Relation rel
bool is_exist_encrypted_column(const ObjectAddresses *targetObjects);
bool is_enc_type(Oid type_oid);
bool is_enc_type(const char *type_name);
+bool is_full_encrypted_rel(Relation rel);
ClientLogicColumnRef *get_column_enc_def(Oid rel_oid, const char *col_name);
+bool IsFullEncryptedRel(char* objSchema, char* objName);
+bool IsFuncProcOnEncryptedRel(char* objSchema, char* objName);
/* Get description functions */
void get_global_setting_description(StringInfo buffer, const ObjectAddress* object);
void get_column_setting_description(StringInfo buffer, const ObjectAddress* object);
void get_cached_column_description(StringInfo buffer, const ObjectAddress* object);
void get_global_setting_args_description(StringInfo buffer, const ObjectAddress* object);
void get_column_setting_args_description(StringInfo buffer, const ObjectAddress* object);
+const char *get_typename_by_id(Oid typeOid);
+const char *get_encryption_type_name(EncryptionType algorithm_type);
extern Datum get_client_info(PG_FUNCTION_ARGS);
\ No newline at end of file
diff --git a/src/include/commands/copy.h b/src/include/commands/copy.h
index 08e403ece..06549ada6 100644
--- a/src/include/commands/copy.h
+++ b/src/include/commands/copy.h
@@ -360,6 +360,10 @@ typedef struct CopyStateData {
LedgerHashState hashstate;
bool is_load_copy;
bool is_useeof;
+ bool is_dumpfile;
+ char* o_enclosed;
+ char* enclosed;
+ char* line_start;
} CopyStateData;
typedef struct InsertCopyLogInfoData {
@@ -380,8 +384,8 @@ typedef struct InsertCopyLogInfoData* LogInsertState;
#define IS_TEXT(cstate) ((cstate)->fileformat == FORMAT_TEXT)
#define IS_REMOTEWRITE(cstate) ((cstate)->fileformat == FORMAT_WRITABLE)
-CopyState BeginCopyTo(
- Relation rel, Node* query, const char* queryString, const char* filename, List* attnamelist, List* options);
+CopyState BeginCopyTo(Relation rel, Node* query, const char* queryString,
+ const char* filename, List* attnamelist, List* options, CopyFileType filetype = S_COPYFILE);
void EndCopyTo(CopyState cstate);
uint64 DoCopyTo(CopyState cstate);
extern uint64 DoCopy(CopyStmt* stmt, const char* queryString);
@@ -389,6 +393,7 @@ template
void CopySendEndOfRow(CopyState cstate);
void CopyOneRowTo(CopyState cstate, Oid tupleOid, Datum* values, const bool* nulls);
+extern void ProcessFileOptions(CopyState cstate, bool is_from, List* options, bool is_dumpfile);
extern void ProcessCopyOptions(CopyState cstate, bool is_from, List* options);
extern bool IsTypeAcceptEmptyStr(Oid typeOid);
extern CopyState BeginCopyFrom(Relation rel, const char* filename, List* attnamelist,
diff --git a/src/include/commands/dbcommands.h b/src/include/commands/dbcommands.h
index f8848e808..561eb76e4 100644
--- a/src/include/commands/dbcommands.h
+++ b/src/include/commands/dbcommands.h
@@ -55,7 +55,7 @@ extern void RenameDatabase(const char* oldname, const char* newname);
extern void AlterDatabase(AlterDatabaseStmt* stmt, bool isTopLevel);
extern void AlterDatabaseSet(AlterDatabaseSetStmt* stmt);
extern void AlterDatabaseOwner(const char* dbname, Oid newOwnerId);
-
+extern void AlterDatabasePermissionCheck(Oid dboid, const char* dbname);
extern Oid get_database_oid(const char* dbname, bool missingok);
extern char* get_database_name(Oid dbid);
extern char* get_and_check_db_name(Oid dbid, bool is_ereport = false);
diff --git a/src/include/commands/defrem.h b/src/include/commands/defrem.h
index 6336802b9..bdb680407 100644
--- a/src/include/commands/defrem.h
+++ b/src/include/commands/defrem.h
@@ -23,7 +23,7 @@ extern void RemoveObjects(DropStmt* stmt, bool missing_ok, bool is_securityadmin
/* commands/indexcmds.c */
extern Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_alter_table, bool check_rights,
- bool skip_build, bool quiet);
+ bool skip_build, bool quiet, bool is_modify_primary = false);
extern void ReindexIndex(RangeVar* indexRelation, const char* partition_name, AdaptMem* mem_info, bool concurrent);
extern void ReindexTable(RangeVar* relation, const char* partition_name, AdaptMem* mem_info, bool concurrent);
extern void ReindexInternal(RangeVar* relation, const char* partition_name);
@@ -198,6 +198,11 @@ extern Oid GetFunctionNodeGroup(CreateFunctionStmt* stmt, bool* multi_group);
extern Oid GetFunctionNodeGroupByFuncid(Oid funcid);
extern Oid GetFunctionNodeGroup(AlterFunctionStmt* stmt);
+/* commands/eventcmds.c */
+extern void CreateEventCommand(CreateEventStmt* stmt);
+extern void AlterEventCommand(AlterEventStmt* stmt);
+extern void DropEventCommand(DropEventStmt* stmt);
+
#endif /* !FRONTEND_PARSER */
extern DefElem* defWithOids(bool value);
#endif /* DEFREM_H */
diff --git a/src/include/commands/sqladvisor.h b/src/include/commands/sqladvisor.h
index 0bc67bb32..3e50739cf 100644
--- a/src/include/commands/sqladvisor.h
+++ b/src/include/commands/sqladvisor.h
@@ -278,5 +278,5 @@ extern Datum analyze_workload(PG_FUNCTION_ARGS);
extern bool checkSelectIntoParse(SelectStmt* stmt);
extern PLpgSQL_datum* copypPlpgsqlDatum(PLpgSQL_datum* datum);
extern StmtResult *execute_stmt(const char *query_string, bool need_result = false);
-
+extern StmtResult *execute_select_into_varlist(Query* parsetree);
#endif /* SQLADVISOR_H */
diff --git a/src/include/commands/tablecmds.h b/src/include/commands/tablecmds.h
index 83136b7f7..ebad88083 100644
--- a/src/include/commands/tablecmds.h
+++ b/src/include/commands/tablecmds.h
@@ -138,6 +138,8 @@ extern void clearAttrInitDefVal(Oid relid);
extern void ATMatviewGroup(List* stmts, Oid mvid, LOCKMODE lockmode);
extern void AlterCreateChainTables(Oid relOid, Datum reloptions, CreateStmt *mainTblStmt);
+extern void CheckAutoIncrementDatatype(Oid typid, const char* colname);
+
/**
* @Description: Whether judge the column is partition column.
* @in rel, A relation.
diff --git a/src/include/commands/user.h b/src/include/commands/user.h
index bee6b5479..38ef84643 100644
--- a/src/include/commands/user.h
+++ b/src/include/commands/user.h
@@ -79,6 +79,8 @@ int64 SearchAllAccounts();
void InitAccountLockHashTable();
extern USER_STATUS GetAccountLockedStatusFromHashTable(Oid roleid);
extern void UpdateAccountInfoFromHashTable();
+extern bool have_createrole_privilege(void);
+extern bool IsReservedRoleName(const char* name);
extern inline void str_reset(char* str)
{
diff --git a/src/include/distributelayer/streamCore.h b/src/include/distributelayer/streamCore.h
index dc9e28181..f356afd3c 100755
--- a/src/include/distributelayer/streamCore.h
+++ b/src/include/distributelayer/streamCore.h
@@ -143,6 +143,7 @@ enum EdataWriteStatus {
typedef struct TupleVector {
TupleTableSlot** tupleVector;
int tuplePointer;
+ int tupleCount;
} TupleVector;
typedef struct StreamSharedContext {
@@ -302,7 +303,7 @@ public:
/* Synchronize quit. */
static void syncQuit(StreamObjStatus status);
- static void ReleaseStreamGroup(bool resetSession);
+ static void ReleaseStreamGroup(bool resetSession, StreamObjStatus status = STREAM_COMPLETE);
/* Grant stream connect permission. */
static void grantStreamConnectPermission();
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
old mode 100644
new mode 100755
index 919ad9d98..e76db0373
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -177,13 +177,14 @@ ScanState* search_plan_tree(PlanState* node, Oid table_oid);
* prototypes from functions in execGrouping.c
*/
extern bool execTuplesMatch(TupleTableSlot* slot1, TupleTableSlot* slot2, int numCols, AttrNumber* matchColIdx,
- FmgrInfo* eqfunctions, MemoryContext evalContext);
+ FmgrInfo* eqfunctions, MemoryContext evalContext, Oid *collations);
extern bool execTuplesUnequal(TupleTableSlot* slot1, TupleTableSlot* slot2, int numCols, AttrNumber* matchColIdx,
- FmgrInfo* eqfunctions, MemoryContext evalContext);
+ FmgrInfo* eqfunctions, MemoryContext evalContext, Oid *collations);
extern FmgrInfo* execTuplesMatchPrepare(int numCols, Oid* eqOperators);
extern void execTuplesHashPrepare(int numCols, Oid* eqOperators, FmgrInfo** eqFunctions, FmgrInfo** hashFunctions);
extern TupleHashTable BuildTupleHashTable(int numCols, AttrNumber* keyColIdx, FmgrInfo* eqfunctions,
- FmgrInfo* hashfunctions, long nbuckets, Size entrysize, MemoryContext tablecxt, MemoryContext tempcxt, int workMem);
+ FmgrInfo* hashfunctions, long nbuckets, Size entrysize, MemoryContext tablecxt, MemoryContext tempcxt, int workMem,
+ Oid *collations = NULL);
extern TupleHashEntry LookupTupleHashEntry(
TupleHashTable hashtable, TupleTableSlot* slot, bool* isnew, bool isinserthashtbl = true);
extern TupleHashEntry FindTupleHashEntry(
@@ -312,6 +313,7 @@ extern Tuplestorestate* ExecMakeTableFunctionResult(
extern Datum ExecEvalExprSwitchContext(
ExprState* expression, ExprContext* econtext, bool* isNull, ExprDoneCond* isDone);
extern ExprState* ExecInitExpr(Expr* node, PlanState* parent);
+extern List* ExecInitExprList(List* nodes, PlanState *parent);
extern ExprState* ExecPrepareExpr(Expr* node, EState* estate);
extern bool ExecQual(List* qual, ExprContext* econtext, bool resultForNull);
extern int ExecTargetListLength(List* targetlist);
@@ -443,6 +445,22 @@ static inline RangeTblEntry *exec_rt_fetch(Index rti, EState *estate)
return (RangeTblEntry *)list_nth(estate->es_range_table, rti - 1);
}
+static inline int128 datum2autoinc(ConstrAutoInc *cons_autoinc, Datum datum)
+{
+ if (cons_autoinc->datum2autoinc_func != NULL) {
+ return DatumGetInt128(DirectFunctionCall1((PGFunction)(uintptr_t)cons_autoinc->datum2autoinc_func, datum));
+ }
+ return DatumGetInt128(datum);
+}
+
+static inline Datum autoinc2datum(ConstrAutoInc *cons_autoinc, int128 autoinc)
+{
+ if (cons_autoinc->autoinc2datum_func != NULL) {
+ return DirectFunctionCall1((PGFunction)(uintptr_t)cons_autoinc->autoinc2datum_func, Int128GetDatum(autoinc));
+ }
+ return Int128GetDatum(autoinc);
+}
+
extern Partition ExecOpenScanParitition(
EState* estate, Relation parent, PartitionIdentifier* partID, LOCKMODE lockmode);
@@ -634,6 +652,13 @@ public:
}
}
+ void ResetSmp()
+ {
+ if (u_sess != NULL) {
+ u_sess->opt_cxt.smp_enabled = m_smpEnabled;
+ }
+ }
+
private:
bool m_smpEnabled;
};
diff --git a/src/include/executor/hashjoin.h b/src/include/executor/hashjoin.h
index 347694ce8..aa9379c6b 100644
--- a/src/include/executor/hashjoin.h
+++ b/src/include/executor/hashjoin.h
@@ -185,6 +185,7 @@ typedef struct HashJoinTableData {
int spreadNum; /* auto spread times */
int64* spill_size;
uint64 spill_count; /* times of spilling to disk */
+ Oid *collations;
} HashJoinTableData;
#endif /* HASHJOIN_H */
diff --git a/src/include/executor/node/nodeHash.h b/src/include/executor/node/nodeHash.h
index 00165fadd..94768b570 100644
--- a/src/include/executor/node/nodeHash.h
+++ b/src/include/executor/node/nodeHash.h
@@ -25,7 +25,7 @@ extern Node* MultiExecHash(HashState* node);
extern void ExecEndHash(HashState* node);
extern void ExecReScanHash(HashState* node);
-extern HashJoinTable ExecHashTableCreate(Hash* node, List* hashOperators, bool keepNulls);
+extern HashJoinTable ExecHashTableCreate(Hash* node, List* hashOperators, bool keepNulls, List *hash_collations);
extern void ExecHashTableDestroy(HashJoinTable hashtable);
extern void ExecHashTableInsert(HashJoinTable hashtable, TupleTableSlot* slot, uint32 hashvalue, int planid, int dop,
Instrumentation* instrument = NULL);
diff --git a/src/include/instruments/instr_statement.h b/src/include/instruments/instr_statement.h
index e6ad25c5e..49161d897 100644
--- a/src/include/instruments/instr_statement.h
+++ b/src/include/instruments/instr_statement.h
@@ -120,6 +120,14 @@ typedef enum {
// type, timestamp, lwlockId, lockmode
#define LWLOCK_RELEASE_START_DETAIL_BUFSIZE 15
+/* ----------
+ * Flags for CAUSE TYPE
+ * ----------
+ */
+#define NUM_F_TYPECASTING (1 << 1) /* cast function exists */
+#define NUM_F_LIMIT (1 << 2) /* limit returns too many rows */
+#define NUM_F_LEAKPROOF (1 << 3) /* proleakproof of function is false */
+
#define INVALID_DETAIL_BUFSIZE 0
#define STATEMENT_DETAIL_BUF_MULTI 10
@@ -190,6 +198,7 @@ typedef struct StatementStatContext {
uint64 plan_size;
LockSummaryStat lock_summary;
StatementDetail details;
+ uint32 cause_type; /* possible Slow SQL risks */
/* wait events */
WaitEventEntry *wait_events;
@@ -228,6 +237,9 @@ extern void instr_stmt_set_wait_events_bitmap(uint32 class_id, uint32 event_id);
extern void instr_stmt_copy_wait_events();
extern void instr_stmt_diff_wait_events();
extern void init_full_sql_wait_events();
+extern void instr_stmt_report_cause_type(uint32 type);
+extern bool instr_stmt_plan_need_report_cause_type();
+extern uint32 instr_stmt_plan_get_cause_type();
#endif
diff --git a/src/include/knl/knl_guc/knl_instance_attr_network.h b/src/include/knl/knl_guc/knl_instance_attr_network.h
index a5194b884..3f966aca9 100644
--- a/src/include/knl/knl_guc/knl_instance_attr_network.h
+++ b/src/include/knl/knl_guc/knl_instance_attr_network.h
@@ -41,6 +41,9 @@
#include "knl/knl_guc/knl_guc_common.h"
#include "libcomm/libcomm.h"
+#define MAXLISTEN 64
+#define IP_LEN 64
+
typedef struct knl_instance_attr_network {
bool PoolerStatelessReuse;
bool comm_tcp_mode;
@@ -63,7 +66,9 @@ typedef struct knl_instance_attr_network {
int cn_send_buffer_size;
char* Unix_socket_group;
char* UnixSocketDir;
+#ifdef ENABLE_MULTIPLE_NODES
char* ListenAddresses;
+#endif
char* tcp_link_addr;
bool comm_enable_SSL;
LibCommConn ** comm_ctrl_channel_conn;
diff --git a/src/include/knl/knl_guc/knl_instance_attr_storage.h b/src/include/knl/knl_guc/knl_instance_attr_storage.h
index 4a0ed9e27..f260ed0ec 100755
--- a/src/include/knl/knl_guc/knl_instance_attr_storage.h
+++ b/src/include/knl/knl_guc/knl_instance_attr_storage.h
@@ -138,6 +138,7 @@ typedef struct knl_instance_attr_storage {
bool gucMostAvailableSync;
bool enable_ustore;
bool auto_csn_barrier;
+ bool enable_availablezone;
bool enable_wal_shipping_compression;
int WalReceiverBufSize;
int DataQueueBufSize;
diff --git a/src/include/knl/knl_guc/knl_session_attr_network.h b/src/include/knl/knl_guc/knl_session_attr_network.h
index 2519e6a9d..55527be15 100644
--- a/src/include/knl/knl_guc/knl_session_attr_network.h
+++ b/src/include/knl/knl_guc/knl_session_attr_network.h
@@ -54,6 +54,9 @@ typedef struct knl_session_attr_network {
int PoolerConnectTimeout;
int PoolerCancelTimeout;
int comm_max_datanode;
+#ifndef ENABLE_MULTIPLE_NODES
+ char* ListenAddresses;
+#endif
#ifdef LIBCOMM_SPEED_TEST_ENABLE
int comm_test_thread_num;
int comm_test_msg_len;
diff --git a/src/include/knl/knl_guc/knl_session_attr_security.h b/src/include/knl/knl_guc/knl_session_attr_security.h
index 3e2aa3b91..bca18085f 100644
--- a/src/include/knl/knl_guc/knl_session_attr_security.h
+++ b/src/include/knl/knl_guc/knl_session_attr_security.h
@@ -88,6 +88,9 @@ typedef struct knl_session_attr_security {
char* tde_cmk_id;
bool Enable_Security_Policy;
int audit_xid_info;
+ char* no_audit_client;
+ char* full_audit_users;
+ int audit_system_function_exec;
} knl_session_attr_security;
#endif /* SRC_INCLUDE_KNL_KNL_SESSION_ATTR_SECURITY_H_ */
diff --git a/src/include/knl/knl_guc/knl_session_attr_sql.h b/src/include/knl/knl_guc/knl_session_attr_sql.h
index 1ed9645bc..c1ee290ca 100644
--- a/src/include/knl/knl_guc/knl_session_attr_sql.h
+++ b/src/include/knl/knl_guc/knl_session_attr_sql.h
@@ -188,6 +188,7 @@ typedef struct knl_session_attr_sql {
char* expected_computing_nodegroup;
char* default_storage_nodegroup;
char* inlist2join_optmode;
+ char* b_format_behavior_compat_string;
char* behavior_compat_string;
char* plsql_compile_behavior_compat_string;
char* connection_info;
diff --git a/src/include/knl/knl_instance.h b/src/include/knl/knl_instance.h
index 82cc19006..14c916ff2 100755
--- a/src/include/knl/knl_instance.h
+++ b/src/include/knl/knl_instance.h
@@ -1130,10 +1130,14 @@ typedef struct knl_g_listen_context {
#define IP_LEN 64
/* The socket(s) we're listening to. */
pgsocket ListenSocket[MAXLISTEN];
- char LocalAddrList[MAXLISTEN][IP_LEN];
- int LocalIpNum;
int listen_sock_type[MAXLISTEN]; /* ori type: enum ListenSocketType */
bool reload_fds;
+
+ /* use for reload listen_addresses */
+ char all_listen_addr_list[MAXLISTEN][IP_LEN];
+ int all_listen_port_list[MAXLISTEN];
+ int listen_chanel_type[MAXLISTEN]; /* ori type: enum ListenChanelType */
+ volatile uint32 is_reloading_listen_socket; /* mark PM is reloading listen_addresses */
} knl_g_listen_context;
typedef struct knl_g_startup_context {
diff --git a/src/include/knl/knl_session.h b/src/include/knl/knl_session.h
index f76bdb533..c11f6ddbf 100644
--- a/src/include/knl/knl_session.h
+++ b/src/include/knl/knl_session.h
@@ -536,6 +536,7 @@ typedef struct knl_u_utils_context {
int GUCNestLevel; /* 1 when in main transaction */
+ unsigned int b_format_behavior_compat_flags;
unsigned int behavior_compat_flags;
unsigned int plsql_compile_behavior_compat_flags;
@@ -996,6 +997,7 @@ typedef struct knl_u_plancache_context {
* exploration.
*/
void *explored_plan_info;
+ bool is_plan_exploration;
HTAB *generic_roots;
} knl_u_plancache_context;
@@ -1191,10 +1193,14 @@ typedef struct knl_u_proc_context {
bool clientIsGsroach; /* gs_roach tool check flag */
bool clientIsGsRestore; /* gs_restore tool check flag */
bool clientIsSubscription; /* subscription client check flag */
+ bool clientIsCMAgent; /* CM agent check flag */
bool IsBinaryUpgrade;
bool IsWLMWhiteList; /* this proc will not be controled by WLM */
bool gsRewindAddCount;
bool PassConnLimit;
+ bool clientIsGsql; /* gsql tool check flag */
+ /* We allow gsql to copy snapshot from other threads, but set a limit number */
+ int gsqlRemainCopyNum;
char applicationName[NAMEDATALEN]; /* receive application name in ProcessStartupPacket */
@@ -1858,6 +1864,8 @@ typedef struct knl_u_storage_context {
int num_bufs_in_block;
int total_bufs_allocated;
MemoryContext LocalBufferContext;
+ List *partition_dml_oids; /* list of partitioned table's oid which is on dml operations */
+ List *partition_ddl_oids; /* list of partitioned table's oid which is on ddl operations */
} knl_u_storage_context;
diff --git a/src/include/knl/knl_thread.h b/src/include/knl/knl_thread.h
index a4a7cc80b..91053cb90 100755
--- a/src/include/knl/knl_thread.h
+++ b/src/include/knl/knl_thread.h
@@ -2830,6 +2830,8 @@ typedef enum {
typedef struct knl_t_postmaster_context {
/* Notice: the value is same sa GUC_MAX_REPLNODE_NUM */
#define MAX_REPLNODE_NUM 9
+#define MAXLISTEN 64
+#define IP_LEN 64
/* flag when process startup packet for logic conn */
bool ProcessStartupPacketForLogicConn;
@@ -2852,10 +2854,16 @@ typedef struct knl_t_postmaster_context {
struct replconninfo* CrossClusterReplConnArray[MAX_REPLNODE_NUM];
bool CrossClusterReplConnChanged[MAX_REPLNODE_NUM];
struct hashmemdata* HaShmData;
+ char LocalAddrList[MAXLISTEN][IP_LEN]; /* use for sub thread which is IsUnderPostmaster */
+ int LocalIpNum; /* use for sub thread which is IsUnderPostmaster */
gs_thread_t CurExitThread;
bool IsRPCWorkerThread;
+ bool can_listen_addresses_reload;
+ bool is_listen_addresses_reload;
+ bool all_listen_addr_can_stop[MAXLISTEN];
+ bool local_listen_addr_can_stop[MAXLISTEN];
/* private variables for reaper backend thread */
Latch ReaperBackendLatch;
@@ -3175,6 +3183,12 @@ typedef struct knl_t_proxy_context {
char identifier[IDENTIFIER_LENGTH];
} knl_t_proxy_context;
+#define RC_MAX_NUM 16
+typedef struct knl_t_rc_context {
+ int rcNum;
+ Oid rcData[RC_MAX_NUM];
+} knl_t_rc_context;
+
#define DCF_MAX_NODES 10
/* For log ctrl. Willing let standby flush and apply log under RTO seconds */
typedef struct DCFLogCtrlData {
@@ -3488,6 +3502,7 @@ typedef struct knl_thrd_context {
knl_t_sql_patch_context sql_patch_cxt;
knl_t_dms_context dms_cxt;
knl_t_libsw_context libsw_cxt;
+ knl_t_rc_context rc_cxt;
} knl_thrd_context;
#ifdef ENABLE_MOT
diff --git a/src/include/libpq/libpq.h b/src/include/libpq/libpq.h
index 405a76549..bff0808d1 100644
--- a/src/include/libpq/libpq.h
+++ b/src/include/libpq/libpq.h
@@ -49,7 +49,7 @@ extern ProtocolExtensionConfig default_protocol_config;
*/
extern int StreamServerPort(int family, char* hostName, unsigned short portNumber, const char* unixSocketName,
pgsocket ListenSocket[], int MaxListen, bool add_localaddr_flag,
- bool is_create_psql_sock, bool is_create_libcomm_sock,
+ bool is_create_psql_sock, bool is_create_libcomm_sock, ListenChanelType listen_channel,
ProtocolExtensionConfig* protocol_config = &default_protocol_config);
extern int StreamConnection(pgsocket server_fd, Port* port);
extern void StreamClose(pgsocket sock);
diff --git a/src/include/mb/pg_wchar.h b/src/include/mb/pg_wchar.h
index 6c43458e3..03c9987da 100644
--- a/src/include/mb/pg_wchar.h
+++ b/src/include/mb/pg_wchar.h
@@ -166,6 +166,7 @@ typedef unsigned int pg_wchar;
/* FREE 0xfe free (unused) */
/* FREE 0xff free (unused) */
+#define PG_INVALID_ENCODING -1
/*
* openGauss encoding identifiers
*
@@ -271,6 +272,10 @@ typedef struct pg_encname {
extern pg_encname pg_encname_tbl[];
extern unsigned int pg_encname_tbl_sz;
+extern pg_enc pg_enc_coll_map_b[];
+#define FAST_GET_CHARSET_BY_COLL(coll_oid) \
+ (pg_enc_coll_map_b[(coll_oid - B_FORMAT_COLLATION_OID_MIN) / B_FORMAT_COLLATION_INTERVAL])
+
/*
* Careful:
*
diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h
index b77e13070..9af57da8b 100644
--- a/src/include/miscadmin.h
+++ b/src/include/miscadmin.h
@@ -38,6 +38,9 @@
* Backend version and inplace upgrade staffs
*****************************************************************************/
+extern const uint32 PARTITION_ENHANCE_VERSION_NUM;
+extern const uint32 SELECT_INTO_FILE_VERSION_NUM;
+extern const uint32 CHARACTER_SET_VERSION_NUM;
extern const uint32 SELECT_INTO_VAR_VERSION_NUM;
extern const uint32 LARGE_SEQUENCE_VERSION_NUM;
extern const uint32 GRAND_VERSION_NUM;
@@ -121,12 +124,29 @@ extern const uint32 FDW_SUPPORT_JOIN_AGG_VERSION_NUM;
extern const uint32 UNION_NULL_VERSION_NUM;
extern const uint32 INSERT_RIGHT_REF_VERSION_NUM;
extern const uint32 CREATE_INDEX_IF_NOT_EXISTS_VERSION_NUM;
+extern const uint32 SLOW_SQL_VERSION_NUM;
extern void register_backend_version(uint32 backend_version);
extern bool contain_backend_version(uint32 version_number);
#define INPLACE_UPGRADE_PRECOMMIT_VERSION 1
+// b_format_behavior_compat_options params
+#define B_FORMAT_OPT_ENABLE_SET_SESSION_TRANSACTION 1
+#define B_FORMAT_OPT_ENABLE_SET_VARIABLES 2
+#define B_FORMAT_OPT_ENABLE_MODIFY_COLUMN 4
+#define B_FORMAT_OPT_DEFAULT_COLLATION 8
+#define B_FORMAT_OPT_MAX 4
+
+#define ENABLE_SET_SESSION_TRANSACTION \
+ ((u_sess->utils_cxt.b_format_behavior_compat_flags & B_FORMAT_OPT_ENABLE_SET_SESSION_TRANSACTION) && \
+ u_sess->attr.attr_sql.sql_compatibility == B_FORMAT)
+#define ENABLE_SET_VARIABLES (u_sess->utils_cxt.b_format_behavior_compat_flags & B_FORMAT_OPT_ENABLE_SET_VARIABLES)
+#define USE_DEFAULT_COLLATION (u_sess->utils_cxt.b_format_behavior_compat_flags & B_FORMAT_OPT_DEFAULT_COLLATION)
+#define ENABLE_MODIFY_COLUMN \
+ ((u_sess->utils_cxt.b_format_behavior_compat_flags & B_FORMAT_OPT_ENABLE_MODIFY_COLUMN) && \
+ u_sess->attr.attr_sql.sql_compatibility == B_FORMAT)
+
#define OPT_DISPLAY_LEADING_ZERO 1
#define OPT_END_MONTH_CALCULATE 2
#define OPT_COMPAT_ANALYZE_SAMPLE 4
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 7a0c55919..921ef2617 100755
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -758,6 +758,7 @@ typedef struct TupleHashTableData {
int64 width; /* records total width in memory */
bool add_width; /* if width should be added */
bool causedBySysRes; /* the batch increase caused by system resources limit? */
+ Oid *tab_collations; /* collations for hash and comparison */
} TupleHashTableData;
typedef HASH_SEQ_STATUS TupleHashIterator;
@@ -986,6 +987,7 @@ typedef struct SubPlanState {
VectorBatch* aggExprBatch; /* a batch for only one row to store the para data for vector expr */
ScalarVector* tempvector; /* a temp vector for vector expression */
MemoryContext ecxt_per_batch_memory; /* memory contexts for one batch */
+ Oid *tab_collations; /* collations for hash and comparison */
} SubPlanState;
/* ----------------
@@ -1966,7 +1968,8 @@ typedef struct FunctionScanState {
*
* rowcontext per-expression-list context
* exprlists array of expression lists being evaluated
- * array_len size of array
+ * exprstatelists array of expression state lists, for subplans only
+ * array_len size of above array
* curr_idx current array index (0-based)
* marked_idx marked position (for mark/restore)
*
@@ -1975,13 +1978,20 @@ typedef struct FunctionScanState {
* rowcontext, in which to build the executor expression state for each
* Values sublist. Resetting this context lets us get rid of expression
* state for each row, avoiding major memory leakage over a long values list.
+ * However, that doesn't work for sublists containing SubPlans, because a
+ * SubPlan has to be connected up to the outer plan tree to work properly.
+ * Therefore, for only those sublists containing SubPlans, we do expression
+ * state construction at executor start, and store those pointers in
+ * exprstatelists[]. NULL entries in that array correspond to simple
+ * subexpressions that are handled as described above.
* ----------------
*/
typedef struct ValuesScanState {
ScanState ss; /* its first field is NodeTag */
ExprContext* rowcontext;
List** exprlists;
- int array_len;
+ List** exprstatelists; /* array of expression state lists, for subplans only */
+ int array_len; /* size of above array */
int curr_idx;
int marked_idx;
} ValuesScanState;
@@ -2279,6 +2289,7 @@ typedef struct HashJoinState {
bool hj_OuterNotEmpty;
bool hj_streamBothSides;
bool hj_rebuildHashtable;
+ List* hj_hash_collations; /* list of collations OIDs */
} HashJoinState;
/* ----------------------------------------------------------------
diff --git a/src/include/nodes/nodeFuncs.h b/src/include/nodes/nodeFuncs.h
index 973794bc8..0422f936f 100644
--- a/src/include/nodes/nodeFuncs.h
+++ b/src/include/nodes/nodeFuncs.h
@@ -37,9 +37,11 @@
extern Oid exprType(const Node* expr);
extern int32 exprTypmod(const Node* expr);
+extern int exprCharset(const Node* expr);
extern bool exprIsLengthCoercion(const Node* expr, int32* coercedTypmod);
extern Node* relabel_to_typmod(Node* expr, int32 typmod);
extern bool expression_returns_set(Node* clause);
+extern bool expression_contains_rownum(Node* clause);
extern Oid exprCollation(const Node* expr);
extern Oid exprInputCollation(const Node* expr);
diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h
index 4e7fd61b4..b56fa6d18 100755
--- a/src/include/nodes/nodes.h
+++ b/src/include/nodes/nodes.h
@@ -424,6 +424,11 @@ typedef enum NodeTag {
T_IndexStmt,
T_CreateFunctionStmt,
T_AlterFunctionStmt,
+ T_CreateEventStmt,
+ T_AlterEventStmt,
+ T_DropEventStmt,
+ T_ShowEventStmt,
+ T_CompileStmt,
T_DoStmt,
T_RenameStmt,
T_RuleStmt,
@@ -805,7 +810,8 @@ typedef enum NodeTag {
T_RelCI,
T_CentroidPoint,
T_UserSetElem,
- T_UserVar
+ T_UserVar,
+ T_CharsetCollateOptions
} NodeTag;
/* if you add to NodeTag also need to add nodeTagToString */
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index b0c5ecee7..0ee1faf69 100755
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -1111,7 +1111,7 @@ typedef struct CreateRoleStmt {
RoleStmtType stmt_type; /* ROLE/USER/GROUP */
char* role; /* role name */
List* options; /* List of DefElem nodes */
- bool missing_ok; /* skip error if a role is exists */
+ bool missing_ok; /* skip error if the role already exists */
} CreateRoleStmt;
/* ----------------------
@@ -1993,6 +1993,8 @@ typedef struct AlterSchemaStmt {
char *schemaname; /* the name of the schema to create */
char *authid; /* the owner of the created schema */
bool hasBlockChain; /* whether this schema has blockchain */
+ int charset;
+ char *collate;
} AlterSchemaStmt;
/*
diff --git a/src/include/nodes/parsenodes_common.h b/src/include/nodes/parsenodes_common.h
index 023a9704a..5240d40b6 100644
--- a/src/include/nodes/parsenodes_common.h
+++ b/src/include/nodes/parsenodes_common.h
@@ -102,7 +102,8 @@ typedef enum ObjectType {
OBJECT_PUBLICATION,
OBJECT_PUBLICATION_NAMESPACE,
OBJECT_PUBLICATION_REL,
- OBJECT_SUBSCRIPTION
+ OBJECT_SUBSCRIPTION,
+ OBJECT_EVENT
} ObjectType;
#define OBJECT_IS_SEQUENCE(obj) \
@@ -172,6 +173,7 @@ typedef struct TypeName {
int location; /* token location, or -1 if unknown */
int end_location; /* %TYPE and date specified, token end location */
bool pct_rowtype; /* %ROWTYPE specified? */
+ int charset;
} TypeName;
typedef enum FunctionParameterMode {
@@ -730,6 +732,19 @@ typedef struct CollateClause {
int location; /* token location, or -1 if unknown */
} CollateClause;
+typedef enum {
+ OPT_CHARSET,
+ OPT_COLLATE,
+ OPT_CHARSETCOLLATE
+} CharsetCollateType;
+
+typedef struct CharsetCollateOptions {
+ NodeTag type;
+ CharsetCollateType cctype;
+ int charset;
+ char* collate;
+} CharsetCollateOptions;
+
/* ----------------------
* Create Schema Statement
*
@@ -754,6 +769,8 @@ typedef struct CreateSchemaStmt {
List *schemaElts; /* schema components (list of parsenodes) */
TempType temptype; /* if the schema is temp table's schema */
List *uuids; /* the list of uuid(only create sequence or table with serial type need) */
+ int charset;
+ char *collate;
} CreateSchemaStmt;
/* ----------------------
@@ -865,6 +882,10 @@ typedef enum AlterTableType {
AT_COMMENTS,
AT_InvisibleIndex,
AT_VisibleIndex,
+ AT_ModifyColumn,
+ AT_SetCharsetCollate,
+ AT_ConvertCharset,
+ AT_ResetPartitionno
} AlterTableType;
typedef enum AlterTableStatProperty { /* Additional Property for AlterTableCmd */
@@ -891,6 +912,8 @@ typedef struct AlterTableCmd { /* one subcommand of an ALTER TABLE */
AlterTableStatProperty additional_property; /* additional property for AlterTableCmd */
List *bucket_list; /* bucket list to drop */
bool alterGPI; /* check whether is global partition index alter statement */
+ bool is_first; /* a flag of ALTER TABLE ... ADD ... FIRST */
+ char *after_name; /* column name of ALTER TABLE ... ADD ... AFTER column_name */
} AlterTableCmd;
typedef struct AddTableIntoCBIState {
@@ -1057,19 +1080,27 @@ typedef struct ColumnDef {
Node *update_default;
} ColumnDef;
+/*
+ * PartitionDefState is a basic struct of all different partition types.
+ * NEVER create a new node with this struct!
+ */
+typedef struct PartitionDefState {
+ NodeTag type;
+ char* partitionName; /* name of partition */
+ List* boundary; /* the boundary of a partition */
+ char* tablespacename; /* table space to use, or NULL */
+ List* subPartitionDefState;
+ int4 partitionno; /* the partition no of current partition */
+} PartitionDefState;
+
/*
* definition of a range partition.
* range partition pattern: PARTITION [partitionName] LESS THAN [boundary]
*
*/
-typedef struct RangePartitionDefState {
- NodeTag type;
- char *partitionName; /* name of range partition */
- List *boundary; /* the boundary of a range partition */
- char *tablespacename; /* table space to use, or NULL */
+typedef struct RangePartitionDefState : PartitionDefState {
Const *curStartVal;
char *partitionInitName;
- List* subPartitionDefState;
} RangePartitionDefState;
typedef struct RangePartitionStartEndDefState {
@@ -1081,20 +1112,10 @@ typedef struct RangePartitionStartEndDefState {
char *tableSpaceName; /* table space to use, or NULL */
} RangePartitionStartEndDefState;
-typedef struct ListPartitionDefState {
- NodeTag type;
- char* partitionName; /* name of list partition */
- List* boundary; /* the boundary of a list partition */
- char* tablespacename; /* table space to use, or NULL */
- List* subPartitionDefState;
+typedef struct ListPartitionDefState : PartitionDefState {
} ListPartitionDefState;
-typedef struct HashPartitionDefState {
- NodeTag type;
- char* partitionName; /* name of hash partition */
- List* boundary; /* the boundary of a hash partition */
- char* tablespacename; /* table space to use, or NULL */
- List* subPartitionDefState;
+typedef struct HashPartitionDefState : PartitionDefState {
} HashPartitionDefState;
typedef struct RangePartitionindexDefState {
@@ -1140,6 +1161,7 @@ typedef struct PartitionState {
RowMovementValue rowMovement; /* default: for colum-stored table means true, for row-stored means false */
PartitionState *subPartitionState;
List *partitionNameList; /* existing partitionNameList for add partition */
+ int partitionsNum; /* for PARTITIONS/SUBPARTITIONS num clause, valid when greater than zero */
} PartitionState;
typedef struct AddPartitionState { /* ALTER TABLE ADD PARTITION */
@@ -1219,6 +1241,8 @@ typedef struct CreateStmt {
List *oldToastNode; /* toastnode of resizing table */
char relkind; /* type of object */
Node *autoIncStart; /* DefElem for AUTO_INCREMENT = value*/
+ int charset;
+ char *collate;
} CreateStmt;
typedef struct LedgerHashState {
@@ -1226,6 +1250,12 @@ typedef struct LedgerHashState {
uint64 histhash;
} LedgerHashState;
+typedef enum CopyFileType {
+ S_COPYFILE,
+ S_OUTFILE,
+ S_DUMPFILE
+} CopyFileType;
+
/* ----------------------
* Copy Statement
*
@@ -1248,6 +1278,7 @@ typedef struct CopyStmt {
AdaptMem memUsage;
bool encrypted;
LedgerHashState hashstate;
+ CopyFileType filetype;
} CopyStmt;
#define ATT_KV_UNDEFINED (0)
@@ -2185,6 +2216,70 @@ typedef struct FunctionSources {
char* bodySrc;
} FunctionSources;
+/*
+ * EventStatus - for event status
+ */
+typedef enum EventStatus {
+ EVENT_ENABLE,
+ EVENT_DISABLE,
+ EVENT_DISABLE_ON_SLAVE
+} EventStatus;
+
+/* ----------------------
+ * Create EVENT Statement
+ * ----------------------
+ */
+typedef struct CreateEventStmt {
+ NodeTag type;
+ char* def_name; /* definer name */
+ RangeVar* event_name;
+ Node* start_time_expr;
+ Node* end_time_expr;
+ Node *interval_time;
+ bool complete_preserve;
+ EventStatus event_status; /* ENABLE | DISABLE | DISABLE ON SLAVE */
+ char* event_comment_str;
+ char* event_query_str;
+ bool if_not_exists;
+} CreateEventStmt;
+
+
+/* ----------------------
+ * Alter EVENT Statement
+ * ----------------------
+ */
+
+typedef struct AlterEventStmt {
+ NodeTag type;
+ DefElem* def_name; /* definer name */
+ RangeVar* event_name;
+ DefElem* start_time_expr;
+ DefElem* end_time_expr;
+ DefElem* interval_time;
+ DefElem* complete_preserve;
+ DefElem* event_status; /* ENABLE | DISABLE | DISABLE ON SLAVE */
+ DefElem* event_comment_str;
+ DefElem* event_query_str;
+ DefElem* new_name;
+} AlterEventStmt;
+
+
+/* ----------------------
+ * Drop EVENT Statement
+ * ---------------------
+ */
+typedef struct DropEventStmt {
+ NodeTag type;
+ RangeVar* event_name;
+ bool missing_ok;
+} DropEventStmt;
+
+typedef struct ShowEventStmt {
+ NodeTag type;
+ Node* from_clause;
+ char* where_clause;
+} ShowEventStmt;
+
/* ----------------------
* DO Statement
*
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 1a4946ab1..1469f8c19 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -190,6 +190,8 @@ typedef struct PlannedStmt {
bool multi_node_hint;
uint64 uniqueSQLId;
+
+ uint32 cause_type; /* Possible Slow SQL Risks in the Plan. */
} PlannedStmt;
typedef struct NodeGroupInfoContext {
@@ -1092,6 +1094,7 @@ typedef struct HashJoin {
bool isSonicHash;
OpMemInfo mem_info; /* Memory info for inner hash table */
double joinRows;
+ List* hash_collations;
} HashJoin;
/* ----------------
@@ -1141,6 +1144,7 @@ typedef struct Group {
int numCols; /* number of grouping columns */
AttrNumber* grpColIdx; /* their indexes in the target list */
Oid* grpOperators; /* equality operators to compare with */
+ Oid* grp_collations;
} Group;
typedef struct VecGroup : public Group {
@@ -1201,6 +1205,7 @@ typedef struct Agg {
bool is_dummy; /* just for coop analysis, if true, agg node does nothing */
uint32 skew_optimize; /* skew optimize method for agg */
bool unique_check; /* we will report an error when meet duplicate in unique check mode */
+ Oid* grp_collations;
} Agg;
/* ----------------
@@ -1220,6 +1225,8 @@ typedef struct WindowAgg {
Node* startOffset; /* expression for starting bound, if any */
Node* endOffset; /* expression for ending bound, if any */
OpMemInfo mem_info; /* Memory info for window agg with agg func */
+ Oid* part_collations; /* collations for partition columns */
+ Oid* ord_collations; /* equality collations for ordering columns */
} WindowAgg;
typedef struct VecWindowAgg : public WindowAgg {
@@ -1233,6 +1240,7 @@ typedef struct Unique {
int numCols; /* number of columns to check for uniqueness */
AttrNumber* uniqColIdx; /* their indexes in the target list */
Oid* uniqOperators; /* equality operators to compare with */
+ Oid* uniq_collations; /* collations for equality comparisons */
} Unique;
/* ----------------
@@ -1277,6 +1285,7 @@ typedef struct SetOp {
int firstFlag; /* flag value for first input relation */
long numGroups; /* estimated number of groups in input */
OpMemInfo mem_info; /* Memory info for hashagg set op */
+ Oid* dup_collations;
} SetOp;
/* ----------------
diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h
index 46fd40e86..565acb1d1 100644
--- a/src/include/nodes/primnodes.h
+++ b/src/include/nodes/primnodes.h
@@ -118,6 +118,9 @@ typedef struct IntoClause {
bool ivm; /* true for WITH IVM */
char relkind; /* RELKIND_RELATION or RELKIND_MATVIEW */
List* userVarList; /* user define variables list */
+ List* copyOption; /* copyOption for select...into statement */
+ char* filename; /* filename for select...into statement */
+ bool is_outfile; /* true for outfile */
#ifdef PGXC
struct DistributeBy* distributeby; /* distribution to use, or NULL */
struct PGXCSubCluster* subcluster; /* subcluster node members */
@@ -843,6 +846,7 @@ typedef struct CaseExpr {
List* args; /* the arguments (list of WHEN clauses) */
Expr* defresult; /* the default result (ELSE clause) */
int location; /* token location, or -1 if unknown */
+ bool fromDecode; /* whether this was parsed from a DECODE expr; no need to (de-)serialize */
} CaseExpr;
/*
@@ -1177,10 +1181,7 @@ typedef struct CurrentOfExpr {
int cursor_param; /* refcursor parameter number, or 0 */
} CurrentOfExpr;
-/*
- * SetVariableExpr used for getting guc variable's value
- * only support while dbcompability is B and enable_set_variable_b_format is on
- */
+/* SetVariableExpr used for getting guc variable's value */
typedef struct {
Expr xpr;
char* name;
diff --git a/src/include/optimizer/aioptimizer.h b/src/include/optimizer/aioptimizer.h
index 37af7ba0e..72e961163 100644
--- a/src/include/optimizer/aioptimizer.h
+++ b/src/include/optimizer/aioptimizer.h
@@ -122,7 +122,9 @@ public:
loc = 0;
};
- ~TableScannerSample2()
+ ~TableScannerSample2(){};
+
+ void delete_tuple()
{
pfree(datumTuple.values);
pfree(datumTuple.typid);
diff --git a/src/include/optimizer/clauses.h b/src/include/optimizer/clauses.h
index 01f80f679..34a988b01 100644
--- a/src/include/optimizer/clauses.h
+++ b/src/include/optimizer/clauses.h
@@ -116,7 +116,7 @@ extern Node* eval_const_expressions_params(PlannerInfo* root, Node* node, ParamL
extern Node *eval_const_expression_value(PlannerInfo* root, Node* node, ParamListInfo boundParams);
-extern Node* simplify_subselect_expression(Node* node, ParamListInfo boundParams);
+extern Node* simplify_select_into_expression(Node* node, ParamListInfo boundParams);
extern Node* estimate_expression_value(PlannerInfo* root, Node* node, EState* estate = NULL);
diff --git a/src/include/optimizer/gplanmgr.h b/src/include/optimizer/gplanmgr.h
index d6777ff2d..f8c9d8ae7 100644
--- a/src/include/optimizer/gplanmgr.h
+++ b/src/include/optimizer/gplanmgr.h
@@ -63,13 +63,6 @@ typedef struct RelSelec
double selectivity;
} RelSelec;
-typedef struct IndexSelec
-{
- uint32 hashkey;
- Index dimensionId;
- double selectivity;
-} IndexSelec;
-
/*
* BaseRelCI
* confidential interval of a plan baserel.
@@ -118,11 +111,12 @@ extern PlanManager *PMGR_CreatePlanManager(MemoryContext parent_cxt, char* stmt_
void PMGR_ReleasePlanManager(CachedPlanSource *plansource);
CachedPlan *GetAdaptGenericPlan(CachedPlanSource *plansource,
ParamListInfo boundParams,
- List **qlist);
+ List **qlist,
+ bool *mode);
void DropStmtRoot(const char *stmt_name);
void DropAllStmtRoot(void);
extern List *eval_const_clauses_params(PlannerInfo *root, List *clauses);
extern bool selec_gplan_by_hint(const CachedPlanSource* plansource);
-
+extern void ReleaseCustomPlan(CachedPlanSource *plansource);
#endif
diff --git a/src/include/optimizer/planmain.h b/src/include/optimizer/planmain.h
index fe3ae7258..a0bd5f42e 100755
--- a/src/include/optimizer/planmain.h
+++ b/src/include/optimizer/planmain.h
@@ -83,15 +83,15 @@ extern Sort* make_sort_from_targetlist(PlannerInfo* root, Plan* lefttree, double
extern Sort* make_sort(PlannerInfo* root, Plan* lefttree, int numCols, AttrNumber* sortColIdx, Oid* sortOperators,
Oid* collations, bool* nullsFirst, double limit_tuples);
extern Agg* make_agg(PlannerInfo* root, List* tlist, List* qual, AggStrategy aggstrategy,
- const AggClauseCosts* aggcosts, int numGroupCols, AttrNumber* grpColIdx, Oid* grpOperators, long numGroups,
- Plan* lefttree, WindowLists* wflists, bool need_stream, bool trans_agg, List* groupingSets = NIL,
+ const AggClauseCosts* aggcosts, int numGroupCols, AttrNumber* grpColIdx, Oid* grpOperators, Oid* grp_collations,
+ long numGroups, Plan* lefttree, WindowLists* wflists, bool need_stream, bool trans_agg, List* groupingSets = NIL,
Size hash_entry_size = 0, bool add_width = false, AggOrientation agg_orientation = AGG_LEVEL_1_INTENT,
bool unique_check = true);
extern WindowAgg* make_windowagg(PlannerInfo* root, List* tlist, List* windowFuncs, Index winref, int partNumCols,
AttrNumber* partColIdx, Oid* partOperators, int ordNumCols, AttrNumber* ordColIdx, Oid* ordOperators,
- int frameOptions, Node* startOffset, Node* endOffset, Plan* lefttree);
+ int frameOptions, Node* startOffset, Node* endOffset, Plan* lefttree, Oid *part_collations, Oid *ord_collations);
extern Group* make_group(PlannerInfo* root, List* tlist, List* qual, int numGroupCols, AttrNumber* grpColIdx,
- Oid* grpOperators, double numGroups, Plan* lefttree);
+ Oid* grpOperators, double numGroups, Plan* lefttree, Oid* grp_collations);
extern Plan* materialize_finished_plan(Plan* subplan, bool materialize_above_stream = false, bool vectorized = false);
extern Unique* make_unique(Plan* lefttree, List* distinctList);
extern LockRows* make_lockrows(PlannerInfo* root, Plan* lefttree);
diff --git a/src/include/optimizer/pruning.h b/src/include/optimizer/pruning.h
index 7b33b6b8c..116ef3442 100644
--- a/src/include/optimizer/pruning.h
+++ b/src/include/optimizer/pruning.h
@@ -52,7 +52,6 @@ typedef struct PruningContext {
/* used for slice pruning */
Index varno;
ParamListInfo boundParams;
- PartitionMap *partmap;
} PruningContext;
typedef enum PartKeyColumnRangeMode {
@@ -84,22 +83,23 @@ extern IndexesUsableType eliminate_partition_index_unusable(Oid IndexOid, Prunin
void destroyPruningResult(PruningResult* pruningResult);
void partitionPruningFromBoundary(PruningContext *context, PruningResult* pruningResult);
List* restrictInfoListToExprList(List* restrictInfoList);
-void generateListFromPruningBM(PruningResult* result);
+void generateListFromPruningBM(PruningResult* result, PartitionMap *partmap = NULL);
PruningResult* partitionPruningWalker(Expr* expr, PruningContext* pruningCtx);
PruningResult* partitionPruningForExpr(PlannerInfo* root, RangeTblEntry* rte, Relation rel, Expr* expr);
PruningResult* partitionPruningForRestrictInfo(
- PlannerInfo* root, RangeTblEntry* rte, Relation rel, List* restrictInfoList, PartitionMap *partmap);
+ PlannerInfo* root, RangeTblEntry* rte, Relation rel, List* restrictInfoList);
PruningResult* PartitionPruningForPartitionList(RangeTblEntry* rte, Relation rel);
extern PruningResult* copyPruningResult(PruningResult* srcPruningResult);
-extern Oid getPartitionOidFromSequence(Relation relation, int partSeq, PartitionMap *oldmap = NULL);
+extern Oid getPartitionOidFromSequence(Relation relation, int partSeq, int partitionno = 0);
extern int varIsInPartitionKey(int attrNo, int2vector* partKeyAttrs, int partKeyNum);
extern bool checkPartitionIndexUnusable(Oid indexOid, int partItrs, PruningResult* pruning_result);
extern PruningResult* GetPartitionInfo(PruningResult* result, EState* estate, Relation current_relation);
static inline PartitionMap* GetPartitionMap(PruningContext *context)
{
- return PointerIsValid(context->partmap) ? context->partmap : context->GetPartitionMap(context->relation);
+ return context->GetPartitionMap(context->relation);
}
-extern SubPartitionPruningResult* GetSubPartitionPruningResult(List* selectedSubPartitions, int partSeq);
+extern SubPartitionPruningResult* GetSubPartitionPruningResult(List* selectedSubPartitions, int partSeq,
+ int partitionno);
void MergePartitionListsForPruning(RangeTblEntry* rte, Relation rel, PruningResult* pruningRes);
#endif /* PRUNING_H_ */
diff --git a/src/include/optimizer/tlist.h b/src/include/optimizer/tlist.h
index 3b3fa9125..28dadd61e 100644
--- a/src/include/optimizer/tlist.h
+++ b/src/include/optimizer/tlist.h
@@ -37,6 +37,7 @@ extern List* get_sortgrouplist_exprs(List* sgClauses, List* targetList);
extern SortGroupClause* get_sortgroupref_clause(Index sortref, List* clauses);
extern Oid* extract_grouping_ops(List* groupClause);
+extern Oid* extract_grouping_collations(List* groupClause, List* tlist);
extern AttrNumber* extract_grouping_cols(List* groupClause, List* tlist);
extern bool grouping_is_sortable(List* groupClause);
extern bool grouping_is_hashable(List* groupClause);
diff --git a/src/include/parser/analyze.h b/src/include/parser/analyze.h
index d543943b8..dbcb7c022 100644
--- a/src/include/parser/analyze.h
+++ b/src/include/parser/analyze.h
@@ -33,7 +33,7 @@ extern Query* parse_analyze_varparams(Node* parseTree, const char* sourceText, O
extern Query* parse_sub_analyze(Node* parseTree, ParseState* parentParseState, CommonTableExpr* parentCTE,
bool locked_from_parent, bool resolve_unknowns);
-
+extern Node* parse_into_claues(Node* parse_tree, IntoClause* intoClause);
extern List* transformInsertRow(ParseState* pstate, List* exprlist, List* stmtcols, List* icolumns, List* attrnos);
extern Query* transformTopLevelStmt(
ParseState* pstate, Node* parseTree, bool isFirstNode = true, bool isCreateView = false);
@@ -91,6 +91,7 @@ extern void resetOperatorPlusFlag();
extern void fixResTargetNameWithTableNameRef(Relation rd, RangeVar* rel, ResTarget* res);
extern void fixResTargetListWithTableNameRef(Relation rd, RangeVar* rel, List* clause_list);
+extern void UpdateParseCheck(ParseState *pstate, Node *qry);
#endif /* !FRONTEND_PARSER */
extern bool getOperatorPlusFlag();
diff --git a/src/include/parser/kwlist.h b/src/include/parser/kwlist.h
index 52779af3e..761318dc8 100644
--- a/src/include/parser/kwlist.h
+++ b/src/include/parser/kwlist.h
@@ -97,10 +97,12 @@ PG_KEYWORD("case", CASE, RESERVED_KEYWORD)
PG_KEYWORD("cast", CAST, RESERVED_KEYWORD)
PG_KEYWORD("catalog", CATALOG_P, UNRESERVED_KEYWORD)
PG_KEYWORD("chain", CHAIN, UNRESERVED_KEYWORD)
+PG_KEYWORD("change", CHANGE, UNRESERVED_KEYWORD)
PG_KEYWORD("char", CHAR_P, COL_NAME_KEYWORD)
PG_KEYWORD("character", CHARACTER, COL_NAME_KEYWORD)
PG_KEYWORD("characteristics", CHARACTERISTICS, UNRESERVED_KEYWORD)
PG_KEYWORD("characterset", CHARACTERSET, UNRESERVED_KEYWORD)
+PG_KEYWORD("charset", CHARSET, UNRESERVED_KEYWORD)
PG_KEYWORD("check", CHECK, RESERVED_KEYWORD)
PG_KEYWORD("checkpoint", CHECKPOINT, UNRESERVED_KEYWORD)
PG_KEYWORD("class", CLASS, UNRESERVED_KEYWORD)
@@ -119,6 +121,7 @@ PG_KEYWORD("collation", COLLATION, TYPE_FUNC_NAME_KEYWORD)
PG_KEYWORD("column", COLUMN, RESERVED_KEYWORD)
PG_KEYWORD("column_encryption_key", COLUMN_ENCRYPTION_KEY, UNRESERVED_KEYWORD)
PG_KEYWORD("column_encryption_keys", COLUMN_ENCRYPTION_KEYS, UNRESERVED_KEYWORD)
+PG_KEYWORD("columns", COLUMNS, UNRESERVED_KEYWORD)
PG_KEYWORD("comment", COMMENT, UNRESERVED_KEYWORD)
PG_KEYWORD("comments", COMMENTS, UNRESERVED_KEYWORD)
PG_KEYWORD("commit", COMMIT, UNRESERVED_KEYWORD)
@@ -126,6 +129,7 @@ PG_KEYWORD("committed", COMMITTED, UNRESERVED_KEYWORD)
PG_KEYWORD("compact", COMPACT, TYPE_FUNC_NAME_KEYWORD)
PG_KEYWORD("compatible_illegal_chars", COMPATIBLE_ILLEGAL_CHARS, UNRESERVED_KEYWORD)
PG_KEYWORD("complete", COMPLETE, UNRESERVED_KEYWORD)
+PG_KEYWORD("completion", COMPLETION, UNRESERVED_KEYWORD)
PG_KEYWORD("compress", COMPRESS, UNRESERVED_KEYWORD)
PG_KEYWORD("concurrently", CONCURRENTLY, TYPE_FUNC_NAME_KEYWORD)
PG_KEYWORD("condition", CONDITION, UNRESERVED_KEYWORD)
@@ -139,6 +143,7 @@ PG_KEYWORD("content", CONTENT_P, UNRESERVED_KEYWORD)
PG_KEYWORD("continue", CONTINUE_P, UNRESERVED_KEYWORD)
PG_KEYWORD("contview", CONTVIEW, UNRESERVED_KEYWORD)
PG_KEYWORD("conversion", CONVERSION_P, UNRESERVED_KEYWORD)
+PG_KEYWORD("convert", CONVERT_P, UNRESERVED_KEYWORD)
PG_KEYWORD("coordinator", COORDINATOR, UNRESERVED_KEYWORD)
PG_KEYWORD("coordinators", COORDINATORS, UNRESERVED_KEYWORD)
PG_KEYWORD("copy", COPY, UNRESERVED_KEYWORD)
@@ -201,6 +206,7 @@ PG_KEYWORD("document", DOCUMENT_P, UNRESERVED_KEYWORD)
PG_KEYWORD("domain", DOMAIN_P, UNRESERVED_KEYWORD)
PG_KEYWORD("double", DOUBLE_P, UNRESERVED_KEYWORD)
PG_KEYWORD("drop", DROP, UNRESERVED_KEYWORD)
+PG_KEYWORD("dumpfile", DUMPFILE, UNRESERVED_KEYWORD)
PG_KEYWORD("duplicate", DUPLICATE, UNRESERVED_KEYWORD)
PG_KEYWORD("each", EACH, UNRESERVED_KEYWORD)
PG_KEYWORD("elastic", ELASTIC, UNRESERVED_KEYWORD)
@@ -213,12 +219,16 @@ PG_KEYWORD("encrypted_value", ENCRYPTED_VALUE, UNRESERVED_KEYWORD)
PG_KEYWORD("encryption", ENCRYPTION, UNRESERVED_KEYWORD)
PG_KEYWORD("encryption_type", ENCRYPTION_TYPE, UNRESERVED_KEYWORD)
PG_KEYWORD("end", END_P, RESERVED_KEYWORD)
+PG_KEYWORD("ends", ENDS, UNRESERVED_KEYWORD)
PG_KEYWORD("enforced", ENFORCED, UNRESERVED_KEYWORD)
PG_KEYWORD("enum", ENUM_P, UNRESERVED_KEYWORD)
PG_KEYWORD("eol", EOL, UNRESERVED_KEYWORD)
PG_KEYWORD("errors", ERRORS, UNRESERVED_KEYWORD)
PG_KEYWORD("escape", ESCAPE, UNRESERVED_KEYWORD)
+PG_KEYWORD("escaped", ESCAPED, UNRESERVED_KEYWORD)
PG_KEYWORD("escaping", ESCAPING, UNRESERVED_KEYWORD)
+PG_KEYWORD("event", EVENT, UNRESERVED_KEYWORD)
+PG_KEYWORD("events", EVENTS, UNRESERVED_KEYWORD)
PG_KEYWORD("every", EVERY, UNRESERVED_KEYWORD)
PG_KEYWORD("except", EXCEPT, RESERVED_KEYWORD)
PG_KEYWORD("exchange", EXCHANGE, UNRESERVED_KEYWORD)
@@ -335,6 +345,7 @@ PG_KEYWORD("less", LESS, RESERVED_KEYWORD)
PG_KEYWORD("level", LEVEL, UNRESERVED_KEYWORD)
PG_KEYWORD("like", LIKE, TYPE_FUNC_NAME_KEYWORD)
PG_KEYWORD("limit", LIMIT, RESERVED_KEYWORD)
+PG_KEYWORD("lines", LINES, UNRESERVED_KEYWORD)
PG_KEYWORD("list", LIST, UNRESERVED_KEYWORD)
PG_KEYWORD("listen", LISTEN, UNRESERVED_KEYWORD)
PG_KEYWORD("load", LOAD, UNRESERVED_KEYWORD)
@@ -419,6 +430,7 @@ PG_KEYWORD("or", OR, RESERVED_KEYWORD)
PG_KEYWORD("order", ORDER, RESERVED_KEYWORD)
PG_KEYWORD("out", OUT_P, COL_NAME_KEYWORD)
PG_KEYWORD("outer", OUTER_P, TYPE_FUNC_NAME_KEYWORD)
+PG_KEYWORD("outfile", OUTFILE, UNRESERVED_KEYWORD)
PG_KEYWORD("over", OVER, UNRESERVED_KEYWORD)
PG_KEYWORD("overlaps", OVERLAPS, TYPE_FUNC_NAME_KEYWORD)
PG_KEYWORD("overlay", OVERLAY, COL_NAME_KEYWORD)
@@ -520,6 +532,7 @@ PG_KEYWORD("rowtype", ROWTYPE_P, UNRESERVED_KEYWORD)
PG_KEYWORD("rule", RULE, UNRESERVED_KEYWORD)
PG_KEYWORD("sample", SAMPLE, UNRESERVED_KEYWORD)
PG_KEYWORD("savepoint", SAVEPOINT, UNRESERVED_KEYWORD)
+PG_KEYWORD("schedule", SCHEDULE, UNRESERVED_KEYWORD)
PG_KEYWORD("schema", SCHEMA, UNRESERVED_KEYWORD)
PG_KEYWORD("scroll", SCROLL, UNRESERVED_KEYWORD)
PG_KEYWORD("search", SEARCH, UNRESERVED_KEYWORD)
@@ -546,6 +559,7 @@ PG_KEYWORD("similar", SIMILAR, TYPE_FUNC_NAME_KEYWORD)
PG_KEYWORD("simple", SIMPLE, UNRESERVED_KEYWORD)
PG_KEYWORD("size", SIZE, UNRESERVED_KEYWORD)
PG_KEYWORD("skip", SKIP, UNRESERVED_KEYWORD)
+PG_KEYWORD("slave", SLAVE, UNRESERVED_KEYWORD)
PG_KEYWORD("slice", SLICE, UNRESERVED_KEYWORD)
PG_KEYWORD("smalldatetime", SMALLDATETIME, COL_NAME_KEYWORD)
PG_KEYWORD("smalldatetime_format", SMALLDATETIME_FORMAT_P, UNRESERVED_KEYWORD)
@@ -559,6 +573,8 @@ PG_KEYWORD("split", SPLIT, UNRESERVED_KEYWORD)
PG_KEYWORD("stable", STABLE, UNRESERVED_KEYWORD)
PG_KEYWORD("standalone", STANDALONE_P, UNRESERVED_KEYWORD)
PG_KEYWORD("start", START, UNRESERVED_KEYWORD)
+PG_KEYWORD("starting", STARTING, UNRESERVED_KEYWORD)
+PG_KEYWORD("starts", STARTS, UNRESERVED_KEYWORD)
PG_KEYWORD("statement", STATEMENT, UNRESERVED_KEYWORD)
PG_KEYWORD("statement_id", STATEMENT_ID, UNRESERVED_KEYWORD)
PG_KEYWORD("statistics", STATISTICS, UNRESERVED_KEYWORD)
@@ -572,6 +588,7 @@ PG_KEYWORD("stream", STREAM, UNRESERVED_KEYWORD)
PG_KEYWORD("strict", STRICT_P, UNRESERVED_KEYWORD)
PG_KEYWORD("strip", STRIP_P, UNRESERVED_KEYWORD)
PG_KEYWORD("subpartition", SUBPARTITION, UNRESERVED_KEYWORD)
+PG_KEYWORD("subpartitions", SUBPARTITIONS, UNRESERVED_KEYWORD)
PG_KEYWORD("subscription", SUBSCRIPTION, UNRESERVED_KEYWORD)
PG_KEYWORD("substring", SUBSTRING, COL_NAME_KEYWORD)
PG_KEYWORD("symmetric", SYMMETRIC, RESERVED_KEYWORD)
diff --git a/src/include/parser/parse_coerce.h b/src/include/parser/parse_coerce.h
index af3a7c59d..1c76dc0d8 100644
--- a/src/include/parser/parse_coerce.h
+++ b/src/include/parser/parse_coerce.h
@@ -72,6 +72,7 @@ extern CoercionPathType find_coercion_pathway(
extern CoercionPathType find_typmod_coercion_function(Oid typeId, Oid* funcid);
extern void expression_error_callback(void* arg);
+extern Node* coerce_to_target_charset(Node* expr, int target_charset, Oid targetTypeId);
extern Node *transferConstToAconst(Node *node);
diff --git a/src/include/parser/parse_hint.h b/src/include/parser/parse_hint.h
index 92b94ce03..9135928b5 100644
--- a/src/include/parser/parse_hint.h
+++ b/src/include/parser/parse_hint.h
@@ -71,7 +71,7 @@
typedef struct pull_hint_warning_context {
List* warning;
-} pull_qual_vars_context;
+} pull_hint_warning_context;
#define append_warning_to_list(root, hint, format, ...) \
do { \
@@ -316,7 +316,7 @@ extern void transform_hints(PlannerInfo* root, Query* parse, HintState* hstate);
extern void check_scan_hint_validity(PlannerInfo* root);
extern void adjust_scanhint_relid(HintState* hstate, Index oldIdx, Index newIdx);
-extern bool pull_hint_warning_walker(Node* node, pull_qual_vars_context* context);
+extern bool pull_hint_warning_walker(Node* node, pull_hint_warning_context* context);
extern List* retrieve_query_hint_warning(Node* parse);
extern void output_utility_hint_warning(Node* query, int lev);
extern void output_hint_warning(List* warning, int lev);
diff --git a/src/include/parser/parse_node.h b/src/include/parser/parse_node.h
index db9caaede..aded83a74 100644
--- a/src/include/parser/parse_node.h
+++ b/src/include/parser/parse_node.h
@@ -177,7 +177,7 @@ struct ParseState {
bool p_hasSynonyms;
List* p_target_relation;
List* p_target_rangetblentry;
- bool p_is_case_when;
+ bool p_is_decode;
/*
* used for start with...connect by rewrite
diff --git a/src/include/parser/parse_type.h b/src/include/parser/parse_type.h
index 8314c156b..b2df6ec57 100644
--- a/src/include/parser/parse_type.h
+++ b/src/include/parser/parse_type.h
@@ -32,7 +32,8 @@ extern char* TypeNameToString(const TypeName* typname);
extern char* TypeNameListToString(List* typenames);
extern Oid LookupCollation(ParseState* pstate, List* collnames, int location);
-extern Oid GetColumnDefCollation(ParseState* pstate, ColumnDef* coldef, Oid typeOid);
+extern Oid GetColumnDefCollation(ParseState* pstate, ColumnDef* coldef, Oid typeOid,
+ Oid rel_coll_oid = InvalidOid);
extern Type typeidType(Oid id);
diff --git a/src/include/parser/parse_utilcmd.h b/src/include/parser/parse_utilcmd.h
index 0673f4832..50231c2b5 100644
--- a/src/include/parser/parse_utilcmd.h
+++ b/src/include/parser/parse_utilcmd.h
@@ -57,6 +57,8 @@ typedef struct {
typedef enum TransformTableType { TRANSFORM_INVALID = 0, TRANSFORM_TO_HASHBUCKET, TRANSFORM_TO_NONHASHBUCKET} TransformTableType;
extern void checkPartitionSynax(CreateStmt *stmt);
+extern Oid fill_relation_collation(const char* collate, int charset, List** options,
+ Oid nsp_coll_oid = InvalidOid);
extern List* transformCreateStmt(CreateStmt* stmt, const char* queryString, const List* uuids,
bool preCheck, Oid *namespaceid, bool isFirstNode = true);
extern List* transformAlterTableStmt(Oid relid, AlterTableStmt* stmt, const char* queryString);
@@ -93,5 +95,9 @@ extern char* getTmptableIndexName(const char* srcSchema, const char* srcIndex);
extern IndexStmt* generateClonedIndexStmt(
CreateStmtContext* cxt, Relation source_idx, const AttrNumber* attmap, int attmap_length, Relation rel,
TransformTableType transformType);
+extern int get_charset_by_collation(Oid colloid);
+extern Oid get_default_collation_by_charset(int charset);
+extern Oid transform_default_collation(const char* collate, int charset, Oid def_coll_oid = InvalidOid,
+ bool is_attr = false);
#endif /* PARSE_UTILCMD_H */
diff --git a/src/include/pgaudit.h b/src/include/pgaudit.h
index c9334af33..e3aee317d 100644
--- a/src/include/pgaudit.h
+++ b/src/include/pgaudit.h
@@ -29,9 +29,14 @@
extern THR_LOCAL bool Audit_delete;
-#define AUDIT_EXEC_ENABLED (u_sess->attr.attr_security.Audit_enabled && u_sess->attr.attr_security.Audit_Exec)
-#define AUDIT_COPY_ENABLED (u_sess->attr.attr_security.Audit_enabled && u_sess->attr.attr_security.Audit_Copy)
-#define CHECK_AUDIT_DDL(type) ((unsigned int)u_sess->attr.attr_security.Audit_DDL & (1 << (type)))
+#define AUDIT_EXEC_ENABLED (u_sess->attr.attr_security.Audit_enabled && \
+(u_sess->attr.attr_security.Audit_Exec || audit_check_full_audit_user()))
+#define AUDIT_SYSTEM_EXEC_ENABLED (u_sess->attr.attr_security.Audit_enabled && \
+(u_sess->attr.attr_security.audit_system_function_exec || audit_check_full_audit_user()))
+#define AUDIT_COPY_ENABLED (u_sess->attr.attr_security.Audit_enabled && \
+(u_sess->attr.attr_security.Audit_Copy || audit_check_full_audit_user()))
+#define CHECK_AUDIT_DDL(type) ((((unsigned int)u_sess->attr.attr_security.Audit_DDL & (1 << (type))) > 0) \
+|| audit_check_full_audit_user())
#define CHECK_AUDIT_LOGIN(type) (unsigned int)u_sess->attr.attr_security.Audit_Session & (1 << (type));
#define PG_QUERY_AUDIT_ARGS_MAX 3
@@ -97,6 +102,7 @@ typedef enum {
AUDIT_DML_ACTION_SELECT,
AUDIT_INTERNAL_EVENT,
AUDIT_FUNCTION_EXEC,
+ AUDIT_SYSTEM_FUNCTION_EXEC,
AUDIT_COPY_TO,
AUDIT_COPY_FROM,
AUDIT_SET_PARAMETER,
@@ -110,7 +116,8 @@ typedef enum {
AUDIT_DDL_GLOBALCONFIG,
AUDIT_DDL_PUBLICATION_SUBSCRIPTION,
AUDIT_DDL_FOREIGN_DATA_WRAPPER,
- AUDIT_DDL_SQL_PATCH
+ AUDIT_DDL_SQL_PATCH,
+ AUDIT_DDL_EVENT
} AuditType;
/* keep the same sequence with parameter audit_system_object */
@@ -141,7 +148,8 @@ typedef enum {
DDL_PUBLICATION_SUBSCRIPTION,
DDL_GLOBALCONFIG,
DDL_FOREIGN_DATA_WRAPPER,
- DDL_SQL_PATCH
+ DDL_SQL_PATCH,
+ DDL_EVENT
} DDLType;
/*
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index 4b92eed1f..3df0fe483 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -1230,6 +1230,8 @@ typedef enum WaitState {
STATE_WAIT_SYNC_PRODUCER_NEXT_STEP,
STATE_GTM_SET_CONSISTENCY_POINT,
STATE_WAIT_SYNC_BGWORKERS,
+ STATE_STANDBY_READ_RECOVERY_CONFLICT,
+ STATE_STANDBY_GET_SNAPSHOT,
STATE_WAIT_NUM // MUST be last, DO NOT use this value.
} WaitState;
@@ -3038,6 +3040,8 @@ typedef struct IoWaitStatGlobalInfo {
} IoWaitStatGlobalInfo;
void pgstat_release_session_memory_entry();
+extern void gs_stat_free_stat_node(PgBackendStatusNode* node);
+extern void gs_stat_free_stat_beentry(PgBackendStatus* beentry);
#define MAX_PATH 256
diff --git a/src/include/pgxc/execRemote.h b/src/include/pgxc/execRemote.h
old mode 100644
new mode 100755
index ac11e27a4..8e1818db9
--- a/src/include/pgxc/execRemote.h
+++ b/src/include/pgxc/execRemote.h
@@ -260,8 +260,8 @@ StringInfo* SendExplainToDNs(ExplainState*, RemoteQuery*, int*, const char*);
bool CheckPrepared(RemoteQuery* rq, Oid nodeoid);
void FindExecNodesInPBE(RemoteQueryState* planstate, ExecNodes* exec_nodes, RemoteQueryExecType exec_type);
extern PGXCNodeHandle* GetRegisteredTransactionNodes(bool write);
-extern bool check_receive_buffer(RemoteQueryState* combiner, int tapenum, bool* has_checked, int* has_err_idx);
-#endif
+extern bool check_errmsg_for_receive_buffer(RemoteQueryState* combiner, int tapenum,
+ bool* has_checked, int* has_err_idx);
#ifdef ENABLE_UT
#include "workload/cpwlm.h"
@@ -272,5 +272,5 @@ extern PGXCNodeAllHandles* make_cp_conn(ComputePoolConfig** configs, int cnum, i
extern List* get_dnlist_for_hdfs(int fnum);
extern void ReloadTransactionNodes(void);
extern void PgFdwRemoteReply(StringInfo msg);
-
+#endif
#endif
diff --git a/src/include/postgres_fe.h b/src/include/postgres_fe.h
index 99b566a8d..fed05e7a5 100644
--- a/src/include/postgres_fe.h
+++ b/src/include/postgres_fe.h
@@ -53,7 +53,8 @@
/* Type of database; increase for sql compatibility */
typedef enum {
ORA_FORMAT,
- TD_FORMAT
+ TD_FORMAT,
+ M_FORMAT
} DatabaseType;
#endif // HAVE_DATABASE_TYPE
diff --git a/src/include/postmaster/bgwriter.h b/src/include/postmaster/bgwriter.h
index 106c2b15d..6255443f2 100644
--- a/src/include/postmaster/bgwriter.h
+++ b/src/include/postmaster/bgwriter.h
@@ -66,6 +66,7 @@ extern void drop_rel_one_fork_buffers();
typedef struct DelFileTag {
RelFileNode rnode;
int32 maxSegNo;
+ bool fileUnlink;
} DelFileTag;
typedef struct ForkRelFileNode {
diff --git a/src/include/postmaster/postmaster.h b/src/include/postmaster/postmaster.h
index 66f76be7a..f99052577 100755
--- a/src/include/postmaster/postmaster.h
+++ b/src/include/postmaster/postmaster.h
@@ -216,6 +216,8 @@ extern void set_disable_conn_mode(void);
((port)->cmdline_options != NULL && strstr((port)->cmdline_options, "remotetype=coordinator") != NULL)
#else
#define IsConnPortFromCoord(port) false
+extern bool get_addr_from_socket(int sock, struct sockaddr *saddr);
+extern int get_ip_port_from_addr(char* sock_ip, int* port, struct sockaddr saddr);
#endif
const char *GetSSServerMode();
@@ -230,7 +232,7 @@ extern void startup_die(SIGNAL_ARGS);
extern void PortInitialize(Port* port, knl_thread_arg* arg);
extern void PreClientAuthorize();
extern int ClientConnInitilize(Port* port);
-extern void CheckClientIp(Port* port);
+extern bool CheckClientIp(Port* port);
extern Backend* GetBackend(int slot);
extern Backend* AssignFreeBackEnd(int slot);
extern long PostmasterRandom(void);
diff --git a/src/include/replication/pgoutput.h b/src/include/replication/pgoutput.h
index 0a2fd3570..088e235af 100644
--- a/src/include/replication/pgoutput.h
+++ b/src/include/replication/pgoutput.h
@@ -13,11 +13,11 @@
#ifndef PGOUTPUT_H
#define PGOUTPUT_H
+#include "replication/logical.h"
#include "utils/palloc.h"
typedef struct PGOutputData {
- MemoryContext context; /* private memory context for transient
- * allocations */
+ PluginTestDecodingData common;
/* client info */
uint32 protocol_version;
diff --git a/src/include/replication/replicainternal.h b/src/include/replication/replicainternal.h
index b1b747398..ea1c85d1f 100755
--- a/src/include/replication/replicainternal.h
+++ b/src/include/replication/replicainternal.h
@@ -44,6 +44,21 @@ typedef enum {
HA_LISTEN_SOCKET
} ListenSocketType;
+typedef enum {
+ POST_PORT_SOCKET = 0,
+ POOLER_PORT_SOCKET,
+ BOTH_PORT_SOCKETS
+} RecreateListenSocketType;
+
+typedef enum {
+ UNUSED_LISTEN_CHANEL = 0,
+ NORMAL_LISTEN_CHANEL, /* type of listen_addresses */
+ REPL_LISTEN_CHANEL, /* type of ReplConnArray or CrossClusterReplConnArray */
+ EXT_LISTEN_CHANEL,
+ DOLPHIN_LISTEN_CHANEL,
+ UNKNOWN_LISTEN_CHANEL
+} ListenChanelType;
+
typedef enum {
UNKNOWN_MODE = 0,
NORMAL_MODE,
diff --git a/src/include/rewrite/rewriteSupport.h b/src/include/rewrite/rewriteSupport.h
index 71248e9c3..501e9e65b 100644
--- a/src/include/rewrite/rewriteSupport.h
+++ b/src/include/rewrite/rewriteSupport.h
@@ -25,6 +25,7 @@ extern Oid get_rewrite_oid(Oid relid, const char* rulename, bool missing_ok);
extern Oid get_rewrite_oid_without_relid(const char* rulename, Oid* relid, bool missing_ok);
extern char* get_rewrite_rulename(Oid ruleid, bool missing_ok);
extern bool rel_has_rule(Oid relid, char ev_type);
+extern Oid get_rewrite_relid(Oid ruleid, bool missing_ok);
#endif /* REWRITESUPPORT_H */
diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h
index 0d74ff177..96caef04f 100755
--- a/src/include/storage/proc.h
+++ b/src/include/storage/proc.h
@@ -217,13 +217,17 @@ struct PGPROC {
bool snapshotGroupMember;
/* next ProcArray group member waiting for snapshot getting */
pg_atomic_uint32 snapshotGroupNext;
- Snapshot snapshotGroup;
+ volatile Snapshot snapshotGroup;
TransactionId xminGroup;
TransactionId xmaxGroup;
TransactionId globalxminGroup;
volatile TransactionId replicationSlotXminGroup;
volatile TransactionId replicationSlotCatalogXminGroup;
+ TransactionId snapXmax; /* maximal running XID as it was when we were
+ * getting our snapshot. */
+ CommitSeqNo snapCSN; /* csn as it was when we were getting our snapshot. */
+
/* commit sequence number send down */
CommitSeqNo commitCSN;
diff --git a/src/include/storage/smgr/fd.h b/src/include/storage/smgr/fd.h
old mode 100644
new mode 100755
index e8122e7af..f6a6ed651
--- a/src/include/storage/smgr/fd.h
+++ b/src/include/storage/smgr/fd.h
@@ -187,5 +187,6 @@ extern bool repair_deleted_file_check(RelFileNodeForkNum fileNode, int fd);
#define PG_TEMP_FILES_DIR "pgsql_tmp"
#define PG_TEMP_FILE_PREFIX "pgsql_tmp"
#define SS_PG_TEMP_FILES_DIR "ss_pgsql_tmp"
+#define EIO_RETRY_TIMES 3
#endif /* FD_H */
diff --git a/src/include/tcop/tcopprot.h b/src/include/tcop/tcopprot.h
index 7a92f7d2c..cb74854cc 100644
--- a/src/include/tcop/tcopprot.h
+++ b/src/include/tcop/tcopprot.h
@@ -39,6 +39,7 @@ typedef enum {
extern List* pg_parse_query(const char* query_string, List** query_string_locationlist = NULL,
List* (*parser_hook)(const char*, List**) = NULL);
extern List* pg_analyze_and_rewrite(Node* parsetree, const char* query_string, Oid* paramTypes, int numParams);
+extern List* pg_rewrite_query(Query* query);
extern List* pg_analyze_and_rewrite_params(
Node* parsetree, const char* query_string, ParserSetupHook parserSetup, void* parserSetupArg);
extern PlannedStmt* pg_plan_query(
diff --git a/src/include/threadpool/threadpool.h b/src/include/threadpool/threadpool.h
index efc72b90d..391a0100a 100644
--- a/src/include/threadpool/threadpool.h
+++ b/src/include/threadpool/threadpool.h
@@ -40,7 +40,7 @@
#define IS_THREAD_POOL_SCHEDULER (t_thrd.role == THREADPOOL_SCHEDULER)
#define IS_THREAD_POOL_STREAM (t_thrd.role == THREADPOOL_STREAM)
#define IS_THREAD_POOL_SESSION (u_sess->session_id > 0)
-#define BackendIdForTempRelations (ENABLE_THREAD_POOL ? (BackendId)u_sess->session_ctr_index : t_thrd.proc_cxt.MyBackendId)
+#define BackendIdForTempRelations ((ENABLE_THREAD_POOL && IS_THREAD_POOL_WORKER) ? (BackendId)u_sess->session_ctr_index : t_thrd.proc_cxt.MyBackendId)
#define THREAD_CORE_RATIO 1
#define DEFAULT_THREAD_POOL_SIZE 16
#define DEFAULT_THREAD_POOL_GROUPS 2
diff --git a/src/include/utils/be_module.h b/src/include/utils/be_module.h
index 6585940d4..62a13a0fd 100755
--- a/src/include/utils/be_module.h
+++ b/src/include/utils/be_module.h
@@ -129,6 +129,7 @@ enum ModuleId {
MOD_SEC_SDD, /* Security sub-module: sensitive data discovery */
MOD_SEC_TDE, /* Security sub-module: transparent data encryption */
+ MOD_COMM_FRAMEWORK, /* for basic communication framework */
MOD_COMM_PROXY, /* for cbb comm_proxy */
MOD_COMM_POOLER, /* for pooler communication */
MOD_VACUUM, /* lazy vacuum */
@@ -146,6 +147,7 @@ enum ModuleId {
MOD_DSS, /* dss api module */
MOD_GPI, /* debug info for global partition index */
+ MOD_PARTITION,
/*
* Add your module id above.
diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h
index 7aa13b9eb..89a1aac49 100644
--- a/src/include/utils/builtins.h
+++ b/src/include/utils/builtins.h
@@ -898,6 +898,7 @@ extern Datum pg_get_viewdef_ext(PG_FUNCTION_ARGS);
extern Datum pg_get_viewdef_wrap(PG_FUNCTION_ARGS);
extern Datum pg_get_viewdef_name(PG_FUNCTION_ARGS);
extern Datum pg_get_viewdef_name_ext(PG_FUNCTION_ARGS);
+extern char* pg_get_viewdef_string(Oid viewid);
extern Datum pg_get_indexdef(PG_FUNCTION_ARGS);
extern Datum pg_get_indexdef_for_dump(PG_FUNCTION_ARGS);
extern Datum pg_get_indexdef_ext(PG_FUNCTION_ARGS);
@@ -905,6 +906,7 @@ extern char* pg_get_indexdef_string(Oid indexrelid);
extern char* pg_get_indexdef_columns(Oid indexrelid, bool pretty);
extern Datum pg_get_triggerdef(PG_FUNCTION_ARGS);
extern Datum pg_get_triggerdef_ext(PG_FUNCTION_ARGS);
+extern char* pg_get_triggerdef_string(Oid trigid);
extern Datum pg_get_constraintdef(PG_FUNCTION_ARGS);
extern Datum pg_get_constraintdef_ext(PG_FUNCTION_ARGS);
extern char* pg_get_constraintdef_string(Oid constraintId);
@@ -918,6 +920,12 @@ extern Datum pg_get_function_identity_arguments(PG_FUNCTION_ARGS);
extern Datum pg_get_function_result(PG_FUNCTION_ARGS);
extern char* deparse_expression(
Node* expr, List* dpcontext, bool forceprefix, bool showimplicit, bool no_alias = false);
+extern void get_query_def(Query* query, StringInfo buf, List* parentnamespace, TupleDesc resultDesc, int prettyFlags,
+ int wrapColumn, int startIndent,
+#ifdef PGXC
+ bool finalise_aggregates, bool sortgroup_colno, void* parserArg = NULL,
+#endif /* PGXC */
+ bool qrw_phase = false, bool viewdef = false, bool is_fqs = false);
extern char* deparse_create_sequence(Node* stmt, bool owned_by_none = false);
extern char* deparse_alter_sequence(Node* stmt, bool owned_by_none = false);
@@ -1679,10 +1687,8 @@ extern Datum text_timestamp(PG_FUNCTION_ARGS);
extern void encryptOBS(char* srcplaintext, char destciphertext[], uint32 destcipherlength);
extern void decryptOBS(
const char* srcciphertext, char destplaintext[], uint32 destplainlength, const char* obskey = NULL);
-extern void encryptECString(char* src_plain_text, char* dest_cipher_text,
- uint32 dest_cipher_length, int mode);
-extern bool decryptECString(const char* src_cipher_text, char* dest_plain_text,
- uint32 dest_plain_length, int mode);
+extern char *encryptECString(char* src_plain_text, int mode);
+extern bool decryptECString(const char* src_cipher_text, char** dest_plain_text, int mode);
extern bool IsECEncryptedString(const char* src_cipher_text);
extern void EncryptGenericOptions(List* options, const char** sensitiveOptionsArray,
int arrayLength, int mode);
@@ -1709,6 +1715,7 @@ extern Datum pg_lsn_in(PG_FUNCTION_ARGS);
/* nlssort.cpp */
extern Datum nlssort(PG_FUNCTION_ARGS);
+extern char *remove_trailing_spaces(const char *src_str);
// template function implementation
//
diff --git a/src/include/utils/dbe_scheduler.h b/src/include/utils/dbe_scheduler.h
index 30659f26a..3b38c2913 100644
--- a/src/include/utils/dbe_scheduler.h
+++ b/src/include/utils/dbe_scheduler.h
@@ -465,3 +465,7 @@ extern void drop_single_job_internal(PG_FUNCTION_ARGS);
* return_date_after IN TIMESTAMP WITH TIME ZONE
*/
extern Datum evaluate_calendar_string_internal(PG_FUNCTION_ARGS);
+extern void prepare_set_attribute(Datum attribute, Datum *name, Datum *value, Datum *extra_name, Datum extra_value);
+extern void set_attribute_with_related_rel(const Datum object_name, const Datum attribute_name,
+ const Datum attribute_value);
+extern char *CreateEventInlineProgram(Datum job_name, Datum job_type, Datum job_action, Datum job_definer);
\ No newline at end of file
diff --git a/src/include/utils/expr_distinct.h b/src/include/utils/expr_distinct.h
index a81633a03..16ff9d105 100644
--- a/src/include/utils/expr_distinct.h
+++ b/src/include/utils/expr_distinct.h
@@ -24,4 +24,58 @@
extern double GetExprNumDistinctRouter(VariableStatData *varData, bool needAdjust, STATS_EST_TYPE eType,
bool isJoinVar);
+extern bool IsFunctionTransferNumDistinct(FuncExpr *funcExpr);
+
+/*
+ * The array collects all of the type-cast functions which can transfer number of distinct from any one of
+ * its arguments, other parameters are viewed as Const.
+ */
+static Oid g_typeCastFuncOids[] = {
+ /* type cast from bool */
+ BOOLTOINT1FUNCOID, BOOLTOINT2FUNCOID, BOOLTOINT4FUNCOID, BOOLTOINT8FUNCOID, BOOLTOTEXTFUNCOID,
+
+ /* type cast from int1 */
+ I1TOI2FUNCOID, I1TOI4FUNCOID, I1TOI8FUNCOID, I1TOF4FUNCOID, I1TOF8FUNCOID, INT1TOBPCHARFUNCOID,
+ INT1TOVARCHARFUNCOID, INT1TONVARCHAR2FUNCOID, INT1TOTEXTFUNCOID, INT1TONUMERICFUNCOID, INT1TOINTERVALFUNCOID,
+
+ /* type cast from int2 */
+ I2TOI1FUNCOID, INT2TOINT4FUNCOID, INT2TOFLOAT4FUNCOID, INT2TOFLOAT8FUNCOID, INT2TOBPCHAR, INT2TOTEXTFUNCOID,
+ INT2TOVARCHARFUNCOID, INT2TOINT8FUNCOID, INT2TONUMERICFUNCOID, INT2TOINTERVALFUNCOID,
+
+ /* type cast from int4 */
+ I4TOI1FUNCOID, INT4TOINT2FUNCOID, INT4TOINT8FUNCOID, INT4TOFLOAT8FUNCOID, INTEGER2CASHFUNCOID,
+ INT4TONUMERICFUNCOID, INT4TOINTERVALFUNCOID, INT4TOHEXFUNCOID, INT4TOBPCHARFUNCOID, INT4TOTEXTFUNCOID,
+ INT4TOVARCHARFUNCOID, INT4TOCHARFUNCOID, INT4TOCHRFUNCOID,
+
+ /* type cast from int8 */
+ I8TOI1FUNCOID, INT8TOINT2FUNCOID, INT8TOINT4FUNCOID, INT8TOBPCHARFUNCOID, INT8TOTEXTFUNCOID, INT8TOVARCHARFUNCOID,
+ INT8TONUMERICFUNCOID, INT8TOHEXFUNCOID,
+
+ /* type cast from float4/float8 */
+ FLOAT4TOBPCHARFUNCOID, FLOAT4TOTEXTFUNCOID, FLOAT4TOVARCHARFUNCOID, FLOAT4TOFLOAT8FUNCOID,
+ FLOAT4TONUMERICFUNCOID, FLOAT8TOBPCHARFUNCOID, FLOAT8TOINTERVALFUNCOID, FLOAT8TOTEXTFUNCOID,
+ FLOAT8TOVARCHARFUNCOID, FLOAT8TONUMERICFUNCOID, FLOAT8TOTIMESTAMPFUNCOID,
+
+ /* type cast from numeric */
+ NUMERICTOBPCHARFUNCOID, NUMERICTOTEXTFUNCOID, NUMERICTOVARCHARFUNCOID,
+
+ /* type cast from timestamp/date/time */
+ DEFAULTFORMATTIMESTAMP2CHARFUNCOID, DEFAULTFORMATTIMESTAMPTZ2CHARFUNCOID, TIMESATMPTOTEXTFUNCOID,
+ TIMESTAMPTOVARCHARFUNCOID, TIMESTAMP2TIMESTAMPTZFUNCOID, TIMESTAMPTZ2TIMESTAMPFUNCOID,
+ DATETIMESTAMPTZFUNCOID, DATETOTIMESTAMPFUNCOID, DATETOBPCHARFUNCOID, DATETOVARCHARFUNCOID, DATETOTEXTFUNCOID,
+ DATEANDTIMETOTIMESTAMPFUNCOID, DTAETIME2TIMESTAMPTZFUNCOID, TIMETOINTERVALFUNCOID, TIMESTAMPZONETOTEXTFUNCOID,
+ TIME2TIMETZFUNCOID, RELTIMETOINTERVALFUNCOID,
+
+ /* type cast from text */
+ TODATEDEFAULTFUNCOID, TODATEFUNCOID, TOTIMESTAMPFUNCOID, TOTIMESTAMPDEFAULTFUNCOID,
+ TEXTTOREGCLASSFUNCOID, TEXTTOINT1FUNCOID, TEXTTOINT2FUNCOID, TEXTTOINT4FUNCOID, TEXTTOINT8FUNCOID,
+ TEXTTONUMERICFUNCOID, TEXTTOTIMESTAMP, TIMESTAMPTONEWTIMEZONEFUNCOID, TIMESTAMPTZTONEWTIMEZONEFUNCOID,
+ HEXTORAWFUNCOID,
+
+ /* type cast from char/varchar/bpchar */
+ VARCHARTONUMERICFUNCOID, VARCHARTOINT4FUNCOID, VARCHARTOINT8FUNCOID, VARCHARTOTIMESTAMPFUNCOID,
+ BPCHARTOINT4FUNCOID, BPCHARTOINT8FUNCOID, BPCHARTONUMERICFUNCOID, BPCHARTOTIMESTAMPFUNCOID,
+ RTRIM1FUNCOID, BPCHARTEXTFUNCOID, CHARTOBPCHARFUNCOID, CHARTOTEXTFUNCOID
+};
+
#endif /* EXPR_DISTINCT_H */
diff --git a/src/include/utils/globalplancore.h b/src/include/utils/globalplancore.h
index 379015e95..eea65c5b9 100644
--- a/src/include/utils/globalplancore.h
+++ b/src/include/utils/globalplancore.h
@@ -102,6 +102,7 @@ typedef struct GPCPlainEnv
int constraint_exclusion; // QUERY_TUNING_OTHER
int qrw_inlist2join_optmode;// QUERY_TUNING_OTHER2
int skew_strategy_store;// QUERY_TUNING_OTHER2
+ unsigned int b_format_behavior_compat_flags;
unsigned int behavior_compat_flags;
unsigned int plsql_compile_behavior_compat_flags;
int datestyle;
diff --git a/src/include/utils/guc_security.h b/src/include/utils/guc_security.h
index 8cd6f2e3f..fa4b32740 100644
--- a/src/include/utils/guc_security.h
+++ b/src/include/utils/guc_security.h
@@ -14,5 +14,6 @@
#define GUC_SECURIT_H
extern void InitSecurityConfigureNames();
+extern const int MAX_PASSWORD_LENGTH;
#endif /* GUC_SECURIT_H */
diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h
index ee00b4db6..63056862d 100644
--- a/src/include/utils/hsearch.h
+++ b/src/include/utils/hsearch.h
@@ -102,7 +102,6 @@ typedef struct HASHCTL {
#define HASH_DEALLOC 0x10000 /* Set memory deallocator */
#define HASH_BLOBS 0x20000 /* Select support functions for binary keys */
#define HASH_NOEXCEPT 0x40000 /* Do not throw exception when malloc memory */
-#define HASH_PACKAGE 0x80000 /* Set user defined hash package */
/* max_dsize value to indicate expansible directory */
#define NO_MAX_DSIZE (-1)
diff --git a/src/include/utils/partitionkey.h b/src/include/utils/partitionkey.h
index 0fcad414a..17d070dd0 100644
--- a/src/include/utils/partitionkey.h
+++ b/src/include/utils/partitionkey.h
@@ -113,3 +113,9 @@ extern void GetPartitionOidListForRTE(RangeTblEntry *rte, RangeVar *relation);
} while (0)
#endif
+
+#define constIsNull(x) ((x)->constisnull)
+#define constIsMaxValue(x) ((x)->ismaxvalue)
+
+int ConstCompareWithNull(Const *c1, Const *c2);
+int ListPartKeyCompare(PartitionKey* k1, PartitionKey* k2);
\ No newline at end of file
diff --git a/src/include/utils/partitionmap.h b/src/include/utils/partitionmap.h
index 983527e8d..74148b0ac 100644
--- a/src/include/utils/partitionmap.h
+++ b/src/include/utils/partitionmap.h
@@ -51,13 +51,18 @@ typedef struct PartitionMap {
bool isDirty;
} PartitionMap;
+typedef struct PartitionKey {
+ int count; /* partition key values count */
+ Const **values;
+} PartitionKey;
+
// describe range partition
-#define RANGE_PARTKEYMAXNUM 4
-#define PARTITION_PARTKEYMAXNUM 4
+#define RANGE_PARTKEYMAXNUM 16
+#define PARTITION_PARTKEYMAXNUM 16
#define VALUE_PARTKEYMAXNUM 4
#define INTERVAL_PARTKEYMAXNUM 1
-#define LIST_PARTKEYMAXNUM 1
+#define LIST_PARTKEYMAXNUM 16
#define HASH_PARTKEYMAXNUM 1
@@ -100,7 +105,8 @@ void decre_partmap_refcount(PartitionMap* map);
extern void RelationInitPartitionMap(Relation relation, bool isSubPartition = false);
extern int partOidGetPartSequence(Relation rel, Oid partOid);
-extern Oid getListPartitionOid(PartitionMap* partitionmap, Const** partKeyValue, int* partIndex, bool topClosed);
+extern Oid getListPartitionOid(
+ PartitionMap* partitionmap, Const** partKeyValue, int partKeyCount, int* partIndex, bool topClosed);
extern Oid getHashPartitionOid(PartitionMap* partitionmap, Const** partKeyValue, int* partIndex, bool topClosed);
extern Oid getRangePartitionOid(PartitionMap* partitionmap, Const** partKeyValue, int* partIndex, bool topClosed);
extern Oid GetPartitionOidByParam(PartitionMap* partitionmap, Param *paramArg, ParamExternData *prm);
@@ -119,12 +125,12 @@ extern int2vector* getPartitionKeyAttrNo(
Oid** typeOids, HeapTuple pg_part_tup, TupleDesc tupledsc, TupleDesc rel_tupledsc);
extern void unserializePartitionStringAttribute(Const** outMax, int outMaxLen, Oid* partKeyType, int partKeyTypeLen,
Oid relid, int2vector* partkey, HeapTuple pg_part_tup, int att_num, TupleDesc tupledsc);
-extern void unserializeListPartitionAttribute(int *len, Const*** listValues, Oid* partKeyType, int partKeyTypeLen,
+extern void unserializeListPartitionAttribute(int *len, PartitionKey** listValues, Oid* partKeyType, int partKeyTypeLen,
Oid relid, int2vector* partkey, HeapTuple pg_part_tup, int att_num, TupleDesc tupledsc);
extern void unserializeHashPartitionAttribute(Const** outMax, int outMaxLen,
Oid relid, int2vector* partkey, HeapTuple pg_part_tup, int att_num, TupleDesc tupledsc);
-extern int partitonKeyCompare(Const** value1, Const** value2, int len);
+extern int partitonKeyCompare(Const** value1, Const** value2, int len, bool nullEqual = false);
extern int getPartitionNumber(PartitionMap* map);
extern int GetSubPartitionNumber(Relation rel);
diff --git a/src/include/utils/partitionmap_gs.h b/src/include/utils/partitionmap_gs.h
old mode 100644
new mode 100755
index c1270b568..dc50a1c65
--- a/src/include/utils/partitionmap_gs.h
+++ b/src/include/utils/partitionmap_gs.h
@@ -62,37 +62,30 @@ typedef struct PartitionIdentifier {
Oid partitionId;
} PartitionIdentifier;
-/**
- *partition map is used to find which partition a record is mapping to.
- *and pruning the unused partition when querying.the map has two part:
- *and range part and interval part.
- *range part is a array of RangeElement which is sorted by RangeElement.boundary
- *interval part is array of IntervalElement which is sorted by IntervalElement.sequenceNum
- *binary search is used to routing in range part of map , and in interval part
- *we use (recordValue-lowBoundary_of_interval)/interval to get the sequenceNum of
- *a interval partition
- *
+/*
+ * The partition map is used to find which partition a record maps to,
+ * and to prune unused partitions when querying. The map has two parts:
+ * a range part and an interval part.
+ * The range part is an array of RangeElement sorted by its boundary.
+ */
typedef struct RangeElement {
- Oid partitionOid; /*the oid of partition*/
- int len; /*the length of partition key number*/
- Const* boundary[RANGE_PARTKEYMAXNUM]; /*upper bond of partition */
- bool isInterval; /* is interval partition */
+ Oid partitionOid; /* the oid of partition */
+ int partitionno; /* the partitionno of partition */
+ int len; /* the length of partition key number */
+    Const* boundary[RANGE_PARTKEYMAXNUM];  /* upper bound of partition */
+ bool isInterval; /* is interval partition */
} RangeElement;
-typedef struct IntervalElement {
- Oid partitionOid; /* the oid of partition */
- int sequenceNum; /* the logic number of interval partition. */
-} IntervalElement;
-
typedef struct ListPartElement {
Oid partitionOid; /* the oid of partition */
+ int partitionno; /* the partitionno of partition */
int len; /* the length of values */
- Const** boundary; /* list values */
+ PartitionKey* boundary;
} ListPartElement;
typedef struct HashPartElement {
Oid partitionOid; /* the oid of partition */
+ int partitionno; /* the partitionno of partition */
Const* boundary[1]; /* hash bucket */
} HashPartElement;
@@ -118,6 +111,8 @@ typedef struct RangePartitionMap {
bool ValueSatisfyLowBoudary(Const** partKeyValue, RangeElement* partition, Interval* intervalValue, bool topClosed);
extern int2vector* GetPartitionKey(const PartitionMap* partMap);
+extern Const **transformConstIntoPartkeyType(FormData_pg_attribute* attrs, int2vector* partitionKey, Const **boundary,
+ int len);
typedef struct ListPartitionMap {
PartitionMap type;
@@ -217,7 +212,7 @@ typedef struct HashPartitionMap {
} else if ((rel)->partMap->type == PART_TYPE_LIST) { \
(result)->partArea = PART_AREA_LIST; \
(result)->partitionId = \
- getListPartitionOid(((rel)->partMap), (keyValue), &((result)->partSeq), topClosed); \
+ getListPartitionOid(((rel)->partMap), (keyValue), (valueLen), &((result)->partSeq), topClosed); \
if ((result)->partSeq < 0) { \
(result)->fileExist = false; \
} else { \
@@ -270,25 +265,29 @@ typedef struct HashPartitionMap {
#define partitionRoutingForValueEqual(rel, keyValue, valueLen, topClosed, result) \
do { \
- if ((rel)->partMap->type == PART_TYPE_LIST) { \
- (result)->partArea = PART_AREA_LIST; \
- (result)->partitionId = getListPartitionOid(((rel)->partMap), (keyValue), &((result)->partSeq), topClosed); \
+ (keyValue) = transformConstIntoPartkeyType(((rel)->rd_att->attrs), GetPartitionKey((rel)->partMap), (keyValue), \
+ (valueLen)); \
+ if ((rel)->partMap->type == PART_TYPE_LIST) { \
+ (result)->partArea = PART_AREA_LIST; \
+ (result)->partitionId = \
+ getListPartitionOid(((rel)->partMap), (keyValue), (valueLen), &((result)->partSeq), topClosed); \
if ((result)->partSeq < 0) { \
(result)->fileExist = false; \
} else { \
(result)->fileExist = true; \
} \
} else if ((rel)->partMap->type == PART_TYPE_HASH) { \
- (result)->partArea = PART_AREA_HASH; \
- (result)->partitionId = getHashPartitionOid(((rel)->partMap), (keyValue), &((result)->partSeq), topClosed); \
+ (result)->partArea = PART_AREA_HASH; \
+ (result)->partitionId = \
+ getHashPartitionOid(((rel)->partMap), (keyValue), &((result)->partSeq), topClosed); \
if ((result)->partSeq < 0) { \
(result)->fileExist = false; \
} else { \
(result)->fileExist = true; \
} \
} else { \
- ereport(ERROR, \
- (errcode(ERRCODE_INTERNAL_ERROR), errmsg("Unsupported partition strategy:%d", (rel)->partMap->type))); \
+ ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), \
+ errmsg("Unsupported partition strategy:%d", (rel)->partMap->type))); \
} \
} while (0)
@@ -327,7 +326,9 @@ void getFakeReationForPartitionOid(HTAB **fakeRels, MemoryContext cxt, Relation
Relation *fakeRelation, Partition *partition, LOCKMODE lmode);
-#define searchFakeReationForPartitionOid(fakeRels, cxt, rel, partOid, fakeRelation, partition, lmode) \
+/* Search the fake relation by partOid. The partitionno is used to retry the search. In cases where partitionno
+ * is not needed, such as index search or DDL operations, just pass INVALID_PARTITION_NO. */
+#define searchFakeReationForPartitionOid(fakeRels, cxt, rel, partOid, partitionno, fakeRelation, partition, lmode) \
do { \
PartRelIdCacheKey _key = {partOid, -1}; \
Relation partParentRel = rel; \
@@ -337,10 +338,17 @@ void getFakeReationForPartitionOid(HTAB **fakeRels, MemoryContext cxt, Relation
} \
if (RelationIsSubPartitioned(rel) && !RelationIsIndex(rel)) { \
Oid parentOid = partid_get_parentid(partOid); \
+ if (!OidIsValid(parentOid)) { \
+ ereport(ERROR, \
+ (errcode(ERRCODE_PARTITION_ERROR), \
+ errmsg("partition %u does not exist on relation \"%s\" when search the fake relation", \
+ partOid, RelationGetRelationName(rel)), \
+ errdetail("this partition may have already been dropped"))); \
+ } \
if (parentOid != rel->rd_id) { \
Partition partForSubPart = NULL; \
- getFakeReationForPartitionOid(&fakeRels, cxt, rel, parentOid, &partRelForSubPart, &partForSubPart, \
- lmode); \
+ getFakeReationForPartitionOid \
+ (&fakeRels, cxt, rel, parentOid, &partRelForSubPart, &partForSubPart, lmode); \
partParentRel = partRelForSubPart; \
} \
} \
@@ -352,7 +360,7 @@ void getFakeReationForPartitionOid(HTAB **fakeRels, MemoryContext cxt, Relation
if (PointerIsValid(fakeRels)) { \
FakeRelationIdCacheLookup(fakeRels, _key, fakeRelation, partition); \
if (!RelationIsValid(fakeRelation)) { \
- partition = partitionOpen(partParentRel, partOid, lmode); \
+ partition = PartitionOpenWithPartitionno(partParentRel, partOid, partitionno, lmode); \
fakeRelation = partitionGetRelation(partParentRel, partition); \
FakeRelationCacheInsert(fakeRels, fakeRelation, partition, -1); \
} \
@@ -367,7 +375,7 @@ void getFakeReationForPartitionOid(HTAB **fakeRels, MemoryContext cxt, Relation
ctl.hcxt = cxt; \
fakeRels = hash_create("fakeRelationCache by OID", FAKERELATIONCACHESIZE, &ctl, \
HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); \
- partition = partitionOpen(partParentRel, partOid, lmode); \
+ partition = PartitionOpenWithPartitionno(partParentRel, partOid, partitionno, lmode); \
fakeRelation = partitionGetRelation(partParentRel, partition); \
FakeRelationCacheInsert(fakeRels, fakeRelation, partition, -1); \
} \
@@ -468,8 +476,10 @@ void getFakeReationForPartitionOid(HTAB **fakeRels, MemoryContext cxt, Relation
typedef struct SubPartitionPruningResult {
NodeTag type;
int partSeq;
+ int partitionno;
Bitmapset* bm_selectedSubPartitions;
List* ls_selectedSubPartitions;
+ List* ls_selectedSubPartitionnos;
} SubPartitionPruningResult;
typedef struct PruningResult {
@@ -481,13 +491,13 @@ typedef struct PruningResult {
/*if interval partitions is empty, intervalOffset=-1*/
Bitmapset* intervalSelectedPartitions;
List* ls_rangeSelectedPartitions;
+ List* ls_selectedPartitionnos;
List* ls_selectedSubPartitions;
Param* paramArg;
OpExpr* exprPart;
Expr* expr;
/* This variable applies only to single-partition key range partition tables in PBE mode. */
bool isPbeSinlePartition = false;
- PartitionMap* partMap;
} PruningResult;
extern Oid partIDGetPartOid(Relation relation, PartitionIdentifier* partID);
@@ -519,7 +529,8 @@ extern void DestroyListElements(ListPartElement* src, int elementNum);
extern void PartitionMapDestroyHashArray(HashPartElement* hashArray, int arrLen);
extern void partitionMapDestroyRangeArray(RangeElement* rangeArray, int arrLen);
extern void DestroyPartitionMap(PartitionMap* partMap);
+/* Search the fake relation by partOid; if partitionno is not needed, just pass 0. */
extern bool trySearchFakeReationForPartitionOid(HTAB** fakeRels, MemoryContext cxt, Relation rel, Oid partOid,
- Relation* fakeRelation, Partition* partition, LOCKMODE lmode, bool checkSubPart = true);
+ int partitionno, Relation* fakeRelation, Partition* partition, LOCKMODE lmode, bool checkSubPart = true);
#endif /* PARTITIONMAP_GS_H_ */
diff --git a/src/include/utils/plancache.h b/src/include/utils/plancache.h
index 9226d2f7d..2e4cd2219 100644
--- a/src/include/utils/plancache.h
+++ b/src/include/utils/plancache.h
@@ -277,6 +277,7 @@ typedef struct PMGRAction{
PMGRActionType type;
CachedPlanSource *psrc;
CachedPlan *selected_plan;
+ List *qRelSelec;
bool valid_plan;
PMGRStatCollectType statType;
bool is_shared; /* plansource is shared or not? */
@@ -284,6 +285,8 @@ typedef struct PMGRAction{
bool is_lock;
LWLockMode lockmode;
bool needGenericRoot;
+ PlannerInfo *genericRoot;
+ bool usePartIdx;
uint8 step;
}PMGRAction;
@@ -409,8 +412,15 @@ typedef struct CachedPlanSource {
uint64 sql_patch_sequence; /* should match g_instance.cost_cxt.sql_patch_sequence_id */
PlanManager *planManager;
int gpc_lockid;
- int nextval_default_expr_type;
+ /*
+ * PBE scenario for explain opteval:
+ * The call flow is as follows:
+ * ExplainQuery->ExplainOneQuery->ExplainOneUtility->ExplainExecuteQuery->BuildCachedPlan->pg_plan_queries
+ * so CachedPlanSource needs to add the attribute opteval to pass to pg_plan_queries
+ */
+ bool opteval;
bool hasSubQuery;
+ int nextval_default_expr_type;
} CachedPlanSource;
/*
@@ -455,6 +465,7 @@ typedef struct CachedPlan {
}
CachedPlanInfo *cpi;
bool is_candidate;
+ double cost; /* cost of generic plan, or -1 if not known */
} CachedPlan;
typedef struct CachedPlanInfo {
diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h
index c60381ac3..98ddea6b2 100644
--- a/src/include/utils/rel.h
+++ b/src/include/utils/rel.h
@@ -406,6 +406,7 @@ typedef struct StdRdOptions {
bool on_commit_delete_rows; /* global temp table */
PageCompressOpts compress; /* page compress related reloptions. */
int check_option_offset; /* for views */
+ Oid collate; /* table's default collation in b format. */
} StdRdOptions;
#define HEAP_MIN_FILLFACTOR 10
@@ -846,5 +847,7 @@ extern void RelationDecrementReferenceCount(Oid relationId);
extern void GetTdeInfoFromRel(Relation rel, TdeInfo *tde_info);
extern char RelationGetRelReplident(Relation r);
extern void SetupPageCompressForRelation(RelFileNode* node, PageCompressOpts* compressOpts, const char* name);
+extern bool IsRelationReplidentKey(Relation r, int attno);
+
#endif /* REL_H */
diff --git a/src/include/utils/rel_gs.h b/src/include/utils/rel_gs.h
index 2a425d7c0..ae7f54507 100644
--- a/src/include/utils/rel_gs.h
+++ b/src/include/utils/rel_gs.h
@@ -655,6 +655,9 @@ extern void PartitionDecrementReferenceCount(Partition part);
PARTTYPE_SUBPARTITIONED_RELATION == (relation)->rd_rel->parttype) && \
(RELKIND_RELATION == (relation)->rd_rel->relkind))
+#define RELATION_IS_INTERVAL_PARTITIONED(relation) \
+ (RELATION_IS_PARTITIONED(relation) && PartitionMapIsInterval((relation)->partMap))
+
#define RELATION_IS_VALUE_PARTITIONED(relation) \
((PARTTYPE_VALUE_PARTITIONED_RELATION == (relation)->rd_rel->parttype) && \
(RELKIND_RELATION == (relation)->rd_rel->relkind))
diff --git a/src/lib/config/cm_config.cpp b/src/lib/config/cm_config.cpp
index f5cdd769b..74fa3a2c7 100644
--- a/src/lib/config/cm_config.cpp
+++ b/src/lib/config/cm_config.cpp
@@ -1453,9 +1453,6 @@ int get_nodename_list_by_AZ(const char* AZName, const char* data_dir, char** nod
if (dn->datanodeId == 0)
continue;
- if (dn->datanodeRole == CASCADE_STANDBY_TYPE)
- continue;
-
for (n = 0; n < CM_MAX_DATANODE_STANDBY_NUM && !get_dn_in_same_shard; n++) {
peerDatanodeInfo* peer_datanode = &(dn->peerDatanodes[n]);
if (strlen(peer_datanode->datanodePeerHAIP[0]) == 0)
diff --git a/src/lib/gstrace/config/commands.in b/src/lib/gstrace/config/commands.in
index 78840d5de..9ad87e028 100644
--- a/src/lib/gstrace/config/commands.in
+++ b/src/lib/gstrace/config/commands.in
@@ -1,6 +1,7 @@
createdb
dropdb
RenameDatabase
+ AlterDatabasePermissionCheck
AlterDatabase
AlterDatabaseSet
AlterDatabaseOwner
diff --git a/src/lib/page_compression/PageCompression.cpp b/src/lib/page_compression/PageCompression.cpp
index ec02fbb7b..620704bd0 100644
--- a/src/lib/page_compression/PageCompression.cpp
+++ b/src/lib/page_compression/PageCompression.cpp
@@ -88,14 +88,8 @@ size_t PageCompression::ReadCompressedBuffer(BlockNumber blockNum, char *buffer,
size_t actualSize = CfsReadCompressedPage(buffer, bufferLen,
blockNum % CFS_LOGIC_BLOCKS_PER_EXTENT, &cfsReadStruct, globalBlockNumber);
/* valid check */
- if (actualSize == COMPRESS_FSEEK_ERROR) {
- return 0;
- } else if (actualSize == COMPRESS_FREAD_ERROR) {
- return 0;
- } else if (actualSize == COMPRESS_CHECKSUM_ERROR) {
- return 0;
- } else if (actualSize == COMPRESS_BLOCK_ERROR) {
- return 0;
+ if (actualSize > MIN_COMPRESS_ERROR_RT) {
+ return actualSize;
}
if (zeroAlign && actualSize != bufferLen) {
@@ -250,15 +244,6 @@ bool PageCompression::WriteBufferToCurrentBlock(char *buf, BlockNumber blkNumber
}
uint16 chkSize = cfsExtentHeader->chunk_size;
- uint32 nchunks = (size - 1) / (int32)chkSize + 1;
- /* fill zero in the last chunk */
- uint64 realSize = nchunks * chkSize;
- if ((uint64)size < realSize) {
- uint64 leftSize = realSize - size;
- errno_t rc = memset_s(buf + (uint32)size, (uint32)leftSize, 0, (uint32)leftSize);
- securec_check(rc, "", "");
- }
- size = realSize;
BlockNumber logicBlockNumber = blkNumber % CFS_LOGIC_BLOCKS_PER_EXTENT;
BlockNumber extentOffset = (blkNumber / CFS_LOGIC_BLOCKS_PER_EXTENT) % CFS_EXTENT_COUNT_PER_FILE;
int needChunks = size / (int32)chkSize;
diff --git a/src/test/ha/testcase/decode_single/logical_decoding_on_standby.sh b/src/test/ha/testcase/decode_single/logical_decoding_on_standby.sh
index bbbff69af..b55d91d90 100644
--- a/src/test/ha/testcase/decode_single/logical_decoding_on_standby.sh
+++ b/src/test/ha/testcase/decode_single/logical_decoding_on_standby.sh
@@ -65,7 +65,7 @@ function test_1()
#start logical decoding on standby
echo "begin to decode"
- nohup pg_recvlogical -d $db -p $dn1_standby_port -o include-xids=false -o include-timestamp=true -o skip-empty-xacts=true -o only-local=true -o white-table-list='public.*' -o parallel-decode-num=2 -o parallel-queue-size=256 -o sender_timeout='60s' -o standby-connection=false -o decode-style='j' -S slot1 --start -s 2 -f $scripts_dir/data/test1.log &
+ nohup pg_recvlogical -d $db -p $dn1_standby_port -o include-xids=false -o include-timestamp=true -o skip-empty-xacts=true -o only-local=true -o white-table-list='public.*' -o parallel-decode-num=2 -o parallel-queue-size=256 -o sender-timeout='60s' -o standby-connection=false -o decode-style='j' -S slot1 --start -s 2 -f $scripts_dir/data/test1.log &
if [ $? -eq 0 ]; then
echo "parallel decoding with type \'j\' start on standby success"
else
diff --git a/src/test/regress/data/audit_full_execute.sql b/src/test/regress/data/audit_full_execute.sql
new file mode 100644
index 000000000..639a85a26
--- /dev/null
+++ b/src/test/regress/data/audit_full_execute.sql
@@ -0,0 +1,165 @@
+-- DDL
+-- ddl_database
+DROP DATABASE IF EXISTS db_audit;
+CREATE DATABASE db_audit OWNER user1;
+-- ddl_tablespace
+CREATE TABLESPACE ds_location1 RELATIVE LOCATION 'test_tablespace/test_tablespace_1';
+-- ddl_schema
+DROP SCHEMA IF EXISTS audit;
+CREATE SCHEMA audit;
+DROP TABLE IF EXISTS audit.t_audit;
+CREATE TABLE audit.t_audit (id INTEGER, col1 VARCHAR(20));
+--ddl_user
+DROP USER IF EXISTS user_audit_test CASCADE;
+CREATE USER user_audit_test identified by 'test@2023';
+-- ddl_table
+DROP TABLE IF EXISTS t_audit;
+CREATE TABLE t_audit (id INTEGER, col1 VARCHAR(20), col2 INTEGER, col3 INTEGER);
+DO $$DECLARE i record;
+BEGIN
+ FOR i IN 1..100
+ LOOP
+ execute 'INSERT INTO t_audit VALUES (' || i || ', ''audit'', ' || i+1 || ', ' || i+2 || ');';
+ END LOOP;
+END$$;
+-- ddl_index
+DROP INDEX IF EXISTS index1;
+CREATE UNIQUE INDEX index1 ON audit.t_audit(id);
+-- ddl_view
+DROP VIEW IF EXISTS view1;
+CREATE VIEW view1 AS SELECT * FROM t_audit;
+-- ddl_trigger
+CREATE OR REPLACE FUNCTION audit_trigger_func() RETURNS TRIGGER AS
+ $$
+ DECLARE
+ BEGIN
+ INSERT INTO audit.t_audit VALUES(NEW.id, NEW.col1);
+ RETURN NEW;
+ END
+ $$ LANGUAGE PLPGSQL;
+
+CREATE TRIGGER audit_trigger
+ BEFORE INSERT ON t_audit
+ FOR EACH ROW
+ EXECUTE PROCEDURE audit_trigger_func();
+-- ddl_function
+DROP FUNCTION IF EXISTS func_sql;
+CREATE FUNCTION func_sql(integer, integer) RETURNS integer
+AS 'select $1 + $2;'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+-- ddl_resourcepool distributed
+-- ddl_workload distributed
+-- ddl_foreign_data_wrapper opengauss
+-- ddl_serverforhadoop
+create server server_audit foreign data wrapper log_fdw;
+-- ddl_datasource
+DROP DATA SOURCE IF EXISTS ds_audit;
+CREATE DATA SOURCE ds_audit;
+-- ddl_nodegroup distributed
+-- ddl_rowlevelsecurity
+DROP ROW LEVEL SECURITY POLICY IF EXISTS rls_audit ON t_audit;
+CREATE ROW LEVEL SECURITY POLICY rls_audit ON t_audit USING(id = 0);
+-- ddl_synonym
+DROP SYNONYM IF EXISTS s_audit;
+CREATE OR REPLACE SYNONYM s_audit FOR t_audit;
+-- ddl_type
+DROP TYPE IF EXISTS tp_audit;
+CREATE TYPE tp_audit AS (col1 int, col2 text);
+-- ddl_textsearch
+DROP TEXT SEARCH CONFIGURATION IF EXISTS ts_audit;
+CREATE TEXT SEARCH CONFIGURATION ts_audit (parser=ngram) WITH (gram_size = 2, grapsymbol_ignore = false);
+-- ddl_sequence
+DROP SEQUENCE IF EXISTS sq_audit CASCADE;
+CREATE SEQUENCE sq_audit
+START 101
+CACHE 20
+OWNED BY t_audit.id;
+-- ddl_key
+\! gs_ktool -d all
+DROP COLUMN ENCRYPTION KEY IF EXISTS cek1;
+DROP CLIENT MASTER KEY IF EXISTS cmk1;
+\! gs_ktool -g
+CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_GCM);
+CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AES_256_GCM);
+-- ddl_package
+CREATE OR REPLACE PROCEDURE pro_test()
+as
+begin
+ select count(*) from t_audit;
+end;
+/
+CREATE OR REPLACE PACKAGE pkg_audit IS
+var1 int:=1;
+var2 int:=2;
+PROCEDURE pro_test();
+END pkg_audit;
+/
+-- ddl_model
+CREATE MODEL m_audit USING linear_regression
+FEATURES id, col2
+TARGET col3
+FROM t_audit
+WITH learning_rate=0.88, max_iterations=default;
+-- ddl_sql_patch
+select * from dbe_sql_util.drop_sql_patch('patch_audit');
+select * from dbe_sql_util.create_hint_sql_patch('patch_audit', 2578396627, 'indexscan(t_audit)');
+-- audit_policy unified audit
+DROP AUDIT POLICY IF EXISTS pol_audit;
+CREATE AUDIT POLICY pol_audit PRIVILEGES CREATE;
+-- masking_policy unified audit
+DROP RESOURCE LABEL IF EXISTS mask_lb1;
+CREATE RESOURCE LABEL mask_lb1 ADD COLUMN(t_audit.col1);
+DROP MASKING POLICY IF EXISTS msk_audit;
+CREATE MASKING POLICY msk_audit maskall ON LABEL(mask_lb1);
+-- security_policy unified audit
+DROP ROW LEVEL SECURITY POLICY IF EXISTS sec_audit ON t_audit;
+CREATE ROW LEVEL SECURITY POLICY sec_audit ON t_audit USING(id = 1);
+
+-- DML
+-- dml_action
+DO $$DECLARE i record;
+BEGIN
+ FOR i IN 1..100
+ LOOP
+ execute 'INSERT INTO t_audit VALUES (' || i || ', ''audit'', ' || i+1 || ', ' || i+2 || ');';
+ END LOOP;
+END$$;
+-- dml_action_select
+select count(*) from t_audit;
+
+-- 用户锁定和解锁审计 audit_user_locked
+-- lock_user
+ALTER USER user_audit_test ACCOUNT LOCK;
+-- unlock_user
+ALTER USER user_audit_test ACCOUNT UNLOCK;
+
+-- 授权和回收权限审计 audit_grant_revoke
+-- grant_role
+GRANT ALL PRIVILEGES ON TABLE t_audit TO user_audit_test;
+-- revoke_role
+REVOKE INSERT ON TABLE t_audit FROM user_audit_test;
+
+-- 存储过程和自定义函数的执行审计 audit_function_exec
+-- function_exec
+CREATE OR REPLACE FUNCTION func_plpgsql(i integer) RETURNS integer AS $$
+ BEGIN
+ RETURN i + 1;
+ END;
+$$ LANGUAGE plpgsql;
+select func_plpgsql(1);
+
+-- SET审计 audit_set_parameter
+-- set_parameter
+SET datestyle TO postgres,dmy;
+
+-- create audit file, always record
+-- internal_event
+
+--delete user
+DROP DATABASE IF EXISTS db_audit;
+DROP USER IF EXISTS user_audit_test CASCADE;
+DROP COLUMN ENCRYPTION KEY IF EXISTS cek1;
+DROP CLIENT MASTER KEY IF EXISTS cmk1;
+\! gs_ktool -d all
\ No newline at end of file
diff --git a/src/test/regress/data/audit_full_superuser.sql b/src/test/regress/data/audit_full_superuser.sql
new file mode 100644
index 000000000..7e81ca53b
--- /dev/null
+++ b/src/test/regress/data/audit_full_superuser.sql
@@ -0,0 +1,35 @@
+-- audit query
+DROP TABLE IF EXISTS t_audit_type;
+CREATE TABLE t_audit_type(id INTEGER, content text[]);
+INSERT INTO t_audit_type VALUES (1, array['login_success', 'login_failed', 'user_logout', 'system_start',
+ 'system_stop', 'system_recover', 'system_switch', 'lock_user',
+ 'unlock_user', 'grant_role', 'revoke_role', 'user_violation',
+ 'ddl_database', 'ddl_directory', 'ddl_tablespace', 'ddl_schema',
+ 'ddl_user', 'ddl_table', 'ddl_index', 'ddl_view',
+ 'ddl_trigger', 'ddl_function', 'ddl_resourcepool', 'ddl_workload',
+ 'ddl_serverforhadoop', 'ddl_datasource', 'ddl_nodegroup', 'ddl_rowlevelsecurity',
+ 'ddl_synonym', 'ddl_type', 'ddl_textsearch', 'dml_action',
+ 'dml_action_select', 'internal_event', 'function_exec', 'copy_to',
+ 'copy_from', 'set_parameter', 'audit_policy', 'masking_policy',
+ 'security_policy', 'ddl_sequence', 'ddl_key', 'ddl_package',
+ 'ddl_model', 'ddl_globalconfig', 'ddl_publication_subscription', 'ddl_foreign_data_wrapper',
+ 'ddl_sql_patch']);
+INSERT INTO t_audit_type VALUES (2, array['user_violation', 'login_failed']);
+INSERT INTO t_audit_type VALUES (3, array['system_start', 'system_stop', 'system_switch', 'system_recover',
+ 'ddl_directory', 'ddl_globalconfig', 'ddl_foreign_data_wrapper', 'copy_to', 'copy_from',
+ 'ddl_publication_subscription', 'internal_event']);
+
+DROP FUNCTION IF EXISTS func_count_audit;
+CREATE OR REPLACE FUNCTION func_count_audit(anyarray text[], audit_user text, size int)
+RETURNS table (type_audit text, is_audit BOOL)
+AS $$
+DECLARE
+x int;
+BEGIN
+DROP TABLE IF EXISTS t_result;
+CREATE TABLE t_result (type_audit text, is_audit BOOL);
+FORALL x in 1..size
+INSERT INTO t_result SELECT type, (SELECT count(*) > 0) AS is_audit FROM pg_query_audit(current_date,current_date + interval '24 hours') WHERE username = audit_user and type = anyarray[x] GROUP BY type;
+RETURN query select * from t_result order by type_audit;
+end;
+$$ language plpgsql;
\ No newline at end of file
diff --git a/src/test/regress/expected/aggregate_B_database.out b/src/test/regress/expected/aggregate_B_database.out
index 845847e0c..470034ac7 100644
--- a/src/test/regress/expected/aggregate_B_database.out
+++ b/src/test/regress/expected/aggregate_B_database.out
@@ -1,15 +1,42 @@
-- test normal db
-create database test;
-\c test
+create database group_concat_test1 dbcompatibility 'A';;
+\c group_concat_test1
CREATE TABLE t(id int, v text);
INSERT INTO t(id, v) VALUES(1, 'A'),(2, 'B'),(1, 'C'),(2, 'DDDDDDDDDDDDDDDDDDDDDDDDDDDDD');
select id, group_concat(v separator ';') from t group by id order by id asc;
-ERROR: group_concat is supported only in B-format database
-LINE 1: select id, group_concat(v separator ';') from t group by id ...
- ^
+ id | group_concat
+----+---------------------------------
+ 1 | A;C
+ 2 | B;DDDDDDDDDDDDDDDDDDDDDDDDDDDDD
+(2 rows)
+
+create database group_concat_test2 dbcompatibility 'C';;
+\c group_concat_test2
+CREATE TABLE t(id int, v text);
+INSERT INTO t(id, v) VALUES(1, 'A'),(2, 'B'),(1, 'C'),(2, 'DDDDDDDDDDDDDDDDDDDDDDDDDDDDD');
+select id, group_concat(v separator ';') from t group by id order by id asc;
+ id | group_concat
+----+---------------------------------
+ 1 | A;C
+ 2 | B;DDDDDDDDDDDDDDDDDDDDDDDDDDDDD
+(2 rows)
+
+create database group_concat_test3 dbcompatibility 'PG';;
+\c group_concat_test3
+CREATE TABLE t(id int, v text);
+INSERT INTO t(id, v) VALUES(1, 'A'),(2, 'B'),(1, 'C'),(2, 'DDDDDDDDDDDDDDDDDDDDDDDDDDDDD');
+select id, group_concat(v separator ';') from t group by id order by id asc;
+ id | group_concat
+----+---------------------------------
+ 1 | A;C
+ 2 | B;DDDDDDDDDDDDDDDDDDDDDDDDDDDDD
+(2 rows)
+
\c regression
-drop database test;
--- test group_concat (compatible with B db)
+drop database group_concat_test1;
+drop database group_concat_test2;
+drop database group_concat_test3;
+-- test group_concat (in B db)
create database test_group_concat_B_db dbcompatibility 'B';
\c test_group_concat_B_db
set group_concat_max_len to 20480;
@@ -474,44 +501,35 @@ LINE 1: SELECT mgrno, ename, job, group_concat(ename,job) OVER(PARTI...
^
-- test for plan changes, dfx
SET explain_perf_mode=pretty;
-EXPLAIN verbose SELECT deptno, group_concat(ename ORDER BY ename SEPARATOR ',') AS employees_order_by_ename_varchar FROM emp GROUP BY deptno;
- QUERY PLAN
-------------------------------------------------------------------------
- GroupAggregate (cost=1.41..1.55 rows=3 width=8203)
- Output: deptno, group_concat(ename ORDER BY ename SEPARATOR ',')
- Group By Key: emp.deptno
- -> Sort (cost=1.41..1.44 rows=14 width=11)
- Output: deptno, ename
- Sort Key: emp.deptno
- -> Seq Scan on public.emp (cost=0.00..1.14 rows=14 width=11)
- Output: deptno, ename
-(8 rows)
+EXPLAIN (costs off) SELECT deptno, group_concat(ename ORDER BY ename SEPARATOR ',') AS employees_order_by_ename_varchar FROM emp GROUP BY deptno;
+ QUERY PLAN
+-----------------------------
+ GroupAggregate
+ Group By Key: deptno
+ -> Sort
+ Sort Key: deptno
+ -> Seq Scan on emp
+(5 rows)
-EXPLAIN verbose SELECT deptno, group_concat(sign ORDER BY email SEPARATOR '##') AS email_order_by_email_text_en FROM emp GROUP BY deptno;
- QUERY PLAN
-------------------------------------------------------------------------
- GroupAggregate (cost=1.41..1.55 rows=3 width=8217)
- Output: deptno, group_concat(sign ORDER BY email SEPARATOR '##')
- Group By Key: emp.deptno
- -> Sort (cost=1.41..1.44 rows=14 width=25)
- Output: deptno, sign, email
- Sort Key: emp.deptno
- -> Seq Scan on public.emp (cost=0.00..1.14 rows=14 width=25)
- Output: deptno, sign, email
-(8 rows)
+EXPLAIN (costs off) SELECT deptno, group_concat(sign ORDER BY email SEPARATOR '##') AS email_order_by_email_text_en FROM emp GROUP BY deptno;
+ QUERY PLAN
+-----------------------------
+ GroupAggregate
+ Group By Key: deptno
+ -> Sort
+ Sort Key: deptno
+ -> Seq Scan on emp
+(5 rows)
-EXPLAIN verbose SELECT deptno, group_concat(VARIADIC ARRAY[ename,':',job] ORDER BY ename) AS bonus_order_by_bonus_numeric FROM emp GROUP BY deptno;
- QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------
- GroupAggregate (cost=1.41..1.58 rows=3 width=8214)
- Output: deptno, group_concat(VARIADIC ARRAY[ename, ':'::character varying, (job)::character varying] ORDER BY ename SEPARATOR ',')
- Group By Key: emp.deptno
- -> Sort (cost=1.41..1.44 rows=14 width=22)
- Output: deptno, ename, job
- Sort Key: emp.deptno
- -> Seq Scan on public.emp (cost=0.00..1.14 rows=14 width=22)
- Output: deptno, ename, job
-(8 rows)
+EXPLAIN (costs off) SELECT deptno, group_concat(VARIADIC ARRAY[ename,':',job] ORDER BY ename) AS bonus_order_by_bonus_numeric FROM emp GROUP BY deptno;
+ QUERY PLAN
+-----------------------------
+ GroupAggregate
+ Group By Key: deptno
+ -> Sort
+ Sort Key: deptno
+ -> Seq Scan on emp
+(5 rows)
-- test for date print format
SET datestyle = 'SQL,DMY';
@@ -692,9 +710,10 @@ select group_concat(BT_COL2,BT_COL3,BT_COL4 order by BT_COL1 separator '') from
(1 row)
\c regression
+clean connection to all force for database test_group_concat_B_db;
drop database test_group_concat_B_db;
-create database t dbcompatibility 'B';
-\c t;
+create database test_group_concat_max_len dbcompatibility 'B';
+\c test_group_concat_max_len;
CREATE TABLE t(id int, v text);
INSERT INTO t(id, v) VALUES(1, 'A'),(2, 'B'),(1, 'C'),(2, 'DDDDDDDDDDDDDDDDDDDDDDDDDDDDD');
--select into statement
@@ -720,7 +739,7 @@ select id, group_concat(v separator ';') from t group by id order by id asc;
(2 rows)
--alter database XXX set XXX to XXX (current session)
-alter database t set group_concat_max_len to 10;
+alter database test_group_concat_max_len set group_concat_max_len to 10;
show group_concat_max_len;
group_concat_max_len
----------------------
@@ -736,7 +755,7 @@ select id, group_concat(v separator ';') from t group by id order by id asc;
--new session
\c regression
-\c t
+\c test_group_concat_max_len
show group_concat_max_len;
group_concat_max_len
----------------------
@@ -768,7 +787,7 @@ select id, group_concat(v separator ';') from t group by id order by id asc;
--show database value above
\c regression
-\c t
+\c test_group_concat_max_len
show group_concat_max_len;
group_concat_max_len
----------------------
@@ -788,4 +807,5 @@ ERROR: -1 is outside the valid range for parameter "group_concat_max_len" (0 ..
set group_concat_max_len to 9223372036854775808;
ERROR: parameter "group_concat_max_len" requires a numeric value
\c regression
-drop database t;
+clean connection to all force for database test_group_concat_max_len;
+drop database test_group_concat_max_len;
diff --git a/src/test/regress/expected/alter_table_000.out b/src/test/regress/expected/alter_table_000.out
index 17de6b348..73c2a85d6 100644
--- a/src/test/regress/expected/alter_table_000.out
+++ b/src/test/regress/expected/alter_table_000.out
@@ -49,11 +49,17 @@ column_clause
| ENCRYPTION KEY ROTATION
| AUTO_INCREMENT [ = ] value
| ALTER INDEX index_name [ VISBLE | INVISIBLE ]
+ | [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ]
+ | CONVERT TO CHARACTER SET | CHARSET charset [ COLLATE collation ]
+NOTICE: '[ CHARACTER SET | CHARSET ]' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ COLLATE ]' is only available in CENTRALIZED mode and B-format database!
where column_clause can be:
-ADD [ COLUMN ] column_name data_type [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ]
+ADD [ COLUMN ] column_name data_type [ CHARACTER SET | CHARSET charset ] [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ] [ FIRST | AFTER column_name ]
| MODIFY column_name data_type
| MODIFY column_name [ CONSTRAINT constraint_name ] NOT NULL [ ENABLE ]
| MODIFY column_name [ CONSTRAINT constraint_name ] NULL
+ | MODIFY [ COLUMN ] column_name data_type [ CHARACTER SET | CHARSET charset ] [ COLLATE collation ] [ column_constraint [ ... ] ] [FIRST | AFTER column_name]
+ | CHANGE [ COLUMN ] column_name new_column_name data_type [ CHARACTER SET | CHARSET charset ] [ COLLATE collation ] [ column_constraint [ ... ] ] [FIRST | AFTER column_name]
| DROP [ COLUMN ] [ IF EXISTS ] column_name [ RESTRICT | CASCADE ]
| ALTER [ COLUMN ] column_name [ SET DATA ] TYPE data_type [ COLLATE collation ] [ USING expression ]
| ALTER [ COLUMN ] column_name { SET DEFAULT expression | DROP DEFAULT }
@@ -64,20 +70,23 @@ ADD [ COLUMN ] column_name data_type [ compress_mode ] [ COLLATE collation ] [ c
| ALTER [ COLUMN ] column_name SET ( {attribute_option = value} [, ... ] )
| ALTER [ COLUMN ] column_name RESET ( attribute_option [, ... ] )
| ALTER [ COLUMN ] column_name SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN }
+NOTICE: 'MODIFY [ COLUMN ] ...' action is only available in CENTRALIZED mode and B-format database!
+NOTICE: 'CHANGE [ COLUMN ] ...' action is only available in CENTRALIZED mode and B-format database!
where column_constraint can be:
[ CONSTRAINT constraint_name ]
{ NOT NULL |
NULL |
CHECK ( expression ) |
DEFAULT default_expr |
- GENERATED ALWAYS AS ( generation_expr ) STORED |
+ GENERATED ALWAYS AS ( generation_expr ) [STORED] |
AUTO_INCREMENT |
- UNIQUE index_parameters |
+ UNIQUE [KEY] index_parameters |
PRIMARY KEY index_parameters |
ENCRYPTED WITH ( COLUMN_ENCRYPTION_KEY = column_encryption_key, ENCRYPTION_TYPE = encryption_type_value ) |
REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
[ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
+NOTICE: 'UNIQUE KEY' in table_constraint is only available in CENTRALIZED mode and B-format database!
where compress_mode can be:
{ DELTA | PREFIX | DICTIONARY | NUMSTR | NOCOMPRESS }
where table_constraint can be:
@@ -98,11 +107,14 @@ where table_constraint_using_index can be:
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
NOTICE: '[ constraint_name ]' in table_constraint is optional in CENTRALIZED mode and B-format database, it is mandatory in other scenarios.
-NOTICE: '[ index_name ]' in table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: '[ USING method ]' in table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: '[ ASC | DESC ]' in table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: '( expression )' in 'UNIQUE' clause of table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: 'AUTO_INCREMENT' is only avaliable in CENTRALIZED mode and B-format database!
+NOTICE: '[ index_name ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ USING method ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ ASC | DESC ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '( expression )' in 'UNIQUE' clause of table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: 'AUTO_INCREMENT' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ FIRST | AFTER column_name ]' clause is only available in CENTRALIZED mode!
+NOTICE: '[ FIRST | AFTER column_name ]' in 'MODIFY | CHANGE [ COLUMN ] ...' clause is only available in B-format database!
+NOTICE: '[ CHARACTER SET | CHARSET charset ]' is only available in CENTRALIZED mode and B-format database!
NOTICE: '[ VISIBLE | INVISIBLE ]' is only avaliable in CENTRALIZED mode and B-format database!
--custom script
diff --git a/src/test/regress/expected/alter_table_002.out b/src/test/regress/expected/alter_table_002.out
index 8427b3d0c..e2186356e 100644
--- a/src/test/regress/expected/alter_table_002.out
+++ b/src/test/regress/expected/alter_table_002.out
@@ -480,6 +480,27 @@ alter table def_test alter column c2 set default 20;
-- set defaults on a non-existent column: this should fail
alter table def_test alter column c3 set default 30;
ERROR: column "c3" of relation "def_test" does not exist
+-- create rule based on table
+create table t_base (id int);
+create table t_actual (id int);
+insert into t_actual values(2);
+select relname,reloptions,relkind from pg_class where relname='t_base' or relname='t_actual' order by 1;
+ relname | reloptions | relkind
+----------+----------------------------------+---------
+ t_actual | {orientation=row,compression=no} | r
+ t_base | {orientation=row,compression=no} | r
+(2 rows)
+
+CREATE RULE "_RETURN" AS ON SELECT TO t_base DO INSTEAD SELECT * FROM t_actual;
+select relname,reloptions,relkind from pg_class where relname='t_base' or relname='t_actual' order by 1;
+ relname | reloptions | relkind
+----------+----------------------------------+---------
+ t_actual | {orientation=row,compression=no} | r
+ t_base | | v
+(2 rows)
+
+drop table t_actual cascade;
+NOTICE: drop cascades to view t_base
-- set defaults on views: we need to create a view, add a rule
-- to allow insertions into it, and then alter the view to add
-- a default
diff --git a/src/test/regress/expected/alter_table_003.out b/src/test/regress/expected/alter_table_003.out
index e14348710..203f1ef49 100644
--- a/src/test/regress/expected/alter_table_003.out
+++ b/src/test/regress/expected/alter_table_003.out
@@ -963,3 +963,7774 @@ Check constraints:
DROP TABLE alter2.tt8;
DROP SCHEMA alter2;
+create database test_first_after_A dbcompatibility 'A';
+\c test_first_after_A
+-- test add column ... first | after columnname
+-- common scenatios
+drop table if exists t1 cascade;
+NOTICE: table "t1" does not exist, skipping
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 clob first, add f7 blob after f2;
+alter table t1 add f8 int, add f9 text first, add f10 float after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f9 | text | | extended | |
+ f6 | clob | | extended | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f7 | blob | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f10 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+ f8 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f9 | f6 | f1 | f2 | f7 | f3 | f10 | f4 | f5 | f8
+----+----+----+----+----+---------------------------------+-----+----------+----+----
+ | | 1 | a | | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t |
+ | | 2 | b | | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f |
+(2 rows)
+
+-- 1 primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 blob first, add f7 clob after f2;
+alter table t1 add f8 int, add f9 text first, add f10 float after f3;
+select * from t1;
+ f9 | f6 | f1 | f2 | f7 | f3 | f10 | f4 | f5 | f8
+----+----+----+----+----+---------------------------------+-----+----------+----+----
+ | | 1 | a | | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t |
+ | | 2 | b | | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f |
+(2 rows)
+
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+alter table t1 drop f1, add f6 text, add f7 int primary key first, add f8 float after f3;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+\d+ t1;
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f7 | integer | not null | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f8 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+ f6 | text | | extended | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f7) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 2 unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int unique first, add f7 float unique after f3;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f6_key" for table "t1"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f7_key" for table "t1"
+select * from t1;
+ f6 | f2 | f3 | f7 | f4 | f5
+----+----+---------------------------------+----+----------+----
+ | a | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t
+ | b | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f
+(2 rows)
+
+-- 3 default and generated column
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int default 1 first, add f7 float default 7 after f3;
+select * from t1;
+ f6 | f2 | f3 | f7 | f4 | f5
+----+----+---------------------------------+----+----------+----
+ 1 | a | Tue Nov 08 19:56:10.158564 2022 | 7 | 01000001 | t
+ 1 | b | Wed Nov 09 19:56:10.158564 2022 | 7 | 01000010 | f
+(2 rows)
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored);
+insert into t1 values(1, 2, 3), (11, 22, 33);
+alter table t1 add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f1 + f3) stored after f5;
+select * from t1;
+ f5 | f6 | f1 | f2 | f3 | f4
+----+----+----+----+----+----
+ 5 | 4 | 1 | 2 | 3 | 3
+ 55 | 44 | 11 | 22 | 33 | 33
+(2 rows)
+
+-- 5 NULL and NOT NULL
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 drop f1, drop f2, add f6 int null first, add f7 float not null after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | not null | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 6 check constraint
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (1, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f2, add f6 text check (f1 > 0) first, add f7 int check(f7 - f1 > 0) after f3;
+select * from t1;
+ f6 | f1 | f3 | f7 | f4 | f5
+----+----+---------------------------------+----+----------+----
+ | 1 | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t
+ | 1 | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f
+(2 rows)
+
+-- 7 foreign key
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+NOTICE: table "t_pri1" does not exist, skipping
+drop table if exists t_pri2 cascade;
+NOTICE: table "t_pri2" does not exist, skipping
+create table t_pri1(f1 text, f2 int primary key);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri1_pkey" for table "t_pri1"
+insert into t_pri1 values('a', 1), ('b', 2);
+create table t_pri2(f1 text, f2 bool, f4 int primary key);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri2_pkey" for table "t_pri2"
+insert into t_pri2 values('a', true, 1), ('b', false, 2);
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool);
+insert into t1 values(1, 2, true), (2, 2, false);
+alter table t1 drop f2, add f4 int references t_pri2(f4) first;
+select * from t1;
+ f4 | f1 | f3
+----+----+----
+ | 1 | t
+ | 2 | f
+(2 rows)
+
+alter table t1 drop f4, add f4 int references t_pri2(f4) after f1;
+select * from t1;
+ f1 | f4 | f3
+----+----+----
+ 1 | | t
+ 2 | | f
+(2 rows)
+
+-- partition table
+drop table if exists t1 cascade;
+create table t1
+(f1 int, f2 int, f3 int)
+partition by range(f1, f2)
+(
+ partition t1_p0 values less than (10, 0),
+ partition t1_p1 values less than (20, 0),
+ partition t1_p2 values less than (30, 0)
+);
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 1 2
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+alter table t1 add f4 int first, add f5 int after f1;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f4 | integer | | plain | |
+ f1 | integer | | plain | |
+ f5 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | | plain | |
+Partition By RANGE(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 2 4
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+-- subpartition table
+drop table if exists range_range cascade;
+NOTICE: table "range_range" does not exist, skipping
+create table range_range(id int, gender varchar not null, birthday date not null)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24');
+insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08');
+insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21');
+-- test pg_partition
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+ relname | parttype | partkey
+-------------+----------+---------
+ p_1 | p | 3
+ p_2 | p | 3
+ p_3 | p | 3
+ range_range | r | 1
+(4 rows)
+
+alter table range_range add f1 int default 1 first, add f2 text after id;
+\d+ range_range
+ Table "public.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+--------------------------------+-----------+----------+--------------+-------------
+ f1 | integer | default 1 | plain | |
+ id | integer | | plain | |
+ f2 | text | | extended | |
+ gender | character varying | not null | extended | |
+ birthday | timestamp(0) without time zone | not null | plain | |
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+ relname | parttype | partkey
+-------------+----------+---------
+ p_1 | p | 5
+ p_2 | p | 5
+ p_3 | p | 5
+ range_range | r | 2
+(4 rows)
+
+select * from range_range;
+ f1 | id | f2 | gender | birthday
+----+-----+----+--------+--------------------------
+ 1 | 33 | | boy | Mon Aug 11 00:00:00 2003
+ 1 | 78 | | girl | Tue Jun 24 00:00:00 2014
+ 1 | 15 | | girl | Mon Jan 12 00:00:00 2009
+ 1 | 198 | | boy | Mon Feb 15 00:00:00 2010
+ 1 | 146 | | girl | Tue Mar 08 00:00:00 2005
+ 1 | 111 | | girl | Tue Nov 19 00:00:00 2013
+ 1 | 156 | | boy | Sat May 21 00:00:00 2011
+ 1 | 233 | | girl | Fri Jan 01 00:00:00 2010
+ 1 | 360 | | boy | Mon May 14 00:00:00 2007
+(9 rows)
+
+-- pg_constraint test
+set enable_default_ustore_table = on;
+drop table if exists t_pri cascade;
+NOTICE: table "t_pri" does not exist, skipping
+drop table if exists t1 cascade;
+create table t_pri(f1 int, f2 int, f3 int, primary key(f2, f3));
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri_pkey" for table "t_pri"
+create table t1
+(
+ f1 int, f2 int, f3 varchar(20), f4 int, f5 int, f6 int, f7 int,
+ foreign key(f1, f2) references t_pri(f2, f3),
+ unique(f3, f4),
+ check(f5 = 10),
+ unique(f4, f5) include(f6, f7)
+);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f3_f4_key" for table "t1"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f4_f5_f6_f7_key" for table "t1"
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't1') order by conname;
+ conname | contype | conkey | confkey | conbin | consrc | conincluding
+--------------------+---------+--------+---------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------+--------------
+ t1_f1_fkey | f | {1,2} | {2,3} | | |
+ t1_f3_f4_key | u | {3,4} | | | |
+ t1_f4_f5_f6_f7_key | u | {4,5} | | | | {6,7}
+ t1_f5_check | c | {5} | | {OPEXPR :opno 96 :opfuncid 65 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 5 :location 182} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 187 :constvalue 4 [ 10 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 185} | (f5 = 10) |
+(4 rows)
+
+alter table t_pri add f4 int first, add f5 int after f2;
+alter table t1 add f8 int primary key first, add f9 int unique after f3;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f9_key" for table "t1"
+\d+ t_pri
+ Table "public.t_pri"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f4 | integer | | plain | |
+ f1 | integer | | plain | |
+ f2 | integer | not null | plain | |
+ f5 | integer | | plain | |
+ f3 | integer | not null | plain | |
+Indexes:
+ "t_pri_pkey" PRIMARY KEY, ubtree (f2, f3) WITH (storage_type=USTORE) TABLESPACE pg_default
+Referenced by:
+ TABLE "t1" CONSTRAINT "t1_f1_fkey" FOREIGN KEY (f1, f2) REFERENCES t_pri(f2, f3)
+Has OIDs: no
+Options: orientation=row, compression=no, storage_type=USTORE
+
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't_pri') order by conname;
+ conname | contype | conkey | confkey | conbin | consrc | conincluding
+------------+---------+--------+---------+--------+--------+--------------
+ t_pri_pkey | p | {3,5} | | | |
+(1 row)
+
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------+-----------+----------+--------------+-------------
+ f8 | integer | not null | plain | |
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | character varying(20) | | extended | |
+ f9 | integer | | plain | |
+ f4 | integer | | plain | |
+ f5 | integer | | plain | |
+ f6 | integer | | plain | |
+ f7 | integer | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, ubtree (f8) WITH (storage_type=USTORE) TABLESPACE pg_default
+ "t1_f3_f4_key" UNIQUE CONSTRAINT, ubtree (f3, f4) WITH (storage_type=USTORE) TABLESPACE pg_default
+ "t1_f4_f5_f6_f7_key" UNIQUE CONSTRAINT, ubtree (f4, f5) WITH (storage_type=USTORE) TABLESPACE pg_default
+ "t1_f9_key" UNIQUE CONSTRAINT, ubtree (f9) WITH (storage_type=USTORE) TABLESPACE pg_default
+Check constraints:
+ "t1_f5_check" CHECK (f5 = 10)
+Foreign-key constraints:
+ "t1_f1_fkey" FOREIGN KEY (f1, f2) REFERENCES t_pri(f2, f3)
+Has OIDs: no
+Options: orientation=row, compression=no, storage_type=USTORE
+
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't1') order by conname;
+ conname | contype | conkey | confkey | conbin | consrc | conincluding
+--------------------+---------+--------+---------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------+--------------
+ t1_f1_fkey | f | {2,3} | {3,5} | | |
+ t1_f3_f4_key | u | {4,6} | | | |
+ t1_f4_f5_f6_f7_key | u | {6,7} | | | | {8,9}
+ t1_f5_check | c | {7} | | {OPEXPR :opno 96 :opfuncid 65 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 7 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 7 :location 182} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 187 :constvalue 4 [ 10 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 185} | (f5 = 10) |
+ t1_f9_key | u | {5} | | | |
+ t1_pkey | p | {1} | | | |
+(6 rows)
+
+set enable_default_ustore_table = off;
+-- pg_index test
+drop table if exists t1 cascade;
+create table t1
+(
+ f1 int, f2 int, f3 varchar(20), f4 int, f5 int, f6 int, f7 int,
+ primary key(f1, f2),
+ unique(f3, f4),
+ check(f5 = 10)
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f3_f4_key" for table "t1"
+create unique index partial_t1_idx on t1(f5, abs(f6)) where f5 + f6 - abs(f7) > 0;
+select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1') order by 1, 2, 3;
+ indkey | indexprs | indpred
+--------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 1 2 | |
+ 3 4 | |
+ 5 0 | ({FUNCEXPR :funcid 1397 :funcresulttype 23 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 6 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 6 :location 49}) :location 45 :refSynOid 0}) | {OPEXPR :opno 521 :opfuncid 147 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({OPEXPR :opno 555 :opfuncid 181 :opresulttype 23 :opretset false :opcollid 0 :inputcollid 0 :args ({OPEXPR :opno 551 :opfuncid 177 :opresulttype 23 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 5 :location 60} {VAR :varno 1 :varattno 6 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 6 :location 65}) :location 63} {FUNCEXPR :funcid 1397 :funcresulttype 23 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 7 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 7 :location 74}) :location 70 :refSynOid 0}) :location 68} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 80 :constvalue 4 [ 0 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 78}
+(3 rows)
+
+alter table t1 add f8 int first, add f9 int unique after f1;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f9_key" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------+-----------+----------+--------------+-------------
+ f8 | integer | | plain | |
+ f1 | integer | not null | plain | |
+ f9 | integer | | plain | |
+ f2 | integer | not null | plain | |
+ f3 | character varying(20) | | extended | |
+ f4 | integer | | plain | |
+ f5 | integer | | plain | |
+ f6 | integer | | plain | |
+ f7 | integer | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1, f2) TABLESPACE pg_default
+ "partial_t1_idx" UNIQUE, btree (f5, abs(f6)) TABLESPACE pg_default WHERE (f5 + f6 - abs(f7)) > 0
+ "t1_f3_f4_key" UNIQUE CONSTRAINT, btree (f3, f4) TABLESPACE pg_default
+ "t1_f9_key" UNIQUE CONSTRAINT, btree (f9) TABLESPACE pg_default
+Check constraints:
+ "t1_f5_check" CHECK (f5 = 10)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1') order by 1, 2, 3;
+ indkey | indexprs | indpred
+--------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 2 4 | |
+ 3 | |
+ 5 6 | |
+ 7 0 | ({FUNCEXPR :funcid 1397 :funcresulttype 23 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 8 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 8 :location 49}) :location 45 :refSynOid 0}) | {OPEXPR :opno 521 :opfuncid 147 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({OPEXPR :opno 555 :opfuncid 181 :opresulttype 23 :opretset false :opcollid 0 :inputcollid 0 :args ({OPEXPR :opno 551 :opfuncid 177 :opresulttype 23 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 7 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 7 :location 60} {VAR :varno 1 :varattno 8 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 8 :location 65}) :location 63} {FUNCEXPR :funcid 1397 :funcresulttype 23 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 9 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 9 :location 74}) :location 70 :refSynOid 0}) :location 68} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 80 :constvalue 4 [ 0 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 78}
+(4 rows)
+
+-- pg_attribute test
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int);
+select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum;
+ attname | attnum | atthasdef | attisdropped
+---------+--------+-----------+--------------
+ f1 | 1 | f | f
+ f2 | 2 | f | f
+ f3 | 3 | f | f
+(3 rows)
+
+alter table t1 add f4 int default 4 first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f4 | integer | default 4 | plain | |
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum;
+ attname | attnum | atthasdef | attisdropped
+---------+--------+-----------+--------------
+ f4 | 1 | t | f
+ f1 | 2 | f | f
+ f2 | 3 | f | f
+ f3 | 4 | f | f
+(4 rows)
+
+alter table t1 drop f2, add f5 int default 5 after f1;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f4 | integer | default 4 | plain | |
+ f1 | integer | | plain | |
+ f5 | integer | default 5 | plain | |
+ f3 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum;
+ attname | attnum | atthasdef | attisdropped
+------------------------------+--------+-----------+--------------
+ f4 | 1 | t | f
+ f1 | 2 | f | f
+ f5 | 3 | t | f
+ ........pg.dropped.4........ | 4 | f | t
+ f3 | 5 | f | f
+(5 rows)
+
+-- pg_attrdef test
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 int, f3 int default 3, f4 int generated always as (f2 + f3) stored);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum;
+ adnum | adsrc | adgencol
+-------+-----------+----------
+ 3 | 3 |
+ 4 | (f2 + f3) | s
+(2 rows)
+
+alter table t1 add f5 text default 'aaa' first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+----------------------------------------+----------+--------------+-------------
+ f5 | text | default 'aaa'::text | extended | |
+ f1 | integer | not null | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | default 3 | plain | |
+ f4 | integer | generated always as ((f2 + f3)) stored | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum;
+ adnum | adsrc | adgencol
+-------+-------------+----------
+ 1 | 'aaa'::text |
+ 4 | 3 |
+ 5 | (f2 + f3) | s
+(3 rows)
+
+alter table t1 drop f2, add f6 int generated always as (f1 + abs(f3)) stored after f1;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+---------------------------------------------+----------+--------------+-------------
+ f5 | text | default 'aaa'::text | extended | |
+ f1 | integer | not null | plain | |
+ f6 | integer | generated always as ((f1 + abs(f3))) stored | plain | |
+ f3 | integer | default 3 | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum;
+ adnum | adsrc | adgencol
+-------+----------------+----------
+ 1 | 'aaa'::text |
+ 3 | (f1 + abs(f3)) | s
+ 5 | 3 |
+(3 rows)
+
+-- pg_depend test
+drop table if exists t1 cascade;
+create table t1(f1 int default 10, f2 int primary key, f3 int generated always as (f1 + f2) stored);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend
+ where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5;
+ classid | objsubid | refclassid | refobjsubid | deptype
+---------+----------+------------+-------------+---------
+ 1247 | 0 | 1259 | 0 | i
+ 1259 | 0 | 2615 | 0 | n
+ 1259 | 3 | 1259 | 1 | a
+ 1259 | 3 | 1259 | 2 | a
+ 2604 | 0 | 1259 | 1 | a
+ 2604 | 0 | 1259 | 3 | a
+ 2606 | 0 | 1259 | 2 | a
+(7 rows)
+
+alter table t1 add t1 add f4 int first;
+ERROR: syntax error at or near "f4"
+LINE 1: alter table t1 add t1 add f4 int first;
+ ^
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+----------------------------------------+---------+--------------+-------------
+ f1 | integer | default 10 | plain | |
+ f2 | integer | not null | plain | |
+ f3 | integer | generated always as ((f1 + f2)) stored | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f2) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend
+ where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5;
+ classid | objsubid | refclassid | refobjsubid | deptype
+---------+----------+------------+-------------+---------
+ 1247 | 0 | 1259 | 0 | i
+ 1259 | 0 | 2615 | 0 | n
+ 1259 | 3 | 1259 | 1 | a
+ 1259 | 3 | 1259 | 2 | a
+ 2604 | 0 | 1259 | 1 | a
+ 2604 | 0 | 1259 | 3 | a
+ 2606 | 0 | 1259 | 2 | a
+(7 rows)
+
+alter table t1 drop f2, add f6 int, add f7 int generated always as (f1 + f6) stored after f1;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+----------------------------------------+---------+--------------+-------------
+ f1 | integer | default 10 | plain | |
+ f7 | integer | generated always as ((f1 + f6)) stored | plain | |
+ f6 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend
+ where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5;
+ classid | objsubid | refclassid | refobjsubid | deptype
+---------+----------+------------+-------------+---------
+ 1247 | 0 | 1259 | 0 | i
+ 1259 | 0 | 2615 | 0 | n
+ 1259 | 2 | 1259 | 1 | a
+ 1259 | 2 | 1259 | 5 | a
+ 2604 | 0 | 1259 | 1 | a
+ 2604 | 0 | 1259 | 2 | a
+(6 rows)
+
+-- pg_rewrite test
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int);
+insert into t1 values(1, 2, 3), (11, 22, 33);
+create view t1_view1 as select * from t1;
+create view t1_view2 as select f1, f2 from t1;
+\d+ t1_view1
+ View "public.t1_view1"
+ Column | Type | Modifiers | Storage | Description
+--------+---------+-----------+---------+-------------
+ f1 | integer | | plain |
+ f2 | integer | | plain |
+ f3 | integer | | plain |
+View definition:
+ SELECT *
+ FROM t1;
+
+\d+ t1_view2
+ View "public.t1_view2"
+ Column | Type | Modifiers | Storage | Description
+--------+---------+-----------+---------+-------------
+ f1 | integer | | plain |
+ f2 | integer | | plain |
+View definition:
+ SELECT t1.f1, t1.f2
+ FROM t1;
+
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select pg_get_viewdef('t1_view1');
+ pg_get_viewdef
+--------------------
+ SELECT * FROM t1;
+(1 row)
+
+select pg_get_viewdef('t1_view2');
+ pg_get_viewdef
+------------------------------
+ SELECT t1.f1, t1.f2 FROM t1;
+(1 row)
+
+select * from t1_view1;
+ f1 | f2 | f3
+----+----+----
+ 1 | 2 | 3
+ 11 | 22 | 33
+(2 rows)
+
+select * from t1_view2;
+ f1 | f2
+----+----
+ 1 | 2
+ 11 | 22
+(2 rows)
+
+select * from t1;
+ f1 | f2 | f3
+----+----+----
+ 1 | 2 | 3
+ 11 | 22 | 33
+(2 rows)
+
+alter table t1 add f4 int first, add f5 int after f1;
+\d+ t1_view1
+ View "public.t1_view1"
+ Column | Type | Modifiers | Storage | Description
+--------+---------+-----------+---------+-------------
+ f1 | integer | | plain |
+ f2 | integer | | plain |
+ f3 | integer | | plain |
+View definition:
+ SELECT t1.f1, t1.f2, t1.f3
+ FROM t1;
+
+\d+ t1_view2
+ View "public.t1_view2"
+ Column | Type | Modifiers | Storage | Description
+--------+---------+-----------+---------+-------------
+ f1 | integer | | plain |
+ f2 | integer | | plain |
+View definition:
+ SELECT t1.f1, t1.f2
+ FROM t1;
+
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f4 | integer | | plain | |
+ f1 | integer | | plain | |
+ f5 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select pg_get_viewdef('t1_view1');
+ pg_get_viewdef
+-------------------------------------
+ SELECT t1.f1, t1.f2, t1.f3 FROM t1;
+(1 row)
+
+select pg_get_viewdef('t1_view2');
+ pg_get_viewdef
+------------------------------
+ SELECT t1.f1, t1.f2 FROM t1;
+(1 row)
+
+select * from t1_view1;
+ f1 | f2 | f3
+----+----+----
+ 1 | 2 | 3
+ 11 | 22 | 33
+(2 rows)
+
+select * from t1_view2;
+ f1 | f2
+----+----
+ 1 | 2
+ 11 | 22
+(2 rows)
+
+select * from t1;
+ f4 | f1 | f5 | f2 | f3
+----+----+----+----+----
+ | 1 | | 2 | 3
+ | 11 | | 22 | 33
+(2 rows)
+
+-- pg_trigger test
+drop table if exists t1 cascade;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to view t1_view1
+drop cascades to view t1_view2
+create table t1(f1 boolean not null, f2 text, f3 int, f4 date);
+alter table t1 add primary key(f1);
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+create or replace function dummy_update_func() returns trigger as $$
+begin
+ raise notice 'dummy_update_func(%) called: action = %, oid = %, new = %', TG_ARGV[0], TG_OP, OLD, NEW;
+ return new;
+end;
+$$ language plpgsql;
+drop trigger if exists f1_trig_update on t1;
+NOTICE: trigger "t1.f1_trig_update" for table "t1" does not exist, skipping
+drop trigger if exists f1_trig_insert on t1;
+NOTICE: trigger "t1.f1_trig_insert" for table "t1" does not exist, skipping
+create trigger f1_trig_update after update of f1 on t1 for each row
+ when (not old.f1 and new.f1) execute procedure dummy_update_func('update');
+create trigger f1_trig_insert after insert on t1 for each row
+ when (not new.f1) execute procedure dummy_update_func('insert');
+select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname;
+ tgname | tgattr | tgqual
+----------------+--------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ f1_trig_insert | | {BOOLEXPR :boolop not :args ({VAR :varno 2 :varattno 1 :vartype 16 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 2 :varoattno 1 :location 80}) :location 76}
+ f1_trig_update | 1 | {BOOLEXPR :boolop and :args ({BOOLEXPR :boolop not :args ({VAR :varno 1 :varattno 1 :vartype 16 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 86}) :location 82} {VAR :varno 2 :varattno 1 :vartype 16 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 2 :varoattno 1 :location 97}) :location 93}
+(2 rows)
+
+alter table t1 add f5 int after f1, add f6 boolean first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+--------------------------------+-----------+----------+--------------+-------------
+ f6 | boolean | | plain | |
+ f1 | boolean | not null | plain | |
+ f5 | integer | | plain | |
+ f2 | text | | extended | |
+ f3 | integer | | plain | |
+ f4 | timestamp(0) without time zone | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1) TABLESPACE pg_default
+Triggers:
+ f1_trig_insert AFTER INSERT ON t1 FOR EACH ROW WHEN (NOT new.f1) EXECUTE PROCEDURE dummy_update_func('insert')
+ f1_trig_update AFTER UPDATE OF f1 ON t1 FOR EACH ROW WHEN (NOT old.f1 AND new.f1) EXECUTE PROCEDURE dummy_update_func('update')
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname;
+ tgname | tgattr | tgqual
+----------------+--------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ f1_trig_insert | | {BOOLEXPR :boolop not :args ({VAR :varno 2 :varattno 2 :vartype 16 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 2 :varoattno 2 :location 80}) :location 76}
+ f1_trig_update | 2 | {BOOLEXPR :boolop and :args ({BOOLEXPR :boolop not :args ({VAR :varno 1 :varattno 2 :vartype 16 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 86}) :location 82} {VAR :varno 2 :varattno 2 :vartype 16 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 2 :varoattno 2 :location 97}) :location 93}
+(2 rows)
+
+-- pg_rlspolicy test
+drop table if exists t1 cascade;
+drop role if exists test_rlspolicy;
+NOTICE: role "test_rlspolicy" does not exist, skipping
+create role test_rlspolicy nologin password 'Gauss_234';
+create table t1 (f1 int, f2 int, f3 text) partition by range (f1)
+(
+ partition t1_p0 values less than(10),
+ partition t1_p1 values less than(50),
+ partition t1_p2 values less than(100),
+ partition t1_p3 values less than(MAXVALUE)
+);
+INSERT INTO t1 VALUES (generate_series(1, 150) % 24, generate_series(1, 150), 'huawei');
+grant select on t1 to public;
+create row level security policy t1_rls1 on t1 as permissive to public using (f2 <= 20);
+create row level security policy t1_rls2 on t1 as restrictive to test_rlspolicy using (f1 < 30);
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+----------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | text | | extended | |
+Row Level Security Policies:
+ POLICY "t1_rls1" FOR ALL
+ TO public
+ USING ((f2 <= 20))
+ POLICY "t1_rls2" AS RESTRICTIVE FOR ALL
+ TO test_rlspolicy
+ USING ((f1 < 30))
+Partition By RANGE(f1)
+Number of partitions: 4 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1 limit 10;
+ f1 | f2 | f3
+----+----+--------
+ 1 | 1 | huawei
+ 2 | 2 | huawei
+ 3 | 3 | huawei
+ 4 | 4 | huawei
+ 5 | 5 | huawei
+ 6 | 6 | huawei
+ 7 | 7 | huawei
+ 8 | 8 | huawei
+ 9 | 9 | huawei
+ 0 | 24 | huawei
+(10 rows)
+
+select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1');
+ polname | polqual
+---------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ t1_rls1 | {OPEXPR :opno 523 :opfuncid 149 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 2 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 78} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 84 :constvalue 4 [ 20 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 81}
+ t1_rls2 | {OPEXPR :opno 97 :opfuncid 66 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 87} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 92 :constvalue 4 [ 30 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 90}
+(2 rows)
+
+alter table t1 add f4 int generated always as (f1 + 100) stored after f1, add f5 int generated always as (f2 + 100) stored first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------------------------------------+----------+--------------+-------------
+ f5 | integer | generated always as ((f2 + 100)) stored | plain | |
+ f1 | integer | | plain | |
+ f4 | integer | generated always as ((f1 + 100)) stored | plain | |
+ f2 | integer | | plain | |
+ f3 | text | | extended | |
+Row Level Security Policies:
+ POLICY "t1_rls1" FOR ALL
+ TO public
+ USING ((f2 <= 20))
+ POLICY "t1_rls2" AS RESTRICTIVE FOR ALL
+ TO test_rlspolicy
+ USING ((f1 < 30))
+Partition By RANGE(f1)
+Number of partitions: 4 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1 limit 10;
+ f5 | f1 | f4 | f2 | f3
+-----+----+-----+----+--------
+ 101 | 1 | 101 | 1 | huawei
+ 102 | 2 | 102 | 2 | huawei
+ 103 | 3 | 103 | 3 | huawei
+ 104 | 4 | 104 | 4 | huawei
+ 105 | 5 | 105 | 5 | huawei
+ 106 | 6 | 106 | 6 | huawei
+ 107 | 7 | 107 | 7 | huawei
+ 108 | 8 | 108 | 8 | huawei
+ 109 | 9 | 109 | 9 | huawei
+ 124 | 0 | 100 | 24 | huawei
+(10 rows)
+
+select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1');
+ polname | polqual
+---------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ t1_rls1 | {OPEXPR :opno 523 :opfuncid 149 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 4 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 4 :location 78} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 84 :constvalue 4 [ 20 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 81}
+ t1_rls2 | {OPEXPR :opno 97 :opfuncid 66 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 2 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 87} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 92 :constvalue 4 [ 30 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 90}
+(2 rows)
+
+drop table if exists t1 cascade;
+\c postgres
+drop database test_first_after_A;
+-- test add column ... first | after columnname in B compatibility
+create database test_first_after_B dbcompatibility 'b';
+\c test_first_after_B
+-- test add column ... first | after columnname in astore table
+-- ASTORE table
+-- common scenatios
+drop table if exists t1 cascade;
+NOTICE: table "t1" does not exist, skipping
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 clob first, add f7 blob after f2;
+alter table t1 add f8 int, add f9 text first, add f10 float after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f9 | text | | extended | |
+ f6 | clob | | extended | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f7 | blob | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f10 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+ f8 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f9 | f6 | f1 | f2 | f7 | f3 | f10 | f4 | f5 | f8
+----+----+----+----+----+---------------------------------+-----+----------+----+----
+ | | 1 | a | | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t |
+ | | 2 | b | | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f |
+(2 rows)
+
+-- 1 primary key
+-- 1.1.1 primary key in original table without data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+alter table t1 add f6 clob first, add f7 blob after f2;
+alter table t1 add f8 int, add f9 text first, add f10 float after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f9 | text | | extended | |
+ f6 | clob | | extended | |
+ f1 | integer | not null | plain | |
+ f2 | character varying(20) | | extended | |
+ f7 | blob | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f10 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+ f8 | integer | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 1.1.2 primary key in original table with data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 blob first, add f7 clob after f2;
+alter table t1 add f8 int, add f9 text first, add f10 float after f3;
+select * from t1;
+ f9 | f6 | f1 | f2 | f7 | f3 | f10 | f4 | f5 | f8
+----+----+----+----+----+---------------------------------+-----+----------+----+----
+ | | 1 | a | | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t |
+ | | 2 | b | | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f |
+(2 rows)
+
+-- 1.2.1 primary key in a table without data, add column with primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+-- error
+alter table t1 add f6 text, add f7 int primary key first, add f8 float after f3;
+ERROR: multiple primary keys for table "t1" are not allowed
+select * from t1;
+ f1 | f2 | f3 | f4 | f5
+----+----+----+----+----
+(0 rows)
+
+-- 1.2.2 primary key in a table with data, add column with primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+-- error
+alter table t1 add f6 text, add f7 int primary key first, add f8 float after f3;
+ERROR: multiple primary keys for table "t1" are not allowed
+select * from t1;
+ f1 | f2 | f3 | f4 | f5
+----+----+---------------------------------+----------+----
+ 1 | a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ 2 | b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 1.3.1 primary key in a table without data, drop primary key, then add column with primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+alter table t1 drop f1, add f6 text, add f7 int primary key first, add f8 float after f3;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+\d+ t1;
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f7 | integer | not null | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f8 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+ f6 | text | | extended | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f7) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 1.3.2 primary key in a table with data, drop primary key, then add column with primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1;
+-- error
+alter table t1 add f6 text, add f7 int primary key first, add f8 float after f3;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+ERROR: column "f7" contains null values
+select * from t1;
+ f2 | f3 | f4 | f5
+----+---------------------------------+----------+----
+ a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 1.4.1 primary key in a table without data, drop primary key, then add column with primary key and default
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+alter table t1 drop f1, add f6 text, add f7 int primary key default 7 first, add f8 float after f3;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+--------------------+----------+--------------+-------------
+ f7 | integer | not null default 7 | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f8 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+ f6 | text | | extended | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f7) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 1.4.2 primary key in a table with data, drop primary key, then add column with primary key and default
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1;
+-- error
+alter table t1 add f6 text, add f7 int primary key default 7 first, add f8 float after f3;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+ERROR: could not create unique index "t1_pkey"
+DETAIL: Key (f7)=(7) is duplicated.
+select * from t1;
+ f2 | f3 | f4 | f5
+----+---------------------------------+----------+----
+ a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 1.5.1 primary key in a table without data, drop primary key, then add column with primary key and auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+alter table t1 drop f1, add f6 text, add f7 int primary key auto_increment first, add f8 float after f3;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f7_seq" for serial column "t1.f7"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-------------------------+----------+--------------+-------------
+ f7 | integer | not null AUTO_INCREMENT | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f8 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+ f6 | text | | extended | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f7) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 1.5.2 primary key in a table with data, drop primary key, then add column with primary key and auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 text, add f7 int primary key auto_increment first, add f8 float after f3;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f7_seq" for serial column "t1.f7"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+select * from t1;
+ f7 | f2 | f3 | f8 | f4 | f5 | f6
+----+----+---------------------------------+----+----------+----+----
+ 1 | a | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t |
+ 2 | b | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f |
+(2 rows)
+
+-- 2 unique index
+-- 2.1.1 unique index in a table without data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+alter table t1 add f6 int first, add f7 float after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | | plain | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_f1_key" UNIQUE CONSTRAINT, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 2.1.2 unique index in a table with data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int first, add f7 float after f3;
+select * from t1;
+ f6 | f1 | f2 | f3 | f7 | f4 | f5
+----+----+----+---------------------------------+----+----------+----
+ | 1 | a | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t
+ | 2 | b | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f
+(2 rows)
+
+-- 2.2.1 unique index in a table without data, add column with unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+alter table t1 add f6 int unique first, add f7 float unique after f3;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f6_key" for table "t1"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f7_key" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | | plain | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_f1_key" UNIQUE CONSTRAINT, btree (f1) TABLESPACE pg_default
+ "t1_f6_key" UNIQUE CONSTRAINT, btree (f6) TABLESPACE pg_default
+ "t1_f7_key" UNIQUE CONSTRAINT, btree (f7) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 2.2.2 unique index in a table with data, add column with unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int unique first, add f7 float unique after f3;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f6_key" for table "t1"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f7_key" for table "t1"
+select * from t1;
+ f6 | f1 | f2 | f3 | f7 | f4 | f5
+----+----+----+---------------------------------+----+----------+----
+ | 1 | a | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t
+ | 2 | b | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f
+(2 rows)
+
+-- 2.3.1 unique index in a table without data, drop unique index, add column with unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+alter table t1 drop f1, add f6 int unique first, add f7 float unique after f3;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f6_key" for table "t1"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f7_key" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_f6_key" UNIQUE CONSTRAINT, btree (f6) TABLESPACE pg_default
+ "t1_f7_key" UNIQUE CONSTRAINT, btree (f7) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 2.3.2 unique index in a table with data, drop unique index, add column with unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int unique first, add f7 float unique after f3;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f6_key" for table "t1"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f7_key" for table "t1"
+select * from t1;
+ f6 | f2 | f3 | f7 | f4 | f5
+----+----+---------------------------------+----+----------+----
+ | a | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t
+ | b | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f
+(2 rows)
+
+-- 2.4.1 unique index in a table without data, drop unique index, add column with unique index and default
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+alter table t1 add f6 int unique default 6 first;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f6_key" for table "t1"
+alter table t1 drop f1, add f7 float unique default 7 after f3;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f7_key" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | default 6 | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | default 7 | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_f6_key" UNIQUE CONSTRAINT, btree (f6) TABLESPACE pg_default
+ "t1_f7_key" UNIQUE CONSTRAINT, btree (f7) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 2.4.2 unique index in a table with data, drop unique index, add column with unique index and default
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+-- error
+alter table t1 add f6 int unique default 6 first;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f6_key" for table "t1"
+ERROR: could not create unique index "t1_f6_key"
+DETAIL: Key (f6)=(6) is duplicated.
+alter table t1 drop f1;
+-- error
+alter table t1 add f7 float unique default 7 after f3;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f7_key" for table "t1"
+ERROR: could not create unique index "t1_f7_key"
+DETAIL: Key (f7)=(7) is duplicated.
+select * from t1;
+ f2 | f3 | f4 | f5
+----+---------------------------------+----------+----
+ a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 3 default and generated column
+-- 3.1.1 default in a table without data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 add f6 int first, add f7 float after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | | plain | |
+ f1 | integer | default 1 | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 3.1.2 default in a table with data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int first, add f7 float after f3;
+select * from t1;
+ f6 | f1 | f2 | f3 | f7 | f4 | f5
+----+----+----+---------------------------------+----+----------+----
+ | 1 | a | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t
+ | 2 | b | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f
+(2 rows)
+
+-- 3.2.1 default in a table without data, add column with default
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 add f6 int default 6 first, add f7 float default 7 after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | default 6 | plain | |
+ f1 | integer | default 1 | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | default 7 | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 3.2.2 default in a table with data, add column with default
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int default 6 first, add f7 float default 7 after f3;
+select * from t1;
+ f6 | f1 | f2 | f3 | f7 | f4 | f5
+----+----+----+---------------------------------+----+----------+----
+ 6 | 1 | a | Tue Nov 08 19:56:10.158564 2022 | 7 | 01000001 | t
+ 6 | 2 | b | Wed Nov 09 19:56:10.158564 2022 | 7 | 01000010 | f
+(2 rows)
+
+-- 3.3.1 default in a table without data, drop default, add column with default
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 drop f1, add f6 int default 6 first, add f7 float default 7 after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | default 6 | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | default 7 | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 3.3.2 default in a table with data, drop default, add column with default
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int default 1 first, add f7 float default 7 after f3;
+select * from t1;
+ f6 | f2 | f3 | f7 | f4 | f5
+----+----+---------------------------------+----+----------+----
+ 1 | a | Tue Nov 08 19:56:10.158564 2022 | 7 | 01000001 | t
+ 1 | b | Wed Nov 09 19:56:10.158564 2022 | 7 | 01000010 | f
+(2 rows)
+
+-- 3.4.1 generated column in a table without data, drop generated column
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored);
+alter table t1 drop f1, add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f3*10) stored after f5;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+----------------------------------------+---------+--------------+-------------
+ f5 | integer | generated always as ((f2 + f3)) stored | plain | |
+ f6 | integer | generated always as ((f3 * 10)) stored | plain | |
+ f2 | integer | default 2 | plain | |
+ f3 | integer | default 3 | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 3.4.2 generated column in a table with data, drop generated column
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored);
+insert into t1 values(1, 2, 3), (11, 22, 33);
+alter table t1 drop f1, add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f3*10) stored after f5;
+select * from t1;
+ f5 | f6 | f2 | f3
+----+-----+----+----
+ 5 | 30 | 2 | 3
+ 55 | 330 | 22 | 33
+(2 rows)
+
+-- 3.5.1 generated column in a table without data, add generated column
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored);
+alter table t1 add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f2 + f3) stored after f5;
+\d+ t1;
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+----------------------------------------+---------+--------------+-------------
+ f5 | integer | generated always as ((f2 + f3)) stored | plain | |
+ f6 | integer | generated always as ((f2 + f3)) stored | plain | |
+ f1 | integer | | plain | |
+ f2 | integer | default 2 | plain | |
+ f3 | integer | default 3 | plain | |
+ f4 | integer | generated always as ((f1 + f2)) stored | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 3.5.2 generated column in table with data, add generated column
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored);
+insert into t1 values(1, 2, 3), (11, 22, 33);
+alter table t1 add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f1 + f3) stored after f5;
+select * from t1;
+ f5 | f6 | f1 | f2 | f3 | f4
+----+----+----+----+----+----
+ 5 | 4 | 1 | 2 | 3 | 3
+ 55 | 44 | 11 | 22 | 33 | 33
+(2 rows)
+
+-- 4 auto_increment
+-- 4.1.1 auto_increment in a table without data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+alter table t1 add f6 text first, add f7 float after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-------------------------+----------+--------------+-------------
+ f6 | text | | extended | |
+ f1 | integer | not null AUTO_INCREMENT | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 4.1.2 auto_increment in a table with data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 text first, add f7 float after f3;
+select * from t1;
+ f6 | f1 | f2 | f3 | f7 | f4 | f5
+----+----+----+---------------------------------+----+----------+----
+ | 1 | a | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t
+ | 2 | b | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f
+(2 rows)
+
+-- 4.2.1 auto_increment in a table without data, add column with auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+-- error
+alter table t1 add f6 int primary key auto_increment first;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f6_seq" for serial column "t1.f6"
+ERROR: Incorrect column definition, there can be only one auto_increment column
+-- error
+alter table t1 add f7 int primary key auto_increment after f3;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f7_seq" for serial column "t1.f7"
+ERROR: Incorrect column definition, there can be only one auto_increment column
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-------------------------+----------+--------------+-------------
+ f1 | integer | not null AUTO_INCREMENT | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 4.2.2 auto_increment in a table with data, add column with auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+-- error
+alter table t1 add f6 int primary key auto_increment first;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f6_seq" for serial column "t1.f6"
+ERROR: Incorrect column definition, there can be only one auto_increment column
+-- error
+alter table t1 add f7 int primary key auto_increment after f3;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f7_seq" for serial column "t1.f7"
+ERROR: Incorrect column definition, there can be only one auto_increment column
+select * from t1;
+ f1 | f2 | f3 | f4 | f5
+----+----+---------------------------------+----------+----
+ 1 | a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ 2 | b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 4.3.1 auto_increment in a table without data, drop auto_increment, add column with auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+alter table t1 drop f1, add f6 int primary key auto_increment first;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f6_seq" for serial column "t1.f6"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-------------------------+----------+--------------+-------------
+ f6 | integer | not null AUTO_INCREMENT | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f6) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 4.3.2 auto_increment in a table with data, drop auto_increment, add column with auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int primary key auto_increment first;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f6_seq" for serial column "t1.f6"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+-- 4.4.1 auto_increment in a table without data, drop auto_increment, add column with auto_increment and default
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+alter table t1 drop f1;
+-- error
+alter table t1 add f6 int primary key auto_increment default 6 first;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f6_seq" for serial column "t1.f6"
+ERROR: multiple default values specified for column "f6" of table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 4.4.2 auto_increment in a table with data, drop auto_increment, add column with auto_increment and default
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1;
+-- error
+alter table t1 add f6 int primary key auto_increment default 6 first;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f6_seq" for serial column "t1.f6"
+ERROR: multiple default values specified for column "f6" of table "t1"
+select * from t1;
+ f2 | f3 | f4 | f5
+----+---------------------------------+----------+----
+ a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 5 NULL and NOT NULL
+-- 5.1.1 null and not null in a table without data, add column without constraints
+drop table if exists t1 cascade;
+alter table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+ERROR: syntax error at or near "("
+LINE 1: alter table t1(f1 int null, f2 varchar(20) not null, f3 time...
+ ^
+alter table t1 add f6 text first, add f7 float after f3;
+ERROR: relation "t1" does not exist
+\d+ t1
+-- 5.1.2 null and not null in a table with data, add column without constraints
+drop table if exists t1 cascade;
+NOTICE: table "t1" does not exist, skipping
+alter table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+ERROR: syntax error at or near "("
+LINE 1: alter table t1(f1 int null, f2 varchar(20) not null, f3 time...
+ ^
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+ERROR: relation "t1" does not exist on datanode1
+LINE 1: insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', ...
+ ^
+alter table t1 add f6 text first, add f7 float after f3;
+ERROR: relation "t1" does not exist
+select * from t1;
+ERROR: relation "t1" does not exist on datanode1
+LINE 1: select * from t1;
+ ^
+-- 5.2.1 null and not null in table without data, add column with null or not null
+drop table if exists t1 cascade;
+NOTICE: table "t1" does not exist, skipping
+alter table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+ERROR: syntax error at or near "("
+LINE 1: alter table t1(f1 int null, f2 varchar(20) not null, f3 time...
+ ^
+alter table t1 add f6 int null first;
+ERROR: relation "t1" does not exist
+alter table t1 add f7 float not null after f3;
+ERROR: relation "t1" does not exist
+\d+ t1
+-- 5.2.2 null and not null in a table with data, add column with null or not null
+drop table if exists t1 cascade;
+NOTICE: table "t1" does not exist, skipping
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int null first;
+-- error
+alter table t1 add f7 float not null after f3;
+ERROR: column "f7" contains null values
+select * from t1;
+ f6 | f1 | f2 | f3 | f4 | f5
+----+----+----+---------------------------------+----------+----
+ | 1 | a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ | 2 | b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 5.3.1 null and not null in a table without data, drop null, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 drop f1, add f6 int null first, add f7 float not null after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | | plain | |
+ f2 | character varying(20) | not null | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | not null | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 5.3.2 null and not null in a table with data, drop null, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int null first;
+-- error
+alter table t1 add f7 float not null after f3;
+ERROR: column "f7" contains null values
+select * from t1;
+ f6 | f2 | f3 | f4 | f5
+----+----+---------------------------------+----------+----
+ | a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ | b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 5.4.1 null and not null in a table without data, drop null and not null, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 drop f1, drop f2, add f6 int null first, add f7 float not null after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | not null | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 5.4.2 null and not null in a table without data, drop null and not null, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, drop f2, add f6 int null first;
+-- error
+alter table t1 add f7 float not null after f3;
+ERROR: column "f7" contains null values
+select * from t1;
+ f6 | f3 | f4 | f5
+----+---------------------------------+----------+----
+ | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 6 check constraint
+-- 6.1.1 check constraint in a table without data, add column without constraint
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 add f6 text first, add f7 float after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | text | | extended | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Check constraints:
+ "t1_f1_check" CHECK (f1 = 1)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 6.1.2 check constraint in a table with data, add column without constraint
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+ERROR: new row for relation "t1" violates check constraint "t1_f1_check"
+DETAIL: N/A
+alter table t1 add f6 text first, add f7 float after f3;
+select * from t1;
+ f6 | f1 | f2 | f3 | f7 | f4 | f5
+----+----+----+----+----+----+----
+(0 rows)
+
+-- 6.2.1 check constraint in a table without data, add column with check
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 add f6 int default 6, add f7 text check(f6 = 6) first, add f8 float check(f1 + f2 == 7);
+ERROR: operator does not exist: bigint == integer
+HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts.
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Check constraints:
+ "t1_f1_check" CHECK (f1 = 1)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 6.2.2 check constraint in a table with data, add column with check
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (1, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int default 6, add f7 text check(f6 = 6) first, add f8 float check(f1 + f2 == 7) after f3;
+ERROR: operator does not exist: bigint == integer
+HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts.
+select * from t1;
+ f1 | f2 | f3 | f4 | f5
+----+----+---------------------------------+----------+----
+ 1 | a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ 1 | b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 6.3.1 check constraint in a table without data, drop check, add column with check
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 drop f2, add f6 text check (f1 > 0) first, add f7 int check (f7 - f1 > 0) after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | text | | extended | |
+ f1 | integer | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | integer | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Check constraints:
+ "t1_check" CHECK ((f7 - f1) > 0)
+ "t1_f1_check" CHECK (f1 = 1)
+ "t1_f1_check1" CHECK (f1 > 0)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 6.3.2 check constraint in a table with data, drop check, add column with with check
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (1, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f2, add f6 text check (f1 > 0) first, add f7 int check(f7 - f1 > 0) after f3;
+select * from t1;
+ f6 | f1 | f3 | f7 | f4 | f5
+----+----+---------------------------------+----+----------+----
+ | 1 | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t
+ | 1 | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f
+(2 rows)
+
+-- 7 foreign key
+-- 7.1.1 foreign key constraint in a table without data, add column without constraint
+drop table if exists t_pri1 cascade;
+NOTICE: table "t_pri1" does not exist, skipping
+create table t_pri1(f1 int, f2 int primary key);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri1_pkey" for table "t_pri1"
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool);
+alter table t1 add f4 int, add f5 text first, f6 float after f2;
+ERROR: syntax error at or near "f6"
+LINE 1: alter table t1 add f4 int, add f5 text first, f6 float after...
+ ^
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | boolean | | plain | |
+Foreign-key constraints:
+ "t1_f2_fkey" FOREIGN KEY (f2) REFERENCES t_pri1(f2)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 7.1.2 foreign key constraint in a table with data, add column without constraint
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+create table t_pri1(f1 text, f2 int primary key);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri1_pkey" for table "t_pri1"
+insert into t_pri1 values('a', 1), ('b', 2);
+create table t1(f1 text, f2 int references t_pri1(f2), f3 bool);
+insert into t1 values('a', 1, true), ('b', 2, false);
+alter table t1 add f4 int, add f5 text first, f6 float after f2;
+ERROR: syntax error at or near "f6"
+LINE 1: alter table t1 add f4 int, add f5 text first, f6 float after...
+ ^
+select * from t1;
+ f1 | f2 | f3
+----+----+----
+ a | 1 | t
+ b | 2 | f
+(2 rows)
+
+-- 7.2.1 foreign key constraint in a table without data, add column with foreign key
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+drop table if exists t_pri2 cascade;
+NOTICE: table "t_pri2" does not exist, skipping
+create table t_pri1(f1 text, f2 int primary key);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri1_pkey" for table "t_pri1"
+create table t_pri2(f1 int, f2 int, f4 int primary key);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri2_pkey" for table "t_pri2"
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool);
+alter table t1 add f4 int references t_pri2(f4) first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f4 | integer | | plain | |
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | boolean | | plain | |
+Foreign-key constraints:
+ "t1_f2_fkey" FOREIGN KEY (f2) REFERENCES t_pri1(f2)
+ "t1_f4_fkey" FOREIGN KEY (f4) REFERENCES t_pri2(f4)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+alter table t1 drop f4, add f4 int references t_pri2(f4) after f2;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f4 | integer | | plain | |
+ f3 | boolean | | plain | |
+Foreign-key constraints:
+ "t1_f2_fkey" FOREIGN KEY (f2) REFERENCES t_pri1(f2)
+ "t1_f4_fkey" FOREIGN KEY (f4) REFERENCES t_pri2(f4)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 7.2.2 foreign key constraint in a table with data, add column with foreign key
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+drop table if exists t_pri2 cascade;
+create table t_pri1(f1 text, f2 int primary key);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri1_pkey" for table "t_pri1"
+insert into t_pri1 values('a', 1), ('b', 2);
+create table t_pri2(f1 int, f2 bool, f4 int primary key);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri2_pkey" for table "t_pri2"
+insert into t_pri2 values(11, true, 1), (22, false, 2);
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool);
+insert into t1 values(1, 1, true), (2, 2, false);
+alter table t1 add f4 int references t_pri2(f4) first;
+select * from t1;
+ f4 | f1 | f2 | f3
+----+----+----+----
+ | 1 | 1 | t
+ | 2 | 2 | f
+(2 rows)
+
+alter table t1 drop f4, add f4 int references t_pri2(f4) after f2;
+select * from t1;
+ f1 | f2 | f4 | f3
+----+----+----+----
+ 1 | 1 | | t
+ 2 | 2 | | f
+(2 rows)
+
+-- 7.3.1 foreign key constraint in a table without data, drop foreign key, add column with foreign key
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+drop table if exists t_pri2 cascade;
+create table t_pri1(f1 int, f2 int primary key);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri1_pkey" for table "t_pri1"
+create table t_pri2(f1 int, f2 int, f4 int primary key);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri2_pkey" for table "t_pri2"
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool);
+alter table t1 drop f2, add f4 int references t_pri2(f4) first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f4 | integer | | plain | |
+ f1 | integer | | plain | |
+ f3 | boolean | | plain | |
+Foreign-key constraints:
+ "t1_f4_fkey" FOREIGN KEY (f4) REFERENCES t_pri2(f4)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+alter table t1 drop f4, add f4 int references t_pri2(f4) after f1;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f1 | integer | | plain | |
+ f4 | integer | | plain | |
+ f3 | boolean | | plain | |
+Foreign-key constraints:
+ "t1_f4_fkey" FOREIGN KEY (f4) REFERENCES t_pri2(f4)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- 7.3.2 foreign key constraint in a table with data, drop foreign key, add column with foreign key
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+drop table if exists t_pri2 cascade;
+create table t_pri1(f1 text, f2 int primary key);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri1_pkey" for table "t_pri1"
+insert into t_pri1 values('a', 1), ('b', 2);
+create table t_pri2(f1 text, f2 bool, f4 int primary key);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri2_pkey" for table "t_pri2"
+insert into t_pri2 values('a', true, 1), ('b', false, 2);
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool);
+insert into t1 values(1, 2, true), (2, 2, false);
+alter table t1 drop f2, add f4 int references t_pri2(f4) first;
+select * from t1;
+ f4 | f1 | f3
+----+----+----
+ | 1 | t
+ | 2 | f
+(2 rows)
+
+alter table t1 drop f4, add f4 int references t_pri2(f4) after f1;
+select * from t1;
+ f1 | f4 | f3
+----+----+----
+ 1 | | t
+ 2 | | f
+(2 rows)
+
+-- partition table
+drop table if exists t1 cascade;
+create table t1
+(f1 int, f2 int, f3 int)
+partition by range(f1, f2)
+(
+ partition t1_p0 values less than (10, 0),
+ partition t1_p1 values less than (20, 0),
+ partition t1_p2 values less than (30, 0)
+);
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 1 2
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+alter table t1 add f4 int first, add f5 int after f1;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f4 | integer | | plain | |
+ f1 | integer | | plain | |
+ f5 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | | plain | |
+Partition By RANGE(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 2 4
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+-- subpartition table
+drop table if exists range_range cascade;
+NOTICE: table "range_range" does not exist, skipping
+create table range_range(id int, gender varchar not null, birthday date not null)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24');
+insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08');
+insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21');
+-- test pg_partition
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+ relname | parttype | partkey
+-------------+----------+---------
+ p_1 | p | 3
+ p_2 | p | 3
+ p_3 | p | 3
+ range_range | r | 1
+(4 rows)
+
+alter table range_range add f1 int default 1 first, add f2 text after id;
+\d+ range_range
+ Table "public.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ f1 | integer | default 1 | plain | |
+ id | integer | | plain | |
+ f2 | text | | extended | |
+ gender | character varying | not null | extended | |
+ birthday | date | not null | plain | |
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+ relname | parttype | partkey
+-------------+----------+---------
+ p_1 | p | 5
+ p_2 | p | 5
+ p_3 | p | 5
+ range_range | r | 2
+(4 rows)
+
+select * from range_range;
+ f1 | id | f2 | gender | birthday
+----+-----+----+--------+------------
+ 1 | 33 | | boy | 08-11-2003
+ 1 | 78 | | girl | 06-24-2014
+ 1 | 15 | | girl | 01-12-2009
+ 1 | 198 | | boy | 02-15-2010
+ 1 | 146 | | girl | 03-08-2005
+ 1 | 111 | | girl | 11-19-2013
+ 1 | 156 | | boy | 05-21-2011
+ 1 | 233 | | girl | 01-01-2010
+ 1 | 360 | | boy | 05-14-2007
+(9 rows)
+
+-- USTORE table
+-- common scenatios
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 clob first, add f7 blob after f2;
+alter table t1 add f8 int, add f9 text first, add f10 float after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f9 | text | | extended | |
+ f6 | clob | | extended | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f7 | blob | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f10 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+ f8 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no, toast.storage_type=USTORE
+
+select * from t1;
+ f9 | f6 | f1 | f2 | f7 | f3 | f10 | f4 | f5 | f8
+----+----+----+----+----+---------------------------------+-----+----------+----+----
+ | | 1 | a | | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t |
+ | | 2 | b | | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f |
+(2 rows)
+
+-- 1 primary key
+-- 1.1.1 primary key in original table without data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+alter table t1 add f6 clob first, add f7 blob after f2;
+alter table t1 add f8 int, add f9 text first, add f10 float after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f9 | text | | extended | |
+ f6 | clob | | extended | |
+ f1 | integer | not null | plain | |
+ f2 | character varying(20) | | extended | |
+ f7 | blob | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f10 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+ f8 | integer | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=USTORE) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no, toast.storage_type=USTORE
+
+-- 1.1.2 primary key in original table with data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 blob first, add f7 clob after f2;
+alter table t1 add f8 int, add f9 text first, add f10 float after f3;
+select * from t1;
+ f9 | f6 | f1 | f2 | f7 | f3 | f10 | f4 | f5 | f8
+----+----+----+----+----+---------------------------------+-----+----------+----+----
+ | | 1 | a | | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t |
+ | | 2 | b | | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f |
+(2 rows)
+
+-- 1.2.1 primary key in a table without data, add column with primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+-- error
+alter table t1 add f6 text, add f7 int primary key first, add f8 float after f3;
+ERROR: multiple primary keys for table "t1" are not allowed
+select * from t1;
+ f1 | f2 | f3 | f4 | f5
+----+----+----+----+----
+(0 rows)
+
+-- 1.2.2 primary key in a table with data, add column with primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+-- error
+alter table t1 add f6 text, add f7 int primary key first, add f8 float after f3;
+ERROR: multiple primary keys for table "t1" are not allowed
+select * from t1;
+ f1 | f2 | f3 | f4 | f5
+----+----+---------------------------------+----------+----
+ 1 | a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ 2 | b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 1.3.1 primary key in a table without data, drop primary key, then add column with primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+alter table t1 drop f1, add f6 text, add f7 int primary key first, add f8 float after f3;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+\d+ t1;
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f7 | integer | not null | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f8 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+ f6 | text | | extended | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, ubtree (f7) WITH (storage_type=USTORE) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no, toast.storage_type=USTORE
+
+-- 1.3.2 primary key in a table with data, drop primary key, then add column with primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1;
+-- error
+alter table t1 add f6 text, add f7 int primary key first, add f8 float after f3;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+ERROR: column "f7" contains null values
+select * from t1;
+ f2 | f3 | f4 | f5
+----+---------------------------------+----------+----
+ a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 1.4.1 primary key in a table without data, drop primary key, the add column with primary key and default
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+alter table t1 drop f1, add f6 text, add f7 int primary key default 7 first, add f8 float after f3;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+--------------------+----------+--------------+-------------
+ f7 | integer | not null default 7 | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f8 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+ f6 | text | | extended | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, ubtree (f7) WITH (storage_type=USTORE) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no, toast.storage_type=USTORE
+
+-- 1.4.2 primary key in a table with data, drop primary key, then add column with primary key and default
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1;
+-- error
+alter table t1 add f6 text, add f7 int primary key default 7 first, add f8 float after f3;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+ERROR: could not create unique index "t1_pkey"
+DETAIL: Key (f7)=(7) is duplicated.
+select * from t1;
+ f2 | f3 | f4 | f5
+----+---------------------------------+----------+----
+ a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 1.5.1 primary key in a table without data, drop primary key, the add column with primary key and auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+alter table t1 drop f1, add f6 text, add f7 int primary key auto_increment first, add f8 float after f3;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f7_seq" for serial column "t1.f7"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-------------------------+----------+--------------+-------------
+ f7 | integer | not null AUTO_INCREMENT | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f8 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+ f6 | text | | extended | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, ubtree (f7) WITH (storage_type=USTORE) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no, toast.storage_type=USTORE
+
+-- 1.5.2 primary key in a table with data, drop primary key, then add column with primary key and auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 text, add f7 int primary key auto_increment first, add f8 float after f3;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f7_seq" for serial column "t1.f7"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+select * from t1;
+ f7 | f2 | f3 | f8 | f4 | f5 | f6
+----+----+---------------------------------+----+----------+----+----
+ 1 | a | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t |
+ 2 | b | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f |
+(2 rows)
+
+-- 2 unique index
+-- 2.1.1 unique index in a table without data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+alter table t1 add f6 int first, add f7 float after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | | plain | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_f1_key" UNIQUE CONSTRAINT, ubtree (f1) WITH (storage_type=USTORE) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+-- 2.1.2 unique index in a table with data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int first, add f7 float after f3;
+select * from t1;
+ f6 | f1 | f2 | f3 | f7 | f4 | f5
+----+----+----+---------------------------------+----+----------+----
+ | 1 | a | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t
+ | 2 | b | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f
+(2 rows)
+
+-- 2.2.1 unique index in a table without data, add column with unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+alter table t1 add f6 int unique first, add f7 float unique after f3;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f6_key" for table "t1"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f7_key" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | | plain | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_f1_key" UNIQUE CONSTRAINT, ubtree (f1) WITH (storage_type=USTORE) TABLESPACE pg_default
+ "t1_f6_key" UNIQUE CONSTRAINT, ubtree (f6) WITH (storage_type=USTORE) TABLESPACE pg_default
+ "t1_f7_key" UNIQUE CONSTRAINT, ubtree (f7) WITH (storage_type=USTORE) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+-- 2.2.2 unique index in a table with data, add column with unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int unique first, add f7 float unique after f3;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f6_key" for table "t1"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f7_key" for table "t1"
+select * from t1;
+ f6 | f1 | f2 | f3 | f7 | f4 | f5
+----+----+----+---------------------------------+----+----------+----
+ | 1 | a | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t
+ | 2 | b | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f
+(2 rows)
+
+-- 2.3.1 unique index in a table without data, drop unique index, add column with unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+alter table t1 drop f1, add f6 int unique first, add f7 float unique after f3;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f6_key" for table "t1"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f7_key" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_f6_key" UNIQUE CONSTRAINT, ubtree (f6) WITH (storage_type=USTORE) TABLESPACE pg_default
+ "t1_f7_key" UNIQUE CONSTRAINT, ubtree (f7) WITH (storage_type=USTORE) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+-- 2.3.2 unique index in a table with data, drop unique index, add column with unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int unique first, add f7 float unique after f3;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f6_key" for table "t1"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f7_key" for table "t1"
+select * from t1;
+ f6 | f2 | f3 | f7 | f4 | f5
+----+----+---------------------------------+----+----------+----
+ | a | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t
+ | b | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f
+(2 rows)
+
+-- 2.4.1 unique index in a table without data, drop unique index, add column with unique index and default
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+alter table t1 add f6 int unique default 6 first;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f6_key" for table "t1"
+alter table t1 drop f1, add f7 float unique default 7 after f3;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f7_key" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | default 6 | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | default 7 | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_f6_key" UNIQUE CONSTRAINT, ubtree (f6) WITH (storage_type=USTORE) TABLESPACE pg_default
+ "t1_f7_key" UNIQUE CONSTRAINT, ubtree (f7) WITH (storage_type=USTORE) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+-- 2.4.2 unique index in a table with data, drop unique index, add column with unique index and default
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+-- error
+alter table t1 add f6 int unique default 6 first;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f6_key" for table "t1"
+ERROR: could not create unique index "t1_f6_key"
+DETAIL: Key (f6)=(6) is duplicated.
+alter table t1 drop f1;
+-- error
+alter table t1 add f7 float unique default 7 after f3;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f7_key" for table "t1"
+ERROR: could not create unique index "t1_f7_key"
+DETAIL: Key (f7)=(7) is duplicated.
+select * from t1;
+ f2 | f3 | f4 | f5
+----+---------------------------------+----------+----
+ a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 3 default and generated column
+-- 3.1.1 default in a table without data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 add f6 int first, add f7 float after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | | plain | |
+ f1 | integer | default 1 | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+-- 3.1.2 default in a table with data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int first, add f7 float after f3;
+select * from t1;
+ f6 | f1 | f2 | f3 | f7 | f4 | f5
+----+----+----+---------------------------------+----+----------+----
+ | 1 | a | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t
+ | 2 | b | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f
+(2 rows)
+
+-- 3.2.1 default in a table without data, add column with default
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 add f6 int default 6 first, add f7 float default 7 after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | default 6 | plain | |
+ f1 | integer | default 1 | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | default 7 | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+-- 3.2.2 default in a table with data, add column with default
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int default 6 first, add f7 float default 7 after f3;
+select * from t1;
+ f6 | f1 | f2 | f3 | f7 | f4 | f5
+----+----+----+---------------------------------+----+----------+----
+ 6 | 1 | a | Tue Nov 08 19:56:10.158564 2022 | 7 | 01000001 | t
+ 6 | 2 | b | Wed Nov 09 19:56:10.158564 2022 | 7 | 01000010 | f
+(2 rows)
+
+-- 3.3.1 default in a table without data, drop default, add column with default
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 drop f1, add f6 int default 6 first, add f7 float default 7 after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | default 6 | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | default 7 | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+-- 3.3.2 default in a table with data, drop default, add column with default
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int default 1 first, add f7 float default 7 after f3;
+select * from t1;
+ f6 | f2 | f3 | f7 | f4 | f5
+----+----+---------------------------------+----+----------+----
+ 1 | a | Tue Nov 08 19:56:10.158564 2022 | 7 | 01000001 | t
+ 1 | b | Wed Nov 09 19:56:10.158564 2022 | 7 | 01000010 | f
+(2 rows)
+
+-- 3.4.1 generated column in a table without data, drop generated column
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored) with (storage_type = ustore);
+alter table t1 drop f1, add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f3*10) stored after f5;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+----------------------------------------+---------+--------------+-------------
+ f5 | integer | generated always as ((f2 + f3)) stored | plain | |
+ f6 | integer | generated always as ((f3 * 10)) stored | plain | |
+ f2 | integer | default 2 | plain | |
+ f3 | integer | default 3 | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+-- 3.4.2 generated column in a table with data, drop generated column
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored) with (storage_type = ustore);
+insert into t1 values(1, 2, 3), (11, 22, 33);
+alter table t1 drop f1, add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f3*10) stored after f5;
+select * from t1;
+ f5 | f6 | f2 | f3
+----+-----+----+----
+ 5 | 30 | 2 | 3
+ 55 | 330 | 22 | 33
+(2 rows)
+
+-- 3.5.1 generated column in a table without data, add generated column
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored) with (storage_type = ustore);
+alter table t1 add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f1 + f3) stored after f5;
+\d+ t1;
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+----------------------------------------+---------+--------------+-------------
+ f5 | integer | generated always as ((f2 + f3)) stored | plain | |
+ f6 | integer | generated always as ((f1 + f3)) stored | plain | |
+ f1 | integer | | plain | |
+ f2 | integer | default 2 | plain | |
+ f3 | integer | default 3 | plain | |
+ f4 | integer | generated always as ((f1 + f2)) stored | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+-- 3.5.2 generated column in table with data, add generated column
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored) with (storage_type = ustore);
+insert into t1 values(1, 2, 3), (11, 22, 33);
+alter table t1 add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f1 + f3) stored after f5;
+select * from t1;
+ f5 | f6 | f1 | f2 | f3 | f4
+----+----+----+----+----+----
+ 5 | 4 | 1 | 2 | 3 | 3
+ 55 | 44 | 11 | 22 | 33 | 33
+(2 rows)
+
+-- 4 auto_increment
+-- 4.1.1 auto_increment in a table without data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+alter table t1 add f6 text first, add f7 float after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-------------------------+----------+--------------+-------------
+ f6 | text | | extended | |
+ f1 | integer | not null AUTO_INCREMENT | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=USTORE) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no, toast.storage_type=USTORE
+
+-- 4.1.2 auto_increment in a table with data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 text first, add f7 float after f3;
+select * from t1;
+ f6 | f1 | f2 | f3 | f7 | f4 | f5
+----+----+----+---------------------------------+----+----------+----
+ | 1 | a | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t
+ | 2 | b | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f
+(2 rows)
+
+-- 4.2.1 auto_increment in a table without data, add column with auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+-- error
+alter table t1 add f6 int primary key auto_increment first;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f6_seq" for serial column "t1.f6"
+ERROR: Incorrect column definition, there can be only one auto_increment column
+-- error
+alter table t1 add f7 int primary key auto_increment after f3;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f7_seq" for serial column "t1.f7"
+ERROR: Incorrect column definition, there can be only one auto_increment column
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-------------------------+----------+--------------+-------------
+ f1 | integer | not null AUTO_INCREMENT | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=USTORE) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+-- 4.2.2 auto_increment in a table with data, add column with auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+-- error
+alter table t1 add f6 int primary key auto_increment first;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f6_seq" for serial column "t1.f6"
+ERROR: Incorrect column definition, there can be only one auto_increment column
+-- error
+alter table t1 add f7 int primary key auto_increment after f3;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f7_seq" for serial column "t1.f7"
+ERROR: Incorrect column definition, there can be only one auto_increment column
+select * from t1;
+ f1 | f2 | f3 | f4 | f5
+----+----+---------------------------------+----------+----
+ 1 | a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ 2 | b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 4.3.1 auto_increment in a table without data, drop auto_increment, add column with auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+alter table t1 drop f1, add f6 int primary key auto_increment first;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f6_seq" for serial column "t1.f6"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-------------------------+----------+--------------+-------------
+ f6 | integer | not null AUTO_INCREMENT | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, ubtree (f6) WITH (storage_type=USTORE) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+-- 4.3.2 auto_increment in a table with data, drop auto_increment, add column with auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int primary key auto_increment first;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f6_seq" for serial column "t1.f6"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+-- 4.4.1 auto_increment in a table without data, drop auto_increment, add column with auto_increment and default
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+alter table t1 drop f1;
+-- error
+alter table t1 add f6 int primary key auto_increment default 6 first;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f6_seq" for serial column "t1.f6"
+ERROR: multiple default values specified for column "f6" of table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+-- 4.4.2 auto_increment in a table with data, drop auto_increment, add column with auto_increment and default
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1;
+-- error
+alter table t1 add f6 int primary key auto_increment default 6 first;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f6_seq" for serial column "t1.f6"
+ERROR: multiple default values specified for column "f6" of table "t1"
+select * from t1;
+ f2 | f3 | f4 | f5
+----+---------------------------------+----------+----
+ a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 5 NULL and NOT NULL
+-- 5.1.1 null and not null in a table without data, add column without constraints
+drop table if exists t1 cascade;
+alter table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+ERROR: syntax error at or near "("
+LINE 1: alter table t1(f1 int null, f2 varchar(20) not null, f3 time...
+ ^
+alter table t1 add f6 text first, add f7 float after f3;
+ERROR: relation "t1" does not exist
+\d+ t1
+-- 5.1.2 null and not null in a table with data, add column without constraints
+drop table if exists t1 cascade;
+NOTICE: table "t1" does not exist, skipping
+alter table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+ERROR: syntax error at or near "("
+LINE 1: alter table t1(f1 int null, f2 varchar(20) not null, f3 time...
+ ^
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+ERROR: relation "t1" does not exist on datanode1
+LINE 1: insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', ...
+ ^
+alter table t1 add f6 text first, add f7 float after f3;
+ERROR: relation "t1" does not exist
+select * from t1;
+ERROR: relation "t1" does not exist on datanode1
+LINE 1: select * from t1;
+ ^
+-- 5.2.1 null and not null in table without data, add column with null or not null
+drop table if exists t1 cascade;
+NOTICE: table "t1" does not exist, skipping
+alter table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+ERROR: syntax error at or near "("
+LINE 1: alter table t1(f1 int null, f2 varchar(20) not null, f3 time...
+ ^
+alter table t1 add f6 int null first, add f7 float not null after f3;
+ERROR: relation "t1" does not exist
+\d+ t1
+-- 5.2.2 null and not null in a table with data, add column with null or not null
+drop table if exists t1 cascade;
+NOTICE: table "t1" does not exist, skipping
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int null first;
+-- error
+alter table t1 add f7 float not null after f3;
+ERROR: column "f7" contains null values
+select * from t1;
+ f6 | f1 | f2 | f3 | f4 | f5
+----+----+----+---------------------------------+----------+----
+ | 1 | a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ | 2 | b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 5.3.1 null and not null in a table without data, drop null, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 drop f1, add f6 int null first, add f7 float not null after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | | plain | |
+ f2 | character varying(20) | not null | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | not null | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+-- 5.3.2 null and not null in a table with data, drop null, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int null first;
+-- error
+alter table t1 add f7 float not null after f3;
+ERROR: column "f7" contains null values
+select * from t1;
+ f6 | f2 | f3 | f4 | f5
+----+----+---------------------------------+----------+----
+ | a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ | b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 5.4.1 null and not null in a table without data, drop null and not null, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 drop f1, drop f2, add f6 int null first, add f7 float not null after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | integer | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | not null | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+-- 5.4.2 null and not null in a table with data, drop null and not null, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, drop f2, add f6 int null first;
+-- error
+alter table t1 add f7 float not null after f3;
+ERROR: column "f7" contains null values
+select * from t1;
+ f6 | f3 | f4 | f5
+----+---------------------------------+----------+----
+ | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 6 check constraint
+-- 6.1.1 check constraint in a table without data, add column without constraint
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 add f6 text first, add f7 float after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | text | | extended | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | double precision | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Check constraints:
+ "t1_f1_check" CHECK (f1 = 1)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no, toast.storage_type=USTORE
+
+-- 6.1.2 check constraint in a table with data, add column without constraint
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+ERROR: new row for relation "t1" violates check constraint "t1_f1_check"
+DETAIL: N/A
+alter table t1 add f6 text first, add f7 float after f3;
+select * from t1;
+ f6 | f1 | f2 | f3 | f7 | f4 | f5
+----+----+----+----+----+----+----
+(0 rows)
+
+-- 6.2.1 check constraint in a table without data, add column with check
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 add f6 int default 6, add f7 text check(f6 = 6) first, add f8 float check(f1 + f2 == 7);
+ERROR: operator does not exist: bigint == integer
+HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts.
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Check constraints:
+ "t1_f1_check" CHECK (f1 = 1)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+-- 6.2.2 check constraint in a table with data, add column with check
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (1, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int default 6, add f7 text check(f6 = 6) first, add f8 float check(f1 + f2 == 7) after f3;
+ERROR: operator does not exist: bigint == integer
+HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts.
+select * from t1;
+ f1 | f2 | f3 | f4 | f5
+----+----+---------------------------------+----------+----
+ 1 | a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ 1 | b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+-- 6.3.1 check constraint in a table without data, drop check, add column with check
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 drop f2, add f6 text check (f1 > 0) first, add f7 int check (f7 - f1 > 0) after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f6 | text | | extended | |
+ f1 | integer | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f7 | integer | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Check constraints:
+ "t1_check" CHECK ((f7 - f1) > 0)
+ "t1_f1_check" CHECK (f1 = 1)
+ "t1_f1_check1" CHECK (f1 > 0)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no, toast.storage_type=USTORE
+
+-- 6.3.2 check constraint in a table with data, drop check, add column with with check
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (1, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f2, add f6 text check (f1 > 0) first, add f7 int check(f7 - f1 > 0) after f3;
+select * from t1;
+ f6 | f1 | f3 | f7 | f4 | f5
+----+----+---------------------------------+----+----------+----
+ | 1 | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t
+ | 1 | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f
+(2 rows)
+
+-- 7 foreign key
+-- 7.1.1 foreign key constraint in a table without data, add column without constraint
+drop table if exists t_pri1 cascade;
+create table t_pri1(f1 int, f2 int primary key) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri1_pkey" for table "t_pri1"
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool) with (storage_type = ustore);
+alter table t1 add f4 int, add f5 text first, f6 float after f2;
+ERROR: syntax error at or near "f6"
+LINE 1: alter table t1 add f4 int, add f5 text first, f6 float after...
+ ^
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | boolean | | plain | |
+Foreign-key constraints:
+ "t1_f2_fkey" FOREIGN KEY (f2) REFERENCES t_pri1(f2)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+-- 7.1.2 foreign key constraint in a table with data, add column without constraint
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+create table t_pri1(f1 text, f2 int primary key) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri1_pkey" for table "t_pri1"
+insert into t_pri1 values('a', 1), ('b', 2);
+create table t1(f1 text, f2 int references t_pri1(f2), f3 bool) with (storage_type = ustore);
+insert into t1 values('a', 1, true), ('b', 2, false);
+alter table t1 add f4 int, add f5 text first, f6 float after f2;
+ERROR: syntax error at or near "f6"
+LINE 1: alter table t1 add f4 int, add f5 text first, f6 float after...
+ ^
+select * from t1;
+ f1 | f2 | f3
+----+----+----
+ a | 1 | t
+ b | 2 | f
+(2 rows)
+
+-- 7.2.1 foreign key constraint in a table without data, add column with foreign key
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+drop table if exists t_pri2 cascade;
+create table t_pri1(f1 text, f2 int primary key) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri1_pkey" for table "t_pri1"
+create table t_pri2(f1 int, f2 int, f4 int primary key) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri2_pkey" for table "t_pri2"
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool) with (storage_type = ustore);
+alter table t1 add f4 int references t_pri2(f4) first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f4 | integer | | plain | |
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | boolean | | plain | |
+Foreign-key constraints:
+ "t1_f2_fkey" FOREIGN KEY (f2) REFERENCES t_pri1(f2)
+ "t1_f4_fkey" FOREIGN KEY (f4) REFERENCES t_pri2(f4)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+alter table t1 drop f4, add f4 int references t_pri2(f4) after f2;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f4 | integer | | plain | |
+ f3 | boolean | | plain | |
+Foreign-key constraints:
+ "t1_f2_fkey" FOREIGN KEY (f2) REFERENCES t_pri1(f2)
+ "t1_f4_fkey" FOREIGN KEY (f4) REFERENCES t_pri2(f4)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+-- 7.2.2 foreign key constraint in a table with data, add column with foreign key
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+drop table if exists t_pri2 cascade;
+create table t_pri1(f1 text, f2 int primary key) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri1_pkey" for table "t_pri1"
+insert into t_pri1 values('a', 1), ('b', 2);
+create table t_pri2(f1 int, f2 bool, f4 int primary key) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri2_pkey" for table "t_pri2"
+insert into t_pri2 values(11, true, 1), (22, false, 2);
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool) with (storage_type = ustore);
+insert into t1 values(1, 1, true), (2, 2, false);
+alter table t1 add f4 int references t_pri2(f4) first;
+select * from t1;
+ f4 | f1 | f2 | f3
+----+----+----+----
+ | 1 | 1 | t
+ | 2 | 2 | f
+(2 rows)
+
+alter table t1 drop f4, add f4 int references t_pri2(f4) after f2;
+select * from t1;
+ f1 | f2 | f4 | f3
+----+----+----+----
+ 1 | 1 | | t
+ 2 | 2 | | f
+(2 rows)
+
+-- 7.3.1 foreign key constraint in a table without data, drop foreign key, add column with foreign key
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+drop table if exists t_pri2 cascade;
+create table t_pri1(f1 int, f2 int primary key) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri1_pkey" for table "t_pri1"
+create table t_pri2(f1 int, f2 int, f4 int primary key) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri2_pkey" for table "t_pri2"
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool) with (storage_type = ustore);
+alter table t1 drop f2, add f4 int references t_pri2(f4) first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f4 | integer | | plain | |
+ f1 | integer | | plain | |
+ f3 | boolean | | plain | |
+Foreign-key constraints:
+ "t1_f4_fkey" FOREIGN KEY (f4) REFERENCES t_pri2(f4)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+alter table t1 drop f4, add f4 int references t_pri2(f4) after f1;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f1 | integer | | plain | |
+ f4 | integer | | plain | |
+ f3 | boolean | | plain | |
+Foreign-key constraints:
+ "t1_f4_fkey" FOREIGN KEY (f4) REFERENCES t_pri2(f4)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+-- 7.3.2 foreign key constraint in a table with data, drop foreign key, add column with foreign key
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+drop table if exists t_pri2 cascade;
+create table t_pri1(f1 text, f2 int primary key) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri1_pkey" for table "t_pri1"
+insert into t_pri1 values('a', 1), ('b', 2);
+create table t_pri2(f1 text, f2 bool, f4 int primary key) with (storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri2_pkey" for table "t_pri2"
+insert into t_pri2 values('a', true, 1), ('b', false, 2);
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool) with (storage_type = ustore);
+insert into t1 values(1, 2, true), (2, 2, false);
+alter table t1 drop f2, add f4 int references t_pri2(f4) first;
+select * from t1;
+ f4 | f1 | f3
+----+----+----
+ | 1 | t
+ | 2 | f
+(2 rows)
+
+alter table t1 drop f4, add f4 int references t_pri2(f4) after f1;
+select * from t1;
+ f1 | f4 | f3
+----+----+----
+ 1 | | t
+ 2 | | f
+(2 rows)
+
+-- partition table
+drop table if exists t1 cascade;
+create table t1
+(f1 int, f2 int, f3 int) with (storage_type = ustore)
+partition by range(f1, f2)
+(
+ partition t1_p0 values less than (10, 0),
+ partition t1_p1 values less than (20, 0),
+ partition t1_p2 values less than (30, 0)
+);
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 1 2
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+alter table t1 add f4 int first, add f5 int after f1;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f4 | integer | | plain | |
+ f1 | integer | | plain | |
+ f5 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | | plain | |
+Partition By RANGE(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 2 4
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+-- subpartition table
+drop table if exists range_range cascade;
+create table range_range(id int, gender varchar not null, birthday date not null) with (storage_type = ustore)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24');
+insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08');
+insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21');
+-- test pg_partition
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+ relname | parttype | partkey
+-------------+----------+---------
+ p_1 | p | 3
+ p_2 | p | 3
+ p_3 | p | 3
+ range_range | r | 1
+(4 rows)
+
+alter table range_range add f1 int default 1 first, add f2 text after id;
+\d+ range_range
+ Table "public.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ f1 | integer | default 1 | plain | |
+ id | integer | | plain | |
+ f2 | text | | extended | |
+ gender | character varying | not null | extended | |
+ birthday | date | not null | plain | |
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+ relname | parttype | partkey
+-------------+----------+---------
+ p_1 | p | 5
+ p_2 | p | 5
+ p_3 | p | 5
+ range_range | r | 2
+(4 rows)
+
+select * from range_range;
+ f1 | id | f2 | gender | birthday
+----+-----+----+--------+------------
+ 1 | 33 | | boy | 08-11-2003
+ 1 | 78 | | girl | 06-24-2014
+ 1 | 15 | | girl | 01-12-2009
+ 1 | 198 | | boy | 02-15-2010
+ 1 | 146 | | girl | 03-08-2005
+ 1 | 111 | | girl | 11-19-2013
+ 1 | 156 | | boy | 05-21-2011
+ 1 | 233 | | girl | 01-01-2010
+ 1 | 360 | | boy | 05-14-2007
+(9 rows)
+
+-- orientation = column not support
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (orientation = column);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+-- error
+alter table t1 add f6 text first;
+ERROR: Un-supported feature
+DETAIL: column orientated table is not supported for add column first|after columnName
+-- error
+alter table t1 add f6 text after f1;
+ERROR: Un-supported feature
+DETAIL: column orientated table is not supported for add column first|after columnName
+-- pg_constraint test
+set enable_default_ustore_table = on;
+drop table if exists t_pri cascade;
+NOTICE: table "t_pri" does not exist, skipping
+drop table if exists t1 cascade;
+create table t_pri(f1 int, f2 int, f3 int, primary key(f2, f3));
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri_pkey" for table "t_pri"
+create table t1
+(
+ f1 int, f2 int, f3 varchar(20), f4 int, f5 int, f6 int, f7 int,
+ foreign key(f1, f2) references t_pri(f2, f3),
+ unique((lower(f3)), (abs(f4))),
+ check(f5 = 10),
+ unique(f4, f5) include(f6, f7)
+);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_lower_abs_key" for table "t1"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f4_f5_f6_f7_key" for table "t1"
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't1') order by conname;
+ conname | contype | conkey | confkey | conbin | consrc | conincluding
+--------------------+---------+--------+---------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------+--------------
+ t1_f1_fkey | f | {1,2} | {2,3} | | |
+ t1_f4_f5_f6_f7_key | u | {4,5} | | | | {6,7}
+ t1_f5_check | c | {5} | | {OPEXPR :opno 96 :opfuncid 65 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 5 :location 198} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 203 :constvalue 4 [ 10 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 201} | (f5 = 10) |
+ t1_lower_abs_key | u | {0,0} | | | |
+(4 rows)
+
+alter table t_pri add f4 int first, add f5 int after f2;
+alter table t1 add f8 int primary key first, add f9 int unique after f3;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f9_key" for table "t1"
+\d+ t_pri
+ Table "public.t_pri"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f4 | integer | | plain | |
+ f1 | integer | | plain | |
+ f2 | integer | not null | plain | |
+ f5 | integer | | plain | |
+ f3 | integer | not null | plain | |
+Indexes:
+ "t_pri_pkey" PRIMARY KEY, ubtree (f2, f3) WITH (storage_type=USTORE) TABLESPACE pg_default
+Referenced by:
+ TABLE "t1" CONSTRAINT "t1_f1_fkey" FOREIGN KEY (f1, f2) REFERENCES t_pri(f2, f3)
+Has OIDs: no
+Options: orientation=row, compression=no, storage_type=USTORE
+
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't_pri') order by conname;
+ conname | contype | conkey | confkey | conbin | consrc | conincluding
+------------+---------+--------+---------+--------+--------+--------------
+ t_pri_pkey | p | {3,5} | | | |
+(1 row)
+
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------+-----------+----------+--------------+-------------
+ f8 | integer | not null | plain | |
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | character varying(20) | | extended | |
+ f9 | integer | | plain | |
+ f4 | integer | | plain | |
+ f5 | integer | | plain | |
+ f6 | integer | | plain | |
+ f7 | integer | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, ubtree (f8) WITH (storage_type=USTORE) TABLESPACE pg_default
+ "t1_f4_f5_f6_f7_key" UNIQUE CONSTRAINT, ubtree (f4, f5) WITH (storage_type=USTORE) TABLESPACE pg_default
+ "t1_f9_key" UNIQUE CONSTRAINT, ubtree (f9) WITH (storage_type=USTORE) TABLESPACE pg_default
+ "t1_lower_abs_key" UNIQUE CONSTRAINT, ubtree (lower(f3::text), abs(f4)) WITH (storage_type=USTORE) TABLESPACE pg_default
+Check constraints:
+ "t1_f5_check" CHECK (f5 = 10)
+Foreign-key constraints:
+ "t1_f1_fkey" FOREIGN KEY (f1, f2) REFERENCES t_pri(f2, f3)
+Has OIDs: no
+Options: orientation=row, compression=no, storage_type=USTORE
+
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't1') order by conname;
+ conname | contype | conkey | confkey | conbin | consrc | conincluding
+--------------------+---------+--------+---------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------+--------------
+ t1_f1_fkey | f | {2,3} | {3,5} | | |
+ t1_f4_f5_f6_f7_key | u | {6,7} | | | | {8,9}
+ t1_f5_check | c | {7} | | {OPEXPR :opno 96 :opfuncid 65 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 7 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 7 :location 198} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 203 :constvalue 4 [ 10 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 201} | (f5 = 10) |
+ t1_f9_key | u | {5} | | | |
+ t1_lower_abs_key | u | {0,0} | | | |
+ t1_pkey | p | {1} | | | |
+(6 rows)
+
+set enable_default_ustore_table = off;
+-- pg_index test
+drop table if exists t1 cascade;
+create table t1
+(
+ f1 int, f2 int, f3 varchar(20), f4 int, f5 int, f6 int, f7 int,
+ primary key(f1, f2),
+ unique((lower(f3)), (abs(f4))),
+ check(f5 = 10)
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_lower_abs_key" for table "t1"
+create unique index partial_t1_idx on t1(f5, abs(f6)) where f5 + f6 - abs(f7) > 0;
+select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1') order by 1, 2, 3;
+ indkey | indexprs | indpred
+--------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 0 0 | ({FUNCEXPR :funcid 870 :funcresulttype 25 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 100 :inputcollid 100 :args ({RELABELTYPE :arg {VAR :varno 1 :varattno 3 :vartype 1043 :vartypmod 24 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 3 :location 141} :resulttype 25 :resulttypmod -1 :resultcollid 100 :relabelformat 2 :location -1}) :location 135 :refSynOid 0} {FUNCEXPR :funcid 1397 :funcresulttype 23 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 4 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 4 :location 152}) :location 148 :refSynOid 0}) |
+ 1 2 | |
+ 5 0 | ({FUNCEXPR :funcid 1397 :funcresulttype 23 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 6 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 6 :location 49}) :location 45 :refSynOid 0}) | {OPEXPR :opno 521 :opfuncid 147 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({OPEXPR :opno 555 :opfuncid 181 :opresulttype 23 :opretset false :opcollid 0 :inputcollid 0 :args ({OPEXPR :opno 551 :opfuncid 177 :opresulttype 23 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 5 :location 60} {VAR :varno 1 :varattno 6 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 6 :location 65}) :location 63} {FUNCEXPR :funcid 1397 :funcresulttype 23 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 7 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 7 :location 74}) :location 70 :refSynOid 0}) :location 68} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 80 :constvalue 4 [ 0 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 78}
+(3 rows)
+
+alter table t1 add f8 int first, add f9 int unique after f1;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "t1_f9_key" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------+-----------+----------+--------------+-------------
+ f8 | integer | | plain | |
+ f1 | integer | not null | plain | |
+ f9 | integer | | plain | |
+ f2 | integer | not null | plain | |
+ f3 | character varying(20) | | extended | |
+ f4 | integer | | plain | |
+ f5 | integer | | plain | |
+ f6 | integer | | plain | |
+ f7 | integer | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1, f2) TABLESPACE pg_default
+ "partial_t1_idx" UNIQUE, btree (f5, abs(f6)) TABLESPACE pg_default WHERE (f5 + f6 - abs(f7)) > 0
+ "t1_f9_key" UNIQUE CONSTRAINT, btree (f9) TABLESPACE pg_default
+ "t1_lower_abs_key" UNIQUE CONSTRAINT, btree (lower(f3::text), abs(f4)) TABLESPACE pg_default
+Check constraints:
+ "t1_f5_check" CHECK (f5 = 10)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1') order by 1, 2, 3;
+ indkey | indexprs | indpred
+--------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 0 0 | ({FUNCEXPR :funcid 870 :funcresulttype 25 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 100 :inputcollid 100 :args ({RELABELTYPE :arg {VAR :varno 1 :varattno 5 :vartype 1043 :vartypmod 24 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 5 :location 141} :resulttype 25 :resulttypmod -1 :resultcollid 100 :relabelformat 2 :location -1}) :location 135 :refSynOid 0} {FUNCEXPR :funcid 1397 :funcresulttype 23 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 6 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 6 :location 152}) :location 148 :refSynOid 0}) |
+ 2 4 | |
+ 3 | |
+ 7 0 | ({FUNCEXPR :funcid 1397 :funcresulttype 23 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 8 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 8 :location 49}) :location 45 :refSynOid 0}) | {OPEXPR :opno 521 :opfuncid 147 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({OPEXPR :opno 555 :opfuncid 181 :opresulttype 23 :opretset false :opcollid 0 :inputcollid 0 :args ({OPEXPR :opno 551 :opfuncid 177 :opresulttype 23 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 7 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 7 :location 60} {VAR :varno 1 :varattno 8 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 8 :location 65}) :location 63} {FUNCEXPR :funcid 1397 :funcresulttype 23 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 9 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 9 :location 74}) :location 70 :refSynOid 0}) :location 68} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 80 :constvalue 4 [ 0 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 78}
+(4 rows)
+
+-- pg_attribute test
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int);
+select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum;
+ attname | attnum | atthasdef | attisdropped
+---------+--------+-----------+--------------
+ f1 | 1 | f | f
+ f2 | 2 | f | f
+ f3 | 3 | f | f
+(3 rows)
+
+alter table t1 add f4 int default 4 first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f4 | integer | default 4 | plain | |
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum;
+ attname | attnum | atthasdef | attisdropped
+---------+--------+-----------+--------------
+ f4 | 1 | t | f
+ f1 | 2 | f | f
+ f2 | 3 | f | f
+ f3 | 4 | f | f
+(4 rows)
+
+alter table t1 drop f2, add f5 int default 5 after f1;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f4 | integer | default 4 | plain | |
+ f1 | integer | | plain | |
+ f5 | integer | default 5 | plain | |
+ f3 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum;
+ attname | attnum | atthasdef | attisdropped
+------------------------------+--------+-----------+--------------
+ f4 | 1 | t | f
+ f1 | 2 | f | f
+ f5 | 3 | t | f
+ ........pg.dropped.4........ | 4 | f | t
+ f3 | 5 | f | f
+(5 rows)
+
+-- pg_attrdef test
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 int, f3 int default 3, f4 int generated always as (f2 + f3) stored);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum;
+ adnum | adsrc | adgencol
+-------+----------------+----------
+ 1 | AUTO_INCREMENT |
+ 3 | 3 |
+ 4 | (f2 + f3) | s
+(3 rows)
+
+alter table t1 add f5 text default 'aaa' first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+----------------------------------------+----------+--------------+-------------
+ f5 | text | default 'aaa'::text | extended | |
+ f1 | integer | not null AUTO_INCREMENT | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | default 3 | plain | |
+ f4 | integer | generated always as ((f2 + f3)) stored | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum;
+ adnum | adsrc | adgencol
+-------+----------------+----------
+ 1 | 'aaa'::text |
+ 2 | AUTO_INCREMENT |
+ 4 | 3 |
+ 5 | (f2 + f3) | s
+(4 rows)
+
+alter table t1 drop f2, add f6 int generated always as (f1 + abs(f3)) stored after f1; -- ERROR
+ERROR: generated column cannot refer to auto_increment column
+-- pg_rewrite test
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int);
+insert into t1 values(1, 2, 3), (11, 22, 33);
+create view t1_view1 as select * from t1;
+create view t1_view2 as select f1, f2 from t1;
+\d+ t1_view1
+ View "public.t1_view1"
+ Column | Type | Modifiers | Storage | Description
+--------+---------+-----------+---------+-------------
+ f1 | integer | | plain |
+ f2 | integer | | plain |
+ f3 | integer | | plain |
+View definition:
+ SELECT *
+ FROM t1;
+
+\d+ t1_view2
+ View "public.t1_view2"
+ Column | Type | Modifiers | Storage | Description
+--------+---------+-----------+---------+-------------
+ f1 | integer | | plain |
+ f2 | integer | | plain |
+View definition:
+ SELECT t1.f1, t1.f2
+ FROM t1;
+
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select pg_get_viewdef('t1_view1');
+ pg_get_viewdef
+--------------------
+ SELECT * FROM t1;
+(1 row)
+
+select pg_get_viewdef('t1_view2');
+ pg_get_viewdef
+------------------------------
+ SELECT t1.f1, t1.f2 FROM t1;
+(1 row)
+
+select * from t1_view1;
+ f1 | f2 | f3
+----+----+----
+ 1 | 2 | 3
+ 11 | 22 | 33
+(2 rows)
+
+select * from t1_view2;
+ f1 | f2
+----+----
+ 1 | 2
+ 11 | 22
+(2 rows)
+
+select * from t1;
+ f1 | f2 | f3
+----+----+----
+ 1 | 2 | 3
+ 11 | 22 | 33
+(2 rows)
+
+alter table t1 add f4 int first, add f5 int after f1;
+\d+ t1_view1
+ View "public.t1_view1"
+ Column | Type | Modifiers | Storage | Description
+--------+---------+-----------+---------+-------------
+ f1 | integer | | plain |
+ f2 | integer | | plain |
+ f3 | integer | | plain |
+View definition:
+ SELECT t1.f1, t1.f2, t1.f3
+ FROM t1;
+
+\d+ t1_view2
+ View "public.t1_view2"
+ Column | Type | Modifiers | Storage | Description
+--------+---------+-----------+---------+-------------
+ f1 | integer | | plain |
+ f2 | integer | | plain |
+View definition:
+ SELECT t1.f1, t1.f2
+ FROM t1;
+
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f4 | integer | | plain | |
+ f1 | integer | | plain | |
+ f5 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select pg_get_viewdef('t1_view1');
+ pg_get_viewdef
+-------------------------------------
+ SELECT t1.f1, t1.f2, t1.f3 FROM t1;
+(1 row)
+
+select pg_get_viewdef('t1_view2');
+ pg_get_viewdef
+------------------------------
+ SELECT t1.f1, t1.f2 FROM t1;
+(1 row)
+
+select * from t1_view1;
+ f1 | f2 | f3
+----+----+----
+ 1 | 2 | 3
+ 11 | 22 | 33
+(2 rows)
+
+select * from t1_view2;
+ f1 | f2
+----+----
+ 1 | 2
+ 11 | 22
+(2 rows)
+
+select * from t1;
+ f4 | f1 | f5 | f2 | f3
+----+----+----+----+----
+ | 1 | | 2 | 3
+ | 11 | | 22 | 33
+(2 rows)
+
+-- pg_trigger test
+drop table if exists t1 cascade;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to view t1_view1
+drop cascades to view t1_view2
+create table t1(f1 boolean not null, f2 text, f3 int, f4 date);
+alter table t1 add primary key(f1);
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+create or replace function dummy_update_func() returns trigger as $$
+begin
+ raise notice 'dummy_update_func(%) called: action = %, oid = %, new = %', TG_ARGV[0], TG_OP, OLD, NEW;
+ return new;
+end;
+$$ language plpgsql;
+drop trigger if exists f1_trig_update on t1;
+NOTICE: trigger "t1.f1_trig_update" for table "t1" does not exist, skipping
+drop trigger if exists f1_trig_insert on t1;
+NOTICE: trigger "t1.f1_trig_insert" for table "t1" does not exist, skipping
+create trigger f1_trig_update after update of f1 on t1 for each row
+ when (not old.f1 and new.f1) execute procedure dummy_update_func('update');
+create trigger f1_trig_insert after insert on t1 for each row
+ when (not new.f1) execute procedure dummy_update_func('insert');
+select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname;
+ tgname | tgattr | tgqual
+----------------+--------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ f1_trig_insert | | {BOOLEXPR :boolop not :args ({VAR :varno 2 :varattno 1 :vartype 16 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 2 :varoattno 1 :location 80}) :location 76}
+ f1_trig_update | 1 | {BOOLEXPR :boolop and :args ({BOOLEXPR :boolop not :args ({VAR :varno 1 :varattno 1 :vartype 16 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 86}) :location 82} {VAR :varno 2 :varattno 1 :vartype 16 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 2 :varoattno 1 :location 97}) :location 93}
+(2 rows)
+
+alter table t1 add f5 int after f1, add f6 boolean first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+----------+--------------+-------------
+ f6 | boolean | | plain | |
+ f1 | boolean | not null | plain | |
+ f5 | integer | | plain | |
+ f2 | text | | extended | |
+ f3 | integer | | plain | |
+ f4 | date | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1) TABLESPACE pg_default
+Triggers:
+ f1_trig_insert AFTER INSERT ON t1 FOR EACH ROW WHEN (NOT new.f1) EXECUTE PROCEDURE dummy_update_func('insert')
+ f1_trig_update AFTER UPDATE OF f1 ON t1 FOR EACH ROW WHEN (NOT old.f1 AND new.f1) EXECUTE PROCEDURE dummy_update_func('update')
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname;
+ tgname | tgattr | tgqual
+----------------+--------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ f1_trig_insert | | {BOOLEXPR :boolop not :args ({VAR :varno 2 :varattno 2 :vartype 16 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 2 :varoattno 2 :location 80}) :location 76}
+ f1_trig_update | 2 | {BOOLEXPR :boolop and :args ({BOOLEXPR :boolop not :args ({VAR :varno 1 :varattno 2 :vartype 16 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 86}) :location 82} {VAR :varno 2 :varattno 2 :vartype 16 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 2 :varoattno 2 :location 97}) :location 93}
+(2 rows)
+
+-- pg_rlspolicy test
+drop table if exists t1 cascade;
+drop role if exists test_rlspolicy2;
+NOTICE: role "test_rlspolicy2" does not exist, skipping
+create role test_rlspolicy2 nologin password 'Gauss_234';
+create table t1 (f1 int, f2 int, f3 text) partition by range (f1)
+(
+ partition t1_p0 values less than(10),
+ partition t1_p1 values less than(50),
+ partition t1_p2 values less than(100),
+ partition t1_p3 values less than(MAXVALUE)
+);
+INSERT INTO t1 VALUES (generate_series(1, 150) % 24, generate_series(1, 150), 'huawei');
+grant select on t1 to public;
+create row level security policy t1_rls1 on t1 as permissive to public using (f2 <= 20);
+create row level security policy t1_rls2 on t1 as restrictive to test_rlspolicy2 using (f1 < 30);
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+----------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | text | | extended | |
+Row Level Security Policies:
+ POLICY "t1_rls1" FOR ALL
+ TO public
+ USING ((f2 <= 20))
+ POLICY "t1_rls2" AS RESTRICTIVE FOR ALL
+ TO test_rlspolicy2
+ USING ((f1 < 30))
+Partition By RANGE(f1)
+Number of partitions: 4 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1 limit 10;
+ f1 | f2 | f3
+----+----+--------
+ 1 | 1 | huawei
+ 2 | 2 | huawei
+ 3 | 3 | huawei
+ 4 | 4 | huawei
+ 5 | 5 | huawei
+ 6 | 6 | huawei
+ 7 | 7 | huawei
+ 8 | 8 | huawei
+ 9 | 9 | huawei
+ 0 | 24 | huawei
+(10 rows)
+
+select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1');
+ polname | polqual
+---------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ t1_rls1 | {OPEXPR :opno 523 :opfuncid 149 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 2 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 78} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 84 :constvalue 4 [ 20 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 81}
+ t1_rls2 | {OPEXPR :opno 97 :opfuncid 66 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 88} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 93 :constvalue 4 [ 30 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 91}
+(2 rows)
+
+alter table t1 add f4 int generated always as (f1 + 100) stored after f1, add f5 int generated always as (f2 + 100) stored first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------------------------------------+----------+--------------+-------------
+ f5 | integer | generated always as ((f2 + 100)) stored | plain | |
+ f1 | integer | | plain | |
+ f4 | integer | generated always as ((f1 + 100)) stored | plain | |
+ f2 | integer | | plain | |
+ f3 | text | | extended | |
+Row Level Security Policies:
+ POLICY "t1_rls1" FOR ALL
+ TO public
+ USING ((f2 <= 20))
+ POLICY "t1_rls2" AS RESTRICTIVE FOR ALL
+ TO test_rlspolicy2
+ USING ((f1 < 30))
+Partition By RANGE(f1)
+Number of partitions: 4 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1 limit 10;
+ f5 | f1 | f4 | f2 | f3
+-----+----+-----+----+--------
+ 101 | 1 | 101 | 1 | huawei
+ 102 | 2 | 102 | 2 | huawei
+ 103 | 3 | 103 | 3 | huawei
+ 104 | 4 | 104 | 4 | huawei
+ 105 | 5 | 105 | 5 | huawei
+ 106 | 6 | 106 | 6 | huawei
+ 107 | 7 | 107 | 7 | huawei
+ 108 | 8 | 108 | 8 | huawei
+ 109 | 9 | 109 | 9 | huawei
+ 124 | 0 | 100 | 24 | huawei
+(10 rows)
+
+select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1');
+ polname | polqual
+---------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ t1_rls1 | {OPEXPR :opno 523 :opfuncid 149 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 4 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 4 :location 78} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 84 :constvalue 4 [ 20 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 81}
+ t1_rls2 | {OPEXPR :opno 97 :opfuncid 66 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 2 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 88} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 93 :constvalue 4 [ 30 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 91}
+(2 rows)
+
+-- expression test
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int, f4 bool, f5 text, f6 text);
+insert into t1 values(1, 2, 3, true, 'nanjin', 'huawei');
+-- T_FuncExpr
+create index t1_idx1 on t1(abs(f1), f2);
+-- T_OpExpr
+create index t1_idx2 on t1((f1 + f2), (f1 - f3));
+-- T_BooleanTest
+create index t1_idx3 on t1((f4 is true));
+-- T_CaseExpr and T_CaseWhen
+create index t1_idx4 on t1((case f1 when f2 then 'yes' when f3 then 'no' else 'unknow' end));
+-- T_ArrayExpr
+create index t1_idx5 on t1((array[f1, f2, f3]));
+-- T_TypeCast
+create index t1_idx6 on t1(((f1 + f2 + 1) :: text));
+-- T_BoolExpr
+create index t1_idx7 on t1((f1 and f2), (f2 or f3));
+-- T_ArrayRef
+create index t1_idx8 on t1((f1 = (array[f1, f2, 3])[1]));
+-- T_ScalarArrayOpExpr
+create index t1_idx9 on t1((f1 = ANY(ARRAY[f2, 1, f1 + 10])));
+-- T_RowCompareExpr
+create index t1_idx10 on t1((row(f1, f5) < row(f2, f6)));
+-- T_MinMaxExpr
+create index t1_idx11 on t1(greatest(f1, f2, f3), least(f1, f2, f3));
+-- T_RowExpr
+drop table if exists mytable cascade;
+NOTICE: table "mytable" does not exist, skipping
+create table mytable(f1 int, f2 int, f3 text);
+create function getf1(mytable) returns int as 'select $1.f1' language sql;
+create index t1_idx12 on t1(getf1(row(f1, 2, 'a')));
+-- T_CoalesceExpr
+create index t1_idx13 on t1(nvl(f1, f2));
+-- T_NullTest
+create index t1_idx14 on t1((f1 is null));
+-- T_ScalarArrayOpExpr
+create index t1_idx16 on t1((f1 in (1,2,3)));
+-- T_NullIfExpr
+create index t1_idx17 on t1(nullif(f5,f6));
+-- T_RelabelType
+alter table t1 add f7 oid;
+create index t1_idx18 on t1((f7::int4));
+-- T_CoerceViaIO
+alter table t1 add f8 json;
+create index t1_idx19 on t1((f8::jsonb));
+-- T_ArrayCoerceExpr
+alter table t1 add f9 float[];
+create index t1_idx20 on t1((f9::int[]));
+-- T_PrefixKey
+create index t1_idx21 on t1(f6(5));
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+--------------------+-----------+----------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | | plain | |
+ f4 | boolean | | plain | |
+ f5 | text | | extended | |
+ f6 | text | | extended | |
+ f7 | oid | | plain | |
+ f8 | json | | extended | |
+ f9 | double precision[] | | extended | |
+Indexes:
+ "t1_idx1" btree (abs(f1), f2) TABLESPACE pg_default
+ "t1_idx10" btree (((ROW(f1, f5) < ROW(f2, f6)))) TABLESPACE pg_default
+ "t1_idx11" btree ((GREATEST(f1, f2, f3)), (LEAST(f1, f2, f3))) TABLESPACE pg_default
+ "t1_idx12" btree (getf1(ROW(f1, 2, 'a'::text))) TABLESPACE pg_default
+ "t1_idx13" btree ((COALESCE(f1, f2))) TABLESPACE pg_default
+ "t1_idx14" btree ((f1 IS NULL)) TABLESPACE pg_default
+ "t1_idx16" btree ((f1 = ANY (ARRAY[1, 2, 3]))) TABLESPACE pg_default
+ "t1_idx17" btree ((NULLIF(f5, f6))) TABLESPACE pg_default
+ "t1_idx18" btree ((f7::integer)) TABLESPACE pg_default
+ "t1_idx19" btree ((f8::jsonb)) TABLESPACE pg_default
+ "t1_idx2" btree ((f1 + f2), (f1 - f3)) TABLESPACE pg_default
+ "t1_idx20" btree ((f9::integer[])) TABLESPACE pg_default
+ "t1_idx21" btree (f6(5)) TABLESPACE pg_default
+ "t1_idx3" btree ((f4 IS TRUE)) TABLESPACE pg_default
+ "t1_idx4" btree ((
+CASE f1
+ WHEN f2 THEN 'yes'::text
+ WHEN f3 THEN 'no'::text
+ ELSE 'unknow'::text
+END)) TABLESPACE pg_default
+ "t1_idx5" btree ((ARRAY[f1, f2, f3])) TABLESPACE pg_default
+ "t1_idx6" btree (((f1 + f2 + 1)::text)) TABLESPACE pg_default
+ "t1_idx7" btree ((f1 AND f2), (f2 OR f3)) TABLESPACE pg_default
+ "t1_idx8" btree ((f1 = (ARRAY[f1, f2, 3])[1])) TABLESPACE pg_default
+ "t1_idx9" btree ((f1 = ANY (ARRAY[f2, 1, f1 + 10]))) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f1 | f2 | f3 | f4 | f5 | f6 | f7 | f8 | f9
+----+----+----+----+--------+--------+----+----+----
+ 1 | 2 | 3 | t | nanjin | huawei | | |
+(1 row)
+
+alter table t1 add f10 int primary key auto_increment after f4,
+ add f11 int generated always as (f1 + f2) stored after f1,
+ add f12 date default '2023-01-05' first,
+ add f13 int not null default 13 first;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f10_seq" for serial column "t1.f10"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+--------------------+----------------------------------------+----------+--------------+-------------
+ f13 | integer | not null default 13 | plain | |
+ f12 | date | default '01-05-2023'::date | plain | |
+ f1 | integer | | plain | |
+ f11 | integer | generated always as ((f1 + f2)) stored | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | | plain | |
+ f4 | boolean | | plain | |
+ f10 | integer | not null AUTO_INCREMENT | plain | |
+ f5 | text | | extended | |
+ f6 | text | | extended | |
+ f7 | oid | | plain | |
+ f8 | json | | extended | |
+ f9 | double precision[] | | extended | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f10) TABLESPACE pg_default
+ "t1_idx1" btree (abs(f1), f2) TABLESPACE pg_default
+ "t1_idx10" btree (((ROW(f1, f5) < ROW(f2, f6)))) TABLESPACE pg_default
+ "t1_idx11" btree ((GREATEST(f1, f2, f3)), (LEAST(f1, f2, f3))) TABLESPACE pg_default
+ "t1_idx12" btree (getf1(ROW(f1, 2, 'a'::text))) TABLESPACE pg_default
+ "t1_idx13" btree ((COALESCE(f1, f2))) TABLESPACE pg_default
+ "t1_idx14" btree ((f1 IS NULL)) TABLESPACE pg_default
+ "t1_idx16" btree ((f1 = ANY (ARRAY[1, 2, 3]))) TABLESPACE pg_default
+ "t1_idx17" btree ((NULLIF(f5, f6))) TABLESPACE pg_default
+ "t1_idx18" btree ((f7::integer)) TABLESPACE pg_default
+ "t1_idx19" btree ((f8::jsonb)) TABLESPACE pg_default
+ "t1_idx2" btree ((f1 + f2), (f1 - f3)) TABLESPACE pg_default
+ "t1_idx20" btree ((f9::integer[])) TABLESPACE pg_default
+ "t1_idx21" btree (f6(5)) TABLESPACE pg_default
+ "t1_idx3" btree ((f4 IS TRUE)) TABLESPACE pg_default
+ "t1_idx4" btree ((
+CASE f1
+ WHEN f2 THEN 'yes'::text
+ WHEN f3 THEN 'no'::text
+ ELSE 'unknow'::text
+END)) TABLESPACE pg_default
+ "t1_idx5" btree ((ARRAY[f1, f2, f3])) TABLESPACE pg_default
+ "t1_idx6" btree (((f1 + f2 + 1)::text)) TABLESPACE pg_default
+ "t1_idx7" btree ((f1 AND f2), (f2 OR f3)) TABLESPACE pg_default
+ "t1_idx8" btree ((f1 = (ARRAY[f1, f2, 3])[1])) TABLESPACE pg_default
+ "t1_idx9" btree ((f1 = ANY (ARRAY[f2, 1, f1 + 10]))) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f13 | f12 | f1 | f11 | f2 | f3 | f4 | f10 | f5 | f6 | f7 | f8 | f9
+-----+------------+----+-----+----+----+----+-----+--------+--------+----+----+----
+ 13 | 01-05-2023 | 1 | 3 | 2 | 3 | t | 1 | nanjin | huawei | | |
+(1 row)
+
+-- test modify column ... first | after column in astore table
+-- ASTORE table
+-- common scenatios
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4, modify f5 bool after f2;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f5 | boolean | | plain | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f3 | f2 | f5 | f4 | f1
+---------------------------------+----+----+----------+----
+ Tue Nov 08 19:56:10.158564 2022 | a | t | 01000001 | 1
+ Wed Nov 09 19:56:10.158564 2022 | b | f | 01000010 | 2
+(2 rows)
+
+alter table t1 modify
+-- 1 primary key
+drop table if exists t1 cascade;
+ERROR: syntax error at or near "table"
+LINE 3: drop table if exists t1 cascade;
+ ^
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+ERROR: relation "t1" already exists in schema "public"
+DETAIL: creating new table with existing name in the same schema
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f5 | boolean | | plain | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+alter table t1 modify f1 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f5 | boolean | | plain | |
+ f4 | bit(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f5 | boolean | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | not null | plain | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f3 | f2 | f4 | f1 | f5
+---------------------------------+----+----------+----+----
+ Tue Nov 08 19:56:10.158564 2022 | a | 01000001 | 1 | t
+ Wed Nov 09 19:56:10.158564 2022 | b | 01000010 | 2 | f
+(2 rows)
+
+alter table t1 modify f1 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f1 | integer | not null | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f3 | f1 | f2 | f4 | f5
+---------------------------------+----+----+----------+----
+ Tue Nov 08 19:56:10.158564 2022 | 1 | a | 01000001 | t
+ Wed Nov 09 19:56:10.158564 2022 | 2 | b | 01000010 | f
+(2 rows)
+
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f5 | boolean | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f5 | f3 | f2 | f4
+----+---------------------------------+----+----------
+ t | Tue Nov 08 19:56:10.158564 2022 | a | 01000001
+ f | Wed Nov 09 19:56:10.158564 2022 | b | 01000010
+(2 rows)
+
+-- 2 unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | | plain | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_f1_key" UNIQUE CONSTRAINT, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+alter table t1 modify f1 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_f1_key" UNIQUE CONSTRAINT, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f5 | boolean | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | | plain | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_f1_key" UNIQUE CONSTRAINT, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f3 | f2 | f4 | f1 | f5
+---------------------------------+----+----------+----+----
+ Tue Nov 08 19:56:10.158564 2022 | a | 01000001 | 1 | t
+ Wed Nov 09 19:56:10.158564 2022 | b | 01000010 | 2 | f
+(2 rows)
+
+alter table t1 modify f1 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_f1_key" UNIQUE CONSTRAINT, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f3 | f1 | f2 | f4 | f5
+---------------------------------+----+----+----------+----
+ Tue Nov 08 19:56:10.158564 2022 | 1 | a | 01000001 | t
+ Wed Nov 09 19:56:10.158564 2022 | 2 | b | 01000010 | f
+(2 rows)
+
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f5 | boolean | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f5 | f3 | f2 | f4
+----+---------------------------------+----+----------
+ t | Tue Nov 08 19:56:10.158564 2022 | a | 01000001
+ f | Wed Nov 09 19:56:10.158564 2022 | b | 01000010
+(2 rows)
+
+-- 3 default and generated column
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | | plain | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+alter table t1 modify f1 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f5 | boolean | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | | plain | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f3 | f2 | f4 | f1 | f5
+---------------------------------+----+----------+----+----
+ Tue Nov 08 19:56:10.158564 2022 | a | 01000001 | 1 | t
+ Wed Nov 09 19:56:10.158564 2022 | b | 01000010 | 2 | f
+(2 rows)
+
+alter table t1 modify f1 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f3 | f1 | f2 | f4 | f5
+---------------------------------+----+----+----------+----
+ Tue Nov 08 19:56:10.158564 2022 | 1 | a | 01000001 | t
+ Wed Nov 09 19:56:10.158564 2022 | 2 | b | 01000010 | f
+(2 rows)
+
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f5 | boolean | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f5 | f3 | f2 | f4
+----+---------------------------------+----+----------
+ t | Tue Nov 08 19:56:10.158564 2022 | a | 01000001
+ f | Wed Nov 09 19:56:10.158564 2022 | b | 01000010
+(2 rows)
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored);
+alter table t1 modify f4 int after f2, modify f1 int after f3, modify f3 int first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f3 | integer | | plain | |
+ f2 | integer | default 2 | plain | |
+ f4 | integer | | plain | |
+ f1 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+alter table t1 drop f1;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f3 | integer | | plain | |
+ f2 | integer | default 2 | plain | |
+ f4 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored);
+insert into t1 values(1,2,3),(11,22,33);
+alter table t1 modify f4 int after f2, modify f1 int after f3, modify f3 int first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f3 | integer | | plain | |
+ f2 | integer | default 2 | plain | |
+ f4 | integer | | plain | |
+ f1 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f3 | f2 | f4 | f1
+----+----+----+----
+ 3 | 2 | 3 | 1
+ 33 | 22 | 33 | 11
+(2 rows)
+
+alter table t1 drop f1;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f3 | integer | | plain | |
+ f2 | integer | default 2 | plain | |
+ f4 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f3 | f2 | f4
+----+----+----
+ 3 | 2 | 3
+ 33 | 22 | 33
+(2 rows)
+
+-- 4 auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-------------------------+----------+--------------+-------------
+ f1 | integer | not null AUTO_INCREMENT | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | not null | plain | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1(f2, f3, f4, f5) values('a', '2022-11-08 19:56:10.158564', x'41', true), ('b', '2022-11-09 19:56:10.158564', x'42', false);
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-------------------------+----------+--------------+-------------
+ f1 | integer | not null AUTO_INCREMENT | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f1 | f2 | f3 | f4 | f5
+----+----+---------------------------------+----------+----
+ 1 | a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ 2 | b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | not null | plain | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f3 | f2 | f4 | f1 | f5
+---------------------------------+----+----------+----+----
+ Tue Nov 08 19:56:10.158564 2022 | a | 01000001 | 1 | t
+ Wed Nov 09 19:56:10.158564 2022 | b | 01000010 | 2 | f
+(2 rows)
+
+insert into t1(f3, f2, f4, f5, f1) values('2022-11-10 19:56:10.158564', 'c', x'43', false, 3);
+select f1 from t1;
+ f1
+----
+ 1
+ 2
+ 3
+(3 rows)
+
+-- 5 NULL and NOT NULL
+drop table if exists t1 cascade;
+alter table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+ERROR: syntax error at or near "("
+LINE 1: alter table t1(f1 int null, f2 varchar(20) not null, f3 time...
+ ^
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+ERROR: relation "t1" does not exist
+\d+ t1
+alter table t1 modify f1 int after f3;
+ERROR: relation "t1" does not exist
+\d+ t1
+alter table t1 drop f1, modify f5 bool first;
+ERROR: relation "t1" does not exist
+\d+ t1
+alter table t1 modify f2 varchar(20) after f3;
+ERROR: relation "t1" does not exist
+\d+ t1
+drop table if exists t1 cascade;
+NOTICE: table "t1" does not exist, skipping
+alter table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+ERROR: syntax error at or near "("
+LINE 1: alter table t1(f1 int null, f2 varchar(20) not null, f3 time...
+ ^
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+ERROR: relation "t1" does not exist on datanode1
+LINE 1: insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', ...
+ ^
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+ERROR: relation "t1" does not exist
+\d+ t1
+select * from t1;
+ERROR: relation "t1" does not exist on datanode1
+LINE 1: select * from t1;
+ ^
+alter table t1 modify f1 int after f3;
+ERROR: relation "t1" does not exist
+\d+ t1
+select * from t1;
+ERROR: relation "t1" does not exist on datanode1
+LINE 1: select * from t1;
+ ^
+alter table t1 drop f1, modify f5 bool first;
+ERROR: relation "t1" does not exist
+\d+ t1
+select * from t1;
+ERROR: relation "t1" does not exist on datanode1
+LINE 1: select * from t1;
+ ^
+alter table t1 modify f2 varchar(20) after f3;
+ERROR: relation "t1" does not exist
+\d+ t1
+select * from t1;
+ERROR: relation "t1" does not exist on datanode1
+LINE 1: select * from t1;
+ ^
+-- 6 check constraint
+drop table if exists t1 cascade;
+NOTICE: table "t1" does not exist, skipping
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | | plain | |
+ f5 | boolean | | plain | |
+Check constraints:
+ "t1_f1_check" CHECK (f1 = 1)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+alter table t1 modify f1 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Check constraints:
+ "t1_f1_check" CHECK (f1 = 1)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f5 | boolean | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+ERROR: new row for relation "t1" violates check constraint "t1_f1_check"
+DETAIL: N/A
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | | plain | |
+ f5 | boolean | | plain | |
+Check constraints:
+ "t1_f1_check" CHECK (f1 = 1)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f3 | f2 | f4 | f1 | f5
+----+----+----+----+----
+(0 rows)
+
+alter table t1 modify f1 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Check constraints:
+ "t1_f1_check" CHECK (f1 = 1)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f3 | f1 | f2 | f4 | f5
+----+----+----+----+----
+(0 rows)
+
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f5 | boolean | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f5 | f3 | f2 | f4
+----+----+----+----
+(0 rows)
+
+-- 7 foreign key
+drop table if exists t_pri1 cascade;
+create table t_pri1(f1 int, f2 int primary key);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri1_pkey" for table "t_pri1"
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool);
+alter table t1 modify f2 int first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f2 | integer | | plain | |
+ f1 | integer | | plain | |
+ f3 | boolean | | plain | |
+Foreign-key constraints:
+ "t1_f2_fkey" FOREIGN KEY (f2) REFERENCES t_pri1(f2)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+alter table t1 modify f2 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f1 | integer | | plain | |
+ f3 | boolean | | plain | |
+ f2 | integer | | plain | |
+Foreign-key constraints:
+ "t1_f2_fkey" FOREIGN KEY (f2) REFERENCES t_pri1(f2)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+drop table if exists t_pri1 cascade;
+NOTICE: drop cascades to constraint t1_f2_fkey on table t1
+create table t_pri1(f1 int, f2 int primary key);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri1_pkey" for table "t_pri1"
+insert into t_pri1 values(1,1),(2,2);
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool);
+insert into t1 values(1, 1, true), (2, 2, false);
+alter table t1 modify f2 int first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f2 | integer | | plain | |
+ f1 | integer | | plain | |
+ f3 | boolean | | plain | |
+Foreign-key constraints:
+ "t1_f2_fkey" FOREIGN KEY (f2) REFERENCES t_pri1(f2)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f2 | f1 | f3
+----+----+----
+ 1 | 1 | t
+ 2 | 2 | f
+(2 rows)
+
+alter table t1 modify f2 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f1 | integer | | plain | |
+ f3 | boolean | | plain | |
+ f2 | integer | | plain | |
+Foreign-key constraints:
+ "t1_f2_fkey" FOREIGN KEY (f2) REFERENCES t_pri1(f2)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f1 | f3 | f2
+----+----+----
+ 1 | t | 1
+ 2 | f | 2
+(2 rows)
+
+-- partition table
+drop table if exists t1 cascade;
+create table t1
+(f1 int, f2 int, f3 int, primary key (f1, f2))
+partition by range(f1, f2)
+(
+ partition t1_p0 values less than (10, 0),
+ partition t1_p1 values less than (20, 0),
+ partition t1_p2 values less than (30, 0)
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 1 2
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+alter table t1 modify f1 int after f2, modify f3 int first, modify f2 int first;
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 3 1
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+alter table t1 modify f1 int after f2;
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 2 1
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+-- modify operation before add
+alter table t1 add f4 int after f2, modify f1 int after f2;
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1');
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 3 1
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+drop table if exists t1 cascade;
+create table t1
+(f1 int, f2 int, f3 int, primary key (f1, f2))
+partition by range(f1, f2)
+(
+ partition t1_p0 values less than (10, 0),
+ partition t1_p1 values less than (20, 0),
+ partition t1_p2 values less than (30, 0)
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(9, -1, 1), (19, -1, 2), (29, -1, 3);
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f1 | integer | not null | plain | |
+ f2 | integer | not null | plain | |
+ f3 | integer | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1, f2) LOCAL TABLESPACE pg_default
+Partition By RANGE(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1');
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 1 2
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+select * from t1 partition (t1_p0);
+ f1 | f2 | f3
+----+----+----
+ 9 | -1 | 1
+(1 row)
+
+select * from t1 partition (t1_p1);
+ f1 | f2 | f3
+----+----+----
+ 19 | -1 | 2
+(1 row)
+
+select * from t1 partition (t1_p2);
+ f1 | f2 | f3
+----+----+----
+ 29 | -1 | 3
+(1 row)
+
+alter table t1 modify f1 int after f2, modify f3 int first, modify f2 int first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f2 | integer | not null | plain | |
+ f3 | integer | | plain | |
+ f1 | integer | not null | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1, f2) LOCAL TABLESPACE pg_default
+Partition By RANGE(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1');
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 3 1
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+select * from t1 partition (t1_p0);
+ f2 | f3 | f1
+----+----+----
+ -1 | 1 | 9
+(1 row)
+
+select * from t1 partition (t1_p1);
+ f2 | f3 | f1
+----+----+----
+ -1 | 2 | 19
+(1 row)
+
+select * from t1 partition (t1_p2);
+ f2 | f3 | f1
+----+----+----
+ -1 | 3 | 29
+(1 row)
+
+alter table t1 modify f1 int after f2;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f2 | integer | not null | plain | |
+ f1 | integer | not null | plain | |
+ f3 | integer | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1, f2) LOCAL TABLESPACE pg_default
+Partition By RANGE(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1');
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 2 1
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+select * from t1 partition (t1_p0);
+ f2 | f1 | f3
+----+----+----
+ -1 | 9 | 1
+(1 row)
+
+select * from t1 partition (t1_p1);
+ f2 | f1 | f3
+----+----+----
+ -1 | 19 | 2
+(1 row)
+
+select * from t1 partition (t1_p2);
+ f2 | f1 | f3
+----+----+----
+ -1 | 29 | 3
+(1 row)
+
+alter table t1 add f4 int after f2, modify f1 int after f2;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f2 | integer | not null | plain | |
+ f4 | integer | | plain | |
+ f1 | integer | not null | plain | |
+ f3 | integer | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1, f2) LOCAL TABLESPACE pg_default
+Partition By RANGE(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1');
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 3 1
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+select * from t1 partition (t1_p0);
+ f2 | f4 | f1 | f3
+----+----+----+----
+ -1 | | 9 | 1
+(1 row)
+
+select * from t1 partition (t1_p1);
+ f2 | f4 | f1 | f3
+----+----+----+----
+ -1 | | 19 | 2
+(1 row)
+
+select * from t1 partition (t1_p2);
+ f2 | f4 | f1 | f3
+----+----+----+----
+ -1 | | 29 | 3
+(1 row)
+
+-- subpartition table
+drop table if exists range_range cascade;
+create table range_range(id int, gender varchar not null, birthday date not null, primary key(id, birthday))
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range"
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+ relname | parttype | partkey
+-------------+----------+---------
+ p_1 | p | 3
+ p_2 | p | 3
+ p_3 | p | 3
+ range_range | r | 1
+(4 rows)
+
+alter table range_range modify birthday date first, modify id int after gender;
+\d+ range_range
+ Table "public.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ birthday | date | not null | plain | |
+ gender | character varying | not null | extended | |
+ id | integer | not null | plain | |
+Indexes:
+ "range_range_pkey" PRIMARY KEY, btree (id, birthday) LOCAL TABLESPACE pg_default
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+ relname | parttype | partkey
+-------------+----------+---------
+ p_1 | p | 1
+ p_2 | p | 1
+ p_3 | p | 1
+ range_range | r | 3
+(4 rows)
+
+drop table if exists range_range cascade;
+create table range_range(id int, gender varchar not null, birthday date not null, primary key(id, birthday))
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range"
+insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24');
+insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08');
+insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21');
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+ relname | parttype | partkey
+-------------+----------+---------
+ p_1 | p | 3
+ p_2 | p | 3
+ p_3 | p | 3
+ range_range | r | 1
+(4 rows)
+
+alter table range_range modify birthday date first, modify id int after gender;
+\d+ range_range
+ Table "public.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ birthday | date | not null | plain | |
+ gender | character varying | not null | extended | |
+ id | integer | not null | plain | |
+Indexes:
+ "range_range_pkey" PRIMARY KEY, btree (id, birthday) LOCAL TABLESPACE pg_default
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+ relname | parttype | partkey
+-------------+----------+---------
+ p_1 | p | 1
+ p_2 | p | 1
+ p_3 | p | 1
+ range_range | r | 3
+(4 rows)
+
+select * from range_range;
+ birthday | gender | id
+------------+--------+-----
+ 08-11-2003 | boy | 33
+ 06-24-2014 | girl | 78
+ 01-12-2009 | girl | 15
+ 02-15-2010 | boy | 198
+ 03-08-2005 | girl | 146
+ 11-19-2013 | girl | 111
+ 05-21-2011 | boy | 156
+ 01-01-2010 | girl | 233
+ 05-14-2007 | boy | 360
+(9 rows)
+
+-- USTORE table
+-- common scenatios
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4, modify f5 bool after f2;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f5 | boolean | | plain | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f3 | f2 | f5 | f4 | f1
+---------------------------------+----+----+----------+----
+ Tue Nov 08 19:56:10.158564 2022 | a | t | 01000001 | 1
+ Wed Nov 09 19:56:10.158564 2022 | b | f | 01000010 | 2
+(2 rows)
+
+alter table t1 modify
+-- 1 primary key
+drop table if exists t1 cascade;
+ERROR: syntax error at or near "table"
+LINE 3: drop table if exists t1 cascade;
+ ^
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+ERROR: relation "t1" already exists in schema "public"
+DETAIL: creating new table with existing name in the same schema
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f5 | boolean | | plain | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+alter table t1 modify f1 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f5 | boolean | | plain | |
+ f4 | bit(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f5 | boolean | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | not null | plain | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=ustore) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f3 | f2 | f4 | f1 | f5
+---------------------------------+----+----------+----+----
+ Tue Nov 08 19:56:10.158564 2022 | a | 01000001 | 1 | t
+ Wed Nov 09 19:56:10.158564 2022 | b | 01000010 | 2 | f
+(2 rows)
+
+alter table t1 modify f1 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f1 | integer | not null | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=ustore) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f3 | f1 | f2 | f4 | f5
+---------------------------------+----+----+----------+----
+ Tue Nov 08 19:56:10.158564 2022 | 1 | a | 01000001 | t
+ Wed Nov 09 19:56:10.158564 2022 | 2 | b | 01000010 | f
+(2 rows)
+
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f5 | boolean | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f5 | f3 | f2 | f4
+----+---------------------------------+----+----------
+ t | Tue Nov 08 19:56:10.158564 2022 | a | 01000001
+ f | Wed Nov 09 19:56:10.158564 2022 | b | 01000010
+(2 rows)
+
+-- 2 unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | | plain | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_f1_key" UNIQUE CONSTRAINT, ubtree (f1) WITH (storage_type=ustore) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+alter table t1 modify f1 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_f1_key" UNIQUE CONSTRAINT, ubtree (f1) WITH (storage_type=ustore) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f5 | boolean | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f1_key" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | | plain | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_f1_key" UNIQUE CONSTRAINT, ubtree (f1) WITH (storage_type=ustore) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f3 | f2 | f4 | f1 | f5
+---------------------------------+----+----------+----+----
+ Tue Nov 08 19:56:10.158564 2022 | a | 01000001 | 1 | t
+ Wed Nov 09 19:56:10.158564 2022 | b | 01000010 | 2 | f
+(2 rows)
+
+alter table t1 modify f1 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_f1_key" UNIQUE CONSTRAINT, ubtree (f1) WITH (storage_type=ustore) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f3 | f1 | f2 | f4 | f5
+---------------------------------+----+----+----------+----
+ Tue Nov 08 19:56:10.158564 2022 | 1 | a | 01000001 | t
+ Wed Nov 09 19:56:10.158564 2022 | 2 | b | 01000010 | f
+(2 rows)
+
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f5 | boolean | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f5 | f3 | f2 | f4
+----+---------------------------------+----+----------
+ t | Tue Nov 08 19:56:10.158564 2022 | a | 01000001
+ f | Wed Nov 09 19:56:10.158564 2022 | b | 01000010
+(2 rows)
+
+-- 3 default and generated column
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | | plain | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+alter table t1 modify f1 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f5 | boolean | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | | plain | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f3 | f2 | f4 | f1 | f5
+---------------------------------+----+----------+----+----
+ Tue Nov 08 19:56:10.158564 2022 | a | 01000001 | 1 | t
+ Wed Nov 09 19:56:10.158564 2022 | b | 01000010 | 2 | f
+(2 rows)
+
+alter table t1 modify f1 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f3 | f1 | f2 | f4 | f5
+---------------------------------+----+----+----------+----
+ Tue Nov 08 19:56:10.158564 2022 | 1 | a | 01000001 | t
+ Wed Nov 09 19:56:10.158564 2022 | 2 | b | 01000010 | f
+(2 rows)
+
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f5 | boolean | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f5 | f3 | f2 | f4
+----+---------------------------------+----+----------
+ t | Tue Nov 08 19:56:10.158564 2022 | a | 01000001
+ f | Wed Nov 09 19:56:10.158564 2022 | b | 01000010
+(2 rows)
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored) with(storage_type = ustore);
+alter table t1 modify f4 int after f2, modify f1 int after f3, modify f3 int first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f3 | integer | | plain | |
+ f2 | integer | default 2 | plain | |
+ f4 | integer | | plain | |
+ f1 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+alter table t1 drop f1;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f3 | integer | | plain | |
+ f2 | integer | default 2 | plain | |
+ f4 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored) with(storage_type = ustore);
+insert into t1 values(1,2,3),(11,22,33);
+alter table t1 modify f4 int after f2, modify f1 int after f3, modify f3 int first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f3 | integer | | plain | |
+ f2 | integer | default 2 | plain | |
+ f4 | integer | | plain | |
+ f1 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f3 | f2 | f4 | f1
+----+----+----+----
+ 3 | 2 | 3 | 1
+ 33 | 22 | 33 | 11
+(2 rows)
+
+alter table t1 drop f1;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f3 | integer | | plain | |
+ f2 | integer | default 2 | plain | |
+ f4 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f3 | f2 | f4
+----+----+----
+ 3 | 2 | 3
+ 33 | 22 | 33
+(2 rows)
+
+-- 4 auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-------------------------+----------+--------------+-------------
+ f1 | integer | not null AUTO_INCREMENT | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=USTORE) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | not null | plain | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=ustore) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1(f2, f3, f4, f5) values('a', '2022-11-08 19:56:10.158564', x'41', true), ('b', '2022-11-09 19:56:10.158564', x'42', false);
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-------------------------+----------+--------------+-------------
+ f1 | integer | not null AUTO_INCREMENT | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=USTORE) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f1 | f2 | f3 | f4 | f5
+----+----+---------------------------------+----------+----
+ 1 | a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ 2 | b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | not null | plain | |
+ f5 | boolean | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, ubtree (f1) WITH (storage_type=ustore) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f3 | f2 | f4 | f1 | f5
+---------------------------------+----+----------+----+----
+ Tue Nov 08 19:56:10.158564 2022 | a | 01000001 | 1 | t
+ Wed Nov 09 19:56:10.158564 2022 | b | 01000010 | 2 | f
+(2 rows)
+
+insert into t1(f3, f2, f4, f5, f1) values('2022-11-10 19:56:10.158564', 'c', x'43', false, 3);
+select f1 from t1;
+ f1
+----
+ 1
+ 2
+ 3
+(3 rows)
+
+-- 5 NULL and NOT NULL
+drop table if exists t1 cascade;
+alter table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+ERROR: syntax error at or near "("
+LINE 1: alter table t1(f1 int null, f2 varchar(20) not null, f3 time...
+ ^
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+ERROR: relation "t1" does not exist
+\d+ t1
+alter table t1 modify f1 int after f3;
+ERROR: relation "t1" does not exist
+\d+ t1
+alter table t1 drop f1, modify f5 bool first;
+ERROR: relation "t1" does not exist
+\d+ t1
+alter table t1 modify f2 varchar(20) after f3;
+ERROR: relation "t1" does not exist
+\d+ t1
+drop table if exists t1 cascade;
+NOTICE: table "t1" does not exist, skipping
+alter table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+ERROR: syntax error at or near "("
+LINE 1: alter table t1(f1 int null, f2 varchar(20) not null, f3 time...
+ ^
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+ERROR: relation "t1" does not exist on datanode1
+LINE 1: insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', ...
+ ^
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+ERROR: relation "t1" does not exist
+\d+ t1
+select * from t1;
+ERROR: relation "t1" does not exist on datanode1
+LINE 1: select * from t1;
+ ^
+alter table t1 modify f1 int after f3;
+ERROR: relation "t1" does not exist
+\d+ t1
+select * from t1;
+ERROR: relation "t1" does not exist on datanode1
+LINE 1: select * from t1;
+ ^
+alter table t1 drop f1, modify f5 bool first;
+ERROR: relation "t1" does not exist
+\d+ t1
+select * from t1;
+ERROR: relation "t1" does not exist on datanode1
+LINE 1: select * from t1;
+ ^
+alter table t1 modify f2 varchar(20) after f3;
+ERROR: relation "t1" does not exist
+\d+ t1
+select * from t1;
+ERROR: relation "t1" does not exist on datanode1
+LINE 1: select * from t1;
+ ^
+-- 6 check constraint
+drop table if exists t1 cascade;
+NOTICE: table "t1" does not exist, skipping
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | | plain | |
+ f5 | boolean | | plain | |
+Check constraints:
+ "t1_f1_check" CHECK (f1 = 1)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+alter table t1 modify f1 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Check constraints:
+ "t1_f1_check" CHECK (f1 = 1)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f5 | boolean | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+ERROR: new row for relation "t1" violates check constraint "t1_f1_check"
+DETAIL: N/A
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f1 | integer | | plain | |
+ f5 | boolean | | plain | |
+Check constraints:
+ "t1_f1_check" CHECK (f1 = 1)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f3 | f2 | f4 | f1 | f5
+----+----+----+----+----
+(0 rows)
+
+alter table t1 modify f1 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f3 | timestamp without time zone | | plain | |
+ f1 | integer | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+Check constraints:
+ "t1_f1_check" CHECK (f1 = 1)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f3 | f1 | f2 | f4 | f5
+----+----+----+----+----
+(0 rows)
+
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------+----------+--------------+-------------
+ f5 | boolean | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f2 | character varying(20) | | extended | |
+ f4 | bit(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f5 | f3 | f2 | f4
+----+----+----+----
+(0 rows)
+
+-- 7 foreign key
+drop table if exists t_pri1 cascade;
+create table t_pri1(f1 int, f2 int primary key) with(storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri1_pkey" for table "t_pri1"
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool) with(storage_type = ustore);
+alter table t1 modify f2 int first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f2 | integer | | plain | |
+ f1 | integer | | plain | |
+ f3 | boolean | | plain | |
+Foreign-key constraints:
+ "t1_f2_fkey" FOREIGN KEY (f2) REFERENCES t_pri1(f2)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+alter table t1 modify f2 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f1 | integer | | plain | |
+ f3 | boolean | | plain | |
+ f2 | integer | | plain | |
+Foreign-key constraints:
+ "t1_f2_fkey" FOREIGN KEY (f2) REFERENCES t_pri1(f2)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+drop table if exists t_pri1 cascade;
+NOTICE: drop cascades to constraint t1_f2_fkey on table t1
+create table t_pri1(f1 int, f2 int primary key) with(storage_type = ustore);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri1_pkey" for table "t_pri1"
+insert into t_pri1 values(1,1),(2,2);
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool) with(storage_type = ustore);
+insert into t1 values(1, 1, true), (2, 2, false);
+alter table t1 modify f2 int first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f2 | integer | | plain | |
+ f1 | integer | | plain | |
+ f3 | boolean | | plain | |
+Foreign-key constraints:
+ "t1_f2_fkey" FOREIGN KEY (f2) REFERENCES t_pri1(f2)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f2 | f1 | f3
+----+----+----
+ 1 | 1 | t
+ 2 | 2 | f
+(2 rows)
+
+alter table t1 modify f2 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f1 | integer | | plain | |
+ f3 | boolean | | plain | |
+ f2 | integer | | plain | |
+Foreign-key constraints:
+ "t1_f2_fkey" FOREIGN KEY (f2) REFERENCES t_pri1(f2)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select * from t1;
+ f1 | f3 | f2
+----+----+----
+ 1 | t | 1
+ 2 | f | 2
+(2 rows)
+
+-- partition table
+drop table if exists t1 cascade;
+create table t1
+(f1 int, f2 int, f3 int) with(storage_type = ustore)
+partition by range(f1, f2)
+(
+ partition t1_p0 values less than (10, 0),
+ partition t1_p1 values less than (20, 0),
+ partition t1_p2 values less than (30, 0)
+);
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 1 2
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+alter table t1 modify f1 int after f2, modify f3 int first, modify f2 int first;
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 3 1
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+alter table t1 modify f1 int after f2;
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 2 1
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+-- modify operation before add
+alter table t1 add f4 int after f2, modify f1 int after f2;
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1');
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 3 1
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+drop table if exists t1 cascade;
+create table t1
+(f1 int, f2 int, f3 int) with(storage_type = ustore)
+partition by range(f1, f2)
+(
+ partition t1_p0 values less than (10, 0),
+ partition t1_p1 values less than (20, 0),
+ partition t1_p2 values less than (30, 0)
+);
+insert into t1 values(9, -1, 1), (19, -1, 2), (29, -1, 3);
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | | plain | |
+Partition By RANGE(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1');
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 1 2
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+select * from t1 partition (t1_p0);
+ f1 | f2 | f3
+----+----+----
+ 9 | -1 | 1
+(1 row)
+
+select * from t1 partition (t1_p1);
+ f1 | f2 | f3
+----+----+----
+ 19 | -1 | 2
+(1 row)
+
+select * from t1 partition (t1_p2);
+ f1 | f2 | f3
+----+----+----
+ 29 | -1 | 3
+(1 row)
+
+alter table t1 modify f1 int after f2, modify f3 int first, modify f2 int first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f2 | integer | | plain | |
+ f3 | integer | | plain | |
+ f1 | integer | | plain | |
+Partition By RANGE(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1');
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 3 1
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+select * from t1 partition (t1_p0);
+ f2 | f3 | f1
+----+----+----
+ -1 | 1 | 9
+(1 row)
+
+select * from t1 partition (t1_p1);
+ f2 | f3 | f1
+----+----+----
+ -1 | 2 | 19
+(1 row)
+
+select * from t1 partition (t1_p2);
+ f2 | f3 | f1
+----+----+----
+ -1 | 3 | 29
+(1 row)
+
+alter table t1 modify f1 int after f2;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f2 | integer | | plain | |
+ f1 | integer | | plain | |
+ f3 | integer | | plain | |
+Partition By RANGE(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1');
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 2 1
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+select * from t1 partition (t1_p0);
+ f2 | f1 | f3
+----+----+----
+ -1 | 9 | 1
+(1 row)
+
+select * from t1 partition (t1_p1);
+ f2 | f1 | f3
+----+----+----
+ -1 | 19 | 2
+(1 row)
+
+select * from t1 partition (t1_p2);
+ f2 | f1 | f3
+----+----+----
+ -1 | 29 | 3
+(1 row)
+
+alter table t1 add f4 int after f2, modify f1 int after f2;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f2 | integer | | plain | |
+ f4 | integer | | plain | |
+ f1 | integer | | plain | |
+ f3 | integer | | plain | |
+Partition By RANGE(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1');
+ relname | parttype | partkey
+---------+----------+---------
+ t1 | r | 3 1
+ t1_p0 | p |
+ t1_p1 | p |
+ t1_p2 | p |
+(4 rows)
+
+select * from t1 partition (t1_p0);
+ f2 | f4 | f1 | f3
+----+----+----+----
+ -1 | | 9 | 1
+(1 row)
+
+select * from t1 partition (t1_p1);
+ f2 | f4 | f1 | f3
+----+----+----+----
+ -1 | | 19 | 2
+(1 row)
+
+select * from t1 partition (t1_p2);
+ f2 | f4 | f1 | f3
+----+----+----+----
+ -1 | | 29 | 3
+(1 row)
+
+-- subpartition table
+drop table if exists range_range cascade;
+create table range_range(id int, gender varchar not null, birthday date not null) with(storage_type = ustore)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+ relname | parttype | partkey
+-------------+----------+---------
+ p_1 | p | 3
+ p_2 | p | 3
+ p_3 | p | 3
+ range_range | r | 1
+(4 rows)
+
+alter table range_range modify birthday date first, modify id int after gender;
+\d+ range_range
+ Table "public.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ birthday | date | | plain | |
+ gender | character varying | not null | extended | |
+ id | integer | | plain | |
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+ relname | parttype | partkey
+-------------+----------+---------
+ p_1 | p | 1
+ p_2 | p | 1
+ p_3 | p | 1
+ range_range | r | 3
+(4 rows)
+
+drop table if exists range_range cascade;
+create table range_range(id int, gender varchar not null, birthday date not null) with(storage_type = ustore)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24');
+insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08');
+insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21');
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+ relname | parttype | partkey
+-------------+----------+---------
+ p_1 | p | 3
+ p_2 | p | 3
+ p_3 | p | 3
+ range_range | r | 1
+(4 rows)
+
+alter table range_range modify birthday date first, modify id int after gender;
+\d+ range_range
+ Table "public.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ birthday | date | | plain | |
+ gender | character varying | not null | extended | |
+ id | integer | | plain | |
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+ relname | parttype | partkey
+-------------+----------+---------
+ p_1 | p | 1
+ p_2 | p | 1
+ p_3 | p | 1
+ range_range | r | 3
+(4 rows)
+
+select * from range_range;
+ birthday | gender | id
+------------+--------+-----
+ 08-11-2003 | boy | 33
+ 06-24-2014 | girl | 78
+ 01-12-2009 | girl | 15
+ 02-15-2010 | boy | 198
+ 03-08-2005 | girl | 146
+ 11-19-2013 | girl | 111
+ 05-21-2011 | boy | 156
+ 01-01-2010 | girl | 233
+ 05-14-2007 | boy | 360
+(9 rows)
+
+-- orientation = column not support
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (orientation = column);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+-- error
+alter table t1 modify f1 int after f3;
+ERROR: Un-support feature
+DETAIL: column-store relation doesn't support this ALTER yet
+-- error
+alter table t1 modify f3 timestamp first;
+ERROR: Un-support feature
+DETAIL: column-store relation doesn't support this ALTER yet
+-- pg_constraint test
+set enable_default_ustore_table = on;
+drop table if exists t_pri cascade;
+drop table if exists t1 cascade;
+create table t_pri(f1 int, f2 int, f3 int, primary key(f2, f3));
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_pri_pkey" for table "t_pri"
+create table t1
+(
+ f1 int, f2 int, f3 varchar(20), f4 int, f5 int, f6 int, f7 int,
+ foreign key(f1, f2) references t_pri(f2, f3),
+ unique((lower(f3)), (abs(f4))),
+ check(f5 = 10),
+ unique(f4, f5) include(f6, f7)
+);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_lower_abs_key" for table "t1"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_f4_f5_f6_f7_key" for table "t1"
+\d+ t_pri
+ Table "public.t_pri"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | integer | not null | plain | |
+ f3 | integer | not null | plain | |
+Indexes:
+ "t_pri_pkey" PRIMARY KEY, ubtree (f2, f3) WITH (storage_type=USTORE) TABLESPACE pg_default
+Referenced by:
+ TABLE "t1" CONSTRAINT "t1_f1_fkey" FOREIGN KEY (f1, f2) REFERENCES t_pri(f2, f3)
+Has OIDs: no
+Options: orientation=row, compression=no, storage_type=USTORE
+
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------+-----------+----------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | character varying(20) | | extended | |
+ f4 | integer | | plain | |
+ f5 | integer | | plain | |
+ f6 | integer | | plain | |
+ f7 | integer | | plain | |
+Indexes:
+ "t1_f4_f5_f6_f7_key" UNIQUE CONSTRAINT, ubtree (f4, f5) WITH (storage_type=USTORE) TABLESPACE pg_default
+ "t1_lower_abs_key" UNIQUE CONSTRAINT, ubtree (lower(f3::text), abs(f4)) WITH (storage_type=USTORE) TABLESPACE pg_default
+Check constraints:
+ "t1_f5_check" CHECK (f5 = 10)
+Foreign-key constraints:
+ "t1_f1_fkey" FOREIGN KEY (f1, f2) REFERENCES t_pri(f2, f3)
+Has OIDs: no
+Options: orientation=row, compression=no, storage_type=USTORE
+
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't_pri') order by conname;
+ conname | contype | conkey | confkey | conbin | consrc | conincluding
+------------+---------+--------+---------+--------+--------+--------------
+ t_pri_pkey | p | {2,3} | | | |
+(1 row)
+
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't1') order by conname;
+ conname | contype | conkey | confkey | conbin | consrc | conincluding
+--------------------+---------+--------+---------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------+--------------
+ t1_f1_fkey | f | {1,2} | {2,3} | | |
+ t1_f4_f5_f6_f7_key | u | {4,5} | | | | {6,7}
+ t1_f5_check | c | {5} | | {OPEXPR :opno 96 :opfuncid 65 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 5 :location 198} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 203 :constvalue 4 [ 10 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 201} | (f5 = 10) |
+ t1_lower_abs_key | u | {0,0} | | | |
+(4 rows)
+
+alter table t_pri modify f3 int first;
+alter table t1 modify f2 int first, modify f4 int after f1, modify f5 int after f7;
+\d+ t_pri
+ Table "public.t_pri"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f3 | integer | not null | plain | |
+ f1 | integer | | plain | |
+ f2 | integer | not null | plain | |
+Indexes:
+ "t_pri_pkey" PRIMARY KEY, ubtree (f2, f3) WITH (storage_type=ustore) TABLESPACE pg_default
+Referenced by:
+ TABLE "t1" CONSTRAINT "t1_f1_fkey" FOREIGN KEY (f1, f2) REFERENCES t_pri(f2, f3)
+Has OIDs: no
+Options: orientation=row, compression=no, storage_type=USTORE
+
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------+-----------+----------+--------------+-------------
+ f2 | integer | | plain | |
+ f1 | integer | | plain | |
+ f4 | integer | | plain | |
+ f3 | character varying(20) | | extended | |
+ f6 | integer | | plain | |
+ f7 | integer | | plain | |
+ f5 | integer | | plain | |
+Indexes:
+ "t1_f4_f5_f6_f7_key" UNIQUE CONSTRAINT, ubtree (f4, f5) WITH (storage_type=ustore) TABLESPACE pg_default
+ "t1_lower_abs_key" UNIQUE CONSTRAINT, ubtree (lower(f3::text), abs(f4)) WITH (storage_type=ustore) TABLESPACE pg_default
+Check constraints:
+ "t1_f5_check" CHECK (f5 = 10)
+Foreign-key constraints:
+ "t1_f1_fkey" FOREIGN KEY (f1, f2) REFERENCES t_pri(f2, f3)
+Has OIDs: no
+Options: orientation=row, compression=no, storage_type=USTORE
+
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't_pri') order by conname;
+ conname | contype | conkey | confkey | conbin | consrc | conincluding
+------------+---------+--------+---------+--------+--------+--------------
+ t_pri_pkey | p | {3,1} | | | |
+(1 row)
+
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't1') order by conname;
+ conname | contype | conkey | confkey | conbin | consrc | conincluding
+--------------------+---------+--------+---------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------+--------------
+ t1_f1_fkey | f | {2,1} | {3,1} | | |
+ t1_f4_f5_f6_f7_key | u | {3,7} | | | | {5,6}
+ t1_f5_check | c | {7} | | {OPEXPR :opno 96 :opfuncid 65 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 7 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 7 :location 55} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 60 :constvalue 4 [ 10 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 58} | (f5 = 10) |
+ t1_lower_abs_key | u | {0,0} | | | |
+(4 rows)
+
+set enable_default_ustore_table = off;
+-- pg_index test
+drop table if exists t1 cascade;
+create table t1
+(
+ f1 int, f2 int, f3 varchar(20), f4 int, f5 int, f6 int, f7 int,
+ primary key(f1, f2),
+ unique((lower(f3)), (abs(f4))),
+ check(f5 = 10)
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_lower_abs_key" for table "t1"
+create unique index partial_t1_idx on t1(f5, abs(f6)) where f5 + f6 - abs(f7) > 0;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------+-----------+----------+--------------+-------------
+ f1 | integer | not null | plain | |
+ f2 | integer | not null | plain | |
+ f3 | character varying(20) | | extended | |
+ f4 | integer | | plain | |
+ f5 | integer | | plain | |
+ f6 | integer | | plain | |
+ f7 | integer | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1, f2) TABLESPACE pg_default
+ "partial_t1_idx" UNIQUE, btree (f5, abs(f6)) TABLESPACE pg_default WHERE (f5 + f6 - abs(f7)) > 0
+ "t1_lower_abs_key" UNIQUE CONSTRAINT, btree (lower(f3::text), abs(f4)) TABLESPACE pg_default
+Check constraints:
+ "t1_f5_check" CHECK (f5 = 10)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1');
+ indkey | indexprs | indpred
+--------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 5 0 | ({FUNCEXPR :funcid 1397 :funcresulttype 23 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 6 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 6 :location 49}) :location 45 :refSynOid 0}) | {OPEXPR :opno 521 :opfuncid 147 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({OPEXPR :opno 555 :opfuncid 181 :opresulttype 23 :opretset false :opcollid 0 :inputcollid 0 :args ({OPEXPR :opno 551 :opfuncid 177 :opresulttype 23 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 5 :location 60} {VAR :varno 1 :varattno 6 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 6 :location 65}) :location 63} {FUNCEXPR :funcid 1397 :funcresulttype 23 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 7 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 7 :location 74}) :location 70 :refSynOid 0}) :location 68} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 80 :constvalue 4 [ 0 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 78}
+ 0 0 | ({FUNCEXPR :funcid 870 :funcresulttype 25 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 100 :inputcollid 100 :args ({RELABELTYPE :arg {VAR :varno 1 :varattno 3 :vartype 1043 :vartypmod 24 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 3 :location 141} :resulttype 25 :resulttypmod -1 :resultcollid 100 :relabelformat 2 :location -1}) :location 135 :refSynOid 0} {FUNCEXPR :funcid 1397 :funcresulttype 23 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 4 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 4 :location 152}) :location 148 :refSynOid 0}) |
+ 1 2 | |
+(3 rows)
+
+alter table t1 modify f1 int after f2, modify f4 int after f6, modify f5 int first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------+-----------+----------+--------------+-------------
+ f5 | integer | | plain | |
+ f2 | integer | not null | plain | |
+ f1 | integer | not null | plain | |
+ f3 | character varying(20) | | extended | |
+ f6 | integer | | plain | |
+ f4 | integer | | plain | |
+ f7 | integer | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1, f2) TABLESPACE pg_default
+ "partial_t1_idx" UNIQUE, btree (f5, abs(f6)) TABLESPACE pg_default WHERE (f5 + f6 - abs(f7)) > 0
+ "t1_lower_abs_key" UNIQUE CONSTRAINT, btree (lower(f3::text), abs(f4)) TABLESPACE pg_default
+Check constraints:
+ "t1_f5_check" CHECK (f5 = 10)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1');
+ indkey | indexprs | indpred
+--------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 1 0 | ({FUNCEXPR :funcid 1397 :funcresulttype 23 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 5 :location 62}) :location 58 :refSynOid 0}) | {OPEXPR :opno 521 :opfuncid 147 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({OPEXPR :opno 555 :opfuncid 181 :opresulttype 23 :opretset false :opcollid 0 :inputcollid 0 :args ({OPEXPR :opno 551 :opfuncid 177 :opresulttype 23 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 98} {VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 5 :location 103}) :location 101} {FUNCEXPR :funcid 1397 :funcresulttype 23 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 7 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 7 :location 113}) :location 109 :refSynOid 0}) :location 107} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 120 :constvalue 4 [ 0 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 118}
+ 0 0 | ({FUNCEXPR :funcid 870 :funcresulttype 25 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 100 :inputcollid 100 :args ({RELABELTYPE :arg {VAR :varno 1 :varattno 4 :vartype 1043 :vartypmod 24 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 4 :location 67} :resulttype 25 :resulttypmod -1 :resultcollid 100 :relabelformat 1 :location 69}) :location 61 :refSynOid 0} {FUNCEXPR :funcid 1397 :funcresulttype 23 :funcresulttype_orig -1 :funcretset false :funcformat 0 :funccollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 6 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 6 :location 84}) :location 80 :refSynOid 0}) |
+ 3 2 | |
+(3 rows)
+
+-- pg_attribute test
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int);
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum;
+ attname | attnum | atthasdef | attisdropped
+---------+--------+-----------+--------------
+ f1 | 1 | f | f
+ f2 | 2 | f | f
+ f3 | 3 | f | f
+(3 rows)
+
+alter table t1 modify f3 int first, modify f1 int after f2;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f3 | integer | | plain | |
+ f2 | integer | | plain | |
+ f1 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum;
+ attname | attnum | atthasdef | attisdropped
+---------+--------+-----------+--------------
+ f3 | 1 | f | f
+ f2 | 2 | f | f
+ f1 | 3 | f | f
+(3 rows)
+
+-- pg_attrdef test
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 int, f3 int default 3, f4 int generated always as (f2 + f3) stored);
+NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+----------------------------------------+---------+--------------+-------------
+ f1 | integer | not null AUTO_INCREMENT | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | default 3 | plain | |
+ f4 | integer | generated always as ((f2 + f3)) stored | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum;
+ adnum | adsrc | adgencol
+-------+----------------+----------
+ 1 | AUTO_INCREMENT |
+ 3 | 3 |
+ 4 | (f2 + f3) | s
+(3 rows)
+
+alter table t1 modify f3 int first, modify f1 int after f4, modify f4 int first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f4 | integer | | plain | |
+ f3 | integer | | plain | |
+ f2 | integer | | plain | |
+ f1 | integer | not null | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum;
+ adnum | adsrc | adgencol
+-------+-------+----------
+(0 rows)
+
+-- pg_depend test
+drop table if exists t1 cascade;
+create table t1(f1 int default 10, f2 int primary key, f3 int generated always as (f1 + f2) stored, f4 int, unique ((abs(f4))));
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t1_abs_key" for table "t1"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+----------------------------------------+---------+--------------+-------------
+ f1 | integer | default 10 | plain | |
+ f2 | integer | not null | plain | |
+ f3 | integer | generated always as ((f1 + f2)) stored | plain | |
+ f4 | integer | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f2) TABLESPACE pg_default
+ "t1_abs_key" UNIQUE CONSTRAINT, btree (abs(f4)) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend
+ where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5;
+ classid | objsubid | refclassid | refobjsubid | deptype
+---------+----------+------------+-------------+---------
+ 1247 | 0 | 1259 | 0 | i
+ 1259 | 0 | 1259 | 4 | a
+ 1259 | 0 | 2615 | 0 | n
+ 1259 | 3 | 1259 | 1 | a
+ 1259 | 3 | 1259 | 2 | a
+ 2604 | 0 | 1259 | 1 | a
+ 2604 | 0 | 1259 | 3 | a
+ 2606 | 0 | 1259 | 0 | a
+ 2606 | 0 | 1259 | 2 | a
+(9 rows)
+
+alter table t1 modify f4 int first, modify f3 int after f1, modify f1 int after f2;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f4 | integer | | plain | |
+ f3 | integer | | plain | |
+ f2 | integer | not null | plain | |
+ f1 | integer | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f2) TABLESPACE pg_default
+ "t1_abs_key" UNIQUE CONSTRAINT, btree (abs(f4)) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend
+ where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5;
+ classid | objsubid | refclassid | refobjsubid | deptype
+---------+----------+------------+-------------+---------
+ 1247 | 0 | 1259 | 0 | i
+ 1259 | 0 | 1259 | 1 | a
+ 1259 | 0 | 2615 | 0 | n
+ 2606 | 0 | 1259 | 0 | a
+ 2606 | 0 | 1259 | 3 | a
+(5 rows)
+
+-- pg_partition test
+drop table if exists range_range cascade;
+create table range_range(id int, gender varchar not null, birthday date not null)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24');
+insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08');
+insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21');
+\d+ range_range
+ Table "public.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ id | integer | | plain | |
+ gender | character varying | not null | extended | |
+ birthday | date | not null | plain | |
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+ relname | parttype | partkey
+-------------+----------+---------
+ p_1 | p | 3
+ p_2 | p | 3
+ p_3 | p | 3
+ range_range | r | 1
+(4 rows)
+
+alter table range_range modify gender varchar after birthday;
+\d+ range_range
+ Table "public.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ id | integer | | plain | |
+ birthday | date | not null | plain | |
+ gender | character varying | | extended | |
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+ relname | parttype | partkey
+-------------+----------+---------
+ p_1 | p | 2
+ p_2 | p | 2
+ p_3 | p | 2
+ range_range | r | 1
+(4 rows)
+
+alter table range_range modify birthday date first, modify id int after gender;
+\d+ range_range
+ Table "public.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ birthday | date | | plain | |
+ gender | character varying | | extended | |
+ id | integer | | plain | |
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+ relname | parttype | partkey
+-------------+----------+---------
+ p_1 | p | 1
+ p_2 | p | 1
+ p_3 | p | 1
+ range_range | r | 3
+(4 rows)
+
+-- pg_rewrite test
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int, f4 int);
+insert into t1 values(1, 2, 3, 4), (11, 22, 33, 44);
+create view t1_view1 as select * from t1;
+create view t1_view2 as select f1, f4 from t1;
+\d+ t1_view1
+ View "public.t1_view1"
+ Column | Type | Modifiers | Storage | Description
+--------+---------+-----------+---------+-------------
+ f1 | integer | | plain |
+ f2 | integer | | plain |
+ f3 | integer | | plain |
+ f4 | integer | | plain |
+View definition:
+ SELECT *
+ FROM t1;
+
+\d+ t1_view2
+ View "public.t1_view2"
+ Column | Type | Modifiers | Storage | Description
+--------+---------+-----------+---------+-------------
+ f1 | integer | | plain |
+ f4 | integer | | plain |
+View definition:
+ SELECT t1.f1, t1.f4
+ FROM t1;
+
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | | plain | |
+ f4 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select pg_get_viewdef('t1_view1');
+ pg_get_viewdef
+--------------------
+ SELECT * FROM t1;
+(1 row)
+
+select pg_get_viewdef('t1_view2');
+ pg_get_viewdef
+------------------------------
+ SELECT t1.f1, t1.f4 FROM t1;
+(1 row)
+
+select * from t1_view1;
+ f1 | f2 | f3 | f4
+----+----+----+----
+ 1 | 2 | 3 | 4
+ 11 | 22 | 33 | 44
+(2 rows)
+
+select * from t1_view2;
+ f1 | f4
+----+----
+ 1 | 4
+ 11 | 44
+(2 rows)
+
+select * from t1;
+ f1 | f2 | f3 | f4
+----+----+----+----
+ 1 | 2 | 3 | 4
+ 11 | 22 | 33 | 44
+(2 rows)
+
+alter table t1 modify f2 int first, modify f1 int after f4, add f5 int after f4;
+\d+ t1_view1
+ View "public.t1_view1"
+ Column | Type | Modifiers | Storage | Description
+--------+---------+-----------+---------+-------------
+ f1 | integer | | plain |
+ f2 | integer | | plain |
+ f3 | integer | | plain |
+ f4 | integer | | plain |
+View definition:
+ SELECT t1.f1, t1.f2, t1.f3, t1.f4
+ FROM t1;
+
+\d+ t1_view2
+ View "public.t1_view2"
+ Column | Type | Modifiers | Storage | Description
+--------+---------+-----------+---------+-------------
+ f1 | integer | | plain |
+ f4 | integer | | plain |
+View definition:
+ SELECT t1.f1, t1.f4
+ FROM t1;
+
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f2 | integer | | plain | |
+ f3 | integer | | plain | |
+ f4 | integer | | plain | |
+ f5 | integer | | plain | |
+ f1 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select pg_get_viewdef('t1_view1');
+ pg_get_viewdef
+--------------------------------------------
+ SELECT t1.f1, t1.f2, t1.f3, t1.f4 FROM t1;
+(1 row)
+
+select pg_get_viewdef('t1_view2');
+ pg_get_viewdef
+------------------------------
+ SELECT t1.f1, t1.f4 FROM t1;
+(1 row)
+
+select * from t1_view1;
+ f1 | f2 | f3 | f4
+----+----+----+----
+ 1 | 2 | 3 | 4
+ 11 | 22 | 33 | 44
+(2 rows)
+
+select * from t1_view2;
+ f1 | f4
+----+----
+ 1 | 4
+ 11 | 44
+(2 rows)
+
+select * from t1;
+ f2 | f3 | f4 | f5 | f1
+----+----+----+----+----
+ 2 | 3 | 4 | | 1
+ 22 | 33 | 44 | | 11
+(2 rows)
+
+-- pg_trigger test
+drop table if exists t1 cascade;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to view t1_view2
+drop cascades to view t1_view1
+create table t1(f1 boolean not null, f2 text, f3 int, f4 date);
+alter table t1 add primary key(f1);
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+create or replace function dummy_update_func() returns trigger as $$
+begin
+ raise notice 'dummy_update_func(%) called: action = %, oid = %, new = %', TG_ARGV[0], TG_OP, OLD, NEW;
+ return new;
+end;
+$$ language plpgsql;
+drop trigger if exists f1_trig_update on t1;
+NOTICE: trigger "t1.f1_trig_update" for table "t1" does not exist, skipping
+drop trigger if exists f1_trig_insert on t1;
+NOTICE: trigger "t1.f1_trig_insert" for table "t1" does not exist, skipping
+create trigger f1_trig_update after update of f1 on t1 for each row
+ when (not old.f1 and new.f1) execute procedure dummy_update_func('update');
+create trigger f1_trig_insert after insert on t1 for each row
+ when (not new.f1) execute procedure dummy_update_func('insert');
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+----------+--------------+-------------
+ f1 | boolean | not null | plain | |
+ f2 | text | | extended | |
+ f3 | integer | | plain | |
+ f4 | date | | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1) TABLESPACE pg_default
+Triggers:
+ f1_trig_insert AFTER INSERT ON t1 FOR EACH ROW WHEN (NOT new.f1) EXECUTE PROCEDURE dummy_update_func('insert')
+ f1_trig_update AFTER UPDATE OF f1 ON t1 FOR EACH ROW WHEN (NOT old.f1 AND new.f1) EXECUTE PROCEDURE dummy_update_func('update')
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname;
+ tgname | tgattr | tgqual
+----------------+--------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ f1_trig_insert | | {BOOLEXPR :boolop not :args ({VAR :varno 2 :varattno 1 :vartype 16 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 2 :varoattno 1 :location 80}) :location 76}
+ f1_trig_update | 1 | {BOOLEXPR :boolop and :args ({BOOLEXPR :boolop not :args ({VAR :varno 1 :varattno 1 :vartype 16 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 86}) :location 82} {VAR :varno 2 :varattno 1 :vartype 16 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 2 :varoattno 1 :location 97}) :location 93}
+(2 rows)
+
+alter table t1 modify f3 int first, modify f1 boolean after f4;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+----------+--------------+-------------
+ f3 | integer | | plain | |
+ f2 | text | | extended | |
+ f4 | date | | plain | |
+ f1 | boolean | not null | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1) TABLESPACE pg_default
+Triggers:
+ f1_trig_insert AFTER INSERT ON t1 FOR EACH ROW WHEN (NOT new.f1) EXECUTE PROCEDURE dummy_update_func('insert')
+ f1_trig_update AFTER UPDATE OF f1 ON t1 FOR EACH ROW WHEN (NOT old.f1 AND new.f1) EXECUTE PROCEDURE dummy_update_func('update')
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname;
+ tgname | tgattr | tgqual
+----------------+--------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ f1_trig_insert | | {BOOLEXPR :boolop not :args ({VAR :varno 2 :varattno 4 :vartype 16 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 2 :varoattno 4 :location 80}) :location 76}
+ f1_trig_update | 4 | {BOOLEXPR :boolop and :args ({BOOLEXPR :boolop not :args ({VAR :varno 1 :varattno 4 :vartype 16 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 4 :location 86}) :location 82} {VAR :varno 2 :varattno 4 :vartype 16 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 2 :varoattno 4 :location 97}) :location 93}
+(2 rows)
+
+-- pg_rlspolicy test
+drop table if exists t1 cascade;
+drop role if exists test_rlspolicy3;
+NOTICE: role "test_rlspolicy3" does not exist, skipping
+create role test_rlspolicy3 nologin password 'Gauss_234';
+create table t1 (f1 int, f2 int, f3 text) partition by range (f1)
+(
+ partition t1_p0 values less than(10),
+ partition t1_p1 values less than(50),
+ partition t1_p2 values less than(100),
+ partition t1_p3 values less than(MAXVALUE)
+);
+INSERT INTO t1 VALUES (generate_series(1, 150) % 24, generate_series(1, 150), 'huawei');
+grant select on t1 to public;
+create row level security policy t1_rls1 on t1 as permissive to public using (f2 <= 20);
+create row level security policy t1_rls2 on t1 as restrictive to test_rlspolicy3 using (f1 < 30);
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+----------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | text | | extended | |
+Row Level Security Policies:
+ POLICY "t1_rls1" FOR ALL
+ TO public
+ USING ((f2 <= 20))
+ POLICY "t1_rls2" AS RESTRICTIVE FOR ALL
+ TO test_rlspolicy3
+ USING ((f1 < 30))
+Partition By RANGE(f1)
+Number of partitions: 4 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1 limit 10;
+ f1 | f2 | f3
+----+----+--------
+ 1 | 1 | huawei
+ 2 | 2 | huawei
+ 3 | 3 | huawei
+ 4 | 4 | huawei
+ 5 | 5 | huawei
+ 6 | 6 | huawei
+ 7 | 7 | huawei
+ 8 | 8 | huawei
+ 9 | 9 | huawei
+ 0 | 24 | huawei
+(10 rows)
+
+select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1');
+ polname | polqual
+---------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ t1_rls1 | {OPEXPR :opno 523 :opfuncid 149 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 2 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 78} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 84 :constvalue 4 [ 20 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 81}
+ t1_rls2 | {OPEXPR :opno 97 :opfuncid 66 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 88} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 93 :constvalue 4 [ 30 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 91}
+(2 rows)
+
+alter table t1 modify f2 int first, modify f1 int after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+----------+--------------+-------------
+ f2 | integer | | plain | |
+ f3 | text | | extended | |
+ f1 | integer | | plain | |
+Row Level Security Policies:
+ POLICY "t1_rls1" FOR ALL
+ TO public
+ USING ((f2 <= 20))
+ POLICY "t1_rls2" AS RESTRICTIVE FOR ALL
+ TO test_rlspolicy3
+ USING ((f1 < 30))
+Partition By RANGE(f1)
+Number of partitions: 4 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1 limit 10;
+ f2 | f3 | f1
+----+--------+----
+ 1 | huawei | 1
+ 2 | huawei | 2
+ 3 | huawei | 3
+ 4 | huawei | 4
+ 5 | huawei | 5
+ 6 | huawei | 6
+ 7 | huawei | 7
+ 8 | huawei | 8
+ 9 | huawei | 9
+ 24 | huawei | 0
+(10 rows)
+
+select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1');
+ polname | polqual
+---------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ t1_rls1 | {OPEXPR :opno 523 :opfuncid 149 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 78} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 84 :constvalue 4 [ 20 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 81}
+ t1_rls2 | {OPEXPR :opno 97 :opfuncid 66 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ({VAR :varno 1 :varattno 3 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 3 :location 88} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 93 :constvalue 4 [ 30 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false}) :location 91}
+(2 rows)
+
+-- expression test
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int, f4 bool, f5 text, f6 text);
+insert into t1 values(1, 2, 3, true, 'nanjin', 'huawei');
+-- T_FuncExpr
+create index t1_idx1 on t1(abs(f1), f2);
+-- T_OpExpr
+create index t1_idx2 on t1((f1 + f2), (f1 - f3));
+-- T_BooleanTest
+create index t1_idx3 on t1((f4 is true));
+-- T_CaseExpr and T_CaseWhen
+create index t1_idx4 on t1((case f1 when f2 then 'yes' when f3 then 'no' else 'unknow' end));
+-- T_ArrayExpr
+create index t1_idx5 on t1((array[f1, f2, f3]));
+-- T_TypeCast
+create index t1_idx6 on t1(((f1 + f2 + 1) :: text));
+-- T_BoolExpr
+create index t1_idx7 on t1((f1 and f2), (f2 or f3));
+-- T_ArrayRef
+create index t1_idx8 on t1((f1 = (array[f1, f2, 3])[1]));
+-- T_ScalarArrayOpExpr
+create index t1_idx9 on t1((f1 = ANY(ARRAY[f2, 1, f1 + 10])));
+-- T_RowCompareExpr
+create index t1_idx10 on t1((row(f1, f5) < row(f2, f6)));
+-- T_MinMaxExpr
+create index t1_idx11 on t1(greatest(f1, f2, f3), least(f1, f2, f3));
+-- T_RowExpr
+drop table if exists mytable cascade;
+NOTICE: drop cascades to function getf1(mytable)
+create table mytable(f1 int, f2 int, f3 text);
+create function getf1(mytable) returns int as 'select $1.f1' language sql;
+create index t1_idx12 on t1(getf1(row(f1, 2, 'a')));
+-- T_CoalesceExpr
+create index t1_idx13 on t1(nvl(f1, f2));
+-- T_NullTest
+create index t1_idx14 on t1((f1 is null));
+-- T_ScalarArrayOpExpr
+create index t1_idx16 on t1((f1 in (1,2,3)));
+-- T_NullIfExpr
+create index t1_idx17 on t1(nullif(f5,f6));
+-- T_RelabelType
+alter table t1 add f7 oid;
+create index t1_idx18 on t1((f7::int4));
+-- T_CoerceViaIO
+alter table t1 add f8 json;
+create index t1_idx19 on t1((f8::jsonb));
+-- T_ArrayCoerceExpr
+alter table t1 add f9 float[];
+create index t1_idx20 on t1((f9::int[]));
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+--------------------+-----------+----------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | | plain | |
+ f4 | boolean | | plain | |
+ f5 | text | | extended | |
+ f6 | text | | extended | |
+ f7 | oid | | plain | |
+ f8 | json | | extended | |
+ f9 | double precision[] | | extended | |
+Indexes:
+ "t1_idx1" btree (abs(f1), f2) TABLESPACE pg_default
+ "t1_idx10" btree (((ROW(f1, f5) < ROW(f2, f6)))) TABLESPACE pg_default
+ "t1_idx11" btree ((GREATEST(f1, f2, f3)), (LEAST(f1, f2, f3))) TABLESPACE pg_default
+ "t1_idx12" btree (getf1(ROW(f1, 2, 'a'::text))) TABLESPACE pg_default
+ "t1_idx13" btree ((COALESCE(f1, f2))) TABLESPACE pg_default
+ "t1_idx14" btree ((f1 IS NULL)) TABLESPACE pg_default
+ "t1_idx16" btree ((f1 = ANY (ARRAY[1, 2, 3]))) TABLESPACE pg_default
+ "t1_idx17" btree ((NULLIF(f5, f6))) TABLESPACE pg_default
+ "t1_idx18" btree ((f7::integer)) TABLESPACE pg_default
+ "t1_idx19" btree ((f8::jsonb)) TABLESPACE pg_default
+ "t1_idx2" btree ((f1 + f2), (f1 - f3)) TABLESPACE pg_default
+ "t1_idx20" btree ((f9::integer[])) TABLESPACE pg_default
+ "t1_idx3" btree ((f4 IS TRUE)) TABLESPACE pg_default
+ "t1_idx4" btree ((
+CASE f1
+ WHEN f2 THEN 'yes'::text
+ WHEN f3 THEN 'no'::text
+ ELSE 'unknow'::text
+END)) TABLESPACE pg_default
+ "t1_idx5" btree ((ARRAY[f1, f2, f3])) TABLESPACE pg_default
+ "t1_idx6" btree (((f1 + f2 + 1)::text)) TABLESPACE pg_default
+ "t1_idx7" btree ((f1 AND f2), (f2 OR f3)) TABLESPACE pg_default
+ "t1_idx8" btree ((f1 = (ARRAY[f1, f2, 3])[1])) TABLESPACE pg_default
+ "t1_idx9" btree ((f1 = ANY (ARRAY[f2, 1, f1 + 10]))) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f1 | f2 | f3 | f4 | f5 | f6 | f7 | f8 | f9
+----+----+----+----+--------+--------+----+----+----
+ 1 | 2 | 3 | t | nanjin | huawei | | |
+(1 row)
+
+alter table t1 modify f8 json first, modify f2 int after f6, modify f7 oid after f3;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+--------------------+-----------+----------+--------------+-------------
+ f8 | json | | extended | |
+ f1 | integer | | plain | |
+ f3 | integer | | plain | |
+ f7 | oid | | plain | |
+ f4 | boolean | | plain | |
+ f5 | text | | extended | |
+ f6 | text | | extended | |
+ f2 | integer | | plain | |
+ f9 | double precision[] | | extended | |
+Indexes:
+ "t1_idx1" btree (abs(f1), f2) TABLESPACE pg_default
+ "t1_idx10" btree (((ROW(f1, f5) < ROW(f2, f6)))) TABLESPACE pg_default
+ "t1_idx11" btree ((GREATEST(f1, f2, f3)), (LEAST(f1, f2, f3))) TABLESPACE pg_default
+ "t1_idx12" btree (getf1(ROW(f1, 2, 'a'::text))) TABLESPACE pg_default
+ "t1_idx13" btree ((COALESCE(f1, f2))) TABLESPACE pg_default
+ "t1_idx14" btree ((f1 IS NULL)) TABLESPACE pg_default
+ "t1_idx16" btree ((f1 = ANY (ARRAY[1, 2, 3]))) TABLESPACE pg_default
+ "t1_idx17" btree ((NULLIF(f5, f6))) TABLESPACE pg_default
+ "t1_idx18" btree ((f7::integer)) TABLESPACE pg_default
+ "t1_idx19" btree ((f8::jsonb)) TABLESPACE pg_default
+ "t1_idx2" btree ((f1 + f2), (f1 - f3)) TABLESPACE pg_default
+ "t1_idx20" btree ((f9::integer[])) TABLESPACE pg_default
+ "t1_idx3" btree ((f4 IS TRUE)) TABLESPACE pg_default
+ "t1_idx4" btree ((
+CASE f1
+ WHEN f2 THEN 'yes'::text
+ WHEN f3 THEN 'no'::text
+ ELSE 'unknow'::text
+END)) TABLESPACE pg_default
+ "t1_idx5" btree ((ARRAY[f1, f2, f3])) TABLESPACE pg_default
+ "t1_idx6" btree (((f1 + f2 + 1)::text)) TABLESPACE pg_default
+ "t1_idx7" btree ((f1 AND f2), (f2 OR f3)) TABLESPACE pg_default
+ "t1_idx8" btree ((f1 = (ARRAY[f1, f2, 3])[1])) TABLESPACE pg_default
+ "t1_idx9" btree ((f1 = ANY (ARRAY[f2, 1, f1 + 10]))) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f8 | f1 | f3 | f7 | f4 | f5 | f6 | f2 | f9
+----+----+----+----+----+--------+--------+----+----
+ | 1 | 3 | | t | nanjin | huawei | 2 |
+(1 row)
+
+drop table if exists t1;
+create table t1(f1 int, f2 int);
+insert into t1 values(1,2);
+alter table t1 add f3 int default 3, add f4 int default 4 after f3, add f5 int default 5, add f6 int default 6 after f3;
+select * from t1;
+ f1 | f2 | f3 | f6 | f4 | f5
+----+----+----+----+----+----
+ 1 | 2 | 3 | 6 | 4 | 5
+(1 row)
+
+drop table if exists t1;
+create table t1(f1 int, f2 int);
+insert into t1 values(1,2);
+alter table t1 add f3 int default 3, add f4 int default 4 after f1, add f5 int default 5, add f6 int default 6 after f5;
+select * from t1;
+ f1 | f4 | f2 | f3 | f5 | f6
+----+----+----+----+----+----
+ 1 | 4 | 2 | 3 | 5 | 6
+(1 row)
+
+drop table if exists t1;
+create table t1(f1 int, f2 int);
+insert into t1 values(1,2);
+alter table t1 add f3 int, add f4 int after f3, add f5 int, add f6 int first;
+select * from t1;
+ f6 | f1 | f2 | f3 | f4 | f5
+----+----+----+----+----+----
+ | 1 | 2 | | |
+(1 row)
+
+drop table if exists t1;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f5,
+ add f6 int default 6 , add f7 int first, add f8 int default 8 after f3,
+ modify f3 timestamp first, modify f6 int after f2, modify f1 text, modify f2 text after f4;
+ERROR: column "f6" of relation "t1" does not exist
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int, primary key(f1, f3));
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 2, 3), (11, 22, 33);
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f1 | integer | not null | plain | |
+ f2 | integer | | plain | |
+ f3 | integer | not null | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1, f3) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f1 | f2 | f3
+----+----+----
+ 1 | 2 | 3
+ 11 | 22 | 33
+(2 rows)
+
+alter table t1 modify f3 int first, modify f1 int after f2;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f3 | integer | not null | plain | |
+ f2 | integer | | plain | |
+ f1 | integer | not null | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1, f3) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f3 | f2 | f1
+----+----+----
+ 3 | 2 | 1
+ 33 | 22 | 11
+(2 rows)
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int);
+insert into t1 values(1, 2, 3), (11, 12, 13), (21, 22, 23);
+select * from t1;
+ f1 | f2 | f3
+----+----+----
+ 1 | 2 | 3
+ 11 | 12 | 13
+ 21 | 22 | 23
+(3 rows)
+
+alter table t1 add f4 int generated always as (f1 + 100) stored after f1, add f5 int generated always as (f2 * 10) stored first;
+select * from t1;
+ f5 | f1 | f4 | f2 | f3
+-----+----+-----+----+----
+ 20 | 1 | 101 | 2 | 3
+ 120 | 11 | 111 | 12 | 13
+ 220 | 21 | 121 | 22 | 23
+(3 rows)
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool, f6 int generated always as (f1 * 10) stored, primary key(f1, f2));
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+select * from t1;
+ f1 | f2 | f3 | f4 | f5 | f6
+----+----+---------------------------------+----------+----+----
+ 1 | a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t | 10
+ 2 | b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f | 20
+(2 rows)
+
+alter table t1 drop f4,
+ add f7 int default 7 , add f8 int first, add f9 int default 9 after f3,
+ modify f3 timestamp first, modify f6 int after f2, modify f5 int, modify f2 text after f5,
+ add f10 timestamp generated always as (f3) stored after f3,
+ add f11 int generated always as (f1 * 100) stored first;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------------------------------------+----------+--------------+-------------
+ f11 | integer | generated always as ((f1 * 100)) stored | plain | |
+ f8 | integer | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f10 | timestamp without time zone | generated always as (f3) stored | plain | |
+ f9 | integer | default 9 | plain | |
+ f1 | integer | not null | plain | |
+ f6 | integer | | plain | |
+ f5 | integer | | plain | |
+ f2 | text | not null | extended | |
+ f7 | integer | default 7 | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1, f2) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f11 | f8 | f3 | f10 | f9 | f1 | f6 | f5 | f2 | f7
+-----+----+---------------------------------+---------------------------------+----+----+----+----+----+----
+ 100 | | Tue Nov 08 19:56:10.158564 2022 | Tue Nov 08 19:56:10.158564 2022 | 9 | 1 | 10 | 1 | a | 7
+ 200 | | Wed Nov 09 19:56:10.158564 2022 | Wed Nov 09 19:56:10.158564 2022 | 9 | 2 | 20 | 0 | b | 7
+(2 rows)
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 int, primary key(f1, f3));
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', 1), (2, 'b', 2);
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------+-----------+----------+--------------+-------------
+ f1 | integer | not null | plain | |
+ f2 | character varying(20) | | extended | |
+ f3 | integer | not null | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1, f3) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f1 | f2 | f3
+----+----+----
+ 1 | a | 1
+ 2 | b | 2
+(2 rows)
+
+alter table t1 modify f1 text after f3, add f10 int default 10 after f2;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------+------------+----------+--------------+-------------
+ f2 | character varying(20) | | extended | |
+ f10 | integer | default 10 | plain | |
+ f3 | integer | not null | plain | |
+ f1 | text | not null | extended | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1, f3) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f2 | f10 | f3 | f1
+----+-----+----+----
+ a | 10 | 1 | 1
+ b | 10 | 2 | 2
+(2 rows)
+
+-- unlogged table
+drop table if exists t1 cascade;
+create unlogged table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool, f6 int generated always as (f1 * 10) stored, primary key(f1, f2));
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+\d+ t1
+ Unlogged table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+----------------------------------------+----------+--------------+-------------
+ f1 | integer | not null | plain | |
+ f2 | character varying(20) | not null | extended | |
+ f3 | timestamp without time zone | | plain | |
+ f4 | bit(8) | | extended | |
+ f5 | boolean | | plain | |
+ f6 | integer | generated always as ((f1 * 10)) stored | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1, f2) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f1 | f2 | f3 | f4 | f5 | f6
+----+----+---------------------------------+----------+----+----
+ 1 | a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t | 10
+ 2 | b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f | 20
+(2 rows)
+
+alter table t1 drop f4,
+ add f7 int default 7 , add f8 int first, add f9 int default 9 after f3,
+ modify f3 timestamp first, modify f6 int after f2, modify f5 int, modify f2 text after f5,
+ add f10 timestamp generated always as (f3) stored after f3,
+ add f11 int generated always as (f1 * 100) stored first;
+\d+ t1
+ Unlogged table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------------+-----------------------------------------+----------+--------------+-------------
+ f11 | integer | generated always as ((f1 * 100)) stored | plain | |
+ f8 | integer | | plain | |
+ f3 | timestamp without time zone | | plain | |
+ f10 | timestamp without time zone | generated always as (f3) stored | plain | |
+ f9 | integer | default 9 | plain | |
+ f1 | integer | not null | plain | |
+ f6 | integer | | plain | |
+ f5 | integer | | plain | |
+ f2 | text | not null | extended | |
+ f7 | integer | default 7 | plain | |
+Indexes:
+ "t1_pkey" PRIMARY KEY, btree (f1, f2) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f11 | f8 | f3 | f10 | f9 | f1 | f6 | f5 | f2 | f7
+-----+----+---------------------------------+---------------------------------+----+----+----+----+----+----
+ 100 | | Tue Nov 08 19:56:10.158564 2022 | Tue Nov 08 19:56:10.158564 2022 | 9 | 1 | 10 | 1 | a | 7
+ 200 | | Wed Nov 09 19:56:10.158564 2022 | Wed Nov 09 19:56:10.158564 2022 | 9 | 2 | 20 | 0 | b | 7
+(2 rows)
+
+-- temp table
+drop table if exists t1 cascade;
+create temp table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool, f6 int generated always as (f1 * 10) stored, primary key(f1, f2));
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+select * from t1;
+ f1 | f2 | f3 | f4 | f5 | f6
+----+----+---------------------------------+----------+----+----
+ 1 | a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t | 10
+ 2 | b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f | 20
+(2 rows)
+
+alter table t1 drop f4,
+ add f7 int default 7 , add f8 int first, add f9 int default 9 after f3,
+ modify f3 timestamp first, modify f6 int after f2, modify f5 int, modify f2 text after f5,
+ add f10 timestamp generated always as (f3) stored after f3,
+ add f11 int generated always as (f1 * 100) stored first;
+select * from t1;
+ f11 | f8 | f3 | f10 | f9 | f1 | f6 | f5 | f2 | f7
+-----+----+---------------------------------+---------------------------------+----+----+----+----+----+----
+ 100 | | Tue Nov 08 19:56:10.158564 2022 | Tue Nov 08 19:56:10.158564 2022 | 9 | 1 | 10 | 1 | a | 7
+ 200 | | Wed Nov 09 19:56:10.158564 2022 | Wed Nov 09 19:56:10.158564 2022 | 9 | 2 | 20 | 0 | b | 7
+(2 rows)
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 SET('beijing','shanghai','nanjing','wuhan'));
+NOTICE: CREATE TABLE will create implicit set "t1_f2_set" for column "t1.f2"
+insert into t1 values(1, 'shanghai,beijing'), (2, 'wuhan');
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------+-----------+---------+--------------+-------------
+ f1 | integer | | plain | |
+ f2 | t1_f2_set | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f1 | f2
+----+------------------
+ 1 | beijing,shanghai
+ 2 | wuhan
+(2 rows)
+
+alter table t1 add f3 int default 3 first, add f4 int default 4 after f3,
+ add f5 SET('beijing','shanghai','nanjing','wuhan') default 'nanjing' first;
+NOTICE: ALTER TABLE will create implicit set "t1_f5_set" for column "t1.f5"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------+------------------------------+---------+--------------+-------------
+ f5 | t1_f5_set | default 'nanjing'::t1_f5_set | plain | |
+ f3 | integer | default 3 | plain | |
+ f4 | integer | default 4 | plain | |
+ f1 | integer | | plain | |
+ f2 | t1_f2_set | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select * from t1;
+ f5 | f3 | f4 | f1 | f2
+---------+----+----+----+------------------
+ nanjing | 3 | 4 | 1 | beijing,shanghai
+ nanjing | 3 | 4 | 2 | wuhan
+(2 rows)
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 SET('beijing','shanghai','nanjing','wuhan'));
+NOTICE: CREATE TABLE will create implicit set "t1_f2_set" for column "t1.f2"
+-- error
+alter table t1 modify f2 SET('beijing','shanghai','nanjing','wuhan') first;
+ERROR: can not alter column type to another set
+alter table t1 modify f2 SET('beijing','shanghai','nanjing','wuhan') after f1;
+ERROR: can not alter column type to another set
+drop table if exists t1 cascade;
+--DTS
+drop table if exists unit cascade;
+NOTICE: table "unit" does not exist, skipping
+CREATE TABLE unit
+(
+ f11 INTEGER CHECK (f11 >=2),
+ f12 bool,
+ f13 text,
+ f14 varchar(20),
+ primary key (f11,f12)
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "unit_pkey" for table "unit"
+insert into unit values(2,3,4,5);
+insert into unit values(3,4,5,6);
+ALTER TABLE unit ADD f1 int CHECK (f1 >=10) FIRST;
+insert into unit values (10,6,1,1,1);
+insert into unit values (11,7,1,1,1);
+ALTER TABLE unit ADD f2 int CHECK (f2 >=10) after f11;
+select * from unit;
+ f1 | f11 | f2 | f12 | f13 | f14
+----+-----+----+-----+-----+-----
+ | 2 | | t | 4 | 5
+ | 3 | | t | 5 | 6
+ 10 | 6 | | t | 1 | 1
+ 11 | 7 | | t | 1 | 1
+(4 rows)
+
+ALTER TABLE unit MODIFY f12 int FIRST;
+select * from unit;
+ f12 | f1 | f11 | f2 | f13 | f14
+-----+----+-----+----+-----+-----
+ 1 | | 2 | | 4 | 5
+ 1 | | 3 | | 5 | 6
+ 1 | 10 | 6 | | 1 | 1
+ 1 | 11 | 7 | | 1 | 1
+(4 rows)
+
+drop table if exists unit cascade;
+-- dts for set
+drop table if exists test_s1 cascade;
+NOTICE: table "test_s1" does not exist, skipping
+create table test_s1 (c1 int,c2 SET('aaa','bbb','ccc'), c3 bool, primary key(c1));
+NOTICE: CREATE TABLE will create implicit set "test_s1_c2_set" for column "test_s1.c2"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_s1_pkey" for table "test_s1"
+insert into test_s1 values(1,2,1), (2,'aaa',3), (3,4,4), (4,5,5), (5,1,6), (6,3,7);
+alter table test_s1 add f1 text after c1;
+alter table test_s1 modify c2 int first;
+select * from test_s1;
+ c2 | c1 | f1 | c3
+----+----+----+----
+ 2 | 1 | | t
+ 1 | 2 | | t
+ 4 | 3 | | t
+ 5 | 4 | | t
+ 1 | 5 | | t
+ 3 | 6 | | t
+(6 rows)
+
+drop table if exists test_s1 cascade;
+drop table if exists test_s2 cascade;
+NOTICE: table "test_s2" does not exist, skipping
+create table test_s2 (c1 int,c2 SET('aaa','bbb','ccc'), c3 bool, primary key(c1));
+NOTICE: CREATE TABLE will create implicit set "test_s2_c2_set" for column "test_s2.c2"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_s2_pkey" for table "test_s2"
+insert into test_s2 values(1,2,1), (2,'aaa',3), (3,4,4), (4,5,5), (5,1,6), (6,3,7);
+alter table test_s2 add f1 text check(f1 >= 2) after c1;
+alter table test_s2 add f2 SET('w','ww','www','wwww') first;
+NOTICE: ALTER TABLE will create implicit set "test_s2_f2_set" for column "test_s2.f2"
+alter table test_s2 modify f2 text after c1;
+alter table test_s2 modify c2 int first;
+select * from test_s2;
+ c2 | c1 | f2 | f1 | c3
+----+----+----+----+----
+ 2 | 1 | | | t
+ 1 | 2 | | | t
+ 4 | 3 | | | t
+ 5 | 4 | | | t
+ 1 | 5 | | | t
+ 3 | 6 | | | t
+(6 rows)
+
+drop table if exists test_s2 cascade;
+drop table if exists t1 cascade;
+NOTICE: table "t1" does not exist, skipping
+create table t1(f1 set('aaa','bbb','ccc'), f2 set('1','2','3'), f3 set('beijing','shannghai','nanjing'),
+ f4 set('aaa','bbb','ccc') generated always as(f1+f2+f3) stored,
+ f5 set('1','2','3') generated always as(f1+f2+f3) stored,
+ f6 set('beijing','shannghai','nanjing') generated always as(f1+f2+f3) stored);
+NOTICE: CREATE TABLE will create implicit set "t1_f1_set" for column "t1.f1"
+NOTICE: CREATE TABLE will create implicit set "t1_f2_set" for column "t1.f2"
+NOTICE: CREATE TABLE will create implicit set "t1_f3_set" for column "t1.f3"
+NOTICE: CREATE TABLE will create implicit set "t1_f4_set" for column "t1.f4"
+NOTICE: CREATE TABLE will create implicit set "t1_f5_set" for column "t1.f5"
+NOTICE: CREATE TABLE will create implicit set "t1_f6_set" for column "t1.f6"
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------+--------------------------------------------------------------------------------+---------+--------------+-------------
+ f1 | t1_f1_set | | plain | |
+ f2 | t1_f2_set | | plain | |
+ f3 | t1_f3_set | | plain | |
+ f4 | t1_f4_set | generated always as ((((f1)::numeric + (f2)::numeric) + (f3)::numeric)) stored | plain | |
+ f5 | t1_f5_set | generated always as ((((f1)::numeric + (f2)::numeric) + (f3)::numeric)) stored | plain | |
+ f6 | t1_f6_set | generated always as ((((f1)::numeric + (f2)::numeric) + (f3)::numeric)) stored | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+alter table t1 modify f1 int after f6;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------+--------------------------------------------------------------------------------+---------+--------------+-------------
+ f2 | t1_f2_set | | plain | |
+ f3 | t1_f3_set | | plain | |
+ f4 | t1_f4_set | generated always as ((((f1)::numeric + (f2)::numeric) + (f3)::numeric)) stored | plain | |
+ f5 | t1_f5_set | generated always as ((((f1)::numeric + (f2)::numeric) + (f3)::numeric)) stored | plain | |
+ f6 | t1_f6_set | generated always as ((((f1)::numeric + (f2)::numeric) + (f3)::numeric)) stored | plain | |
+ f1 | integer | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+alter table t1 drop f1;
+\d+ t1
+ Table "public.t1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------+-----------+---------+--------------+-------------
+ f2 | t1_f2_set | | plain | |
+ f3 | t1_f3_set | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+drop table if exists t1 cascade;
+drop table t1 cascade;
+ERROR: table "t1" does not exist
+create table t1(f1 int, f2 text, f3 int, f4 bool, f5 int generated always as (f1 + f3) stored);
+insert into t1 values(1, 'aaa', 3, true);
+insert into t1 values(11, 'bbb', 33, false);
+insert into t1 values(111, 'ccc', 333, true);
+insert into t1 values(1111, 'ddd', 3333, true);
+
+create view t1_view1 as select * from t1;
+select * from t1_view1;
+ f1 | f2 | f3 | f4 | f5
+------+-----+------+----+------
+ 1 | aaa | 3 | t | 4
+ 11 | bbb | 33 | f | 44
+ 111 | ccc | 333 | t | 444
+ 1111 | ddd | 3333 | t | 4444
+(4 rows)
+
+alter table t1 modify f1 int after f2, modify f3 int first;
+drop view t1_view1;
+create view t1_view1 as select * from t1;
+alter table t1 modify f1 int after f2, modify f3 int first;
+drop table t1 cascade;
+NOTICE: drop cascades to view t1_view1
+create table t1(f1 int, f2 text, f3 int, f4 bigint, f5 int generated always as (f1 + f3) stored);
+insert into t1 values(1, 'aaa', 3, 1);
+insert into t1 values(11, 'bbb', 33, 2);
+insert into t1 values(111, 'ccc', 333, 3);
+insert into t1 values(1111, 'ddd', 3333, 4);
+create view t1_view1 as select * from t1;
+select * from t1_view1;
+ f1 | f2 | f3 | f4 | f5
+------+-----+------+----+------
+ 1 | aaa | 3 | 1 | 4
+ 11 | bbb | 33 | 2 | 44
+ 111 | ccc | 333 | 3 | 444
+ 1111 | ddd | 3333 | 4 | 4444
+(4 rows)
+
+alter table t1 add f6 int first, add f7 int after f4, modify f1 int after f2, modify f3 int first;
+select * from t1_view1;
+ f1 | f2 | f3 | f4 | f5
+------+-----+------+----+------
+ 1 | aaa | 3 | 1 | 4
+ 11 | bbb | 33 | 2 | 44
+ 111 | ccc | 333 | 3 | 444
+ 1111 | ddd | 3333 | 4 | 4444
+(4 rows)
+
+drop view t1_view1;
+create view t1_view2 as select f1, f3, f5 from t1 where f2='aaa';
+select * from t1_view2;
+ f1 | f3 | f5
+----+----+----
+ 1 | 3 | 4
+(1 row)
+
+alter table t1 add f8 int first, add f9 int after f4, modify f1 int after f2, modify f3 int first, modify f2 varchar(20) first;
+select * from t1_view2;
+ f1 | f3 | f5
+----+----+----
+ 1 | 3 | 4
+(1 row)
+
+drop view t1_view2;
+create view t1_view3 as select * from (select f1+f3, f5 from t1);
+select * from t1_view3;
+ ?column? | f5
+----------+------
+ 4 | 4
+ 44 | 44
+ 444 | 444
+ 4444 | 4444
+(4 rows)
+
+alter table t1 add f10 int first, add f11 int after f4, modify f1 int after f2, modify f3 int first, modify f2 varchar(20) first;
+select * from t1_view3;
+ ?column? | f5
+----------+------
+ 4 | 4
+ 44 | 44
+ 444 | 444
+ 4444 | 4444
+(4 rows)
+
+drop view t1_view3;
+create view t1_view4 as select * from (select abs(f1+f3) as col1, abs(f5) as col2 from t1);
+select * from t1_view4;
+ col1 | col2
+------+------
+ 4 | 4
+ 44 | 44
+ 444 | 444
+ 4444 | 4444
+(4 rows)
+
+alter table t1 add f12 int first, add f13 int after f4, modify f1 int after f2, modify f3 int first, modify f2 varchar(20) first;
+select * from t1_view4;
+ col1 | col2
+------+------
+ 4 | 4
+ 44 | 44
+ 444 | 444
+ 4444 | 4444
+(4 rows)
+
+drop view t1_view4;
+create view t1_view5 as select * from (select * from t1);
+select * from t1_view5;
+ f12 | f2 | f3 | f10 | f1 | f8 | f6 | f4 | f13 | f11 | f9 | f7 | f5
+-----+-----+------+-----+------+----+----+----+-----+-----+----+----+------
+ | aaa | 3 | | 1 | | | 1 | | | | | 4
+ | bbb | 33 | | 11 | | | 2 | | | | | 44
+ | ccc | 333 | | 111 | | | 3 | | | | | 444
+ | ddd | 3333 | | 1111 | | | 4 | | | | | 4444
+(4 rows)
+
+alter table t1 add f14 int first, add f15 int after f4, modify f1 int after f2, modify f3 int first;
+select * from t1_view5;
+ f12 | f2 | f3 | f10 | f1 | f8 | f6 | f4 | f13 | f11 | f9 | f7 | f5
+-----+-----+------+-----+------+----+----+----+-----+-----+----+----+------
+ | aaa | 3 | | 1 | | | 1 | | | | | 4
+ | bbb | 33 | | 11 | | | 2 | | | | | 44
+ | ccc | 333 | | 111 | | | 3 | | | | | 444
+ | ddd | 3333 | | 1111 | | | 4 | | | | | 4444
+(4 rows)
+
+drop view t1_view5;
+create view t1_view6 as select f1, f3, f5 from t1 where f2='aaa';
+select * from t1_view6;
+ f1 | f3 | f5
+----+----+----
+ 1 | 3 | 4
+(1 row)
+
+alter table t1 modify f1 int after f2, modify f3 int first, modify f2 varchar(20) first;
+select * from t1_view6;
+ f1 | f3 | f5
+----+----+----
+ 1 | 3 | 4
+(1 row)
+
+drop view t1_view6;
+drop table t1 cascade;
+-- dts for add
+drop table if exists test_d;
+NOTICE: table "test_d" does not exist, skipping
+create table test_d (f2 int primary key, f3 bool, f5 text);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_d_pkey" for table "test_d"
+insert into test_d values(1,2,3), (2,3,4), (3,4,5);
+select * from test_d;
+ f2 | f3 | f5
+----+----+----
+ 1 | t | 3
+ 2 | t | 4
+ 3 | t | 5
+(3 rows)
+
+alter table test_d add f1 int default 1,add f11 text check (f11 >=2) first;
+select * from test_d;
+ f11 | f2 | f3 | f5 | f1
+-----+----+----+----+----
+ | 1 | t | 3 | 1
+ | 2 | t | 4 | 1
+ | 3 | t | 5 | 1
+(3 rows)
+
+
+drop table if exists test_d;
+create table test_d (f2 int primary key, f3 bool, f5 text);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_d_pkey" for table "test_d"
+insert into test_d values(1,2,3), (2,3,4), (3,4,5);
+select * from test_d;
+ f2 | f3 | f5
+----+----+----
+ 1 | t | 3
+ 2 | t | 4
+ 3 | t | 5
+(3 rows)
+
+alter table test_d add f1 int default 1;
+alter table test_d add f11 text check (f11 >=2) first;
+select * from test_d;
+ f11 | f2 | f3 | f5 | f1
+-----+----+----+----+----
+ | 1 | t | 3 | 1
+ | 2 | t | 4 | 1
+ | 3 | t | 5 | 1
+(3 rows)
+
+drop table if exists test_d;
+
+drop table if exists t1 cascade;
+NOTICE: table "t1" does not exist, skipping
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+select * from t1;
+ f1 | f2 | f3 | f4 | f5
+----+----+---------------------------------+----------+----
+ 1 | a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ 2 | b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+alter table t1 add f6 int generated always as (f1 * 10) stored, add f7 text default '777' first,
+ add f8 int default 8, add f9 int primary key auto_increment after f6;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f9_seq" for serial column "t1.f9"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+select * from t1;
+ f7 | f1 | f2 | f3 | f4 | f5 | f6 | f9 | f8
+-----+----+----+---------------------------------+----------+----+----+----+----
+ 777 | 1 | a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t | 10 | 1 | 8
+ 777 | 2 | b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f | 20 | 2 | 8
+(2 rows)
+
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+select * from t1;
+ f1 | f2 | f3 | f4 | f5
+----+----+---------------------------------+----------+----
+ 1 | a | Tue Nov 08 19:56:10.158564 2022 | 01000001 | t
+ 2 | b | Wed Nov 09 19:56:10.158564 2022 | 01000010 | f
+(2 rows)
+
+alter table t1 add f6 int generated always as (f1 * 10) stored, add f7 text default '7' first,
+ add f8 int default 8, add f9 int primary key auto_increment after f1,
+ add f10 bool default true, add f11 timestamp after f2,
+ add f12 text after f3, add f14 int default '14', add f15 int default 15 check(f15 = 15) after f9;
+NOTICE: ALTER TABLE will create implicit sequence "t1_f9_seq" for serial column "t1.f9"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
+select * from t1;
+ f7 | f1 | f9 | f15 | f2 | f11 | f3 | f12 | f4 | f5 | f6 | f8 | f10 | f14
+----+----+----+-----+----+-----+---------------------------------+-----+----------+----+----+----+-----+-----
+ 7 | 1 | 1 | 15 | a | | Tue Nov 08 19:56:10.158564 2022 | | 01000001 | t | 10 | 8 | t | 14
+ 7 | 2 | 2 | 15 | b | | Wed Nov 09 19:56:10.158564 2022 | | 01000010 | f | 20 | 8 | t | 14
+(2 rows)
+
+drop table if exists t1 cascade;
+\c postgres
+drop database test_first_after_B;
diff --git a/src/test/regress/expected/alter_table_modify.out b/src/test/regress/expected/alter_table_modify.out
new file mode 100644
index 000000000..451500975
--- /dev/null
+++ b/src/test/regress/expected/alter_table_modify.out
@@ -0,0 +1,3368 @@
+create database atbdb WITH ENCODING 'UTF-8' dbcompatibility 'B';
+\c atbdb
+CREATE SCHEMA atbdb_schema;
+SET CURRENT_SCHEMA TO atbdb_schema;
+ALTER TABLE pg_catalog.pg_class MODIFY COLUMN relname int; -- ERROR
+ERROR: permission denied: "pg_class" is a system catalog
+-- cstore not supported
+CREATE TABLE test_at_modify_cstore(
+ a int,
+ b int NOT NULL
+) WITH (ORIENTATION=column, COMPRESSION=high, COMPRESSLEVEL=2);
+ALTER TABLE test_at_modify_cstore MODIFY COLUMN b int PRIMARY KEY;
+ERROR: Un-support feature
+DETAIL: column-store relation doesn't support this ALTER yet
+DROP TABLE test_at_modify_cstore;
+-- test modify column syntax
+CREATE TABLE test_at_modify_syntax(
+ a int,
+ b int NOT NULL
+);
+ALTER TABLE test_at_modify_syntax MODIFY b int INVISIBLE; -- ERROR
+ERROR: syntax error at or near "INVISIBLE"
+LINE 1: ALTER TABLE test_at_modify_syntax MODIFY b int INVISIBLE;
+ ^
+ALTER TABLE test_at_modify_syntax MODIFY b int COMMENT 'string'; -- ERROR
+ERROR: syntax error at or near "COMMENT"
+LINE 1: ALTER TABLE test_at_modify_syntax MODIFY b int COMMENT 'stri...
+ ^
+ALTER TABLE test_at_modify_syntax MODIFY b int CHECK (b < 100) NOT ENFORCED; -- ERROR
+ERROR: syntax error at or near "ENFORCED"
+LINE 1: ..._at_modify_syntax MODIFY b int CHECK (b < 100) NOT ENFORCED;
+ ^
+ALTER TABLE test_at_modify_syntax MODIFY b int GENERATED ALWAYS AS (a+1) VIRTUAL; -- ERROR
+ERROR: syntax error at or near "VIRTUAL"
+LINE 1: ...odify_syntax MODIFY b int GENERATED ALWAYS AS (a+1) VIRTUAL;
+ ^
+ALTER TABLE test_at_modify_syntax MODIFY b int KEY; -- ERROR
+ERROR: syntax error at or near "KEY"
+LINE 1: ALTER TABLE test_at_modify_syntax MODIFY b int KEY;
+ ^
+ALTER TABLE test_at_modify_syntax MODIFY COLUMN xmax int; -- ERROR
+ERROR: cannot alter system column "xmax"
+ALTER TABLE test_at_modify_syntax MODIFY b int encrypted with (column_encryption_key = ImgCEK, encryption_type = DETERMINISTIC); -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: modify or change column to encrypted column is not supported
+ALTER TABLE test_at_modify_syntax MODIFY b varchar(8); -- alter column type only
+\d+ test_at_modify_syntax;
+ Table "atbdb_schema.test_at_modify_syntax"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | not null | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+set b_format_behavior_compat_options = 'enable_modify_column';
+show b_format_behavior_compat_options;
+ b_format_behavior_compat_options
+----------------------------------
+ enable_modify_column
+(1 row)
+
+ALTER TABLE test_at_modify_syntax MODIFY b varchar(8); -- modify column
+\d+ test_at_modify_syntax;
+ Table "atbdb_schema.test_at_modify_syntax"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+set b_format_behavior_compat_options = '';
+show b_format_behavior_compat_options;
+ b_format_behavior_compat_options
+----------------------------------
+
+(1 row)
+
+ALTER TABLE test_at_modify_syntax MODIFY b int UNIQUE KEY;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_syntax_b_key" for table "test_at_modify_syntax"
+\d+ test_at_modify_syntax;
+ Table "atbdb_schema.test_at_modify_syntax"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b | integer | | plain | |
+Indexes:
+ "test_at_modify_syntax_b_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_modify_syntax MODIFY COLUMN b int PRIMARY KEY;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_syntax_pkey" for table "test_at_modify_syntax"
+\d+ test_at_modify_syntax;
+ Table "atbdb_schema.test_at_modify_syntax"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b | integer | not null | plain | |
+Indexes:
+ "test_at_modify_syntax_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default
+ "test_at_modify_syntax_b_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+DROP TABLE test_at_modify_syntax;
+-- test modify column without data
+CREATE TABLE test_at_modify(
+ a int,
+ b int NOT NULL
+);
+ALTER TABLE test_at_modify MODIFY b varchar(8) NULL;
+\d+ test_at_modify;
+ Table "atbdb_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_modify MODIFY b varchar(8) DEFAULT '0';
+\d+ test_at_modify;
+ Table "atbdb_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+--------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | default '0'::character varying | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_modify MODIFY b int AUTO_INCREMENT PRIMARY KEY INITIALLY DEFERRED;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_b_seq" for serial column "test_at_modify.b"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_pkey" for table "test_at_modify"
+\d+ test_at_modify;
+ Table "atbdb_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-------------------------+---------+--------------+-------------
+ a | integer | | plain | |
+ b | integer | not null AUTO_INCREMENT | plain | |
+Indexes:
+ "test_at_modify_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default DEFERRABLE INITIALLY DEFERRED
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_modify MODIFY b varchar(8) UNIQUE DEFERRABLE;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_b_key" for table "test_at_modify"
+\d+ test_at_modify;
+ Table "atbdb_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | not null | extended | |
+Indexes:
+ "test_at_modify_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default DEFERRABLE INITIALLY DEFERRED
+ "test_at_modify_b_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default DEFERRABLE
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_modify MODIFY b varchar(8) CHECK (b < 'a');
+\d+ test_at_modify;
+ Table "atbdb_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | not null | extended | |
+Indexes:
+ "test_at_modify_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default DEFERRABLE INITIALLY DEFERRED
+ "test_at_modify_b_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default DEFERRABLE
+Check constraints:
+ "test_at_modify_b_check" CHECK (b::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_modify MODIFY b varchar(8) COLLATE "POSIX";
+\d+ test_at_modify;
+ Table "atbdb_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | collate POSIX not null | extended | |
+Indexes:
+ "test_at_modify_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default DEFERRABLE INITIALLY DEFERRED
+ "test_at_modify_b_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default DEFERRABLE
+Check constraints:
+ "test_at_modify_b_check" CHECK (b::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_modify MODIFY b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+\d+ test_at_modify;
+ Table "atbdb_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------------------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | not null generated always as ((a + 1)) stored | extended | |
+Indexes:
+ "test_at_modify_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default DEFERRABLE INITIALLY DEFERRED
+ "test_at_modify_b_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default DEFERRABLE
+Check constraints:
+ "test_at_modify_b_check" CHECK (b::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_modify MODIFY b int NOT NULL;
+\d+ test_at_modify;
+ Table "atbdb_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b | integer | not null | plain | |
+Indexes:
+ "test_at_modify_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default DEFERRABLE INITIALLY DEFERRED
+ "test_at_modify_b_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default DEFERRABLE
+Check constraints:
+ "test_at_modify_b_check" CHECK (b::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select pg_get_tabledef('test_at_modify'::regclass);
+ pg_get_tabledef
+---------------------------------------------------------------------------------------------------------------------------------------------------------
+ SET search_path = atbdb_schema; +
+ CREATE TABLE test_at_modify ( +
+ a integer, +
+ b integer NOT NULL, +
+ CONSTRAINT test_at_modify_b_check CHECK (((b)::text < 'a'::text)) +
+ ) +
+ WITH (orientation=row, compression=no); +
+ ALTER TABLE test_at_modify ADD CONSTRAINT test_at_modify_b_key UNIQUE USING btree (b) DEFERRABLE DEFERRABLE; +
+ ALTER TABLE test_at_modify ADD CONSTRAINT test_at_modify_pkey PRIMARY KEY USING btree (b) DEFERRABLE INITIALLY DEFERRED DEFERRABLE INITIALLY DEFERRED;
+(1 row)
+
+INSERT INTO test_at_modify VALUES(1,1);
+DROP TABLE test_at_modify;
+-- test modify column datatype
+CREATE TABLE test_at_modify_type(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_modify_type VALUES(1,1);
+INSERT INTO test_at_modify_type VALUES(2,2);
+INSERT INTO test_at_modify_type VALUES(3,3);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b varchar(8);
+SELECT * FROM test_at_modify_type where b = '3';
+ a | b
+---+---
+ 3 | 3
+(1 row)
+
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DATE; -- ERROR
+ERROR: invalid input syntax for type date: "1"
+ALTER TABLE test_at_modify_type MODIFY COLUMN b RAW;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+ a | b
+---+----
+ 1 | 01
+ 2 | 02
+ 3 | 03
+(3 rows)
+
+DROP TABLE test_at_modify_type;
+CREATE TABLE test_at_modify_type(
+ a int,
+ b serial NOT NULL
+);
+NOTICE: CREATE TABLE will create implicit sequence "test_at_modify_type_b_seq" for serial column "test_at_modify_type.b"
+INSERT INTO test_at_modify_type VALUES(1,1);
+INSERT INTO test_at_modify_type VALUES(2,2);
+INSERT INTO test_at_modify_type VALUES(3,3);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b int;
+ALTER TABLE test_at_modify_type MODIFY COLUMN b int[]; -- ERROR
+ERROR: column "b" cannot be cast automatically to type integer[]
+HINT: Specify a USING expression to perform the conversion.
+ALTER TABLE test_at_modify_type MODIFY COLUMN b int16;
+ALTER TABLE test_at_modify_type MODIFY COLUMN b serial; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change column to type 'serial'
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DECIMAL(4,2);
+SELECT * FROM test_at_modify_type where b = 3;
+ a | b
+---+------
+ 3 | 3.00
+(1 row)
+
+ALTER TABLE test_at_modify_type MODIFY COLUMN b BOOLEAN;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+ a | b
+---+---
+ 1 | t
+ 2 | t
+ 3 | t
+(3 rows)
+
+DROP TABLE test_at_modify_type;
+CREATE TABLE test_at_modify_type(
+ a int,
+ b text
+);
+INSERT INTO test_at_modify_type VALUES(1,'beijing');
+INSERT INTO test_at_modify_type VALUES(2,'shanghai');
+INSERT INTO test_at_modify_type VALUES(3,'guangzhou');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','nanjing','wuhan'); -- ERROR
+NOTICE: ALTER TABLE will create implicit set "test_at_modify_type_b_set" for column "test_at_modify_type.b"
+ERROR: invalid input value for set test_at_modify_type_b_set: 'guangzhou'
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','nanjing','guangzhou');
+NOTICE: ALTER TABLE will create implicit set "test_at_modify_type_b_set" for column "test_at_modify_type.b"
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','guangzhou','wuhan'); -- ERROR
+ERROR: can not alter column type to another set
+select pg_get_tabledef('test_at_modify_type'::regclass);
+ pg_get_tabledef
+----------------------------------------------------------
+ SET search_path = atbdb_schema; +
+ CREATE TABLE test_at_modify_type ( +
+ a integer, +
+ b SET('beijing', 'shanghai', 'nanjing', 'guangzhou')+
+ ) +
+ WITH (orientation=row, compression=no);
+(1 row)
+
+DROP TABLE test_at_modify_type;
+CREATE TABLE test_at_modify_type(
+ a int,
+ b varchar(32)
+);
+INSERT INTO test_at_modify_type VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_modify_type VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_modify_type VALUES(3,'2022-11-24 12:00:00');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b varchar(10); -- ERROR
+ERROR: value too long for type character varying(10)
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DATE;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+ a | b
+---+------------
+ 1 | 11-22-2022
+ 2 | 11-23-2022
+ 3 | 11-24-2022
+(3 rows)
+
+DROP TABLE test_at_modify_type;
+CREATE TABLE test_at_modify_type(
+ a int,
+ b int[] NOT NULL
+);
+INSERT INTO test_at_modify_type VALUES(1,ARRAY[1,1]);
+INSERT INTO test_at_modify_type VALUES(2,ARRAY[2,2]);
+INSERT INTO test_at_modify_type VALUES(3,ARRAY[3,3]);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b float4[];
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+ a | b
+---+-------
+ 1 | {1,1}
+ 2 | {2,2}
+ 3 | {3,3}
+(3 rows)
+
+DROP TABLE test_at_modify_type;
+-- test modify column constraint
+CREATE TABLE test_at_modify_constr(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_modify_constr VALUES(1,1);
+INSERT INTO test_at_modify_constr VALUES(2,2);
+INSERT INTO test_at_modify_constr VALUES(3,3);
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) NOT NULL NULL; -- ERROR
+ERROR: conflicting NULL/NOT NULL declarations for column "b" of table "test_at_modify_constr"
+LINE 1: ...BLE test_at_modify_constr MODIFY b varchar(8) NOT NULL NULL;
+ ^
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) UNIQUE KEY NULL;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_constr_b_key" for table "test_at_modify_constr"
+INSERT INTO test_at_modify_constr VALUES(3,3); -- ERROR
+ERROR: duplicate key value violates unique constraint "test_at_modify_constr_b_key"
+DETAIL: Key (b)=(3) already exists.
+INSERT INTO test_at_modify_constr VALUES(4,NULL);
+ALTER TABLE test_at_modify_constr MODIFY b int NULL PRIMARY KEY; -- ERROR
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_constr_pkey" for table "test_at_modify_constr"
+ERROR: column "b" contains null values
+DELETE FROM test_at_modify_constr WHERE b IS NULL;
+ALTER TABLE test_at_modify_constr MODIFY b int NULL PRIMARY KEY;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_constr_pkey" for table "test_at_modify_constr"
+INSERT INTO test_at_modify_constr VALUES(4,NULL); -- ERROR
+ERROR: null value in column "b" violates not-null constraint
+DETAIL: Failing row contains (4, null).
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b < 3); -- ERROR
+ERROR: check constraint "t_at_m_check" is violated by some row
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b < 5);
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b = a); -- ERROR
+ERROR: constraint "t_at_m_check" for relation "test_at_modify_constr" already exists
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check_1 CHECK (b = a);
+INSERT INTO test_at_modify_constr VALUES(4,4);
+INSERT INTO test_at_modify_constr VALUES(5,5); -- ERROR
+ERROR: new row for relation "test_at_modify_constr" violates check constraint "t_at_m_check"
+DETAIL: N/A
+INSERT INTO test_at_modify_constr VALUES(6,'a'); -- ERROR
+ERROR: new row for relation "test_at_modify_constr" violates check constraint "t_at_m_check_1"
+DETAIL: N/A
+INSERT INTO test_at_modify_constr VALUES(0,'a');
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL PRIMARY KEY; -- ERROR
+ERROR: multiple primary keys for table "test_at_modify_constr" are not allowed
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL;
+INSERT INTO test_at_modify_constr VALUES(5,5); -- ERROR
+ERROR: new row for relation "test_at_modify_constr" violates check constraint "t_at_m_check"
+DETAIL: N/A
+SELECT b FROM test_at_modify_constr ORDER BY 1;
+ b
+---
+ 0
+ 1
+ 2
+ 3
+ 4
+(5 rows)
+
+select pg_get_tabledef('test_at_modify_constr'::regclass);
+ pg_get_tabledef
+-----------------------------------------------------------------------------------------------------------
+ SET search_path = atbdb_schema; +
+ CREATE TABLE test_at_modify_constr ( +
+ a integer, +
+ b integer NOT NULL, +
+ CONSTRAINT t_at_m_check_1 CHECK (((b)::bigint = a)), +
+ CONSTRAINT t_at_m_check CHECK (((b)::bigint < 5)) +
+ ) +
+ WITH (orientation=row, compression=no); +
+ ALTER TABLE test_at_modify_constr ADD CONSTRAINT test_at_modify_constr_b_key UNIQUE USING btree (b); +
+ ALTER TABLE test_at_modify_constr ADD CONSTRAINT test_at_modify_constr_pkey PRIMARY KEY USING btree (b);
+(1 row)
+
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL REFERENCES test_at_ref (a); -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: modify or change column REFERENCES constraint is not supported
+DROP TABLE test_at_modify_constr;
+-- test modify column default
+CREATE TABLE test_at_modify_default(
+ a int,
+ b int DEFAULT NULL
+);
+INSERT INTO test_at_modify_default VALUES(1,1);
+INSERT INTO test_at_modify_default VALUES(2,2);
+INSERT INTO test_at_modify_default VALUES(3,3);
+ALTER TABLE test_at_modify_default MODIFY b bigint DEFAULT (a+1); -- ERROR
+ERROR: default value cannot reference to a column
+HINT: Perhaps the default value is enclosed in double quotes
+ALTER TABLE test_at_modify_default MODIFY b bigint DEFAULT NULL;
+\d+ test_at_modify_default;
+ Table "atbdb_schema.test_at_modify_default"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b | bigint | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) DEFAULT 'a' GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ERROR: both default and generation expression specified for column "b" of table "test_at_modify_default"
+LINE 1: ...at_modify_default MODIFY b varchar(8) DEFAULT 'a' GENERATED ...
+ ^
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) DEFAULT 'a';
+\d+ test_at_modify_default;
+ Table "atbdb_schema.test_at_modify_default"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+--------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | default 'a'::character varying | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+INSERT INTO test_at_modify_default VALUES(0,DEFAULT);
+SELECT b FROM test_at_modify_default ORDER BY 1;
+ b
+---
+ 1
+ 2
+ 3
+ a
+(4 rows)
+
+ALTER TABLE test_at_modify_default MODIFY b int DEFAULT 4 AUTO_INCREMENT; -- ERROR
+ERROR: multiple default values specified for column "b" of table "test_at_modify_default"
+LINE 1: ...BLE test_at_modify_default MODIFY b int DEFAULT 4 AUTO_INCRE...
+ ^
+ALTER TABLE test_at_modify_default MODIFY b int DEFAULT 4;
+INSERT INTO test_at_modify_default VALUES(4,DEFAULT);
+SELECT b FROM test_at_modify_default ORDER BY 1;
+ b
+---
+ 0
+ 1
+ 2
+ 3
+ 4
+(5 rows)
+
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+SELECT a,b FROM test_at_modify_default ORDER BY 1,2;
+ a | b
+---+---
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 4
+ 4 | 5
+(5 rows)
+
+ALTER TABLE test_at_modify_default MODIFY a varchar(8) DEFAULT 'a';
+INSERT INTO test_at_modify_default VALUES(DEFAULT,DEFAULT);
+SELECT a,b FROM test_at_modify_default ORDER BY 1,2;
+ a | b
+---+---
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 4
+ 4 | 5
+ a | 1
+(6 rows)
+
+\d+ test_at_modify_default;
+ Table "atbdb_schema.test_at_modify_default"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+------------------------------------------------+----------+--------------+-------------
+ a | character varying(8) | default 'a'::character varying | extended | |
+ b | character varying(8) | generated always as (((a)::bigint + 1)) stored | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+DROP TABLE test_at_modify_default;
+-- test modify column depended by generated column
+CREATE TABLE test_at_modify_generated(
+ a int,
+ b varchar(32),
+ c varchar(32) GENERATED ALWAYS AS (b) STORED
+);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_modify_generated(a,b) VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_modify_generated(a,b) VALUES(3,'2022-11-24 12:00:00');
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b | c
+---+---------------------+---------------------
+ 1 | 2022-11-22 12:00:00 | 2022-11-22 12:00:00
+ 2 | 2022-11-23 12:00:00 | 2022-11-23 12:00:00
+ 3 | 2022-11-24 12:00:00 | 2022-11-24 12:00:00
+(3 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b DATE;
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b | c
+---+------------+------------
+ 1 | 11-22-2022 | 11-22-2022
+ 2 | 11-23-2022 | 11-23-2022
+ 3 | 11-24-2022 | 11-24-2022
+(3 rows)
+
+\d+ test_at_modify_generated
+ Table "atbdb_schema.test_at_modify_generated"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------+--------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | date | | plain | |
+ c | character varying(32) | generated always as (b) stored | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b varchar(32) AFTER c;
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | c | b
+---+------------+------------
+ 1 | 11-22-2022 | 11-22-2022
+ 2 | 11-23-2022 | 11-23-2022
+ 3 | 11-24-2022 | 11-24-2022
+(3 rows)
+
+DROP TABLE test_at_modify_generated;
+CREATE TABLE test_at_modify_generated(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+);
+INSERT INTO test_at_modify_generated(a,b) VALUES(-1,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(0,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(2,DEFAULT);
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b varchar(8) GENERATED ALWAYS AS (a) STORED FIRST, MODIFY COLUMN a varchar(8) AFTER b;
+INSERT INTO test_at_modify_generated(a,b) VALUES(3,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY b::int,a::int;
+ b | a
+----+----
+ -1 | -1
+ 0 | 0
+ 1 | 1
+ 2 | 2
+ 3 | 3
+(5 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a int AFTER b, MODIFY COLUMN b int GENERATED ALWAYS AS (a+1) STORED FIRST;
+INSERT INTO test_at_modify_generated(a,b) VALUES(4,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ b | a
+---+----
+ 0 | -1
+ 1 | 0
+ 2 | 1
+ 3 | 2
+ 4 | 3
+ 5 | 4
+(6 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b varchar(8) AFTER a, MODIFY COLUMN a varchar(8) AFTER b;
+INSERT INTO test_at_modify_generated(a,b) VALUES(5,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ b | a
+---+----
+ 0 | -1
+ 1 | 0
+ 2 | 1
+ 3 | 2
+ 4 | 3
+ 5 | 4
+ | 5
+(7 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b int GENERATED ALWAYS AS (a) STORED;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a int FIRST, MODIFY COLUMN b int FIRST;
+INSERT INTO test_at_modify_generated(a,b) VALUES(6,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ b | a
+----+----
+ -1 | -1
+ 0 | 0
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+ 5 | 5
+ | 6
+(8 rows)
+
+DROP TABLE test_at_modify_generated;
+CREATE TABLE test_at_modify_generated(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+);
+INSERT INTO test_at_modify_generated(a,b) VALUES(-1,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(0,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,DEFAULT);
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a bool;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a int;
+INSERT INTO test_at_modify_generated(a,b) VALUES(100,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b
+-----+-----
+ 0 | 1
+ 1 | 2
+ 1 | 2
+ 100 | 101
+(4 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a bool, MODIFY COLUMN b varchar(32);
+\d test_at_modify_generated
+Table "atbdb_schema.test_at_modify_generated"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ a | boolean |
+ b | character varying(32) |
+
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b
+---+-----
+ f | 1
+ t | 101
+ t | 2
+ t | 2
+(4 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b int GENERATED ALWAYS AS (a+1) STORED, MODIFY COLUMN a int;
+\d test_at_modify_generated
+ Table "atbdb_schema.test_at_modify_generated"
+ Column | Type | Modifiers
+--------+---------+------------------------------------------------
+ a | integer |
+ b | integer | generated always as (((a)::bigint + 1)) stored
+
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b
+---+---
+ 0 | 1
+ 1 | 2
+ 1 | 2
+ 1 | 2
+(4 rows)
+
+INSERT INTO test_at_modify_generated(a,b) VALUES(100,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b
+-----+-----
+ 0 | 1
+ 1 | 2
+ 1 | 2
+ 1 | 2
+ 100 | 101
+(5 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b bool GENERATED ALWAYS AS (a) STORED;
+\d test_at_modify_generated
+ Table "atbdb_schema.test_at_modify_generated"
+ Column | Type | Modifiers
+--------+---------+--------------------------------
+ a | integer |
+ b | boolean | generated always as (a) stored
+
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b
+-----+---
+ 0 | f
+ 1 | t
+ 1 | t
+ 1 | t
+ 100 | t
+(5 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a bool;
+\d test_at_modify_generated
+ Table "atbdb_schema.test_at_modify_generated"
+ Column | Type | Modifiers
+--------+---------+--------------------------------
+ a | boolean |
+ b | boolean | generated always as (a) stored
+
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b
+---+---
+ f | f
+ t | t
+ t | t
+ t | t
+ t | t
+(5 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a int;
+INSERT INTO test_at_modify_generated(a,b) VALUES(100,DEFAULT);
+\d test_at_modify_generated
+ Table "atbdb_schema.test_at_modify_generated"
+ Column | Type | Modifiers
+--------+---------+--------------------------------
+ a | integer |
+ b | boolean | generated always as (a) stored
+
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b
+-----+---
+ 0 | f
+ 1 | t
+ 1 | t
+ 1 | t
+ 1 | t
+ 100 | t
+(6 rows)
+
+DROP TABLE test_at_modify_generated;
+-- error generated column reference generated column
+CREATE TABLE test_at_modify_generated(
+ a int,
+ b int,
+ c int GENERATED ALWAYS AS (b+1) STORED
+);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,1);
+ALTER TABLE test_at_modify_generated MODIFY b float4 GENERATED ALWAYS AS (a+1000) STORED; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: A generated column cannot reference another generated column.
+ALTER TABLE test_at_modify_generated MODIFY b float4 GENERATED ALWAYS AS (c+1000) STORED; -- ERROR
+ERROR: cannot use generated column "c" in column generation expression
+DETAIL: A generated column cannot reference another generated column.
+ALTER TABLE test_at_modify_generated MODIFY a float4 GENERATED ALWAYS AS (b+1000) STORED, MODIFY c float4 GENERATED ALWAYS AS (a+1000) STORED; -- ERROR
+ERROR: cannot use generated column "a" in column generation expression
+DETAIL: A generated column cannot reference another generated column.
+ALTER TABLE test_at_modify_generated MODIFY COLUMN c float4, MODIFY b float4 GENERATED ALWAYS AS (c+1000) STORED;
+DROP TABLE test_at_modify_generated;
+-- test modify column AUTO_INCREMENT
+CREATE TABLE test_at_modify_autoinc(
+ a int,
+ b int
+);
+INSERT INTO test_at_modify_autoinc VALUES(1,NULL);
+INSERT INTO test_at_modify_autoinc VALUES(2,0);
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq" for serial column "test_at_modify_autoinc.b"
+ERROR: auto_increment column must be defined as a unique or primary key
+ALTER TABLE test_at_modify_autoinc MODIFY b DECIMAL(4,2) AUTO_INCREMENT UNIQUE KEY; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq" for serial column "test_at_modify_autoinc.b"
+ERROR: The datatype of column 'b' does not support auto_increment
+ALTER TABLE test_at_modify_autoinc MODIFY b serial AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change column to type 'serial'
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT NULL UNIQUE KEY;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq" for serial column "test_at_modify_autoinc.b"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_autoinc_b_key" for table "test_at_modify_autoinc"
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 | 2
+(2 rows)
+
+INSERT INTO test_at_modify_autoinc VALUES(3,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 | 2
+ 3 | 3
+(3 rows)
+
+ALTER TABLE test_at_modify_autoinc MODIFY COLUMN b int;
+INSERT INTO test_at_modify_autoinc VALUES(4,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 | 2
+ 3 | 3
+ 4 | 0
+(4 rows)
+
+ALTER TABLE test_at_modify_autoinc MODIFY b int AUTO_INCREMENT PRIMARY KEY, AUTO_INCREMENT=100;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq" for serial column "test_at_modify_autoinc.b"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_autoinc_pkey" for table "test_at_modify_autoinc"
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+-----
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+(4 rows)
+
+INSERT INTO test_at_modify_autoinc VALUES(5,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+-----
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+(5 rows)
+
+ALTER TABLE test_at_modify_autoinc AUTO_INCREMENT=1000;
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq1" for serial column "test_at_modify_autoinc.b"
+INSERT INTO test_at_modify_autoinc VALUES(6,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+------
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+ 6 | 1000
+(6 rows)
+
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE, MODIFY b int2 AUTO_INCREMENT UNIQUE KEY; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_c_seq" for serial column "test_at_modify_autoinc.c"
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq" for serial column "test_at_modify_autoinc.b"
+ERROR: Incorrect table definition, there can be only one auto_increment column
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_c_seq" for serial column "test_at_modify_autoinc.c"
+ERROR: Incorrect column definition, there can be only one auto_increment column
+ALTER TABLE test_at_modify_autoinc MODIFY COLUMN b int;
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_c_seq" for serial column "test_at_modify_autoinc.c"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_autoinc_c_key" for table "test_at_modify_autoinc"
+INSERT INTO test_at_modify_autoinc VALUES(7,0,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b | c
+---+------+---
+ 1 | 100 | 1
+ 2 | 2 | 2
+ 3 | 3 | 3
+ 4 | 101 | 4
+ 5 | 102 | 5
+ 6 | 1000 | 6
+ 7 | 0 | 7
+(7 rows)
+
+ALTER TABLE test_at_modify_autoinc DROP COLUMN c , MODIFY b int2 AUTO_INCREMENT UNIQUE KEY FIRST;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq" for serial column "test_at_modify_autoinc.b"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_autoinc_b_key1" for table "test_at_modify_autoinc"
+INSERT INTO test_at_modify_autoinc(a,b) VALUES(8,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 2,1;
+ b | a
+------+---
+ 100 | 1
+ 2 | 2
+ 3 | 3
+ 101 | 4
+ 102 | 5
+ 1000 | 6
+ 1001 | 7
+ 1002 | 8
+(8 rows)
+
+ALTER TABLE test_at_modify_autoinc MODIFY b float4; -- ALTER TYPE ONLY, KEEP AUTO_INCREMENT
+INSERT INTO test_at_modify_autoinc(a,b) VALUES(9,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 2,1;
+ b | a
+------+---
+ 100 | 1
+ 2 | 2
+ 3 | 3
+ 101 | 4
+ 102 | 5
+ 1000 | 6
+ 1001 | 7
+ 1002 | 8
+ 1003 | 9
+(9 rows)
+
+DROP TABLE test_at_modify_autoinc;
+-- test generated column reference auto_increment column
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,0);
+INSERT INTO test_at_modify_fa VALUES(21,22,0);
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c int AUTO_INCREMENT PRIMARY KEY, MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_fa_c_seq" for serial column "test_at_modify_fa.c"
+ERROR: generated column cannot refer to auto_increment column
+ALTER TABLE test_at_modify_fa MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED, MODIFY COLUMN c int AUTO_INCREMENT PRIMARY KEY; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_fa_c_seq" for serial column "test_at_modify_fa.c"
+ERROR: generated column cannot refer to auto_increment column
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c bigint AUTO_INCREMENT PRIMARY KEY, MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_fa_c_seq" for serial column "test_at_modify_fa.c"
+ERROR: generated column cannot refer to auto_increment column
+ALTER TABLE test_at_modify_fa MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED, MODIFY COLUMN c bigint AUTO_INCREMENT PRIMARY KEY; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_fa_c_seq" for serial column "test_at_modify_fa.c"
+ERROR: generated column cannot refer to auto_increment column
+DROP TABLE test_at_modify_fa;
+-- test modify column depended by other objects
+CREATE TABLE test_at_modify_depend(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_modify_depend VALUES(1,1);
+INSERT INTO test_at_modify_depend VALUES(2,2);
+INSERT INTO test_at_modify_depend VALUES(3,3);
+-- --PROCEDURE contains column
+CREATE OR REPLACE PROCEDURE test_at_modify_proc(IN p_in int)
+ AS
+ BEGIN
+ INSERT INTO test_at_modify_depend(a,b) VALUES(p_in, p_in);
+ END;
+/
+ALTER TABLE test_at_modify_depend MODIFY b varchar(8) NOT NULL;
+CALL test_at_modify_proc(2);
+ test_at_modify_proc
+---------------------
+
+(1 row)
+
+DROP PROCEDURE test_at_modify_proc;
+-- --TRIGGER contains and depends column
+CREATE OR REPLACE FUNCTION tg_bf_test_at_modify_func() RETURNS TRIGGER AS
+$$
+ DECLARE
+ BEGIN
+ UPDATE test_at_modify_depend SET b = NULL WHERE a < NEW.a;
+ RETURN NEW;
+ END
+$$ LANGUAGE PLPGSQL;
+CREATE TRIGGER tg_bf_test_at_modify
+ AFTER UPDATE ON test_at_modify_depend
+ FOR EACH ROW WHEN ( NEW.b IS NULL AND OLD.b = OLD.a)
+ EXECUTE PROCEDURE tg_bf_test_at_modify_func();
+ALTER TABLE test_at_modify_depend MODIFY b int NULL DEFAULT 0;
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+ a | b
+---+---
+ 1 | 1
+ 2 | 2
+ 2 | 2
+ 3 | 3
+(4 rows)
+
+UPDATE test_at_modify_depend SET b = NULL WHERE a = 2;
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 |
+ 2 |
+ 3 | 3
+(4 rows)
+
+DROP TRIGGER tg_bf_test_at_modify ON test_at_modify_depend;
+-- --TRIGGER contains but does not depend column
+CREATE TRIGGER tg_bf_test_at_modify
+ BEFORE INSERT ON test_at_modify_depend
+ FOR EACH ROW
+ EXECUTE PROCEDURE tg_bf_test_at_modify_func();
+ALTER TABLE test_at_modify_depend MODIFY b varchar(8) NULL;
+INSERT INTO test_at_modify_depend VALUES (4, 4);
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 |
+ 2 |
+ 3 |
+ 4 | 4
+(5 rows)
+
+DROP TRIGGER tg_bf_test_at_modify ON test_at_modify_depend;
+DROP PROCEDURE tg_bf_test_at_modify_func;
+-- --VIEW depends column
+CREATE VIEW test_at_modify_view AS SELECT b FROM test_at_modify_depend;
+ALTER TABLE test_at_modify_depend MODIFY b bigint NULL; -- ERROR
+ERROR: cannot change data type of view column "b" from character varying(8) to bigint
+ALTER TABLE test_at_modify_depend MODIFY b int NULL; -- ERROR
+ERROR: cannot change data type of view column "b" from character varying(8) to integer
+ALTER TABLE test_at_modify_depend MODIFY b varchar(8) NULL;
+SELECT * FROM test_at_modify_view ORDER BY 1;
+ b
+---
+ 4
+
+
+
+
+(5 rows)
+
+DROP VIEW test_at_modify_view;
+CREATE VIEW test_at_modify_view AS SELECT a FROM test_at_modify_depend where b > 0;
+CREATE VIEW test_at_modify_view1 AS SELECT * FROM test_at_modify_view;
+ALTER TABLE test_at_modify_depend MODIFY b bigint NULL GENERATED ALWAYS AS (a+1);
+ALTER TABLE test_at_modify_depend MODIFY b varchar(8) NULL;
+ALTER TABLE test_at_modify_depend MODIFY b int NULL;
+DROP VIEW test_at_modify_view1;
+DROP VIEW test_at_modify_view;
+CREATE materialized VIEW test_at_modify_view AS SELECT b FROM test_at_modify_depend;
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b bigint not null; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: modify or change a column used by materialized view or rule is not supported
+DROP MATERIALIZED VIEW test_at_modify_view;
+-- --TABLE reference column.
+DELETE FROM test_at_modify_depend;
+ALTER TABLE test_at_modify_depend MODIFY b INT PRIMARY KEY;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_depend_pkey" for table "test_at_modify_depend"
+CREATE TABLE test_at_modify_ref(
+ a int,
+ b int,
+ FOREIGN KEY (b) REFERENCES test_at_modify_depend(b) ON DELETE SET NULL
+);
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b varchar(8);
+INSERT INTO test_at_modify_ref VALUES(0,0); -- ERROR
+ERROR: insert or update on table "test_at_modify_ref" violates foreign key constraint "test_at_modify_ref_b_fkey"
+DETAIL: Key (b)=(0) is not present in table "test_at_modify_depend".
+INSERT INTO test_at_modify_depend VALUES(0,0);
+INSERT INTO test_at_modify_ref VALUES(0,0);
+ALTER TABLE test_at_modify_ref MODIFY COLUMN b varchar(8) GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ERROR: invalid ON DELETE action for foreign key constraint containing generated column
+ALTER TABLE test_at_modify_ref MODIFY COLUMN b varchar(8);
+\d+ test_at_modify_ref
+ Table "atbdb_schema.test_at_modify_ref"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | | extended | |
+Foreign-key constraints:
+ "test_at_modify_ref_b_fkey" FOREIGN KEY (b) REFERENCES test_at_modify_depend(b) ON DELETE SET NULL
+Has OIDs: no
+Options: orientation=row, compression=no
+
+DROP TABLE test_at_modify_ref;
+-- --TABLE reference self column.
+CREATE TABLE test_at_modify_ref(
+ a int PRIMARY KEY,
+ b int,
+ FOREIGN KEY (b) REFERENCES test_at_modify_ref(a) ON DELETE SET NULL
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_ref_pkey" for table "test_at_modify_ref"
+INSERT INTO test_at_modify_ref VALUES(0,0);
+ALTER TABLE test_at_modify_ref MODIFY COLUMN b varchar(8) GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ERROR: invalid ON DELETE action for foreign key constraint containing generated column
+ALTER TABLE test_at_modify_ref MODIFY COLUMN b varchar(8);
+ALTER TABLE test_at_modify_ref MODIFY COLUMN a varchar(8);
+INSERT INTO test_at_modify_ref VALUES('a','a');
+DROP TABLE test_at_modify_ref;
+-- --RULE reference column.
+CREATE RULE test_at_modify_rule AS ON INSERT TO test_at_modify_depend WHERE (b is null) DO INSTEAD UPDATE test_at_modify_depend SET b=0;
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b int not null; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: modify or change a column used by materialized view or rule is not supported
+DROP RULE test_at_modify_rule ON test_at_modify_depend;
+-- --RLSPOLICY reference column.
+DROP TABLE test_at_modify_depend;
+CREATE ROLE at_modify_role PASSWORD 'Gauss@123';
+CREATE TABLE test_at_modify_depend(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_modify_depend VALUES(0,0);
+GRANT USAGE ON SCHEMA atbdb_schema TO at_modify_role;
+GRANT SELECT ON test_at_modify_depend TO at_modify_role;
+ALTER TABLE test_at_modify_depend ENABLE ROW LEVEL SECURITY;
+CREATE ROW LEVEL SECURITY POLICY test_at_modify_rls ON test_at_modify_depend AS RESTRICTIVE FOR SELECT TO at_modify_role USING(b >= 20);
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b int not null;
+INSERT INTO test_at_modify_depend VALUES(21,21);
+SET ROLE at_modify_role PASSWORD 'Gauss@123';
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+ a | b
+----+----
+ 21 | 21
+(1 row)
+
+RESET ROLE;
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+ a | b
+----+----
+ 0 | 0
+ 21 | 21
+(2 rows)
+
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b bool not null;
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b int not null;
+INSERT INTO test_at_modify_depend VALUES(22,22);
+SET ROLE at_modify_role PASSWORD 'Gauss@123';
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+ a | b
+----+----
+ 22 | 22
+(1 row)
+
+RESET ROLE;
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+ a | b
+----+----
+ 0 | 0
+ 21 | 1
+ 22 | 22
+(3 rows)
+
+DROP TABLE test_at_modify_depend;
+REVOKE ALL PRIVILEGES ON SCHEMA atbdb_schema FROM at_modify_role;
+DROP ROLE at_modify_role;
+-- ------------------------------------------------------ test ALTER TABLE CHANGE
+-- test change column syntax
+CREATE TABLE test_at_change_syntax(
+ a int,
+ b int NOT NULL
+);
+ALTER TABLE test_at_change_syntax CHANGE b b1 int INVISIBLE; -- ERROR
+ERROR: syntax error at or near "INVISIBLE"
+LINE 1: ALTER TABLE test_at_change_syntax CHANGE b b1 int INVISIBLE;
+ ^
+ALTER TABLE test_at_change_syntax CHANGE b b1 int COMMENT 'string'; -- ERROR
+ERROR: syntax error at or near "COMMENT"
+LINE 1: ALTER TABLE test_at_change_syntax CHANGE b b1 int COMMENT 's...
+ ^
+ALTER TABLE test_at_change_syntax CHANGE b b1 int CHECK (b < 100) NOT ENFORCED; -- ERROR
+ERROR: syntax error at or near "ENFORCED"
+LINE 1: ..._change_syntax CHANGE b b1 int CHECK (b < 100) NOT ENFORCED;
+ ^
+ALTER TABLE test_at_change_syntax CHANGE b b1 int GENERATED ALWAYS AS (a+1) VIRTUAL; -- ERROR
+ERROR: syntax error at or near "VIRTUAL"
+LINE 1: ...ge_syntax CHANGE b b1 int GENERATED ALWAYS AS (a+1) VIRTUAL;
+ ^
+ALTER TABLE test_at_change_syntax CHANGE b b1 int KEY; -- ERROR
+ERROR: syntax error at or near "KEY"
+LINE 1: ALTER TABLE test_at_change_syntax CHANGE b b1 int KEY;
+ ^
+ALTER TABLE test_at_change_syntax CHANGE b b1 int encrypted with (column_encryption_key = ImgCEK, encryption_type = DETERMINISTIC); -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: modify or change column to encrypted column is not supported
+ALTER TABLE test_at_change_syntax CHANGE b a int UNIQUE KEY; -- ERROR
+ERROR: column "a" of relation "test_at_change_syntax" already exists
+ALTER TABLE test_at_change_syntax CHANGE COLUMN b a int NOT NULL,CHANGE COLUMN a b int; -- ERROR
+ERROR: column "a" of relation "test_at_change_syntax" already exists
+ALTER TABLE test_at_change_syntax CHANGE b b1 int UNIQUE KEY;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_syntax_b1_key" for table "test_at_change_syntax"
+\d+ test_at_change_syntax;
+ Table "atbdb_schema.test_at_change_syntax"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b1 | integer | | plain | |
+Indexes:
+ "test_at_change_syntax_b1_key" UNIQUE CONSTRAINT, btree (b1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_change_syntax CHANGE COLUMN b1 b int PRIMARY KEY;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_syntax_pkey" for table "test_at_change_syntax"
+\d+ test_at_change_syntax;
+ Table "atbdb_schema.test_at_change_syntax"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b | integer | not null | plain | |
+Indexes:
+ "test_at_change_syntax_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default
+ "test_at_change_syntax_b1_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_change_syntax CHANGE COLUMN b b123456789012345678901234567890123456789012345678901234567890123 int UNIQUE KEY;
+NOTICE: identifier "b123456789012345678901234567890123456789012345678901234567890123" will be truncated to "b12345678901234567890123456789012345678901234567890123456789012"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_syntax_b123456789012345678901234567890123456_key" for table "test_at_change_syntax"
+\d+ test_at_change_syntax;
+ Table "atbdb_schema.test_at_change_syntax"
+ Column | Type | Modifiers | Storage | Stats target | Description
+-----------------------------------------------------------------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b12345678901234567890123456789012345678901234567890123456789012 | integer | not null | plain | |
+Indexes:
+ "test_at_change_syntax_pkey" PRIMARY KEY, btree (b12345678901234567890123456789012345678901234567890123456789012) TABLESPACE pg_default
+ "test_at_change_syntax_b123456789012345678901234567890123456_key" UNIQUE CONSTRAINT, btree (b12345678901234567890123456789012345678901234567890123456789012) TABLESPACE pg_default
+ "test_at_change_syntax_b1_key" UNIQUE CONSTRAINT, btree (b12345678901234567890123456789012345678901234567890123456789012) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+DROP TABLE test_at_change_syntax;
+-- test change column without data
+CREATE TABLE test_at_change(
+ a int,
+ b int NOT NULL
+);
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) NULL;
+\d+ test_at_change;
+ Table "atbdb_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b1 | character varying(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) DEFAULT '0';
+\d+ test_at_change;
+ Table "atbdb_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+--------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | default '0'::character varying | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_change CHANGE b b1 int AUTO_INCREMENT PRIMARY KEY;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_b1_seq" for serial column "test_at_change.b1"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_pkey" for table "test_at_change"
+\d+ test_at_change;
+ Table "atbdb_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-------------------------+---------+--------------+-------------
+ a | integer | | plain | |
+ b1 | integer | not null AUTO_INCREMENT | plain | |
+Indexes:
+ "test_at_change_pkey" PRIMARY KEY, btree (b1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) UNIQUE;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_b_key" for table "test_at_change"
+\d+ test_at_change;
+ Table "atbdb_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | not null | extended | |
+Indexes:
+ "test_at_change_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default
+ "test_at_change_b_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) CHECK (b1 < 'a');
+\d+ test_at_change;
+ Table "atbdb_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b1 | character varying(8) | not null | extended | |
+Indexes:
+ "test_at_change_pkey" PRIMARY KEY, btree (b1) TABLESPACE pg_default
+ "test_at_change_b_key" UNIQUE CONSTRAINT, btree (b1) TABLESPACE pg_default
+Check constraints:
+ "test_at_change_b1_check" CHECK (b1::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) COLLATE "POSIX";
+\d+ test_at_change;
+ Table "atbdb_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | collate POSIX not null | extended | |
+Indexes:
+ "test_at_change_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default
+ "test_at_change_b_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default
+Check constraints:
+ "test_at_change_b1_check" CHECK (b::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+\d+ test_at_change;
+ Table "atbdb_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------------------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b1 | character varying(8) | not null generated always as ((a + 1)) stored | extended | |
+Indexes:
+ "test_at_change_pkey" PRIMARY KEY, btree (b1) TABLESPACE pg_default
+ "test_at_change_b_key" UNIQUE CONSTRAINT, btree (b1) TABLESPACE pg_default
+Check constraints:
+ "test_at_change_b1_check" CHECK (b1::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_change CHANGE b1 b int NOT NULL;
+\d+ test_at_change;
+ Table "atbdb_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b | integer | not null | plain | |
+Indexes:
+ "test_at_change_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default
+ "test_at_change_b_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default
+Check constraints:
+ "test_at_change_b1_check" CHECK (b::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select pg_get_tabledef('test_at_change'::regclass);
+ pg_get_tabledef
+---------------------------------------------------------------------------------------------
+ SET search_path = atbdb_schema; +
+ CREATE TABLE test_at_change ( +
+ a integer, +
+ b integer NOT NULL, +
+ CONSTRAINT test_at_change_b1_check CHECK (((b)::text < 'a'::text)) +
+ ) +
+ WITH (orientation=row, compression=no); +
+ ALTER TABLE test_at_change ADD CONSTRAINT test_at_change_b_key UNIQUE USING btree (b); +
+ ALTER TABLE test_at_change ADD CONSTRAINT test_at_change_pkey PRIMARY KEY USING btree (b);
+(1 row)
+
+INSERT INTO test_at_change VALUES(1,1);
+DROP TABLE test_at_change;
+-- test change column datatype
+CREATE TABLE test_at_change_type(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_change_type VALUES(1,1);
+INSERT INTO test_at_change_type VALUES(2,2);
+INSERT INTO test_at_change_type VALUES(3,3);
+ALTER TABLE test_at_change_type CHANGE b b1 varchar(8);
+SELECT * FROM test_at_change_type where b1 = '3';
+ a | b1
+---+----
+ 3 | 3
+(1 row)
+
+ALTER TABLE test_at_change_type CHANGE b1 b DATE; -- ERROR
+ERROR: invalid input syntax for type date: "1"
+ALTER TABLE test_at_change_type CHANGE b1 b RAW;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+ a | b
+---+----
+ 1 | 01
+ 2 | 02
+ 3 | 03
+(3 rows)
+
+DROP TABLE test_at_change_type;
+CREATE TABLE test_at_change_type(
+ a int,
+ b serial NOT NULL
+);
+NOTICE: CREATE TABLE will create implicit sequence "test_at_change_type_b_seq" for serial column "test_at_change_type.b"
+INSERT INTO test_at_change_type VALUES(1,1);
+INSERT INTO test_at_change_type VALUES(2,2);
+INSERT INTO test_at_change_type VALUES(3,3);
+ALTER TABLE test_at_change_type CHANGE b b1 int;
+ALTER TABLE test_at_change_type CHANGE b1 b serial; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change column to type 'serial'
+ALTER TABLE test_at_change_type CHANGE b1 b DECIMAL(4,2);
+SELECT * FROM test_at_change_type where b = 3;
+ a | b
+---+------
+ 3 | 3.00
+(1 row)
+
+ALTER TABLE test_at_change_type CHANGE b b1 BOOLEAN;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+ a | b1
+---+----
+ 1 | t
+ 2 | t
+ 3 | t
+(3 rows)
+
+DROP TABLE test_at_change_type;
+CREATE TABLE test_at_change_type(
+ a int,
+ b text
+);
+INSERT INTO test_at_change_type VALUES(1,'beijing');
+INSERT INTO test_at_change_type VALUES(2,'shanghai');
+INSERT INTO test_at_change_type VALUES(3,'guangzhou');
+ALTER TABLE test_at_change_type CHANGE b b1 SET('beijing','shanghai','nanjing','wuhan'); -- ERROR
+NOTICE: ALTER TABLE will create implicit set "test_at_change_type_b1_set" for column "test_at_change_type.b1"
+ERROR: invalid input value for set test_at_change_type_b1_set: 'guangzhou'
+ALTER TABLE test_at_change_type CHANGE b b1 SET('beijing','shanghai','nanjing','guangzhou');
+NOTICE: ALTER TABLE will create implicit set "test_at_change_type_b1_set" for column "test_at_change_type.b1"
+ALTER TABLE test_at_change_type CHANGE b1 b SET('beijing','shanghai','guangzhou','wuhan'); -- ERROR
+ERROR: can not alter column type to another set
+select pg_get_tabledef('test_at_change_type'::regclass);
+ pg_get_tabledef
+-----------------------------------------------------------
+ SET search_path = atbdb_schema; +
+ CREATE TABLE test_at_change_type ( +
+ a integer, +
+ b1 SET('beijing', 'shanghai', 'nanjing', 'guangzhou')+
+ ) +
+ WITH (orientation=row, compression=no);
+(1 row)
+
+DROP TABLE test_at_change_type;
+CREATE TABLE test_at_change_type(
+ a int,
+ b varchar(32)
+);
+INSERT INTO test_at_change_type VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_change_type VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_change_type VALUES(3,'2022-11-24 12:00:00');
+ALTER TABLE test_at_change_type CHANGE b b1 varchar(10); -- ERROR
+ERROR: value too long for type character varying(10)
+ALTER TABLE test_at_change_type CHANGE b b1 DATE;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+ a | b1
+---+------------
+ 1 | 11-22-2022
+ 2 | 11-23-2022
+ 3 | 11-24-2022
+(3 rows)
+
+DROP TABLE test_at_change_type;
+-- test change column constraint
+CREATE TABLE test_at_change_constr(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_change_constr VALUES(1,1);
+INSERT INTO test_at_change_constr VALUES(2,2);
+INSERT INTO test_at_change_constr VALUES(3,3);
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) NOT NULL NULL; -- ERROR
+ERROR: conflicting NULL/NOT NULL declarations for column "b1" of table "test_at_change_constr"
+LINE 1: ... test_at_change_constr CHANGE b b1 varchar(8) NOT NULL NULL;
+ ^
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) UNIQUE KEY NULL;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_constr_b1_key" for table "test_at_change_constr"
+INSERT INTO test_at_change_constr VALUES(3,3); -- ERROR
+ERROR: duplicate key value violates unique constraint "test_at_change_constr_b1_key"
+DETAIL: Key (b1)=(3) already exists.
+INSERT INTO test_at_change_constr VALUES(4,NULL);
+ALTER TABLE test_at_change_constr CHANGE b1 b int NULL PRIMARY KEY; -- ERROR
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_constr_pkey" for table "test_at_change_constr"
+ERROR: column "b" contains null values
+DELETE FROM test_at_change_constr WHERE b1 IS NULL;
+ALTER TABLE test_at_change_constr CHANGE b1 b int NULL PRIMARY KEY;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_constr_pkey" for table "test_at_change_constr"
+INSERT INTO test_at_change_constr VALUES(4,NULL); -- ERROR
+ERROR: null value in column "b" violates not-null constraint
+DETAIL: Failing row contains (4, null).
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) CONSTRAINT t_at_m_check CHECK (b1 < 3); -- ERROR
+ERROR: check constraint "t_at_m_check" is violated by some row
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) CONSTRAINT t_at_m_check CHECK (b1 < 5);
+ALTER TABLE test_at_change_constr CHANGE b1 b varchar(8) CONSTRAINT t_at_m_check CHECK (b = a); -- ERROR
+ERROR: constraint "t_at_m_check" for relation "test_at_change_constr" already exists
+ALTER TABLE test_at_change_constr CHANGE b1 b varchar(8) CONSTRAINT t_at_m_check_1 CHECK (b = a);
+INSERT INTO test_at_change_constr VALUES(4,4);
+INSERT INTO test_at_change_constr VALUES(5,5); -- ERROR
+ERROR: new row for relation "test_at_change_constr" violates check constraint "t_at_m_check"
+DETAIL: N/A
+INSERT INTO test_at_change_constr VALUES(6,'a'); -- ERROR
+ERROR: new row for relation "test_at_change_constr" violates check constraint "t_at_m_check_1"
+DETAIL: N/A
+INSERT INTO test_at_change_constr VALUES(0,'a');
+ALTER TABLE test_at_change_constr CHANGE b b1 int NOT NULL PRIMARY KEY; -- ERROR
+ERROR: multiple primary keys for table "test_at_change_constr" are not allowed
+ALTER TABLE test_at_change_constr CHANGE b b1 int NOT NULL;
+INSERT INTO test_at_change_constr VALUES(5,5); -- ERROR
+ERROR: new row for relation "test_at_change_constr" violates check constraint "t_at_m_check"
+DETAIL: N/A
+SELECT b1 FROM test_at_change_constr ORDER BY 1;
+ b1
+----
+ 0
+ 1
+ 2
+ 3
+ 4
+(5 rows)
+
+select pg_get_tabledef('test_at_change_constr'::regclass);
+ pg_get_tabledef
+------------------------------------------------------------------------------------------------------------
+ SET search_path = atbdb_schema; +
+ CREATE TABLE test_at_change_constr ( +
+ a integer, +
+ b1 integer NOT NULL, +
+ CONSTRAINT t_at_m_check_1 CHECK (((b1)::bigint = a)), +
+ CONSTRAINT t_at_m_check CHECK (((b1)::bigint < 5)) +
+ ) +
+ WITH (orientation=row, compression=no); +
+ ALTER TABLE test_at_change_constr ADD CONSTRAINT test_at_change_constr_b1_key UNIQUE USING btree (b1); +
+ ALTER TABLE test_at_change_constr ADD CONSTRAINT test_at_change_constr_pkey PRIMARY KEY USING btree (b1);
+(1 row)
+
+ALTER TABLE test_at_change_constr CHANGE b1 b int NOT NULL REFERENCES test_at_ref (a); -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: modify or change column REFERENCES constraint is not supported
+DROP TABLE test_at_change_constr;
+-- test change column default
+CREATE TABLE test_at_change_default(
+ a int,
+ b int DEFAULT NULL
+);
+INSERT INTO test_at_change_default VALUES(1,1);
+INSERT INTO test_at_change_default VALUES(2,2);
+INSERT INTO test_at_change_default VALUES(3,3);
+ALTER TABLE test_at_change_default CHANGE b b1 bigint DEFAULT (a+1); -- ERROR
+ERROR: default value cannot reference to a column
+HINT: Perhaps the default value is enclosed in double quotes
+ALTER TABLE test_at_change_default CHANGE b b1 bigint DEFAULT NULL;
+\d+ test_at_change_default;
+ Table "atbdb_schema.test_at_change_default"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b1 | bigint | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) DEFAULT 'a' GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ERROR: both default and generation expression specified for column "b" of table "test_at_change_default"
+LINE 1: ...change_default CHANGE b1 b varchar(8) DEFAULT 'a' GENERATED ...
+ ^
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) DEFAULT 'a';
+\d+ test_at_change_default;
+ Table "atbdb_schema.test_at_change_default"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+--------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | default 'a'::character varying | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+INSERT INTO test_at_change_default VALUES(0,DEFAULT);
+SELECT b FROM test_at_change_default ORDER BY 1;
+ b
+---
+ 1
+ 2
+ 3
+ a
+(4 rows)
+
+ALTER TABLE test_at_change_default CHANGE b b1 int DEFAULT 4 AUTO_INCREMENT; -- ERROR
+ERROR: multiple default values specified for column "b1" of table "test_at_change_default"
+LINE 1: ... test_at_change_default CHANGE b b1 int DEFAULT 4 AUTO_INCRE...
+ ^
+ALTER TABLE test_at_change_default CHANGE b b1 int DEFAULT 4;
+INSERT INTO test_at_change_default VALUES(4,DEFAULT);
+SELECT b1 FROM test_at_change_default ORDER BY 1;
+ b1
+----
+ 0
+ 1
+ 2
+ 3
+ 4
+(5 rows)
+
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+SELECT a,b FROM test_at_change_default ORDER BY 1,2;
+ a | b
+---+---
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 4
+ 4 | 5
+(5 rows)
+
+ALTER TABLE test_at_change_default CHANGE a a1 varchar(8) DEFAULT 'a';
+INSERT INTO test_at_change_default VALUES(DEFAULT,DEFAULT);
+SELECT * FROM test_at_change_default ORDER BY 1,2;
+ a1 | b
+----+---
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 4
+ 4 | 5
+ a | 1
+(6 rows)
+
+\d+ test_at_change_default;
+ Table "atbdb_schema.test_at_change_default"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-------------------------------------------------+----------+--------------+-------------
+ a1 | character varying(8) | default 'a'::character varying | extended | |
+ b | character varying(8) | generated always as (((a1)::bigint + 1)) stored | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+DROP TABLE test_at_change_default;
+-- test change column depended by generated column
+CREATE TABLE test_at_change_generated(
+ a int,
+ b varchar(32),
+ c varchar(32) GENERATED ALWAYS AS (b) STORED
+);
+INSERT INTO test_at_change_generated(a,b) VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_change_generated(a,b) VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_change_generated(a,b) VALUES(3,'2022-11-24 12:00:00');
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b | c
+---+---------------------+---------------------
+ 1 | 2022-11-22 12:00:00 | 2022-11-22 12:00:00
+ 2 | 2022-11-23 12:00:00 | 2022-11-23 12:00:00
+ 3 | 2022-11-24 12:00:00 | 2022-11-24 12:00:00
+(3 rows)
+
+ALTER TABLE test_at_change_generated CHANGE COLUMN b b1 DATE;
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b1 | c
+---+------------+------------
+ 1 | 11-22-2022 | 11-22-2022
+ 2 | 11-23-2022 | 11-23-2022
+ 3 | 11-24-2022 | 11-24-2022
+(3 rows)
+
+\d+ test_at_change_generated
+ Table "atbdb_schema.test_at_change_generated"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------+---------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b1 | date | | plain | |
+ c | character varying(32) | generated always as (b1) stored | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE test_at_change_generated CHANGE COLUMN b1 b varchar(32) AFTER c;
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | c | b
+---+------------+------------
+ 1 | 11-22-2022 | 11-22-2022
+ 2 | 11-23-2022 | 11-23-2022
+ 3 | 11-24-2022 | 11-24-2022
+(3 rows)
+
+DROP TABLE test_at_change_generated;
+CREATE TABLE test_at_change_generated(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+);
+INSERT INTO test_at_change_generated(a,b) VALUES(-1,DEFAULT);
+INSERT INTO test_at_change_generated(a,b) VALUES(0,DEFAULT);
+INSERT INTO test_at_change_generated(a,b) VALUES(1,DEFAULT);
+ALTER TABLE test_at_change_generated CHANGE COLUMN a a1 bool;
+ALTER TABLE test_at_change_generated CHANGE COLUMN a1 a int;
+INSERT INTO test_at_change_generated(a,b) VALUES(100,DEFAULT);
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b
+-----+-----
+ 0 | 1
+ 1 | 2
+ 1 | 2
+ 100 | 101
+(4 rows)
+
+ALTER TABLE test_at_change_generated CHANGE COLUMN a a1 bool, MODIFY COLUMN b varchar(32);
+\d test_at_change_generated
+Table "atbdb_schema.test_at_change_generated"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ a1 | boolean |
+ b | character varying(32) |
+
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a1 | b
+----+-----
+ f | 1
+ t | 101
+ t | 2
+ t | 2
+(4 rows)
+
+ALTER TABLE test_at_change_generated MODIFY COLUMN b int GENERATED ALWAYS AS (a+1) STORED, CHANGE COLUMN a1 a int;
+\d test_at_change_generated
+ Table "atbdb_schema.test_at_change_generated"
+ Column | Type | Modifiers
+--------+---------+------------------------------------------------
+ a | integer |
+ b | integer | generated always as (((a)::bigint + 1)) stored
+
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b
+---+---
+ 0 | 1
+ 1 | 2
+ 1 | 2
+ 1 | 2
+(4 rows)
+
+INSERT INTO test_at_change_generated(a,b) VALUES(100,DEFAULT);
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b
+-----+-----
+ 0 | 1
+ 1 | 2
+ 1 | 2
+ 1 | 2
+ 100 | 101
+(5 rows)
+
+ALTER TABLE test_at_change_generated MODIFY COLUMN b bool GENERATED ALWAYS AS (a) STORED;
+\d test_at_change_generated
+ Table "atbdb_schema.test_at_change_generated"
+ Column | Type | Modifiers
+--------+---------+--------------------------------
+ a | integer |
+ b | boolean | generated always as (a) stored
+
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b
+-----+---
+ 0 | f
+ 1 | t
+ 1 | t
+ 1 | t
+ 100 | t
+(5 rows)
+
+ALTER TABLE test_at_change_generated CHANGE COLUMN a a1 bool;
+\d test_at_change_generated
+ Table "atbdb_schema.test_at_change_generated"
+ Column | Type | Modifiers
+--------+---------+---------------------------------
+ a1 | boolean |
+ b | boolean | generated always as (a1) stored
+
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a1 | b
+----+---
+ f | f
+ t | t
+ t | t
+ t | t
+ t | t
+(5 rows)
+
+ALTER TABLE test_at_change_generated CHANGE COLUMN a1 a int;
+INSERT INTO test_at_change_generated(a,b) VALUES(100,DEFAULT);
+\d test_at_change_generated
+ Table "atbdb_schema.test_at_change_generated"
+ Column | Type | Modifiers
+--------+---------+--------------------------------
+ a | integer |
+ b | boolean | generated always as (a) stored
+
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b
+-----+---
+ 0 | f
+ 1 | t
+ 1 | t
+ 1 | t
+ 1 | t
+ 100 | t
+(6 rows)
+
+DROP TABLE test_at_change_generated;
+-- test change column AUTO_INCREMENT
+CREATE TABLE test_at_change_autoinc(
+ a int,
+ b int
+);
+INSERT INTO test_at_change_autoinc VALUES(1,NULL);
+INSERT INTO test_at_change_autoinc VALUES(2,0);
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int2 AUTO_INCREMENT; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b1_seq" for serial column "test_at_change_autoinc.b1"
+ERROR: auto_increment column must be defined as a unique or primary key
+ALTER TABLE test_at_change_autoinc CHANGE b b1 DECIMAL(4,2) AUTO_INCREMENT UNIQUE KEY; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b1_seq" for serial column "test_at_change_autoinc.b1"
+ERROR: The datatype of column 'b1' does not support auto_increment
+ALTER TABLE test_at_change_autoinc CHANGE b b1 serial AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change column to type 'serial'
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int2 AUTO_INCREMENT NULL UNIQUE KEY;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b1_seq" for serial column "test_at_change_autoinc.b1"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_autoinc_b1_key" for table "test_at_change_autoinc"
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1
+---+----
+ 1 |
+ 2 | 2
+(2 rows)
+
+INSERT INTO test_at_change_autoinc VALUES(3,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1
+---+----
+ 1 |
+ 2 | 2
+ 3 | 3
+(3 rows)
+
+ALTER TABLE test_at_change_autoinc CHANGE b1 b int;
+INSERT INTO test_at_change_autoinc VALUES(4,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 | 2
+ 3 | 3
+ 4 | 0
+(4 rows)
+
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int AUTO_INCREMENT PRIMARY KEY, AUTO_INCREMENT=100;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b1_seq" for serial column "test_at_change_autoinc.b1"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_autoinc_pkey" for table "test_at_change_autoinc"
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1
+---+-----
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+(4 rows)
+
+INSERT INTO test_at_change_autoinc VALUES(5,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1
+---+-----
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+(5 rows)
+
+ALTER TABLE test_at_change_autoinc AUTO_INCREMENT=1000;
+ALTER TABLE test_at_change_autoinc CHANGE b1 b int2 AUTO_INCREMENT;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b_seq" for serial column "test_at_change_autoinc.b"
+INSERT INTO test_at_change_autoinc VALUES(6,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b
+---+------
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+ 6 | 1000
+(6 rows)
+
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE, CHANGE b b1 int2 AUTO_INCREMENT UNIQUE KEY; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_c_seq" for serial column "test_at_change_autoinc.c"
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b1_seq" for serial column "test_at_change_autoinc.b1"
+ERROR: Incorrect table definition, there can be only one auto_increment column
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_c_seq" for serial column "test_at_change_autoinc.c"
+ERROR: Incorrect column definition, there can be only one auto_increment column
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int;
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_c_seq" for serial column "test_at_change_autoinc.c"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_autoinc_c_key" for table "test_at_change_autoinc"
+INSERT INTO test_at_change_autoinc VALUES(7,0,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1 | c
+---+------+---
+ 1 | 100 | 1
+ 2 | 2 | 2
+ 3 | 3 | 3
+ 4 | 101 | 4
+ 5 | 102 | 5
+ 6 | 1000 | 6
+ 7 | 0 | 7
+(7 rows)
+
+ALTER TABLE test_at_change_autoinc DROP COLUMN c , CHANGE b1 b int2 AUTO_INCREMENT UNIQUE KEY FIRST;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b_seq" for serial column "test_at_change_autoinc.b"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_autoinc_b_key" for table "test_at_change_autoinc"
+INSERT INTO test_at_change_autoinc(a,b) VALUES(8,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 2,1;
+ b | a
+------+---
+ 100 | 1
+ 2 | 2
+ 3 | 3
+ 101 | 4
+ 102 | 5
+ 1000 | 6
+ 1001 | 7
+ 1002 | 8
+(8 rows)
+
+DROP TABLE test_at_change_autoinc;
+-- test change column depended by other objects
+CREATE TABLE test_at_change_depend(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_change_depend VALUES(1,1);
+INSERT INTO test_at_change_depend VALUES(2,2);
+INSERT INTO test_at_change_depend VALUES(3,3);
+-- --PROCEDURE contains column
+CREATE OR REPLACE PROCEDURE test_at_change_proc(IN p_in int)
+ AS
+ BEGIN
+ INSERT INTO test_at_change_depend(a,b) VALUES(p_in, p_in);
+ END;
+/
+ALTER TABLE test_at_change_depend CHANGE b b1 varchar(8) NOT NULL;
+CALL test_at_change_proc(2); -- ERROR
+ERROR: column "b" of relation "test_at_change_depend" does not exist
+LINE 1: INSERT INTO test_at_change_depend(a,b) VALUES(p_in, p_in)
+ ^
+QUERY: INSERT INTO test_at_change_depend(a,b) VALUES(p_in, p_in)
+CONTEXT: PL/pgSQL function test_at_change_proc(integer) line 2 at SQL statement
+DROP PROCEDURE test_at_change_proc;
+-- --TRIGGER contains and depends column
+CREATE OR REPLACE FUNCTION tg_bf_test_at_change_func() RETURNS TRIGGER AS
+$$
+ DECLARE
+ BEGIN
+ UPDATE test_at_change_depend SET b1 = NULL WHERE a < NEW.a;
+ RETURN NEW;
+ END
+$$ LANGUAGE PLPGSQL;
+CREATE TRIGGER tg_bf_test_at_change
+ AFTER UPDATE ON test_at_change_depend
+ FOR EACH ROW WHEN ( NEW.b1 IS NULL AND OLD.b1 = OLD.a)
+ EXECUTE PROCEDURE tg_bf_test_at_change_func();
+ALTER TABLE test_at_change_depend CHANGE b1 b varchar(8) NULL DEFAULT '0';
+UPDATE test_at_change_depend SET b = NULL WHERE a = 2; -- ERROR
+ERROR: column "b1" of relation "test_at_change_depend" does not exist
+LINE 1: UPDATE test_at_change_depend SET b1 = NULL WHERE a < NEW.a
+ ^
+QUERY: UPDATE test_at_change_depend SET b1 = NULL WHERE a < NEW.a
+CONTEXT: PL/pgSQL function tg_bf_test_at_change_func() line 4 at SQL statement
+DROP TRIGGER tg_bf_test_at_change ON test_at_change_depend;
+DROP FUNCTION tg_bf_test_at_change_func;
+-- --VIEW depends column
+CREATE VIEW test_at_change_view AS SELECT b FROM test_at_change_depend;
+ALTER TABLE test_at_change_depend CHANGE b b1 bigint NULL; -- ERROR
+ERROR: cannot change data type of view column "b" from character varying(8) to bigint
+ALTER TABLE test_at_change_depend CHANGE b b1 int NULL; -- ERROR
+ERROR: cannot change data type of view column "b" from character varying(8) to integer
+ALTER TABLE test_at_change_depend CHANGE b b1 varchar(8) NULL;
+SELECT b FROM test_at_change_view ORDER BY 1;
+ b
+---
+ 1
+ 2
+ 3
+(3 rows)
+
+DROP VIEW test_at_change_view;
+CREATE VIEW test_at_change_view AS SELECT a FROM test_at_change_depend where b1 > 0;
+CREATE VIEW test_at_change_view1 AS SELECT * FROM test_at_change_view;
+ALTER TABLE test_at_change_depend CHANGE b1 b bigint NULL GENERATED ALWAYS AS (a+1);
+ALTER TABLE test_at_change_depend CHANGE b b1 varchar(8) NULL;
+ALTER TABLE test_at_change_depend CHANGE b1 b int NULL;
+SELECT * FROM test_at_change_view1 ORDER BY 1;
+ a
+---
+ 1
+ 2
+ 3
+(3 rows)
+
+DROP VIEW test_at_change_view1;
+DROP VIEW test_at_change_view;
+CREATE materialized VIEW test_at_change_view AS SELECT b FROM test_at_change_depend;
+ALTER TABLE test_at_change_depend CHANGE COLUMN b b1 bigint not null; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: modify or change a column used by materialized view or rule is not supported
+DROP MATERIALIZED VIEW test_at_change_view;
+-- --TABLE reference column.
+DELETE FROM test_at_change_depend;
+ALTER TABLE test_at_change_depend CHANGE b b INT PRIMARY KEY;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_depend_pkey" for table "test_at_change_depend"
+CREATE TABLE test_at_change_ref(
+ a int,
+ b int,
+ FOREIGN KEY (b) REFERENCES test_at_change_depend(b) ON DELETE SET NULL
+);
+ALTER TABLE test_at_change_depend CHANGE COLUMN b b1 varchar(8);
+INSERT INTO test_at_change_ref VALUES(0,0); -- ERROR
+ERROR: insert or update on table "test_at_change_ref" violates foreign key constraint "test_at_change_ref_b_fkey"
+DETAIL: Key (b)=(0) is not present in table "test_at_change_depend".
+INSERT INTO test_at_change_depend VALUES(0,0);
+INSERT INTO test_at_change_ref VALUES(0,0);
+ALTER TABLE test_at_change_ref CHANGE COLUMN b b1 varchar(8) GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ERROR: invalid ON DELETE action for foreign key constraint containing generated column
+ALTER TABLE test_at_change_ref CHANGE COLUMN b b1 varchar(8);
+\d+ test_at_change_ref
+ Table "atbdb_schema.test_at_change_ref"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b1 | character varying(8) | | extended | |
+Foreign-key constraints:
+ "test_at_change_ref_b_fkey" FOREIGN KEY (b1) REFERENCES test_at_change_depend(b1) ON DELETE SET NULL
+Has OIDs: no
+Options: orientation=row, compression=no
+
+DROP TABLE test_at_change_ref;
+-- --TABLE reference self column.
+CREATE TABLE test_at_change_ref(
+ a int PRIMARY KEY,
+ b int,
+ FOREIGN KEY (b) REFERENCES test_at_change_ref(a) ON DELETE SET NULL
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_change_ref_pkey" for table "test_at_change_ref"
+INSERT INTO test_at_change_ref VALUES(0,0);
+ALTER TABLE test_at_change_ref CHANGE COLUMN b b1 varchar(8) GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ERROR: invalid ON DELETE action for foreign key constraint containing generated column
+ALTER TABLE test_at_change_ref CHANGE COLUMN b b1 varchar(8);
+ALTER TABLE test_at_change_ref CHANGE COLUMN a a1 varchar(8);
+INSERT INTO test_at_change_ref VALUES('a','a');
+DROP TABLE test_at_change_ref;
+-- --RULE reference column.
+CREATE RULE test_at_change_rule AS ON INSERT TO test_at_change_depend WHERE (b1 is null) DO INSTEAD UPDATE test_at_change_depend SET b1=0;
+ALTER TABLE test_at_change_depend CHANGE COLUMN b1 b bigint not null; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: modify or change a column used by materialized view or rule is not supported
+DROP RULE test_at_change_rule ON test_at_change_depend;
+-- --RLSPOLICY reference column.
+DROP TABLE test_at_change_depend;
+CREATE ROLE at_change_role PASSWORD 'Gauss@123';
+CREATE TABLE test_at_change_depend(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_change_depend VALUES(0,0);
+GRANT USAGE ON SCHEMA atbdb_schema TO at_change_role;
+GRANT SELECT ON test_at_change_depend TO at_change_role;
+ALTER TABLE test_at_change_depend ENABLE ROW LEVEL SECURITY;
+CREATE ROW LEVEL SECURITY POLICY test_at_change_rls ON test_at_change_depend AS RESTRICTIVE FOR SELECT TO at_change_role USING(b >= 20);
+ALTER TABLE test_at_change_depend CHANGE COLUMN b b1 int not null;
+INSERT INTO test_at_change_depend VALUES(21,21);
+SET ROLE at_change_role PASSWORD 'Gauss@123';
+SELECT * FROM test_at_change_depend ORDER BY 1,2;
+ a | b1
+----+----
+ 21 | 21
+(1 row)
+
+RESET ROLE;
+SELECT * FROM test_at_change_depend ORDER BY 1,2;
+ a | b1
+----+----
+ 0 | 0
+ 21 | 21
+(2 rows)
+
+ALTER TABLE test_at_change_depend CHANGE COLUMN b1 b2 bool not null;
+ALTER TABLE test_at_change_depend CHANGE COLUMN b2 b3 int not null;
+INSERT INTO test_at_change_depend VALUES(22,22);
+SET ROLE at_change_role PASSWORD 'Gauss@123';
+SELECT * FROM test_at_change_depend ORDER BY 1,2;
+ a | b3
+----+----
+ 22 | 22
+(1 row)
+
+RESET ROLE;
+SELECT * FROM test_at_change_depend ORDER BY 1,2;
+ a | b3
+----+----
+ 0 | 0
+ 21 | 1
+ 22 | 22
+(3 rows)
+
+DROP TABLE test_at_change_depend;
+REVOKE ALL PRIVILEGES ON SCHEMA atbdb_schema FROM at_change_role;
+DROP ROLE at_change_role;
+-- test alter command order
+CREATE TABLE test_at_pass(
+ a int,
+ b int
+);
+INSERT INTO test_at_pass VALUES(1,0);
+ALTER TABLE test_at_pass ADD COLUMN c int, DROP COLUMN c; -- ERROR
+ERROR: column "c" of relation "test_at_pass" does not exist
+ALTER TABLE test_at_pass ADD COLUMN c int DEFAULT 0, MODIFY COLUMN c bigint; -- ERROR
+ERROR: column "c" of relation "test_at_pass" does not exist
+ALTER TABLE test_at_pass ADD COLUMN c int DEFAULT 0, CHANGE COLUMN c c1 bigint; -- ERROR
+ERROR: column "c" does not exist
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint, MODIFY COLUMN b float4; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change column "b" twice
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint, CHANGE COLUMN b b1 float4; -- ERROR
+ERROR: column "b" of relation "test_at_pass" does not exist
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint, CHANGE COLUMN b1 b2 bigint; -- ERROR
+ERROR: column "b1" of relation "test_at_pass" does not exist
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint, DROP COLUMN b; -- ERROR
+ERROR: column "b" of relation "test_at_pass" does not exist
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint, DROP COLUMN b; -- ERROR
+ERROR: column "b" of relation "test_at_pass" does not exist
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint, DROP COLUMN b1; -- ERROR
+ERROR: column "b1" of relation "test_at_pass" does not exist
+ALTER TABLE test_at_pass MODIFY a bigint, MODIFY COLUMN a VARCHAR(8); -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change column "a" twice
+ALTER TABLE test_at_pass CHANGE COLUMN b a bigint, CHANGE COLUMN a b VARCHAR(8); -- ERROR
+ERROR: column "a" of relation "test_at_pass" already exists
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint, ALTER COLUMN b SET DEFAULT 100;
+\d test_at_pass
+Table "atbdb_schema.test_at_pass"
+ Column | Type | Modifiers
+--------+---------+-------------
+ a | integer |
+ b | bigint | default 100
+
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint DEFAULT 100, ALTER COLUMN b DROP DEFAULT;
+\d test_at_pass
+Table "atbdb_schema.test_at_pass"
+ Column | Type | Modifiers
+--------+---------+-------------
+ a | integer |
+ b | bigint | default 100
+
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint, ALTER COLUMN b1 SET DEFAULT 100;
+\d test_at_pass
+Table "atbdb_schema.test_at_pass"
+ Column | Type | Modifiers
+--------+---------+-------------
+ a | integer |
+ b1 | bigint | default 100
+
+ALTER TABLE test_at_pass CHANGE COLUMN b1 b bigint DEFAULT 100, ALTER COLUMN b DROP DEFAULT;
+\d test_at_pass
+Table "atbdb_schema.test_at_pass"
+ Column | Type | Modifiers
+--------+---------+-------------
+ a | integer |
+ b | bigint | default 100
+
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint DEFAULT 100, ALTER COLUMN b DROP DEFAULT; -- ERROR
+ERROR: column "b" of relation "test_at_pass" does not exist
+ALTER TABLE test_at_pass MODIFY COLUMN a bigint CONSTRAINT atpass_pk PRIMARY KEY, DROP CONSTRAINT atpass_pk; -- ERROR
+ERROR: constraint "atpass_pk" of relation "test_at_pass" does not exist
+ALTER TABLE test_at_pass MODIFY COLUMN a bigint CONSTRAINT atpass_pk PRIMARY KEY, ADD CONSTRAINT atpass_pk PRIMARY KEY(a); -- ERROR
+ERROR: multiple primary keys for table "test_at_pass" are not allowed
+LINE 1: ...MN a bigint CONSTRAINT atpass_pk PRIMARY KEY, ADD CONSTRAINT...
+ ^
+DROP TABLE test_at_pass;
+-- test complex commands combined
+CREATE TABLE test_at_complex(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+);
+INSERT INTO test_at_complex VALUES(0,DEFAULT);
+INSERT INTO test_at_complex VALUES(1,DEFAULT);
+INSERT INTO test_at_complex VALUES(2,DEFAULT);
+INSERT INTO test_at_complex VALUES(-1,DEFAULT);
+ALTER TABLE test_at_complex MODIFY COLUMN a varchar(8), MODIFY COLUMN b int AUTO_INCREMENT UNIQUE;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_complex_b_seq" for serial column "test_at_complex.b"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_complex_b_key" for table "test_at_complex"
+INSERT INTO test_at_complex VALUES(3,DEFAULT);
+SELECT * FROM test_at_complex ORDER BY a::int,b::int;
+ a | b
+----+---
+ -1 | 4
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 5
+(5 rows)
+
+DROP TABLE test_at_complex;
+CREATE TABLE test_at_complex(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+);
+INSERT INTO test_at_complex VALUES(0,DEFAULT);
+INSERT INTO test_at_complex VALUES(1,DEFAULT);
+INSERT INTO test_at_complex VALUES(2,DEFAULT);
+INSERT INTO test_at_complex VALUES(-1,DEFAULT);
+ALTER TABLE test_at_complex MODIFY COLUMN b int AUTO_INCREMENT UNIQUE, MODIFY COLUMN a varchar(8);
+NOTICE: ALTER TABLE will create implicit sequence "test_at_complex_b_seq" for serial column "test_at_complex.b"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_complex_b_key" for table "test_at_complex"
+INSERT INTO test_at_complex VALUES(3,DEFAULT);
+SELECT * FROM test_at_complex ORDER BY a::int,b::int;
+ a | b
+----+---
+ -1 | 4
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 5
+(5 rows)
+
+DROP TABLE test_at_complex;
+-- test modify partitioned table column without data
+CREATE TABLE pt_at_modify (a int, b int NOT NULL, PRIMARY KEY(b,a))
+PARTITION BY RANGE (a)
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (1000),
+ PARTITION p3 VALUES LESS THAN (MAXVALUE)
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pt_at_modify_pkey" for table "pt_at_modify"
+ALTER TABLE pt_at_modify MODIFY a int8 DEFAULT 0; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: modify or change partition key column is not supported
+ALTER TABLE pt_at_modify MODIFY a int DEFAULT 0;
+ALTER TABLE pt_at_modify MODIFY a int GENERATED ALWAYS AS (b+1) STORED; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change a partition key column as a generated column
+\d+ pt_at_modify;
+ Table "atbdb_schema.pt_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+--------------------+---------+--------------+-------------
+ a | integer | not null default 0 | plain | |
+ b | integer | not null | plain | |
+Indexes:
+ "pt_at_modify_pkey" PRIMARY KEY, btree (b, a) LOCAL TABLESPACE pg_default
+Partition By RANGE(a)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE pt_at_modify MODIFY b int8 NULL;
+\d+ pt_at_modify;
+ Table "atbdb_schema.pt_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+--------------------+---------+--------------+-------------
+ a | integer | not null default 0 | plain | |
+ b | bigint | not null | plain | |
+Indexes:
+ "pt_at_modify_pkey" PRIMARY KEY, btree (b, a) LOCAL TABLESPACE pg_default
+Partition By RANGE(a)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE pt_at_modify MODIFY b int8 DEFAULT 0;
+\d+ pt_at_modify;
+ Table "atbdb_schema.pt_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+--------------------+---------+--------------+-------------
+ a | integer | not null default 0 | plain | |
+ b | bigint | not null default 0 | plain | |
+Indexes:
+ "pt_at_modify_pkey" PRIMARY KEY, btree (b, a) LOCAL TABLESPACE pg_default
+Partition By RANGE(a)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE pt_at_modify MODIFY b int AUTO_INCREMENT;
+NOTICE: ALTER TABLE will create implicit sequence "pt_at_modify_b_seq" for serial column "pt_at_modify.b"
+\d+ pt_at_modify;
+ Table "atbdb_schema.pt_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-------------------------+---------+--------------+-------------
+ a | integer | not null default 0 | plain | |
+ b | integer | not null AUTO_INCREMENT | plain | |
+Indexes:
+ "pt_at_modify_pkey" PRIMARY KEY, btree (b, a) LOCAL TABLESPACE pg_default
+Partition By RANGE(a)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE pt_at_modify MODIFY b int2 UNIQUE;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "pt_at_modify_b_tableoid_key" for table "pt_at_modify"
+\d+ pt_at_modify;
+ Table "atbdb_schema.pt_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------+--------------------+---------+--------------+-------------
+ a | integer | not null default 0 | plain | |
+ b | smallint | not null | plain | |
+Indexes:
+ "pt_at_modify_pkey" PRIMARY KEY, btree (b, a) LOCAL TABLESPACE pg_default
+ "pt_at_modify_b_tableoid_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default
+Partition By RANGE(a)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE pt_at_modify MODIFY b int CHECK (b < 10000);
+\d+ pt_at_modify;
+ Table "atbdb_schema.pt_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+--------------------+---------+--------------+-------------
+ a | integer | not null default 0 | plain | |
+ b | integer | not null | plain | |
+Indexes:
+ "pt_at_modify_pkey" PRIMARY KEY, btree (b, a) LOCAL TABLESPACE pg_default
+ "pt_at_modify_b_tableoid_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default
+Check constraints:
+ "pt_at_modify_b_check" CHECK (b < 10000)
+Partition By RANGE(a)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE pt_at_modify MODIFY b varchar(8) COLLATE "POSIX";
+\d+ pt_at_modify;
+ Table "atbdb_schema.pt_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+------------------------+----------+--------------+-------------
+ a | integer | not null default 0 | plain | |
+ b | character varying(8) | collate POSIX not null | extended | |
+Indexes:
+ "pt_at_modify_pkey" PRIMARY KEY, btree (b, a) LOCAL TABLESPACE pg_default
+ "pt_at_modify_b_tableoid_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default
+Check constraints:
+ "pt_at_modify_b_check" CHECK (b::bigint < 10000)
+Partition By RANGE(a)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE pt_at_modify MODIFY b int8 GENERATED ALWAYS AS (a+1) STORED;
+\d+ pt_at_modify;
+ Table "atbdb_schema.pt_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------------------------------------------+---------+--------------+-------------
+ a | integer | not null default 0 | plain | |
+ b | bigint | not null generated always as ((a + 1)) stored | plain | |
+Indexes:
+ "pt_at_modify_pkey" PRIMARY KEY, btree (b, a) LOCAL TABLESPACE pg_default
+ "pt_at_modify_b_tableoid_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default
+Check constraints:
+ "pt_at_modify_b_check" CHECK (b < 10000)
+Partition By RANGE(a)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE pt_at_modify MODIFY b varchar(8) NOT NULL;
+\d+ pt_at_modify;
+ Table "atbdb_schema.pt_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+--------------------+----------+--------------+-------------
+ a | integer | not null default 0 | plain | |
+ b | character varying(8) | not null | extended | |
+Indexes:
+ "pt_at_modify_pkey" PRIMARY KEY, btree (b, a) LOCAL TABLESPACE pg_default
+ "pt_at_modify_b_tableoid_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default
+Check constraints:
+ "pt_at_modify_b_check" CHECK (b::bigint < 10000)
+Partition By RANGE(a)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+select pg_get_tabledef('pt_at_modify'::regclass);
+ pg_get_tabledef
+----------------------------------------------------------------------------------------------------------------
+ SET search_path = atbdb_schema; +
+ CREATE TABLE pt_at_modify ( +
+ a integer DEFAULT 0 NOT NULL, +
+ b character varying(8) NOT NULL, +
+ CONSTRAINT pt_at_modify_b_check CHECK (((b)::bigint < 10000)) +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY RANGE (a) +
+ ( +
+ PARTITION p1 VALUES LESS THAN (100) TABLESPACE pg_default, +
+ PARTITION p2 VALUES LESS THAN (1000) TABLESPACE pg_default, +
+ PARTITION p3 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT; +
+ ALTER TABLE pt_at_modify ADD CONSTRAINT pt_at_modify_b_tableoid_key UNIQUE USING btree (b) INCLUDE (tableoid);+
+ ALTER TABLE pt_at_modify ADD CONSTRAINT pt_at_modify_pkey PRIMARY KEY USING btree (b, a);
+(1 row)
+
+INSERT INTO pt_at_modify VALUES(1,1);
+DROP TABLE pt_at_modify;
+-- test alter modify first after
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED AFTER a, MODIFY c float4 FIRST;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+ c | a | d | b
+----+----+-----+----
+ 3 | 1 | 101 | 2
+ 13 | 11 | 111 | 12
+ 23 | 21 | 121 | 22
+(3 rows)
+
+DROP TABLE test_at_modify_fa;
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED FIRST, MODIFY c float4 GENERATED ALWAYS AS (b+100) STORED AFTER a;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+ d | a | c | b
+-----+----+-----+----
+ 101 | 1 | 102 | 2
+ 111 | 11 | 112 | 12
+ 121 | 21 | 122 | 22
+(3 rows)
+
+DROP TABLE test_at_modify_fa;
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,0);
+INSERT INTO test_at_modify_fa VALUES(21,22,0);
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED FIRST, MODIFY c bigint AUTO_INCREMENT PRIMARY KEY AFTER a;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_fa_c_seq" for serial column "test_at_modify_fa.c"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_fa_pkey" for table "test_at_modify_fa"
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+ d | a | c | b
+-----+----+---+----
+ 101 | 1 | 3 | 2
+ 111 | 11 | 4 | 12
+ 121 | 21 | 5 | 22
+(3 rows)
+
+INSERT INTO test_at_modify_fa(a,b,c) VALUES(31,32,NULL);
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+ d | a | c | b
+-----+----+---+----
+ 101 | 1 | 3 | 2
+ 111 | 11 | 4 | 12
+ 121 | 21 | 5 | 22
+ 131 | 31 | 6 | 32
+(4 rows)
+
+DROP TABLE test_at_modify_fa;
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int GENERATED ALWAYS AS (b+1) STORED
+);
+INSERT INTO test_at_modify_fa VALUES(1,2,DEFAULT);
+INSERT INTO test_at_modify_fa VALUES(11,12,DEFAULT);
+INSERT INTO test_at_modify_fa VALUES(21,22,DEFAULT);
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED AFTER a, MODIFY b float4 GENERATED ALWAYS AS (a+1000) STORED FIRST; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: A generated column cannot reference another generated column.
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (b+100) STORED AFTER a, MODIFY a float4 GENERATED ALWAYS AS (b+1000) STORED FIRST;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+ a | d | b | c
+------+-----+----+----
+ 1002 | 102 | 2 | 3
+ 1012 | 112 | 12 | 13
+ 1022 | 122 | 22 | 23
+(3 rows)
+
+DROP TABLE test_at_modify_fa;
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+SELECT a,b,c FROM test_at_modify_fa ORDER BY 1,2,3;
+ a | b | c
+----+----+----
+ 1 | 2 | 3
+ 11 | 12 | 13
+ 21 | 22 | 23
+(3 rows)
+
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED AFTER a, ADD COLUMN e int GENERATED ALWAYS AS (b+100) STORED FIRST;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+ e | a | d | b | c
+-----+----+-----+----+----
+ 102 | 1 | 101 | 2 | 3
+ 112 | 11 | 111 | 12 | 13
+ 122 | 21 | 121 | 22 | 23
+(3 rows)
+
+DROP TABLE test_at_modify_fa;
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+SELECT a,b,c FROM test_at_modify_fa ORDER BY 1,2,3;
+ a | b | c
+----+----+----
+ 1 | 2 | 3
+ 11 | 12 | 13
+ 21 | 22 | 23
+(3 rows)
+
+ALTER TABLE test_at_modify_fa ADD COLUMN d bigint AUTO_INCREMENT PRIMARY KEY AFTER a, ADD COLUMN e int GENERATED ALWAYS AS (b+100) STORED FIRST;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_fa_d_seq" for serial column "test_at_modify_fa.d"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_fa_pkey" for table "test_at_modify_fa"
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+ e | a | d | b | c
+-----+----+---+----+----
+ 102 | 1 | 1 | 2 | 3
+ 112 | 11 | 2 | 12 | 13
+ 122 | 21 | 3 | 22 | 23
+(3 rows)
+
+INSERT INTO test_at_modify_fa(a,b,c) VALUES(31,32,33);
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+ e | a | d | b | c
+-----+----+---+----+----
+ 102 | 1 | 1 | 2 | 3
+ 112 | 11 | 2 | 12 | 13
+ 122 | 21 | 3 | 22 | 23
+ 132 | 31 | 4 | 32 | 33
+(4 rows)
+
+DROP TABLE test_at_modify_fa;
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c float4 GENERATED ALWAYS AS (a+100) STORED FIRST, MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED AFTER c; -- ERROR
+ERROR: cannot use generated column "c" in column generation expression
+DETAIL: A generated column cannot reference another generated column.
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c float4 GENERATED ALWAYS AS (a+100) STORED FIRST, MODIFY COLUMN b int GENERATED ALWAYS AS (a+100) STORED AFTER c;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3;
+ c | b | a
+-----+-----+----
+ 101 | 101 | 1
+ 111 | 111 | 11
+ 121 | 121 | 21
+(3 rows)
+
+DROP TABLE test_at_modify_fa;
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,0);
+INSERT INTO test_at_modify_fa VALUES(21,22,0);
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c bigint AUTO_INCREMENT PRIMARY KEY FIRST, MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED AFTER c; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_fa_c_seq" for serial column "test_at_modify_fa.c"
+ERROR: generated column cannot refer to auto_increment column
+ALTER TABLE test_at_modify_fa MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED AFTER c, MODIFY COLUMN c bigint AUTO_INCREMENT PRIMARY KEY FIRST; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_fa_c_seq" for serial column "test_at_modify_fa.c"
+ERROR: generated column cannot refer to auto_increment column
+DROP TABLE test_at_modify_fa;
+-- primary key should be not null after modify
+create table test11(f11 int, f12 varchar(20), f13 bool, CONSTRAINT pk_test11_f11 primary key (f11));
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pk_test11_f11" for table "test11"
+\d test11
+ Table "atbdb_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f11 | integer | not null
+ f12 | character varying(20) |
+ f13 | boolean |
+Indexes:
+ "pk_test11_f11" PRIMARY KEY, btree (f11) TABLESPACE pg_default
+
+ALTER TABLE test11 MODIFY COLUMN f11 int;
+\d test11
+ Table "atbdb_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f11 | integer | not null
+ f12 | character varying(20) |
+ f13 | boolean |
+Indexes:
+ "pk_test11_f11" PRIMARY KEY, btree (f11) TABLESPACE pg_default
+
+ALTER TABLE test11 MODIFY COLUMN f11 int AFTER f13;
+\d test11
+ Table "atbdb_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f12 | character varying(20) |
+ f13 | boolean |
+ f11 | integer | not null
+Indexes:
+ "pk_test11_f11" PRIMARY KEY, btree (f11) TABLESPACE pg_default
+
+ALTER TABLE test11 DROP CONSTRAINT pk_test11_f11, MODIFY COLUMN f11 int NULL;
+\d test11
+ Table "atbdb_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f12 | character varying(20) |
+ f13 | boolean |
+ f11 | integer |
+
+ALTER TABLE test11 ADD CONSTRAINT pk_test11_f11 primary key (f11), MODIFY COLUMN f11 int NULL;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "pk_test11_f11" for table "test11"
+\d test11
+ Table "atbdb_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f12 | character varying(20) |
+ f13 | boolean |
+ f11 | integer | not null
+Indexes:
+ "pk_test11_f11" PRIMARY KEY, btree (f11) TABLESPACE pg_default
+
+insert into test11(f11,f12,f13) values(NULL,'1',true); --ERROR
+ERROR: null value in column "f11" violates not-null constraint
+DETAIL: Failing row contains (1, t, null).
+drop table test11;
+-- primary keys should be not null after modify
+create table test11(f11 int, f12 varchar(20), f13 bool, CONSTRAINT pk_test11_f11 primary key (f11,f12));
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pk_test11_f11" for table "test11"
+\d test11
+ Table "atbdb_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f11 | integer | not null
+ f12 | character varying(20) | not null
+ f13 | boolean |
+Indexes:
+ "pk_test11_f11" PRIMARY KEY, btree (f11, f12) TABLESPACE pg_default
+
+ALTER TABLE test11 MODIFY COLUMN f11 int;
+\d test11
+ Table "atbdb_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f11 | integer | not null
+ f12 | character varying(20) | not null
+ f13 | boolean |
+Indexes:
+ "pk_test11_f11" PRIMARY KEY, btree (f11, f12) TABLESPACE pg_default
+
+ALTER TABLE test11 MODIFY f11 int AFTER f13;
+\d test11
+ Table "atbdb_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f12 | character varying(20) | not null
+ f13 | boolean |
+ f11 | integer | not null
+Indexes:
+ "pk_test11_f11" PRIMARY KEY, btree (f11, f12) TABLESPACE pg_default
+
+ALTER TABLE test11 DROP CONSTRAINT pk_test11_f11, MODIFY COLUMN f11 int NULL;
+\d test11
+ Table "atbdb_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f12 | character varying(20) | not null
+ f13 | boolean |
+ f11 | integer |
+
+ALTER TABLE test11 ADD CONSTRAINT pk_test11_f11 primary key (f11), MODIFY COLUMN f11 int NULL;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "pk_test11_f11" for table "test11"
+\d test11
+ Table "atbdb_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f12 | character varying(20) | not null
+ f13 | boolean |
+ f11 | integer | not null
+Indexes:
+ "pk_test11_f11" PRIMARY KEY, btree (f11) TABLESPACE pg_default
+
+insert into test11(f11,f12,f13) values(NULL,'1',true); --ERROR
+ERROR: null value in column "f11" violates not-null constraint
+DETAIL: Failing row contains (1, t, null).
+drop table test11;
+-- primary keys in partition table should be not null after modify
+create table range_range(id int, gender varchar not null, birthday date not null)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+ALTER TABLE range_range ADD CONSTRAINT range_range_pkey primary KEY USING btree (id, birthday);
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range"
+\d+ range_range
+ Table "atbdb_schema.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ id | integer | not null | plain | |
+ gender | character varying | not null | extended | |
+ birthday | date | not null | plain | |
+Indexes:
+ "range_range_pkey" PRIMARY KEY, btree (id, birthday) LOCAL TABLESPACE pg_default
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE range_range MODIFY COLUMN id int AFTER birthday;
+\d+ range_range
+ Table "atbdb_schema.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ gender | character varying | not null | extended | |
+ birthday | date | not null | plain | |
+ id | integer | not null | plain | |
+Indexes:
+ "range_range_pkey" PRIMARY KEY, btree (id, birthday) LOCAL TABLESPACE pg_default
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+drop table if exists range_range cascade;
+-- primary key in partition table should be not null after modify
+create table range_range(id int, gender varchar not null, birthday date not null)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+ALTER TABLE range_range ADD CONSTRAINT range_range_pkey primary KEY USING btree (id);
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range"
+\d+ range_range
+ Table "atbdb_schema.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ id | integer | not null | plain | |
+ gender | character varying | not null | extended | |
+ birthday | date | not null | plain | |
+Indexes:
+ "range_range_pkey" PRIMARY KEY, btree (id) TABLESPACE pg_default
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE range_range MODIFY COLUMN id int AFTER birthday;
+\d+ range_range
+ Table "atbdb_schema.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ gender | character varying | not null | extended | |
+ birthday | date | not null | plain | |
+ id | integer | not null | plain | |
+Indexes:
+ "range_range_pkey" PRIMARY KEY, btree (id) TABLESPACE pg_default
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+drop table if exists range_range cascade;
+-- primary key in partition table should be not null after modify
+create table range_range(id int, gender varchar not null, birthday date not null)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+ALTER TABLE range_range ADD CONSTRAINT range_range_pkey primary KEY USING btree (gender);
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range"
+\d+ range_range
+ Table "atbdb_schema.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ id | integer | | plain | |
+ gender | character varying | not null | extended | |
+ birthday | date | not null | plain | |
+Indexes:
+ "range_range_pkey" PRIMARY KEY, btree (gender) TABLESPACE pg_default
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+ALTER TABLE range_range MODIFY COLUMN gender varchar AFTER birthday;
+\d+ range_range
+ Table "atbdb_schema.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ id | integer | | plain | |
+ birthday | date | not null | plain | |
+ gender | character varying | not null | extended | |
+Indexes:
+ "range_range_pkey" PRIMARY KEY, btree (gender) TABLESPACE pg_default
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+drop table if exists range_range cascade;
+-- primary keys in multi range keys partition table should be not null after modify
+create table multi_keys_range(f1 int, f2 int, f3 int)
+partition by range(f1, f2)
+(
+ partition multi_keys_range_p0 values less than (10, 0),
+ partition multi_keys_range_p1 values less than (20, 0),
+ partition multi_keys_range_p2 values less than (30, 0)
+);
+-- primary key should be LOCAL INDEX
+alter table multi_keys_range modify f1 int after f3, ADD CONSTRAINT multi_keys_range_pkey PRIMARY KEY USING btree (f1,f2);
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "multi_keys_range_pkey" for table "multi_keys_range"
+\d+ multi_keys_range
+ Table "atbdb_schema.multi_keys_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f2 | integer | not null | plain | |
+ f3 | integer | | plain | |
+ f1 | integer | not null | plain | |
+Indexes:
+ "multi_keys_range_pkey" PRIMARY KEY, btree (f1, f2) LOCAL TABLESPACE pg_default
+Partition By RANGE(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+alter table multi_keys_range modify f2 int after f3;
+\d+ multi_keys_range
+ Table "atbdb_schema.multi_keys_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f3 | integer | | plain | |
+ f2 | integer | not null | plain | |
+ f1 | integer | not null | plain | |
+Indexes:
+ "multi_keys_range_pkey" PRIMARY KEY, btree (f1, f2) LOCAL TABLESPACE pg_default
+Partition By RANGE(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+drop table if exists multi_keys_range cascade;
+-- primary keys in multi list keys partition table should be not null after modify
+create table multi_keys_list(f1 int, f2 int, f3 int)
+partition by list(f1, f2)
+(
+ partition multi_keys_list_p0 values ((10, 0)),
+ partition multi_keys_list_p1 values ((20, 0)),
+ partition multi_keys_list_p2 values (DEFAULT)
+);
+-- primary key should be LOCAL INDEX
+alter table multi_keys_list modify f1 int after f3, ADD CONSTRAINT multi_keys_list_pkey PRIMARY KEY USING btree (f1,f2);
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "multi_keys_list_pkey" for table "multi_keys_list"
+\d+ multi_keys_list
+ Table "atbdb_schema.multi_keys_list"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f2 | integer | not null | plain | |
+ f3 | integer | | plain | |
+ f1 | integer | not null | plain | |
+Indexes:
+ "multi_keys_list_pkey" PRIMARY KEY, btree (f1, f2) LOCAL TABLESPACE pg_default
+Partition By LIST(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+alter table multi_keys_list modify f2 int after f3;
+\d+ multi_keys_list
+ Table "atbdb_schema.multi_keys_list"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f3 | integer | | plain | |
+ f2 | integer | not null | plain | |
+ f1 | integer | not null | plain | |
+Indexes:
+ "multi_keys_list_pkey" PRIMARY KEY, btree (f1, f2) LOCAL TABLESPACE pg_default
+Partition By LIST(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+drop table if exists multi_keys_list cascade;
+-- test moidfy/change VIEW depends column
+-- --modify
+-- -- --test select *
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_star AS select * from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+--------------------------------------------
+ SELECT * FROM test_at_modify_view_column;
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f4, test_at_modify_view_column.f3, test_at_modify_view_column.f2, test_at_modify_view_column.f1 FROM test_at_modify_view_column;
+(1 row)
+
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view test_modify_view_star
+-- -- --test select * with add column
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_star AS select * from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20), ADD COLUMN f0 int;
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int, ADD COLUMN f0 int;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+--------------------------------------------
+ SELECT * FROM test_at_modify_view_column;
+(1 row)
+
+SELECT * FROM test_modify_view_star;
+ f4 | f3 | f2 | f1
+----+----+----+----
+ 4 | 3 | 2 | 1
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int FIRST, ADD COLUMN f5 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f4, test_at_modify_view_column.f3, test_at_modify_view_column.f2, test_at_modify_view_column.f1 FROM test_at_modify_view_column;
+(1 row)
+
+SELECT * FROM test_modify_view_star;
+ f4 | f3 | f2 | f1
+----+----+----+----
+ 4 | 3 | 2 | 1
+(1 row)
+
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view test_modify_view_star
+-- -- --test select * special
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+create view test_modify_view_star(col1,col2) as
+SELECT * FROM
+(
+ SELECT
+ CAST(f1/10000 AS DECIMAL(18,2)),
+ CAST(CAST(f4 AS DECIMAL(18,4))/f1*100 AS DECIMAL(18,2))
+ FROM test_at_modify_view_column
+);
+SELECT * FROM test_modify_view_star;
+ col1 | col2
+------+--------
+ 0.00 | 400.00
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20);
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT * FROM (SELECT (((test_at_modify_view_column.f1)::bigint / 10000))::numeric(18,2) AS "numeric", ((((test_at_modify_view_column.f4)::numeric(18,4) / (test_at_modify_view_column.f1)::numeric) * (100)::numeric))::numeric(18,2) AS "numeric" FROM test_at_modify_view_column) __unnamed_subquery__;
+(1 row)
+
+SELECT * FROM test_modify_view_star;
+ col1 | col2
+------+--------
+ 0.00 | 400.00
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int FIRST; -- ERROR
+ERROR: column reference "numeric" is ambiguous
+LINE 1: ALTER TABLE test_at_modify_view_column MODIFY column f1 int ...
+ ^
+CONTEXT: referenced column: col1
+ALTER TABLE test_at_modify_view_column ADD COLUMN f5 int FIRST; -- ERROR
+ERROR: column reference "numeric" is ambiguous
+LINE 1: ALTER TABLE test_at_modify_view_column ADD COLUMN f5 int FIR...
+ ^
+CONTEXT: referenced column: col1
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view test_modify_view_star
+-- -- --test modify view column
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_f1f2 WITH(security_barrier=TRUE) AS select F1,F2 from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int;
+SELECT pg_get_viewdef('test_modify_view_f1f2'::regclass);
+ pg_get_viewdef
+------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2 FROM test_at_modify_view_column;
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_f1f2'::regclass);
+ pg_get_viewdef
+------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2 FROM test_at_modify_view_column;
+(1 row)
+
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view test_modify_view_f1f2
+-- -- --test view and column name
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW "test_modify_view_f1f2F3" AS select F1,F2,F3 AS "F3" from test_at_modify_view_column where f4 > 0;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE (test_at_modify_view_column.f4 > 0);
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE (test_at_modify_view_column.f4 > 0);
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f4 varchar(20);
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE ((test_at_modify_view_column.f4)::bigint > 0);
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f4 int AFTER f1;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE ((test_at_modify_view_column.f4)::bigint > 0);
+(1 row)
+
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view "test_modify_view_f1f2F3"
+-- -- --test drop column
+CREATE TABLE test_at_modify_view_column (f5 int, f4 int primary key, f3 text, f2 text, f1 int unique);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(5, 4, '3', '2', 1);
+CREATE VIEW "test_modify_view_f1f2F3" AS select F1,F2,F3 AS "F3" from test_at_modify_view_column where f4 > 0;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column MODIFY column f4 varchar(20), DROP COLUMN f5;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE ((test_at_modify_view_column.f4)::bigint > 0);
+(1 row)
+
+SELECT * FROM "test_modify_view_f1f2F3";
+ f1 | f2 | F3
+----+----+----
+ 1 | 2 | 3
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f4 int AFTER f1, DROP COLUMN f5;
+ERROR: column "f5" of relation "test_at_modify_view_column" does not exist
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE ((test_at_modify_view_column.f4)::bigint > 0);
+(1 row)
+
+SELECT * FROM "test_modify_view_f1f2F3";
+ f1 | f2 | F3
+----+----+----
+ 1 | 2 | 3
+(1 row)
+
+DROP VIEW "test_modify_view_f1f2F3";
+DROP TABLE test_at_modify_view_column CASCADE;
+-- --change
+-- -- --test select *
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_star AS select * from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 int;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+--------------------------------------------
+ SELECT * FROM test_at_modify_view_column;
+(1 row)
+
+ALTER TABLE test_at_modify_view_column CHANGE column c1 f1 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f4, test_at_modify_view_column.f3, test_at_modify_view_column.f2, test_at_modify_view_column.f1 FROM test_at_modify_view_column;
+(1 row)
+
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view test_modify_view_star
+-- -- --test select * with add column
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_star AS select * from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20), ADD COLUMN f0 int; -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 int, ADD COLUMN f0 int;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+--------------------------------------------
+ SELECT * FROM test_at_modify_view_column;
+(1 row)
+
+SELECT * FROM test_modify_view_star;
+ f4 | f3 | f2 | f1
+----+----+----+----
+ 4 | 3 | 2 | 1
+(1 row)
+
+ALTER TABLE test_at_modify_view_column CHANGE column c1 f1 int FIRST, ADD COLUMN f5 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f4, test_at_modify_view_column.f3, test_at_modify_view_column.f2, test_at_modify_view_column.f1 FROM test_at_modify_view_column;
+(1 row)
+
+SELECT * FROM test_modify_view_star;
+ f4 | f3 | f2 | f1
+----+----+----+----
+ 4 | 3 | 2 | 1
+(1 row)
+
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view test_modify_view_star
+-- -- --test select * special
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+create view test_modify_view_star(col1,col2) as
+SELECT * FROM
+(
+ SELECT
+ CAST(f1/10000 AS DECIMAL(18,2)),
+ CAST(CAST(f4 AS DECIMAL(18,4))/f1*100 AS DECIMAL(18,2))
+ FROM test_at_modify_view_column
+);
+SELECT * FROM test_modify_view_star;
+ col1 | col2
+------+--------
+ 0.00 | 400.00
+(1 row)
+
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20);
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT * FROM (SELECT (((test_at_modify_view_column.c1)::bigint / 10000))::numeric(18,2) AS "numeric", ((((test_at_modify_view_column.f4)::numeric(18,4) / (test_at_modify_view_column.c1)::numeric) * (100)::numeric))::numeric(18,2) AS "numeric" FROM test_at_modify_view_column) __unnamed_subquery__;
+(1 row)
+
+SELECT * FROM test_modify_view_star;
+ col1 | col2
+------+--------
+ 0.00 | 400.00
+(1 row)
+
+ALTER TABLE test_at_modify_view_column CHANGE column c1 f1 int FIRST; -- ERROR
+ERROR: column reference "numeric" is ambiguous
+LINE 1: ALTER TABLE test_at_modify_view_column CHANGE column c1 f1 i...
+ ^
+CONTEXT: referenced column: col1
+ALTER TABLE test_at_modify_view_column ADD COLUMN f5 int FIRST; -- ERROR
+ERROR: column reference "numeric" is ambiguous
+LINE 1: ALTER TABLE test_at_modify_view_column ADD COLUMN f5 int FIR...
+ ^
+CONTEXT: referenced column: col1
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view test_modify_view_star
+-- -- --test CHANGE view column
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_f1f2 WITH(security_barrier=TRUE) AS select F1,F2 from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 int;
+SELECT pg_get_viewdef('test_modify_view_f1f2'::regclass);
+ pg_get_viewdef
+------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.c1 AS f1, test_at_modify_view_column.f2 FROM test_at_modify_view_column;
+(1 row)
+
+ALTER TABLE test_at_modify_view_column CHANGE column c1 f1 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_f1f2'::regclass);
+ pg_get_viewdef
+------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2 FROM test_at_modify_view_column;
+(1 row)
+
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view test_modify_view_f1f2
+-- -- --test view and column name
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW "test_modify_view_f1f2F3" AS select F1,F2,F3 AS "F3" from test_at_modify_view_column where f4 > 0;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE (test_at_modify_view_column.f4 > 0);
+(1 row)
+
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 int;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.c1 AS f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE (test_at_modify_view_column.f4 > 0);
+(1 row)
+
+ALTER TABLE test_at_modify_view_column CHANGE column f4 c4 varchar(20);
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.c1 AS f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE ((test_at_modify_view_column.c4)::bigint > 0);
+(1 row)
+
+ALTER TABLE test_at_modify_view_column CHANGE column c4 f4 int AFTER c1;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.c1 AS f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE ((test_at_modify_view_column.f4)::bigint > 0);
+(1 row)
+
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view "test_modify_view_f1f2F3"
+-- -- --test drop column
+CREATE TABLE test_at_modify_view_column (f5 int, f4 int primary key, f3 text, f2 text, f1 int unique);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(5, 4, '3', '2', 1);
+CREATE VIEW "test_modify_view_f1f2F3" AS select F1,F2,F3 AS "F3" from test_at_modify_view_column where f4 > 0;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column CHANGE column f4 c4 varchar(20), DROP COLUMN f5;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE ((test_at_modify_view_column.c4)::bigint > 0);
+(1 row)
+
+SELECT * FROM "test_modify_view_f1f2F3";
+ f1 | f2 | F3
+----+----+----
+ 1 | 2 | 3
+(1 row)
+
+ALTER TABLE test_at_modify_view_column CHANGE column c4 f4 int AFTER f1, DROP COLUMN f5;
+ERROR: column "f5" of relation "test_at_modify_view_column" does not exist
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE ((test_at_modify_view_column.c4)::bigint > 0);
+(1 row)
+
+SELECT * FROM "test_modify_view_f1f2F3";
+ f1 | f2 | F3
+----+----+----
+ 1 | 2 | 3
+(1 row)
+
+DROP VIEW "test_modify_view_f1f2F3";
+DROP TABLE test_at_modify_view_column CASCADE;
+-- END
+RESET CURRENT_SCHEMA;
+DROP SCHEMA atbdb_schema CASCADE;
+\c regression
+clean connection to all force for database atbdb;
+drop database if exists atbdb;
diff --git a/src/test/regress/expected/alter_table_modify_gtt.out b/src/test/regress/expected/alter_table_modify_gtt.out
new file mode 100644
index 000000000..6e913a311
--- /dev/null
+++ b/src/test/regress/expected/alter_table_modify_gtt.out
@@ -0,0 +1,1194 @@
+create database atbdb_gtt WITH ENCODING 'UTF-8' dbcompatibility 'B';
+\c atbdb_gtt
+CREATE SCHEMA atbdb_gtt_schema;
+SET CURRENT_SCHEMA TO atbdb_gtt_schema;
+-- test modify column without data
+CREATE GLOBAL TEMPORARY TABLE test_at_modify(
+ a int,
+ b int NOT NULL
+);
+ALTER TABLE test_at_modify MODIFY b varchar(8) NULL;
+\d+ test_at_modify;
+ Table "atbdb_gtt_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+ALTER TABLE test_at_modify MODIFY b varchar(8) DEFAULT '0';
+\d+ test_at_modify;
+ Table "atbdb_gtt_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+--------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | default '0'::character varying | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+ALTER TABLE test_at_modify MODIFY b int AUTO_INCREMENT PRIMARY KEY INITIALLY DEFERRED;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_b_seq" for serial column "test_at_modify.b"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_pkey" for table "test_at_modify"
+\d+ test_at_modify;
+ Table "atbdb_gtt_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-------------------------+---------+--------------+-------------
+ a | integer | | plain | |
+ b | integer | not null AUTO_INCREMENT | plain | |
+Indexes:
+ "test_at_modify_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default DEFERRABLE INITIALLY DEFERRED
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+ALTER TABLE test_at_modify MODIFY b varchar(8) UNIQUE DEFERRABLE;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_b_key" for table "test_at_modify"
+\d+ test_at_modify;
+ Table "atbdb_gtt_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | not null | extended | |
+Indexes:
+ "test_at_modify_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default DEFERRABLE INITIALLY DEFERRED
+ "test_at_modify_b_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default DEFERRABLE
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+ALTER TABLE test_at_modify MODIFY b varchar(8) CHECK (b < 'a');
+\d+ test_at_modify;
+ Table "atbdb_gtt_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | not null | extended | |
+Indexes:
+ "test_at_modify_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default DEFERRABLE INITIALLY DEFERRED
+ "test_at_modify_b_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default DEFERRABLE
+Check constraints:
+ "test_at_modify_b_check" CHECK (b::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+ALTER TABLE test_at_modify MODIFY b varchar(8) COLLATE "POSIX";
+\d+ test_at_modify;
+ Table "atbdb_gtt_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | collate POSIX not null | extended | |
+Indexes:
+ "test_at_modify_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default DEFERRABLE INITIALLY DEFERRED
+ "test_at_modify_b_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default DEFERRABLE
+Check constraints:
+ "test_at_modify_b_check" CHECK (b::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+ALTER TABLE test_at_modify MODIFY b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+\d+ test_at_modify;
+ Table "atbdb_gtt_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------------------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | not null generated always as ((a + 1)) stored | extended | |
+Indexes:
+ "test_at_modify_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default DEFERRABLE INITIALLY DEFERRED
+ "test_at_modify_b_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default DEFERRABLE
+Check constraints:
+ "test_at_modify_b_check" CHECK (b::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+ALTER TABLE test_at_modify MODIFY b int NOT NULL;
+\d+ test_at_modify;
+ Table "atbdb_gtt_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b | integer | not null | plain | |
+Indexes:
+ "test_at_modify_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default DEFERRABLE INITIALLY DEFERRED
+ "test_at_modify_b_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default DEFERRABLE
+Check constraints:
+ "test_at_modify_b_check" CHECK (b::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+select pg_get_tabledef('test_at_modify'::regclass);
+ pg_get_tabledef
+---------------------------------------------------------------------------------------------------------------------------------------------------------
+ SET search_path = atbdb_gtt_schema; +
+ CREATE GLOBAL TEMPORARY TABLE test_at_modify ( +
+ a integer, +
+ b integer NOT NULL, +
+ CONSTRAINT test_at_modify_b_check CHECK (((b)::text < 'a'::text)) +
+ ) +
+ WITH (orientation=row, compression=no, on_commit_delete_rows=false); +
+ ALTER TABLE test_at_modify ADD CONSTRAINT test_at_modify_b_key UNIQUE USING btree (b) DEFERRABLE DEFERRABLE; +
+ ALTER TABLE test_at_modify ADD CONSTRAINT test_at_modify_pkey PRIMARY KEY USING btree (b) DEFERRABLE INITIALLY DEFERRED DEFERRABLE INITIALLY DEFERRED;
+(1 row)
+
+INSERT INTO test_at_modify VALUES(1,1);
+DROP TABLE test_at_modify;
+-- test modify column datatype
+CREATE GLOBAL TEMPORARY TABLE test_at_modify_type(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_modify_type VALUES(1,1);
+INSERT INTO test_at_modify_type VALUES(2,2);
+INSERT INTO test_at_modify_type VALUES(3,3);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b varchar(8);
+SELECT * FROM test_at_modify_type where b = '3';
+ a | b
+---+---
+ 3 | 3
+(1 row)
+
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DATE; -- ERROR
+ERROR: invalid input syntax for type date: "1"
+ALTER TABLE test_at_modify_type MODIFY COLUMN b RAW;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+ a | b
+---+----
+ 1 | 01
+ 2 | 02
+ 3 | 03
+(3 rows)
+
+DROP TABLE test_at_modify_type;
+CREATE GLOBAL TEMPORARY TABLE test_at_modify_type(
+ a int,
+ b serial NOT NULL
+);
+NOTICE: CREATE TABLE will create implicit sequence "test_at_modify_type_b_seq" for serial column "test_at_modify_type.b"
+INSERT INTO test_at_modify_type VALUES(1,1);
+INSERT INTO test_at_modify_type VALUES(2,2);
+INSERT INTO test_at_modify_type VALUES(3,3);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b int;
+ALTER TABLE test_at_modify_type MODIFY COLUMN b int[]; -- ERROR
+ERROR: column "b" cannot be cast automatically to type integer[]
+HINT: Specify a USING expression to perform the conversion.
+ALTER TABLE test_at_modify_type MODIFY COLUMN b int16;
+ALTER TABLE test_at_modify_type MODIFY COLUMN b serial; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change column to type 'serial'
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DECIMAL(4,2);
+SELECT * FROM test_at_modify_type where b = 3;
+ a | b
+---+------
+ 3 | 3.00
+(1 row)
+
+ALTER TABLE test_at_modify_type MODIFY COLUMN b BOOLEAN;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+ a | b
+---+---
+ 1 | t
+ 2 | t
+ 3 | t
+(3 rows)
+
+DROP TABLE test_at_modify_type;
+CREATE GLOBAL TEMPORARY TABLE test_at_modify_type(
+ a int,
+ b text
+);
+INSERT INTO test_at_modify_type VALUES(1,'beijing');
+INSERT INTO test_at_modify_type VALUES(2,'shanghai');
+INSERT INTO test_at_modify_type VALUES(3,'guangzhou');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','nanjing','wuhan'); -- ERROR
+NOTICE: ALTER TABLE will create implicit set "test_at_modify_type_b_set" for column "test_at_modify_type.b"
+ERROR: invalid input value for set test_at_modify_type_b_set: 'guangzhou'
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','nanjing','guangzhou');
+NOTICE: ALTER TABLE will create implicit set "test_at_modify_type_b_set" for column "test_at_modify_type.b"
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','guangzhou','wuhan'); -- ERROR
+ERROR: can not alter column type to another set
+select pg_get_tabledef('test_at_modify_type'::regclass);
+ pg_get_tabledef
+----------------------------------------------------------------------
+ SET search_path = atbdb_gtt_schema; +
+ CREATE GLOBAL TEMPORARY TABLE test_at_modify_type ( +
+ a integer, +
+ b SET('beijing', 'shanghai', 'nanjing', 'guangzhou') +
+ ) +
+ WITH (orientation=row, compression=no, on_commit_delete_rows=false);
+(1 row)
+
+DROP TABLE test_at_modify_type;
+CREATE GLOBAL TEMPORARY TABLE test_at_modify_type(
+ a int,
+ b varchar(32)
+);
+INSERT INTO test_at_modify_type VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_modify_type VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_modify_type VALUES(3,'2022-11-24 12:00:00');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b varchar(10); -- ERROR
+ERROR: value too long for type character varying(10)
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DATE;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+ a | b
+---+------------
+ 1 | 11-22-2022
+ 2 | 11-23-2022
+ 3 | 11-24-2022
+(3 rows)
+
+DROP TABLE test_at_modify_type;
+CREATE GLOBAL TEMPORARY TABLE test_at_modify_type(
+ a int,
+ b int[] NOT NULL
+);
+INSERT INTO test_at_modify_type VALUES(1,ARRAY[1,1]);
+INSERT INTO test_at_modify_type VALUES(2,ARRAY[2,2]);
+INSERT INTO test_at_modify_type VALUES(3,ARRAY[3,3]);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b float4[];
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+ a | b
+---+-------
+ 1 | {1,1}
+ 2 | {2,2}
+ 3 | {3,3}
+(3 rows)
+
+DROP TABLE test_at_modify_type;
+-- test modify column constraint
+CREATE GLOBAL TEMPORARY TABLE test_at_modify_constr(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_modify_constr VALUES(1,1);
+INSERT INTO test_at_modify_constr VALUES(2,2);
+INSERT INTO test_at_modify_constr VALUES(3,3);
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) NOT NULL NULL; -- ERROR
+ERROR: conflicting NULL/NOT NULL declarations for column "b" of table "test_at_modify_constr"
+LINE 1: ...BLE test_at_modify_constr MODIFY b varchar(8) NOT NULL NULL;
+ ^
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) UNIQUE KEY NULL;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_constr_b_key" for table "test_at_modify_constr"
+INSERT INTO test_at_modify_constr VALUES(3,3); -- ERROR
+ERROR: duplicate key value violates unique constraint "test_at_modify_constr_b_key"
+DETAIL: Key (b)=(3) already exists.
+INSERT INTO test_at_modify_constr VALUES(4,NULL);
+ALTER TABLE test_at_modify_constr MODIFY b int NULL PRIMARY KEY; -- ERROR
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_constr_pkey" for table "test_at_modify_constr"
+ERROR: column "b" contains null values
+DELETE FROM test_at_modify_constr WHERE b IS NULL;
+ALTER TABLE test_at_modify_constr MODIFY b int NULL PRIMARY KEY;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_constr_pkey" for table "test_at_modify_constr"
+INSERT INTO test_at_modify_constr VALUES(4,NULL); -- ERROR
+ERROR: null value in column "b" violates not-null constraint
+DETAIL: Failing row contains (4, null).
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b < 3); -- ERROR
+ERROR: check constraint "t_at_m_check" is violated by some row
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b < 5);
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b = a); -- ERROR
+ERROR: constraint "t_at_m_check" for relation "test_at_modify_constr" already exists
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check_1 CHECK (b = a);
+INSERT INTO test_at_modify_constr VALUES(4,4);
+INSERT INTO test_at_modify_constr VALUES(5,5); -- ERROR
+ERROR: new row for relation "test_at_modify_constr" violates check constraint "t_at_m_check"
+DETAIL: N/A
+INSERT INTO test_at_modify_constr VALUES(6,'a'); -- ERROR
+ERROR: new row for relation "test_at_modify_constr" violates check constraint "t_at_m_check_1"
+DETAIL: N/A
+INSERT INTO test_at_modify_constr VALUES(0,'a');
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL PRIMARY KEY; -- ERROR
+ERROR: multiple primary keys for table "test_at_modify_constr" are not allowed
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL;
+INSERT INTO test_at_modify_constr VALUES(5,5); -- ERROR
+ERROR: new row for relation "test_at_modify_constr" violates check constraint "t_at_m_check"
+DETAIL: N/A
+SELECT b FROM test_at_modify_constr ORDER BY 1;
+ b
+---
+ 0
+ 1
+ 2
+ 3
+ 4
+(5 rows)
+
+select pg_get_tabledef('test_at_modify_constr'::regclass);
+ pg_get_tabledef
+-----------------------------------------------------------------------------------------------------------
+ SET search_path = atbdb_gtt_schema; +
+ CREATE GLOBAL TEMPORARY TABLE test_at_modify_constr ( +
+ a integer, +
+ b integer NOT NULL, +
+ CONSTRAINT t_at_m_check_1 CHECK (((b)::bigint = a)), +
+ CONSTRAINT t_at_m_check CHECK (((b)::bigint < 5)) +
+ ) +
+ WITH (orientation=row, compression=no, on_commit_delete_rows=false); +
+ ALTER TABLE test_at_modify_constr ADD CONSTRAINT test_at_modify_constr_b_key UNIQUE USING btree (b); +
+ ALTER TABLE test_at_modify_constr ADD CONSTRAINT test_at_modify_constr_pkey PRIMARY KEY USING btree (b);
+(1 row)
+
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL REFERENCES test_at_ref (a); -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: modify or change column REFERENCES constraint is not supported
+DROP TABLE test_at_modify_constr;
+-- test modify column default
+CREATE GLOBAL TEMPORARY TABLE test_at_modify_default(
+ a int,
+ b int DEFAULT NULL
+);
+INSERT INTO test_at_modify_default VALUES(1,1);
+INSERT INTO test_at_modify_default VALUES(2,2);
+INSERT INTO test_at_modify_default VALUES(3,3);
+ALTER TABLE test_at_modify_default MODIFY b bigint DEFAULT (a+1); -- ERROR
+ERROR: default value cannot reference to a column
+HINT: Perhaps the default value is enclosed in double quotes
+ALTER TABLE test_at_modify_default MODIFY b bigint DEFAULT NULL;
+\d+ test_at_modify_default;
+ Table "atbdb_gtt_schema.test_at_modify_default"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b | bigint | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) DEFAULT 'a' GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ERROR: both default and generation expression specified for column "b" of table "test_at_modify_default"
+LINE 1: ...at_modify_default MODIFY b varchar(8) DEFAULT 'a' GENERATED ...
+ ^
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) DEFAULT 'a';
+\d+ test_at_modify_default;
+ Table "atbdb_gtt_schema.test_at_modify_default"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+--------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | default 'a'::character varying | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+INSERT INTO test_at_modify_default VALUES(0,DEFAULT);
+SELECT b FROM test_at_modify_default ORDER BY 1;
+ b
+---
+ 1
+ 2
+ 3
+ a
+(4 rows)
+
+ALTER TABLE test_at_modify_default MODIFY b int DEFAULT 4 AUTO_INCREMENT; -- ERROR
+ERROR: multiple default values specified for column "b" of table "test_at_modify_default"
+LINE 1: ...BLE test_at_modify_default MODIFY b int DEFAULT 4 AUTO_INCRE...
+ ^
+ALTER TABLE test_at_modify_default MODIFY b int DEFAULT 4;
+INSERT INTO test_at_modify_default VALUES(4,DEFAULT);
+SELECT b FROM test_at_modify_default ORDER BY 1;
+ b
+---
+ 0
+ 1
+ 2
+ 3
+ 4
+(5 rows)
+
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+SELECT a,b FROM test_at_modify_default ORDER BY 1,2;
+ a | b
+---+---
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 4
+ 4 | 5
+(5 rows)
+
+ALTER TABLE test_at_modify_default MODIFY a varchar(8) DEFAULT 'a';
+INSERT INTO test_at_modify_default VALUES(DEFAULT,DEFAULT);
+SELECT a,b FROM test_at_modify_default ORDER BY 1,2;
+ a | b
+---+---
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 4
+ 4 | 5
+ a | 1
+(6 rows)
+
+\d+ test_at_modify_default;
+ Table "atbdb_gtt_schema.test_at_modify_default"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+------------------------------------------------+----------+--------------+-------------
+ a | character varying(8) | default 'a'::character varying | extended | |
+ b | character varying(8) | generated always as (((a)::bigint + 1)) stored | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+DROP TABLE test_at_modify_default;
+-- test modify column depended by generated column
+CREATE GLOBAL TEMPORARY TABLE test_at_modify_generated(
+ a int,
+ b varchar(32),
+ c varchar(32) GENERATED ALWAYS AS (b) STORED
+);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_modify_generated(a,b) VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_modify_generated(a,b) VALUES(3,'2022-11-24 12:00:00');
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b | c
+---+---------------------+---------------------
+ 1 | 2022-11-22 12:00:00 | 2022-11-22 12:00:00
+ 2 | 2022-11-23 12:00:00 | 2022-11-23 12:00:00
+ 3 | 2022-11-24 12:00:00 | 2022-11-24 12:00:00
+(3 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b DATE;
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b | c
+---+------------+------------
+ 1 | 11-22-2022 | 11-22-2022
+ 2 | 11-23-2022 | 11-23-2022
+ 3 | 11-24-2022 | 11-24-2022
+(3 rows)
+
+\d+ test_at_modify_generated
+ Table "atbdb_gtt_schema.test_at_modify_generated"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------+--------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | date | | plain | |
+ c | character varying(32) | generated always as (b) stored | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b varchar(32);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b | c
+---+------------+------------
+ 1 | 11-22-2022 | 11-22-2022
+ 2 | 11-23-2022 | 11-23-2022
+ 3 | 11-24-2022 | 11-24-2022
+(3 rows)
+
+DROP TABLE test_at_modify_generated;
+-- test modify column AUTO_INCREMENT
+CREATE GLOBAL TEMPORARY TABLE test_at_modify_autoinc(
+ a int,
+ b int
+);
+INSERT INTO test_at_modify_autoinc VALUES(1,NULL);
+INSERT INTO test_at_modify_autoinc VALUES(2,0);
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq" for serial column "test_at_modify_autoinc.b"
+ERROR: auto_increment column must be defined as a unique or primary key
+ALTER TABLE test_at_modify_autoinc MODIFY b DECIMAL(4,2) AUTO_INCREMENT UNIQUE KEY; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq" for serial column "test_at_modify_autoinc.b"
+ERROR: The datatype of column 'b' does not support auto_increment
+ALTER TABLE test_at_modify_autoinc MODIFY b serial AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change column to type 'serial'
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT NULL UNIQUE KEY;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq" for serial column "test_at_modify_autoinc.b"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_autoinc_b_key" for table "test_at_modify_autoinc"
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 | 2
+(2 rows)
+
+INSERT INTO test_at_modify_autoinc VALUES(3,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 | 2
+ 3 | 3
+(3 rows)
+
+ALTER TABLE test_at_modify_autoinc MODIFY COLUMN b int;
+INSERT INTO test_at_modify_autoinc VALUES(4,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 | 2
+ 3 | 3
+ 4 | 0
+(4 rows)
+
+ALTER TABLE test_at_modify_autoinc MODIFY b int AUTO_INCREMENT PRIMARY KEY, AUTO_INCREMENT=100;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq" for serial column "test_at_modify_autoinc.b"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_autoinc_pkey" for table "test_at_modify_autoinc"
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+-----
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+(4 rows)
+
+INSERT INTO test_at_modify_autoinc VALUES(5,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+-----
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+(5 rows)
+
+ALTER TABLE test_at_modify_autoinc AUTO_INCREMENT=1000;
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq1" for serial column "test_at_modify_autoinc.b"
+INSERT INTO test_at_modify_autoinc VALUES(6,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+------
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+ 6 | 1000
+(6 rows)
+
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE, MODIFY b int2 AUTO_INCREMENT UNIQUE KEY; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_c_seq" for serial column "test_at_modify_autoinc.c"
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq" for serial column "test_at_modify_autoinc.b"
+ERROR: Incorrect table definition, there can be only one auto_increment column
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_c_seq" for serial column "test_at_modify_autoinc.c"
+ERROR: Incorrect column definition, there can be only one auto_increment column
+ALTER TABLE test_at_modify_autoinc MODIFY COLUMN b int;
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_c_seq" for serial column "test_at_modify_autoinc.c"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_autoinc_c_key" for table "test_at_modify_autoinc"
+INSERT INTO test_at_modify_autoinc VALUES(7,0,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b | c
+---+------+---
+ 1 | 100 | 1
+ 2 | 2 | 2
+ 3 | 3 | 3
+ 4 | 101 | 4
+ 5 | 102 | 5
+ 6 | 1000 | 6
+ 7 | 0 | 7
+(7 rows)
+
+ALTER TABLE test_at_modify_autoinc DROP COLUMN c , MODIFY b int2 AUTO_INCREMENT UNIQUE KEY;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq" for serial column "test_at_modify_autoinc.b"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_autoinc_b_key1" for table "test_at_modify_autoinc"
+INSERT INTO test_at_modify_autoinc VALUES(8,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+------
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+ 6 | 1000
+ 7 | 1001
+ 8 | 1002
+(8 rows)
+
+ALTER TABLE test_at_modify_autoinc MODIFY b float4; -- ALTER TYPE ONLY, KEEP AUTO_INCREMENT
+INSERT INTO test_at_modify_autoinc VALUES(9,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+------
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+ 6 | 1000
+ 7 | 1001
+ 8 | 1002
+ 9 | 1003
+(9 rows)
+
+DROP TABLE test_at_modify_autoinc;
+-- ------------------------------------------------------ test ALTER TABLE CHANGE
+-- test change column without data
+CREATE GLOBAL TEMPORARY TABLE test_at_change(
+ a int,
+ b int NOT NULL
+);
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) NULL;
+\d+ test_at_change;
+ Table "atbdb_gtt_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b1 | character varying(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) DEFAULT '0';
+\d+ test_at_change;
+ Table "atbdb_gtt_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+--------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | default '0'::character varying | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+ALTER TABLE test_at_change CHANGE b b1 int AUTO_INCREMENT PRIMARY KEY;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_b1_seq" for serial column "test_at_change.b1"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_pkey" for table "test_at_change"
+\d+ test_at_change;
+ Table "atbdb_gtt_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-------------------------+---------+--------------+-------------
+ a | integer | | plain | |
+ b1 | integer | not null AUTO_INCREMENT | plain | |
+Indexes:
+ "test_at_change_pkey" PRIMARY KEY, btree (b1) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) UNIQUE;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_b_key" for table "test_at_change"
+\d+ test_at_change;
+ Table "atbdb_gtt_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | not null | extended | |
+Indexes:
+ "test_at_change_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default
+ "test_at_change_b_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) CHECK (b1 < 'a');
+\d+ test_at_change;
+ Table "atbdb_gtt_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b1 | character varying(8) | not null | extended | |
+Indexes:
+ "test_at_change_pkey" PRIMARY KEY, btree (b1) TABLESPACE pg_default
+ "test_at_change_b_key" UNIQUE CONSTRAINT, btree (b1) TABLESPACE pg_default
+Check constraints:
+ "test_at_change_b1_check" CHECK (b1::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) COLLATE "POSIX";
+\d+ test_at_change;
+ Table "atbdb_gtt_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | collate POSIX not null | extended | |
+Indexes:
+ "test_at_change_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default
+ "test_at_change_b_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default
+Check constraints:
+ "test_at_change_b1_check" CHECK (b::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+\d+ test_at_change;
+ Table "atbdb_gtt_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------------------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b1 | character varying(8) | not null generated always as ((a + 1)) stored | extended | |
+Indexes:
+ "test_at_change_pkey" PRIMARY KEY, btree (b1) TABLESPACE pg_default
+ "test_at_change_b_key" UNIQUE CONSTRAINT, btree (b1) TABLESPACE pg_default
+Check constraints:
+ "test_at_change_b1_check" CHECK (b1::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+ALTER TABLE test_at_change CHANGE b1 b int NOT NULL;
+\d+ test_at_change;
+ Table "atbdb_gtt_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b | integer | not null | plain | |
+Indexes:
+ "test_at_change_pkey" PRIMARY KEY, btree (b) TABLESPACE pg_default
+ "test_at_change_b_key" UNIQUE CONSTRAINT, btree (b) TABLESPACE pg_default
+Check constraints:
+ "test_at_change_b1_check" CHECK (b::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+select pg_get_tabledef('test_at_change'::regclass);
+ pg_get_tabledef
+---------------------------------------------------------------------------------------------
+ SET search_path = atbdb_gtt_schema; +
+ CREATE GLOBAL TEMPORARY TABLE test_at_change ( +
+ a integer, +
+ b integer NOT NULL, +
+ CONSTRAINT test_at_change_b1_check CHECK (((b)::text < 'a'::text)) +
+ ) +
+ WITH (orientation=row, compression=no, on_commit_delete_rows=false); +
+ ALTER TABLE test_at_change ADD CONSTRAINT test_at_change_b_key UNIQUE USING btree (b); +
+ ALTER TABLE test_at_change ADD CONSTRAINT test_at_change_pkey PRIMARY KEY USING btree (b);
+(1 row)
+
+INSERT INTO test_at_change VALUES(1,1);
+DROP TABLE test_at_change;
+-- test change column datatype
+CREATE GLOBAL TEMPORARY TABLE test_at_change_type(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_change_type VALUES(1,1);
+INSERT INTO test_at_change_type VALUES(2,2);
+INSERT INTO test_at_change_type VALUES(3,3);
+ALTER TABLE test_at_change_type CHANGE b b1 varchar(8);
+SELECT * FROM test_at_change_type where b1 = '3';
+ a | b1
+---+----
+ 3 | 3
+(1 row)
+
+ALTER TABLE test_at_change_type CHANGE b1 b DATE; -- ERROR
+ERROR: invalid input syntax for type date: "1"
+ALTER TABLE test_at_change_type CHANGE b1 b RAW;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+ a | b
+---+----
+ 1 | 01
+ 2 | 02
+ 3 | 03
+(3 rows)
+
+DROP TABLE test_at_change_type;
+CREATE GLOBAL TEMPORARY TABLE test_at_change_type(
+ a int,
+ b serial NOT NULL
+);
+NOTICE: CREATE TABLE will create implicit sequence "test_at_change_type_b_seq" for serial column "test_at_change_type.b"
+INSERT INTO test_at_change_type VALUES(1,1);
+INSERT INTO test_at_change_type VALUES(2,2);
+INSERT INTO test_at_change_type VALUES(3,3);
+ALTER TABLE test_at_change_type CHANGE b b1 int;
+ALTER TABLE test_at_change_type CHANGE b1 b serial; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change column to type 'serial'
+ALTER TABLE test_at_change_type CHANGE b1 b DECIMAL(4,2);
+SELECT * FROM test_at_change_type where b = 3;
+ a | b
+---+------
+ 3 | 3.00
+(1 row)
+
+ALTER TABLE test_at_change_type CHANGE b b1 BOOLEAN;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+ a | b1
+---+----
+ 1 | t
+ 2 | t
+ 3 | t
+(3 rows)
+
+DROP TABLE test_at_change_type;
+CREATE GLOBAL TEMPORARY TABLE test_at_change_type(
+ a int,
+ b text
+);
+INSERT INTO test_at_change_type VALUES(1,'beijing');
+INSERT INTO test_at_change_type VALUES(2,'shanghai');
+INSERT INTO test_at_change_type VALUES(3,'guangzhou');
+ALTER TABLE test_at_change_type CHANGE b b1 SET('beijing','shanghai','nanjing','wuhan'); -- ERROR
+NOTICE: ALTER TABLE will create implicit set "test_at_change_type_b1_set" for column "test_at_change_type.b1"
+ERROR: invalid input value for set test_at_change_type_b1_set: 'guangzhou'
+ALTER TABLE test_at_change_type CHANGE b b1 SET('beijing','shanghai','nanjing','guangzhou');
+NOTICE: ALTER TABLE will create implicit set "test_at_change_type_b1_set" for column "test_at_change_type.b1"
+ALTER TABLE test_at_change_type CHANGE b1 b SET('beijing','shanghai','guangzhou','wuhan'); -- ERROR
+ERROR: can not alter column type to another set
+select pg_get_tabledef('test_at_change_type'::regclass);
+ pg_get_tabledef
+----------------------------------------------------------------------
+ SET search_path = atbdb_gtt_schema; +
+ CREATE GLOBAL TEMPORARY TABLE test_at_change_type ( +
+ a integer, +
+ b1 SET('beijing', 'shanghai', 'nanjing', 'guangzhou') +
+ ) +
+ WITH (orientation=row, compression=no, on_commit_delete_rows=false);
+(1 row)
+
+DROP TABLE test_at_change_type;
+CREATE GLOBAL TEMPORARY TABLE test_at_change_type(
+ a int,
+ b varchar(32)
+);
+INSERT INTO test_at_change_type VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_change_type VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_change_type VALUES(3,'2022-11-24 12:00:00');
+ALTER TABLE test_at_change_type CHANGE b b1 varchar(10); -- ERROR
+ERROR: value too long for type character varying(10)
+ALTER TABLE test_at_change_type CHANGE b b1 DATE;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+ a | b1
+---+------------
+ 1 | 11-22-2022
+ 2 | 11-23-2022
+ 3 | 11-24-2022
+(3 rows)
+
+DROP TABLE test_at_change_type;
+-- test change column constraint
+CREATE GLOBAL TEMPORARY TABLE test_at_change_constr(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_change_constr VALUES(1,1);
+INSERT INTO test_at_change_constr VALUES(2,2);
+INSERT INTO test_at_change_constr VALUES(3,3);
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) NOT NULL NULL; -- ERROR
+ERROR: conflicting NULL/NOT NULL declarations for column "b1" of table "test_at_change_constr"
+LINE 1: ... test_at_change_constr CHANGE b b1 varchar(8) NOT NULL NULL;
+ ^
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) UNIQUE KEY NULL;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_constr_b1_key" for table "test_at_change_constr"
+INSERT INTO test_at_change_constr VALUES(3,3); -- ERROR
+ERROR: duplicate key value violates unique constraint "test_at_change_constr_b1_key"
+DETAIL: Key (b1)=(3) already exists.
+INSERT INTO test_at_change_constr VALUES(4,NULL);
+ALTER TABLE test_at_change_constr CHANGE b1 b int NULL PRIMARY KEY; -- ERROR
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_constr_pkey" for table "test_at_change_constr"
+ERROR: column "b" contains null values
+DELETE FROM test_at_change_constr WHERE b1 IS NULL;
+ALTER TABLE test_at_change_constr CHANGE b1 b int NULL PRIMARY KEY;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_constr_pkey" for table "test_at_change_constr"
+INSERT INTO test_at_change_constr VALUES(4,NULL); -- ERROR
+ERROR: null value in column "b" violates not-null constraint
+DETAIL: Failing row contains (4, null).
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) CONSTRAINT t_at_m_check CHECK (b1 < 3); -- ERROR
+ERROR: check constraint "t_at_m_check" is violated by some row
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) CONSTRAINT t_at_m_check CHECK (b1 < 5);
+ALTER TABLE test_at_change_constr CHANGE b1 b varchar(8) CONSTRAINT t_at_m_check CHECK (b = a); -- ERROR
+ERROR: constraint "t_at_m_check" for relation "test_at_change_constr" already exists
+ALTER TABLE test_at_change_constr CHANGE b1 b varchar(8) CONSTRAINT t_at_m_check_1 CHECK (b = a);
+INSERT INTO test_at_change_constr VALUES(4,4);
+INSERT INTO test_at_change_constr VALUES(5,5); -- ERROR
+ERROR: new row for relation "test_at_change_constr" violates check constraint "t_at_m_check"
+DETAIL: N/A
+INSERT INTO test_at_change_constr VALUES(6,'a'); -- ERROR
+ERROR: new row for relation "test_at_change_constr" violates check constraint "t_at_m_check_1"
+DETAIL: N/A
+INSERT INTO test_at_change_constr VALUES(0,'a');
+ALTER TABLE test_at_change_constr CHANGE b b1 int NOT NULL PRIMARY KEY; -- ERROR
+ERROR: multiple primary keys for table "test_at_change_constr" are not allowed
+ALTER TABLE test_at_change_constr CHANGE b b1 int NOT NULL;
+INSERT INTO test_at_change_constr VALUES(5,5); -- ERROR
+ERROR: new row for relation "test_at_change_constr" violates check constraint "t_at_m_check"
+DETAIL: N/A
+SELECT b1 FROM test_at_change_constr ORDER BY 1;
+ b1
+----
+ 0
+ 1
+ 2
+ 3
+ 4
+(5 rows)
+
+select pg_get_tabledef('test_at_change_constr'::regclass);
+ pg_get_tabledef
+------------------------------------------------------------------------------------------------------------
+ SET search_path = atbdb_gtt_schema; +
+ CREATE GLOBAL TEMPORARY TABLE test_at_change_constr ( +
+ a integer, +
+ b1 integer NOT NULL, +
+ CONSTRAINT t_at_m_check_1 CHECK (((b1)::bigint = a)), +
+ CONSTRAINT t_at_m_check CHECK (((b1)::bigint < 5)) +
+ ) +
+ WITH (orientation=row, compression=no, on_commit_delete_rows=false); +
+ ALTER TABLE test_at_change_constr ADD CONSTRAINT test_at_change_constr_b1_key UNIQUE USING btree (b1); +
+ ALTER TABLE test_at_change_constr ADD CONSTRAINT test_at_change_constr_pkey PRIMARY KEY USING btree (b1);
+(1 row)
+
+ALTER TABLE test_at_change_constr CHANGE b1 b int NOT NULL REFERENCES test_at_ref (a); -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: modify or change column REFERENCES constraint is not supported
+DROP TABLE test_at_change_constr;
+-- test change column default
+CREATE GLOBAL TEMPORARY TABLE test_at_change_default(
+ a int,
+ b int DEFAULT NULL
+);
+INSERT INTO test_at_change_default VALUES(1,1);
+INSERT INTO test_at_change_default VALUES(2,2);
+INSERT INTO test_at_change_default VALUES(3,3);
+ALTER TABLE test_at_change_default CHANGE b b1 bigint DEFAULT (a+1); -- ERROR
+ERROR: default value cannot reference to a column
+HINT: Perhaps the default value is enclosed in double quotes
+ALTER TABLE test_at_change_default CHANGE b b1 bigint DEFAULT NULL;
+\d+ test_at_change_default;
+ Table "atbdb_gtt_schema.test_at_change_default"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b1 | bigint | | plain | |
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) DEFAULT 'a' GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ERROR: both default and generation expression specified for column "b" of table "test_at_change_default"
+LINE 1: ...change_default CHANGE b1 b varchar(8) DEFAULT 'a' GENERATED ...
+ ^
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) DEFAULT 'a';
+\d+ test_at_change_default;
+ Table "atbdb_gtt_schema.test_at_change_default"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+--------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | default 'a'::character varying | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+INSERT INTO test_at_change_default VALUES(0,DEFAULT);
+SELECT b FROM test_at_change_default ORDER BY 1;
+ b
+---
+ 1
+ 2
+ 3
+ a
+(4 rows)
+
+ALTER TABLE test_at_change_default CHANGE b b1 int DEFAULT 4 AUTO_INCREMENT; -- ERROR
+ERROR: multiple default values specified for column "b1" of table "test_at_change_default"
+LINE 1: ... test_at_change_default CHANGE b b1 int DEFAULT 4 AUTO_INCRE...
+ ^
+ALTER TABLE test_at_change_default CHANGE b b1 int DEFAULT 4;
+INSERT INTO test_at_change_default VALUES(4,DEFAULT);
+SELECT b1 FROM test_at_change_default ORDER BY 1;
+ b1
+----
+ 0
+ 1
+ 2
+ 3
+ 4
+(5 rows)
+
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+SELECT a,b FROM test_at_change_default ORDER BY 1,2;
+ a | b
+---+---
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 4
+ 4 | 5
+(5 rows)
+
+ALTER TABLE test_at_change_default CHANGE a a1 varchar(8) DEFAULT 'a';
+INSERT INTO test_at_change_default VALUES(DEFAULT,DEFAULT);
+SELECT * FROM test_at_change_default ORDER BY 1,2;
+ a1 | b
+----+---
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 4
+ 4 | 5
+ a | 1
+(6 rows)
+
+\d+ test_at_change_default;
+ Table "atbdb_gtt_schema.test_at_change_default"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-------------------------------------------------+----------+--------------+-------------
+ a1 | character varying(8) | default 'a'::character varying | extended | |
+ b | character varying(8) | generated always as (((a1)::bigint + 1)) stored | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+DROP TABLE test_at_change_default;
+-- test change column depended by generated column
+CREATE GLOBAL TEMPORARY TABLE test_at_change_generated(
+ a int,
+ b varchar(32),
+ c varchar(32) GENERATED ALWAYS AS (b) STORED
+);
+INSERT INTO test_at_change_generated(a,b) VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_change_generated(a,b) VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_change_generated(a,b) VALUES(3,'2022-11-24 12:00:00');
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b | c
+---+---------------------+---------------------
+ 1 | 2022-11-22 12:00:00 | 2022-11-22 12:00:00
+ 2 | 2022-11-23 12:00:00 | 2022-11-23 12:00:00
+ 3 | 2022-11-24 12:00:00 | 2022-11-24 12:00:00
+(3 rows)
+
+ALTER TABLE test_at_change_generated CHANGE COLUMN b b1 DATE;
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b1 | c
+---+------------+------------
+ 1 | 11-22-2022 | 11-22-2022
+ 2 | 11-23-2022 | 11-23-2022
+ 3 | 11-24-2022 | 11-24-2022
+(3 rows)
+
+\d+ test_at_change_generated
+ Table "atbdb_gtt_schema.test_at_change_generated"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------+---------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b1 | date | | plain | |
+ c | character varying(32) | generated always as (b1) stored | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no, on_commit_delete_rows=false
+
+ALTER TABLE test_at_change_generated CHANGE COLUMN b1 b varchar(32);
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b | c
+---+------------+------------
+ 1 | 11-22-2022 | 11-22-2022
+ 2 | 11-23-2022 | 11-23-2022
+ 3 | 11-24-2022 | 11-24-2022
+(3 rows)
+
+DROP TABLE test_at_change_generated;
+-- test change column AUTO_INCREMENT
+CREATE GLOBAL TEMPORARY TABLE test_at_change_autoinc(
+ a int,
+ b int
+);
+INSERT INTO test_at_change_autoinc VALUES(1,NULL);
+INSERT INTO test_at_change_autoinc VALUES(2,0);
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int2 AUTO_INCREMENT; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b1_seq" for serial column "test_at_change_autoinc.b1"
+ERROR: auto_increment column must be defined as a unique or primary key
+ALTER TABLE test_at_change_autoinc CHANGE b b1 DECIMAL(4,2) AUTO_INCREMENT UNIQUE KEY; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b1_seq" for serial column "test_at_change_autoinc.b1"
+ERROR: The datatype of column 'b1' does not support auto_increment
+ALTER TABLE test_at_change_autoinc CHANGE b b1 serial AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change column to type 'serial'
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int2 AUTO_INCREMENT NULL UNIQUE KEY;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b1_seq" for serial column "test_at_change_autoinc.b1"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_autoinc_b1_key" for table "test_at_change_autoinc"
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1
+---+----
+ 1 |
+ 2 | 2
+(2 rows)
+
+INSERT INTO test_at_change_autoinc VALUES(3,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1
+---+----
+ 1 |
+ 2 | 2
+ 3 | 3
+(3 rows)
+
+ALTER TABLE test_at_change_autoinc CHANGE b1 b int;
+INSERT INTO test_at_change_autoinc VALUES(4,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 | 2
+ 3 | 3
+ 4 | 0
+(4 rows)
+
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int AUTO_INCREMENT PRIMARY KEY, AUTO_INCREMENT=100;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b1_seq" for serial column "test_at_change_autoinc.b1"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_autoinc_pkey" for table "test_at_change_autoinc"
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1
+---+-----
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+(4 rows)
+
+INSERT INTO test_at_change_autoinc VALUES(5,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1
+---+-----
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+(5 rows)
+
+ALTER TABLE test_at_change_autoinc AUTO_INCREMENT=1000;
+ALTER TABLE test_at_change_autoinc CHANGE b1 b int2 AUTO_INCREMENT;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b_seq" for serial column "test_at_change_autoinc.b"
+INSERT INTO test_at_change_autoinc VALUES(6,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b
+---+------
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+ 6 | 1000
+(6 rows)
+
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE, CHANGE b b1 int2 AUTO_INCREMENT UNIQUE KEY; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_c_seq" for serial column "test_at_change_autoinc.c"
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b1_seq" for serial column "test_at_change_autoinc.b1"
+ERROR: Incorrect table definition, there can be only one auto_increment column
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_c_seq" for serial column "test_at_change_autoinc.c"
+ERROR: Incorrect column definition, there can be only one auto_increment column
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int;
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_c_seq" for serial column "test_at_change_autoinc.c"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_autoinc_c_key" for table "test_at_change_autoinc"
+INSERT INTO test_at_change_autoinc VALUES(7,0,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1 | c
+---+------+---
+ 1 | 100 | 1
+ 2 | 2 | 2
+ 3 | 3 | 3
+ 4 | 101 | 4
+ 5 | 102 | 5
+ 6 | 1000 | 6
+ 7 | 0 | 7
+(7 rows)
+
+ALTER TABLE test_at_change_autoinc DROP COLUMN c , CHANGE b1 b int2 AUTO_INCREMENT UNIQUE KEY;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b_seq" for serial column "test_at_change_autoinc.b"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_autoinc_b_key" for table "test_at_change_autoinc"
+INSERT INTO test_at_change_autoinc VALUES(8,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b
+---+------
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+ 6 | 1000
+ 7 | 1001
+ 8 | 1002
+(8 rows)
+
+DROP TABLE test_at_change_autoinc;
+-- END
+RESET CURRENT_SCHEMA;
+DROP SCHEMA atbdb_gtt_schema CASCADE;
+\c regression
+clean connection to all force for database atbdb_gtt;
+drop database if exists atbdb_gtt;
diff --git a/src/test/regress/expected/alter_table_modify_ltt.out b/src/test/regress/expected/alter_table_modify_ltt.out
new file mode 100644
index 000000000..20df0567f
--- /dev/null
+++ b/src/test/regress/expected/alter_table_modify_ltt.out
@@ -0,0 +1,786 @@
+create database atbdb_ltt WITH ENCODING 'UTF-8' dbcompatibility 'B';
+\c atbdb_ltt
+CREATE SCHEMA atbdb_ltt_schema;
+SET CURRENT_SCHEMA TO atbdb_ltt_schema;
+-- test modify column without data
+CREATE LOCAL TEMPORARY TABLE test_at_modify(
+ a int,
+ b int NOT NULL
+);
+ALTER TABLE test_at_modify MODIFY b varchar(8) NULL;
+ALTER TABLE test_at_modify MODIFY b varchar(8) DEFAULT '0';
+ALTER TABLE test_at_modify MODIFY b int AUTO_INCREMENT PRIMARY KEY INITIALLY DEFERRED;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_pkey" for table "test_at_modify"
+ALTER TABLE test_at_modify MODIFY b varchar(8) UNIQUE DEFERRABLE;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_b_key" for table "test_at_modify"
+ALTER TABLE test_at_modify MODIFY b varchar(8) CHECK (b < 'a');
+ALTER TABLE test_at_modify MODIFY b varchar(8) COLLATE "POSIX";
+ALTER TABLE test_at_modify MODIFY b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+ALTER TABLE test_at_modify MODIFY b int NOT NULL;
+INSERT INTO test_at_modify VALUES(1,1);
+DROP TABLE test_at_modify;
+-- test modify column datatype
+CREATE LOCAL TEMPORARY TABLE test_at_modify_type(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_modify_type VALUES(1,1);
+INSERT INTO test_at_modify_type VALUES(2,2);
+INSERT INTO test_at_modify_type VALUES(3,3);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b varchar(8);
+SELECT * FROM test_at_modify_type where b = '3';
+ a | b
+---+---
+ 3 | 3
+(1 row)
+
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DATE; -- ERROR
+ERROR: invalid input syntax for type date: "1"
+ALTER TABLE test_at_modify_type MODIFY COLUMN b RAW;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+ a | b
+---+----
+ 1 | 01
+ 2 | 02
+ 3 | 03
+(3 rows)
+
+DROP TABLE test_at_modify_type;
+CREATE LOCAL TEMPORARY TABLE test_at_modify_type(
+ a int,
+ b text
+);
+INSERT INTO test_at_modify_type VALUES(1,'beijing');
+INSERT INTO test_at_modify_type VALUES(2,'shanghai');
+INSERT INTO test_at_modify_type VALUES(3,'guangzhou');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','nanjing','wuhan'); -- ERROR
+NOTICE: ALTER TABLE will create implicit set "test_at_modify_type_b_set" for column "test_at_modify_type.b"
+ERROR: invalid input value for set test_at_modify_type_b_set: 'guangzhou'
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','nanjing','guangzhou');
+NOTICE: ALTER TABLE will create implicit set "test_at_modify_type_b_set" for column "test_at_modify_type.b"
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','guangzhou','wuhan'); -- ERROR
+ERROR: can not alter column type to another set
+DROP TABLE test_at_modify_type;
+CREATE LOCAL TEMPORARY TABLE test_at_modify_type(
+ a int,
+ b varchar(32)
+);
+INSERT INTO test_at_modify_type VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_modify_type VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_modify_type VALUES(3,'2022-11-24 12:00:00');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b varchar(10); -- ERROR
+ERROR: value too long for type character varying(10)
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DATE;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+ a | b
+---+------------
+ 1 | 11-22-2022
+ 2 | 11-23-2022
+ 3 | 11-24-2022
+(3 rows)
+
+DROP TABLE test_at_modify_type;
+CREATE LOCAL TEMPORARY TABLE test_at_modify_type(
+ a int,
+ b int[] NOT NULL
+);
+INSERT INTO test_at_modify_type VALUES(1,ARRAY[1,1]);
+INSERT INTO test_at_modify_type VALUES(2,ARRAY[2,2]);
+INSERT INTO test_at_modify_type VALUES(3,ARRAY[3,3]);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b float4[];
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+ a | b
+---+-------
+ 1 | {1,1}
+ 2 | {2,2}
+ 3 | {3,3}
+(3 rows)
+
+DROP TABLE test_at_modify_type;
+-- test modify column constraint
+CREATE LOCAL TEMPORARY TABLE test_at_modify_constr(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_modify_constr VALUES(1,1);
+INSERT INTO test_at_modify_constr VALUES(2,2);
+INSERT INTO test_at_modify_constr VALUES(3,3);
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) NOT NULL NULL; -- ERROR
+ERROR: conflicting NULL/NOT NULL declarations for column "b" of table "test_at_modify_constr"
+LINE 1: ...BLE test_at_modify_constr MODIFY b varchar(8) NOT NULL NULL;
+ ^
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) UNIQUE KEY NULL;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_constr_b_key" for table "test_at_modify_constr"
+INSERT INTO test_at_modify_constr VALUES(3,3); -- ERROR
+ERROR: duplicate key value violates unique constraint "test_at_modify_constr_b_key"
+DETAIL: Key (b)=(3) already exists.
+INSERT INTO test_at_modify_constr VALUES(4,NULL);
+ALTER TABLE test_at_modify_constr MODIFY b int NULL PRIMARY KEY; -- ERROR
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_constr_pkey" for table "test_at_modify_constr"
+ERROR: column "b" contains null values
+DELETE FROM test_at_modify_constr WHERE b IS NULL;
+ALTER TABLE test_at_modify_constr MODIFY b int NULL PRIMARY KEY;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_constr_pkey" for table "test_at_modify_constr"
+INSERT INTO test_at_modify_constr VALUES(4,NULL); -- ERROR
+ERROR: null value in column "b" violates not-null constraint
+DETAIL: Failing row contains (4, null).
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b < 3); -- ERROR
+ERROR: check constraint "t_at_m_check" is violated by some row
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b < 5);
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b = a); -- ERROR
+ERROR: constraint "t_at_m_check" for relation "test_at_modify_constr" already exists
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check_1 CHECK (b = a);
+INSERT INTO test_at_modify_constr VALUES(4,4);
+INSERT INTO test_at_modify_constr VALUES(5,5); -- ERROR
+ERROR: new row for relation "test_at_modify_constr" violates check constraint "t_at_m_check"
+DETAIL: N/A
+INSERT INTO test_at_modify_constr VALUES(6,'a'); -- ERROR
+ERROR: new row for relation "test_at_modify_constr" violates check constraint "t_at_m_check_1"
+DETAIL: N/A
+INSERT INTO test_at_modify_constr VALUES(0,'a');
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL PRIMARY KEY; -- ERROR
+ERROR: multiple primary keys for table "test_at_modify_constr" are not allowed
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL;
+INSERT INTO test_at_modify_constr VALUES(5,5); -- ERROR
+ERROR: new row for relation "test_at_modify_constr" violates check constraint "t_at_m_check"
+DETAIL: N/A
+SELECT b FROM test_at_modify_constr ORDER BY 1;
+ b
+---
+ 0
+ 1
+ 2
+ 3
+ 4
+(5 rows)
+
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL REFERENCES test_at_ref (a); -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: modify or change column REFERENCES constraint is not supported
+DROP TABLE test_at_modify_constr;
+-- test modify column default
+CREATE LOCAL TEMPORARY TABLE test_at_modify_default(
+ a int,
+ b int DEFAULT NULL
+);
+INSERT INTO test_at_modify_default VALUES(1,1);
+INSERT INTO test_at_modify_default VALUES(2,2);
+INSERT INTO test_at_modify_default VALUES(3,3);
+ALTER TABLE test_at_modify_default MODIFY b bigint DEFAULT (a+1); -- ERROR
+ERROR: default value cannot reference to a column
+HINT: Perhaps the default value is enclosed in double quotes
+ALTER TABLE test_at_modify_default MODIFY b bigint DEFAULT NULL;
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) DEFAULT 'a' GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ERROR: both default and generation expression specified for column "b" of table "test_at_modify_default"
+LINE 1: ...at_modify_default MODIFY b varchar(8) DEFAULT 'a' GENERATED ...
+ ^
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) DEFAULT 'a';
+INSERT INTO test_at_modify_default VALUES(0,DEFAULT);
+SELECT b FROM test_at_modify_default ORDER BY 1;
+ b
+---
+ 1
+ 2
+ 3
+ a
+(4 rows)
+
+ALTER TABLE test_at_modify_default MODIFY b int DEFAULT 4 AUTO_INCREMENT; -- ERROR
+ERROR: multiple default values specified for column "b" of table "test_at_modify_default"
+LINE 1: ...BLE test_at_modify_default MODIFY b int DEFAULT 4 AUTO_INCRE...
+ ^
+ALTER TABLE test_at_modify_default MODIFY b int DEFAULT 4;
+INSERT INTO test_at_modify_default VALUES(4,DEFAULT);
+SELECT b FROM test_at_modify_default ORDER BY 1;
+ b
+---
+ 0
+ 1
+ 2
+ 3
+ 4
+(5 rows)
+
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+SELECT a,b FROM test_at_modify_default ORDER BY 1,2;
+ a | b
+---+---
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 4
+ 4 | 5
+(5 rows)
+
+ALTER TABLE test_at_modify_default MODIFY a varchar(8) DEFAULT 'a';
+INSERT INTO test_at_modify_default VALUES(DEFAULT,DEFAULT);
+SELECT a,b FROM test_at_modify_default ORDER BY 1,2;
+ a | b
+---+---
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 4
+ 4 | 5
+ a | 1
+(6 rows)
+
+DROP TABLE test_at_modify_default;
+-- test modify column depended by generated column
+CREATE LOCAL TEMPORARY TABLE test_at_modify_generated(
+ a int,
+ b varchar(32),
+ c varchar(32) GENERATED ALWAYS AS (b) STORED
+);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_modify_generated(a,b) VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_modify_generated(a,b) VALUES(3,'2022-11-24 12:00:00');
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b | c
+---+---------------------+---------------------
+ 1 | 2022-11-22 12:00:00 | 2022-11-22 12:00:00
+ 2 | 2022-11-23 12:00:00 | 2022-11-23 12:00:00
+ 3 | 2022-11-24 12:00:00 | 2022-11-24 12:00:00
+(3 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b DATE;
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b | c
+---+------------+------------
+ 1 | 11-22-2022 | 11-22-2022
+ 2 | 11-23-2022 | 11-23-2022
+ 3 | 11-24-2022 | 11-24-2022
+(3 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b varchar(32);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b | c
+---+------------+------------
+ 1 | 11-22-2022 | 11-22-2022
+ 2 | 11-23-2022 | 11-23-2022
+ 3 | 11-24-2022 | 11-24-2022
+(3 rows)
+
+DROP TABLE test_at_modify_generated;
+-- test modify column AUTO_INCREMENT
+CREATE LOCAL TEMPORARY TABLE test_at_modify_autoinc(
+ a int,
+ b int
+);
+INSERT INTO test_at_modify_autoinc VALUES(1,NULL);
+INSERT INTO test_at_modify_autoinc VALUES(2,0);
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT; -- ERROR
+ERROR: auto_increment column must be defined as a unique or primary key
+ALTER TABLE test_at_modify_autoinc MODIFY b DECIMAL(4,2) AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ERROR: The datatype of column 'b' does not support auto_increment
+ALTER TABLE test_at_modify_autoinc MODIFY b serial AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change column to type 'serial'
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT NULL UNIQUE KEY;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_autoinc_b_key" for table "test_at_modify_autoinc"
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 | 2
+(2 rows)
+
+INSERT INTO test_at_modify_autoinc VALUES(3,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 | 2
+ 3 | 3
+(3 rows)
+
+ALTER TABLE test_at_modify_autoinc MODIFY COLUMN b int;
+INSERT INTO test_at_modify_autoinc VALUES(4,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 | 2
+ 3 | 3
+ 4 | 0
+(4 rows)
+
+ALTER TABLE test_at_modify_autoinc MODIFY b int AUTO_INCREMENT PRIMARY KEY, AUTO_INCREMENT=100;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_autoinc_pkey" for table "test_at_modify_autoinc"
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+-----
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+(4 rows)
+
+INSERT INTO test_at_modify_autoinc VALUES(5,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+-----
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+(5 rows)
+
+ALTER TABLE test_at_modify_autoinc AUTO_INCREMENT=1000;
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT;
+INSERT INTO test_at_modify_autoinc VALUES(6,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+------
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+ 6 | 1000
+(6 rows)
+
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE, MODIFY b int2 AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ERROR: Incorrect table definition, there can be only one auto_increment column
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE; -- ERROR
+ERROR: Incorrect column definition, there can be only one auto_increment column
+ALTER TABLE test_at_modify_autoinc MODIFY COLUMN b int;
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_autoinc_c_key" for table "test_at_modify_autoinc"
+INSERT INTO test_at_modify_autoinc VALUES(7,0,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b | c
+---+------+---
+ 1 | 100 | 1
+ 2 | 2 | 2
+ 3 | 3 | 3
+ 4 | 101 | 4
+ 5 | 102 | 5
+ 6 | 1000 | 6
+ 7 | 0 | 7
+(7 rows)
+
+ALTER TABLE test_at_modify_autoinc DROP COLUMN c , MODIFY b int2 AUTO_INCREMENT UNIQUE KEY;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_autoinc_b_key1" for table "test_at_modify_autoinc"
+INSERT INTO test_at_modify_autoinc VALUES(8,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+------
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+ 6 | 1000
+ 7 | 1001
+ 8 | 1002
+(8 rows)
+
+ALTER TABLE test_at_modify_autoinc MODIFY b float4; -- ALTER TYPE ONLY, KEEP AUTO_INCREMENT
+INSERT INTO test_at_modify_autoinc VALUES(9,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+------
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+ 6 | 1000
+ 7 | 1001
+ 8 | 1002
+ 9 | 1003
+(9 rows)
+
+DROP TABLE test_at_modify_autoinc;
+-- ------------------------------------------------------ test ALTER TABLE CHANGE
+-- test change column without data
+CREATE LOCAL TEMPORARY TABLE test_at_change(
+ a int,
+ b int NOT NULL
+);
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) NULL;
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) DEFAULT '0';
+ALTER TABLE test_at_change CHANGE b b1 int AUTO_INCREMENT PRIMARY KEY;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_pkey" for table "test_at_change"
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) UNIQUE;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_b_key" for table "test_at_change"
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) CHECK (b1 < 'a');
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) COLLATE "POSIX";
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+ALTER TABLE test_at_change CHANGE b1 b int NOT NULL;
+INSERT INTO test_at_change VALUES(1,1);
+DROP TABLE test_at_change;
+-- test change column datatype
+CREATE LOCAL TEMPORARY TABLE test_at_change_type(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_change_type VALUES(1,1);
+INSERT INTO test_at_change_type VALUES(2,2);
+INSERT INTO test_at_change_type VALUES(3,3);
+ALTER TABLE test_at_change_type CHANGE b b1 varchar(8);
+SELECT * FROM test_at_change_type where b1 = '3';
+ a | b1
+---+----
+ 3 | 3
+(1 row)
+
+ALTER TABLE test_at_change_type CHANGE b1 b DATE; -- ERROR
+ERROR: invalid input syntax for type date: "1"
+ALTER TABLE test_at_change_type CHANGE b1 b RAW;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+ a | b
+---+----
+ 1 | 01
+ 2 | 02
+ 3 | 03
+(3 rows)
+
+DROP TABLE test_at_change_type;
+CREATE LOCAL TEMPORARY TABLE test_at_change_type(
+ a int,
+ b text
+);
+INSERT INTO test_at_change_type VALUES(1,'beijing');
+INSERT INTO test_at_change_type VALUES(2,'shanghai');
+INSERT INTO test_at_change_type VALUES(3,'guangzhou');
+ALTER TABLE test_at_change_type CHANGE b b1 SET('beijing','shanghai','nanjing','wuhan'); -- ERROR
+NOTICE: ALTER TABLE will create implicit set "test_at_change_type_b1_set" for column "test_at_change_type.b1"
+ERROR: invalid input value for set test_at_change_type_b1_set: 'guangzhou'
+ALTER TABLE test_at_change_type CHANGE b b1 SET('beijing','shanghai','nanjing','guangzhou');
+NOTICE: ALTER TABLE will create implicit set "test_at_change_type_b1_set" for column "test_at_change_type.b1"
+ALTER TABLE test_at_change_type CHANGE b1 b SET('beijing','shanghai','guangzhou','wuhan'); -- ERROR
+ERROR: can not alter column type to another set
+DROP TABLE test_at_change_type;
+CREATE LOCAL TEMPORARY TABLE test_at_change_type(
+ a int,
+ b varchar(32)
+);
+INSERT INTO test_at_change_type VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_change_type VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_change_type VALUES(3,'2022-11-24 12:00:00');
+ALTER TABLE test_at_change_type CHANGE b b1 varchar(10); -- ERROR
+ERROR: value too long for type character varying(10)
+ALTER TABLE test_at_change_type CHANGE b b1 DATE;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+ a | b1
+---+------------
+ 1 | 11-22-2022
+ 2 | 11-23-2022
+ 3 | 11-24-2022
+(3 rows)
+
+DROP TABLE test_at_change_type;
+-- test change column constraint
+CREATE LOCAL TEMPORARY TABLE test_at_change_constr(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_change_constr VALUES(1,1);
+INSERT INTO test_at_change_constr VALUES(2,2);
+INSERT INTO test_at_change_constr VALUES(3,3);
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) NOT NULL NULL; -- ERROR
+ERROR: conflicting NULL/NOT NULL declarations for column "b1" of table "test_at_change_constr"
+LINE 1: ... test_at_change_constr CHANGE b b1 varchar(8) NOT NULL NULL;
+ ^
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) UNIQUE KEY NULL;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_constr_b1_key" for table "test_at_change_constr"
+INSERT INTO test_at_change_constr VALUES(3,3); -- ERROR
+ERROR: duplicate key value violates unique constraint "test_at_change_constr_b1_key"
+DETAIL: Key (b1)=(3) already exists.
+INSERT INTO test_at_change_constr VALUES(4,NULL);
+ALTER TABLE test_at_change_constr CHANGE b1 b int NULL PRIMARY KEY; -- ERROR
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_constr_pkey" for table "test_at_change_constr"
+ERROR: column "b" contains null values
+DELETE FROM test_at_change_constr WHERE b1 IS NULL;
+ALTER TABLE test_at_change_constr CHANGE b1 b int NULL PRIMARY KEY;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_constr_pkey" for table "test_at_change_constr"
+INSERT INTO test_at_change_constr VALUES(4,NULL); -- ERROR
+ERROR: null value in column "b" violates not-null constraint
+DETAIL: Failing row contains (4, null).
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) CONSTRAINT t_at_m_check CHECK (b1 < 3); -- ERROR
+ERROR: check constraint "t_at_m_check" is violated by some row
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) CONSTRAINT t_at_m_check CHECK (b1 < 5);
+ALTER TABLE test_at_change_constr CHANGE b1 b varchar(8) CONSTRAINT t_at_m_check CHECK (b = a); -- ERROR
+ERROR: constraint "t_at_m_check" for relation "test_at_change_constr" already exists
+ALTER TABLE test_at_change_constr CHANGE b1 b varchar(8) CONSTRAINT t_at_m_check_1 CHECK (b = a);
+INSERT INTO test_at_change_constr VALUES(4,4);
+INSERT INTO test_at_change_constr VALUES(5,5); -- ERROR
+ERROR: new row for relation "test_at_change_constr" violates check constraint "t_at_m_check"
+DETAIL: N/A
+INSERT INTO test_at_change_constr VALUES(6,'a'); -- ERROR
+ERROR: new row for relation "test_at_change_constr" violates check constraint "t_at_m_check_1"
+DETAIL: N/A
+INSERT INTO test_at_change_constr VALUES(0,'a');
+ALTER TABLE test_at_change_constr CHANGE b b1 int NOT NULL PRIMARY KEY; -- ERROR
+ERROR: multiple primary keys for table "test_at_change_constr" are not allowed
+ALTER TABLE test_at_change_constr CHANGE b b1 int NOT NULL;
+INSERT INTO test_at_change_constr VALUES(5,5); -- ERROR
+ERROR: new row for relation "test_at_change_constr" violates check constraint "t_at_m_check"
+DETAIL: N/A
+SELECT b1 FROM test_at_change_constr ORDER BY 1;
+ b1
+----
+ 0
+ 1
+ 2
+ 3
+ 4
+(5 rows)
+
+ALTER TABLE test_at_change_constr CHANGE b1 b int NOT NULL REFERENCES test_at_ref (a); -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: modify or change column REFERENCES constraint is not supported
+DROP TABLE test_at_change_constr;
+-- test change column default
+CREATE LOCAL TEMPORARY TABLE test_at_change_default(
+ a int,
+ b int DEFAULT NULL
+);
+INSERT INTO test_at_change_default VALUES(1,1);
+INSERT INTO test_at_change_default VALUES(2,2);
+INSERT INTO test_at_change_default VALUES(3,3);
+ALTER TABLE test_at_change_default CHANGE b b1 bigint DEFAULT (a+1); -- ERROR
+ERROR: default value cannot reference to a column
+HINT: Perhaps the default value is enclosed in double quotes
+ALTER TABLE test_at_change_default CHANGE b b1 bigint DEFAULT NULL;
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) DEFAULT 'a' GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ERROR: both default and generation expression specified for column "b" of table "test_at_change_default"
+LINE 1: ...change_default CHANGE b1 b varchar(8) DEFAULT 'a' GENERATED ...
+ ^
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) DEFAULT 'a';
+INSERT INTO test_at_change_default VALUES(0,DEFAULT);
+SELECT b FROM test_at_change_default ORDER BY 1;
+ b
+---
+ 1
+ 2
+ 3
+ a
+(4 rows)
+
+ALTER TABLE test_at_change_default CHANGE b b1 int DEFAULT 4 AUTO_INCREMENT; -- ERROR
+ERROR: multiple default values specified for column "b1" of table "test_at_change_default"
+LINE 1: ... test_at_change_default CHANGE b b1 int DEFAULT 4 AUTO_INCRE...
+ ^
+ALTER TABLE test_at_change_default CHANGE b b1 int DEFAULT 4;
+INSERT INTO test_at_change_default VALUES(4,DEFAULT);
+SELECT b1 FROM test_at_change_default ORDER BY 1;
+ b1
+----
+ 0
+ 1
+ 2
+ 3
+ 4
+(5 rows)
+
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+SELECT a,b FROM test_at_change_default ORDER BY 1,2;
+ a | b
+---+---
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 4
+ 4 | 5
+(5 rows)
+
+ALTER TABLE test_at_change_default CHANGE a a1 varchar(8) DEFAULT 'a';
+INSERT INTO test_at_change_default VALUES(DEFAULT,DEFAULT);
+SELECT * FROM test_at_change_default ORDER BY 1,2;
+ a1 | b
+----+---
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 4
+ 4 | 5
+ a | 1
+(6 rows)
+
+DROP TABLE test_at_change_default;
+-- test change column depended by generated column
+CREATE LOCAL TEMPORARY TABLE test_at_change_generated(
+ a int,
+ b varchar(32),
+ c varchar(32) GENERATED ALWAYS AS (b) STORED
+);
+INSERT INTO test_at_change_generated(a,b) VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_change_generated(a,b) VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_change_generated(a,b) VALUES(3,'2022-11-24 12:00:00');
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b | c
+---+---------------------+---------------------
+ 1 | 2022-11-22 12:00:00 | 2022-11-22 12:00:00
+ 2 | 2022-11-23 12:00:00 | 2022-11-23 12:00:00
+ 3 | 2022-11-24 12:00:00 | 2022-11-24 12:00:00
+(3 rows)
+
+ALTER TABLE test_at_change_generated CHANGE COLUMN b b1 DATE;
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b1 | c
+---+------------+------------
+ 1 | 11-22-2022 | 11-22-2022
+ 2 | 11-23-2022 | 11-23-2022
+ 3 | 11-24-2022 | 11-24-2022
+(3 rows)
+
+ALTER TABLE test_at_change_generated CHANGE COLUMN b1 b varchar(32);
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b | c
+---+------------+------------
+ 1 | 11-22-2022 | 11-22-2022
+ 2 | 11-23-2022 | 11-23-2022
+ 3 | 11-24-2022 | 11-24-2022
+(3 rows)
+
+DROP TABLE test_at_change_generated;
+-- test change column AUTO_INCREMENT
+CREATE LOCAL TEMPORARY TABLE test_at_change_autoinc(
+ a int,
+ b int
+);
+INSERT INTO test_at_change_autoinc VALUES(1,NULL);
+INSERT INTO test_at_change_autoinc VALUES(2,0);
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int2 AUTO_INCREMENT; -- ERROR
+ERROR: auto_increment column must be defined as a unique or primary key
+ALTER TABLE test_at_change_autoinc CHANGE b b1 DECIMAL(4,2) AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ERROR: The datatype of column 'b1' does not support auto_increment
+ALTER TABLE test_at_change_autoinc CHANGE b b1 serial AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change column to type 'serial'
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int2 AUTO_INCREMENT NULL UNIQUE KEY;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_autoinc_b1_key" for table "test_at_change_autoinc"
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1
+---+----
+ 1 |
+ 2 | 2
+(2 rows)
+
+INSERT INTO test_at_change_autoinc VALUES(3,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1
+---+----
+ 1 |
+ 2 | 2
+ 3 | 3
+(3 rows)
+
+ALTER TABLE test_at_change_autoinc CHANGE b1 b int;
+INSERT INTO test_at_change_autoinc VALUES(4,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 | 2
+ 3 | 3
+ 4 | 0
+(4 rows)
+
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int AUTO_INCREMENT PRIMARY KEY, AUTO_INCREMENT=100;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_autoinc_pkey" for table "test_at_change_autoinc"
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1
+---+-----
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+(4 rows)
+
+INSERT INTO test_at_change_autoinc VALUES(5,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1
+---+-----
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+(5 rows)
+
+ALTER TABLE test_at_change_autoinc AUTO_INCREMENT=1000;
+ALTER TABLE test_at_change_autoinc CHANGE b1 b int2 AUTO_INCREMENT;
+INSERT INTO test_at_change_autoinc VALUES(6,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b
+---+------
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+ 6 | 1000
+(6 rows)
+
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE, CHANGE b b1 int2 AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ERROR: Incorrect table definition, there can be only one auto_increment column
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE; -- ERROR
+ERROR: Incorrect column definition, there can be only one auto_increment column
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int;
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_autoinc_c_key" for table "test_at_change_autoinc"
+INSERT INTO test_at_change_autoinc VALUES(7,0,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1 | c
+---+------+---
+ 1 | 100 | 1
+ 2 | 2 | 2
+ 3 | 3 | 3
+ 4 | 101 | 4
+ 5 | 102 | 5
+ 6 | 1000 | 6
+ 7 | 0 | 7
+(7 rows)
+
+ALTER TABLE test_at_change_autoinc DROP COLUMN c , CHANGE b1 b int2 AUTO_INCREMENT UNIQUE KEY;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_autoinc_b_key" for table "test_at_change_autoinc"
+INSERT INTO test_at_change_autoinc VALUES(8,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b
+---+------
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+ 6 | 1000
+ 7 | 1001
+ 8 | 1002
+(8 rows)
+
+DROP TABLE test_at_change_autoinc;
+-- TEMPORARY view
+CREATE LOCAL TEMPORARY TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(1, '1', '1', 1);
+CREATE TEMPORARY VIEW test_modify_view_star AS select * from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+--------------------------------------------
+ SELECT * FROM test_at_modify_view_column;
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f4, test_at_modify_view_column.f3, test_at_modify_view_column.f2, test_at_modify_view_column.f1 FROM test_at_modify_view_column;
+(1 row)
+
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view test_modify_view_star
+-- END
+RESET CURRENT_SCHEMA;
+DROP SCHEMA atbdb_ltt_schema CASCADE;
+\c regression
+clean connection to all force for database atbdb_ltt;
+drop database if exists atbdb_ltt;
diff --git a/src/test/regress/expected/alter_table_modify_ustore.out b/src/test/regress/expected/alter_table_modify_ustore.out
new file mode 100644
index 000000000..ff30fd730
--- /dev/null
+++ b/src/test/regress/expected/alter_table_modify_ustore.out
@@ -0,0 +1,3183 @@
+create database atbdb_ustore WITH ENCODING 'UTF-8' dbcompatibility 'B';
+\c atbdb_ustore
+CREATE SCHEMA atbdb_ustore_schema;
+SET CURRENT_SCHEMA TO atbdb_ustore_schema;
+-- test modify column without data
+CREATE TABLE test_at_modify(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+ALTER TABLE test_at_modify MODIFY b varchar(8) NULL;
+\d+ test_at_modify;
+ Table "atbdb_ustore_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE test_at_modify MODIFY b varchar(8) DEFAULT '0';
+\d+ test_at_modify;
+ Table "atbdb_ustore_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+--------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | default '0'::character varying | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE test_at_modify MODIFY b int AUTO_INCREMENT PRIMARY KEY;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_b_seq" for serial column "test_at_modify.b"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_pkey" for table "test_at_modify"
+\d+ test_at_modify;
+ Table "atbdb_ustore_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-------------------------+---------+--------------+-------------
+ a | integer | | plain | |
+ b | integer | not null AUTO_INCREMENT | plain | |
+Indexes:
+ "test_at_modify_pkey" PRIMARY KEY, ubtree (b) WITH (storage_type=USTORE) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE test_at_modify MODIFY b varchar(8) UNIQUE;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_b_key" for table "test_at_modify"
+\d+ test_at_modify;
+ Table "atbdb_ustore_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | not null | extended | |
+Indexes:
+ "test_at_modify_pkey" PRIMARY KEY, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default
+ "test_at_modify_b_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=USTORE) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE test_at_modify MODIFY b varchar(8) CHECK (b < 'a');
+\d+ test_at_modify;
+ Table "atbdb_ustore_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | not null | extended | |
+Indexes:
+ "test_at_modify_pkey" PRIMARY KEY, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default
+ "test_at_modify_b_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default
+Check constraints:
+ "test_at_modify_b_check" CHECK (b::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE test_at_modify MODIFY b varchar(8) COLLATE "POSIX";
+\d+ test_at_modify;
+ Table "atbdb_ustore_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | collate POSIX not null | extended | |
+Indexes:
+ "test_at_modify_pkey" PRIMARY KEY, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default
+ "test_at_modify_b_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default
+Check constraints:
+ "test_at_modify_b_check" CHECK (b::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE test_at_modify MODIFY b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+\d+ test_at_modify;
+ Table "atbdb_ustore_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------------------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | not null generated always as ((a + 1)) stored | extended | |
+Indexes:
+ "test_at_modify_pkey" PRIMARY KEY, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default
+ "test_at_modify_b_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default
+Check constraints:
+ "test_at_modify_b_check" CHECK (b::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE test_at_modify MODIFY b int NOT NULL;
+\d+ test_at_modify;
+ Table "atbdb_ustore_schema.test_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b | integer | not null | plain | |
+Indexes:
+ "test_at_modify_pkey" PRIMARY KEY, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default
+ "test_at_modify_b_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default
+Check constraints:
+ "test_at_modify_b_check" CHECK (b::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select pg_get_tabledef('test_at_modify'::regclass);
+ pg_get_tabledef
+-------------------------------------------------------------------------------------------------------------------------
+ SET search_path = atbdb_ustore_schema; +
+ CREATE TABLE test_at_modify ( +
+ a integer, +
+ b integer NOT NULL, +
+ CONSTRAINT test_at_modify_b_check CHECK (((b)::text < 'a'::text)) +
+ ) +
+ WITH (orientation=row, storage_type=ustore, compression=no); +
+ ALTER TABLE test_at_modify ADD CONSTRAINT test_at_modify_b_key UNIQUE USING ubtree (b) WITH (storage_type=ustore); +
+ ALTER TABLE test_at_modify ADD CONSTRAINT test_at_modify_pkey PRIMARY KEY USING ubtree (b) WITH (storage_type=ustore);
+(1 row)
+
+INSERT INTO test_at_modify VALUES(1,1);
+DROP TABLE test_at_modify;
+-- test modify column datatype
+CREATE TABLE test_at_modify_type(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_type VALUES(1,1);
+INSERT INTO test_at_modify_type VALUES(2,2);
+INSERT INTO test_at_modify_type VALUES(3,3);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b varchar(8);
+SELECT * FROM test_at_modify_type where b = '3';
+ a | b
+---+---
+ 3 | 3
+(1 row)
+
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DATE; -- ERROR
+ERROR: invalid input syntax for type date: "1"
+ALTER TABLE test_at_modify_type MODIFY COLUMN b RAW;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+ a | b
+---+----
+ 1 | 01
+ 2 | 02
+ 3 | 03
+(3 rows)
+
+DROP TABLE test_at_modify_type;
+CREATE TABLE test_at_modify_type(
+ a int,
+ b serial NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+NOTICE: CREATE TABLE will create implicit sequence "test_at_modify_type_b_seq" for serial column "test_at_modify_type.b"
+INSERT INTO test_at_modify_type VALUES(1,1);
+INSERT INTO test_at_modify_type VALUES(2,2);
+INSERT INTO test_at_modify_type VALUES(3,3);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b int;
+ALTER TABLE test_at_modify_type MODIFY COLUMN b int[]; -- ERROR
+ERROR: column "b" cannot be cast automatically to type integer[]
+HINT: Specify a USING expression to perform the conversion.
+ALTER TABLE test_at_modify_type MODIFY COLUMN b int16;
+ALTER TABLE test_at_modify_type MODIFY COLUMN b serial; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change column to type 'serial'
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DECIMAL(4,2);
+SELECT * FROM test_at_modify_type where b = 3;
+ a | b
+---+------
+ 3 | 3.00
+(1 row)
+
+ALTER TABLE test_at_modify_type MODIFY COLUMN b BOOLEAN;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+ a | b
+---+---
+ 1 | t
+ 2 | t
+ 3 | t
+(3 rows)
+
+DROP TABLE test_at_modify_type;
+CREATE TABLE test_at_modify_type(
+ a int,
+ b text
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_type VALUES(1,'beijing');
+INSERT INTO test_at_modify_type VALUES(2,'shanghai');
+INSERT INTO test_at_modify_type VALUES(3,'guangzhou');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','nanjing','wuhan'); -- ERROR
+NOTICE: ALTER TABLE will create implicit set "test_at_modify_type_b_set" for column "test_at_modify_type.b"
+ERROR: invalid input value for set test_at_modify_type_b_set: 'guangzhou'
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','nanjing','guangzhou');
+NOTICE: ALTER TABLE will create implicit set "test_at_modify_type_b_set" for column "test_at_modify_type.b"
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','guangzhou','wuhan'); -- ERROR
+ERROR: can not alter column type to another set
+select pg_get_tabledef('test_at_modify_type'::regclass);
+ pg_get_tabledef
+--------------------------------------------------------------
+ SET search_path = atbdb_ustore_schema; +
+ CREATE TABLE test_at_modify_type ( +
+ a integer, +
+ b SET('beijing', 'shanghai', 'nanjing', 'guangzhou') +
+ ) +
+ WITH (orientation=row, storage_type=ustore, compression=no);
+(1 row)
+
+DROP TABLE test_at_modify_type;
+CREATE TABLE test_at_modify_type(
+ a int,
+ b varchar(32)
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_type VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_modify_type VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_modify_type VALUES(3,'2022-11-24 12:00:00');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b varchar(10); -- ERROR
+ERROR: value too long for type character varying(10)
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DATE;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+ a | b
+---+------------
+ 1 | 11-22-2022
+ 2 | 11-23-2022
+ 3 | 11-24-2022
+(3 rows)
+
+DROP TABLE test_at_modify_type;
+CREATE TABLE test_at_modify_type(
+ a int,
+ b int[] NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_type VALUES(1,ARRAY[1,1]);
+INSERT INTO test_at_modify_type VALUES(2,ARRAY[2,2]);
+INSERT INTO test_at_modify_type VALUES(3,ARRAY[3,3]);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b float4[];
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+ a | b
+---+-------
+ 1 | {1,1}
+ 2 | {2,2}
+ 3 | {3,3}
+(3 rows)
+
+DROP TABLE test_at_modify_type;
+-- test modify column constraint
+CREATE TABLE test_at_modify_constr(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_constr VALUES(1,1);
+INSERT INTO test_at_modify_constr VALUES(2,2);
+INSERT INTO test_at_modify_constr VALUES(3,3);
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) NOT NULL NULL; -- ERROR
+ERROR: conflicting NULL/NOT NULL declarations for column "b" of table "test_at_modify_constr"
+LINE 1: ...BLE test_at_modify_constr MODIFY b varchar(8) NOT NULL NULL;
+ ^
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) UNIQUE KEY NULL;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_constr_b_key" for table "test_at_modify_constr"
+INSERT INTO test_at_modify_constr VALUES(3,3); -- ERROR
+ERROR: duplicate key value violates unique constraint "test_at_modify_constr_b_key"
+DETAIL: Key (b)=(3) already exists.
+INSERT INTO test_at_modify_constr VALUES(4,NULL);
+ALTER TABLE test_at_modify_constr MODIFY b int NULL PRIMARY KEY; -- ERROR
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_constr_pkey" for table "test_at_modify_constr"
+ERROR: column "b" contains null values
+DELETE FROM test_at_modify_constr WHERE b IS NULL;
+ALTER TABLE test_at_modify_constr MODIFY b int NULL PRIMARY KEY;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_constr_pkey" for table "test_at_modify_constr"
+INSERT INTO test_at_modify_constr VALUES(4,NULL); -- ERROR
+ERROR: null value in column "b" violates not-null constraint
+DETAIL: Failing row contains (4, null).
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b < 3); -- ERROR
+ERROR: check constraint "t_at_m_check" is violated by some row
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b < 5);
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b = a); -- ERROR
+ERROR: constraint "t_at_m_check" for relation "test_at_modify_constr" already exists
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check_1 CHECK (b = a);
+INSERT INTO test_at_modify_constr VALUES(4,4);
+INSERT INTO test_at_modify_constr VALUES(5,5); -- ERROR
+ERROR: new row for relation "test_at_modify_constr" violates check constraint "t_at_m_check"
+DETAIL: N/A
+INSERT INTO test_at_modify_constr VALUES(6,'a'); -- ERROR
+ERROR: new row for relation "test_at_modify_constr" violates check constraint "t_at_m_check_1"
+DETAIL: N/A
+INSERT INTO test_at_modify_constr VALUES(0,'a');
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL PRIMARY KEY; -- ERROR
+ERROR: multiple primary keys for table "test_at_modify_constr" are not allowed
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL;
+INSERT INTO test_at_modify_constr VALUES(5,5); -- ERROR
+ERROR: new row for relation "test_at_modify_constr" violates check constraint "t_at_m_check"
+DETAIL: N/A
+SELECT b FROM test_at_modify_constr ORDER BY 1;
+ b
+---
+ 0
+ 1
+ 2
+ 3
+ 4
+(5 rows)
+
+select pg_get_tabledef('test_at_modify_constr'::regclass);
+ pg_get_tabledef
+---------------------------------------------------------------------------------------------------------------------------------------
+ SET search_path = atbdb_ustore_schema; +
+ CREATE TABLE test_at_modify_constr ( +
+ a integer, +
+ b integer NOT NULL, +
+ CONSTRAINT t_at_m_check_1 CHECK (((b)::bigint = a)), +
+ CONSTRAINT t_at_m_check CHECK (((b)::bigint < 5)) +
+ ) +
+ WITH (orientation=row, storage_type=ustore, compression=no); +
+ ALTER TABLE test_at_modify_constr ADD CONSTRAINT test_at_modify_constr_b_key UNIQUE USING ubtree (b) WITH (storage_type=ustore); +
+ ALTER TABLE test_at_modify_constr ADD CONSTRAINT test_at_modify_constr_pkey PRIMARY KEY USING ubtree (b) WITH (storage_type=ustore);
+(1 row)
+
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL REFERENCES test_at_ref (a); -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: modify or change column REFERENCES constraint is not supported
+DROP TABLE test_at_modify_constr;
+-- test modify column default
+CREATE TABLE test_at_modify_default(
+ a int,
+ b int DEFAULT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_default VALUES(1,1);
+INSERT INTO test_at_modify_default VALUES(2,2);
+INSERT INTO test_at_modify_default VALUES(3,3);
+ALTER TABLE test_at_modify_default MODIFY b bigint DEFAULT (a+1); -- ERROR
+ERROR: default value cannot reference to a column
+HINT: Perhaps the default value is enclosed in double quotes
+ALTER TABLE test_at_modify_default MODIFY b bigint DEFAULT NULL;
+\d+ test_at_modify_default;
+ Table "atbdb_ustore_schema.test_at_modify_default"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b | bigint | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) DEFAULT 'a' GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ERROR: both default and generation expression specified for column "b" of table "test_at_modify_default"
+LINE 1: ...at_modify_default MODIFY b varchar(8) DEFAULT 'a' GENERATED ...
+ ^
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) DEFAULT 'a';
+\d+ test_at_modify_default;
+ Table "atbdb_ustore_schema.test_at_modify_default"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+--------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | default 'a'::character varying | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+INSERT INTO test_at_modify_default VALUES(0,DEFAULT);
+SELECT b FROM test_at_modify_default ORDER BY 1;
+ b
+---
+ 1
+ 2
+ 3
+ a
+(4 rows)
+
+ALTER TABLE test_at_modify_default MODIFY b int DEFAULT 4 AUTO_INCREMENT; -- ERROR
+ERROR: multiple default values specified for column "b" of table "test_at_modify_default"
+LINE 1: ...BLE test_at_modify_default MODIFY b int DEFAULT 4 AUTO_INCRE...
+ ^
+ALTER TABLE test_at_modify_default MODIFY b int DEFAULT 4;
+INSERT INTO test_at_modify_default VALUES(4,DEFAULT);
+SELECT b FROM test_at_modify_default ORDER BY 1;
+ b
+---
+ 0
+ 1
+ 2
+ 3
+ 4
+(5 rows)
+
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+SELECT a,b FROM test_at_modify_default ORDER BY 1,2;
+ a | b
+---+---
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 4
+ 4 | 5
+(5 rows)
+
+ALTER TABLE test_at_modify_default MODIFY a varchar(8) DEFAULT 'a';
+INSERT INTO test_at_modify_default VALUES(DEFAULT,DEFAULT);
+SELECT a,b FROM test_at_modify_default ORDER BY 1,2;
+ a | b
+---+---
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 4
+ 4 | 5
+ a | 1
+(6 rows)
+
+\d+ test_at_modify_default;
+ Table "atbdb_ustore_schema.test_at_modify_default"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+------------------------------------------------+----------+--------------+-------------
+ a | character varying(8) | default 'a'::character varying | extended | |
+ b | character varying(8) | generated always as (((a)::bigint + 1)) stored | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+DROP TABLE test_at_modify_default;
+-- test modify column depended by generated column
+CREATE TABLE test_at_modify_generated(
+ a int,
+ b varchar(32),
+ c varchar(32) GENERATED ALWAYS AS (b) STORED
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_modify_generated(a,b) VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_modify_generated(a,b) VALUES(3,'2022-11-24 12:00:00');
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b | c
+---+---------------------+---------------------
+ 1 | 2022-11-22 12:00:00 | 2022-11-22 12:00:00
+ 2 | 2022-11-23 12:00:00 | 2022-11-23 12:00:00
+ 3 | 2022-11-24 12:00:00 | 2022-11-24 12:00:00
+(3 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b DATE;
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b | c
+---+------------+------------
+ 1 | 11-22-2022 | 11-22-2022
+ 2 | 11-23-2022 | 11-23-2022
+ 3 | 11-24-2022 | 11-24-2022
+(3 rows)
+
+\d+ test_at_modify_generated
+ Table "atbdb_ustore_schema.test_at_modify_generated"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------+--------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | date | | plain | |
+ c | character varying(32) | generated always as (b) stored | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b varchar(32);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b | c
+---+------------+------------
+ 1 | 11-22-2022 | 11-22-2022
+ 2 | 11-23-2022 | 11-23-2022
+ 3 | 11-24-2022 | 11-24-2022
+(3 rows)
+
+DROP TABLE test_at_modify_generated;
+CREATE TABLE test_at_modify_generated(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_generated(a,b) VALUES(-1,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(0,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(2,DEFAULT);
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b varchar(8) GENERATED ALWAYS AS (a) STORED FIRST, MODIFY COLUMN a varchar(8) AFTER b;
+INSERT INTO test_at_modify_generated(a,b) VALUES(3,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY b::int,a::int;
+ b | a
+----+----
+ -1 | -1
+ 0 | 0
+ 1 | 1
+ 2 | 2
+ 3 | 3
+(5 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a int AFTER b, MODIFY COLUMN b int GENERATED ALWAYS AS (a+1) STORED FIRST;
+INSERT INTO test_at_modify_generated(a,b) VALUES(4,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ b | a
+---+----
+ 0 | -1
+ 1 | 0
+ 2 | 1
+ 3 | 2
+ 4 | 3
+ 5 | 4
+(6 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b varchar(8) AFTER a, MODIFY COLUMN a varchar(8) AFTER b;
+INSERT INTO test_at_modify_generated(a,b) VALUES(5,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ b | a
+---+----
+ 0 | -1
+ 1 | 0
+ 2 | 1
+ 3 | 2
+ 4 | 3
+ 5 | 4
+ | 5
+(7 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b int GENERATED ALWAYS AS (a) STORED;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a int FIRST, MODIFY COLUMN b int FIRST;
+INSERT INTO test_at_modify_generated(a,b) VALUES(6,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ b | a
+----+----
+ -1 | -1
+ 0 | 0
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+ 5 | 5
+ | 6
+(8 rows)
+
+DROP TABLE test_at_modify_generated;
+CREATE TABLE test_at_modify_generated(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_generated(a,b) VALUES(-1,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(0,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,DEFAULT);
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a bool;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a int;
+INSERT INTO test_at_modify_generated(a,b) VALUES(100,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b
+-----+-----
+ 0 | 1
+ 1 | 2
+ 1 | 2
+ 100 | 101
+(4 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a bool, MODIFY COLUMN b varchar(32);
+\d test_at_modify_generated
+Table "atbdb_ustore_schema.test_at_modify_generated"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ a | boolean |
+ b | character varying(32) |
+
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b
+---+-----
+ f | 1
+ t | 101
+ t | 2
+ t | 2
+(4 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b int GENERATED ALWAYS AS (a+1) STORED, MODIFY COLUMN a int;
+\d test_at_modify_generated
+ Table "atbdb_ustore_schema.test_at_modify_generated"
+ Column | Type | Modifiers
+--------+---------+------------------------------------------------
+ a | integer |
+ b | integer | generated always as (((a)::bigint + 1)) stored
+
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b
+---+---
+ 0 | 1
+ 1 | 2
+ 1 | 2
+ 1 | 2
+(4 rows)
+
+INSERT INTO test_at_modify_generated(a,b) VALUES(100,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b
+-----+-----
+ 0 | 1
+ 1 | 2
+ 1 | 2
+ 1 | 2
+ 100 | 101
+(5 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b bool GENERATED ALWAYS AS (a) STORED;
+\d test_at_modify_generated
+Table "atbdb_ustore_schema.test_at_modify_generated"
+ Column | Type | Modifiers
+--------+---------+--------------------------------
+ a | integer |
+ b | boolean | generated always as (a) stored
+
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b
+-----+---
+ 0 | f
+ 1 | t
+ 1 | t
+ 1 | t
+ 100 | t
+(5 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a bool;
+\d test_at_modify_generated
+Table "atbdb_ustore_schema.test_at_modify_generated"
+ Column | Type | Modifiers
+--------+---------+--------------------------------
+ a | boolean |
+ b | boolean | generated always as (a) stored
+
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b
+---+---
+ f | f
+ t | t
+ t | t
+ t | t
+ t | t
+(5 rows)
+
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a int;
+INSERT INTO test_at_modify_generated(a,b) VALUES(100,DEFAULT);
+\d test_at_modify_generated
+Table "atbdb_ustore_schema.test_at_modify_generated"
+ Column | Type | Modifiers
+--------+---------+--------------------------------
+ a | integer |
+ b | boolean | generated always as (a) stored
+
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ a | b
+-----+---
+ 0 | f
+ 1 | t
+ 1 | t
+ 1 | t
+ 1 | t
+ 100 | t
+(6 rows)
+
+DROP TABLE test_at_modify_generated;
+-- error generated column reference generated column
+CREATE TABLE test_at_modify_generated(
+ a int,
+ b int,
+ c int GENERATED ALWAYS AS (b+1) STORED
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,1);
+ALTER TABLE test_at_modify_generated MODIFY b float4 GENERATED ALWAYS AS (a+1000) STORED; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: A generated column cannot reference another generated column.
+ALTER TABLE test_at_modify_generated MODIFY b float4 GENERATED ALWAYS AS (c+1000) STORED; -- ERROR
+ERROR: cannot use generated column "c" in column generation expression
+DETAIL: A generated column cannot reference another generated column.
+ALTER TABLE test_at_modify_generated MODIFY a float4 GENERATED ALWAYS AS (b+1000) STORED, MODIFY c float4 GENERATED ALWAYS AS (a+1000) STORED; -- ERROR
+ERROR: cannot use generated column "a" in column generation expression
+DETAIL: A generated column cannot reference another generated column.
+ALTER TABLE test_at_modify_generated MODIFY COLUMN c float4, MODIFY b float4 GENERATED ALWAYS AS (c+1000) STORED;
+DROP TABLE test_at_modify_generated;
+-- test modify column AUTO_INCREMENT
+CREATE TABLE test_at_modify_autoinc(
+ a int,
+ b int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_autoinc VALUES(1,NULL);
+INSERT INTO test_at_modify_autoinc VALUES(2,0);
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq" for serial column "test_at_modify_autoinc.b"
+ERROR: auto_increment column must be defined as a unique or primary key
+ALTER TABLE test_at_modify_autoinc MODIFY b DECIMAL(4,2) AUTO_INCREMENT UNIQUE KEY; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq" for serial column "test_at_modify_autoinc.b"
+ERROR: The datatype of column 'b' does not support auto_increment
+ALTER TABLE test_at_modify_autoinc MODIFY b serial AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change column to type 'serial'
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT NULL UNIQUE KEY;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq" for serial column "test_at_modify_autoinc.b"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_autoinc_b_key" for table "test_at_modify_autoinc"
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 | 2
+(2 rows)
+
+INSERT INTO test_at_modify_autoinc VALUES(3,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 | 2
+ 3 | 3
+(3 rows)
+
+ALTER TABLE test_at_modify_autoinc MODIFY COLUMN b int;
+INSERT INTO test_at_modify_autoinc VALUES(4,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 | 2
+ 3 | 3
+ 4 | 0
+(4 rows)
+
+ALTER TABLE test_at_modify_autoinc MODIFY b int AUTO_INCREMENT PRIMARY KEY, AUTO_INCREMENT=100;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq" for serial column "test_at_modify_autoinc.b"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_autoinc_pkey" for table "test_at_modify_autoinc"
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+-----
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+(4 rows)
+
+INSERT INTO test_at_modify_autoinc VALUES(5,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+-----
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+(5 rows)
+
+ALTER TABLE test_at_modify_autoinc AUTO_INCREMENT=1000;
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq1" for serial column "test_at_modify_autoinc.b"
+INSERT INTO test_at_modify_autoinc VALUES(6,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b
+---+------
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+ 6 | 1000
+(6 rows)
+
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE, MODIFY b int2 AUTO_INCREMENT UNIQUE KEY; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_c_seq" for serial column "test_at_modify_autoinc.c"
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq" for serial column "test_at_modify_autoinc.b"
+ERROR: Incorrect table definition, there can be only one auto_increment column
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_c_seq" for serial column "test_at_modify_autoinc.c"
+ERROR: Incorrect column definition, there can be only one auto_increment column
+ALTER TABLE test_at_modify_autoinc MODIFY COLUMN b int;
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_c_seq" for serial column "test_at_modify_autoinc.c"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_autoinc_c_key" for table "test_at_modify_autoinc"
+INSERT INTO test_at_modify_autoinc VALUES(7,0,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ a | b | c
+---+------+---
+ 1 | 100 | 1
+ 2 | 2 | 2
+ 3 | 3 | 3
+ 4 | 101 | 4
+ 5 | 102 | 5
+ 6 | 1000 | 6
+ 7 | 0 | 7
+(7 rows)
+
+ALTER TABLE test_at_modify_autoinc DROP COLUMN c , MODIFY b int2 AUTO_INCREMENT UNIQUE KEY FIRST;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_autoinc_b_seq" for serial column "test_at_modify_autoinc.b"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_modify_autoinc_b_key1" for table "test_at_modify_autoinc"
+INSERT INTO test_at_modify_autoinc(a,b) VALUES(8,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 2,1;
+ b | a
+------+---
+ 100 | 1
+ 2 | 2
+ 3 | 3
+ 101 | 4
+ 102 | 5
+ 1000 | 6
+ 1001 | 7
+ 1002 | 8
+(8 rows)
+
+ALTER TABLE test_at_modify_autoinc MODIFY b float4; -- ALTER TYPE ONLY, KEEP AUTO_INCREMENT
+INSERT INTO test_at_modify_autoinc(a,b) VALUES(9,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 2,1;
+ b | a
+------+---
+ 100 | 1
+ 2 | 2
+ 3 | 3
+ 101 | 4
+ 102 | 5
+ 1000 | 6
+ 1001 | 7
+ 1002 | 8
+ 1003 | 9
+(9 rows)
+
+DROP TABLE test_at_modify_autoinc;
+-- test generated column reference auto_increment column
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,0);
+INSERT INTO test_at_modify_fa VALUES(21,22,0);
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c int AUTO_INCREMENT PRIMARY KEY, MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_fa_c_seq" for serial column "test_at_modify_fa.c"
+ERROR: generated column cannot refer to auto_increment column
+ALTER TABLE test_at_modify_fa MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED, MODIFY COLUMN c int AUTO_INCREMENT PRIMARY KEY; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_fa_c_seq" for serial column "test_at_modify_fa.c"
+ERROR: generated column cannot refer to auto_increment column
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c bigint AUTO_INCREMENT PRIMARY KEY, MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_fa_c_seq" for serial column "test_at_modify_fa.c"
+ERROR: generated column cannot refer to auto_increment column
+ALTER TABLE test_at_modify_fa MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED, MODIFY COLUMN c bigint AUTO_INCREMENT PRIMARY KEY; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_fa_c_seq" for serial column "test_at_modify_fa.c"
+ERROR: generated column cannot refer to auto_increment column
+DROP TABLE test_at_modify_fa;
+-- test modify column depended by other objects
+CREATE TABLE test_at_modify_depend(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_depend VALUES(1,1);
+INSERT INTO test_at_modify_depend VALUES(2,2);
+INSERT INTO test_at_modify_depend VALUES(3,3);
+-- --PROCEDURE contains column
+CREATE OR REPLACE PROCEDURE test_at_modify_proc(IN p_in int)
+ AS
+ BEGIN
+ INSERT INTO test_at_modify_depend(a,b) VALUES(p_in, p_in);
+ END;
+/
+ALTER TABLE test_at_modify_depend MODIFY b varchar(8) NOT NULL;
+CALL test_at_modify_proc(2);
+ test_at_modify_proc
+---------------------
+
+(1 row)
+
+DROP PROCEDURE test_at_modify_proc;
+-- --TRIGGER contains and depends column
+CREATE OR REPLACE FUNCTION tg_bf_test_at_modify_func() RETURNS TRIGGER AS
+$$
+ DECLARE
+ BEGIN
+ UPDATE test_at_modify_depend SET b = NULL WHERE a < NEW.a;
+ RETURN NEW;
+ END
+$$ LANGUAGE PLPGSQL;
+CREATE TRIGGER tg_bf_test_at_modify
+ AFTER UPDATE ON test_at_modify_depend
+ FOR EACH ROW WHEN ( NEW.b IS NULL AND OLD.b = OLD.a)
+ EXECUTE PROCEDURE tg_bf_test_at_modify_func();
+ALTER TABLE test_at_modify_depend MODIFY b int NULL DEFAULT 0;
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+ a | b
+---+---
+ 1 | 1
+ 2 | 2
+ 2 | 2
+ 3 | 3
+(4 rows)
+
+UPDATE test_at_modify_depend SET b = NULL WHERE a = 2;
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 |
+ 2 |
+ 3 | 3
+(4 rows)
+
+DROP TRIGGER tg_bf_test_at_modify ON test_at_modify_depend;
+-- --TRIGGER contains but does not depend column
+CREATE TRIGGER tg_bf_test_at_modify
+ BEFORE INSERT ON test_at_modify_depend
+ FOR EACH ROW
+ EXECUTE PROCEDURE tg_bf_test_at_modify_func();
+ALTER TABLE test_at_modify_depend MODIFY b varchar(8) NULL;
+INSERT INTO test_at_modify_depend VALUES (4, 4);
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 |
+ 2 |
+ 3 |
+ 4 | 4
+(5 rows)
+
+DROP TRIGGER tg_bf_test_at_modify ON test_at_modify_depend;
+DROP PROCEDURE tg_bf_test_at_modify_func;
+-- --VIEW depends column
+CREATE VIEW test_at_modify_view AS SELECT b FROM test_at_modify_depend;
+ALTER TABLE test_at_modify_depend MODIFY b bigint NULL; -- ERROR
+ERROR: cannot change data type of view column "b" from character varying(8) to bigint
+ALTER TABLE test_at_modify_depend MODIFY b int NULL; -- ERROR
+ERROR: cannot change data type of view column "b" from character varying(8) to integer
+ALTER TABLE test_at_modify_depend MODIFY b varchar(8) NULL;
+SELECT * FROM test_at_modify_view ORDER BY 1;
+ b
+---
+ 4
+
+
+
+
+(5 rows)
+
+DROP VIEW test_at_modify_view;
+CREATE VIEW test_at_modify_view AS SELECT a FROM test_at_modify_depend where b > 0;
+CREATE VIEW test_at_modify_view1 AS SELECT * FROM test_at_modify_view;
+ALTER TABLE test_at_modify_depend MODIFY b bigint NULL GENERATED ALWAYS AS (a+1);
+ALTER TABLE test_at_modify_depend MODIFY b varchar(8) NULL;
+ALTER TABLE test_at_modify_depend MODIFY b int NULL;
+DROP VIEW test_at_modify_view1;
+DROP VIEW test_at_modify_view;
+CREATE materialized VIEW test_at_modify_view AS SELECT b FROM test_at_modify_depend; --ERROR
+ERROR: materialized view is not supported in ustore yet
+-- --TABLE reference column.
+DELETE FROM test_at_modify_depend;
+ALTER TABLE test_at_modify_depend MODIFY b INT PRIMARY KEY;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_depend_pkey" for table "test_at_modify_depend"
+CREATE TABLE test_at_modify_ref(
+ a int,
+ b int,
+ FOREIGN KEY (b) REFERENCES test_at_modify_depend(b) ON DELETE SET NULL
+);
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b varchar(8);
+INSERT INTO test_at_modify_ref VALUES(0,0); -- ERROR
+ERROR: insert or update on table "test_at_modify_ref" violates foreign key constraint "test_at_modify_ref_b_fkey"
+DETAIL: Key (b)=(0) is not present in table "test_at_modify_depend".
+INSERT INTO test_at_modify_depend VALUES(0,0);
+INSERT INTO test_at_modify_ref VALUES(0,0);
+ALTER TABLE test_at_modify_ref MODIFY COLUMN b varchar(8) GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ERROR: invalid ON DELETE action for foreign key constraint containing generated column
+ALTER TABLE test_at_modify_ref MODIFY COLUMN b varchar(8);
+\d+ test_at_modify_ref
+ Table "atbdb_ustore_schema.test_at_modify_ref"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | | extended | |
+Foreign-key constraints:
+ "test_at_modify_ref_b_fkey" FOREIGN KEY (b) REFERENCES test_at_modify_depend(b) ON DELETE SET NULL
+Has OIDs: no
+Options: orientation=row, compression=no
+
+DROP TABLE test_at_modify_ref;
+-- --TABLE reference self column.
+CREATE TABLE test_at_modify_ref(
+ a int PRIMARY KEY,
+ b int,
+ FOREIGN KEY (b) REFERENCES test_at_modify_ref(a) ON DELETE SET NULL
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_ref_pkey" for table "test_at_modify_ref"
+INSERT INTO test_at_modify_ref VALUES(0,0);
+ALTER TABLE test_at_modify_ref MODIFY COLUMN b varchar(8) GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ERROR: invalid ON DELETE action for foreign key constraint containing generated column
+ALTER TABLE test_at_modify_ref MODIFY COLUMN b varchar(8);
+ALTER TABLE test_at_modify_ref MODIFY COLUMN a varchar(8);
+INSERT INTO test_at_modify_ref VALUES('a','a');
+DROP TABLE test_at_modify_ref;
+-- --RULE reference column.
+CREATE RULE test_at_modify_rule AS ON INSERT TO test_at_modify_depend WHERE (b is null) DO INSTEAD UPDATE test_at_modify_depend SET b=0;
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b int not null; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: modify or change a column used by materialized view or rule is not supported
+DROP RULE test_at_modify_rule ON test_at_modify_depend;
+-- --RLSPOLICY reference column.
+DROP TABLE test_at_modify_depend;
+CREATE ROLE at_modify_role PASSWORD 'Gauss@123';
+CREATE TABLE test_at_modify_depend(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_depend VALUES(0,0);
+GRANT USAGE ON SCHEMA atbdb_ustore_schema TO at_modify_role;
+GRANT SELECT ON test_at_modify_depend TO at_modify_role;
+ALTER TABLE test_at_modify_depend ENABLE ROW LEVEL SECURITY;
+CREATE ROW LEVEL SECURITY POLICY test_at_modify_rls ON test_at_modify_depend AS RESTRICTIVE FOR SELECT TO at_modify_role USING(b >= 20);
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b int not null;
+INSERT INTO test_at_modify_depend VALUES(21,21);
+SET ROLE at_modify_role PASSWORD 'Gauss@123';
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+ a | b
+----+----
+ 21 | 21
+(1 row)
+
+RESET ROLE;
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+ a | b
+----+----
+ 0 | 0
+ 21 | 21
+(2 rows)
+
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b bool not null;
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b int not null;
+INSERT INTO test_at_modify_depend VALUES(22,22);
+SET ROLE at_modify_role PASSWORD 'Gauss@123';
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+ a | b
+----+----
+ 22 | 22
+(1 row)
+
+RESET ROLE;
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+ a | b
+----+----
+ 0 | 0
+ 21 | 1
+ 22 | 22
+(3 rows)
+
+DROP TABLE test_at_modify_depend;
+REVOKE ALL PRIVILEGES ON SCHEMA atbdb_ustore_schema FROM at_modify_role;
+DROP ROLE at_modify_role;
+-- ------------------------------------------------------ test ALTER TABLE CHANGE
+-- test change column without data
+CREATE TABLE test_at_change(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) NULL;
+\d+ test_at_change;
+ Table "atbdb_ustore_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b1 | character varying(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) DEFAULT '0';
+\d+ test_at_change;
+ Table "atbdb_ustore_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+--------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | default '0'::character varying | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE test_at_change CHANGE b b1 int AUTO_INCREMENT PRIMARY KEY;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_b1_seq" for serial column "test_at_change.b1"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_pkey" for table "test_at_change"
+\d+ test_at_change;
+ Table "atbdb_ustore_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-------------------------+---------+--------------+-------------
+ a | integer | | plain | |
+ b1 | integer | not null AUTO_INCREMENT | plain | |
+Indexes:
+ "test_at_change_pkey" PRIMARY KEY, ubtree (b1) WITH (storage_type=USTORE) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) UNIQUE;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_b_key" for table "test_at_change"
+\d+ test_at_change;
+ Table "atbdb_ustore_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | not null | extended | |
+Indexes:
+ "test_at_change_pkey" PRIMARY KEY, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default
+ "test_at_change_b_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=USTORE) TABLESPACE pg_default
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) CHECK (b1 < 'a');
+\d+ test_at_change;
+ Table "atbdb_ustore_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b1 | character varying(8) | not null | extended | |
+Indexes:
+ "test_at_change_pkey" PRIMARY KEY, ubtree (b1) WITH (storage_type=ustore) TABLESPACE pg_default
+ "test_at_change_b_key" UNIQUE CONSTRAINT, ubtree (b1) WITH (storage_type=ustore) TABLESPACE pg_default
+Check constraints:
+ "test_at_change_b1_check" CHECK (b1::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) COLLATE "POSIX";
+\d+ test_at_change;
+ Table "atbdb_ustore_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | collate POSIX not null | extended | |
+Indexes:
+ "test_at_change_pkey" PRIMARY KEY, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default
+ "test_at_change_b_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default
+Check constraints:
+ "test_at_change_b1_check" CHECK (b::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+\d+ test_at_change;
+ Table "atbdb_ustore_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------------------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b1 | character varying(8) | not null generated always as ((a + 1)) stored | extended | |
+Indexes:
+ "test_at_change_pkey" PRIMARY KEY, ubtree (b1) WITH (storage_type=ustore) TABLESPACE pg_default
+ "test_at_change_b_key" UNIQUE CONSTRAINT, ubtree (b1) WITH (storage_type=ustore) TABLESPACE pg_default
+Check constraints:
+ "test_at_change_b1_check" CHECK (b1::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE test_at_change CHANGE b1 b int NOT NULL;
+\d+ test_at_change;
+ Table "atbdb_ustore_schema.test_at_change"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b | integer | not null | plain | |
+Indexes:
+ "test_at_change_pkey" PRIMARY KEY, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default
+ "test_at_change_b_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default
+Check constraints:
+ "test_at_change_b1_check" CHECK (b::text < 'a'::text)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select pg_get_tabledef('test_at_change'::regclass);
+ pg_get_tabledef
+-------------------------------------------------------------------------------------------------------------------------
+ SET search_path = atbdb_ustore_schema; +
+ CREATE TABLE test_at_change ( +
+ a integer, +
+ b integer NOT NULL, +
+ CONSTRAINT test_at_change_b1_check CHECK (((b)::text < 'a'::text)) +
+ ) +
+ WITH (orientation=row, storage_type=ustore, compression=no); +
+ ALTER TABLE test_at_change ADD CONSTRAINT test_at_change_b_key UNIQUE USING ubtree (b) WITH (storage_type=ustore); +
+ ALTER TABLE test_at_change ADD CONSTRAINT test_at_change_pkey PRIMARY KEY USING ubtree (b) WITH (storage_type=ustore);
+(1 row)
+
+INSERT INTO test_at_change VALUES(1,1);
+DROP TABLE test_at_change;
+-- test change column datatype
+CREATE TABLE test_at_change_type(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_type VALUES(1,1);
+INSERT INTO test_at_change_type VALUES(2,2);
+INSERT INTO test_at_change_type VALUES(3,3);
+ALTER TABLE test_at_change_type CHANGE b b1 varchar(8);
+SELECT * FROM test_at_change_type where b1 = '3';
+ a | b1
+---+----
+ 3 | 3
+(1 row)
+
+ALTER TABLE test_at_change_type CHANGE b1 b DATE; -- ERROR
+ERROR: invalid input syntax for type date: "1"
+ALTER TABLE test_at_change_type CHANGE b1 b RAW;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+ a | b
+---+----
+ 1 | 01
+ 2 | 02
+ 3 | 03
+(3 rows)
+
+DROP TABLE test_at_change_type;
+CREATE TABLE test_at_change_type(
+ a int,
+ b serial NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+NOTICE: CREATE TABLE will create implicit sequence "test_at_change_type_b_seq" for serial column "test_at_change_type.b"
+INSERT INTO test_at_change_type VALUES(1,1);
+INSERT INTO test_at_change_type VALUES(2,2);
+INSERT INTO test_at_change_type VALUES(3,3);
+ALTER TABLE test_at_change_type CHANGE b b1 int;
+ALTER TABLE test_at_change_type CHANGE b1 b serial; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change column to type 'serial'
+ALTER TABLE test_at_change_type CHANGE b1 b DECIMAL(4,2);
+SELECT * FROM test_at_change_type where b = 3;
+ a | b
+---+------
+ 3 | 3.00
+(1 row)
+
+ALTER TABLE test_at_change_type CHANGE b b1 BOOLEAN;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+ a | b1
+---+----
+ 1 | t
+ 2 | t
+ 3 | t
+(3 rows)
+
+DROP TABLE test_at_change_type;
+CREATE TABLE test_at_change_type(
+ a int,
+ b text
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_type VALUES(1,'beijing');
+INSERT INTO test_at_change_type VALUES(2,'shanghai');
+INSERT INTO test_at_change_type VALUES(3,'guangzhou');
+ALTER TABLE test_at_change_type CHANGE b b1 SET('beijing','shanghai','nanjing','wuhan'); -- ERROR
+NOTICE: ALTER TABLE will create implicit set "test_at_change_type_b1_set" for column "test_at_change_type.b1"
+ERROR: invalid input value for set test_at_change_type_b1_set: 'guangzhou'
+ALTER TABLE test_at_change_type CHANGE b b1 SET('beijing','shanghai','nanjing','guangzhou');
+NOTICE: ALTER TABLE will create implicit set "test_at_change_type_b1_set" for column "test_at_change_type.b1"
+ALTER TABLE test_at_change_type CHANGE b1 b SET('beijing','shanghai','guangzhou','wuhan'); -- ERROR
+ERROR: can not alter column type to another set
+select pg_get_tabledef('test_at_change_type'::regclass);
+ pg_get_tabledef
+--------------------------------------------------------------
+ SET search_path = atbdb_ustore_schema; +
+ CREATE TABLE test_at_change_type ( +
+ a integer, +
+ b1 SET('beijing', 'shanghai', 'nanjing', 'guangzhou') +
+ ) +
+ WITH (orientation=row, storage_type=ustore, compression=no);
+(1 row)
+
+DROP TABLE test_at_change_type;
+CREATE TABLE test_at_change_type(
+ a int,
+ b varchar(32)
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_type VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_change_type VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_change_type VALUES(3,'2022-11-24 12:00:00');
+ALTER TABLE test_at_change_type CHANGE b b1 varchar(10); -- ERROR
+ERROR: value too long for type character varying(10)
+ALTER TABLE test_at_change_type CHANGE b b1 DATE;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+ a | b1
+---+------------
+ 1 | 11-22-2022
+ 2 | 11-23-2022
+ 3 | 11-24-2022
+(3 rows)
+
+DROP TABLE test_at_change_type;
+-- test change column constraint
+CREATE TABLE test_at_change_constr(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_constr VALUES(1,1);
+INSERT INTO test_at_change_constr VALUES(2,2);
+INSERT INTO test_at_change_constr VALUES(3,3);
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) NOT NULL NULL; -- ERROR
+ERROR: conflicting NULL/NOT NULL declarations for column "b1" of table "test_at_change_constr"
+LINE 1: ... test_at_change_constr CHANGE b b1 varchar(8) NOT NULL NULL;
+ ^
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) UNIQUE KEY NULL;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_constr_b1_key" for table "test_at_change_constr"
+INSERT INTO test_at_change_constr VALUES(3,3); -- ERROR
+ERROR: duplicate key value violates unique constraint "test_at_change_constr_b1_key"
+DETAIL: Key (b1)=(3) already exists.
+INSERT INTO test_at_change_constr VALUES(4,NULL);
+ALTER TABLE test_at_change_constr CHANGE b1 b int NULL PRIMARY KEY; -- ERROR
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_constr_pkey" for table "test_at_change_constr"
+ERROR: column "b" contains null values
+DELETE FROM test_at_change_constr WHERE b1 IS NULL;
+ALTER TABLE test_at_change_constr CHANGE b1 b int NULL PRIMARY KEY;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_constr_pkey" for table "test_at_change_constr"
+INSERT INTO test_at_change_constr VALUES(4,NULL); -- ERROR
+ERROR: null value in column "b" violates not-null constraint
+DETAIL: Failing row contains (4, null).
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) CONSTRAINT t_at_m_check CHECK (b1 < 3); -- ERROR
+ERROR: check constraint "t_at_m_check" is violated by some row
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) CONSTRAINT t_at_m_check CHECK (b1 < 5);
+ALTER TABLE test_at_change_constr CHANGE b1 b varchar(8) CONSTRAINT t_at_m_check CHECK (b = a); -- ERROR
+ERROR: constraint "t_at_m_check" for relation "test_at_change_constr" already exists
+ALTER TABLE test_at_change_constr CHANGE b1 b varchar(8) CONSTRAINT t_at_m_check_1 CHECK (b = a);
+INSERT INTO test_at_change_constr VALUES(4,4);
+INSERT INTO test_at_change_constr VALUES(5,5); -- ERROR
+ERROR: new row for relation "test_at_change_constr" violates check constraint "t_at_m_check"
+DETAIL: N/A
+INSERT INTO test_at_change_constr VALUES(6,'a'); -- ERROR
+ERROR: new row for relation "test_at_change_constr" violates check constraint "t_at_m_check_1"
+DETAIL: N/A
+INSERT INTO test_at_change_constr VALUES(0,'a');
+ALTER TABLE test_at_change_constr CHANGE b b1 int NOT NULL PRIMARY KEY; -- ERROR
+ERROR: multiple primary keys for table "test_at_change_constr" are not allowed
+ALTER TABLE test_at_change_constr CHANGE b b1 int NOT NULL;
+INSERT INTO test_at_change_constr VALUES(5,5); -- ERROR
+ERROR: new row for relation "test_at_change_constr" violates check constraint "t_at_m_check"
+DETAIL: N/A
+SELECT b1 FROM test_at_change_constr ORDER BY 1;
+ b1
+----
+ 0
+ 1
+ 2
+ 3
+ 4
+(5 rows)
+
+select pg_get_tabledef('test_at_change_constr'::regclass);
+ pg_get_tabledef
+----------------------------------------------------------------------------------------------------------------------------------------
+ SET search_path = atbdb_ustore_schema; +
+ CREATE TABLE test_at_change_constr ( +
+ a integer, +
+ b1 integer NOT NULL, +
+ CONSTRAINT t_at_m_check_1 CHECK (((b1)::bigint = a)), +
+ CONSTRAINT t_at_m_check CHECK (((b1)::bigint < 5)) +
+ ) +
+ WITH (orientation=row, storage_type=ustore, compression=no); +
+ ALTER TABLE test_at_change_constr ADD CONSTRAINT test_at_change_constr_b1_key UNIQUE USING ubtree (b1) WITH (storage_type=ustore); +
+ ALTER TABLE test_at_change_constr ADD CONSTRAINT test_at_change_constr_pkey PRIMARY KEY USING ubtree (b1) WITH (storage_type=ustore);
+(1 row)
+
+ALTER TABLE test_at_change_constr CHANGE b1 b int NOT NULL REFERENCES test_at_ref (a); -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: modify or change column REFERENCES constraint is not supported
+DROP TABLE test_at_change_constr;
+-- test change column default
+CREATE TABLE test_at_change_default(
+ a int,
+ b int DEFAULT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_default VALUES(1,1);
+INSERT INTO test_at_change_default VALUES(2,2);
+INSERT INTO test_at_change_default VALUES(3,3);
+ALTER TABLE test_at_change_default CHANGE b b1 bigint DEFAULT (a+1); -- ERROR
+ERROR: default value cannot reference to a column
+HINT: Perhaps the default value is enclosed in double quotes
+ALTER TABLE test_at_change_default CHANGE b b1 bigint DEFAULT NULL;
+\d+ test_at_change_default;
+ Table "atbdb_ustore_schema.test_at_change_default"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b1 | bigint | | plain | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) DEFAULT 'a' GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ERROR: both default and generation expression specified for column "b" of table "test_at_change_default"
+LINE 1: ...change_default CHANGE b1 b varchar(8) DEFAULT 'a' GENERATED ...
+ ^
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) DEFAULT 'a';
+\d+ test_at_change_default;
+ Table "atbdb_ustore_schema.test_at_change_default"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+--------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b | character varying(8) | default 'a'::character varying | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+INSERT INTO test_at_change_default VALUES(0,DEFAULT);
+SELECT b FROM test_at_change_default ORDER BY 1;
+ b
+---
+ 1
+ 2
+ 3
+ a
+(4 rows)
+
+ALTER TABLE test_at_change_default CHANGE b b1 int DEFAULT 4 AUTO_INCREMENT; -- ERROR
+ERROR: multiple default values specified for column "b1" of table "test_at_change_default"
+LINE 1: ... test_at_change_default CHANGE b b1 int DEFAULT 4 AUTO_INCRE...
+ ^
+ALTER TABLE test_at_change_default CHANGE b b1 int DEFAULT 4;
+INSERT INTO test_at_change_default VALUES(4,DEFAULT);
+SELECT b1 FROM test_at_change_default ORDER BY 1;
+ b1
+----
+ 0
+ 1
+ 2
+ 3
+ 4
+(5 rows)
+
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+SELECT a,b FROM test_at_change_default ORDER BY 1,2;
+ a | b
+---+---
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 4
+ 4 | 5
+(5 rows)
+
+ALTER TABLE test_at_change_default CHANGE a a1 varchar(8) DEFAULT 'a';
+INSERT INTO test_at_change_default VALUES(DEFAULT,DEFAULT);
+SELECT * FROM test_at_change_default ORDER BY 1,2;
+ a1 | b
+----+---
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 4
+ 4 | 5
+ a | 1
+(6 rows)
+
+\d+ test_at_change_default;
+ Table "atbdb_ustore_schema.test_at_change_default"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-------------------------------------------------+----------+--------------+-------------
+ a1 | character varying(8) | default 'a'::character varying | extended | |
+ b | character varying(8) | generated always as (((a1)::bigint + 1)) stored | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+DROP TABLE test_at_change_default;
+-- test change column depended by generated column
+CREATE TABLE test_at_change_generated(
+ a int,
+ b varchar(32),
+ c varchar(32) GENERATED ALWAYS AS (b) STORED
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_generated(a,b) VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_change_generated(a,b) VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_change_generated(a,b) VALUES(3,'2022-11-24 12:00:00');
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b | c
+---+---------------------+---------------------
+ 1 | 2022-11-22 12:00:00 | 2022-11-22 12:00:00
+ 2 | 2022-11-23 12:00:00 | 2022-11-23 12:00:00
+ 3 | 2022-11-24 12:00:00 | 2022-11-24 12:00:00
+(3 rows)
+
+ALTER TABLE test_at_change_generated CHANGE COLUMN b b1 DATE;
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b1 | c
+---+------------+------------
+ 1 | 11-22-2022 | 11-22-2022
+ 2 | 11-23-2022 | 11-23-2022
+ 3 | 11-24-2022 | 11-24-2022
+(3 rows)
+
+\d+ test_at_change_generated
+ Table "atbdb_ustore_schema.test_at_change_generated"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------+---------------------------------+----------+--------------+-------------
+ a | integer | | plain | |
+ b1 | date | | plain | |
+ c | character varying(32) | generated always as (b1) stored | extended | |
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE test_at_change_generated CHANGE COLUMN b1 b varchar(32) AFTER c;
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | c | b
+---+------------+------------
+ 1 | 11-22-2022 | 11-22-2022
+ 2 | 11-23-2022 | 11-23-2022
+ 3 | 11-24-2022 | 11-24-2022
+(3 rows)
+
+DROP TABLE test_at_change_generated;
+CREATE TABLE test_at_change_generated(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_generated(a,b) VALUES(-1,DEFAULT);
+INSERT INTO test_at_change_generated(a,b) VALUES(0,DEFAULT);
+INSERT INTO test_at_change_generated(a,b) VALUES(1,DEFAULT);
+ALTER TABLE test_at_change_generated CHANGE COLUMN a a1 bool;
+ALTER TABLE test_at_change_generated CHANGE COLUMN a1 a int;
+INSERT INTO test_at_change_generated(a,b) VALUES(100,DEFAULT);
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b
+-----+-----
+ 0 | 1
+ 1 | 2
+ 1 | 2
+ 100 | 101
+(4 rows)
+
+ALTER TABLE test_at_change_generated CHANGE COLUMN a a1 bool, MODIFY COLUMN b varchar(32);
+\d test_at_change_generated
+Table "atbdb_ustore_schema.test_at_change_generated"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ a1 | boolean |
+ b | character varying(32) |
+
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a1 | b
+----+-----
+ f | 1
+ t | 101
+ t | 2
+ t | 2
+(4 rows)
+
+ALTER TABLE test_at_change_generated MODIFY COLUMN b int GENERATED ALWAYS AS (a+1) STORED, CHANGE COLUMN a1 a int;
+\d test_at_change_generated
+ Table "atbdb_ustore_schema.test_at_change_generated"
+ Column | Type | Modifiers
+--------+---------+------------------------------------------------
+ a | integer |
+ b | integer | generated always as (((a)::bigint + 1)) stored
+
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b
+---+---
+ 0 | 1
+ 1 | 2
+ 1 | 2
+ 1 | 2
+(4 rows)
+
+INSERT INTO test_at_change_generated(a,b) VALUES(100,DEFAULT);
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b
+-----+-----
+ 0 | 1
+ 1 | 2
+ 1 | 2
+ 1 | 2
+ 100 | 101
+(5 rows)
+
+ALTER TABLE test_at_change_generated MODIFY COLUMN b bool GENERATED ALWAYS AS (a) STORED;
+\d test_at_change_generated
+Table "atbdb_ustore_schema.test_at_change_generated"
+ Column | Type | Modifiers
+--------+---------+--------------------------------
+ a | integer |
+ b | boolean | generated always as (a) stored
+
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b
+-----+---
+ 0 | f
+ 1 | t
+ 1 | t
+ 1 | t
+ 100 | t
+(5 rows)
+
+ALTER TABLE test_at_change_generated CHANGE COLUMN a a1 bool;
+\d test_at_change_generated
+Table "atbdb_ustore_schema.test_at_change_generated"
+ Column | Type | Modifiers
+--------+---------+---------------------------------
+ a1 | boolean |
+ b | boolean | generated always as (a1) stored
+
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a1 | b
+----+---
+ f | f
+ t | t
+ t | t
+ t | t
+ t | t
+(5 rows)
+
+ALTER TABLE test_at_change_generated CHANGE COLUMN a1 a int;
+INSERT INTO test_at_change_generated(a,b) VALUES(100,DEFAULT);
+\d test_at_change_generated
+Table "atbdb_ustore_schema.test_at_change_generated"
+ Column | Type | Modifiers
+--------+---------+--------------------------------
+ a | integer |
+ b | boolean | generated always as (a) stored
+
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ a | b
+-----+---
+ 0 | f
+ 1 | t
+ 1 | t
+ 1 | t
+ 1 | t
+ 100 | t
+(6 rows)
+
+DROP TABLE test_at_change_generated;
+-- test change column AUTO_INCREMENT
+CREATE TABLE test_at_change_autoinc(
+ a int,
+ b int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_autoinc VALUES(1,NULL);
+INSERT INTO test_at_change_autoinc VALUES(2,0);
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int2 AUTO_INCREMENT; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b1_seq" for serial column "test_at_change_autoinc.b1"
+ERROR: auto_increment column must be defined as a unique or primary key
+ALTER TABLE test_at_change_autoinc CHANGE b b1 DECIMAL(4,2) AUTO_INCREMENT UNIQUE KEY; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b1_seq" for serial column "test_at_change_autoinc.b1"
+ERROR: The datatype of column 'b1' does not support auto_increment
+ALTER TABLE test_at_change_autoinc CHANGE b b1 serial AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change column to type 'serial'
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int2 AUTO_INCREMENT NULL UNIQUE KEY;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b1_seq" for serial column "test_at_change_autoinc.b1"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_autoinc_b1_key" for table "test_at_change_autoinc"
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1
+---+----
+ 1 |
+ 2 | 2
+(2 rows)
+
+INSERT INTO test_at_change_autoinc VALUES(3,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1
+---+----
+ 1 |
+ 2 | 2
+ 3 | 3
+(3 rows)
+
+ALTER TABLE test_at_change_autoinc CHANGE b1 b int;
+INSERT INTO test_at_change_autoinc VALUES(4,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b
+---+---
+ 1 |
+ 2 | 2
+ 3 | 3
+ 4 | 0
+(4 rows)
+
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int AUTO_INCREMENT PRIMARY KEY, AUTO_INCREMENT=100;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b1_seq" for serial column "test_at_change_autoinc.b1"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_autoinc_pkey" for table "test_at_change_autoinc"
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1
+---+-----
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+(4 rows)
+
+INSERT INTO test_at_change_autoinc VALUES(5,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1
+---+-----
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+(5 rows)
+
+ALTER TABLE test_at_change_autoinc AUTO_INCREMENT=1000;
+ALTER TABLE test_at_change_autoinc CHANGE b1 b int2 AUTO_INCREMENT;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b_seq" for serial column "test_at_change_autoinc.b"
+INSERT INTO test_at_change_autoinc VALUES(6,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b
+---+------
+ 1 | 100
+ 2 | 2
+ 3 | 3
+ 4 | 101
+ 5 | 102
+ 6 | 1000
+(6 rows)
+
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE, CHANGE b b1 int2 AUTO_INCREMENT UNIQUE KEY; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_c_seq" for serial column "test_at_change_autoinc.c"
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b1_seq" for serial column "test_at_change_autoinc.b1"
+ERROR: Incorrect table definition, there can be only one auto_increment column
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_c_seq" for serial column "test_at_change_autoinc.c"
+ERROR: Incorrect column definition, there can be only one auto_increment column
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int;
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_c_seq" for serial column "test_at_change_autoinc.c"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_autoinc_c_key" for table "test_at_change_autoinc"
+INSERT INTO test_at_change_autoinc VALUES(7,0,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ a | b1 | c
+---+------+---
+ 1 | 100 | 1
+ 2 | 2 | 2
+ 3 | 3 | 3
+ 4 | 101 | 4
+ 5 | 102 | 5
+ 6 | 1000 | 6
+ 7 | 0 | 7
+(7 rows)
+
+ALTER TABLE test_at_change_autoinc DROP COLUMN c , CHANGE b1 b int2 AUTO_INCREMENT UNIQUE KEY FIRST;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_change_autoinc_b_seq" for serial column "test_at_change_autoinc.b"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_change_autoinc_b_key" for table "test_at_change_autoinc"
+INSERT INTO test_at_change_autoinc(a,b) VALUES(8,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 2,1;
+ b | a
+------+---
+ 100 | 1
+ 2 | 2
+ 3 | 3
+ 101 | 4
+ 102 | 5
+ 1000 | 6
+ 1001 | 7
+ 1002 | 8
+(8 rows)
+
+DROP TABLE test_at_change_autoinc;
+-- test change column depended by other objects
+CREATE TABLE test_at_change_depend(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_depend VALUES(1,1);
+INSERT INTO test_at_change_depend VALUES(2,2);
+INSERT INTO test_at_change_depend VALUES(3,3);
+-- --PROCEDURE contains column
+CREATE OR REPLACE PROCEDURE test_at_change_proc(IN p_in int)
+ AS
+ BEGIN
+ INSERT INTO test_at_change_depend(a,b) VALUES(p_in, p_in);
+ END;
+/
+ALTER TABLE test_at_change_depend CHANGE b b1 varchar(8) NOT NULL;
+CALL test_at_change_proc(2); -- ERROR
+ERROR: column "b" of relation "test_at_change_depend" does not exist
+LINE 1: INSERT INTO test_at_change_depend(a,b) VALUES(p_in, p_in)
+ ^
+QUERY: INSERT INTO test_at_change_depend(a,b) VALUES(p_in, p_in)
+CONTEXT: PL/pgSQL function test_at_change_proc(integer) line 2 at SQL statement
+DROP PROCEDURE test_at_change_proc;
+-- --TRIGGER contains and depends column
+CREATE OR REPLACE FUNCTION tg_bf_test_at_change_func() RETURNS TRIGGER AS
+$$
+ DECLARE
+ BEGIN
+ UPDATE test_at_change_depend SET b1 = NULL WHERE a < NEW.a;
+ RETURN NEW;
+ END
+$$ LANGUAGE PLPGSQL;
+CREATE TRIGGER tg_bf_test_at_change
+ AFTER UPDATE ON test_at_change_depend
+ FOR EACH ROW WHEN ( NEW.b1 IS NULL AND OLD.b1 = OLD.a)
+ EXECUTE PROCEDURE tg_bf_test_at_change_func();
+ALTER TABLE test_at_change_depend CHANGE b1 b varchar(8) NULL DEFAULT '0';
+UPDATE test_at_change_depend SET b = NULL WHERE a = 2; -- ERROR
+ERROR: column "b1" of relation "test_at_change_depend" does not exist
+LINE 1: UPDATE test_at_change_depend SET b1 = NULL WHERE a < NEW.a
+ ^
+QUERY: UPDATE test_at_change_depend SET b1 = NULL WHERE a < NEW.a
+CONTEXT: PL/pgSQL function tg_bf_test_at_change_func() line 4 at SQL statement
+DROP TRIGGER tg_bf_test_at_change ON test_at_change_depend;
+DROP FUNCTION tg_bf_test_at_change_func;
+-- --VIEW depends column
+CREATE VIEW test_at_change_view AS SELECT b FROM test_at_change_depend;
+ALTER TABLE test_at_change_depend CHANGE b b1 bigint NULL; -- ERROR
+ERROR: cannot change data type of view column "b" from character varying(8) to bigint
+ALTER TABLE test_at_change_depend CHANGE b b1 int NULL; -- ERROR
+ERROR: cannot change data type of view column "b" from character varying(8) to integer
+ALTER TABLE test_at_change_depend CHANGE b b1 varchar(8) NULL;
+SELECT b FROM test_at_change_view ORDER BY 1;
+ b
+---
+ 1
+ 2
+ 3
+(3 rows)
+
+DROP VIEW test_at_change_view;
+CREATE VIEW test_at_change_view AS SELECT a FROM test_at_change_depend where b1 > 0;
+CREATE VIEW test_at_change_view1 AS SELECT * FROM test_at_change_view;
+ALTER TABLE test_at_change_depend CHANGE b1 b bigint NULL GENERATED ALWAYS AS (a+1);
+ALTER TABLE test_at_change_depend CHANGE b b1 varchar(8) NULL;
+ALTER TABLE test_at_change_depend CHANGE b1 b int NULL;
+SELECT * FROM test_at_change_view1 ORDER BY 1;
+ a
+---
+ 1
+ 2
+ 3
+(3 rows)
+
+DROP VIEW test_at_change_view1;
+DROP VIEW test_at_change_view;
+CREATE materialized VIEW test_at_change_view AS SELECT b FROM test_at_change_depend; -- ERROR
+ERROR: materialized view is not supported in ustore yet
+-- --TABLE reference column.
+DELETE FROM test_at_change_depend;
+ALTER TABLE test_at_change_depend CHANGE b b INT PRIMARY KEY;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_change_depend_pkey" for table "test_at_change_depend"
+CREATE TABLE test_at_change_ref(
+ a int,
+ b int,
+ FOREIGN KEY (b) REFERENCES test_at_change_depend(b) ON DELETE SET NULL
+) WITH(STORAGE_TYPE=USTORE);
+ALTER TABLE test_at_change_depend CHANGE COLUMN b b1 varchar(8);
+INSERT INTO test_at_change_ref VALUES(0,0); -- ERROR
+ERROR: insert or update on table "test_at_change_ref" violates foreign key constraint "test_at_change_ref_b_fkey"
+DETAIL: Key (b)=(0) is not present in table "test_at_change_depend".
+INSERT INTO test_at_change_depend VALUES(0,0);
+INSERT INTO test_at_change_ref VALUES(0,0);
+ALTER TABLE test_at_change_ref CHANGE COLUMN b b1 varchar(8) GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ERROR: invalid ON DELETE action for foreign key constraint containing generated column
+ALTER TABLE test_at_change_ref CHANGE COLUMN b b1 varchar(8);
+\d+ test_at_change_ref
+ Table "atbdb_ustore_schema.test_at_change_ref"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | integer | | plain | |
+ b1 | character varying(8) | | extended | |
+Foreign-key constraints:
+ "test_at_change_ref_b_fkey" FOREIGN KEY (b1) REFERENCES test_at_change_depend(b1) ON DELETE SET NULL
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+DROP TABLE test_at_change_ref;
+-- --TABLE reference self column.
+CREATE TABLE test_at_change_ref(
+ a int PRIMARY KEY,
+ b int,
+ FOREIGN KEY (b) REFERENCES test_at_change_ref(a) ON DELETE SET NULL
+) WITH(STORAGE_TYPE=USTORE);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_change_ref_pkey" for table "test_at_change_ref"
+INSERT INTO test_at_change_ref VALUES(0,0);
+ALTER TABLE test_at_change_ref CHANGE COLUMN b b1 varchar(8) GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ERROR: invalid ON DELETE action for foreign key constraint containing generated column
+ALTER TABLE test_at_change_ref CHANGE COLUMN b b1 varchar(8);
+ALTER TABLE test_at_change_ref CHANGE COLUMN a a1 varchar(8);
+INSERT INTO test_at_change_ref VALUES('a','a');
+DROP TABLE test_at_change_ref;
+-- --RULE reference column.
+CREATE RULE test_at_change_rule AS ON INSERT TO test_at_change_depend WHERE (b1 is null) DO INSTEAD UPDATE test_at_change_depend SET b1=0;
+ALTER TABLE test_at_change_depend CHANGE COLUMN b1 b bigint not null; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: modify or change a column used by materialized view or rule is not supported
+DROP RULE test_at_change_rule ON test_at_change_depend;
+-- --RLSPOLICY reference column.
+DROP TABLE test_at_change_depend;
+CREATE ROLE at_change_role PASSWORD 'Gauss@123';
+CREATE TABLE test_at_change_depend(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_depend VALUES(0,0);
+GRANT USAGE ON SCHEMA atbdb_ustore_schema TO at_change_role;
+GRANT SELECT ON test_at_change_depend TO at_change_role;
+ALTER TABLE test_at_change_depend ENABLE ROW LEVEL SECURITY;
+CREATE ROW LEVEL SECURITY POLICY test_at_change_rls ON test_at_change_depend AS RESTRICTIVE FOR SELECT TO at_change_role USING(b >= 20);
+ALTER TABLE test_at_change_depend CHANGE COLUMN b b1 int not null;
+INSERT INTO test_at_change_depend VALUES(21,21);
+SET ROLE at_change_role PASSWORD 'Gauss@123';
+SELECT * FROM test_at_change_depend ORDER BY 1,2;
+ a | b1
+----+----
+ 21 | 21
+(1 row)
+
+RESET ROLE;
+SELECT * FROM test_at_change_depend ORDER BY 1,2;
+ a | b1
+----+----
+ 0 | 0
+ 21 | 21
+(2 rows)
+
+ALTER TABLE test_at_change_depend CHANGE COLUMN b1 b2 bool not null;
+ALTER TABLE test_at_change_depend CHANGE COLUMN b2 b3 int not null;
+INSERT INTO test_at_change_depend VALUES(22,22);
+SET ROLE at_change_role PASSWORD 'Gauss@123';
+SELECT * FROM test_at_change_depend ORDER BY 1,2;
+ a | b3
+----+----
+ 22 | 22
+(1 row)
+
+RESET ROLE;
+SELECT * FROM test_at_change_depend ORDER BY 1,2;
+ a | b3
+----+----
+ 0 | 0
+ 21 | 1
+ 22 | 22
+(3 rows)
+
+DROP TABLE test_at_change_depend;
+REVOKE ALL PRIVILEGES ON SCHEMA atbdb_ustore_schema FROM at_change_role;
+DROP ROLE at_change_role;
+-- test alter command order
+CREATE TABLE test_at_pass(
+ a int,
+ b int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_pass VALUES(1,0);
+ALTER TABLE test_at_pass ADD COLUMN c int, DROP COLUMN c; -- ERROR
+ERROR: column "c" of relation "test_at_pass" does not exist
+ALTER TABLE test_at_pass ADD COLUMN c int DEFAULT 0, MODIFY COLUMN c bigint; -- ERROR
+ERROR: column "c" of relation "test_at_pass" does not exist
+ALTER TABLE test_at_pass ADD COLUMN c int DEFAULT 0, CHANGE COLUMN c c1 bigint; -- ERROR
+ERROR: column "c" does not exist
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint, MODIFY COLUMN b float4; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change column "b" twice
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint, CHANGE COLUMN b b1 float4; -- ERROR
+ERROR: column "b" of relation "test_at_pass" does not exist
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint, CHANGE COLUMN b1 b2 bigint; -- ERROR
+ERROR: column "b1" of relation "test_at_pass" does not exist
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint, DROP COLUMN b; -- ERROR
+ERROR: column "b" of relation "test_at_pass" does not exist
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint, DROP COLUMN b; -- ERROR
+ERROR: column "b" of relation "test_at_pass" does not exist
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint, DROP COLUMN b1; -- ERROR
+ERROR: column "b1" of relation "test_at_pass" does not exist
+ALTER TABLE test_at_pass MODIFY a bigint, MODIFY COLUMN a VARCHAR(8); -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change column "a" twice
+ALTER TABLE test_at_pass CHANGE COLUMN b a bigint, CHANGE COLUMN a b VARCHAR(8); -- ERROR
+ERROR: column "a" of relation "test_at_pass" already exists
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint, ALTER COLUMN b SET DEFAULT 100;
+\d test_at_pass
+Table "atbdb_ustore_schema.test_at_pass"
+ Column | Type | Modifiers
+--------+---------+-------------
+ a | integer |
+ b | bigint | default 100
+
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint DEFAULT 100, ALTER COLUMN b DROP DEFAULT;
+\d test_at_pass
+Table "atbdb_ustore_schema.test_at_pass"
+ Column | Type | Modifiers
+--------+---------+-------------
+ a | integer |
+ b | bigint | default 100
+
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint, ALTER COLUMN b1 SET DEFAULT 100;
+\d test_at_pass
+Table "atbdb_ustore_schema.test_at_pass"
+ Column | Type | Modifiers
+--------+---------+-------------
+ a | integer |
+ b1 | bigint | default 100
+
+ALTER TABLE test_at_pass CHANGE COLUMN b1 b bigint DEFAULT 100, ALTER COLUMN b DROP DEFAULT;
+\d test_at_pass
+Table "atbdb_ustore_schema.test_at_pass"
+ Column | Type | Modifiers
+--------+---------+-------------
+ a | integer |
+ b | bigint | default 100
+
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint DEFAULT 100, ALTER COLUMN b DROP DEFAULT; -- ERROR
+ERROR: column "b" of relation "test_at_pass" does not exist
+ALTER TABLE test_at_pass MODIFY COLUMN a bigint CONSTRAINT atpass_pk PRIMARY KEY, DROP CONSTRAINT atpass_pk; -- ERROR
+ERROR: constraint "atpass_pk" of relation "test_at_pass" does not exist
+ALTER TABLE test_at_pass MODIFY COLUMN a bigint CONSTRAINT atpass_pk PRIMARY KEY, ADD CONSTRAINT atpass_pk PRIMARY KEY(a); -- ERROR
+ERROR: multiple primary keys for table "test_at_pass" are not allowed
+LINE 1: ...MN a bigint CONSTRAINT atpass_pk PRIMARY KEY, ADD CONSTRAINT...
+ ^
+DROP TABLE test_at_pass;
+-- test complex commands combined
+CREATE TABLE test_at_complex(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_complex VALUES(0,DEFAULT);
+INSERT INTO test_at_complex VALUES(1,DEFAULT);
+INSERT INTO test_at_complex VALUES(2,DEFAULT);
+INSERT INTO test_at_complex VALUES(-1,DEFAULT);
+ALTER TABLE test_at_complex MODIFY COLUMN a varchar(8), MODIFY COLUMN b int AUTO_INCREMENT UNIQUE;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_complex_b_seq" for serial column "test_at_complex.b"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_complex_b_key" for table "test_at_complex"
+INSERT INTO test_at_complex VALUES(3,DEFAULT);
+SELECT * FROM test_at_complex ORDER BY a::int,b::int;
+ a | b
+----+---
+ -1 | 4
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 5
+(5 rows)
+
+DROP TABLE test_at_complex;
+CREATE TABLE test_at_complex(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_complex VALUES(0,DEFAULT);
+INSERT INTO test_at_complex VALUES(1,DEFAULT);
+INSERT INTO test_at_complex VALUES(2,DEFAULT);
+INSERT INTO test_at_complex VALUES(-1,DEFAULT);
+ALTER TABLE test_at_complex MODIFY COLUMN b int AUTO_INCREMENT UNIQUE, MODIFY COLUMN a varchar(8);
+NOTICE: ALTER TABLE will create implicit sequence "test_at_complex_b_seq" for serial column "test_at_complex.b"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_at_complex_b_key" for table "test_at_complex"
+INSERT INTO test_at_complex VALUES(3,DEFAULT);
+SELECT * FROM test_at_complex ORDER BY a::int,b::int;
+ a | b
+----+---
+ -1 | 4
+ 0 | 1
+ 1 | 2
+ 2 | 3
+ 3 | 5
+(5 rows)
+
+DROP TABLE test_at_complex;
+-- test modify partitioned table column without data
+CREATE TABLE pt_at_modify (a int, b int NOT NULL, PRIMARY KEY(b,a)) WITH(STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (a)
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (1000),
+ PARTITION p3 VALUES LESS THAN (MAXVALUE)
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pt_at_modify_pkey" for table "pt_at_modify"
+ALTER TABLE pt_at_modify MODIFY a int8 DEFAULT 0; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: modify or change partition key column is not supported
+ALTER TABLE pt_at_modify MODIFY a int DEFAULT 0;
+ALTER TABLE pt_at_modify MODIFY a int GENERATED ALWAYS AS (b+1) STORED; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: cannot modify or change a partition key column as a generated column
+\d+ pt_at_modify;
+ Table "atbdb_ustore_schema.pt_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+--------------------+---------+--------------+-------------
+ a | integer | not null default 0 | plain | |
+ b | integer | not null | plain | |
+Indexes:
+ "pt_at_modify_pkey" PRIMARY KEY, ubtree (b, a) LOCAL WITH (storage_type=ustore) TABLESPACE pg_default
+Partition By RANGE(a)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE pt_at_modify MODIFY b int8 NULL;
+\d+ pt_at_modify;
+ Table "atbdb_ustore_schema.pt_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+--------------------+---------+--------------+-------------
+ a | integer | not null default 0 | plain | |
+ b | bigint | not null | plain | |
+Indexes:
+ "pt_at_modify_pkey" PRIMARY KEY, ubtree (b, a) LOCAL WITH (storage_type=ustore) TABLESPACE pg_default
+Partition By RANGE(a)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE pt_at_modify MODIFY b int8 DEFAULT 0;
+\d+ pt_at_modify;
+ Table "atbdb_ustore_schema.pt_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+--------------------+---------+--------------+-------------
+ a | integer | not null default 0 | plain | |
+ b | bigint | not null default 0 | plain | |
+Indexes:
+ "pt_at_modify_pkey" PRIMARY KEY, ubtree (b, a) LOCAL WITH (storage_type=ustore) TABLESPACE pg_default
+Partition By RANGE(a)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE pt_at_modify MODIFY b int AUTO_INCREMENT;
+NOTICE: ALTER TABLE will create implicit sequence "pt_at_modify_b_seq" for serial column "pt_at_modify.b"
+\d+ pt_at_modify;
+ Table "atbdb_ustore_schema.pt_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-------------------------+---------+--------------+-------------
+ a | integer | not null default 0 | plain | |
+ b | integer | not null AUTO_INCREMENT | plain | |
+Indexes:
+ "pt_at_modify_pkey" PRIMARY KEY, ubtree (b, a) LOCAL WITH (storage_type=ustore) TABLESPACE pg_default
+Partition By RANGE(a)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE pt_at_modify MODIFY b int2 UNIQUE;
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "pt_at_modify_b_tableoid_key" for table "pt_at_modify"
+\d+ pt_at_modify;
+ Table "atbdb_ustore_schema.pt_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------+--------------------+---------+--------------+-------------
+ a | integer | not null default 0 | plain | |
+ b | smallint | not null | plain | |
+Indexes:
+ "pt_at_modify_pkey" PRIMARY KEY, ubtree (b, a) LOCAL WITH (storage_type=ustore) TABLESPACE pg_default
+ "pt_at_modify_b_tableoid_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=USTORE) TABLESPACE pg_default
+Partition By RANGE(a)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE pt_at_modify MODIFY b int CHECK (b < 10000);
+\d+ pt_at_modify;
+ Table "atbdb_ustore_schema.pt_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+--------------------+---------+--------------+-------------
+ a | integer | not null default 0 | plain | |
+ b | integer | not null | plain | |
+Indexes:
+ "pt_at_modify_pkey" PRIMARY KEY, ubtree (b, a) LOCAL WITH (storage_type=ustore) TABLESPACE pg_default
+ "pt_at_modify_b_tableoid_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default
+Check constraints:
+ "pt_at_modify_b_check" CHECK (b < 10000)
+Partition By RANGE(a)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE pt_at_modify MODIFY b varchar(8) COLLATE "POSIX";
+\d+ pt_at_modify;
+ Table "atbdb_ustore_schema.pt_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+------------------------+----------+--------------+-------------
+ a | integer | not null default 0 | plain | |
+ b | character varying(8) | collate POSIX not null | extended | |
+Indexes:
+ "pt_at_modify_pkey" PRIMARY KEY, ubtree (b, a) LOCAL WITH (storage_type=ustore) TABLESPACE pg_default
+ "pt_at_modify_b_tableoid_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default
+Check constraints:
+ "pt_at_modify_b_check" CHECK (b::bigint < 10000)
+Partition By RANGE(a)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE pt_at_modify MODIFY b int8 GENERATED ALWAYS AS (a+1) STORED;
+\d+ pt_at_modify;
+ Table "atbdb_ustore_schema.pt_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------------------------------------------+---------+--------------+-------------
+ a | integer | not null default 0 | plain | |
+ b | bigint | not null generated always as ((a + 1)) stored | plain | |
+Indexes:
+ "pt_at_modify_pkey" PRIMARY KEY, ubtree (b, a) LOCAL WITH (storage_type=ustore) TABLESPACE pg_default
+ "pt_at_modify_b_tableoid_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default
+Check constraints:
+ "pt_at_modify_b_check" CHECK (b < 10000)
+Partition By RANGE(a)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE pt_at_modify MODIFY b varchar(8) NOT NULL;
+\d+ pt_at_modify;
+ Table "atbdb_ustore_schema.pt_at_modify"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+--------------------+----------+--------------+-------------
+ a | integer | not null default 0 | plain | |
+ b | character varying(8) | not null | extended | |
+Indexes:
+ "pt_at_modify_pkey" PRIMARY KEY, ubtree (b, a) LOCAL WITH (storage_type=ustore) TABLESPACE pg_default
+ "pt_at_modify_b_tableoid_key" UNIQUE CONSTRAINT, ubtree (b) WITH (storage_type=ustore) TABLESPACE pg_default
+Check constraints:
+ "pt_at_modify_b_check" CHECK (b::bigint < 10000)
+Partition By RANGE(a)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+select pg_get_tabledef('pt_at_modify'::regclass);
+ pg_get_tabledef
+--------------------------------------------------------------------------------------------------------------------------------------------
+ SET search_path = atbdb_ustore_schema; +
+ CREATE TABLE pt_at_modify ( +
+ a integer DEFAULT 0 NOT NULL, +
+ b character varying(8) NOT NULL, +
+ CONSTRAINT pt_at_modify_b_check CHECK (((b)::bigint < 10000)) +
+ ) +
+ WITH (orientation=row, storage_type=ustore, compression=no) +
+ PARTITION BY RANGE (a) +
+ ( +
+ PARTITION p1 VALUES LESS THAN (100) TABLESPACE pg_default, +
+ PARTITION p2 VALUES LESS THAN (1000) TABLESPACE pg_default, +
+ PARTITION p3 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT; +
+ ALTER TABLE pt_at_modify ADD CONSTRAINT pt_at_modify_b_tableoid_key UNIQUE USING ubtree (b) INCLUDE (tableoid) WITH (storage_type=ustore);+
+ ALTER TABLE pt_at_modify ADD CONSTRAINT pt_at_modify_pkey PRIMARY KEY USING ubtree (b, a) WITH (storage_type=ustore);
+(1 row)
+
+INSERT INTO pt_at_modify VALUES(1,1);
+DROP TABLE pt_at_modify;
+-- test alter modify first after
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED AFTER a, MODIFY c float4 FIRST;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+ c | a | d | b
+----+----+-----+----
+ 3 | 1 | 101 | 2
+ 13 | 11 | 111 | 12
+ 23 | 21 | 121 | 22
+(3 rows)
+
+DROP TABLE test_at_modify_fa;
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED FIRST, MODIFY c float4 GENERATED ALWAYS AS (b+100) STORED AFTER a;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+ d | a | c | b
+-----+----+-----+----
+ 101 | 1 | 102 | 2
+ 111 | 11 | 112 | 12
+ 121 | 21 | 122 | 22
+(3 rows)
+
+DROP TABLE test_at_modify_fa;
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,0);
+INSERT INTO test_at_modify_fa VALUES(21,22,0);
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED FIRST, MODIFY c bigint AUTO_INCREMENT PRIMARY KEY AFTER a;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_fa_c_seq" for serial column "test_at_modify_fa.c"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_fa_pkey" for table "test_at_modify_fa"
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+ d | a | c | b
+-----+----+---+----
+ 101 | 1 | 3 | 2
+ 111 | 11 | 4 | 12
+ 121 | 21 | 5 | 22
+(3 rows)
+
+INSERT INTO test_at_modify_fa(a,b,c) VALUES(31,32,NULL);
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+ d | a | c | b
+-----+----+---+----
+ 101 | 1 | 3 | 2
+ 111 | 11 | 4 | 12
+ 121 | 21 | 5 | 22
+ 131 | 31 | 6 | 32
+(4 rows)
+
+DROP TABLE test_at_modify_fa;
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int GENERATED ALWAYS AS (b+1) STORED
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_fa VALUES(1,2,DEFAULT);
+INSERT INTO test_at_modify_fa VALUES(11,12,DEFAULT);
+INSERT INTO test_at_modify_fa VALUES(21,22,DEFAULT);
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED AFTER a, MODIFY b float4 GENERATED ALWAYS AS (a+1000) STORED FIRST; -- ERROR
+ERROR: Invalid modify column operation
+DETAIL: A generated column cannot reference another generated column.
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (b+100) STORED AFTER a, MODIFY a float4 GENERATED ALWAYS AS (b+1000) STORED FIRST;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+ a | d | b | c
+------+-----+----+----
+ 1002 | 102 | 2 | 3
+ 1012 | 112 | 12 | 13
+ 1022 | 122 | 22 | 23
+(3 rows)
+
+DROP TABLE test_at_modify_fa;
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+SELECT a,b,c FROM test_at_modify_fa ORDER BY 1,2,3;
+ a | b | c
+----+----+----
+ 1 | 2 | 3
+ 11 | 12 | 13
+ 21 | 22 | 23
+(3 rows)
+
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED AFTER a, ADD COLUMN e int GENERATED ALWAYS AS (b+100) STORED FIRST;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+ e | a | d | b | c
+-----+----+-----+----+----
+ 102 | 1 | 101 | 2 | 3
+ 112 | 11 | 111 | 12 | 13
+ 122 | 21 | 121 | 22 | 23
+(3 rows)
+
+DROP TABLE test_at_modify_fa;
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+SELECT a,b,c FROM test_at_modify_fa ORDER BY 1,2,3;
+ a | b | c
+----+----+----
+ 1 | 2 | 3
+ 11 | 12 | 13
+ 21 | 22 | 23
+(3 rows)
+
+ALTER TABLE test_at_modify_fa ADD COLUMN d bigint AUTO_INCREMENT PRIMARY KEY AFTER a, ADD COLUMN e int GENERATED ALWAYS AS (b+100) STORED FIRST;
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_fa_d_seq" for serial column "test_at_modify_fa.d"
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_at_modify_fa_pkey" for table "test_at_modify_fa"
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+ e | a | d | b | c
+-----+----+---+----+----
+ 102 | 1 | 1 | 2 | 3
+ 112 | 11 | 2 | 12 | 13
+ 122 | 21 | 3 | 22 | 23
+(3 rows)
+
+INSERT INTO test_at_modify_fa(a,b,c) VALUES(31,32,33);
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+ e | a | d | b | c
+-----+----+---+----+----
+ 102 | 1 | 1 | 2 | 3
+ 112 | 11 | 2 | 12 | 13
+ 122 | 21 | 3 | 22 | 23
+ 132 | 31 | 4 | 32 | 33
+(4 rows)
+
+DROP TABLE test_at_modify_fa;
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c float4 GENERATED ALWAYS AS (a+100) STORED FIRST, MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED AFTER c; -- ERROR
+ERROR: cannot use generated column "c" in column generation expression
+DETAIL: A generated column cannot reference another generated column.
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c float4 GENERATED ALWAYS AS (a+100) STORED FIRST, MODIFY COLUMN b int GENERATED ALWAYS AS (a+100) STORED AFTER c;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3;
+ c | b | a
+-----+-----+----
+ 101 | 101 | 1
+ 111 | 111 | 11
+ 121 | 121 | 21
+(3 rows)
+
+DROP TABLE test_at_modify_fa;
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,0);
+INSERT INTO test_at_modify_fa VALUES(21,22,0);
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c bigint AUTO_INCREMENT PRIMARY KEY FIRST, MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED AFTER c; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_fa_c_seq" for serial column "test_at_modify_fa.c"
+ERROR: generated column cannot refer to auto_increment column
+ALTER TABLE test_at_modify_fa MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED AFTER c, MODIFY COLUMN c bigint AUTO_INCREMENT PRIMARY KEY FIRST; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_at_modify_fa_c_seq" for serial column "test_at_modify_fa.c"
+ERROR: generated column cannot refer to auto_increment column
+DROP TABLE test_at_modify_fa;
+-- primary key should be not null after modify
+create table test11(f11 int, f12 varchar(20), f13 bool, CONSTRAINT pk_test11_f11 primary key (f11)) WITH(STORAGE_TYPE=USTORE);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pk_test11_f11" for table "test11"
+\d test11
+ Table "atbdb_ustore_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f11 | integer | not null
+ f12 | character varying(20) |
+ f13 | boolean |
+Indexes:
+ "pk_test11_f11" PRIMARY KEY, ubtree (f11) WITH (storage_type=USTORE) TABLESPACE pg_default
+
+ALTER TABLE test11 MODIFY COLUMN f11 int;
+\d test11
+ Table "atbdb_ustore_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f11 | integer | not null
+ f12 | character varying(20) |
+ f13 | boolean |
+Indexes:
+ "pk_test11_f11" PRIMARY KEY, ubtree (f11) WITH (storage_type=ustore) TABLESPACE pg_default
+
+ALTER TABLE test11 MODIFY COLUMN f11 int AFTER f13;
+\d test11
+ Table "atbdb_ustore_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f12 | character varying(20) |
+ f13 | boolean |
+ f11 | integer | not null
+Indexes:
+ "pk_test11_f11" PRIMARY KEY, ubtree (f11) WITH (storage_type=ustore) TABLESPACE pg_default
+
+ALTER TABLE test11 DROP CONSTRAINT pk_test11_f11, MODIFY COLUMN f11 int NULL;
+\d test11
+ Table "atbdb_ustore_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f12 | character varying(20) |
+ f13 | boolean |
+ f11 | integer |
+
+ALTER TABLE test11 ADD CONSTRAINT pk_test11_f11 primary key (f11), MODIFY COLUMN f11 int NULL;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "pk_test11_f11" for table "test11"
+\d test11
+ Table "atbdb_ustore_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f12 | character varying(20) |
+ f13 | boolean |
+ f11 | integer | not null
+Indexes:
+ "pk_test11_f11" PRIMARY KEY, ubtree (f11) WITH (storage_type=USTORE) TABLESPACE pg_default
+
+insert into test11(f11,f12,f13) values(NULL,'1',true); --ERROR
+ERROR: null value in column "f11" violates not-null constraint
+DETAIL: Failing row contains (1, t, null).
+drop table test11;
+-- primary keys should be not null after modify
+create table test11(f11 int, f12 varchar(20), f13 bool, CONSTRAINT pk_test11_f11 primary key (f11,f12)) WITH(STORAGE_TYPE=USTORE);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pk_test11_f11" for table "test11"
+\d test11
+ Table "atbdb_ustore_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f11 | integer | not null
+ f12 | character varying(20) | not null
+ f13 | boolean |
+Indexes:
+ "pk_test11_f11" PRIMARY KEY, ubtree (f11, f12) WITH (storage_type=USTORE) TABLESPACE pg_default
+
+ALTER TABLE test11 MODIFY COLUMN f11 int;
+\d test11
+ Table "atbdb_ustore_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f11 | integer | not null
+ f12 | character varying(20) | not null
+ f13 | boolean |
+Indexes:
+ "pk_test11_f11" PRIMARY KEY, ubtree (f11, f12) WITH (storage_type=ustore) TABLESPACE pg_default
+
+ALTER TABLE test11 MODIFY f11 int AFTER f13;
+\d test11
+ Table "atbdb_ustore_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f12 | character varying(20) | not null
+ f13 | boolean |
+ f11 | integer | not null
+Indexes:
+ "pk_test11_f11" PRIMARY KEY, ubtree (f11, f12) WITH (storage_type=ustore) TABLESPACE pg_default
+
+ALTER TABLE test11 DROP CONSTRAINT pk_test11_f11, MODIFY COLUMN f11 int NULL;
+\d test11
+ Table "atbdb_ustore_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f12 | character varying(20) | not null
+ f13 | boolean |
+ f11 | integer |
+
+ALTER TABLE test11 ADD CONSTRAINT pk_test11_f11 primary key (f11), MODIFY COLUMN f11 int NULL;
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "pk_test11_f11" for table "test11"
+\d test11
+ Table "atbdb_ustore_schema.test11"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ f12 | character varying(20) | not null
+ f13 | boolean |
+ f11 | integer | not null
+Indexes:
+ "pk_test11_f11" PRIMARY KEY, ubtree (f11) WITH (storage_type=USTORE) TABLESPACE pg_default
+
+insert into test11(f11,f12,f13) values(NULL,'1',true); --ERROR
+ERROR: null value in column "f11" violates not-null constraint
+DETAIL: Failing row contains (1, t, null).
+drop table test11;
+-- primary keys in partition table should be not null after modify
+create table range_range(id int, gender varchar not null, birthday date not null) WITH(STORAGE_TYPE=USTORE)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+ALTER TABLE range_range ADD CONSTRAINT range_range_pkey primary KEY USING ubtree (id, birthday);
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range"
+\d+ range_range
+ Table "atbdb_ustore_schema.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ id | integer | not null | plain | |
+ gender | character varying | not null | extended | |
+ birthday | date | not null | plain | |
+Indexes:
+ "range_range_pkey" PRIMARY KEY, ubtree (id, birthday) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE range_range MODIFY COLUMN id int AFTER birthday;
+\d+ range_range
+ Table "atbdb_ustore_schema.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ gender | character varying | not null | extended | |
+ birthday | date | not null | plain | |
+ id | integer | not null | plain | |
+Indexes:
+ "range_range_pkey" PRIMARY KEY, ubtree (id, birthday) LOCAL WITH (storage_type=ustore) TABLESPACE pg_default
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+drop table if exists range_range cascade;
+-- primary key in partition table should be not null after modify
+create table range_range(id int, gender varchar not null, birthday date not null) WITH(STORAGE_TYPE=USTORE)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+ALTER TABLE range_range ADD CONSTRAINT range_range_pkey primary KEY USING ubtree (id);
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range"
+\d+ range_range
+ Table "atbdb_ustore_schema.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ id | integer | not null | plain | |
+ gender | character varying | not null | extended | |
+ birthday | date | not null | plain | |
+Indexes:
+ "range_range_pkey" PRIMARY KEY, ubtree (id) WITH (storage_type=USTORE) TABLESPACE pg_default
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE range_range MODIFY COLUMN id int AFTER birthday;
+\d+ range_range
+ Table "atbdb_ustore_schema.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ gender | character varying | not null | extended | |
+ birthday | date | not null | plain | |
+ id | integer | not null | plain | |
+Indexes:
+ "range_range_pkey" PRIMARY KEY, ubtree (id) WITH (storage_type=ustore) TABLESPACE pg_default
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+drop table if exists range_range cascade;
+-- primary key in partition table should be not null after modify
+create table range_range(id int, gender varchar not null, birthday date not null) WITH(STORAGE_TYPE=USTORE)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+ALTER TABLE range_range ADD CONSTRAINT range_range_pkey primary KEY USING ubtree (gender);
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range"
+\d+ range_range
+ Table "atbdb_ustore_schema.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ id | integer | | plain | |
+ gender | character varying | not null | extended | |
+ birthday | date | not null | plain | |
+Indexes:
+ "range_range_pkey" PRIMARY KEY, ubtree (gender) WITH (storage_type=USTORE) TABLESPACE pg_default
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+ALTER TABLE range_range MODIFY COLUMN gender varchar AFTER birthday;
+\d+ range_range
+ Table "atbdb_ustore_schema.range_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+-------------------+-----------+----------+--------------+-------------
+ id | integer | | plain | |
+ birthday | date | not null | plain | |
+ gender | character varying | not null | extended | |
+Indexes:
+ "range_range_pkey" PRIMARY KEY, ubtree (gender) WITH (storage_type=ustore) TABLESPACE pg_default
+Partition By RANGE(id) Subpartition By RANGE(birthday)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Number of subpartitions: 6 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+drop table if exists range_range cascade;
+-- primary keys in multi range keys partition table should be not null after modify
+create table multi_keys_range(f1 int, f2 int, f3 int) WITH(STORAGE_TYPE=USTORE)
+partition by range(f1, f2)
+(
+ partition multi_keys_range_p0 values less than (10, 0),
+ partition multi_keys_range_p1 values less than (20, 0),
+ partition multi_keys_range_p2 values less than (30, 0)
+);
+-- primary key should be LOCAL INDEX
+alter table multi_keys_range modify f1 int after f3, ADD CONSTRAINT multi_keys_range_pkey PRIMARY KEY USING ubtree (f1,f2);
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "multi_keys_range_pkey" for table "multi_keys_range"
+\d+ multi_keys_range
+ Table "atbdb_ustore_schema.multi_keys_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f2 | integer | not null | plain | |
+ f3 | integer | | plain | |
+ f1 | integer | not null | plain | |
+Indexes:
+ "multi_keys_range_pkey" PRIMARY KEY, ubtree (f1, f2) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default
+Partition By RANGE(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+alter table multi_keys_range modify f2 int after f3;
+\d+ multi_keys_range
+ Table "atbdb_ustore_schema.multi_keys_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f3 | integer | | plain | |
+ f2 | integer | not null | plain | |
+ f1 | integer | not null | plain | |
+Indexes:
+ "multi_keys_range_pkey" PRIMARY KEY, ubtree (f1, f2) LOCAL WITH (storage_type=ustore) TABLESPACE pg_default
+Partition By RANGE(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+drop table if exists multi_keys_range cascade;
+-- primary keys in multi list keys partition table should be not null after modify
+create table multi_keys_list(f1 int, f2 int, f3 int) WITH(STORAGE_TYPE=USTORE)
+partition by list(f1, f2)
+(
+ partition multi_keys_list_p0 values ((10, 0)),
+ partition multi_keys_list_p1 values ((20, 0)),
+ partition multi_keys_list_p2 values (DEFAULT)
+);
+-- primary key should be LOCAL INDEX
+alter table multi_keys_list modify f1 int after f3, ADD CONSTRAINT multi_keys_list_pkey PRIMARY KEY USING ubtree (f1,f2);
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "multi_keys_list_pkey" for table "multi_keys_list"
+\d+ multi_keys_list
+ Table "atbdb_ustore_schema.multi_keys_list"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f2 | integer | not null | plain | |
+ f3 | integer | | plain | |
+ f1 | integer | not null | plain | |
+Indexes:
+ "multi_keys_list_pkey" PRIMARY KEY, ubtree (f1, f2) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default
+Partition By LIST(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+alter table multi_keys_list modify f2 int after f3;
+\d+ multi_keys_list
+ Table "atbdb_ustore_schema.multi_keys_list"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ f3 | integer | | plain | |
+ f2 | integer | not null | plain | |
+ f1 | integer | not null | plain | |
+Indexes:
+ "multi_keys_list_pkey" PRIMARY KEY, ubtree (f1, f2) LOCAL WITH (storage_type=ustore) TABLESPACE pg_default
+Partition By LIST(f1, f2)
+Number of partitions: 3 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, storage_type=ustore, compression=no
+
+drop table if exists multi_keys_list cascade;
+-- test moidfy/change VIEW depends column
+-- --modify
+-- -- --test select *
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_star AS select * from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+--------------------------------------------
+ SELECT * FROM test_at_modify_view_column;
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f4, test_at_modify_view_column.f3, test_at_modify_view_column.f2, test_at_modify_view_column.f1 FROM test_at_modify_view_column;
+(1 row)
+
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view test_modify_view_star
+-- -- --test select * with add column
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_star AS select * from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20), ADD COLUMN f0 int;
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int, ADD COLUMN f0 int;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+--------------------------------------------
+ SELECT * FROM test_at_modify_view_column;
+(1 row)
+
+SELECT * FROM test_modify_view_star;
+ f4 | f3 | f2 | f1
+----+----+----+----
+ 4 | 3 | 2 | 1
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int FIRST, ADD COLUMN f5 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f4, test_at_modify_view_column.f3, test_at_modify_view_column.f2, test_at_modify_view_column.f1 FROM test_at_modify_view_column;
+(1 row)
+
+SELECT * FROM test_modify_view_star;
+ f4 | f3 | f2 | f1
+----+----+----+----
+ 4 | 3 | 2 | 1
+(1 row)
+
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view test_modify_view_star
+-- -- --test select * special
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+create view test_modify_view_star(col1,col2) as
+SELECT * FROM
+(
+ SELECT
+ CAST(f1/10000 AS DECIMAL(18,2)),
+ CAST(CAST(f4 AS DECIMAL(18,4))/f1*100 AS DECIMAL(18,2))
+ FROM test_at_modify_view_column
+);
+SELECT * FROM test_modify_view_star;
+ col1 | col2
+------+--------
+ 0.00 | 400.00
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20);
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT * FROM (SELECT (((test_at_modify_view_column.f1)::bigint / 10000))::numeric(18,2) AS "numeric", ((((test_at_modify_view_column.f4)::numeric(18,4) / (test_at_modify_view_column.f1)::numeric) * (100)::numeric))::numeric(18,2) AS "numeric" FROM test_at_modify_view_column) __unnamed_subquery__;
+(1 row)
+
+SELECT * FROM test_modify_view_star;
+ col1 | col2
+------+--------
+ 0.00 | 400.00
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int FIRST; -- ERROR
+ERROR: column reference "numeric" is ambiguous
+LINE 1: ALTER TABLE test_at_modify_view_column MODIFY column f1 int ...
+ ^
+CONTEXT: referenced column: col1
+ALTER TABLE test_at_modify_view_column ADD COLUMN f5 int FIRST; -- ERROR
+ERROR: column reference "numeric" is ambiguous
+LINE 1: ALTER TABLE test_at_modify_view_column ADD COLUMN f5 int FIR...
+ ^
+CONTEXT: referenced column: col1
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view test_modify_view_star
+-- -- --test modify view column
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_f1f2 WITH(security_barrier=TRUE) AS select F1,F2 from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int;
+SELECT pg_get_viewdef('test_modify_view_f1f2'::regclass);
+ pg_get_viewdef
+------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2 FROM test_at_modify_view_column;
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_f1f2'::regclass);
+ pg_get_viewdef
+------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2 FROM test_at_modify_view_column;
+(1 row)
+
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view test_modify_view_f1f2
+-- -- --test view and column name
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW "test_modify_view_f1f2F3" AS select F1,F2,F3 AS "F3" from test_at_modify_view_column where f4 > 0;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE (test_at_modify_view_column.f4 > 0);
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE (test_at_modify_view_column.f4 > 0);
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f4 varchar(20);
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE ((test_at_modify_view_column.f4)::bigint > 0);
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f4 int AFTER f1;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE ((test_at_modify_view_column.f4)::bigint > 0);
+(1 row)
+
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view "test_modify_view_f1f2F3"
+-- -- --test drop column
+CREATE TABLE test_at_modify_view_column (f5 int, f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(5, 4, '3', '2', 1);
+CREATE VIEW "test_modify_view_f1f2F3" AS select F1,F2,F3 AS "F3" from test_at_modify_view_column where f4 > 0;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column MODIFY column f4 varchar(20), DROP COLUMN f5;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE ((test_at_modify_view_column.f4)::bigint > 0);
+(1 row)
+
+SELECT * FROM "test_modify_view_f1f2F3";
+ f1 | f2 | F3
+----+----+----
+ 1 | 2 | 3
+(1 row)
+
+ALTER TABLE test_at_modify_view_column MODIFY column f4 int AFTER f1, DROP COLUMN f5;
+ERROR: column "f5" of relation "test_at_modify_view_column" does not exist
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE ((test_at_modify_view_column.f4)::bigint > 0);
+(1 row)
+
+SELECT * FROM "test_modify_view_f1f2F3";
+ f1 | f2 | F3
+----+----+----
+ 1 | 2 | 3
+(1 row)
+
+DROP VIEW "test_modify_view_f1f2F3";
+DROP TABLE test_at_modify_view_column CASCADE;
+-- --change
+-- -- --test select *
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_star AS select * from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 int;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+--------------------------------------------
+ SELECT * FROM test_at_modify_view_column;
+(1 row)
+
+ALTER TABLE test_at_modify_view_column CHANGE column c1 f1 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f4, test_at_modify_view_column.f3, test_at_modify_view_column.f2, test_at_modify_view_column.f1 FROM test_at_modify_view_column;
+(1 row)
+
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view test_modify_view_star
+-- -- --test select * with add column
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_star AS select * from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20), ADD COLUMN f0 int; -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 int, ADD COLUMN f0 int;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+--------------------------------------------
+ SELECT * FROM test_at_modify_view_column;
+(1 row)
+
+SELECT * FROM test_modify_view_star;
+ f4 | f3 | f2 | f1
+----+----+----+----
+ 4 | 3 | 2 | 1
+(1 row)
+
+ALTER TABLE test_at_modify_view_column CHANGE column c1 f1 int FIRST, ADD COLUMN f5 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f4, test_at_modify_view_column.f3, test_at_modify_view_column.f2, test_at_modify_view_column.f1 FROM test_at_modify_view_column;
+(1 row)
+
+SELECT * FROM test_modify_view_star;
+ f4 | f3 | f2 | f1
+----+----+----+----
+ 4 | 3 | 2 | 1
+(1 row)
+
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view test_modify_view_star
+-- -- --test select * special
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+create view test_modify_view_star(col1,col2) as
+SELECT * FROM
+(
+ SELECT
+ CAST(f1/10000 AS DECIMAL(18,2)),
+ CAST(CAST(f4 AS DECIMAL(18,4))/f1*100 AS DECIMAL(18,2))
+ FROM test_at_modify_view_column
+);
+SELECT * FROM test_modify_view_star;
+ col1 | col2
+------+--------
+ 0.00 | 400.00
+(1 row)
+
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20);
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT * FROM (SELECT (((test_at_modify_view_column.c1)::bigint / 10000))::numeric(18,2) AS "numeric", ((((test_at_modify_view_column.f4)::numeric(18,4) / (test_at_modify_view_column.c1)::numeric) * (100)::numeric))::numeric(18,2) AS "numeric" FROM test_at_modify_view_column) __unnamed_subquery__;
+(1 row)
+
+SELECT * FROM test_modify_view_star;
+ col1 | col2
+------+--------
+ 0.00 | 400.00
+(1 row)
+
+ALTER TABLE test_at_modify_view_column CHANGE column c1 f1 int FIRST; -- ERROR
+ERROR: column reference "numeric" is ambiguous
+LINE 1: ALTER TABLE test_at_modify_view_column CHANGE column c1 f1 i...
+ ^
+CONTEXT: referenced column: col1
+ALTER TABLE test_at_modify_view_column ADD COLUMN f5 int FIRST; -- ERROR
+ERROR: column reference "numeric" is ambiguous
+LINE 1: ALTER TABLE test_at_modify_view_column ADD COLUMN f5 int FIR...
+ ^
+CONTEXT: referenced column: col1
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view test_modify_view_star
+-- -- --test CHANGE view column
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_f1f2 WITH(security_barrier=TRUE) AS select F1,F2 from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 int;
+SELECT pg_get_viewdef('test_modify_view_f1f2'::regclass);
+ pg_get_viewdef
+------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.c1 AS f1, test_at_modify_view_column.f2 FROM test_at_modify_view_column;
+(1 row)
+
+ALTER TABLE test_at_modify_view_column CHANGE column c1 f1 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_f1f2'::regclass);
+ pg_get_viewdef
+------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2 FROM test_at_modify_view_column;
+(1 row)
+
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view test_modify_view_f1f2
+-- -- --test view and column name
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW "test_modify_view_f1f2F3" AS select F1,F2,F3 AS "F3" from test_at_modify_view_column where f4 > 0;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE (test_at_modify_view_column.f4 > 0);
+(1 row)
+
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 int;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.c1 AS f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE (test_at_modify_view_column.f4 > 0);
+(1 row)
+
+ALTER TABLE test_at_modify_view_column CHANGE column f4 c4 varchar(20);
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.c1 AS f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE ((test_at_modify_view_column.c4)::bigint > 0);
+(1 row)
+
+ALTER TABLE test_at_modify_view_column CHANGE column c4 f4 int AFTER c1;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.c1 AS f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE ((test_at_modify_view_column.f4)::bigint > 0);
+(1 row)
+
+DROP TABLE test_at_modify_view_column CASCADE;
+NOTICE: drop cascades to view "test_modify_view_f1f2F3"
+-- -- --test drop column
+CREATE TABLE test_at_modify_view_column (f5 int, f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_at_modify_view_column_pkey" for table "test_at_modify_view_column"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_at_modify_view_column_f1_key" for table "test_at_modify_view_column"
+INSERT INTO test_at_modify_view_column VALUES(5, 4, '3', '2', 1);
+CREATE VIEW "test_modify_view_f1f2F3" AS select F1,F2,F3 AS "F3" from test_at_modify_view_column where f4 > 0;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ERROR: cannot change data type of view column "f1" from integer to character varying(20)
+ALTER TABLE test_at_modify_view_column CHANGE column f4 c4 varchar(20), DROP COLUMN f5;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE ((test_at_modify_view_column.c4)::bigint > 0);
+(1 row)
+
+SELECT * FROM "test_modify_view_f1f2F3";
+ f1 | f2 | F3
+----+----+----
+ 1 | 2 | 3
+(1 row)
+
+ALTER TABLE test_at_modify_view_column CHANGE column c4 f4 int AFTER f1, DROP COLUMN f5;
+ERROR: column "f5" of relation "test_at_modify_view_column" does not exist
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SELECT test_at_modify_view_column.f1, test_at_modify_view_column.f2, test_at_modify_view_column.f3 AS "F3" FROM test_at_modify_view_column WHERE ((test_at_modify_view_column.c4)::bigint > 0);
+(1 row)
+
+SELECT * FROM "test_modify_view_f1f2F3";
+ f1 | f2 | F3
+----+----+----
+ 1 | 2 | 3
+(1 row)
+
+DROP VIEW "test_modify_view_f1f2F3";
+DROP TABLE test_at_modify_view_column CASCADE;
+-- END
+RESET CURRENT_SCHEMA;
+DROP SCHEMA atbdb_ustore_schema CASCADE;
+\c regression
+clean connection to all force for database atbdb_ustore;
+drop database if exists atbdb_ustore;
diff --git a/src/test/regress/expected/auto_explain.out b/src/test/regress/expected/auto_explain.out
index 217f020a1..afce8fca2 100644
--- a/src/test/regress/expected/auto_explain.out
+++ b/src/test/regress/expected/auto_explain.out
@@ -698,7 +698,7 @@ Name: datanode1
Output: test1.id, test1.val
Exec Nodes: All datanodes
Filter: (test1.id = ($15)::numeric)
- -> Seq Scan on public.test1 t1 (cost=0.00..18.69 rows=869 p-time=0 p-rows=0 width=70)
+--?.*
Output: t1.id, t1.val, t1.ctid
Exec Nodes: All datanodes
param1 value: 103 type: int4
diff --git a/src/test/regress/expected/col_count_distinct_4.out b/src/test/regress/expected/col_count_distinct_4.out
old mode 100644
new mode 100755
index 701d180b8..4ea718eb9
--- a/src/test/regress/expected/col_count_distinct_4.out
+++ b/src/test/regress/expected/col_count_distinct_4.out
@@ -170,11 +170,11 @@ select count(distinct(a)) col1, d, avg(b) col2, sum(distinct(a)) col3, avg(disti
(7 rows)
explain (costs off) select distinct case when min(distinct c)>60 then min(distinct c) else null end as min, count(distinct(b)) from t_distinct group by b;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------
Row Adapter
-> Vector Hash Aggregate
- Group By Key: CASE WHEN (min(DISTINCT c) > 60) THEN (min(DISTINCT c))::numeric ELSE NULL::numeric END, count(DISTINCT b)
+ Group By Key: CASE WHEN (min(DISTINCT c) > 60) THEN min(DISTINCT c) ELSE NULL::integer END, count(DISTINCT b)
-> Vector Sort Aggregate
Group By Key: b
-> Vector Sort
diff --git a/src/test/regress/expected/count_distinct_part2.out b/src/test/regress/expected/count_distinct_part2.out
old mode 100644
new mode 100755
index b11582c9a..875f58c03
--- a/src/test/regress/expected/count_distinct_part2.out
+++ b/src/test/regress/expected/count_distinct_part2.out
@@ -721,10 +721,10 @@ select count(distinct(a)) col1, d, avg(b) col2, sum(distinct(a)) col3, avg(disti
(7 rows)
explain (costs off) select distinct case when min(distinct c)>60 then min(distinct c) else null end as min, count(distinct(b)) from t_distinct group by b;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------
HashAggregate
- Group By Key: CASE WHEN (min(DISTINCT c) > 60) THEN (min(DISTINCT c))::numeric ELSE NULL::numeric END, count(DISTINCT b)
+ Group By Key: CASE WHEN (min(DISTINCT c) > 60) THEN min(DISTINCT c) ELSE NULL::integer END, count(DISTINCT b)
-> GroupAggregate
Group By Key: b
-> Sort
diff --git a/src/test/regress/expected/count_distinct_part4.out b/src/test/regress/expected/count_distinct_part4.out
old mode 100644
new mode 100755
index 0bc168ca8..c4000dcd8
--- a/src/test/regress/expected/count_distinct_part4.out
+++ b/src/test/regress/expected/count_distinct_part4.out
@@ -350,12 +350,12 @@ select '20180831' rpt_Dt, org_id org_id,
end) bxs
from m_inte_counter_detail a left join m_pub_org_stat_stt b on a.org_id = b.net_bank
group by a.org_id;
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Row Adapter
- Output: ('20180831'::text), a.org_id, (count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1::numeric ELSE NULL::numeric END)), (count(DISTINCT CASE WHEN ((a.is_type)::text = ANY ('{TTS,GXH}'::text[])) THEN a.term_code ELSE NULL::character varying END)), (count(DISTINCT CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.is_type)::text = ANY ('{TTS,GXH}'::text[]))) THEN a.term_code ELSE NULL::character varying END)), (count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)), a.org_id
+ Output: ('20180831'::text), a.org_id, (count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1 ELSE NULL::integer END)), (count(DISTINCT CASE WHEN ((a.is_type)::text = ANY ('{TTS,GXH}'::text[])) THEN a.term_code ELSE NULL::character varying END)), (count(DISTINCT CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.is_type)::text = ANY ('{TTS,GXH}'::text[]))) THEN a.term_code ELSE NULL::character varying END)), (count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)), a.org_id
-> Vector Sort Aggregate
- Output: '20180831'::text, a.org_id, count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1::numeric ELSE NULL::numeric END), count(DISTINCT CASE WHEN ((a.is_type)::text = ANY ('{TTS,GXH}'::text[])) THEN a.term_code ELSE NULL::character varying END), count(DISTINCT CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.is_type)::text = ANY ('{TTS,GXH}'::text[]))) THEN a.term_code ELSE NULL::character varying END), count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END), a.org_id
+ Output: '20180831'::text, a.org_id, count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1 ELSE NULL::integer END), count(DISTINCT CASE WHEN ((a.is_type)::text = ANY ('{TTS,GXH}'::text[])) THEN a.term_code ELSE NULL::character varying END), count(DISTINCT CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.is_type)::text = ANY ('{TTS,GXH}'::text[]))) THEN a.term_code ELSE NULL::character varying END), count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END), a.org_id
Group By Key: a.org_id
-> Vector Sort
Output: a.org_id, a.rpt_dt, a.ywbh, a.is_type, a.term_code
@@ -391,12 +391,12 @@ select '20180831' rpt_Dt, a.org_id,
end) bxs
from m_inte_counter_detail a left join m_pub_org_stat_stt b on a.org_id = b.net_bank
group by org_id;
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Row Adapter
- Output: ('20180831'::text), a.org_id, (count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1::numeric ELSE NULL::numeric END)), (count(DISTINCT CASE WHEN ((a.is_type)::text = ANY ('{TTS,GXH}'::text[])) THEN a.term_code ELSE NULL::character varying END)), (sum(DISTINCT (CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.is_type)::text = ANY ('{TTS,GXH}'::text[]))) THEN a.term_code ELSE NULL::character varying END)::numeric)), (sum(DISTINCT (CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)::numeric)), a.org_id
+ Output: ('20180831'::text), a.org_id, (count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1 ELSE NULL::integer END)), (count(DISTINCT CASE WHEN ((a.is_type)::text = ANY ('{TTS,GXH}'::text[])) THEN a.term_code ELSE NULL::character varying END)), (sum(DISTINCT (CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.is_type)::text = ANY ('{TTS,GXH}'::text[]))) THEN a.term_code ELSE NULL::character varying END)::numeric)), (sum(DISTINCT (CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)::numeric)), a.org_id
-> Vector Sort Aggregate
- Output: '20180831'::text, a.org_id, count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1::numeric ELSE NULL::numeric END), count(DISTINCT CASE WHEN ((a.is_type)::text = ANY ('{TTS,GXH}'::text[])) THEN a.term_code ELSE NULL::character varying END), sum(DISTINCT (CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.is_type)::text = ANY ('{TTS,GXH}'::text[]))) THEN a.term_code ELSE NULL::character varying END)::numeric), sum(DISTINCT (CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)::numeric), a.org_id
+ Output: '20180831'::text, a.org_id, count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1 ELSE NULL::integer END), count(DISTINCT CASE WHEN ((a.is_type)::text = ANY ('{TTS,GXH}'::text[])) THEN a.term_code ELSE NULL::character varying END), sum(DISTINCT (CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.is_type)::text = ANY ('{TTS,GXH}'::text[]))) THEN a.term_code ELSE NULL::character varying END)::numeric), sum(DISTINCT (CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)::numeric), a.org_id
Group By Key: a.org_id
-> Vector Sort
Output: a.org_id, a.rpt_dt, a.ywbh, a.is_type, a.term_code
@@ -425,12 +425,12 @@ select '20180831' rpt_Dt, a.org_id,
from m_inte_counter_detail a left join m_pub_org_stat_stt b on a.org_id = b.net_bank
group by org_id
having count(distinct a.is_type) > 100;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Row Adapter
- Output: ('20180831'::text), a.org_id, (count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1::numeric ELSE NULL::numeric END)), (sum(DISTINCT (CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)::numeric)), a.org_id
+ Output: ('20180831'::text), a.org_id, (count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1 ELSE NULL::integer END)), (sum(DISTINCT (CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)::numeric)), a.org_id
-> Vector Sort Aggregate
- Output: '20180831'::text, a.org_id, count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1::numeric ELSE NULL::numeric END), sum(DISTINCT (CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)::numeric), a.org_id
+ Output: '20180831'::text, a.org_id, count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1 ELSE NULL::integer END), sum(DISTINCT (CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)::numeric), a.org_id
Group By Key: a.org_id
Filter: (count(DISTINCT a.is_type) > 100)
-> Vector Sort
@@ -460,12 +460,12 @@ select '20180831' rpt_Dt, org_id,
from m_inte_counter_detail a left join m_pub_org_stat_stt b on a.org_id = b.net_bank
group by a.org_id
having sum(distinct a.is_type) + avg(distinct org_id)> 100;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Row Adapter
- Output: ('20180831'::text), a.org_id, (count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1::numeric ELSE NULL::numeric END)), (sum(DISTINCT (CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)::numeric)), a.org_id
+ Output: ('20180831'::text), a.org_id, (count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1 ELSE NULL::integer END)), (sum(DISTINCT (CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)::numeric)), a.org_id
-> Vector Sort Aggregate
- Output: '20180831'::text, a.org_id, count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1::numeric ELSE NULL::numeric END), sum(DISTINCT (CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)::numeric), a.org_id
+ Output: '20180831'::text, a.org_id, count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1 ELSE NULL::integer END), sum(DISTINCT (CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)::numeric), a.org_id
Group By Key: a.org_id
Filter: ((sum(DISTINCT (a.is_type)::numeric) + avg(DISTINCT (a.org_id)::numeric)) > 100::numeric)
-> Vector Sort
@@ -496,12 +496,12 @@ select '20180831' rpt_Dt, org_id,
group by a.org_id
having sum(distinct a.is_type) + avg(distinct org_id)> 100
order by 2;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Row Adapter
- Output: ('20180831'::text), a.org_id, (count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1::numeric ELSE NULL::numeric END)), (count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)), a.org_id
+ Output: ('20180831'::text), a.org_id, (count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1 ELSE NULL::integer END)), (count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)), a.org_id
-> Vector Sort Aggregate
- Output: '20180831'::text, a.org_id, count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1::numeric ELSE NULL::numeric END), count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END), a.org_id
+ Output: '20180831'::text, a.org_id, count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1 ELSE NULL::integer END), count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END), a.org_id
Group By Key: a.org_id
Filter: ((sum(DISTINCT (a.is_type)::numeric) + avg(DISTINCT (a.org_id)::numeric)) > 100::numeric)
-> Vector Sort
@@ -532,12 +532,12 @@ select '20180831' rpt_Dt, org_id,
group by a.org_id
having sum(distinct a.is_type) + avg(distinct (a.org_id + org_id))> 100
order by 2;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Row Adapter
- Output: ('20180831'::text), a.org_id, (count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1::numeric ELSE NULL::numeric END)), (count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)), a.org_id
+ Output: ('20180831'::text), a.org_id, (count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1 ELSE NULL::integer END)), (count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)), a.org_id
-> Vector Sort Aggregate
- Output: '20180831'::text, a.org_id, count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1::numeric ELSE NULL::numeric END), count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END), a.org_id
+ Output: '20180831'::text, a.org_id, count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1 ELSE NULL::integer END), count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END), a.org_id
Group By Key: a.org_id
Filter: ((sum(DISTINCT (a.is_type)::numeric) + avg(DISTINCT ((a.org_id)::numeric + (a.org_id)::numeric))) > 100::numeric)
-> Vector Sort
@@ -570,17 +570,17 @@ select '20180831' rpt_Dt, org_id,
group by cube(a.org_id)
having sum(distinct a.is_type) + avg(distinct (a.org_id + org_id))> 100
order by 2;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Row Adapter
- Output: ('20180831'::text), a.org_id, (count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1::numeric ELSE NULL::numeric END)), (count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)), (GROUPING(a.org_id)), (rank() OVER (PARTITION BY a.org_id)), a.org_id
+ Output: ('20180831'::text), a.org_id, (count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1 ELSE NULL::integer END)), (count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)), (GROUPING(a.org_id)), (rank() OVER (PARTITION BY a.org_id)), a.org_id
-> Vector WindowAgg
- Output: '20180831'::text, a.org_id, (count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1::numeric ELSE NULL::numeric END)), (count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)), (GROUPING(a.org_id)), rank() OVER (PARTITION BY a.org_id), a.org_id
+ Output: '20180831'::text, a.org_id, (count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1 ELSE NULL::integer END)), (count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)), (GROUPING(a.org_id)), rank() OVER (PARTITION BY a.org_id), a.org_id
-> Vector Sort
- Output: a.org_id, a.org_id, (count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1::numeric ELSE NULL::numeric END)), (count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)), (GROUPING(a.org_id))
+ Output: a.org_id, a.org_id, (count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1 ELSE NULL::integer END)), (count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END)), (GROUPING(a.org_id))
Sort Key: a.org_id
-> Vector Sort Aggregate
- Output: a.org_id, a.org_id, count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1::numeric ELSE NULL::numeric END), count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END), GROUPING(a.org_id)
+ Output: a.org_id, a.org_id, count(CASE WHEN (((a.rpt_dt)::text = '20180831'::text) AND ((a.ywbh)::text = ANY ('{223,478,819,886}'::text[]))) THEN 1 ELSE NULL::integer END), count(DISTINCT CASE WHEN ((a.is_type)::text = 'BXS'::text) THEN a.term_code ELSE NULL::character varying END), GROUPING(a.org_id)
Group By Key: a.org_id
Group By Key: ()
Filter: ((sum(DISTINCT (a.is_type)::numeric) + avg(DISTINCT ((a.org_id)::numeric + (a.org_id)::numeric))) > 100::numeric)
diff --git a/src/test/regress/expected/create_schema2.out b/src/test/regress/expected/create_schema2.out
index e0057e9f4..711efc02d 100644
--- a/src/test/regress/expected/create_schema2.out
+++ b/src/test/regress/expected/create_schema2.out
@@ -4,10 +4,10 @@ declare
begin
select rolname into var_name from pg_authid where oid=10;
- query_str := 'create schema ' || var_name;
+ query_str := 'create schema "' || var_name || '"';
EXECUTE IMMEDIATE query_str;
- query_str := 'drop schema ' || var_name ||' CASCADE';
+ query_str := 'drop schema "' || var_name ||'" CASCADE';
EXECUTE IMMEDIATE query_str;
end;
/
@@ -21,10 +21,10 @@ declare
begin
select rolname into var_name from pg_authid where oid=10;
- query_str := 'create schema authorization ' || var_name;
+ query_str := 'create schema authorization "' || var_name || '"';
EXECUTE IMMEDIATE query_str;
- query_str := 'drop schema ' || var_name ||' CASCADE';
+ query_str := 'drop schema "' || var_name ||'" CASCADE';
EXECUTE IMMEDIATE query_str;
end;
/
@@ -38,10 +38,10 @@ declare
begin
select rolname into var_name from pg_authid where oid=10;
- query_str := 'create schema ' || var_name ||'_123';
+ query_str := 'create schema "' || var_name ||'_123"';
EXECUTE IMMEDIATE query_str;
- query_str := 'drop schema ' || var_name || '_123 CASCADE';
+ query_str := 'drop schema "' || var_name || '_123" CASCADE';
EXECUTE IMMEDIATE query_str;
end;
/
diff --git a/src/test/regress/expected/decode_compatible_with_o.out b/src/test/regress/expected/decode_compatible_with_o.out
old mode 100644
new mode 100755
index 620d8c141..07fcdd555
--- a/src/test/regress/expected/decode_compatible_with_o.out
+++ b/src/test/regress/expected/decode_compatible_with_o.out
@@ -41,128 +41,128 @@ insert into tb_test values(
);
-- convert to bool
select decode(1, 2, c_bool, c_int1) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and tinyint cannot be matched
+ERROR: DECODE types boolean and tinyint cannot be matched
LINE 1: select decode(1, 2, c_bool, c_int1) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_int2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and smallint cannot be matched
+ERROR: DECODE types boolean and smallint cannot be matched
LINE 1: select decode(1, 2, c_bool, c_int2) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_int4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and integer cannot be matched
+ERROR: DECODE types boolean and integer cannot be matched
LINE 1: select decode(1, 2, c_bool, c_int4) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_int8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and bigint cannot be matched
+ERROR: DECODE types boolean and bigint cannot be matched
LINE 1: select decode(1, 2, c_bool, c_int8) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_float4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and real cannot be matched
+ERROR: DECODE types boolean and real cannot be matched
LINE 1: select decode(1, 2, c_bool, c_float4) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_float8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and double precision cannot be matched
+ERROR: DECODE types boolean and double precision cannot be matched
LINE 1: select decode(1, 2, c_bool, c_float8) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_numeric) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and numeric cannot be matched
+ERROR: DECODE types boolean and numeric cannot be matched
LINE 1: select decode(1, 2, c_bool, c_numeric) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_money) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types money and boolean cannot be matched
+ERROR: DECODE types money and boolean cannot be matched
LINE 1: select decode(1, 2, c_bool, c_money) as result, pg_typeof(re...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_char) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and character cannot be matched
+ERROR: DECODE types boolean and character cannot be matched
LINE 1: select decode(1, 2, c_bool, c_char) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_bpchar) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and character cannot be matched
+ERROR: DECODE types boolean and character cannot be matched
LINE 1: select decode(1, 2, c_bool, c_bpchar) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_varchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and character varying cannot be matched
+ERROR: DECODE types boolean and character varying cannot be matched
LINE 1: select decode(1, 2, c_bool, c_varchar2) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_nvarchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and nvarchar2 cannot be matched
+ERROR: DECODE types boolean and nvarchar2 cannot be matched
LINE 1: select decode(1, 2, c_bool, c_nvarchar2) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_text) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and text cannot be matched
+ERROR: DECODE types boolean and text cannot be matched
LINE 1: select decode(1, 2, c_bool, c_text) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_char2number_success) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and text cannot be matched
+ERROR: DECODE types boolean and text cannot be matched
LINE 1: select decode(1, 2, c_bool, c_char2number_success) as result...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and raw cannot be matched
+ERROR: DECODE types boolean and raw cannot be matched
LINE 1: select decode(1, 2, c_bool, c_raw) as result, pg_typeof(resu...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_date) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and timestamp without time zone cannot be matched
+ERROR: DECODE types boolean and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_bool, c_date) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_time) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and time without time zone cannot be matched
+ERROR: DECODE types boolean and time without time zone cannot be matched
LINE 1: select decode(1, 2, c_bool, c_time) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_timetz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and time with time zone cannot be matched
+ERROR: DECODE types boolean and time with time zone cannot be matched
LINE 1: select decode(1, 2, c_bool, c_timetz) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_timestamp) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and timestamp without time zone cannot be matched
+ERROR: DECODE types boolean and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_bool, c_timestamp) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_timestamptz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and timestamp with time zone cannot be matched
+ERROR: DECODE types boolean and timestamp with time zone cannot be matched
LINE 1: select decode(1, 2, c_bool, c_timestamptz) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_smalldatetime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and smalldatetime cannot be matched
+ERROR: DECODE types boolean and smalldatetime cannot be matched
LINE 1: select decode(1, 2, c_bool, c_smalldatetime) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_interval) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and interval cannot be matched
+ERROR: DECODE types boolean and interval cannot be matched
LINE 1: select decode(1, 2, c_bool, c_interval) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_reltime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and reltime cannot be matched
+ERROR: DECODE types boolean and reltime cannot be matched
LINE 1: select decode(1, 2, c_bool, c_reltime) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_bool, c_abstime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and abstime cannot be matched
+ERROR: DECODE types boolean and abstime cannot be matched
LINE 1: select decode(1, 2, c_bool, c_abstime) as result, pg_typeof(...
^
CONTEXT: referenced column: result
-- convert to int1
select decode(1, 2, c_int1, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types tinyint and boolean cannot be matched
+ERROR: DECODE types tinyint and boolean cannot be matched
LINE 1: select decode(1, 2, c_int1, c_bool) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
@@ -229,58 +229,58 @@ select decode(1, 2, c_int1, c_char2number_success) as result, pg_typeof(result)
(1 row)
select decode(1, 2, c_int1, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types tinyint and raw cannot be matched
+ERROR: DECODE types tinyint and raw cannot be matched
LINE 1: select decode(1, 2, c_int1, c_raw) as result, pg_typeof(resu...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int1, c_date) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types tinyint and timestamp without time zone cannot be matched
+ERROR: DECODE types tinyint and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_int1, c_date) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int1, c_time) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types tinyint and time without time zone cannot be matched
+ERROR: DECODE types tinyint and time without time zone cannot be matched
LINE 1: select decode(1, 2, c_int1, c_time) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int1, c_timetz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types tinyint and time with time zone cannot be matched
+ERROR: DECODE types tinyint and time with time zone cannot be matched
LINE 1: select decode(1, 2, c_int1, c_timetz) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int1, c_timestamp) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types tinyint and timestamp without time zone cannot be matched
+ERROR: DECODE types tinyint and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_int1, c_timestamp) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int1, c_timestamptz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types tinyint and timestamp with time zone cannot be matched
+ERROR: DECODE types tinyint and timestamp with time zone cannot be matched
LINE 1: select decode(1, 2, c_int1, c_timestamptz) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int1, c_smalldatetime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types tinyint and smalldatetime cannot be matched
+ERROR: DECODE types tinyint and smalldatetime cannot be matched
LINE 1: select decode(1, 2, c_int1, c_smalldatetime) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int1, c_interval) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types tinyint and interval cannot be matched
+ERROR: DECODE types tinyint and interval cannot be matched
LINE 1: select decode(1, 2, c_int1, c_interval) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int1, c_reltime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types tinyint and reltime cannot be matched
+ERROR: DECODE types tinyint and reltime cannot be matched
LINE 1: select decode(1, 2, c_int1, c_reltime) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int1, c_abstime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types tinyint and abstime cannot be matched
+ERROR: DECODE types tinyint and abstime cannot be matched
LINE 1: select decode(1, 2, c_int1, c_abstime) as result, pg_typeof(...
^
CONTEXT: referenced column: result
-- convert to int2
select decode(1, 2, c_int2, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smallint and boolean cannot be matched
+ERROR: DECODE types smallint and boolean cannot be matched
LINE 1: select decode(1, 2, c_int2, c_bool) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
@@ -347,58 +347,58 @@ select decode(1, 2, c_int2, c_char2number_success) as result, pg_typeof(result)
(1 row)
select decode(1, 2, c_int2, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smallint and raw cannot be matched
+ERROR: DECODE types smallint and raw cannot be matched
LINE 1: select decode(1, 2, c_int2, c_raw) as result, pg_typeof(resu...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int2, c_date) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smallint and timestamp without time zone cannot be matched
+ERROR: DECODE types smallint and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_int2, c_date) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int2, c_time) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smallint and time without time zone cannot be matched
+ERROR: DECODE types smallint and time without time zone cannot be matched
LINE 1: select decode(1, 2, c_int2, c_time) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int2, c_timetz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smallint and time with time zone cannot be matched
+ERROR: DECODE types smallint and time with time zone cannot be matched
LINE 1: select decode(1, 2, c_int2, c_timetz) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int2, c_timestamp) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smallint and timestamp without time zone cannot be matched
+ERROR: DECODE types smallint and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_int2, c_timestamp) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int2, c_timestamptz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smallint and timestamp with time zone cannot be matched
+ERROR: DECODE types smallint and timestamp with time zone cannot be matched
LINE 1: select decode(1, 2, c_int2, c_timestamptz) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int2, c_smalldatetime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smallint and smalldatetime cannot be matched
+ERROR: DECODE types smallint and smalldatetime cannot be matched
LINE 1: select decode(1, 2, c_int2, c_smalldatetime) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int2, c_interval) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smallint and interval cannot be matched
+ERROR: DECODE types smallint and interval cannot be matched
LINE 1: select decode(1, 2, c_int2, c_interval) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int2, c_reltime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smallint and reltime cannot be matched
+ERROR: DECODE types smallint and reltime cannot be matched
LINE 1: select decode(1, 2, c_int2, c_reltime) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int2, c_abstime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smallint and abstime cannot be matched
+ERROR: DECODE types smallint and abstime cannot be matched
LINE 1: select decode(1, 2, c_int2, c_abstime) as result, pg_typeof(...
^
CONTEXT: referenced column: result
-- convert to int4
select decode(1, 2, c_int4, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types integer and boolean cannot be matched
+ERROR: DECODE types integer and boolean cannot be matched
LINE 1: select decode(1, 2, c_int4, c_bool) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
@@ -465,58 +465,58 @@ select decode(1, 2, c_int4, c_char2number_success) as result, pg_typeof(result)
(1 row)
select decode(1, 2, c_int4, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types integer and raw cannot be matched
+ERROR: DECODE types integer and raw cannot be matched
LINE 1: select decode(1, 2, c_int4, c_raw) as result, pg_typeof(resu...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int4, c_date) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types integer and timestamp without time zone cannot be matched
+ERROR: DECODE types integer and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_int4, c_date) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int4, c_time) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types integer and time without time zone cannot be matched
+ERROR: DECODE types integer and time without time zone cannot be matched
LINE 1: select decode(1, 2, c_int4, c_time) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int4, c_timetz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types integer and time with time zone cannot be matched
+ERROR: DECODE types integer and time with time zone cannot be matched
LINE 1: select decode(1, 2, c_int4, c_timetz) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int4, c_timestamp) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types integer and timestamp without time zone cannot be matched
+ERROR: DECODE types integer and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_int4, c_timestamp) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int4, c_timestamptz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types integer and timestamp with time zone cannot be matched
+ERROR: DECODE types integer and timestamp with time zone cannot be matched
LINE 1: select decode(1, 2, c_int4, c_timestamptz) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int4, c_smalldatetime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types integer and smalldatetime cannot be matched
+ERROR: DECODE types integer and smalldatetime cannot be matched
LINE 1: select decode(1, 2, c_int4, c_smalldatetime) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int4, c_interval) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types integer and interval cannot be matched
+ERROR: DECODE types integer and interval cannot be matched
LINE 1: select decode(1, 2, c_int4, c_interval) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int4, c_reltime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types integer and reltime cannot be matched
+ERROR: DECODE types integer and reltime cannot be matched
LINE 1: select decode(1, 2, c_int4, c_reltime) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int4, c_abstime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types integer and abstime cannot be matched
+ERROR: DECODE types integer and abstime cannot be matched
LINE 1: select decode(1, 2, c_int4, c_abstime) as result, pg_typeof(...
^
CONTEXT: referenced column: result
-- convert to int8
select decode(1, 2, c_int8, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types bigint and boolean cannot be matched
+ERROR: DECODE types bigint and boolean cannot be matched
LINE 1: select decode(1, 2, c_int8, c_bool) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
@@ -583,58 +583,58 @@ select decode(1, 2, c_int8, c_char2number_success) as result, pg_typeof(result)
(1 row)
select decode(1, 2, c_int8, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types bigint and raw cannot be matched
+ERROR: DECODE types bigint and raw cannot be matched
LINE 1: select decode(1, 2, c_int8, c_raw) as result, pg_typeof(resu...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int8, c_date) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types bigint and timestamp without time zone cannot be matched
+ERROR: DECODE types bigint and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_int8, c_date) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int8, c_time) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types bigint and time without time zone cannot be matched
+ERROR: DECODE types bigint and time without time zone cannot be matched
LINE 1: select decode(1, 2, c_int8, c_time) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int8, c_timetz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types bigint and time with time zone cannot be matched
+ERROR: DECODE types bigint and time with time zone cannot be matched
LINE 1: select decode(1, 2, c_int8, c_timetz) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int8, c_timestamp) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types bigint and timestamp without time zone cannot be matched
+ERROR: DECODE types bigint and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_int8, c_timestamp) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int8, c_timestamptz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types bigint and timestamp with time zone cannot be matched
+ERROR: DECODE types bigint and timestamp with time zone cannot be matched
LINE 1: select decode(1, 2, c_int8, c_timestamptz) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int8, c_smalldatetime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types bigint and smalldatetime cannot be matched
+ERROR: DECODE types bigint and smalldatetime cannot be matched
LINE 1: select decode(1, 2, c_int8, c_smalldatetime) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int8, c_interval) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types bigint and interval cannot be matched
+ERROR: DECODE types bigint and interval cannot be matched
LINE 1: select decode(1, 2, c_int8, c_interval) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int8, c_reltime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types bigint and reltime cannot be matched
+ERROR: DECODE types bigint and reltime cannot be matched
LINE 1: select decode(1, 2, c_int8, c_reltime) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_int8, c_abstime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types bigint and abstime cannot be matched
+ERROR: DECODE types bigint and abstime cannot be matched
LINE 1: select decode(1, 2, c_int8, c_abstime) as result, pg_typeof(...
^
CONTEXT: referenced column: result
-- convert to float4
select decode(1, 2, c_float4, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types real and boolean cannot be matched
+ERROR: DECODE types real and boolean cannot be matched
LINE 1: select decode(1, 2, c_float4, c_bool) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
@@ -701,58 +701,58 @@ select decode(1, 2, c_float4, c_char2number_success) as result, pg_typeof(result
(1 row)
select decode(1, 2, c_float4, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types real and raw cannot be matched
+ERROR: DECODE types real and raw cannot be matched
LINE 1: select decode(1, 2, c_float4, c_raw) as result, pg_typeof(re...
^
CONTEXT: referenced column: result
select decode(1, 2, c_float4, c_date) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types real and timestamp without time zone cannot be matched
+ERROR: DECODE types real and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_float4, c_date) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_float4, c_time) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types real and time without time zone cannot be matched
+ERROR: DECODE types real and time without time zone cannot be matched
LINE 1: select decode(1, 2, c_float4, c_time) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_float4, c_timetz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types real and time with time zone cannot be matched
+ERROR: DECODE types real and time with time zone cannot be matched
LINE 1: select decode(1, 2, c_float4, c_timetz) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_float4, c_timestamp) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types real and timestamp without time zone cannot be matched
+ERROR: DECODE types real and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_float4, c_timestamp) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_float4, c_timestamptz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types real and timestamp with time zone cannot be matched
+ERROR: DECODE types real and timestamp with time zone cannot be matched
LINE 1: select decode(1, 2, c_float4, c_timestamptz) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_float4, c_smalldatetime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types real and smalldatetime cannot be matched
+ERROR: DECODE types real and smalldatetime cannot be matched
LINE 1: select decode(1, 2, c_float4, c_smalldatetime) as result, pg...
^
CONTEXT: referenced column: result
select decode(1, 2, c_float4, c_interval) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types real and interval cannot be matched
+ERROR: DECODE types real and interval cannot be matched
LINE 1: select decode(1, 2, c_float4, c_interval) as result, pg_type...
^
CONTEXT: referenced column: result
select decode(1, 2, c_float4, c_reltime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types real and reltime cannot be matched
+ERROR: DECODE types real and reltime cannot be matched
LINE 1: select decode(1, 2, c_float4, c_reltime) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_float4, c_abstime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types real and abstime cannot be matched
+ERROR: DECODE types real and abstime cannot be matched
LINE 1: select decode(1, 2, c_float4, c_abstime) as result, pg_typeo...
^
CONTEXT: referenced column: result
-- convert to float8
select decode(1, 2, c_float8, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types double precision and boolean cannot be matched
+ERROR: DECODE types double precision and boolean cannot be matched
LINE 1: select decode(1, 2, c_float8, c_bool) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
@@ -819,58 +819,58 @@ select decode(1, 2, c_float8, c_char2number_success) as result, pg_typeof(result
(1 row)
select decode(1, 2, c_float8, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types double precision and raw cannot be matched
+ERROR: DECODE types double precision and raw cannot be matched
LINE 1: select decode(1, 2, c_float8, c_raw) as result, pg_typeof(re...
^
CONTEXT: referenced column: result
select decode(1, 2, c_float8, c_date) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types double precision and timestamp without time zone cannot be matched
+ERROR: DECODE types double precision and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_float8, c_date) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_float8, c_time) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types double precision and time without time zone cannot be matched
+ERROR: DECODE types double precision and time without time zone cannot be matched
LINE 1: select decode(1, 2, c_float8, c_time) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_float8, c_timetz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types double precision and time with time zone cannot be matched
+ERROR: DECODE types double precision and time with time zone cannot be matched
LINE 1: select decode(1, 2, c_float8, c_timetz) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_float8, c_timestamp) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types double precision and timestamp without time zone cannot be matched
+ERROR: DECODE types double precision and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_float8, c_timestamp) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_float8, c_timestamptz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types double precision and timestamp with time zone cannot be matched
+ERROR: DECODE types double precision and timestamp with time zone cannot be matched
LINE 1: select decode(1, 2, c_float8, c_timestamptz) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_float8, c_smalldatetime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types double precision and smalldatetime cannot be matched
+ERROR: DECODE types double precision and smalldatetime cannot be matched
LINE 1: select decode(1, 2, c_float8, c_smalldatetime) as result, pg...
^
CONTEXT: referenced column: result
select decode(1, 2, c_float8, c_interval) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types double precision and interval cannot be matched
+ERROR: DECODE types double precision and interval cannot be matched
LINE 1: select decode(1, 2, c_float8, c_interval) as result, pg_type...
^
CONTEXT: referenced column: result
select decode(1, 2, c_float8, c_reltime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types double precision and reltime cannot be matched
+ERROR: DECODE types double precision and reltime cannot be matched
LINE 1: select decode(1, 2, c_float8, c_reltime) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_float8, c_abstime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types double precision and abstime cannot be matched
+ERROR: DECODE types double precision and abstime cannot be matched
LINE 1: select decode(1, 2, c_float8, c_abstime) as result, pg_typeo...
^
CONTEXT: referenced column: result
-- convert to numeric
select decode(1, 2, c_numeric, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types numeric and boolean cannot be matched
+ERROR: DECODE types numeric and boolean cannot be matched
LINE 1: select decode(1, 2, c_numeric, c_bool) as result, pg_typeof(...
^
CONTEXT: referenced column: result
@@ -937,58 +937,58 @@ select decode(1, 2, c_numeric, c_char2number_success) as result, pg_typeof(resul
(1 row)
select decode(1, 2, c_numeric, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types numeric and raw cannot be matched
+ERROR: DECODE types numeric and raw cannot be matched
LINE 1: select decode(1, 2, c_numeric, c_raw) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_numeric, c_date) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types numeric and timestamp without time zone cannot be matched
+ERROR: DECODE types numeric and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_numeric, c_date) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_numeric, c_time) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types numeric and time without time zone cannot be matched
+ERROR: DECODE types numeric and time without time zone cannot be matched
LINE 1: select decode(1, 2, c_numeric, c_time) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_numeric, c_timetz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types numeric and time with time zone cannot be matched
+ERROR: DECODE types numeric and time with time zone cannot be matched
LINE 1: select decode(1, 2, c_numeric, c_timetz) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_numeric, c_timestamp) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types numeric and timestamp without time zone cannot be matched
+ERROR: DECODE types numeric and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_numeric, c_timestamp) as result, pg_ty...
^
CONTEXT: referenced column: result
select decode(1, 2, c_numeric, c_timestamptz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types numeric and timestamp with time zone cannot be matched
+ERROR: DECODE types numeric and timestamp with time zone cannot be matched
LINE 1: select decode(1, 2, c_numeric, c_timestamptz) as result, pg_...
^
CONTEXT: referenced column: result
select decode(1, 2, c_numeric, c_smalldatetime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types numeric and smalldatetime cannot be matched
+ERROR: DECODE types numeric and smalldatetime cannot be matched
LINE 1: select decode(1, 2, c_numeric, c_smalldatetime) as result, p...
^
CONTEXT: referenced column: result
select decode(1, 2, c_numeric, c_interval) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types numeric and interval cannot be matched
+ERROR: DECODE types numeric and interval cannot be matched
LINE 1: select decode(1, 2, c_numeric, c_interval) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_numeric, c_reltime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types numeric and reltime cannot be matched
+ERROR: DECODE types numeric and reltime cannot be matched
LINE 1: select decode(1, 2, c_numeric, c_reltime) as result, pg_type...
^
CONTEXT: referenced column: result
select decode(1, 2, c_numeric, c_abstime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types numeric and abstime cannot be matched
+ERROR: DECODE types numeric and abstime cannot be matched
LINE 1: select decode(1, 2, c_numeric, c_abstime) as result, pg_type...
^
CONTEXT: referenced column: result
-- convert to money
select decode(1, 2, c_money, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types boolean and money cannot be matched
+ERROR: DECODE types boolean and money cannot be matched
LINE 1: select decode(1, 2, c_money, c_bool) as result, pg_typeof(re...
^
CONTEXT: referenced column: result
@@ -1058,58 +1058,58 @@ LINE 1: select decode(1, 2, c_money, c_char2number_success) as resul...
^
CONTEXT: referenced column: result
select decode(1, 2, c_money, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and money cannot be matched
+ERROR: DECODE types raw and money cannot be matched
LINE 1: select decode(1, 2, c_money, c_raw) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_money, c_date) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and money cannot be matched
+ERROR: DECODE types timestamp without time zone and money cannot be matched
LINE 1: select decode(1, 2, c_money, c_date) as result, pg_typeof(re...
^
CONTEXT: referenced column: result
select decode(1, 2, c_money, c_time) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time without time zone and money cannot be matched
+ERROR: DECODE types time without time zone and money cannot be matched
LINE 1: select decode(1, 2, c_money, c_time) as result, pg_typeof(re...
^
CONTEXT: referenced column: result
select decode(1, 2, c_money, c_timetz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time with time zone and money cannot be matched
+ERROR: DECODE types time with time zone and money cannot be matched
LINE 1: select decode(1, 2, c_money, c_timetz) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_money, c_timestamp) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and money cannot be matched
+ERROR: DECODE types timestamp without time zone and money cannot be matched
LINE 1: select decode(1, 2, c_money, c_timestamp) as result, pg_type...
^
CONTEXT: referenced column: result
select decode(1, 2, c_money, c_timestamptz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp with time zone and money cannot be matched
+ERROR: DECODE types timestamp with time zone and money cannot be matched
LINE 1: select decode(1, 2, c_money, c_timestamptz) as result, pg_ty...
^
CONTEXT: referenced column: result
select decode(1, 2, c_money, c_smalldatetime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smalldatetime and money cannot be matched
+ERROR: DECODE types smalldatetime and money cannot be matched
LINE 1: select decode(1, 2, c_money, c_smalldatetime) as result, pg_...
^
CONTEXT: referenced column: result
select decode(1, 2, c_money, c_interval) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and money cannot be matched
+ERROR: DECODE types interval and money cannot be matched
LINE 1: select decode(1, 2, c_money, c_interval) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_money, c_reltime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and money cannot be matched
+ERROR: DECODE types reltime and money cannot be matched
LINE 1: select decode(1, 2, c_money, c_reltime) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_money, c_abstime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types abstime and money cannot be matched
+ERROR: DECODE types abstime and money cannot be matched
LINE 1: select decode(1, 2, c_money, c_abstime) as result, pg_typeof...
^
CONTEXT: referenced column: result
-- convert to char
select decode(1, 2, c_char, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types character and boolean cannot be matched
+ERROR: DECODE types character and boolean cannot be matched
LINE 1: select decode(1, 2, c_char, c_bool) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
@@ -1191,7 +1191,7 @@ select decode(1, 2, c_char, c_char2number_success) as result, pg_typeof(result)
(1 row)
select decode(1, 2, c_char, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types character and raw cannot be matched
+ERROR: DECODE types character and raw cannot be matched
LINE 1: select decode(1, 2, c_char, c_raw) as result, pg_typeof(resu...
^
CONTEXT: referenced column: result
@@ -1242,7 +1242,7 @@ LINE 1: select decode(1, 2, c_char, c_abstime) as result, pg_typeof(...
CONTEXT: referenced column: result
-- convert to bpchar
select decode(1, 2, c_bpchar, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types character and boolean cannot be matched
+ERROR: DECODE types character and boolean cannot be matched
LINE 1: select decode(1, 2, c_bpchar, c_bool) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
@@ -1324,7 +1324,7 @@ select decode(1, 2, c_bpchar, c_char2number_success) as result, pg_typeof(result
(1 row)
select decode(1, 2, c_bpchar, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types character and raw cannot be matched
+ERROR: DECODE types character and raw cannot be matched
LINE 1: select decode(1, 2, c_bpchar, c_raw) as result, pg_typeof(re...
^
CONTEXT: referenced column: result
@@ -1375,7 +1375,7 @@ LINE 1: select decode(1, 2, c_bpchar, c_abstime) as result, pg_typeo...
CONTEXT: referenced column: result
-- convert to varchar2
select decode(1, 2, c_varchar2, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types character varying and boolean cannot be matched
+ERROR: DECODE types character varying and boolean cannot be matched
LINE 1: select decode(1, 2, c_varchar2, c_bool) as result, pg_typeof...
^
CONTEXT: referenced column: result
@@ -1513,7 +1513,7 @@ LINE 1: select decode(1, 2, c_varchar2, c_abstime) as result, pg_typ...
CONTEXT: referenced column: result
-- convert to nvarchar2
select decode(1, 2, c_nvarchar2, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types nvarchar2 and boolean cannot be matched
+ERROR: DECODE types nvarchar2 and boolean cannot be matched
LINE 1: select decode(1, 2, c_nvarchar2, c_bool) as result, pg_typeo...
^
CONTEXT: referenced column: result
@@ -1595,7 +1595,7 @@ select decode(1, 2, c_nvarchar2, c_char2number_success) as result, pg_typeof(res
(1 row)
select decode(1, 2, c_nvarchar2, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types nvarchar2 and raw cannot be matched
+ERROR: DECODE types nvarchar2 and raw cannot be matched
LINE 1: select decode(1, 2, c_nvarchar2, c_raw) as result, pg_typeof...
^
CONTEXT: referenced column: result
@@ -1649,7 +1649,7 @@ LINE 1: select decode(1, 2, c_nvarchar2, c_abstime) as result, pg_ty...
CONTEXT: referenced column: result
-- convert to text
select decode(1, 2, c_text, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types text and boolean cannot be matched
+ERROR: DECODE types text and boolean cannot be matched
LINE 1: select decode(1, 2, c_text, c_bool) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
@@ -1792,57 +1792,57 @@ select decode(1, 2, c_text, c_abstime) as result, pg_typeof(result) from tb_test
-- convert to raw
select decode(1, 2, c_raw, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and boolean cannot be matched
+ERROR: DECODE types raw and boolean cannot be matched
LINE 1: select decode(1, 2, c_raw, c_bool) as result, pg_typeof(resu...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_int1) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and tinyint cannot be matched
+ERROR: DECODE types raw and tinyint cannot be matched
LINE 1: select decode(1, 2, c_raw, c_int1) as result, pg_typeof(resu...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_int2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and smallint cannot be matched
+ERROR: DECODE types raw and smallint cannot be matched
LINE 1: select decode(1, 2, c_raw, c_int2) as result, pg_typeof(resu...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_int4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and integer cannot be matched
+ERROR: DECODE types raw and integer cannot be matched
LINE 1: select decode(1, 2, c_raw, c_int4) as result, pg_typeof(resu...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_int8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and bigint cannot be matched
+ERROR: DECODE types raw and bigint cannot be matched
LINE 1: select decode(1, 2, c_raw, c_int8) as result, pg_typeof(resu...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_float4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and real cannot be matched
+ERROR: DECODE types raw and real cannot be matched
LINE 1: select decode(1, 2, c_raw, c_float4) as result, pg_typeof(re...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_float8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and double precision cannot be matched
+ERROR: DECODE types raw and double precision cannot be matched
LINE 1: select decode(1, 2, c_raw, c_float8) as result, pg_typeof(re...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_numeric) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and numeric cannot be matched
+ERROR: DECODE types raw and numeric cannot be matched
LINE 1: select decode(1, 2, c_raw, c_numeric) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_money) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types money and raw cannot be matched
+ERROR: DECODE types money and raw cannot be matched
LINE 1: select decode(1, 2, c_raw, c_money) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_char) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and character cannot be matched
+ERROR: DECODE types raw and character cannot be matched
LINE 1: select decode(1, 2, c_raw, c_char) as result, pg_typeof(resu...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_bpchar) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and character cannot be matched
+ERROR: DECODE types raw and character cannot be matched
LINE 1: select decode(1, 2, c_raw, c_bpchar) as result, pg_typeof(re...
^
CONTEXT: referenced column: result
@@ -1850,7 +1850,7 @@ select decode(1, 2, c_raw, c_varchar2) as result, pg_typeof(result) from tb_test
ERROR: invalid hexadecimal digit: "v"
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_nvarchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and nvarchar2 cannot be matched
+ERROR: DECODE types raw and nvarchar2 cannot be matched
LINE 1: select decode(1, 2, c_raw, c_nvarchar2) as result, pg_typeof...
^
CONTEXT: referenced column: result
@@ -1865,128 +1865,128 @@ LINE 1: select decode(1, 2, c_raw, c_char2number_success) as result,...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_date) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and timestamp without time zone cannot be matched
+ERROR: DECODE types raw and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_raw, c_date) as result, pg_typeof(resu...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_time) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and time without time zone cannot be matched
+ERROR: DECODE types raw and time without time zone cannot be matched
LINE 1: select decode(1, 2, c_raw, c_time) as result, pg_typeof(resu...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_timetz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and time with time zone cannot be matched
+ERROR: DECODE types raw and time with time zone cannot be matched
LINE 1: select decode(1, 2, c_raw, c_timetz) as result, pg_typeof(re...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_timestamp) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and timestamp without time zone cannot be matched
+ERROR: DECODE types raw and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_raw, c_timestamp) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_timestamptz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and timestamp with time zone cannot be matched
+ERROR: DECODE types raw and timestamp with time zone cannot be matched
LINE 1: select decode(1, 2, c_raw, c_timestamptz) as result, pg_type...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_smalldatetime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and smalldatetime cannot be matched
+ERROR: DECODE types raw and smalldatetime cannot be matched
LINE 1: select decode(1, 2, c_raw, c_smalldatetime) as result, pg_ty...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_interval) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and interval cannot be matched
+ERROR: DECODE types raw and interval cannot be matched
LINE 1: select decode(1, 2, c_raw, c_interval) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_reltime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and reltime cannot be matched
+ERROR: DECODE types raw and reltime cannot be matched
LINE 1: select decode(1, 2, c_raw, c_reltime) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_raw, c_abstime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types raw and abstime cannot be matched
+ERROR: DECODE types raw and abstime cannot be matched
LINE 1: select decode(1, 2, c_raw, c_abstime) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
-- convert to date
select decode(1, 2, c_date, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and boolean cannot be matched
+ERROR: DECODE types timestamp without time zone and boolean cannot be matched
LINE 1: select decode(1, 2, c_date, c_bool) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_date, c_int1) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and tinyint cannot be matched
+ERROR: DECODE types timestamp without time zone and tinyint cannot be matched
LINE 1: select decode(1, 2, c_date, c_int1) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_date, c_int2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and smallint cannot be matched
+ERROR: DECODE types timestamp without time zone and smallint cannot be matched
LINE 1: select decode(1, 2, c_date, c_int2) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_date, c_int4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and integer cannot be matched
+ERROR: DECODE types timestamp without time zone and integer cannot be matched
LINE 1: select decode(1, 2, c_date, c_int4) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_date, c_int8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and bigint cannot be matched
+ERROR: DECODE types timestamp without time zone and bigint cannot be matched
LINE 1: select decode(1, 2, c_date, c_int8) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_date, c_float4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and real cannot be matched
+ERROR: DECODE types timestamp without time zone and real cannot be matched
LINE 1: select decode(1, 2, c_date, c_float4) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_date, c_float8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and double precision cannot be matched
+ERROR: DECODE types timestamp without time zone and double precision cannot be matched
LINE 1: select decode(1, 2, c_date, c_float8) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_date, c_numeric) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and numeric cannot be matched
+ERROR: DECODE types timestamp without time zone and numeric cannot be matched
LINE 1: select decode(1, 2, c_date, c_numeric) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_date, c_money) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types money and timestamp without time zone cannot be matched
+ERROR: DECODE types money and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_date, c_money) as result, pg_typeof(re...
^
CONTEXT: referenced column: result
select decode(1, 2, c_date, c_char) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and character cannot be matched
+ERROR: DECODE types timestamp without time zone and character cannot be matched
LINE 1: select decode(1, 2, c_date, c_char) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_date, c_bpchar) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and character cannot be matched
+ERROR: DECODE types timestamp without time zone and character cannot be matched
LINE 1: select decode(1, 2, c_date, c_bpchar) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_date, c_varchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and character varying cannot be matched
+ERROR: DECODE types timestamp without time zone and character varying cannot be matched
LINE 1: select decode(1, 2, c_date, c_varchar2) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_date, c_nvarchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and nvarchar2 cannot be matched
+ERROR: DECODE types timestamp without time zone and nvarchar2 cannot be matched
LINE 1: select decode(1, 2, c_date, c_nvarchar2) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_date, c_text) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and text cannot be matched
+ERROR: DECODE types timestamp without time zone and text cannot be matched
LINE 1: select decode(1, 2, c_date, c_text) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_date, c_char2number_success) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and text cannot be matched
+ERROR: DECODE types timestamp without time zone and text cannot be matched
LINE 1: select decode(1, 2, c_date, c_char2number_success) as result...
^
CONTEXT: referenced column: result
select decode(1, 2, c_date, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and raw cannot be matched
+ERROR: DECODE types timestamp without time zone and raw cannot be matched
LINE 1: select decode(1, 2, c_date, c_raw) as result, pg_typeof(resu...
^
CONTEXT: referenced column: result
@@ -2019,12 +2019,12 @@ select decode(1, 2, c_date, c_smalldatetime) as result, pg_typeof(result) from t
(1 row)
select decode(1, 2, c_date, c_interval) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and interval cannot be matched
+ERROR: DECODE types timestamp without time zone and interval cannot be matched
LINE 1: select decode(1, 2, c_date, c_interval) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_date, c_reltime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and reltime cannot be matched
+ERROR: DECODE types timestamp without time zone and reltime cannot be matched
LINE 1: select decode(1, 2, c_date, c_reltime) as result, pg_typeof(...
^
CONTEXT: referenced column: result
@@ -2036,82 +2036,82 @@ select decode(1, 2, c_date, c_abstime) as result, pg_typeof(result) from tb_test
-- convert to time
select decode(1, 2, c_time, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time without time zone and boolean cannot be matched
+ERROR: DECODE types time without time zone and boolean cannot be matched
LINE 1: select decode(1, 2, c_time, c_bool) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_time, c_int1) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time without time zone and tinyint cannot be matched
+ERROR: DECODE types time without time zone and tinyint cannot be matched
LINE 1: select decode(1, 2, c_time, c_int1) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_time, c_int2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time without time zone and smallint cannot be matched
+ERROR: DECODE types time without time zone and smallint cannot be matched
LINE 1: select decode(1, 2, c_time, c_int2) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_time, c_int4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time without time zone and integer cannot be matched
+ERROR: DECODE types time without time zone and integer cannot be matched
LINE 1: select decode(1, 2, c_time, c_int4) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_time, c_int8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time without time zone and bigint cannot be matched
+ERROR: DECODE types time without time zone and bigint cannot be matched
LINE 1: select decode(1, 2, c_time, c_int8) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_time, c_float4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time without time zone and real cannot be matched
+ERROR: DECODE types time without time zone and real cannot be matched
LINE 1: select decode(1, 2, c_time, c_float4) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_time, c_float8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time without time zone and double precision cannot be matched
+ERROR: DECODE types time without time zone and double precision cannot be matched
LINE 1: select decode(1, 2, c_time, c_float8) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_time, c_numeric) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time without time zone and numeric cannot be matched
+ERROR: DECODE types time without time zone and numeric cannot be matched
LINE 1: select decode(1, 2, c_time, c_numeric) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_time, c_money) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types money and time without time zone cannot be matched
+ERROR: DECODE types money and time without time zone cannot be matched
LINE 1: select decode(1, 2, c_time, c_money) as result, pg_typeof(re...
^
CONTEXT: referenced column: result
select decode(1, 2, c_time, c_char) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time without time zone and character cannot be matched
+ERROR: DECODE types time without time zone and character cannot be matched
LINE 1: select decode(1, 2, c_time, c_char) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_time, c_bpchar) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time without time zone and character cannot be matched
+ERROR: DECODE types time without time zone and character cannot be matched
LINE 1: select decode(1, 2, c_time, c_bpchar) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_time, c_varchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time without time zone and character varying cannot be matched
+ERROR: DECODE types time without time zone and character varying cannot be matched
LINE 1: select decode(1, 2, c_time, c_varchar2) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_time, c_nvarchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time without time zone and nvarchar2 cannot be matched
+ERROR: DECODE types time without time zone and nvarchar2 cannot be matched
LINE 1: select decode(1, 2, c_time, c_nvarchar2) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_time, c_text) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time without time zone and text cannot be matched
+ERROR: DECODE types time without time zone and text cannot be matched
LINE 1: select decode(1, 2, c_time, c_text) as result, pg_typeof(res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_time, c_char2number_success) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time without time zone and text cannot be matched
+ERROR: DECODE types time without time zone and text cannot be matched
LINE 1: select decode(1, 2, c_time, c_char2number_success) as result...
^
CONTEXT: referenced column: result
select decode(1, 2, c_time, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time without time zone and raw cannot be matched
+ERROR: DECODE types time without time zone and raw cannot be matched
LINE 1: select decode(1, 2, c_time, c_raw) as result, pg_typeof(resu...
^
CONTEXT: referenced column: result
@@ -2142,12 +2142,12 @@ LINE 1: select decode(1, 2, c_time, c_smalldatetime) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_time, c_interval) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time without time zone and interval cannot be matched
+ERROR: DECODE types time without time zone and interval cannot be matched
LINE 1: select decode(1, 2, c_time, c_interval) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_time, c_reltime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time without time zone and reltime cannot be matched
+ERROR: DECODE types time without time zone and reltime cannot be matched
LINE 1: select decode(1, 2, c_time, c_reltime) as result, pg_typeof(...
^
CONTEXT: referenced column: result
@@ -2158,82 +2158,82 @@ LINE 1: select decode(1, 2, c_time, c_abstime) as result, pg_typeof(...
CONTEXT: referenced column: result
-- convert to timetz
select decode(1, 2, c_timetz, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time with time zone and boolean cannot be matched
+ERROR: DECODE types time with time zone and boolean cannot be matched
LINE 1: select decode(1, 2, c_timetz, c_bool) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timetz, c_int1) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time with time zone and tinyint cannot be matched
+ERROR: DECODE types time with time zone and tinyint cannot be matched
LINE 1: select decode(1, 2, c_timetz, c_int1) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timetz, c_int2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time with time zone and smallint cannot be matched
+ERROR: DECODE types time with time zone and smallint cannot be matched
LINE 1: select decode(1, 2, c_timetz, c_int2) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timetz, c_int4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time with time zone and integer cannot be matched
+ERROR: DECODE types time with time zone and integer cannot be matched
LINE 1: select decode(1, 2, c_timetz, c_int4) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timetz, c_int8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time with time zone and bigint cannot be matched
+ERROR: DECODE types time with time zone and bigint cannot be matched
LINE 1: select decode(1, 2, c_timetz, c_int8) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timetz, c_float4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time with time zone and real cannot be matched
+ERROR: DECODE types time with time zone and real cannot be matched
LINE 1: select decode(1, 2, c_timetz, c_float4) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timetz, c_float8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time with time zone and double precision cannot be matched
+ERROR: DECODE types time with time zone and double precision cannot be matched
LINE 1: select decode(1, 2, c_timetz, c_float8) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timetz, c_numeric) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time with time zone and numeric cannot be matched
+ERROR: DECODE types time with time zone and numeric cannot be matched
LINE 1: select decode(1, 2, c_timetz, c_numeric) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timetz, c_money) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types money and time with time zone cannot be matched
+ERROR: DECODE types money and time with time zone cannot be matched
LINE 1: select decode(1, 2, c_timetz, c_money) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timetz, c_char) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time with time zone and character cannot be matched
+ERROR: DECODE types time with time zone and character cannot be matched
LINE 1: select decode(1, 2, c_timetz, c_char) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timetz, c_bpchar) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time with time zone and character cannot be matched
+ERROR: DECODE types time with time zone and character cannot be matched
LINE 1: select decode(1, 2, c_timetz, c_bpchar) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timetz, c_varchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time with time zone and character varying cannot be matched
+ERROR: DECODE types time with time zone and character varying cannot be matched
LINE 1: select decode(1, 2, c_timetz, c_varchar2) as result, pg_type...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timetz, c_nvarchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time with time zone and nvarchar2 cannot be matched
+ERROR: DECODE types time with time zone and nvarchar2 cannot be matched
LINE 1: select decode(1, 2, c_timetz, c_nvarchar2) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timetz, c_text) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time with time zone and text cannot be matched
+ERROR: DECODE types time with time zone and text cannot be matched
LINE 1: select decode(1, 2, c_timetz, c_text) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timetz, c_char2number_success) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time with time zone and text cannot be matched
+ERROR: DECODE types time with time zone and text cannot be matched
LINE 1: select decode(1, 2, c_timetz, c_char2number_success) as resu...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timetz, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time with time zone and raw cannot be matched
+ERROR: DECODE types time with time zone and raw cannot be matched
LINE 1: select decode(1, 2, c_timetz, c_raw) as result, pg_typeof(re...
^
CONTEXT: referenced column: result
@@ -2264,12 +2264,12 @@ LINE 1: select decode(1, 2, c_timetz, c_smalldatetime) as result, pg...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timetz, c_interval) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time with time zone and interval cannot be matched
+ERROR: DECODE types time with time zone and interval cannot be matched
LINE 1: select decode(1, 2, c_timetz, c_interval) as result, pg_type...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timetz, c_reltime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types time with time zone and reltime cannot be matched
+ERROR: DECODE types time with time zone and reltime cannot be matched
LINE 1: select decode(1, 2, c_timetz, c_reltime) as result, pg_typeo...
^
CONTEXT: referenced column: result
@@ -2280,82 +2280,82 @@ LINE 1: select decode(1, 2, c_timetz, c_abstime) as result, pg_typeo...
CONTEXT: referenced column: result
-- convert to timestamp
select decode(1, 2, c_timestamp, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and boolean cannot be matched
+ERROR: DECODE types timestamp without time zone and boolean cannot be matched
LINE 1: select decode(1, 2, c_timestamp, c_bool) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamp, c_int1) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and tinyint cannot be matched
+ERROR: DECODE types timestamp without time zone and tinyint cannot be matched
LINE 1: select decode(1, 2, c_timestamp, c_int1) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamp, c_int2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and smallint cannot be matched
+ERROR: DECODE types timestamp without time zone and smallint cannot be matched
LINE 1: select decode(1, 2, c_timestamp, c_int2) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamp, c_int4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and integer cannot be matched
+ERROR: DECODE types timestamp without time zone and integer cannot be matched
LINE 1: select decode(1, 2, c_timestamp, c_int4) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamp, c_int8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and bigint cannot be matched
+ERROR: DECODE types timestamp without time zone and bigint cannot be matched
LINE 1: select decode(1, 2, c_timestamp, c_int8) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamp, c_float4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and real cannot be matched
+ERROR: DECODE types timestamp without time zone and real cannot be matched
LINE 1: select decode(1, 2, c_timestamp, c_float4) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamp, c_float8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and double precision cannot be matched
+ERROR: DECODE types timestamp without time zone and double precision cannot be matched
LINE 1: select decode(1, 2, c_timestamp, c_float8) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamp, c_numeric) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and numeric cannot be matched
+ERROR: DECODE types timestamp without time zone and numeric cannot be matched
LINE 1: select decode(1, 2, c_timestamp, c_numeric) as result, pg_ty...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamp, c_money) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types money and timestamp without time zone cannot be matched
+ERROR: DECODE types money and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_timestamp, c_money) as result, pg_type...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamp, c_char) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and character cannot be matched
+ERROR: DECODE types timestamp without time zone and character cannot be matched
LINE 1: select decode(1, 2, c_timestamp, c_char) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamp, c_bpchar) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and character cannot be matched
+ERROR: DECODE types timestamp without time zone and character cannot be matched
LINE 1: select decode(1, 2, c_timestamp, c_bpchar) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamp, c_varchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and character varying cannot be matched
+ERROR: DECODE types timestamp without time zone and character varying cannot be matched
LINE 1: select decode(1, 2, c_timestamp, c_varchar2) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamp, c_nvarchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and nvarchar2 cannot be matched
+ERROR: DECODE types timestamp without time zone and nvarchar2 cannot be matched
LINE 1: select decode(1, 2, c_timestamp, c_nvarchar2) as result, pg_...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamp, c_text) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and text cannot be matched
+ERROR: DECODE types timestamp without time zone and text cannot be matched
LINE 1: select decode(1, 2, c_timestamp, c_text) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamp, c_char2number_success) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and text cannot be matched
+ERROR: DECODE types timestamp without time zone and text cannot be matched
LINE 1: select decode(1, 2, c_timestamp, c_char2number_success) as r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamp, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and raw cannot be matched
+ERROR: DECODE types timestamp without time zone and raw cannot be matched
LINE 1: select decode(1, 2, c_timestamp, c_raw) as result, pg_typeof...
^
CONTEXT: referenced column: result
@@ -2388,12 +2388,12 @@ select decode(1, 2, c_timestamp, c_smalldatetime) as result, pg_typeof(result) f
(1 row)
select decode(1, 2, c_timestamp, c_interval) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and interval cannot be matched
+ERROR: DECODE types timestamp without time zone and interval cannot be matched
LINE 1: select decode(1, 2, c_timestamp, c_interval) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamp, c_reltime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp without time zone and reltime cannot be matched
+ERROR: DECODE types timestamp without time zone and reltime cannot be matched
LINE 1: select decode(1, 2, c_timestamp, c_reltime) as result, pg_ty...
^
CONTEXT: referenced column: result
@@ -2405,82 +2405,82 @@ select decode(1, 2, c_timestamp, c_abstime) as result, pg_typeof(result) from tb
-- convert to timestamptz
select decode(1, 2, c_timestamptz, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp with time zone and boolean cannot be matched
+ERROR: DECODE types timestamp with time zone and boolean cannot be matched
LINE 1: select decode(1, 2, c_timestamptz, c_bool) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamptz, c_int1) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp with time zone and tinyint cannot be matched
+ERROR: DECODE types timestamp with time zone and tinyint cannot be matched
LINE 1: select decode(1, 2, c_timestamptz, c_int1) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamptz, c_int2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp with time zone and smallint cannot be matched
+ERROR: DECODE types timestamp with time zone and smallint cannot be matched
LINE 1: select decode(1, 2, c_timestamptz, c_int2) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamptz, c_int4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp with time zone and integer cannot be matched
+ERROR: DECODE types timestamp with time zone and integer cannot be matched
LINE 1: select decode(1, 2, c_timestamptz, c_int4) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamptz, c_int8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp with time zone and bigint cannot be matched
+ERROR: DECODE types timestamp with time zone and bigint cannot be matched
LINE 1: select decode(1, 2, c_timestamptz, c_int8) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamptz, c_float4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp with time zone and real cannot be matched
+ERROR: DECODE types timestamp with time zone and real cannot be matched
LINE 1: select decode(1, 2, c_timestamptz, c_float4) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamptz, c_float8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp with time zone and double precision cannot be matched
+ERROR: DECODE types timestamp with time zone and double precision cannot be matched
LINE 1: select decode(1, 2, c_timestamptz, c_float8) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamptz, c_numeric) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp with time zone and numeric cannot be matched
+ERROR: DECODE types timestamp with time zone and numeric cannot be matched
LINE 1: select decode(1, 2, c_timestamptz, c_numeric) as result, pg_...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamptz, c_money) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types money and timestamp with time zone cannot be matched
+ERROR: DECODE types money and timestamp with time zone cannot be matched
LINE 1: select decode(1, 2, c_timestamptz, c_money) as result, pg_ty...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamptz, c_char) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp with time zone and character cannot be matched
+ERROR: DECODE types timestamp with time zone and character cannot be matched
LINE 1: select decode(1, 2, c_timestamptz, c_char) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamptz, c_bpchar) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp with time zone and character cannot be matched
+ERROR: DECODE types timestamp with time zone and character cannot be matched
LINE 1: select decode(1, 2, c_timestamptz, c_bpchar) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamptz, c_varchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp with time zone and character varying cannot be matched
+ERROR: DECODE types timestamp with time zone and character varying cannot be matched
LINE 1: select decode(1, 2, c_timestamptz, c_varchar2) as result, pg...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamptz, c_nvarchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp with time zone and nvarchar2 cannot be matched
+ERROR: DECODE types timestamp with time zone and nvarchar2 cannot be matched
LINE 1: select decode(1, 2, c_timestamptz, c_nvarchar2) as result, p...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamptz, c_text) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp with time zone and text cannot be matched
+ERROR: DECODE types timestamp with time zone and text cannot be matched
LINE 1: select decode(1, 2, c_timestamptz, c_text) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamptz, c_char2number_success) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp with time zone and text cannot be matched
+ERROR: DECODE types timestamp with time zone and text cannot be matched
LINE 1: select decode(1, 2, c_timestamptz, c_char2number_success) as...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamptz, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp with time zone and raw cannot be matched
+ERROR: DECODE types timestamp with time zone and raw cannot be matched
LINE 1: select decode(1, 2, c_timestamptz, c_raw) as result, pg_type...
^
CONTEXT: referenced column: result
@@ -2513,12 +2513,12 @@ select decode(1, 2, c_timestamptz, c_smalldatetime) as result, pg_typeof(result)
(1 row)
select decode(1, 2, c_timestamptz, c_interval) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp with time zone and interval cannot be matched
+ERROR: DECODE types timestamp with time zone and interval cannot be matched
LINE 1: select decode(1, 2, c_timestamptz, c_interval) as result, pg...
^
CONTEXT: referenced column: result
select decode(1, 2, c_timestamptz, c_reltime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types timestamp with time zone and reltime cannot be matched
+ERROR: DECODE types timestamp with time zone and reltime cannot be matched
LINE 1: select decode(1, 2, c_timestamptz, c_reltime) as result, pg_...
^
CONTEXT: referenced column: result
@@ -2530,82 +2530,82 @@ select decode(1, 2, c_timestamptz, c_abstime) as result, pg_typeof(result) from
-- convert to smalldatetime
select decode(1, 2, c_smalldatetime, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smalldatetime and boolean cannot be matched
+ERROR: DECODE types smalldatetime and boolean cannot be matched
LINE 1: select decode(1, 2, c_smalldatetime, c_bool) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_smalldatetime, c_int1) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smalldatetime and tinyint cannot be matched
+ERROR: DECODE types smalldatetime and tinyint cannot be matched
LINE 1: select decode(1, 2, c_smalldatetime, c_int1) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_smalldatetime, c_int2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smalldatetime and smallint cannot be matched
+ERROR: DECODE types smalldatetime and smallint cannot be matched
LINE 1: select decode(1, 2, c_smalldatetime, c_int2) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_smalldatetime, c_int4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smalldatetime and integer cannot be matched
+ERROR: DECODE types smalldatetime and integer cannot be matched
LINE 1: select decode(1, 2, c_smalldatetime, c_int4) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_smalldatetime, c_int8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smalldatetime and bigint cannot be matched
+ERROR: DECODE types smalldatetime and bigint cannot be matched
LINE 1: select decode(1, 2, c_smalldatetime, c_int8) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_smalldatetime, c_float4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smalldatetime and real cannot be matched
+ERROR: DECODE types smalldatetime and real cannot be matched
LINE 1: select decode(1, 2, c_smalldatetime, c_float4) as result, pg...
^
CONTEXT: referenced column: result
select decode(1, 2, c_smalldatetime, c_float8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smalldatetime and double precision cannot be matched
+ERROR: DECODE types smalldatetime and double precision cannot be matched
LINE 1: select decode(1, 2, c_smalldatetime, c_float8) as result, pg...
^
CONTEXT: referenced column: result
select decode(1, 2, c_smalldatetime, c_numeric) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smalldatetime and numeric cannot be matched
+ERROR: DECODE types smalldatetime and numeric cannot be matched
LINE 1: select decode(1, 2, c_smalldatetime, c_numeric) as result, p...
^
CONTEXT: referenced column: result
select decode(1, 2, c_smalldatetime, c_money) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types money and smalldatetime cannot be matched
+ERROR: DECODE types money and smalldatetime cannot be matched
LINE 1: select decode(1, 2, c_smalldatetime, c_money) as result, pg_...
^
CONTEXT: referenced column: result
select decode(1, 2, c_smalldatetime, c_char) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smalldatetime and character cannot be matched
+ERROR: DECODE types smalldatetime and character cannot be matched
LINE 1: select decode(1, 2, c_smalldatetime, c_char) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_smalldatetime, c_bpchar) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smalldatetime and character cannot be matched
+ERROR: DECODE types smalldatetime and character cannot be matched
LINE 1: select decode(1, 2, c_smalldatetime, c_bpchar) as result, pg...
^
CONTEXT: referenced column: result
select decode(1, 2, c_smalldatetime, c_varchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smalldatetime and character varying cannot be matched
+ERROR: DECODE types smalldatetime and character varying cannot be matched
LINE 1: select decode(1, 2, c_smalldatetime, c_varchar2) as result, ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_smalldatetime, c_nvarchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smalldatetime and nvarchar2 cannot be matched
+ERROR: DECODE types smalldatetime and nvarchar2 cannot be matched
LINE 1: select decode(1, 2, c_smalldatetime, c_nvarchar2) as result,...
^
CONTEXT: referenced column: result
select decode(1, 2, c_smalldatetime, c_text) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smalldatetime and text cannot be matched
+ERROR: DECODE types smalldatetime and text cannot be matched
LINE 1: select decode(1, 2, c_smalldatetime, c_text) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_smalldatetime, c_char2number_success) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smalldatetime and text cannot be matched
+ERROR: DECODE types smalldatetime and text cannot be matched
LINE 1: select decode(1, 2, c_smalldatetime, c_char2number_success) ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_smalldatetime, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smalldatetime and raw cannot be matched
+ERROR: DECODE types smalldatetime and raw cannot be matched
LINE 1: select decode(1, 2, c_smalldatetime, c_raw) as result, pg_ty...
^
CONTEXT: referenced column: result
@@ -2638,12 +2638,12 @@ select decode(1, 2, c_smalldatetime, c_timestamptz) as result, pg_typeof(result)
(1 row)
select decode(1, 2, c_smalldatetime, c_interval) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smalldatetime and interval cannot be matched
+ERROR: DECODE types smalldatetime and interval cannot be matched
LINE 1: select decode(1, 2, c_smalldatetime, c_interval) as result, ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_smalldatetime, c_reltime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types smalldatetime and reltime cannot be matched
+ERROR: DECODE types smalldatetime and reltime cannot be matched
LINE 1: select decode(1, 2, c_smalldatetime, c_reltime) as result, p...
^
CONTEXT: referenced column: result
@@ -2655,112 +2655,112 @@ select decode(1, 2, c_smalldatetime, c_abstime) as result, pg_typeof(result) fro
-- convert to interval
select decode(1, 2, c_interval, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and boolean cannot be matched
+ERROR: DECODE types interval and boolean cannot be matched
LINE 1: select decode(1, 2, c_interval, c_bool) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_int1) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and tinyint cannot be matched
+ERROR: DECODE types interval and tinyint cannot be matched
LINE 1: select decode(1, 2, c_interval, c_int1) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_int2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and smallint cannot be matched
+ERROR: DECODE types interval and smallint cannot be matched
LINE 1: select decode(1, 2, c_interval, c_int2) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_int4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and integer cannot be matched
+ERROR: DECODE types interval and integer cannot be matched
LINE 1: select decode(1, 2, c_interval, c_int4) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_int8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and bigint cannot be matched
+ERROR: DECODE types interval and bigint cannot be matched
LINE 1: select decode(1, 2, c_interval, c_int8) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_float4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and real cannot be matched
+ERROR: DECODE types interval and real cannot be matched
LINE 1: select decode(1, 2, c_interval, c_float4) as result, pg_type...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_float8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and double precision cannot be matched
+ERROR: DECODE types interval and double precision cannot be matched
LINE 1: select decode(1, 2, c_interval, c_float8) as result, pg_type...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_numeric) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and numeric cannot be matched
+ERROR: DECODE types interval and numeric cannot be matched
LINE 1: select decode(1, 2, c_interval, c_numeric) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_money) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types money and interval cannot be matched
+ERROR: DECODE types money and interval cannot be matched
LINE 1: select decode(1, 2, c_interval, c_money) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_char) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and character cannot be matched
+ERROR: DECODE types interval and character cannot be matched
LINE 1: select decode(1, 2, c_interval, c_char) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_bpchar) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and character cannot be matched
+ERROR: DECODE types interval and character cannot be matched
LINE 1: select decode(1, 2, c_interval, c_bpchar) as result, pg_type...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_varchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and character varying cannot be matched
+ERROR: DECODE types interval and character varying cannot be matched
LINE 1: select decode(1, 2, c_interval, c_varchar2) as result, pg_ty...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_nvarchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and nvarchar2 cannot be matched
+ERROR: DECODE types interval and nvarchar2 cannot be matched
LINE 1: select decode(1, 2, c_interval, c_nvarchar2) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_text) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and text cannot be matched
+ERROR: DECODE types interval and text cannot be matched
LINE 1: select decode(1, 2, c_interval, c_text) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_char2number_success) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and text cannot be matched
+ERROR: DECODE types interval and text cannot be matched
LINE 1: select decode(1, 2, c_interval, c_char2number_success) as re...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and raw cannot be matched
+ERROR: DECODE types interval and raw cannot be matched
LINE 1: select decode(1, 2, c_interval, c_raw) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_date) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and timestamp without time zone cannot be matched
+ERROR: DECODE types interval and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_interval, c_date) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_time) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and time without time zone cannot be matched
+ERROR: DECODE types interval and time without time zone cannot be matched
LINE 1: select decode(1, 2, c_interval, c_time) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_timetz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and time with time zone cannot be matched
+ERROR: DECODE types interval and time with time zone cannot be matched
LINE 1: select decode(1, 2, c_interval, c_timetz) as result, pg_type...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_timestamp) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and timestamp without time zone cannot be matched
+ERROR: DECODE types interval and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_interval, c_timestamp) as result, pg_t...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_timestamptz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and timestamp with time zone cannot be matched
+ERROR: DECODE types interval and timestamp with time zone cannot be matched
LINE 1: select decode(1, 2, c_interval, c_timestamptz) as result, pg...
^
CONTEXT: referenced column: result
select decode(1, 2, c_interval, c_smalldatetime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and smalldatetime cannot be matched
+ERROR: DECODE types interval and smalldatetime cannot be matched
LINE 1: select decode(1, 2, c_interval, c_smalldatetime) as result, ...
^
CONTEXT: referenced column: result
@@ -2771,118 +2771,118 @@ select decode(1, 2, c_interval, c_reltime) as result, pg_typeof(result) from tb_
(1 row)
select decode(1, 2, c_interval, c_abstime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types interval and abstime cannot be matched
+ERROR: DECODE types interval and abstime cannot be matched
LINE 1: select decode(1, 2, c_interval, c_abstime) as result, pg_typ...
^
CONTEXT: referenced column: result
-- convert to reltime
select decode(1, 2, c_reltime, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and boolean cannot be matched
+ERROR: DECODE types reltime and boolean cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_bool) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_int1) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and tinyint cannot be matched
+ERROR: DECODE types reltime and tinyint cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_int1) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_int2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and smallint cannot be matched
+ERROR: DECODE types reltime and smallint cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_int2) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_int4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and integer cannot be matched
+ERROR: DECODE types reltime and integer cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_int4) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_int8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and bigint cannot be matched
+ERROR: DECODE types reltime and bigint cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_int8) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_float4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and real cannot be matched
+ERROR: DECODE types reltime and real cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_float4) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_float8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and double precision cannot be matched
+ERROR: DECODE types reltime and double precision cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_float8) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_numeric) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and numeric cannot be matched
+ERROR: DECODE types reltime and numeric cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_numeric) as result, pg_type...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_money) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types money and reltime cannot be matched
+ERROR: DECODE types money and reltime cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_money) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_char) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and character cannot be matched
+ERROR: DECODE types reltime and character cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_char) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_bpchar) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and character cannot be matched
+ERROR: DECODE types reltime and character cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_bpchar) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_varchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and character varying cannot be matched
+ERROR: DECODE types reltime and character varying cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_varchar2) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_nvarchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and nvarchar2 cannot be matched
+ERROR: DECODE types reltime and nvarchar2 cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_nvarchar2) as result, pg_ty...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_text) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and text cannot be matched
+ERROR: DECODE types reltime and text cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_text) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_char2number_success) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and text cannot be matched
+ERROR: DECODE types reltime and text cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_char2number_success) as res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and raw cannot be matched
+ERROR: DECODE types reltime and raw cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_raw) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_date) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and timestamp without time zone cannot be matched
+ERROR: DECODE types reltime and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_date) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_time) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and time without time zone cannot be matched
+ERROR: DECODE types reltime and time without time zone cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_time) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_timetz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and time with time zone cannot be matched
+ERROR: DECODE types reltime and time with time zone cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_timetz) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_timestamp) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and timestamp without time zone cannot be matched
+ERROR: DECODE types reltime and timestamp without time zone cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_timestamp) as result, pg_ty...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_timestamptz) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and timestamp with time zone cannot be matched
+ERROR: DECODE types reltime and timestamp with time zone cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_timestamptz) as result, pg_...
^
CONTEXT: referenced column: result
select decode(1, 2, c_reltime, c_smalldatetime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and smalldatetime cannot be matched
+ERROR: DECODE types reltime and smalldatetime cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_smalldatetime) as result, p...
^
CONTEXT: referenced column: result
@@ -2893,88 +2893,88 @@ select decode(1, 2, c_reltime, c_interval) as result, pg_typeof(result) from tb_
(1 row)
select decode(1, 2, c_reltime, c_abstime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types reltime and abstime cannot be matched
+ERROR: DECODE types reltime and abstime cannot be matched
LINE 1: select decode(1, 2, c_reltime, c_abstime) as result, pg_type...
^
CONTEXT: referenced column: result
-- convert to abstime
select decode(1, 2, c_abstime, c_bool) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types abstime and boolean cannot be matched
+ERROR: DECODE types abstime and boolean cannot be matched
LINE 1: select decode(1, 2, c_abstime, c_bool) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_abstime, c_int1) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types abstime and tinyint cannot be matched
+ERROR: DECODE types abstime and tinyint cannot be matched
LINE 1: select decode(1, 2, c_abstime, c_int1) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_abstime, c_int2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types abstime and smallint cannot be matched
+ERROR: DECODE types abstime and smallint cannot be matched
LINE 1: select decode(1, 2, c_abstime, c_int2) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_abstime, c_int4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types abstime and integer cannot be matched
+ERROR: DECODE types abstime and integer cannot be matched
LINE 1: select decode(1, 2, c_abstime, c_int4) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_abstime, c_int8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types abstime and bigint cannot be matched
+ERROR: DECODE types abstime and bigint cannot be matched
LINE 1: select decode(1, 2, c_abstime, c_int8) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_abstime, c_float4) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types abstime and real cannot be matched
+ERROR: DECODE types abstime and real cannot be matched
LINE 1: select decode(1, 2, c_abstime, c_float4) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_abstime, c_float8) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types abstime and double precision cannot be matched
+ERROR: DECODE types abstime and double precision cannot be matched
LINE 1: select decode(1, 2, c_abstime, c_float8) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_abstime, c_numeric) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types abstime and numeric cannot be matched
+ERROR: DECODE types abstime and numeric cannot be matched
LINE 1: select decode(1, 2, c_abstime, c_numeric) as result, pg_type...
^
CONTEXT: referenced column: result
select decode(1, 2, c_abstime, c_money) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types money and abstime cannot be matched
+ERROR: DECODE types money and abstime cannot be matched
LINE 1: select decode(1, 2, c_abstime, c_money) as result, pg_typeof...
^
CONTEXT: referenced column: result
select decode(1, 2, c_abstime, c_char) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types abstime and character cannot be matched
+ERROR: DECODE types abstime and character cannot be matched
LINE 1: select decode(1, 2, c_abstime, c_char) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_abstime, c_bpchar) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types abstime and character cannot be matched
+ERROR: DECODE types abstime and character cannot be matched
LINE 1: select decode(1, 2, c_abstime, c_bpchar) as result, pg_typeo...
^
CONTEXT: referenced column: result
select decode(1, 2, c_abstime, c_varchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types abstime and character varying cannot be matched
+ERROR: DECODE types abstime and character varying cannot be matched
LINE 1: select decode(1, 2, c_abstime, c_varchar2) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_abstime, c_nvarchar2) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types abstime and nvarchar2 cannot be matched
+ERROR: DECODE types abstime and nvarchar2 cannot be matched
LINE 1: select decode(1, 2, c_abstime, c_nvarchar2) as result, pg_ty...
^
CONTEXT: referenced column: result
select decode(1, 2, c_abstime, c_text) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types abstime and text cannot be matched
+ERROR: DECODE types abstime and text cannot be matched
LINE 1: select decode(1, 2, c_abstime, c_text) as result, pg_typeof(...
^
CONTEXT: referenced column: result
select decode(1, 2, c_abstime, c_char2number_success) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types abstime and text cannot be matched
+ERROR: DECODE types abstime and text cannot be matched
LINE 1: select decode(1, 2, c_abstime, c_char2number_success) as res...
^
CONTEXT: referenced column: result
select decode(1, 2, c_abstime, c_raw) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types abstime and raw cannot be matched
+ERROR: DECODE types abstime and raw cannot be matched
LINE 1: select decode(1, 2, c_abstime, c_raw) as result, pg_typeof(r...
^
CONTEXT: referenced column: result
@@ -3013,12 +3013,12 @@ select decode(1, 2, c_abstime, c_smalldatetime) as result, pg_typeof(result) fro
(1 row)
select decode(1, 2, c_abstime, c_interval) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types abstime and interval cannot be matched
+ERROR: DECODE types abstime and interval cannot be matched
LINE 1: select decode(1, 2, c_abstime, c_interval) as result, pg_typ...
^
CONTEXT: referenced column: result
select decode(1, 2, c_abstime, c_reltime) as result, pg_typeof(result) from tb_test;
-ERROR: CASE types abstime and reltime cannot be matched
+ERROR: DECODE types abstime and reltime cannot be matched
LINE 1: select decode(1, 2, c_abstime, c_reltime) as result, pg_type...
^
CONTEXT: referenced column: result
@@ -3719,24 +3719,19 @@ ERROR: operator does not exist: numeric = money
HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts.
CONTEXT: referenced column: case
select decode(c_char, c_money, 'Conversion successfully!', 'Conversion failed!') from tb_test;
-ERROR: operator does not exist: character = money
-HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts.
+ERROR: failed to find conversion function from money to numeric
CONTEXT: referenced column: case
select decode(c_bpchar, c_money, 'Conversion successfully!', 'Conversion failed!') from tb_test;
-ERROR: operator does not exist: character = money
-HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts.
+ERROR: failed to find conversion function from money to numeric
CONTEXT: referenced column: case
select decode(c_varchar2, c_money, 'Conversion successfully!', 'Conversion failed!') from tb_test;
-ERROR: operator does not exist: character varying = money
-HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts.
+ERROR: failed to find conversion function from money to numeric
CONTEXT: referenced column: case
select decode(c_nvarchar2, c_money, 'Conversion successfully!', 'Conversion failed!') from tb_test;
-ERROR: operator does not exist: nvarchar2 = money
-HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts.
+ERROR: failed to find conversion function from money to numeric
CONTEXT: referenced column: case
select decode(c_text, c_money, 'Conversion successfully!', 'Conversion failed!') from tb_test;
-ERROR: operator does not exist: text = money
-HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts.
+ERROR: failed to find conversion function from money to numeric
CONTEXT: referenced column: case
select decode(c_raw, c_money, 'Conversion successfully!', 'Conversion failed!') from tb_test;
ERROR: operator does not exist: raw = money
@@ -5060,7 +5055,116 @@ select decode(c_interval, c_reltime, 'Conversion successfully!', 'Conversion fai
Conversion successfully!
(1 row)
+----
+-- testcase - fix o compatibility of a_style_coerce
+----
+-- 1. return type
+set sql_beta_feature = 'a_style_coerce';
+select pg_typeof(decode(1, 1, 1, '1'));
+ pg_typeof
+-----------
+ numeric
+(1 row)
+
+select pg_typeof(decode(1, 1, '1', 1));
+ pg_typeof
+-----------
+ text
+(1 row)
+
+select pg_typeof(case 1 when 1 then 1 else '1' end);
+ pg_typeof
+-----------
+ integer
+(1 row)
+
+select pg_typeof(case 1 when 1 then '1' else 1 end);
+ pg_typeof
+-----------
+ integer
+(1 row)
+
set sql_beta_feature = 'none';
+select pg_typeof(decode(1, 1, 1, '1'));
+ pg_typeof
+-----------
+ integer
+(1 row)
+
+select pg_typeof(decode(1, 1, '1', 1));
+ pg_typeof
+-----------
+ integer
+(1 row)
+
+select pg_typeof(case 1 when 1 then 1 else '1' end);
+ pg_typeof
+-----------
+ integer
+(1 row)
+
+select pg_typeof(case 1 when 1 then '1' else 1 end);
+ pg_typeof
+-----------
+ integer
+(1 row)
+
+-- 2. operator match
+set sql_beta_feature = 'a_style_coerce';
+select decode(1, '1.0', 'same', 'different');
+ case
+-----------
+ different
+(1 row)
+
+select decode('1.0', 1, 'same', 'different');
+ case
+------
+ same
+(1 row)
+
+select decode(1, '1.0'::text, 'same', 'different');
+ERROR: invalid input syntax for type bigint: "1.0"
+CONTEXT: referenced column: case
+select decode('1.0'::text, 1, 'same', 'different');
+ case
+------
+ same
+(1 row)
+
+select case 1 when '1.0' then 'same' else 'different' end;
+ERROR: invalid input syntax for integer: "1.0"
+LINE 1: select case 1 when '1.0' then 'same' else 'different' end;
+ ^
+CONTEXT: referenced column: case
+select case '1.0' when 1 then 'same' else 'different' end;
+ERROR: invalid input syntax for type bigint: "1.0"
+CONTEXT: referenced column: case
+set sql_beta_feature = 'none';
+select decode(1, '1.0', 'same', 'different');
+ERROR: invalid input syntax for integer: "1.0"
+LINE 1: select decode(1, '1.0', 'same', 'different');
+ ^
+CONTEXT: referenced column: case
+select decode('1.0', 1, 'same', 'different');
+ERROR: invalid input syntax for integer: "1.0"
+LINE 1: select decode('1.0', 1, 'same', 'different');
+ ^
+CONTEXT: referenced column: case
+select decode(1, '1.0'::text, 'same', 'different');
+ERROR: invalid input syntax for type bigint: "1.0"
+CONTEXT: referenced column: case
+select decode('1.0'::text, 1, 'same', 'different');
+ERROR: invalid input syntax for type bigint: "1.0"
+CONTEXT: referenced column: case
+select case 1 when '1.0' then 'same' else 'different' end;
+ERROR: invalid input syntax for integer: "1.0"
+LINE 1: select case 1 when '1.0' then 'same' else 'different' end;
+ ^
+CONTEXT: referenced column: case
+select case '1.0' when 1 then 'same' else 'different' end;
+ERROR: invalid input syntax for type bigint: "1.0"
+CONTEXT: referenced column: case
\c regression
clean connection to all force for database decode_compatibility;
drop database decode_compatibility;
diff --git a/src/test/regress/expected/event.out b/src/test/regress/expected/event.out
new file mode 100644
index 000000000..4e566b99d
--- /dev/null
+++ b/src/test/regress/expected/event.out
@@ -0,0 +1,870 @@
+drop database if exists event_b;
+NOTICE: database "event_b" does not exist, skipping
+create database event_b with dbcompatibility 'b';
+\c event_b
+create user event_a sysadmin password 'event_123';
+create user event_b sysadmin password 'event_123';
+--CREATE EVENT
+--Schedule Parameter Test
+--CHECK Schedule AT .. situation
+create event IF NOT EXISTS ee11 on schedule at '2022-12-09 17:24:11' disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ ee11 | public
+(1 row)
+
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at sysdate disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ ee11 | public
+(1 row)
+
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at CURRENT_DATE disable do insert into t values(0);
+ERROR: syntax error at or near "CURRENT_DATE"
+LINE 1: create event IF NOT EXISTS ee11 on schedule at CURRENT_DATE ...
+ ^
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+(0 rows)
+
+drop event if exists ee11;
+NOTICE: event "ee11" is not exists, skipping
+create event IF NOT EXISTS ee11 on schedule at CURRENT_TIME disable do insert into t values(0);
+ERROR: syntax error at or near "CURRENT_TIME"
+LINE 1: create event IF NOT EXISTS ee11 on schedule at CURRENT_TIME ...
+ ^
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+(0 rows)
+
+drop event if exists ee11;
+NOTICE: event "ee11" is not exists, skipping
+create event IF NOT EXISTS ee11 on schedule at CURRENT_TIME (1) disable do insert into t values(0);
+ERROR: syntax error at or near "CURRENT_TIME"
+LINE 1: create event IF NOT EXISTS ee11 on schedule at CURRENT_TIME ...
+ ^
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+(0 rows)
+
+drop event if exists ee11;
+NOTICE: event "ee11" is not exists, skipping
+create event IF NOT EXISTS ee11 on schedule at CURRENT_TIMESTAMP disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ ee11 | public
+(1 row)
+
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at CURRENT_TIMESTAMP (1) disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ ee11 | public
+(1 row)
+
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at LOCALTIME disable do insert into t values(0);
+ERROR: syntax error at or near "LOCALTIME"
+LINE 1: create event IF NOT EXISTS ee11 on schedule at LOCALTIME dis...
+ ^
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+(0 rows)
+
+drop event if exists ee11;
+NOTICE: event "ee11" is not exists, skipping
+create event IF NOT EXISTS ee11 on schedule at LOCALTIME (1) disable do insert into t values(0);
+ERROR: syntax error at or near "LOCALTIME"
+LINE 1: create event IF NOT EXISTS ee11 on schedule at LOCALTIME (1)...
+ ^
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+(0 rows)
+
+drop event if exists ee11;
+NOTICE: event "ee11" is not exists, skipping
+create event IF NOT EXISTS ee11 on schedule at LOCALTIMESTAMP disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ ee11 | public
+(1 row)
+
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at LOCALTIMESTAMP (1) disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ ee11 | public
+(1 row)
+
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at LOCALTIMESTAMP (1) disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ ee11 | public
+(1 row)
+
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at now() disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ ee11 | public
+(1 row)
+
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at CURRENT_TIMESTAMP + interval 1 minute disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ ee11 | public
+(1 row)
+
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at SYSDATE + interval 10 second disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ ee11 | public
+(1 row)
+
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at SYSDATE + interval 0.5 second disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ ee11 | public
+(1 row)
+
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at now() + interval 1 hour disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ ee11 | public
+(1 row)
+
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at LOCALTIMESTAMP + interval '00:00' minute to second disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ ee11 | public
+(1 row)
+
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at now() + interval 1 year + interval '00:00' minute to second disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ ee11 | public
+(1 row)
+
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at now() + interval 666666666666666666666666666667 year + interval '00:00' minute to second disable do insert into t values(0);
+ERROR: interval field value out of range: "666666666666666666666666666667 day"
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+(0 rows)
+
+drop event if exists ee11;
+NOTICE: event "ee11" is not exists, skipping
+create event IF NOT EXISTS ee11 on schedule at sysdate + interval 1234567890 second + interval 1234567890 minute disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ ee11 | public
+(1 row)
+
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at sysdate + interval 1.5 second + interval 1.33 minute disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ ee11 | public
+(1 row)
+
+drop event if exists ee11;
+--CHECK Schedule EVERY ..situation
+create event IF NOT EXISTS evtest on schedule every 1 minute disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ evtest | public
+(1 row)
+
+drop event if exists evtest;
+create event IF NOT EXISTS evtest on schedule every '00:30' minute to second disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ evtest | public
+(1 row)
+
+drop event if exists evtest;
+create event IF NOT EXISTS evtest on schedule every 1 minute starts sysdate disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ evtest | public
+(1 row)
+
+drop event if exists evtest;
+create event IF NOT EXISTS evtest on schedule every 1 minute ends sysdate + interval 1 hour disable do insert into t values(0);
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ evtest | public
+(1 row)
+
+drop event if exists evtest;
+create event IF NOT EXISTS evtest on schedule every 1 minute starts sysdate + interval 1 day ends now() + interval 1 year disable do insert into t values(0);
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ evtest | public
+(1 row)
+
+drop event if exists evtest;
+create event IF NOT EXISTS evtest on schedule every 1 minute starts sysdate + interval 1 day + interval '00:99' minute to second disable do insert into t values(0);
+ERROR: interval field value out of range: "00:99"
+LINE 1: ...minute starts sysdate + interval 1 day + interval '00:99' mi...
+ ^
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+(0 rows)
+
+drop event if exists evtest;
+NOTICE: event "evtest" is not exists, skipping
+--if not exists
+create event e on schedule every 1 month disable do select 1;
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+create event e on schedule at sysdate disable do select 1;
+ERROR: event "e" already exists
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+create event if not exists e on schedule at sysdate disable do select 1;
+NOTICE: event "e" already exists, skipping
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+drop event e;
+--auto_drop
+drop event e;
+ERROR: event "e" is not exists
+create event e on schedule at sysdate disable do select 1;
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+drop event e;
+create event e on schedule at sysdate on completion not preserve disable do select 1;
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+drop event e;
+create event e on schedule at sysdate on completion preserve disable do select 1;
+drop event e;
+--job_status
+create event e on schedule every 1 hour do select 1;
+drop event e;
+create event e on schedule every 1 hour enable do select 1;
+drop event e;
+create event e on schedule every 1 hour disable do select 1;
+drop event e;
+create event e on schedule every 1 hour disable on slave do select 1;
+drop event e;
+--comment
+create event e on schedule at sysdate disable do select 1;
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+drop event e;
+create event e on schedule at sysdate disable comment '======' do select 1;
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+drop event e;
+create event e on schedule at sysdate disable comment 'fsdfjksadfhkjsfafkjsdfhjkahfdsknvxhguiyeurfbsdbccguyaHUFAWEFKSJBFCNJNDAgudagsHJBHDSBHJFBSAHBkjbhjbhjBHJBUbhbhBYGUIOInkb' do select 1;
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+drop event e;
+create event e on schedule at now() disable
+comment '=================================================================================================
+==========================================================================================================
+==========================================================================================================
+==========================================================================================================
+==========================================================================================================
+'
+do select 1;
+select pg_sleep(0.2);
+ pg_sleep
+----------
+
+(1 row)
+
+drop event e;
+--sql body
+--abort
+CREATE TABLE customer_demographics_t1
+(
+ CD_DEMO_SK INTEGER NOT NULL,
+ CD_GENDER CHAR(1) ,
+ CD_MARITAL_STATUS CHAR(1) ,
+ CD_EDUCATION_STATUS CHAR(20) ,
+ CD_PURCHASE_ESTIMATE INTEGER ,
+ CD_CREDIT_RATING CHAR(10) ,
+ CD_DEP_COUNT INTEGER ,
+ CD_DEP_EMPLOYED_COUNT INTEGER ,
+ CD_DEP_COLLEGE_COUNT INTEGER
+)
+WITH (ORIENTATION = COLUMN,COMPRESSION=MIDDLE)
+;
+INSERT INTO customer_demographics_t1 VALUES(1920801,'M', 'U', 'DOCTOR DEGREE', 200, 'GOOD', 1, 0,0);
+SELECT * FROM customer_demographics_t1 WHERE cd_demo_sk = 1920801;
+ cd_demo_sk | cd_gender | cd_marital_status | cd_education_status | cd_purchase_estimate | cd_credit_rating | cd_dep_count | cd_dep_employed_count | cd_dep_college_count
+------------+-----------+-------------------+----------------------+----------------------+------------------+--------------+-----------------------+----------------------
+ 1920801 | M | U | DOCTOR DEGREE | 200 | GOOD | 1 | 0 | 0
+(1 row)
+
+START TRANSACTION;
+UPDATE customer_demographics_t1 SET cd_education_status= 'Unknown';
+SELECT * FROM customer_demographics_t1 WHERE cd_demo_sk = 1920801;
+ cd_demo_sk | cd_gender | cd_marital_status | cd_education_status | cd_purchase_estimate | cd_credit_rating | cd_dep_count | cd_dep_employed_count | cd_dep_college_count
+------------+-----------+-------------------+----------------------+----------------------+------------------+--------------+-----------------------+----------------------
+ 1920801 | M | U | Unknown | 200 | GOOD | 1 | 0 | 0
+(1 row)
+
+create event e on schedule at sysdate do ABORT;
+SELECT * FROM customer_demographics_t1 WHERE cd_demo_sk = 1920801;
+ cd_demo_sk | cd_gender | cd_marital_status | cd_education_status | cd_purchase_estimate | cd_credit_rating | cd_dep_count | cd_dep_employed_count | cd_dep_college_count
+------------+-----------+-------------------+----------------------+----------------------+------------------+--------------+-----------------------+----------------------
+ 1920801 | M | U | Unknown | 200 | GOOD | 1 | 0 | 0
+(1 row)
+
+DROP TABLE customer_demographics_t1;
+ABORT;
+--CALL
+CREATE FUNCTION func_add_sql(num1 integer, num2 integer) RETURN integer
+AS
+BEGIN
+RETURN num1 + num2;
+END;
+/
+create event e on schedule at sysdate disable do CALL func_add_sql(1, 3);
+DROP FUNCTION func_add_sql;
+drop event e;
+--ALTER EVENT
+--alter schedule
+\c event_b
+drop event e;
+ERROR: event "e" is not exists
+create definer=event_a event e on schedule at '2023-01-16 21:05:40' disable do select 1;
+show events where job_name='e';
+ job_name | schema_name | log_user | priv_user | job_status | start_date | interval | end_date | enable | failure_msg
+----------+-------------+----------+-----------+------------+--------------------------+----------+--------------------------+--------+-------------
+ e | public | event_a | event_a | s | Mon Jan 16 21:05:40 2023 | null | Sat Jan 01 08:00:00 4000 | f |
+(1 row)
+
+alter definer=event_a event e on schedule at '2023-01-16 21:05:40' + interval 1 year;
+show events where job_name='e';
+ job_name | schema_name | log_user | priv_user | job_status | start_date | interval | end_date | enable | failure_msg
+----------+-------------+----------+-----------+------------+--------------------------+----------+--------------------------+--------+-------------
+ e | public | event_a | event_a | s | Tue Jan 16 21:05:40 2024 | null | Sat Jan 01 08:00:00 4000 | f |
+(1 row)
+
+alter definer=event_a event e on schedule every 1 year;
+show events where job_name='e';
+ job_name | schema_name | log_user | priv_user | job_status | start_date | interval | end_date | enable | failure_msg
+----------+-------------+----------+-----------+------------+--------------------------+--------------------+--------------------------+--------+-------------
+ e | public | event_a | event_a | s | Tue Jan 16 21:05:40 2024 | interval '1' year | Sat Jan 01 08:00:00 4000 | f |
+(1 row)
+
+alter definer=event_a event e on schedule every 0.5 minute starts '2023-01-16 21:05:40' + interval '00:50' minute to second;
+show events where job_name='e';
+ job_name | schema_name | log_user | priv_user | job_status | start_date | interval | end_date | enable | failure_msg
+----------+-------------+----------+-----------+------------+--------------------------+------------------------+--------------------------+--------+-------------
+ e | public | event_a | event_a | s | Mon Jan 16 21:06:30 2023 | interval '0.5' minute | Sat Jan 01 08:00:00 4000 | f |
+(1 row)
+
+alter definer=event_a event e on schedule at '2023-01-16 21:05:40' + interval 500 second;
+show events where job_name='e';
+ job_name | schema_name | log_user | priv_user | job_status | start_date | interval | end_date | enable | failure_msg
+----------+-------------+----------+-----------+------------+--------------------------+----------+--------------------------+--------+-------------
+ e | public | event_a | event_a | s | Mon Jan 16 21:14:00 2023 | null | Sat Jan 01 08:00:00 4000 | f |
+(1 row)
+
+drop event e;
+--alter auto_drop
+drop event e;
+ERROR: event "e" is not exists
+create definer=event_a event e on schedule at '2023-01-16 21:05:40' disable do select 1;
+show events where job_name='e';
+ job_name | schema_name | log_user | priv_user | job_status | start_date | interval | end_date | enable | failure_msg
+----------+-------------+----------+-----------+------------+--------------------------+----------+--------------------------+--------+-------------
+ e | public | event_a | event_a | s | Mon Jan 16 21:05:40 2023 | null | Sat Jan 01 08:00:00 4000 | f |
+(1 row)
+
+select * from gs_job_attribute where job_name='e' and attribute_name='auto_drop';
+ job_name | attribute_name | attribute_value
+----------+----------------+-----------------
+ e | auto_drop | true
+(1 row)
+
+drop event e;
+create definer=event_a event e on schedule at '2023-01-16 21:05:40' on completion preserve disable do select 1;
+show events where job_name='e';
+ job_name | schema_name | log_user | priv_user | job_status | start_date | interval | end_date | enable | failure_msg
+----------+-------------+----------+-----------+------------+--------------------------+----------+--------------------------+--------+-------------
+ e | public | event_a | event_a | s | Mon Jan 16 21:05:40 2023 | null | Sat Jan 01 08:00:00 4000 | f |
+(1 row)
+
+select * from gs_job_attribute where job_name='e' and attribute_name='auto_drop';
+ job_name | attribute_name | attribute_value
+----------+----------------+-----------------
+ e | auto_drop | false
+(1 row)
+
+alter definer=event_a event e on completion not preserve;
+show events where job_name='e';
+ job_name | schema_name | log_user | priv_user | job_status | start_date | interval | end_date | enable | failure_msg
+----------+-------------+----------+-----------+------------+--------------------------+----------+--------------------------+--------+-------------
+ e | public | event_a | event_a | s | Mon Jan 16 21:05:40 2023 | null | Sat Jan 01 08:00:00 4000 | f |
+(1 row)
+
+select * from gs_job_attribute where job_name='e' and attribute_name='auto_drop';
+ job_name | attribute_name | attribute_value
+----------+----------------+-----------------
+ e | auto_drop | true
+(1 row)
+
+alter definer=event_a event e on completion preserve;
+show events where job_name='e';
+ job_name | schema_name | log_user | priv_user | job_status | start_date | interval | end_date | enable | failure_msg
+----------+-------------+----------+-----------+------------+--------------------------+----------+--------------------------+--------+-------------
+ e | public | event_a | event_a | s | Mon Jan 16 21:05:40 2023 | null | Sat Jan 01 08:00:00 4000 | f |
+(1 row)
+
+select * from gs_job_attribute where job_name='e' and attribute_name='auto_drop';
+ job_name | attribute_name | attribute_value
+----------+----------------+-----------------
+ e | auto_drop | false
+(1 row)
+
+drop event e;
+--alter event_name
+drop event e;
+ERROR: event "e" is not exists
+create event e on schedule at '2023-01-16 21:05:40' disable do select 1;
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ e | public
+(1 row)
+
+alter event e rename to e_new;
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ e_new | public
+(1 row)
+
+select what,job_name from pg_job_proc where job_name='e_new';
+ what | job_name
+------------+----------
+ select 1; | e_new
+(1 row)
+
+alter event e_new rename to e;
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ e | public
+(1 row)
+
+select what,job_name from pg_job_proc where job_name='e';
+ what | job_name
+------------+----------
+ select 1; | e
+(1 row)
+
+drop event e;
+--alter status
+drop table if exists a;
+NOTICE: table "a" does not exist, skipping
+create table a(a int);
+create event e on schedule at '2023-01-16 21:05:40' disable do insert into a values(0);
+select * from a;
+ a
+---
+(0 rows)
+
+alter event e on schedule every 1 year enable do insert into a values(0);
+select * from a;
+ a
+---
+(0 rows)
+
+truncate table a;
+alter event e disable;
+select * from a;
+ a
+---
+(0 rows)
+
+drop event e;
+create event e on schedule every 1 minute starts '3000-01-16 21:05:40' do select 1;
+select enable from pg_job where job_name='e';
+ enable
+--------
+ t
+(1 row)
+
+alter event e disable;
+select enable from pg_job where job_name='e';
+ enable
+--------
+ f
+(1 row)
+
+alter event e enable;
+select enable from pg_job where job_name='e';
+ enable
+--------
+ t
+(1 row)
+
+drop event e;
+--Alter event combination test.
+drop event e;
+ERROR: event "e" is not exists
+create event e on schedule at '2023-01-16 21:05:40' disable do select 1;
+alter definer=event_b event e on schedule every 1 year ends '2023-01-16 21:05:40' + interval 1 year;
+alter event e disable;
+alter event e do select 2;
+alter event e rename to ee comment 'test ee' do select sysdate;
+alter event ee comment '========test=========';
+alter event ee on schedule at '2023-01-16 21:05:40' + interval 1 year on completion preserve rename to test_e;
+drop event if exists test_e;
+--Test owner
+create user evtest_owner password 'event_123';
+create event e on schedule at sysdate disable do select 1;
+alter definer=evtest_owner event e;
+select log_user, priv_user from pg_job where job_name='e';
+ log_user | priv_user
+--------------+--------------
+ evtest_owner | evtest_owner
+(1 row)
+
+alter event e rename to ee;
+alter definer=evtest_owner event ee rename to e;
+select log_user, priv_user from pg_job where job_name='e';
+ log_user | priv_user
+--------------+--------------
+ evtest_owner | evtest_owner
+(1 row)
+
+create definer=evtest_owner event e_a on schedule at sysdate disable do select 1;
+select log_user, priv_user from pg_job where job_name='e_a';
+ log_user | priv_user
+--------------+--------------
+ evtest_owner | evtest_owner
+(1 row)
+
+alter event e_a rename to ea;
+alter definer=evtest_owner event ea rename to e_a;
+select log_user, priv_user from pg_job where job_name='e_a';
+ log_user | priv_user
+--------------+--------------
+ evtest_owner | evtest_owner
+(1 row)
+
+select log_user, priv_user from pg_job where dbname='event_b';
+ log_user | priv_user
+--------------+--------------
+ evtest_owner | evtest_owner
+ evtest_owner | evtest_owner
+(2 rows)
+
+drop user evtest_owner;
+select log_user, priv_user from pg_job where dbname='event_b';
+ log_user | priv_user
+----------+-----------
+(0 rows)
+
+select * from gs_job_attribute where job_name='e' or job_name='e_a';
+ job_name | attribute_name | attribute_value
+----------+----------------+-----------------
+(0 rows)
+
+--SHOW EVENTS
+drop event if exists e1;
+NOTICE: event "e1" is not exists, skipping
+create definer=event_a event e1 on schedule at '2023-01-16 21:05:40' disable do select 1;
+select job_name, nspname from pg_job where dbname='event_b';
+ job_name | nspname
+----------+---------
+ e1 | public
+(1 row)
+
+show events in a;
+ job_name | schema_name | log_user | priv_user | job_status | start_date | interval | end_date | enable | failure_msg
+----------+-------------+----------+-----------+------------+------------+----------+----------+--------+-------------
+(0 rows)
+
+show events from a;
+ job_name | schema_name | log_user | priv_user | job_status | start_date | interval | end_date | enable | failure_msg
+----------+-------------+----------+-----------+------------+------------+----------+----------+--------+-------------
+(0 rows)
+
+show events like 'e';
+ job_name | schema_name | log_user | priv_user | job_status | start_date | interval | end_date | enable | failure_msg
+----------+-------------+----------+-----------+------------+------------+----------+----------+--------+-------------
+(0 rows)
+
+show events like 'e%';
+ job_name | schema_name | log_user | priv_user | job_status | start_date | interval | end_date | enable | failure_msg
+----------+-------------+----------+-----------+------------+--------------------------+----------+--------------------------+--------+-------------
+ e1 | public | event_a | event_a | s | Mon Jan 16 21:05:40 2023 | null | Sat Jan 01 08:00:00 4000 | f |
+(1 row)
+
+show events like 'e_';
+ job_name | schema_name | log_user | priv_user | job_status | start_date | interval | end_date | enable | failure_msg
+----------+-------------+----------+-----------+------------+--------------------------+----------+--------------------------+--------+-------------
+ e1 | public | event_a | event_a | s | Mon Jan 16 21:05:40 2023 | null | Sat Jan 01 08:00:00 4000 | f |
+(1 row)
+
+show events where job_name='e1';
+ job_name | schema_name | log_user | priv_user | job_status | start_date | interval | end_date | enable | failure_msg
+----------+-------------+----------+-----------+------------+--------------------------+----------+--------------------------+--------+-------------
+ e1 | public | event_a | event_a | s | Mon Jan 16 21:05:40 2023 | null | Sat Jan 01 08:00:00 4000 | f |
+(1 row)
+
+drop event if exists e1;
+--test sql help
+\h CREATE EVENT
+No help available for "CREATE EVENT".
+Try \h with no arguments to see available help.
+\h ALTER EVENT
+No help available for "ALTER EVENT".
+Try \h with no arguments to see available help.
+\h DROP EVENT
+No help available for "DROP EVENT".
+Try \h with no arguments to see available help.
+\h SHOW EVENTS
+No help available for "SHOW EVENTS".
+Try \h with no arguments to see available help.
+drop table if exists event_a.a;
+NOTICE: table "a" does not exist, skipping
+drop table if exists event_b.t;
+NOTICE: table "t" does not exist, skipping
+drop schema if exists event_a;
+drop schema if exists event_b;
+drop user if exists event_a;
+drop user if exists event_b;
+\c regression
+drop database if exists event_b;
diff --git a/src/test/regress/expected/forall_save_exceptions.out b/src/test/regress/expected/forall_save_exceptions.out
index 158964077..1be8f8238 100644
--- a/src/test/regress/expected/forall_save_exceptions.out
+++ b/src/test/regress/expected/forall_save_exceptions.out
@@ -1045,11 +1045,10 @@ select decode(2,'ff3',5,2); -- now ok
(1 row)
select case when 2 = 'ff3' then 5 else 2 end; -- now ok
- case
-------
- 2
-(1 row)
-
+ERROR: invalid input syntax for integer: "ff3"
+LINE 1: select case when 2 = 'ff3' then 5 else 2 end;
+ ^
+CONTEXT: referenced column: case
-- still valid
select
decode(col_char, 'arbitrary', 1, 2),
@@ -1167,11 +1166,10 @@ select decode(2,3,'r',2); -- now ok
(1 row)
select case when 2 = 3 then 'r' else 2 end; -- now ok
- case
-------
- 2
-(1 row)
-
+ERROR: invalid input syntax for integer: "r"
+LINE 1: select case when 2 = 3 then 'r' else 2 end;
+ ^
+CONTEXT: referenced column: case
-- still valid
select
decode(1, 2, 'never', col_char),
@@ -1199,7 +1197,7 @@ from test_decode_coercion;
select
decode(1, 2, 'never', col_bool)
from test_decode_coercion;
-ERROR: CASE types text and boolean cannot be matched
+ERROR: DECODE types text and boolean cannot be matched
LINE 2: decode(1, 2, 'never', col_bool)
^
CONTEXT: referenced column: col_bool
diff --git a/src/test/regress/expected/function_get_table_def.out b/src/test/regress/expected/function_get_table_def.out
index 8a5636c50..f2731284d 100644
--- a/src/test/regress/expected/function_get_table_def.out
+++ b/src/test/regress/expected/function_get_table_def.out
@@ -505,33 +505,33 @@ select * from pg_get_tabledef('list_hash_2');
CREATE INDEX list_hash_2_idx3 ON list_hash_2 USING btree (col_4) TABLESPACE pg_default; +
CREATE INDEX list_hash_2_idx2 ON list_hash_2 USING btree (col_3, col_1) LOCAL( +
PARTITION partition_name( +
- SUBPARTITION p_hash_1_3_col_3_col_1_idx, +
+ SUBPARTITION p_hash_1_1_col_3_col_1_idx, +
SUBPARTITION p_hash_1_2_col_3_col_1_idx, +
- SUBPARTITION p_hash_1_1_col_3_col_1_idx +
+ SUBPARTITION p_hash_1_3_col_3_col_1_idx +
), +
PARTITION partition_name( +
SUBPARTITION p_list_2_subpartdefault1_col_3_col_1_idx +
), +
PARTITION partition_name( +
- SUBPARTITION p_hash_3_2_col_3_col_1_idx, +
- SUBPARTITION p_hash_3_1_col_3_col_1_idx +
+ SUBPARTITION p_hash_3_1_col_3_col_1_idx, +
+ SUBPARTITION p_hash_3_2_col_3_col_1_idx +
), +
PARTITION partition_name( +
- SUBPARTITION p_hash_4_5_col_3_col_1_idx, +
- SUBPARTITION p_hash_4_4_col_3_col_1_idx, +
- SUBPARTITION p_hash_4_3_col_3_col_1_idx, +
+ SUBPARTITION p_hash_4_1_col_3_col_1_idx, +
SUBPARTITION p_hash_4_2_col_3_col_1_idx, +
- SUBPARTITION p_hash_4_1_col_3_col_1_idx +
+ SUBPARTITION p_hash_4_3_col_3_col_1_idx, +
+ SUBPARTITION p_hash_4_4_col_3_col_1_idx, +
+ SUBPARTITION p_hash_4_5_col_3_col_1_idx +
), +
PARTITION partition_name( +
SUBPARTITION p_list_5_subpartdefault1_col_3_col_1_idx +
), +
PARTITION partition_name( +
- SUBPARTITION p_hash_6_5_col_3_col_1_idx, +
- SUBPARTITION p_hash_6_4_col_3_col_1_idx, +
- SUBPARTITION p_hash_6_3_col_3_col_1_idx, +
+ SUBPARTITION p_hash_6_1_col_3_col_1_idx, +
SUBPARTITION p_hash_6_2_col_3_col_1_idx, +
- SUBPARTITION p_hash_6_1_col_3_col_1_idx +
+ SUBPARTITION p_hash_6_3_col_3_col_1_idx, +
+ SUBPARTITION p_hash_6_4_col_3_col_1_idx, +
+ SUBPARTITION p_hash_6_5_col_3_col_1_idx +
), +
PARTITION partition_name( +
SUBPARTITION p_list_7_subpartdefault1_col_3_col_1_idx +
@@ -539,33 +539,33 @@ select * from pg_get_tabledef('list_hash_2');
) TABLESPACE pg_default; +
CREATE UNIQUE INDEX list_hash_2_idx1 ON list_hash_2 USING btree (col_2, col_3, col_4) LOCAL(+
PARTITION partition_name( +
- SUBPARTITION p_hash_1_3_col_2_col_3_col_4_idx, +
+ SUBPARTITION p_hash_1_1_col_2_col_3_col_4_idx, +
SUBPARTITION p_hash_1_2_col_2_col_3_col_4_idx, +
- SUBPARTITION p_hash_1_1_col_2_col_3_col_4_idx +
+ SUBPARTITION p_hash_1_3_col_2_col_3_col_4_idx +
), +
PARTITION partition_name( +
SUBPARTITION p_list_2_subpartdefault1_col_2_col_3_col_4_idx +
), +
PARTITION partition_name( +
- SUBPARTITION p_hash_3_2_col_2_col_3_col_4_idx, +
- SUBPARTITION p_hash_3_1_col_2_col_3_col_4_idx +
+ SUBPARTITION p_hash_3_1_col_2_col_3_col_4_idx, +
+ SUBPARTITION p_hash_3_2_col_2_col_3_col_4_idx +
), +
PARTITION partition_name( +
- SUBPARTITION p_hash_4_5_col_2_col_3_col_4_idx, +
- SUBPARTITION p_hash_4_4_col_2_col_3_col_4_idx, +
- SUBPARTITION p_hash_4_3_col_2_col_3_col_4_idx, +
+ SUBPARTITION p_hash_4_1_col_2_col_3_col_4_idx, +
SUBPARTITION p_hash_4_2_col_2_col_3_col_4_idx, +
- SUBPARTITION p_hash_4_1_col_2_col_3_col_4_idx +
+ SUBPARTITION p_hash_4_3_col_2_col_3_col_4_idx, +
+ SUBPARTITION p_hash_4_4_col_2_col_3_col_4_idx, +
+ SUBPARTITION p_hash_4_5_col_2_col_3_col_4_idx +
), +
PARTITION partition_name( +
SUBPARTITION p_list_5_subpartdefault1_col_2_col_3_col_4_idx +
), +
PARTITION partition_name( +
- SUBPARTITION p_hash_6_5_col_2_col_3_col_4_idx, +
- SUBPARTITION p_hash_6_4_col_2_col_3_col_4_idx, +
- SUBPARTITION p_hash_6_3_col_2_col_3_col_4_idx, +
+ SUBPARTITION p_hash_6_1_col_2_col_3_col_4_idx, +
SUBPARTITION p_hash_6_2_col_2_col_3_col_4_idx, +
- SUBPARTITION p_hash_6_1_col_2_col_3_col_4_idx +
+ SUBPARTITION p_hash_6_3_col_2_col_3_col_4_idx, +
+ SUBPARTITION p_hash_6_4_col_2_col_3_col_4_idx, +
+ SUBPARTITION p_hash_6_5_col_2_col_3_col_4_idx +
), +
PARTITION partition_name( +
SUBPARTITION p_list_7_subpartdefault1_col_2_col_3_col_4_idx +
diff --git a/src/test/regress/expected/hw_partition_b_db.out b/src/test/regress/expected/hw_partition_b_db.out
new file mode 100644
index 000000000..cdfc2769a
--- /dev/null
+++ b/src/test/regress/expected/hw_partition_b_db.out
@@ -0,0 +1,3329 @@
+CREATE SCHEMA partition_a_db_schema;
+SET CURRENT_SCHEMA TO partition_a_db_schema;
+-- -----------------------------------test partitions clause with A compatibility
+-- range with partitions clause
+CREATE TABLE t_range_partitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a,b) PARTITIONS 3;
+ERROR: syntax error at or near ";"
+LINE 2: PARTITION BY RANGE (a,b) PARTITIONS 3;
+ ^
+CREATE TABLE t_range_partitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a,b) PARTITIONS 3
+(
+ PARTITION p1 VALUES LESS THAN (100,100),
+ PARTITION p2 VALUES LESS THAN (200,200),
+ PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE)
+);
+ERROR: Un-support syntax in current compatibility
+LINE 2: PARTITION BY RANGE (a,b) PARTITIONS 3
+ ^
+DETAIL: range partition with partitions clause is supported only in B compatibility
+-- list with partitions clause
+CREATE TABLE t_list_partitions_clause (a int, b int, c int)
+PARTITION BY LIST (a,b) PARTITIONS 3;
+ERROR: syntax error at or near ";"
+LINE 2: PARTITION BY LIST (a,b) PARTITIONS 3;
+ ^
+CREATE TABLE t_list_partitions_clause (a int, b int, c int)
+PARTITION BY LIST (a,b) PARTITIONS 3
+(
+ PARTITION p1 VALUES ((0,0)),
+ PARTITION p2 VALUES ((1,1), (1,2)),
+ PARTITION p3 VALUES ((2,1), (2,2), (2,3))
+);
+ERROR: Un-support syntax in current compatibility
+LINE 2: PARTITION BY LIST (a,b) PARTITIONS 3
+ ^
+DETAIL: list partition with partitions clause is supported only in B compatibility
+-- hash with partitions clause
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY HASH(a) PARTITIONS 3;
+DROP TABLE t_hash_partitions_clause;
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY HASH(a) PARTITIONS 0;
+ERROR: Invalid number of partitions
+LINE 2: PARTITION BY HASH(a) PARTITIONS 0;
+ ^
+DETAIL: partitions number must be a positive integer
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY HASH(a) PARTITIONS -1;
+ERROR: syntax error at or near "-"
+LINE 2: PARTITION BY HASH(a) PARTITIONS -1;
+ ^
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY HASH(a) PARTITIONS 2.5;
+ERROR: syntax error at or near "2.5"
+LINE 2: PARTITION BY HASH(a) PARTITIONS 2.5;
+ ^
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY HASH(a) PARTITIONS '5';
+ERROR: syntax error at or near "'5'"
+LINE 2: PARTITION BY HASH(a) PARTITIONS '5';
+ ^
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY HASH(a) PARTITIONS 1048576;
+ERROR: Invalid number of partitions
+DETAIL: partitions number '1048576' cannot be greater than 1048575
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY HASH(a) PARTITIONS 3
+(
+ PARTITION p1,
+ PARTITION p2,
+ PARTITION p3
+);
+DROP TABLE t_hash_partitions_clause;
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY HASH(a) PARTITIONS 4
+(
+ PARTITION p1,
+ PARTITION p2,
+ PARTITION p3
+);
+ERROR: Invalid number of partitions
+DETAIL: the number of defined partitions does not match the partitions number '4'
+-- range-range with subpartitions clause
+CREATE TABLE t_range_range_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) PARTITIONS 2 SUBPARTITION BY RANGE(c)
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1 VALUES LESS THAN (100),
+ SUBPARTITION p1sub2 VALUES LESS THAN (MAXVALUE)
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1 VALUES LESS THAN (100),
+ SUBPARTITION p2sub2 VALUES LESS THAN (MAXVALUE)
+ )
+);
+ERROR: Un-support syntax in current compatibility
+LINE 2: PARTITION BY RANGE (a) PARTITIONS 2 SUBPARTITION BY RANGE(c)
+ ^
+DETAIL: range partition with partitions clause is supported only in B compatibility
+CREATE TABLE t_range_range_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) SUBPARTITION BY RANGE(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1 VALUES LESS THAN (100),
+ SUBPARTITION p1sub2 VALUES LESS THAN (MAXVALUE)
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1 VALUES LESS THAN (100),
+ SUBPARTITION p2sub2 VALUES LESS THAN (MAXVALUE)
+ )
+);
+ERROR: syntax error at or near "SUBPARTITIONS"
+LINE 2: PARTITION BY RANGE (a) SUBPARTITION BY RANGE(c) SUBPARTITION...
+ ^
+-- range-list with subpartitions clause
+CREATE TABLE t_range_list_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) PARTITIONS 2 SUBPARTITION BY LIST(c)
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1 VALUES (0),
+ SUBPARTITION p1sub2 VALUES (1)
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1 VALUES (0),
+ SUBPARTITION p2sub2 VALUES (1)
+ )
+);
+ERROR: Un-support syntax in current compatibility
+LINE 2: PARTITION BY RANGE (a) PARTITIONS 2 SUBPARTITION BY LIST(c)
+ ^
+DETAIL: range partition with partitions clause is supported only in B compatibility
+CREATE TABLE t_range_list_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) SUBPARTITION BY LIST(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1 VALUES (0),
+ SUBPARTITION p1sub2 VALUES (1)
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1 VALUES (0),
+ SUBPARTITION p2sub2 VALUES (1)
+ )
+);
+ERROR: syntax error at or near "SUBPARTITIONS"
+LINE 2: PARTITION BY RANGE (a) SUBPARTITION BY LIST(c) SUBPARTITIONS...
+ ^
+-- range-hash with subpartitions clause
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_range_hash_subpartitions_clause;
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+DROP TABLE t_range_hash_subpartitions_clause;
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 0
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+ERROR: Invalid number of partitions
+LINE 2: ...RTITION BY RANGE (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 0
+ ^
+DETAIL: subpartitions number must be a positive integer
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 0.2E+1
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+ERROR: syntax error at or near "0.2E+1"
+LINE 2: ...ON BY RANGE (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 0.2E+1
+ ^
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2.5
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+ERROR: syntax error at or near "2.5"
+LINE 2: ...ITION BY RANGE (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2.5
+ ^
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) SUBPARTITION BY HASH(c) SUBPARTITIONS '5'
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+ERROR: syntax error at or near "'5'"
+LINE 2: ...ITION BY RANGE (a) SUBPARTITION BY HASH(c) SUBPARTITIONS '5'
+ ^
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 1048576
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+ERROR: Invalid number of partitions
+DETAIL: subpartitions number '1048576' cannot be greater than 1048575
+-- list-range with subpartitions clause
+CREATE TABLE t_hash_range_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST (a) SUBPARTITION BY RANGE(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES (100) (
+ SUBPARTITION p1sub1 VALUES LESS THAN (100),
+ SUBPARTITION p1sub2 VALUES LESS THAN (MAXVALUE,MAXVALUE)
+ ),
+ PARTITION p2 VALUES (200) (
+ SUBPARTITION p2sub1 VALUES LESS THAN (100),
+ SUBPARTITION p2sub2 VALUES LESS THAN (MAXVALUE,MAXVALUE)
+ )
+);
+ERROR: syntax error at or near "SUBPARTITIONS"
+LINE 2: PARTITION BY LIST (a) SUBPARTITION BY RANGE(c) SUBPARTITIONS...
+ ^
+-- list-list with subpartitions clause
+CREATE TABLE t_hash_list_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST (a) SUBPARTITION BY LIST(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES (100) (
+ SUBPARTITION p1sub1 VALUES (0),
+ SUBPARTITION p1sub2 VALUES (1)
+ ),
+ PARTITION p2 VALUES (200) (
+ SUBPARTITION p2sub1 VALUES (0),
+ SUBPARTITION p2sub2 VALUES (1)
+ )
+);
+ERROR: syntax error at or near "SUBPARTITIONS"
+LINE 2: PARTITION BY LIST (a) SUBPARTITION BY LIST(c) SUBPARTITIONS ...
+ ^
+-- list-hash with subpartitions clause
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 VALUES (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES (100),
+ PARTITION p2 VALUES (200)
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST (a) PARTITIONS 2 SUBPARTITION BY HASH(c) SUBPARTITIONS 2;
+ERROR: syntax error at or near ";"
+LINE 2: ...ST (a) PARTITIONS 2 SUBPARTITION BY HASH(c) SUBPARTITIONS 2;
+ ^
+-- hash-range with subpartitions clause
+CREATE TABLE t_hash_range_subpartitions_clause (a int, b int, c int)
+PARTITION BY HASH (a) PARTITIONS 2 SUBPARTITION BY RANGE(c)
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1 VALUES LESS THAN (100),
+ SUBPARTITION p1sub2 VALUES LESS THAN (MAXVALUE)
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1 VALUES LESS THAN (100),
+ SUBPARTITION p2sub2 VALUES LESS THAN (MAXVALUE)
+ )
+);
+DROP TABLE t_hash_range_subpartitions_clause;
+-- hash-list with subpartitions clause
+CREATE TABLE t_hash_list_subpartitions_clause (a int, b int, c int)
+PARTITION BY HASH (a) PARTITIONS 2 SUBPARTITION BY LIST(c)
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1 VALUES (0),
+ SUBPARTITION p1sub2 VALUES (1)
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1 VALUES (0),
+ SUBPARTITION p2sub2 VALUES (1)
+ )
+);
+DROP TABLE t_hash_list_subpartitions_clause;
+-- hash-hash with subpartitions clause
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY HASH (a) PARTITIONS 2 SUBPARTITION BY HASH(c)
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY HASH (a) PARTITIONS 2 SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY HASH (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY HASH (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1,
+ PARTITION p2
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+CREATE TABLE T_HASH_HASH_SUBPARTITIONS_CLAUSE (a int, b int, c int)
+PARTITION BY HASH(a) PARTITIONS 2 SUBPARTITION BY HASH(c) SUBPARTITIONS 2;
+SELECT pg_get_tabledef('T_HASH_HASH_SUBPARTITIONS_CLAUSE');
+ pg_get_tabledef
+---------------------------------------------------
+ SET search_path = partition_a_db_schema; +
+ CREATE TABLE t_hash_hash_subpartitions_clause ( +
+ a integer, +
+ b integer, +
+ c integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY HASH (a) SUBPARTITION BY HASH (c) +
+ ( +
+ PARTITION p0 TABLESPACE pg_default +
+ ( +
+ SUBPARTITION p0sp0 TABLESPACE pg_default,+
+ SUBPARTITION p0sp1 TABLESPACE pg_default +
+ ), +
+ PARTITION p1 TABLESPACE pg_default +
+ ( +
+ SUBPARTITION p1sp0 TABLESPACE pg_default,+
+ SUBPARTITION p1sp1 TABLESPACE pg_default +
+ ) +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+DROP TABLE T_HASH_HASH_SUBPARTITIONS_CLAUSE;
+CREATE TABLE T_HASH_HASH_SUBPARTITIONS_CLAUSE (a int, b int, c int)
+PARTITION BY HASH (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2;
+DROP TABLE T_HASH_HASH_SUBPARTITIONS_CLAUSE;
+CREATE TABLE T_HASH_HASH_SUBPARTITIONS_CLAUSE (a int, b int, c int)
+PARTITION BY HASH (a) PARTITIONS 2 SUBPARTITION BY HASH(c);
+DROP TABLE T_HASH_HASH_SUBPARTITIONS_CLAUSE;
+CREATE TABLE T_HASH_HASH_SUBPARTITIONS_CLAUSE (a int, b int, c int)
+PARTITION BY HASH (a) SUBPARTITION BY HASH(c);
+SELECT pg_get_tabledef('T_HASH_HASH_SUBPARTITIONS_CLAUSE');
+ pg_get_tabledef
+---------------------------------------------------------------
+ SET search_path = partition_a_db_schema; +
+ CREATE TABLE t_hash_hash_subpartitions_clause ( +
+ a integer, +
+ b integer, +
+ c integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY HASH (a) SUBPARTITION BY HASH (c) +
+ ( +
+ PARTITION p0 TABLESPACE pg_default +
+ ( +
+ SUBPARTITION p0_subpartdefault1 TABLESPACE pg_default+
+ ) +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+DROP TABLE T_HASH_HASH_SUBPARTITIONS_CLAUSE;
+CREATE TABLE T_HASH_PARTITIONS_CLAUSE (a int, b int, c int)
+PARTITION BY HASH (a) PARTITIONS 2;
+SELECT pg_get_tabledef('T_HASH_PARTITIONS_CLAUSE');
+ pg_get_tabledef
+------------------------------------------
+ SET search_path = partition_a_db_schema;+
+ CREATE TABLE t_hash_partitions_clause ( +
+ a integer, +
+ b integer, +
+ c integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY HASH (a) +
+ ( +
+ PARTITION p0 TABLESPACE pg_default, +
+ PARTITION p1 TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+DROP TABLE T_HASH_PARTITIONS_CLAUSE;
+CREATE TABLE T_HASH_PARTITIONS_CLAUSE (a int, b int, c int)
+PARTITION BY HASH (a);
+SELECT pg_get_tabledef('T_HASH_PARTITIONS_CLAUSE');
+ pg_get_tabledef
+------------------------------------------
+ SET search_path = partition_a_db_schema;+
+ CREATE TABLE t_hash_partitions_clause ( +
+ a integer, +
+ b integer, +
+ c integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY HASH (a) +
+ ( +
+ PARTITION p0 TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+DROP TABLE T_HASH_PARTITIONS_CLAUSE;
+-- -----------------------------------test A compatibility syntax error
+CREATE TABLE t_multi_keys_range (a int, b int, c int)
+PARTITION BY RANGE(a)
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200),
+ PARTITION p3 VALUES LESS THAN (300),
+ PARTITION p4 VALUES LESS THAN MAXVALUE
+);
+ERROR: Un-support feature in current compatibility
+LINE 7: PARTITION p4 VALUES LESS THAN MAXVALUE
+ ^
+DETAIL: MAXVALUE without parentheses is supported only in B compatibility
+CREATE TABLE t_multi_keys_range (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a,b)
+(
+ PARTITION p1 VALUES LESS THAN (100,100),
+ PARTITION p2 VALUES LESS THAN (200,200),
+ PARTITION p3 VALUES LESS THAN (300,300),
+ PARTITION p4 VALUES LESS THAN (400,MAXVALUE),
+ PARTITION p5 VALUES LESS THAN (MAXVALUE,MAXVALUE)
+);
+ERROR: Un-support syntax in current compatibility
+LINE 2: PARTITION BY RANGE COLUMNS(a,b)
+ ^
+DETAIL: COLUMNS is supported only in B compatibility
+CREATE TABLE t_multi_keys_range (a int, b int, c int)
+PARTITION BY RANGE (a,b) PARTITIONS 5
+(
+ PARTITION p1 VALUES LESS THAN (100,100),
+ PARTITION p2 VALUES LESS THAN (200,200),
+ PARTITION p3 VALUES LESS THAN (300,300),
+ PARTITION p4 VALUES LESS THAN (400,MAXVALUE),
+ PARTITION p5 VALUES LESS THAN (MAXVALUE,MAXVALUE)
+);
+ERROR: Un-support syntax in current compatibility
+LINE 2: PARTITION BY RANGE (a,b) PARTITIONS 5
+ ^
+DETAIL: range partition with partitions clause is supported only in B compatibility
+CREATE TABLE t_multi_keys_list (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b)
+(
+ PARTITION p1 VALUES IN ( (0,0), (NULL,NULL) ),
+ PARTITION p2 VALUES IN ( (0,1), (0,2), (0,3), (1,1), (1,2) ),
+ PARTITION p3 VALUES IN ( (1,0), (2,0), (2,1), (3,0), (3,1) ),
+ PARTITION p4 VALUES IN ( (1,3), (2,2), (2,3), (3,2), (3,3) )
+);
+ERROR: Un-support syntax in current compatibility
+LINE 2: PARTITION BY LIST COLUMNS(a,b)
+ ^
+DETAIL: COLUMNS is supported only in B compatibility
+CREATE TABLE t_multi_keys_list (a int, b int, c int)
+PARTITION BY LIST (a,b)
+(
+ PARTITION p1 VALUES IN ( (0,0), (NULL,NULL) ),
+ PARTITION p2 VALUES IN ( (0,1), (0,2), (0,3), (1,1), (1,2) ),
+ PARTITION p3 VALUES IN ( (1,0), (2,0), (2,1), (3,0), (3,1) ),
+ PARTITION p4 VALUES IN ( (1,3), (2,2), (2,3), (3,2), (3,3) )
+);
+ERROR: Un-support syntax in current compatibility
+LINE 4: PARTITION p1 VALUES IN ( (0,0), (NULL,NULL) ),
+ ^
+DETAIL: VALUES IN is supported only in B compatibility
+CREATE TABLE t_multi_keys_list (a int, b int, c int)
+PARTITION BY LIST (a,b) PARTITIONS 5
+(
+ PARTITION p1 VALUES ( (0,0), (NULL,NULL) ),
+ PARTITION p2 VALUES ( (0,1), (0,2), (0,3), (1,1), (1,2) ),
+ PARTITION p3 VALUES ( (1,0), (2,0), (2,1), (3,0), (3,1) ),
+ PARTITION p4 VALUES ( (1,3), (2,2), (2,3), (3,2), (3,3) )
+);
+ERROR: Un-support syntax in current compatibility
+LINE 2: PARTITION BY LIST (a,b) PARTITIONS 5
+ ^
+DETAIL: list partition with partitions clause is supported only in B compatibility
+CREATE TABLE t_part_by_key (a int, b int, c int)
+PARTITION BY KEY(a)
+(
+ PARTITION p1,
+ PARTITION p2,
+ PARTITION p3,
+ PARTITION p4,
+ PARTITION p5
+);
+ERROR: Un-support syntax in current compatibility
+LINE 2: PARTITION BY KEY(a)
+ ^
+DETAIL: PARTITION BY KEY is supported only in B compatibility
+CREATE TABLE t_multi_keys_list_tbspc (a int, b varchar(4), c int)
+PARTITION BY LIST (a,b)
+(
+ PARTITION p1 VALUES ( (0,NULL) ) TABLESPACE = pg_default,
+ PARTITION p2 VALUES ( (0,'1'), (0,'2'), (0,'3'), (1,'1'), (1,'2') )
+);
+DROP TABLE t_multi_keys_list_tbspc;
+-- -----------------------------------test multi list keys with A compatibility
+CREATE TABLE t_multi_keys_list_default (a int, b int, c int)
+PARTITION BY LIST (a,b)
+(
+ PARTITION p1 VALUES ( DEFAULT ),
+ PARTITION p2 VALUES ( (0,1), (0,2), (0,3), (1,1), (1,2) ),
+ PARTITION p3 VALUES ( (NULL,0), (2,1) ),
+ PARTITION p4 VALUES ( (3,2), (NULL,NULL) ),
+ PARTITION pd VALUES ( DEFAULT )
+);
+ERROR: Partition table has multiple default partitions
+CREATE TABLE t_multi_keys_list_default (a int, b varchar(4), c int)
+PARTITION BY LIST (a,b)
+(
+ PARTITION p1 VALUES ( (0,NULL) ),
+ PARTITION p2 VALUES ( (0,'1'), (0,'2'), (0,'3'), (1,'1'), (1,'2') ),
+ PARTITION p3 VALUES ( (NULL,'0'), (2,'1') ),
+ PARTITION p4 VALUES ( (3,'2'), (NULL,NULL) ),
+ PARTITION pd VALUES ( DEFAULT )
+);
+CREATE INDEX t_multi_keys_list_default_idx_l ON t_multi_keys_list_default(a,b,c) LOCAL;
+SELECT pg_get_tabledef('t_multi_keys_list_default'::regclass);
+ pg_get_tabledef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SET search_path = partition_a_db_schema; +
+ CREATE TABLE t_multi_keys_list_default ( +
+ a integer, +
+ b character varying(4), +
+ c integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY LIST (a, b) +
+ ( +
+ PARTITION p2 VALUES ((0,'1'),(0,'2'),(0,'3'),(1,'1'),(1,'2')) TABLESPACE pg_default, +
+ PARTITION p1 VALUES ((0,NULL)) TABLESPACE pg_default, +
+ PARTITION p4 VALUES ((3,'2'),(NULL,NULL)) TABLESPACE pg_default, +
+ PARTITION pd VALUES (DEFAULT) TABLESPACE pg_default, +
+ PARTITION p3 VALUES ((NULL,'0'),(2,'1')) TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT; +
+ CREATE INDEX t_multi_keys_list_default_idx_l ON t_multi_keys_list_default USING btree (a, b, c) LOCAL(PARTITION p3_a_b_c_idx, PARTITION p1_a_b_c_idx, PARTITION p2_a_b_c_idx, PARTITION p4_a_b_c_idx, PARTITION pd_a_b_c_idx) TABLESPACE pg_default;
+(1 row)
+
+INSERT INTO t_multi_keys_list_default VALUES(0,NULL,0);
+SELECT * FROM t_multi_keys_list_default PARTITION(p1) ORDER BY a,b;
+ a | b | c
+---+---+---
+ 0 | | 0
+(1 row)
+
+INSERT INTO t_multi_keys_list_default VALUES(0,'1',0);
+INSERT INTO t_multi_keys_list_default VALUES(0,'2',0);
+INSERT INTO t_multi_keys_list_default VALUES(0,'3',0);
+INSERT INTO t_multi_keys_list_default VALUES(1,'1',0);
+INSERT INTO t_multi_keys_list_default VALUES(1,'2',0);
+SELECT * FROM t_multi_keys_list_default PARTITION(p2) ORDER BY a,b;
+ a | b | c
+---+---+---
+ 0 | 1 | 0
+ 0 | 2 | 0
+ 0 | 3 | 0
+ 1 | 1 | 0
+ 1 | 2 | 0
+(5 rows)
+
+INSERT INTO t_multi_keys_list_default VALUES(NULL,0,0);
+INSERT INTO t_multi_keys_list_default VALUES(2,'1',0);
+SELECT * FROM t_multi_keys_list_default PARTITION(p3) ORDER BY a,b;
+ a | b | c
+---+---+---
+ 2 | 1 | 0
+ | 0 | 0
+(2 rows)
+
+INSERT INTO t_multi_keys_list_default VALUES(3,'2',0);
+INSERT INTO t_multi_keys_list_default VALUES(NULL,NULL,0);
+SELECT * FROM t_multi_keys_list_default PARTITION(p4) ORDER BY a,b;
+ a | b | c
+---+---+---
+ 3 | 2 | 0
+ | | 0
+(2 rows)
+
+INSERT INTO t_multi_keys_list_default VALUES(4,'4',4);
+SELECT * FROM t_multi_keys_list_default PARTITION(pd) ORDER BY a,b;
+ a | b | c
+---+---+---
+ 4 | 4 | 4
+(1 row)
+
+EXPLAIN (costs false)
+SELECT a FROM t_multi_keys_list_default WHERE a IS NULL;
+ QUERY PLAN
+------------------------------------------------------------------------------
+ Partition Iterator
+ Iterations: 3
+ -> Partitioned Bitmap Heap Scan on t_multi_keys_list_default
+ Recheck Cond: (a IS NULL)
+ Selected Partitions: 1,4..5
+ -> Partitioned Bitmap Index Scan on t_multi_keys_list_default_idx_l
+ Index Cond: (a IS NULL)
+ Selected Partitions: 1,4..5
+(8 rows)
+
+SELECT a FROM t_multi_keys_list_default WHERE a IS NULL;
+ a
+---
+
+
+(2 rows)
+
+EXPLAIN (costs false)
+SELECT a FROM t_multi_keys_list_default WHERE a = 0;
+ QUERY PLAN
+------------------------------------------------------------------------------
+ Partition Iterator
+ Iterations: 3
+ -> Partitioned Bitmap Heap Scan on t_multi_keys_list_default
+ Recheck Cond: (a = 0)
+ Selected Partitions: 2..3,5
+ -> Partitioned Bitmap Index Scan on t_multi_keys_list_default_idx_l
+ Index Cond: (a = 0)
+ Selected Partitions: 2..3,5
+(8 rows)
+
+SELECT a FROM t_multi_keys_list_default WHERE a = 0;
+ a
+---
+ 0
+ 0
+ 0
+ 0
+(4 rows)
+
+EXPLAIN (costs false)
+SELECT b FROM t_multi_keys_list_default WHERE b IS NULL;
+ QUERY PLAN
+---------------------------------------------------------
+ Partition Iterator
+ Iterations: 3
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: (b IS NULL)
+ Selected Partitions: 2,4..5
+(5 rows)
+
+SELECT b FROM t_multi_keys_list_default WHERE b IS NULL;
+ b
+---
+
+
+(2 rows)
+
+EXPLAIN (costs false)
+SELECT b FROM t_multi_keys_list_default WHERE b = '1';
+ QUERY PLAN
+---------------------------------------------------------
+ Partition Iterator
+ Iterations: 3
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: ((b)::text = '1'::text)
+ Selected Partitions: 1,3,5
+(5 rows)
+
+SELECT b FROM t_multi_keys_list_default WHERE b = '1';
+ b
+---
+ 1
+ 1
+ 1
+(3 rows)
+
+EXPLAIN (costs false)
+SELECT a FROM t_multi_keys_list_default WHERE a = 4;
+ QUERY PLAN
+------------------------------------------------------------------------
+ Partitioned Bitmap Heap Scan on t_multi_keys_list_default
+ Recheck Cond: (a = 4)
+ Selected Partitions: 5
+ -> Partitioned Bitmap Index Scan on t_multi_keys_list_default_idx_l
+ Index Cond: (a = 4)
+ Selected Partitions: 5
+(6 rows)
+
+SELECT a FROM t_multi_keys_list_default WHERE a = 4;
+ a
+---
+ 4
+(1 row)
+
+EXPLAIN (costs false)
+SELECT a,b FROM t_multi_keys_list_default WHERE a < 1 ORDER BY 1,2;
+ QUERY PLAN
+---------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 5
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: (a < 1)
+ Selected Partitions: 1..5
+(7 rows)
+
+SELECT a,b FROM t_multi_keys_list_default WHERE a < 1 ORDER BY 1,2;
+ a | b
+---+---
+ 0 | 1
+ 0 | 2
+ 0 | 3
+ 0 |
+(4 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE (a,b) = (0,'1');
+ QUERY PLAN
+------------------------------------------------------------------------------------------------
+ Partitioned Index Only Scan using t_multi_keys_list_default_idx_l on t_multi_keys_list_default
+ Index Cond: ((a = 0) AND (b = '1'::text))
+ Selected Partitions: 3
+(3 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE (a,b) = (0,'1');
+ a | b | c
+---+---+---
+ 0 | 1 | 0
+(1 row)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE (a,b) = (NULL,'0');
+ QUERY PLAN
+--------------------------
+ Result
+ One-Time Filter: false
+(2 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE (a,b) = (NULL,'0');
+ a | b | c
+---+---+---
+(0 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND b = '0';
+ QUERY PLAN
+------------------------------------------------------------------------------------------------
+ Partitioned Index Only Scan using t_multi_keys_list_default_idx_l on t_multi_keys_list_default
+ Index Cond: ((a IS NULL) AND (b = '0'::text))
+ Selected Partitions: 1
+(3 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND b = '0';
+ a | b | c
+---+---+---
+ | 0 | 0
+(1 row)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE (a IS NULL AND a = 0) AND b = '0' ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------
+ Partition Iterator
+ Iterations: 0
+ -> Partitioned Index Only Scan using t_multi_keys_list_default_idx_l on t_multi_keys_list_default
+ Index Cond: ((a IS NULL) AND (a = 0) AND (b = '0'::text))
+ Selected Partitions: NONE
+(5 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE (a IS NULL AND a = 0) AND b = '0' ORDER BY 1,2;
+ a | b | c
+---+---+---
+(0 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE (a IS NULL OR a = 3) AND b = '2' ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------
+ Sort
+ Sort Key: a
+ -> Partition Iterator
+ Iterations: 2
+ -> Partitioned Bitmap Heap Scan on t_multi_keys_list_default
+ Recheck Cond: (((a IS NULL) AND ((b)::text = '2'::text)) OR ((a = 3) AND ((b)::text = '2'::text)))
+ Selected Partitions: 4..5
+ -> BitmapOr
+ -> Partitioned Bitmap Index Scan on t_multi_keys_list_default_idx_l
+ Index Cond: ((a IS NULL) AND ((b)::text = '2'::text))
+ Selected Partitions: 4..5
+ -> Partitioned Bitmap Index Scan on t_multi_keys_list_default_idx_l
+ Index Cond: ((a = 3) AND ((b)::text = '2'::text))
+ Selected Partitions: 4..5
+(14 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE (a IS NULL OR a = 3) AND b = '2' ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 3 | 2 | 0
+(1 row)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE (a IS NULL OR a IS NOT NULL) AND b = '0' ORDER BY 1,2;
+ QUERY PLAN
+--------------------------------------------------------------------------------------
+ Sort
+ Sort Key: a
+ -> Partition Iterator
+ Iterations: 2
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: (((a IS NULL) OR (a IS NOT NULL)) AND ((b)::text = '0'::text))
+ Selected Partitions: 1,5
+(7 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE (a IS NULL OR a IS NOT NULL) AND b = '0' ORDER BY 1,2;
+ a | b | c
+---+---+---
+ | 0 | 0
+(1 row)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE b IS NOT NULL AND a = 0 ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Sort
+ Sort Key: b
+ -> Partition Iterator
+ Iterations: 2
+ -> Partitioned Bitmap Heap Scan on t_multi_keys_list_default
+ Recheck Cond: ((a = 0) AND (b IS NOT NULL))
+ Selected Partitions: 3,5
+ -> Partitioned Bitmap Index Scan on t_multi_keys_list_default_idx_l
+ Index Cond: ((a = 0) AND (b IS NOT NULL))
+ Selected Partitions: 3,5
+(10 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE b IS NOT NULL AND a = 0 ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 0 | 1 | 0
+ 0 | 2 | 0
+ 0 | 3 | 0
+(3 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE (b IS NOT NULL AND b = '1') AND a = 2 ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------
+ Partitioned Index Only Scan using t_multi_keys_list_default_idx_l on t_multi_keys_list_default
+ Index Cond: ((a = 2) AND (b IS NOT NULL) AND (b = '1'::text))
+ Selected Partitions: 1
+(3 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE (b IS NOT NULL AND b = '1') AND a = 2 ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 2 | 1 | 0
+(1 row)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE (b IS NOT NULL OR b = '0') AND a = 0 ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Sort
+ Sort Key: b
+ -> Partition Iterator
+ Iterations: 2
+ -> Partitioned Bitmap Heap Scan on t_multi_keys_list_default
+ Recheck Cond: (a = 0)
+ Filter: ((b IS NOT NULL) OR ((b)::text = '0'::text))
+ Selected Partitions: 3,5
+ -> Partitioned Bitmap Index Scan on t_multi_keys_list_default_idx_l
+ Index Cond: (a = 0)
+ Selected Partitions: 3,5
+(11 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE (b IS NOT NULL OR b = '0') AND a = 0 ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 0 | 1 | 0
+ 0 | 2 | 0
+ 0 | 3 | 0
+(3 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND b IS NULL ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------
+ Partitioned Index Only Scan using t_multi_keys_list_default_idx_l on t_multi_keys_list_default
+ Index Cond: ((a IS NULL) AND (b IS NULL))
+ Selected Partitions: 4
+(3 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND b IS NULL ORDER BY 1,2;
+ a | b | c
+---+---+---
+ | | 0
+(1 row)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND b IS NOT NULL ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 3
+ -> Partitioned Bitmap Heap Scan on t_multi_keys_list_default
+ Recheck Cond: ((a IS NULL) AND (b IS NOT NULL))
+ Selected Partitions: 1,4..5
+ -> Partitioned Bitmap Index Scan on t_multi_keys_list_default_idx_l
+ Index Cond: ((a IS NULL) AND (b IS NOT NULL))
+ Selected Partitions: 1,4..5
+(10 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND b IS NOT NULL ORDER BY 1,2;
+ a | b | c
+---+---+---
+ | 0 | 0
+(1 row)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL AND b IS NULL ORDER BY 1,2;
+ QUERY PLAN
+---------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 3
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: ((a IS NOT NULL) AND (b IS NULL))
+ Selected Partitions: 2,4..5
+(7 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL AND b IS NULL ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 0 | | 0
+(1 row)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL AND b IS NOT NULL ORDER BY 1,2;
+ QUERY PLAN
+---------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 4
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: ((a IS NOT NULL) AND (b IS NOT NULL))
+ Selected Partitions: 1,3..5
+(7 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL AND b IS NOT NULL ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 0 | 1 | 0
+ 0 | 2 | 0
+ 0 | 3 | 0
+ 1 | 1 | 0
+ 1 | 2 | 0
+ 2 | 1 | 0
+ 3 | 2 | 0
+ 4 | 4 | 4
+(8 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL OR b IS NULL ORDER BY 1,2;
+ QUERY PLAN
+---------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 4
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: ((a IS NULL) OR (b IS NULL))
+ Selected Partitions: 1..2,4..5
+(7 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL OR b IS NULL ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 0 | | 0
+ | 0 | 0
+ | | 0
+(3 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL OR b IS NOT NULL ORDER BY 1,2;
+ QUERY PLAN
+---------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 4
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: ((a IS NULL) OR (b IS NOT NULL))
+ Selected Partitions: 1,3..5
+(7 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL OR b IS NOT NULL ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 0 | 1 | 0
+ 0 | 2 | 0
+ 0 | 3 | 0
+ 1 | 1 | 0
+ 1 | 2 | 0
+ 2 | 1 | 0
+ 3 | 2 | 0
+ 4 | 4 | 4
+ | 0 | 0
+ | | 0
+(10 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL OR b IS NULL ORDER BY 1,2;
+ QUERY PLAN
+---------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 5
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: ((a IS NOT NULL) OR (b IS NULL))
+ Selected Partitions: 1..5
+(7 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL OR b IS NULL ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 0 | 1 | 0
+ 0 | 2 | 0
+ 0 | 3 | 0
+ 0 | | 0
+ 1 | 1 | 0
+ 1 | 2 | 0
+ 2 | 1 | 0
+ 3 | 2 | 0
+ 4 | 4 | 4
+ | | 0
+(10 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL OR b IS NOT NULL ORDER BY 1,2;
+ QUERY PLAN
+---------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 5
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: ((a IS NOT NULL) OR (b IS NOT NULL))
+ Selected Partitions: 1..5
+(7 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL OR b IS NOT NULL ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 0 | 1 | 0
+ 0 | 2 | 0
+ 0 | 3 | 0
+ 0 | | 0
+ 1 | 1 | 0
+ 1 | 2 | 0
+ 2 | 1 | 0
+ 3 | 2 | 0
+ 4 | 4 | 4
+ | 0 | 0
+(10 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL OR a IS NULL ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 3
+ -> Partitioned Bitmap Heap Scan on t_multi_keys_list_default
+ Recheck Cond: (a IS NULL)
+ Selected Partitions: 1,4..5
+ -> Partitioned Bitmap Index Scan on t_multi_keys_list_default_idx_l
+ Index Cond: (a IS NULL)
+ Selected Partitions: 1,4..5
+(10 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL OR a IS NULL ORDER BY 1,2;
+ a | b | c
+---+---+---
+ | 0 | 0
+ | | 0
+(2 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND a IS NULL ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 3
+ -> Partitioned Index Only Scan using t_multi_keys_list_default_idx_l on t_multi_keys_list_default
+ Index Cond: ((a IS NULL) AND (a IS NULL))
+ Selected Partitions: 1,4..5
+(7 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND a IS NULL ORDER BY 1,2;
+ a | b | c
+---+---+---
+ | 0 | 0
+ | | 0
+(2 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL OR a IS NOT NULL ORDER BY 1,2;
+ QUERY PLAN
+---------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 5
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: (a IS NOT NULL)
+ Selected Partitions: 1..5
+(7 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL OR a IS NOT NULL ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 0 | 1 | 0
+ 0 | 2 | 0
+ 0 | 3 | 0
+ 0 | | 0
+ 1 | 1 | 0
+ 1 | 2 | 0
+ 2 | 1 | 0
+ 3 | 2 | 0
+ 4 | 4 | 4
+(9 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL AND a IS NOT NULL ORDER BY 1,2;
+ QUERY PLAN
+---------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 5
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: ((a IS NOT NULL) AND (a IS NOT NULL))
+ Selected Partitions: 1..5
+(7 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL AND a IS NOT NULL ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 0 | 1 | 0
+ 0 | 2 | 0
+ 0 | 3 | 0
+ 0 | | 0
+ 1 | 1 | 0
+ 1 | 2 | 0
+ 2 | 1 | 0
+ 3 | 2 | 0
+ 4 | 4 | 4
+(9 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL OR a IS NOT NULL ORDER BY 1,2;
+ QUERY PLAN
+---------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 5
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: ((a IS NULL) OR (a IS NOT NULL))
+ Selected Partitions: 1..5
+(7 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL OR a IS NOT NULL ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 0 | 1 | 0
+ 0 | 2 | 0
+ 0 | 3 | 0
+ 0 | | 0
+ 1 | 1 | 0
+ 1 | 2 | 0
+ 2 | 1 | 0
+ 3 | 2 | 0
+ 4 | 4 | 4
+ | 0 | 0
+ | | 0
+(11 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND a IS NOT NULL ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 3
+ -> Partitioned Bitmap Heap Scan on t_multi_keys_list_default
+ Recheck Cond: ((a IS NULL) AND (a IS NOT NULL))
+ Selected Partitions: 1,4..5
+ -> Partitioned Bitmap Index Scan on t_multi_keys_list_default_idx_l
+ Index Cond: ((a IS NULL) AND (a IS NOT NULL))
+ Selected Partitions: 1,4..5
+(10 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND a IS NOT NULL ORDER BY 1,2;
+ a | b | c
+---+---+---
+(0 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a = 0 OR b = '0' ORDER BY 1,2;
+ QUERY PLAN
+---------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 4
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: ((a = 0) OR ((b)::text = '0'::text))
+ Selected Partitions: 1..3,5
+(7 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE a = 0 OR b = '0' ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 0 | 1 | 0
+ 0 | 2 | 0
+ 0 | 3 | 0
+ 0 | | 0
+ | 0 | 0
+(5 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE (a,b) IN ((NULL,'0'), (3,'2')) ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------
+ Partitioned Index Only Scan using t_multi_keys_list_default_idx_l on t_multi_keys_list_default
+ Index Cond: ((a = 3) AND (b = '2'::text))
+ Selected Partitions: 4
+(3 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE (a,b) IN ((NULL,'0'), (3,'2')) ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 3 | 2 | 0
+(1 row)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE (a,b) =ANY(ARRAY[(2,'1'::varchar), (3,'2'::varchar)]) ORDER BY 1,2;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 5
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: (ROW(a, b) = ANY (ARRAY[ROW(2, '1'::character varying), ROW(3, '2'::character varying)]))
+ Selected Partitions: 1..5
+(7 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE (a,b) =ANY(ARRAY[(2,'1'::varchar), (3,'2'::varchar)]) ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 2 | 1 | 0
+ 3 | 2 | 0
+(2 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a =ANY(ARRAY[2, 3]) ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 3
+ -> Partitioned Bitmap Heap Scan on t_multi_keys_list_default
+ Recheck Cond: (a = ANY ('{2,3}'::integer[]))
+ Selected Partitions: 1,4..5
+ -> Partitioned Bitmap Index Scan on t_multi_keys_list_default_idx_l
+ Index Cond: (a = ANY ('{2,3}'::integer[]))
+ Selected Partitions: 1,4..5
+(10 rows)
+
+SELECT * FROM t_multi_keys_list_default WHERE a =ANY(ARRAY[2, 3]) ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 2 | 1 | 0
+ 3 | 2 | 0
+(2 rows)
+
+PREPARE part_bdb_stmt(varchar) as SELECT a,b FROM t_multi_keys_list_default WHERE b = $1 ORDER BY 1,2;
+EXPLAIN (costs false)
+EXECUTE part_bdb_stmt('3');
+ QUERY PLAN
+---------------------------------------------------------------
+ Sort
+ Sort Key: a
+ -> Partition Iterator
+ Iterations: PART
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: ((b)::text = ($1)::text)
+ Selected Partitions: PART
+(7 rows)
+
+EXECUTE part_bdb_stmt('1');
+ a | b
+---+---
+ 0 | 1
+ 1 | 1
+ 2 | 1
+(3 rows)
+
+EXECUTE part_bdb_stmt('2');
+ a | b
+---+---
+ 0 | 2
+ 1 | 2
+ 3 | 2
+(3 rows)
+
+PREPARE part_bdb_stmt1(int) as SELECT a,b FROM t_multi_keys_list_default WHERE a != $1 ORDER BY 1,2;
+EXPLAIN (costs false)
+EXECUTE part_bdb_stmt1(3);
+ QUERY PLAN
+---------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: PART
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: (a <> $1)
+ Selected Partitions: PART
+(7 rows)
+
+EXECUTE part_bdb_stmt1(1);
+ a | b
+---+---
+ 0 | 1
+ 0 | 2
+ 0 | 3
+ 0 |
+ 2 | 1
+ 3 | 2
+ 4 | 4
+(7 rows)
+
+EXECUTE part_bdb_stmt1(2);
+ a | b
+---+---
+ 0 | 1
+ 0 | 2
+ 0 | 3
+ 0 |
+ 1 | 1
+ 1 | 2
+ 3 | 2
+ 4 | 4
+(8 rows)
+
+PREPARE part_bdb_stmt2(int) as SELECT a,b FROM t_multi_keys_list_default WHERE a >= $1 ORDER BY 1,2;
+EXPLAIN (costs false)
+EXECUTE part_bdb_stmt2(3);
+ QUERY PLAN
+---------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: PART
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: (a >= $1)
+ Selected Partitions: PART
+(7 rows)
+
+EXECUTE part_bdb_stmt2(1);
+ a | b
+---+---
+ 1 | 1
+ 1 | 2
+ 2 | 1
+ 3 | 2
+ 4 | 4
+(5 rows)
+
+EXECUTE part_bdb_stmt2(2);
+ a | b
+---+---
+ 2 | 1
+ 3 | 2
+ 4 | 4
+(3 rows)
+
+PREPARE part_bdb_stmt3(int, varchar) as SELECT a,b FROM t_multi_keys_list_default WHERE (a,b) = ($1,$2);
+EXPLAIN (costs false)
+EXECUTE part_bdb_stmt3(0,'1');
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------
+ Partition Iterator
+ Iterations: PART
+ -> Partitioned Index Only Scan using t_multi_keys_list_default_idx_l on t_multi_keys_list_default
+ Index Cond: ((a = $1) AND (b = ($2)::text))
+ Selected Partitions: PART
+(5 rows)
+
+EXECUTE part_bdb_stmt3(0,'1');
+ a | b
+---+---
+ 0 | 1
+(1 row)
+
+EXECUTE part_bdb_stmt3(3,'2');
+ a | b
+---+---
+ 3 | 2
+(1 row)
+
+UPDATE t_multi_keys_list_default SET a=2, b='1' where a=1 and b='1';
+SELECT * FROM t_multi_keys_list_default PARTITION(p3) ORDER BY a,b;
+ a | b | c
+---+---+---
+ 2 | 1 | 0
+ 2 | 1 | 0
+ | 0 | 0
+(3 rows)
+
+UPDATE t_multi_keys_list_default SET a=NULL, b='0' where a=1 and b='2';
+SELECT * FROM t_multi_keys_list_default PARTITION(p3) ORDER BY a,b;
+ a | b | c
+---+---+---
+ 2 | 1 | 0
+ 2 | 1 | 0
+ | 0 | 0
+ | 0 | 0
+(4 rows)
+
+EXPLAIN (costs false)
+DELETE t_multi_keys_list_default PARTITION(p3) where b = '0';
+ QUERY PLAN
+---------------------------------------------------------
+ Delete on t_multi_keys_list_default
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Filter: ((b)::text = '0'::text)
+ Selected Partitions: 1
+(4 rows)
+
+DELETE t_multi_keys_list_default PARTITION(p3) where b = '0';
+SELECT * FROM t_multi_keys_list_default PARTITION(p3) ORDER BY a,b;
+ a | b | c
+---+---+---
+ 2 | 1 | 0
+ 2 | 1 | 0
+(2 rows)
+
+EXPLAIN (costs false)
+DELETE t_multi_keys_list_default PARTITION FOR(0,NULL);
+ QUERY PLAN
+---------------------------------------------------------
+ Delete on t_multi_keys_list_default
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Selected Partitions: 2
+(3 rows)
+
+DELETE t_multi_keys_list_default PARTITION FOR(0,NULL);
+SELECT * FROM t_multi_keys_list_default PARTITION(p1) ORDER BY a,b;
+ a | b | c
+---+---+---
+(0 rows)
+
+EXPLAIN (costs false)
+DELETE t_multi_keys_list_default PARTITION FOR(0,'3');
+ QUERY PLAN
+---------------------------------------------------------
+ Delete on t_multi_keys_list_default
+ -> Partitioned Seq Scan on t_multi_keys_list_default
+ Selected Partitions: 3
+(3 rows)
+
+DELETE t_multi_keys_list_default PARTITION FOR(0,'3');
+SELECT * FROM t_multi_keys_list_default PARTITION(p2) ORDER BY a,b;
+ a | b | c
+---+---+---
+(0 rows)
+
+-- alter table partition
+CREATE INDEX test_multi_list_key_gi on t_multi_keys_list_default(c);
+CREATE TABLESPACE part_adb_temp_tbspc RELATIVE LOCATION 'tablespace/part_adb_temp_tbspc';
+ALTER TABLE t_multi_keys_list_default MOVE PARTITION FOR(0,NULL) TABLESPACE part_adb_temp_tbspc;
+SELECT pg_get_tabledef('t_multi_keys_list_default'::regclass);
+ pg_get_tabledef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SET search_path = partition_a_db_schema; +
+ CREATE TABLE t_multi_keys_list_default ( +
+ a integer, +
+ b character varying(4), +
+ c integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY LIST (a, b) +
+ ( +
+ PARTITION p2 VALUES ((0,'1'),(0,'2'),(0,'3'),(1,'1'),(1,'2')) TABLESPACE pg_default, +
+ PARTITION p1 VALUES ((0,NULL)) TABLESPACE part_adb_temp_tbspc, +
+ PARTITION p4 VALUES ((3,'2'),(NULL,NULL)) TABLESPACE pg_default, +
+ PARTITION pd VALUES (DEFAULT) TABLESPACE pg_default, +
+ PARTITION p3 VALUES ((NULL,'0'),(2,'1')) TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT; +
+ CREATE INDEX test_multi_list_key_gi ON t_multi_keys_list_default USING btree (c) TABLESPACE pg_default; +
+ CREATE INDEX t_multi_keys_list_default_idx_l ON t_multi_keys_list_default USING btree (a, b, c) LOCAL(PARTITION p3_a_b_c_idx, PARTITION p1_a_b_c_idx, PARTITION p2_a_b_c_idx, PARTITION p4_a_b_c_idx, PARTITION pd_a_b_c_idx) TABLESPACE pg_default;
+(1 row)
+
+CREATE TABLE t_alter_partition_temp (a int, b varchar(4), c int);
+INSERT INTO t_alter_partition_temp VALUES(NULL,'0',1);
+INSERT INTO t_alter_partition_temp VALUES(2,'1',2);
+CREATE INDEX t_alter_partition_temp_idx_l ON t_alter_partition_temp(a,b,c);
+SELECT * FROM t_alter_partition_temp ORDER BY a,b,c;
+ a | b | c
+---+---+---
+ 2 | 1 | 2
+ | 0 | 1
+(2 rows)
+
+SELECT * FROM t_multi_keys_list_default PARTITION(p3) ORDER BY a,b,c;
+ a | b | c
+---+---+---
+ 2 | 1 | 0
+ 2 | 1 | 0
+(2 rows)
+
+ALTER TABLE t_multi_keys_list_default EXCHANGE PARTITION (p3) WITH TABLE t_alter_partition_temp UPDATE GLOBAL INDEX;
+SELECT * FROM t_alter_partition_temp ORDER BY a,b,c;
+ a | b | c
+---+---+---
+ 2 | 1 | 0
+ 2 | 1 | 0
+(2 rows)
+
+SELECT * FROM t_multi_keys_list_default PARTITION(p3) ORDER BY a,b,c;
+ a | b | c
+---+---+---
+ 2 | 1 | 2
+ | 0 | 1
+(2 rows)
+
+DROP TABLE IF EXISTS t_alter_partition_temp;
+ALTER TABLE t_multi_keys_list_default ADD PARTITION p5 VALUES ((2,1));
+ERROR: list boundary of adding partition MUST NOT overlap with existing partition
+ALTER TABLE t_multi_keys_list_default DROP PARTITION FOR (1,'5');
+ALTER TABLE t_multi_keys_list_default DROP PARTITION FOR (2,'1') UPDATE GLOBAL INDEX;
+SELECT * FROM t_multi_keys_list_default PARTITION FOR (1,'5') ORDER BY a,b;
+ERROR: Cannot find partition by the value
+DETAIL: N/A.
+SELECT * FROM t_multi_keys_list_default PARTITION FOR (2,'1') ORDER BY a,b;
+ERROR: Cannot find partition by the value
+DETAIL: N/A.
+ALTER TABLE t_multi_keys_list_default TRUNCATE PARTITION FOR (NULL,NULL) UPDATE GLOBAL INDEX;
+SELECT * FROM t_multi_keys_list_default PARTITION FOR (NULL,NULL) ORDER BY a,b;
+ a | b | c
+---+---+---
+(0 rows)
+
+ALTER TABLE t_multi_keys_list_default RENAME PARTITION FOR (0,NULL) TO p0;
+ALTER TABLE t_multi_keys_list_default ADD PARTITION pd VALUES (DEFAULT);
+SELECT pg_get_tabledef('t_multi_keys_list_default'::regclass);
+ pg_get_tabledef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SET search_path = partition_a_db_schema; +
+ CREATE TABLE t_multi_keys_list_default ( +
+ a integer, +
+ b character varying(4), +
+ c integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY LIST (a, b) +
+ ( +
+ PARTITION p2 VALUES ((0,'1'),(0,'2'),(0,'3'),(1,'1'),(1,'2')) TABLESPACE pg_default, +
+ PARTITION p0 VALUES ((0,NULL)) TABLESPACE part_adb_temp_tbspc, +
+ PARTITION p4 VALUES ((3,'2'),(NULL,NULL)) TABLESPACE pg_default, +
+ PARTITION pd VALUES (DEFAULT) TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT; +
+ CREATE INDEX t_multi_keys_list_default_idx_l ON t_multi_keys_list_default USING btree (a, b, c) LOCAL(PARTITION p1_a_b_c_idx, PARTITION p2_a_b_c_idx, PARTITION p4_a_b_c_idx, PARTITION pd_a_b_c_idx) TABLESPACE pg_default;
+(1 row)
+
+-- test views
+SELECT table_name,partitioning_type,partition_count,partitioning_key_count,subpartitioning_type FROM MY_PART_TABLES WHERE table_name = 't_multi_keys_list_default' ORDER BY 1;
+ERROR: relation "my_part_tables" does not exist on datanode1
+LINE 1: ...,partitioning_key_count,subpartitioning_type FROM MY_PART_TA...
+ ^
+SELECT table_name,partition_name,high_value,subpartition_count FROM MY_TAB_PARTITIONS WHERE table_name = 't_multi_keys_list_default' ORDER BY 1,2;
+ERROR: relation "my_tab_partitions" does not exist on datanode1
+LINE 1: ...partition_name,high_value,subpartition_count FROM MY_TAB_PAR...
+ ^
+SELECT table_name,partition_name,subpartition_name,high_value,high_value_length FROM MY_TAB_SUBPARTITIONS WHERE table_name = 't_multi_keys_list_default' ORDER BY 1,2,3;
+ERROR: relation "my_tab_subpartitions" does not exist on datanode1
+LINE 1: ...bpartition_name,high_value,high_value_length FROM MY_TAB_SUB...
+ ^
+SELECT table_name,index_name,partition_count,partitioning_key_count,partitioning_type,subpartitioning_type FROM MY_PART_INDEXES WHERE table_name = 't_multi_keys_list_default' ORDER BY 1,2;
+ERROR: relation "my_part_indexes" does not exist on datanode1
+LINE 1: ...count,partitioning_type,subpartitioning_type FROM MY_PART_IN...
+ ^
+SELECT index_name,partition_name,high_value,high_value_length FROM MY_IND_PARTITIONS WHERE index_name = 't_multi_keys_list_default_idx_l' ORDER BY 1,2;
+ERROR: relation "my_ind_partitions" does not exist on datanode1
+LINE 1: ...,partition_name,high_value,high_value_length FROM MY_IND_PAR...
+ ^
+SELECT index_name,partition_name,subpartition_name,high_value,high_value_length FROM MY_IND_SUBPARTITIONS WHERE index_name = 't_multi_keys_list_default_idx_l' ORDER BY 1,2,3;
+ERROR: relation "my_ind_subpartitions" does not exist on datanode1
+LINE 1: ...bpartition_name,high_value,high_value_length FROM MY_IND_SUB...
+ ^
+-- test partition key value and datatype not matched
+CREATE TABLE t_single_key_list_value (a int, b int, c int)
+PARTITION BY LIST (a)
+(
+ PARTITION p1 VALUES ( 0 ),
+ PARTITION p2 VALUES ( 1 ),
+ PARTITION p3 VALUES ( 2, date '12-10-2010' )
+); -- ERROR
+ERROR: list partition value in "p3" does not match datatype of partition key "a"
+CREATE TABLE t_multi_keys_list_value (a int, b int, c int)
+PARTITION BY LIST (a,b)
+(
+ PARTITION p1 VALUES ( (0,0) ),
+ PARTITION p2 VALUES ( (0,1) ),
+ PARTITION p3 VALUES ( (2,1), (NULL,date '12-10-2010') )
+); -- ERROR
+ERROR: list partition value in "p3" does not match datatype of partition key "b"
+DROP TABLE IF EXISTS t_multi_keys_list_default;
+DROP TABLESPACE part_adb_temp_tbspc;
+DROP SCHEMA partition_a_db_schema CASCADE;
+-- -----------------------------------test with B compatibility
+create database part_bdb WITH ENCODING 'UTF-8' dbcompatibility 'B';
+\c part_bdb
+CREATE SCHEMA partition_b_db_schema;
+SET CURRENT_SCHEMA TO partition_b_db_schema;
+-- -----------------------------------test partitions clause with B compatibility
+-- range with partitions clause
+CREATE TABLE t_range_partitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a,b) PARTITIONS 3;
+ERROR: syntax error at or near ";"
+LINE 2: PARTITION BY RANGE COLUMNS(a,b) PARTITIONS 3;
+ ^
+CREATE TABLE t_range_partitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a,b) PARTITIONS 3
+(
+ PARTITION p1 VALUES LESS THAN (100,100),
+ PARTITION p2 VALUES LESS THAN (200,200),
+ PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE)
+);
+DROP TABLE t_range_partitions_clause;
+CREATE TABLE t_range_partitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a,b) PARTITIONS 2
+(
+ PARTITION p1 VALUES LESS THAN (100,100),
+ PARTITION p2 VALUES LESS THAN (200,200),
+ PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE)
+);
+ERROR: Invalid number of partitions
+DETAIL: the number of defined partitions does not match the partitions number '2'
+-- list with partitions clause
+CREATE TABLE t_list_partitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b) PARTITIONS 3;
+ERROR: syntax error at or near ";"
+LINE 2: PARTITION BY LIST COLUMNS(a,b) PARTITIONS 3;
+ ^
+CREATE TABLE t_list_partitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b) PARTITIONS 3
+(
+ PARTITION p1 VALUES IN ((0,0)),
+ PARTITION p2 VALUES IN ((1,1), (1,2)),
+ PARTITION p3 VALUES IN ((2,1), (2,2), (2,3))
+);
+DROP TABLE t_list_partitions_clause;
+CREATE TABLE t_list_partitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b) PARTITIONS 1
+(
+ PARTITION p1 VALUES IN ((0,0)),
+ PARTITION p2 VALUES IN ((1,1), (1,2)),
+ PARTITION p3 VALUES IN ((2,1), (2,2), (2,3))
+);
+ERROR: Invalid number of partitions
+DETAIL: the number of defined partitions does not match the partitions number '1'
+-- key with partitions clause
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY KEY(a) PARTITIONS 3;
+DROP TABLE t_hash_partitions_clause;
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY KEY(a) PARTITIONS 0;
+ERROR: Invalid number of partitions
+LINE 2: PARTITION BY KEY(a) PARTITIONS 0;
+ ^
+DETAIL: partitions number must be a positive integer
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY KEY(a) PARTITIONS -1;
+ERROR: syntax error at or near "-"
+LINE 2: PARTITION BY KEY(a) PARTITIONS -1;
+ ^
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY KEY(a) PARTITIONS 2.5;
+ERROR: syntax error at or near "2.5"
+LINE 2: PARTITION BY KEY(a) PARTITIONS 2.5;
+ ^
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY KEY(a) PARTITIONS '5';
+ERROR: syntax error at or near "'5'"
+LINE 2: PARTITION BY KEY(a) PARTITIONS '5';
+ ^
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY KEY(a) PARTITIONS 1048576;
+ERROR: Invalid number of partitions
+DETAIL: partitions number '1048576' cannot be greater than 1048575
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY KEY(a) PARTITIONS 3
+(
+ PARTITION p1,
+ PARTITION p2,
+ PARTITION p3
+);
+DROP TABLE t_hash_partitions_clause;
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY KEY(a) PARTITIONS 4
+(
+ PARTITION p1,
+ PARTITION p2,
+ PARTITION p3
+);
+ERROR: Invalid number of partitions
+DETAIL: the number of defined partitions does not match the partitions number '4'
+-- range-range with subpartitions clause
+CREATE TABLE t_range_range_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY RANGE(c)
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1 VALUES LESS THAN (100),
+ SUBPARTITION p1sub2 VALUES LESS THAN (MAXVALUE)
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1 VALUES LESS THAN (100),
+ SUBPARTITION p2sub2 VALUES LESS THAN (MAXVALUE)
+ )
+);
+DROP TABLE t_range_range_subpartitions_clause;
+CREATE TABLE t_range_range_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 3 SUBPARTITION BY RANGE(c)
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1 VALUES LESS THAN (100),
+ SUBPARTITION p1sub2 VALUES LESS THAN (MAXVALUE)
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1 VALUES LESS THAN (100),
+ SUBPARTITION p2sub2 VALUES LESS THAN (MAXVALUE)
+ )
+);
+ERROR: Invalid number of partitions
+DETAIL: the number of defined partitions does not match the partitions number '3'
+-- range-list with subpartitions clause
+CREATE TABLE t_range_list_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY LIST(c)
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1 VALUES (0),
+ SUBPARTITION p1sub2 VALUES (1)
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1 VALUES (0),
+ SUBPARTITION p2sub2 VALUES (1)
+ )
+);
+DROP TABLE t_range_list_subpartitions_clause;
+CREATE TABLE t_range_list_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 1 SUBPARTITION BY LIST(c)
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1 VALUES (0),
+ SUBPARTITION p1sub2 VALUES (1)
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1 VALUES (0),
+ SUBPARTITION p2sub2 VALUES (1)
+ )
+);
+ERROR: Invalid number of partitions
+DETAIL: the number of defined partitions does not match the partitions number '1'
+-- range-key with subpartitions clause
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY key(c)
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_range_hash_subpartitions_clause;
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY key(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_range_hash_subpartitions_clause;
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) SUBPARTITION BY key(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_range_hash_subpartitions_clause;
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) SUBPARTITION BY key(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+DROP TABLE t_range_hash_subpartitions_clause;
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) SUBPARTITION BY key(c) SUBPARTITIONS 0
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+ERROR: Invalid number of partitions
+LINE 2: ...N BY RANGE COLUMNS(a) SUBPARTITION BY key(c) SUBPARTITIONS 0
+ ^
+DETAIL: subpartitions number must be a positive integer
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) SUBPARTITION BY key(c) SUBPARTITIONS 0.2E+1
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+ERROR: syntax error at or near "0.2E+1"
+LINE 2: ...RANGE COLUMNS(a) SUBPARTITION BY key(c) SUBPARTITIONS 0.2E+1
+ ^
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) SUBPARTITION BY key(c) SUBPARTITIONS 2.5
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+ERROR: syntax error at or near "2.5"
+LINE 2: ...BY RANGE COLUMNS(a) SUBPARTITION BY key(c) SUBPARTITIONS 2.5
+ ^
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) SUBPARTITION BY key(c) SUBPARTITIONS '5'
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+ERROR: syntax error at or near "'5'"
+LINE 2: ...BY RANGE COLUMNS(a) SUBPARTITION BY key(c) SUBPARTITIONS '5'
+ ^
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) SUBPARTITION BY key(c) SUBPARTITIONS 1048576
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+ERROR: Invalid number of partitions
+DETAIL: subpartitions number '1048576' cannot be greater than 1048575
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY KEY(c) SUBPARTITIONS 3
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2,
+ SUBPARTITION p1sub3
+ )
+);
+ERROR: Invalid number of partitions
+DETAIL: the number of defined partitions does not match the partitions number '2'
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY KEY(c) SUBPARTITIONS 3
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2,
+ SUBPARTITION p1sub3
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2,
+ SUBPARTITION p2sub3,
+ SUBPARTITION p2sub4
+ )
+);
+ERROR: Invalid number of partitions
+DETAIL: The number of defined subpartitions in partition "p2" does not match the subpartitions number: 3
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY KEY(c) SUBPARTITIONS 3
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2,
+ SUBPARTITION p2sub3
+ )
+);
+DROP TABLE t_range_hash_subpartitions_clause;
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY KEY(c) SUBPARTITIONS 3
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2,
+ SUBPARTITION p1sp0
+ )
+);
+ERROR: duplicate subpartition name: "p1sp0"
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY HASH(c) SUBPARTITIONS 11
+(
+ PARTITION p11111111111111111111111111111111111111111111111111111111111 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+ERROR: identifier too long
+DETAIL: The subpartition name "p11111111111111111111111111111111111111111111111111111111111sp10" is too long
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY HASH(c) SUBPARTITIONS 11
+(
+ PARTITION p1111111111111111111111111111111111111111111111111111111111 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+SELECT pg_get_tabledef('t_range_hash_subpartitions_clause'::regclass);
+ pg_get_tabledef
+------------------------------------------------------------------------------------------------------------------------
+ SET search_path = partition_b_db_schema; +
+ CREATE TABLE t_range_hash_subpartitions_clause ( +
+ a integer, +
+ b integer, +
+ c integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY RANGE (a) SUBPARTITION BY HASH (c) +
+ ( +
+ PARTITION p1111111111111111111111111111111111111111111111111111111111 VALUES LESS THAN (100) TABLESPACE pg_default+
+ ( +
+ SUBPARTITION p1111111111111111111111111111111111111111111111111111111111sp0 TABLESPACE pg_default, +
+ SUBPARTITION p1111111111111111111111111111111111111111111111111111111111sp1 TABLESPACE pg_default, +
+ SUBPARTITION p1111111111111111111111111111111111111111111111111111111111sp2 TABLESPACE pg_default, +
+ SUBPARTITION p1111111111111111111111111111111111111111111111111111111111sp3 TABLESPACE pg_default, +
+ SUBPARTITION p1111111111111111111111111111111111111111111111111111111111sp4 TABLESPACE pg_default, +
+ SUBPARTITION p1111111111111111111111111111111111111111111111111111111111sp5 TABLESPACE pg_default, +
+ SUBPARTITION p1111111111111111111111111111111111111111111111111111111111sp6 TABLESPACE pg_default, +
+ SUBPARTITION p1111111111111111111111111111111111111111111111111111111111sp7 TABLESPACE pg_default, +
+ SUBPARTITION p1111111111111111111111111111111111111111111111111111111111sp8 TABLESPACE pg_default, +
+ SUBPARTITION p1111111111111111111111111111111111111111111111111111111111sp9 TABLESPACE pg_default, +
+ SUBPARTITION p1111111111111111111111111111111111111111111111111111111111sp10 TABLESPACE pg_default +
+ ), +
+ PARTITION p2 VALUES LESS THAN (200) TABLESPACE pg_default +
+ ( +
+ SUBPARTITION p2sp0 TABLESPACE pg_default, +
+ SUBPARTITION p2sp1 TABLESPACE pg_default, +
+ SUBPARTITION p2sp2 TABLESPACE pg_default, +
+ SUBPARTITION p2sp3 TABLESPACE pg_default, +
+ SUBPARTITION p2sp4 TABLESPACE pg_default, +
+ SUBPARTITION p2sp5 TABLESPACE pg_default, +
+ SUBPARTITION p2sp6 TABLESPACE pg_default, +
+ SUBPARTITION p2sp7 TABLESPACE pg_default, +
+ SUBPARTITION p2sp8 TABLESPACE pg_default, +
+ SUBPARTITION p2sp9 TABLESPACE pg_default, +
+ SUBPARTITION p2sp10 TABLESPACE pg_default +
+ ) +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+DROP TABLE t_range_hash_subpartitions_clause;
+-- list-range with subpartitions clause
+CREATE TABLE t_hash_range_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS (a) PARTITIONS 2 SUBPARTITION BY RANGE(c)
+(
+ PARTITION p1 VALUES IN (100) (
+ SUBPARTITION p1sub1 VALUES LESS THAN (100),
+ SUBPARTITION p1sub2 VALUES LESS THAN (MAXVALUE)
+ ),
+ PARTITION p2 VALUES IN (200) (
+ SUBPARTITION p2sub1 VALUES LESS THAN (100),
+ SUBPARTITION p2sub2 VALUES LESS THAN (MAXVALUE)
+ )
+);
+DROP TABLE t_hash_range_subpartitions_clause;
+-- list-list with subpartitions clause
+CREATE TABLE t_hash_list_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS (a) PARTITIONS 2 SUBPARTITION BY LIST(c)
+(
+ PARTITION p1 VALUES IN (100) (
+ SUBPARTITION p1sub1 VALUES (0),
+ SUBPARTITION p1sub2 VALUES (1)
+ ),
+ PARTITION p2 VALUES IN (200) (
+ SUBPARTITION p2sub1 VALUES (0),
+ SUBPARTITION p2sub2 VALUES (1)
+ )
+);
+DROP TABLE t_hash_list_subpartitions_clause;
+-- list-key with subpartitions clause
+CREATE TABLE t_list_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS (a) PARTITIONS 2 SUBPARTITION BY KEY(c)
+(
+ PARTITION p1 VALUES IN (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 VALUES IN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_list_hash_subpartitions_clause;
+CREATE TABLE t_list_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS (a) PARTITIONS 2 SUBPARTITION BY KEY(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES IN (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 VALUES IN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_list_hash_subpartitions_clause;
+CREATE TABLE t_list_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS (a) SUBPARTITION BY KEY(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES IN (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 VALUES IN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_list_hash_subpartitions_clause;
+CREATE TABLE t_list_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS (a) SUBPARTITION BY KEY(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES IN (100),
+ PARTITION p2 VALUES IN (200)
+);
+DROP TABLE t_list_hash_subpartitions_clause;
+CREATE TABLE t_list_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS (a) SUBPARTITION BY KEY(c) SUBPARTITIONS 2;
+ERROR: syntax error at or near ";"
+LINE 2: ... BY LIST COLUMNS (a) SUBPARTITION BY KEY(c) SUBPARTITIONS 2;
+ ^
+-- key-range with subpartitions clause
+CREATE TABLE t_hash_range_subpartitions_clause (a int, b int, c int)
+PARTITION BY KEY (a) PARTITIONS 2 SUBPARTITION BY RANGE(c)
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1 VALUES LESS THAN (100),
+ SUBPARTITION p1sub2 VALUES LESS THAN (MAXVALUE)
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1 VALUES LESS THAN (100),
+ SUBPARTITION p2sub2 VALUES LESS THAN (MAXVALUE)
+ )
+);
+DROP TABLE t_hash_range_subpartitions_clause;
+-- key-list with subpartitions clause
+CREATE TABLE t_hash_list_subpartitions_clause (a int, b int, c int)
+PARTITION BY KEY (a) PARTITIONS 2 SUBPARTITION BY LIST(c)
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1 VALUES (0),
+ SUBPARTITION p1sub2 VALUES (1)
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1 VALUES (0),
+ SUBPARTITION p2sub2 VALUES (1)
+ )
+);
+DROP TABLE t_hash_list_subpartitions_clause;
+-- key-hash with subpartitions clause
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY KEY (a) PARTITIONS 2 SUBPARTITION BY HASH(c)
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY KEY (a) PARTITIONS 2 SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY KEY (a) PARTITIONS 1 SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+ERROR: Invalid number of partitions
+DETAIL: the number of defined partitions does not match the partitions number '1'
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY KEY (a) PARTITIONS 2 SUBPARTITION BY HASH(c) SUBPARTITIONS 3
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+ERROR: Invalid number of partitions
+DETAIL: The number of defined subpartitions in partition "p1" does not match the subpartitions number: 3
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY KEY (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY KEY (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1,
+ PARTITION p2
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+-- test the key of partition and subpartition is same column
+CREATE TABLE t_single_key_range_subpart(id int, birthdate int)
+ PARTITION BY RANGE (birthdate)
+ SUBPARTITION BY HASH (birthdate)
+ SUBPARTITIONS 2 (
+ PARTITION p0 VALUES LESS THAN (1990),
+ PARTITION p1 VALUES LESS THAN (2000),
+ PARTITION p2 VALUES LESS THAN MAXVALUE
+);
+DROP TABLE IF EXISTS t_single_key_range_subpart;
+-- --------------------------------------------------------test range columns syntax with B compatibility
+CREATE TABLE t_single_key_range (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a)
+(
+ PARTITION p1 VALUES LESS THAN (100) TABLESPACE = pg_default,
+ PARTITION p2 VALUES LESS THAN (200),
+ PARTITION p3 VALUES LESS THAN (300),
+ PARTITION p4 VALUES LESS THAN MAXVALUE
+);
+SELECT pg_get_tabledef('t_single_key_range'::regclass);
+ pg_get_tabledef
+--------------------------------------------------------------------
+ SET search_path = partition_b_db_schema; +
+ CREATE TABLE t_single_key_range ( +
+ a integer, +
+ b integer, +
+ c integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY RANGE (a) +
+ ( +
+ PARTITION p1 VALUES LESS THAN (100) TABLESPACE pg_default, +
+ PARTITION p2 VALUES LESS THAN (200) TABLESPACE pg_default, +
+ PARTITION p3 VALUES LESS THAN (300) TABLESPACE pg_default, +
+ PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+DROP TABLE IF EXISTS t_single_key_range;
+-- error
+CREATE TABLE t_multi_keys_range (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a,b)
+(
+ PARTITION p1 VALUES LESS THAN (100,100),
+ PARTITION p2 VALUES LESS THAN (200,200),
+ PARTITION p3 VALUES LESS THAN (300,300),
+ PARTITION p4 VALUES LESS THAN MAXVALUE
+);
+ERROR: partition bound list contains too few elements
+CREATE TABLE t_multi_keys_range (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a,b)
+(
+ PARTITION p1 VALUES LESS THAN (100,100),
+ PARTITION p2 VALUES LESS THAN (200,200),
+ PARTITION p3 VALUES LESS THAN (300,300),
+ PARTITION p4 VALUES LESS THAN (400,MAXVALUE),
+ PARTITION p5 VALUES LESS THAN (MAXVALUE,MAXVALUE)
+);
+\d+ t_multi_keys_range
+ Table "partition_b_db_schema.t_multi_keys_range"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b | integer | | plain | |
+ c | integer | | plain | |
+Partition By RANGE(a, b)
+Number of partitions: 5 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+SELECT pg_get_tabledef('t_multi_keys_range'::regclass);
+ pg_get_tabledef
+------------------------------------------------------------------------------
+ SET search_path = partition_b_db_schema; +
+ CREATE TABLE t_multi_keys_range ( +
+ a integer, +
+ b integer, +
+ c integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY RANGE (a, b) +
+ ( +
+ PARTITION p1 VALUES LESS THAN (100, 100) TABLESPACE pg_default, +
+ PARTITION p2 VALUES LESS THAN (200, 200) TABLESPACE pg_default, +
+ PARTITION p3 VALUES LESS THAN (300, 300) TABLESPACE pg_default, +
+ PARTITION p4 VALUES LESS THAN (400, MAXVALUE) TABLESPACE pg_default, +
+ PARTITION p5 VALUES LESS THAN (MAXVALUE, MAXVALUE) TABLESPACE pg_default+
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+DROP TABLE IF EXISTS t_multi_keys_range;
+-- --------------------------------------------------------test number of columns
+CREATE TABLE t_multi_keys_range_num (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY RANGE COLUMNS(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q)
+(
+ PARTITION p1 VALUES LESS THAN (100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p2 VALUES LESS THAN (200,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p3 VALUES LESS THAN (300,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p4 VALUES LESS THAN (400,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p5 VALUES LESS THAN (MAXVALUE,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)
+);
+ERROR: too many partition keys for partitioned table
+HINT: Partittion key columns can not be more than 16
+CREATE TABLE t_multi_keys_range_num (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY RANGE COLUMNS(a,b,c,d)
+(
+ PARTITION p1 VALUES LESS THAN (100,100,100,100),
+ PARTITION p2 VALUES LESS THAN (200,100,100,100),
+ PARTITION p3 VALUES LESS THAN (300,100,100),
+ PARTITION p4 VALUES LESS THAN (400,100,100,100),
+ PARTITION p5 VALUES LESS THAN (MAXVALUE,100,100,100)
+);
+ERROR: partition bound list contains too few elements
+CREATE TABLE t_multi_keys_range_num (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY RANGE COLUMNS(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p)
+(
+ PARTITION p1 VALUES LESS THAN (100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p2 VALUES LESS THAN (200,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p3 VALUES LESS THAN (300,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p4 VALUES LESS THAN (400,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p5 VALUES LESS THAN (MAXVALUE,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)
+);
+DROP TABLE IF EXISTS t_multi_keys_range_num;
+-- --------------------------------------------------------test key partition with B compatibility
+CREATE TABLE t_part_by_key (a int, b int, c int)
+PARTITION BY KEY(a)
+(
+ PARTITION p1 TABLESPACE = pg_default,
+ PARTITION p2,
+ PARTITION p3,
+ PARTITION p4,
+ PARTITION p5
+);
+\d+ t_part_by_key
+ Table "partition_b_db_schema.t_part_by_key"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+---------+-----------+---------+--------------+-------------
+ a | integer | | plain | |
+ b | integer | | plain | |
+ c | integer | | plain | |
+Partition By HASH(a)
+Number of partitions: 5 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+SELECT pg_get_tabledef('t_part_by_key'::regclass);
+ pg_get_tabledef
+------------------------------------------
+ SET search_path = partition_b_db_schema;+
+ CREATE TABLE t_part_by_key ( +
+ a integer, +
+ b integer, +
+ c integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY HASH (a) +
+ ( +
+ PARTITION p1 TABLESPACE pg_default, +
+ PARTITION p2 TABLESPACE pg_default, +
+ PARTITION p3 TABLESPACE pg_default, +
+ PARTITION p4 TABLESPACE pg_default, +
+ PARTITION p5 TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+DROP TABLE IF EXISTS t_part_by_key;
+CREATE TABLE t_part_by_key_num (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY KEY(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p)
+(
+ PARTITION p1,
+ PARTITION p2,
+ PARTITION p3,
+ PARTITION p4,
+ PARTITION p5
+);
+ERROR: Un-support feature
+DETAIL: The partition key's length should be 1.
+-- --------------------------------------------------------test list partition with B compatibility
+-- errors
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY LIST COLUMNS(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p)
+(
+ PARTITION p1 VALUES IN (100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p2 VALUES IN (200,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p3 VALUES IN (300,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p4 VALUES IN (400,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)
+);
+ERROR: Invalid partition values
+DETAIL: list partition values in "p1" does not match the number of partition keys
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY LIST COLUMNS(a,b,c,d)
+(
+ PARTITION p1 VALUES IN (100,100,100,100),
+ PARTITION p2 VALUES IN (200,100,100,100),
+ PARTITION p3 VALUES IN (300,100,100,100),
+ PARTITION p4 VALUES IN (400,100,100,100)
+);
+ERROR: Invalid partition values
+DETAIL: list partition values in "p1" does not match the number of partition keys
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY LIST COLUMNS(a,b,c,d)
+(
+ PARTITION p1 VALUES IN ((100,100,100,100)),
+ PARTITION p2 VALUES IN ((200,100,100,100)),
+ PARTITION p3 VALUES IN ((300,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)),
+ PARTITION p4 VALUES IN ((400,100,100,100))
+);
+ERROR: Invalid partition values
+DETAIL: list partition values in "p3" does not match the number of partition keys
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY LIST COLUMNS(a,b,c,d)
+(
+ PARTITION p1 VALUES IN ((100,100,NULL,100),(100,100,NULL,100)),
+ PARTITION p2 VALUES IN ((200,200,100,100),(100,100,100,200)),
+ PARTITION p3 VALUES IN ((300,300,100,100),(100,100,100,300)),
+ PARTITION p4 VALUES IN ((400,400,100,100),(100,100,100,400))
+);
+ERROR: list partition p1 has overlapped value
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY LIST COLUMNS(a,b,c,d)
+(
+ PARTITION p1 VALUES IN ((100,100,NULL,100),(100,100,100,400)),
+ PARTITION p2 VALUES IN ((200,200,100,100),(100,100,100,300)),
+ PARTITION p3 VALUES IN ((300,300,100,100),(100,100,100,200)),
+ PARTITION p4 VALUES IN ((400,400,100,100),(100,100,NULL,100))
+);
+ERROR: list partition p1 and p4 has overlapped value
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b)
+(
+ PARTITION p1 VALUES IN ( (0,MAXVALUE) ),
+ PARTITION p2 VALUES IN ( (0,1), (0,2), (0,3), (1,1), (1,2) )
+);
+ERROR: syntax error at or near "MAXVALUE"
+LINE 4: PARTITION p1 VALUES IN ( (0,MAXVALUE) ),
+ ^
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b)
+(
+ PARTITION p1 VALUES IN ( (0,DEFAULT) ),
+ PARTITION p2 VALUES IN ( (0,1), (0,2), (0,3), (1,1), (1,2) )
+);
+ERROR: syntax error at or near "DEFAULT"
+LINE 4: PARTITION p1 VALUES IN ( (0,DEFAULT) ),
+ ^
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b)
+(
+ PARTITION p1 VALUES IN (MAXVALUE),
+ PARTITION p2 VALUES IN ( (0,1), (0,2), (0,3), (1,1), (1,2) )
+);
+ERROR: syntax error at or near "MAXVALUE"
+LINE 4: PARTITION p1 VALUES IN (MAXVALUE),
+ ^
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b)
+(
+ PARTITION p1 VALUES IN ( (NULL, NULL)),
+ PARTITION p2 VALUES IN ( (0,1), (0,2), (0,A), (1,1), (1,2) )
+);
+ERROR: column "a" does not exist
+LINE 5: PARTITION p2 VALUES IN ( (0,1), (0,2), (0,A), (1,1), (1,...
+ ^
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY LIST COLUMNS(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q)
+(
+ PARTITION p1 VALUES IN ((100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)),
+ PARTITION p2 VALUES IN ((200,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)),
+ PARTITION p3 VALUES IN ((300,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)),
+ PARTITION p4 VALUES IN ((400,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100))
+);
+ERROR: too many partition keys for partitioned table
+HINT: Partittion key columns can not be more than 16
+-- normal
+CREATE TABLE t_multi_keys_list (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY LIST COLUMNS(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p)
+(
+ PARTITION p1 VALUES IN ((100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)),
+ PARTITION p2 VALUES IN ((200,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)),
+ PARTITION p3 VALUES IN ((300,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)),
+ PARTITION p4 VALUES IN ((400,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100))
+);
+DROP TABLE IF EXISTS t_multi_keys_list;
+CREATE TABLE t_multi_keys_list (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b)
+(
+ PARTITION p1 VALUES IN ( (0,0) ) TABLESPACE = pg_default,
+ PARTITION p2 VALUES IN ( (0,1), (0,2), (0,3), (1,1), (1,2) ),
+ PARTITION p3 VALUES IN ( (2,0), (2,1) ),
+ PARTITION p4 VALUES IN ( (3,2), (3,3) )
+);
+SELECT pg_get_tabledef('t_multi_keys_list'::regclass);
+ pg_get_tabledef
+--------------------------------------------------------------------------------
+ SET search_path = partition_b_db_schema; +
+ CREATE TABLE t_multi_keys_list ( +
+ a integer, +
+ b integer, +
+ c integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY LIST (a, b) +
+ ( +
+ PARTITION p1 VALUES ((0,0)) TABLESPACE pg_default, +
+ PARTITION p2 VALUES ((0,1),(0,2),(0,3),(1,1),(1,2)) TABLESPACE pg_default,+
+ PARTITION p3 VALUES ((2,0),(2,1)) TABLESPACE pg_default, +
+ PARTITION p4 VALUES ((3,2),(3,3)) TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+INSERT INTO t_multi_keys_list VALUES(0,0,0);
+SELECT * FROM t_multi_keys_list PARTITION(p1) ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 0 | 0 | 0
+(1 row)
+
+INSERT INTO t_multi_keys_list VALUES(0,1,0);
+INSERT INTO t_multi_keys_list VALUES(0,2,0);
+INSERT INTO t_multi_keys_list VALUES(0,3,0);
+INSERT INTO t_multi_keys_list VALUES(1,1,0);
+INSERT INTO t_multi_keys_list VALUES(1,2,0);
+SELECT * FROM t_multi_keys_list PARTITION(p2) ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 0 | 1 | 0
+ 0 | 2 | 0
+ 0 | 3 | 0
+ 1 | 1 | 0
+ 1 | 2 | 0
+(5 rows)
+
+INSERT INTO t_multi_keys_list VALUES(2,0,0);
+INSERT INTO t_multi_keys_list VALUES(2,1,0);
+SELECT * FROM t_multi_keys_list PARTITION(p3) ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 2 | 0 | 0
+ 2 | 1 | 0
+(2 rows)
+
+INSERT INTO t_multi_keys_list VALUES(3,2,0);
+INSERT INTO t_multi_keys_list VALUES(3,3,0);
+SELECT * FROM t_multi_keys_list PARTITION(p4) ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 3 | 2 | 0
+ 3 | 3 | 0
+(2 rows)
+
+INSERT INTO t_multi_keys_list VALUES(4,4,4);
+ERROR: inserted partition key does not map to any table partition
+DROP TABLE IF EXISTS t_multi_keys_list;
+-- test with null keys
+CREATE TABLE t_multi_keys_list_null (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b)
+(
+ PARTITION p1 VALUES IN ( (0,NULL) ),
+ PARTITION p2 VALUES IN ( (0,1), (0,2), (0,3), (1,1), (1,2) ),
+ PARTITION p3 VALUES IN ( (NULL,0), (2,1) ),
+ PARTITION p4 VALUES IN ( (3,2), (NULL,NULL) )
+);
+CREATE INDEX t_multi_keys_list_null_idx_l ON t_multi_keys_list_null(a,b,c) LOCAL;
+SELECT pg_get_tabledef('t_multi_keys_list_null'::regclass);
+ pg_get_tabledef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SET search_path = partition_b_db_schema; +
+ CREATE TABLE t_multi_keys_list_null ( +
+ a integer, +
+ b integer, +
+ c integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY LIST (a, b) +
+ ( +
+ PARTITION p2 VALUES ((0,1),(0,2),(0,3),(1,1),(1,2)) TABLESPACE pg_default, +
+ PARTITION p1 VALUES ((0,NULL)) TABLESPACE pg_default, +
+ PARTITION p4 VALUES ((3,2),(NULL,NULL)) TABLESPACE pg_default, +
+ PARTITION p3 VALUES ((NULL,0),(2,1)) TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT; +
+ CREATE INDEX t_multi_keys_list_null_idx_l ON t_multi_keys_list_null USING btree (a, b, c) LOCAL(PARTITION p3_a_b_c_idx, PARTITION p1_a_b_c_idx, PARTITION p2_a_b_c_idx, PARTITION p4_a_b_c_idx) TABLESPACE pg_default;
+(1 row)
+
+INSERT INTO t_multi_keys_list_null VALUES(0,NULL,0);
+SELECT * FROM t_multi_keys_list_null PARTITION(p1) ORDER BY a,b;
+ a | b | c
+---+---+---
+ 0 | | 0
+(1 row)
+
+INSERT INTO t_multi_keys_list_null VALUES(0,1,0);
+INSERT INTO t_multi_keys_list_null VALUES(0,2,0);
+INSERT INTO t_multi_keys_list_null VALUES(0,3,0);
+INSERT INTO t_multi_keys_list_null VALUES(1,1,0);
+INSERT INTO t_multi_keys_list_null VALUES(1,2,0);
+SELECT * FROM t_multi_keys_list_null PARTITION(p2) ORDER BY a,b;
+ a | b | c
+---+---+---
+ 0 | 1 | 0
+ 0 | 2 | 0
+ 0 | 3 | 0
+ 1 | 1 | 0
+ 1 | 2 | 0
+(5 rows)
+
+INSERT INTO t_multi_keys_list_null VALUES(NULL,0,0);
+INSERT INTO t_multi_keys_list_null VALUES(2,1,0);
+SELECT * FROM t_multi_keys_list_null PARTITION(p3) ORDER BY a,b;
+ a | b | c
+---+---+---
+ 2 | 1 | 0
+ | 0 | 0
+(2 rows)
+
+INSERT INTO t_multi_keys_list_null VALUES(3,2,0);
+INSERT INTO t_multi_keys_list_null VALUES(NULL,NULL,0);
+SELECT * FROM t_multi_keys_list_null PARTITION(p4) ORDER BY a,b;
+ a | b | c
+---+---+---
+ 3 | 2 | 0
+ | | 0
+(2 rows)
+
+INSERT INTO t_multi_keys_list_null VALUES(4,4,4);
+ERROR: inserted partition key does not map to any table partition
+EXPLAIN (costs false)
+SELECT a FROM t_multi_keys_list_null WHERE a IS NULL;
+ QUERY PLAN
+---------------------------------------------------------------------------
+ Partition Iterator
+ Iterations: 2
+ -> Partitioned Bitmap Heap Scan on t_multi_keys_list_null
+ Recheck Cond: (a IS NULL)
+ Selected Partitions: 1,4
+ -> Partitioned Bitmap Index Scan on t_multi_keys_list_null_idx_l
+ Index Cond: (a IS NULL)
+ Selected Partitions: 1,4
+(8 rows)
+
+SELECT a FROM t_multi_keys_list_null WHERE a IS NULL;
+ a
+---
+
+
+(2 rows)
+
+EXPLAIN (costs false)
+SELECT a FROM t_multi_keys_list_null WHERE a = 0;
+ QUERY PLAN
+---------------------------------------------------------------------------
+ Partition Iterator
+ Iterations: 2
+ -> Partitioned Bitmap Heap Scan on t_multi_keys_list_null
+ Recheck Cond: (a = 0)
+ Selected Partitions: 2..3
+ -> Partitioned Bitmap Index Scan on t_multi_keys_list_null_idx_l
+ Index Cond: (a = 0)
+ Selected Partitions: 2..3
+(8 rows)
+
+SELECT a FROM t_multi_keys_list_null WHERE a = 0;
+ a
+---
+ 0
+ 0
+ 0
+ 0
+(4 rows)
+
+EXPLAIN (costs false)
+SELECT b FROM t_multi_keys_list_null WHERE b IS NULL;
+ QUERY PLAN
+------------------------------------------------------
+ Partition Iterator
+ Iterations: 2
+ -> Partitioned Seq Scan on t_multi_keys_list_null
+ Filter: (b IS NULL)
+ Selected Partitions: 2,4
+(5 rows)
+
+SELECT b FROM t_multi_keys_list_null WHERE b IS NULL;
+ b
+---
+
+
+(2 rows)
+
+EXPLAIN (costs false)
+SELECT b FROM t_multi_keys_list_null WHERE b = 1;
+ QUERY PLAN
+------------------------------------------------------
+ Partition Iterator
+ Iterations: 2
+ -> Partitioned Seq Scan on t_multi_keys_list_null
+ Filter: (b = 1)
+ Selected Partitions: 1,3
+(5 rows)
+
+SELECT b FROM t_multi_keys_list_null WHERE b = 1;
+ b
+---
+ 1
+ 1
+ 1
+(3 rows)
+
+EXPLAIN (costs false)
+SELECT a FROM t_multi_keys_list_null WHERE a = 4;
+ QUERY PLAN
+---------------------------------------------------------------------------
+ Partition Iterator
+ Iterations: 0
+ -> Partitioned Bitmap Heap Scan on t_multi_keys_list_null
+ Recheck Cond: (a = 4)
+ Selected Partitions: NONE
+ -> Partitioned Bitmap Index Scan on t_multi_keys_list_null_idx_l
+ Index Cond: (a = 4)
+ Selected Partitions: NONE
+(8 rows)
+
+SELECT a FROM t_multi_keys_list_null WHERE a = 4;
+ a
+---
+(0 rows)
+
+EXPLAIN (costs false)
+SELECT a,b FROM t_multi_keys_list_null WHERE a < 1 ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 4
+ -> Partitioned Seq Scan on t_multi_keys_list_null
+ Filter: (a < 1)
+ Selected Partitions: 1..4
+(7 rows)
+
+SELECT a,b FROM t_multi_keys_list_null WHERE a < 1 ORDER BY 1,2;
+ a | b
+---+---
+ 0 | 1
+ 0 | 2
+ 0 | 3
+ 0 |
+(4 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_null WHERE (a,b) = (0,1);
+ QUERY PLAN
+------------------------------------------------------------------------------------------
+ Partitioned Index Only Scan using t_multi_keys_list_null_idx_l on t_multi_keys_list_null
+ Index Cond: ((a = 0) AND (b = 1))
+ Selected Partitions: 3
+(3 rows)
+
+SELECT * FROM t_multi_keys_list_null WHERE (a,b) = (0,1);
+ a | b | c
+---+---+---
+ 0 | 1 | 0
+(1 row)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_null WHERE (a,b) = (NULL,0);
+ QUERY PLAN
+--------------------------
+ Result
+ One-Time Filter: false
+(2 rows)
+
+SELECT * FROM t_multi_keys_list_null WHERE (a,b) = (NULL,0);
+ a | b | c
+---+---+---
+(0 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_null WHERE a IS NULL AND b = 0;
+ QUERY PLAN
+------------------------------------------------------------------------------------------
+ Partitioned Index Only Scan using t_multi_keys_list_null_idx_l on t_multi_keys_list_null
+ Index Cond: ((a IS NULL) AND (b = 0))
+ Selected Partitions: 1
+(3 rows)
+
+SELECT * FROM t_multi_keys_list_null WHERE a IS NULL AND b = 0;
+ a | b | c
+---+---+---
+ | 0 | 0
+(1 row)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_null WHERE a = 0 OR b = 0 ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 3
+ -> Partitioned Seq Scan on t_multi_keys_list_null
+ Filter: ((a = 0) OR (b = 0))
+ Selected Partitions: 1..3
+(7 rows)
+
+SELECT * FROM t_multi_keys_list_null WHERE a = 0 OR b = 0 ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 0 | 1 | 0
+ 0 | 2 | 0
+ 0 | 3 | 0
+ 0 | | 0
+ | 0 | 0
+(5 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_null WHERE a IN (2,0) ORDER BY 1,2;
+ QUERY PLAN
+---------------------------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 3
+ -> Partitioned Bitmap Heap Scan on t_multi_keys_list_null
+ Recheck Cond: (a = ANY ('{2,0}'::integer[]))
+ Selected Partitions: 1..3
+ -> Partitioned Bitmap Index Scan on t_multi_keys_list_null_idx_l
+ Index Cond: (a = ANY ('{2,0}'::integer[]))
+ Selected Partitions: 1..3
+(10 rows)
+
+SELECT * FROM t_multi_keys_list_null WHERE a IN (2,0) ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 0 | 1 | 0
+ 0 | 2 | 0
+ 0 | 3 | 0
+ 0 | | 0
+ 2 | 1 | 0
+(5 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_null WHERE (a,b) IN ((1,1), (3,2)) ORDER BY 1,2;
+ QUERY PLAN
+---------------------------------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 2
+ -> Partitioned Bitmap Heap Scan on t_multi_keys_list_null
+ Recheck Cond: (((a = 1) AND (b = 1)) OR ((a = 3) AND (b = 2)))
+ Selected Partitions: 3..4
+ -> BitmapOr
+ -> Partitioned Bitmap Index Scan on t_multi_keys_list_null_idx_l
+ Index Cond: ((a = 1) AND (b = 1))
+ Selected Partitions: 3..4
+ -> Partitioned Bitmap Index Scan on t_multi_keys_list_null_idx_l
+ Index Cond: ((a = 3) AND (b = 2))
+ Selected Partitions: 3..4
+(14 rows)
+
+SELECT * FROM t_multi_keys_list_null WHERE (a,b) IN ((1,1), (3,2)) ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 1 | 1 | 0
+ 3 | 2 | 0
+(2 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_null WHERE (a,b) =ANY(ARRAY[(2,1), (3,2)]) ORDER BY 1,2;
+ QUERY PLAN
+-----------------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 4
+ -> Partitioned Seq Scan on t_multi_keys_list_null
+ Filter: (ROW(a, b) = ANY (ARRAY[ROW(2, 1), ROW(3, 2)]))
+ Selected Partitions: 1..4
+(7 rows)
+
+SELECT * FROM t_multi_keys_list_null WHERE (a,b) =ANY(ARRAY[(2,1), (3,2)]) ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 2 | 1 | 0
+ 3 | 2 | 0
+(2 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_null WHERE a =ANY(ARRAY[2, 3]) ORDER BY 1,2;
+ QUERY PLAN
+---------------------------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: 2
+ -> Partitioned Bitmap Heap Scan on t_multi_keys_list_null
+ Recheck Cond: (a = ANY ('{2,3}'::integer[]))
+ Selected Partitions: 1,4
+ -> Partitioned Bitmap Index Scan on t_multi_keys_list_null_idx_l
+ Index Cond: (a = ANY ('{2,3}'::integer[]))
+ Selected Partitions: 1,4
+(10 rows)
+
+SELECT * FROM t_multi_keys_list_null WHERE a =ANY(ARRAY[2, 3]) ORDER BY 1,2;
+ a | b | c
+---+---+---
+ 2 | 1 | 0
+ 3 | 2 | 0
+(2 rows)
+
+PREPARE part_bdb_stmt(int) as SELECT a,b FROM t_multi_keys_list_null WHERE b = $1 ORDER BY 1,2;
+EXPLAIN (costs false)
+EXECUTE part_bdb_stmt(3);
+ QUERY PLAN
+------------------------------------------------------------
+ Sort
+ Sort Key: a
+ -> Partition Iterator
+ Iterations: PART
+ -> Partitioned Seq Scan on t_multi_keys_list_null
+ Filter: (b = $1)
+ Selected Partitions: PART
+(7 rows)
+
+EXECUTE part_bdb_stmt(1);
+ a | b
+---+---
+ 0 | 1
+ 1 | 1
+ 2 | 1
+(3 rows)
+
+EXECUTE part_bdb_stmt(2);
+ a | b
+---+---
+ 0 | 2
+ 1 | 2
+ 3 | 2
+(3 rows)
+
+PREPARE part_bdb_stmt1(int) as SELECT a,b FROM t_multi_keys_list_null WHERE a != $1 ORDER BY 1,2;
+EXPLAIN (costs false)
+EXECUTE part_bdb_stmt1(3);
+ QUERY PLAN
+------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: PART
+ -> Partitioned Seq Scan on t_multi_keys_list_null
+ Filter: (a <> $1)
+ Selected Partitions: PART
+(7 rows)
+
+EXECUTE part_bdb_stmt1(1);
+ a | b
+---+---
+ 0 | 1
+ 0 | 2
+ 0 | 3
+ 0 |
+ 2 | 1
+ 3 | 2
+(6 rows)
+
+EXECUTE part_bdb_stmt1(2);
+ a | b
+---+---
+ 0 | 1
+ 0 | 2
+ 0 | 3
+ 0 |
+ 1 | 1
+ 1 | 2
+ 3 | 2
+(7 rows)
+
+PREPARE part_bdb_stmt2(int) as SELECT a,b FROM t_multi_keys_list_null WHERE a >= $1 ORDER BY 1,2;
+EXPLAIN (costs false)
+EXECUTE part_bdb_stmt2(3);
+ QUERY PLAN
+------------------------------------------------------------
+ Sort
+ Sort Key: a, b
+ -> Partition Iterator
+ Iterations: PART
+ -> Partitioned Seq Scan on t_multi_keys_list_null
+ Filter: (a >= $1)
+ Selected Partitions: PART
+(7 rows)
+
+EXECUTE part_bdb_stmt2(1);
+ a | b
+---+---
+ 1 | 1
+ 1 | 2
+ 2 | 1
+ 3 | 2
+(4 rows)
+
+EXECUTE part_bdb_stmt2(2);
+ a | b
+---+---
+ 2 | 1
+ 3 | 2
+(2 rows)
+
+PREPARE part_bdb_stmt3(int, int) as SELECT a,b FROM t_multi_keys_list_null WHERE (a,b) = ($1,$2);
+EXPLAIN (costs false)
+EXECUTE part_bdb_stmt3(0,1);
+ QUERY PLAN
+------------------------------------------------------------------------------------------------
+ Partition Iterator
+ Iterations: PART
+ -> Partitioned Index Only Scan using t_multi_keys_list_null_idx_l on t_multi_keys_list_null
+ Index Cond: ((a = $1) AND (b = $2))
+ Selected Partitions: PART
+(5 rows)
+
+EXECUTE part_bdb_stmt3(0,1);
+ a | b
+---+---
+ 0 | 1
+(1 row)
+
+EXECUTE part_bdb_stmt3(3,2);
+ a | b
+---+---
+ 3 | 2
+(1 row)
+
+UPDATE t_multi_keys_list_null SET a=2, b=1 where a=1 and b=1;
+SELECT * FROM t_multi_keys_list_null PARTITION(p3) ORDER BY a,b;
+ a | b | c
+---+---+---
+ 2 | 1 | 0
+ 2 | 1 | 0
+ | 0 | 0
+(3 rows)
+
+UPDATE t_multi_keys_list_null SET a=NULL, b=0 where a=1 and b=2;
+SELECT * FROM t_multi_keys_list_null PARTITION(p3) ORDER BY a,b;
+ a | b | c
+---+---+---
+ 2 | 1 | 0
+ 2 | 1 | 0
+ | 0 | 0
+ | 0 | 0
+(4 rows)
+
+EXPLAIN (costs false)
+DELETE t_multi_keys_list_null PARTITION(p3, p4) where b = 0;
+ QUERY PLAN
+------------------------------------------------------------
+ Delete on t_multi_keys_list_null
+ -> Partition Iterator
+ Iterations: 2
+ -> Partitioned Seq Scan on t_multi_keys_list_null
+ Filter: (b = 0)
+ Selected Partitions: 1,4
+(6 rows)
+
+DELETE t_multi_keys_list_null PARTITION(p3, p4) where b = 0;
+SELECT * FROM t_multi_keys_list_null PARTITION(p3) ORDER BY a,b;
+ a | b | c
+---+---+---
+ 2 | 1 | 0
+ 2 | 1 | 0
+(2 rows)
+
+EXPLAIN (costs false)
+DELETE t_multi_keys_list_null PARTITION FOR(0,NULL);
+ QUERY PLAN
+------------------------------------------------------
+ Delete on t_multi_keys_list_null
+ -> Partitioned Seq Scan on t_multi_keys_list_null
+ Selected Partitions: 2
+(3 rows)
+
+DELETE t_multi_keys_list_null PARTITION FOR(0,NULL);
+SELECT * FROM t_multi_keys_list_null PARTITION(p1) ORDER BY a,b;
+ a | b | c
+---+---+---
+(0 rows)
+
+-- alter table partition
+CREATE INDEX test_multi_list_key_gi on t_multi_keys_list_null(c);
+CREATE TABLESPACE part_bdb_temp_tbspc RELATIVE LOCATION 'tablespace/part_bdb_temp_tbspc';
+ALTER TABLE t_multi_keys_list_null MOVE PARTITION FOR(0,NULL) TABLESPACE part_bdb_temp_tbspc;
+SELECT pg_get_tabledef('t_multi_keys_list_null'::regclass);
+ pg_get_tabledef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SET search_path = partition_b_db_schema; +
+ CREATE TABLE t_multi_keys_list_null ( +
+ a integer, +
+ b integer, +
+ c integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY LIST (a, b) +
+ ( +
+ PARTITION p2 VALUES ((0,1),(0,2),(0,3),(1,1),(1,2)) TABLESPACE pg_default, +
+ PARTITION p1 VALUES ((0,NULL)) TABLESPACE part_bdb_temp_tbspc, +
+ PARTITION p4 VALUES ((3,2),(NULL,NULL)) TABLESPACE pg_default, +
+ PARTITION p3 VALUES ((NULL,0),(2,1)) TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT; +
+ CREATE INDEX test_multi_list_key_gi ON t_multi_keys_list_null USING btree (c) TABLESPACE pg_default; +
+ CREATE INDEX t_multi_keys_list_null_idx_l ON t_multi_keys_list_null USING btree (a, b, c) LOCAL(PARTITION p3_a_b_c_idx, PARTITION p1_a_b_c_idx, PARTITION p2_a_b_c_idx, PARTITION p4_a_b_c_idx) TABLESPACE pg_default;
+(1 row)
+
+CREATE TABLE t_alter_partition_temp (a int, b int, c int);
+INSERT INTO t_alter_partition_temp VALUES(NULL,0,1);
+INSERT INTO t_alter_partition_temp VALUES(2,1,2);
+CREATE INDEX t_alter_partition_temp_idx_l ON t_alter_partition_temp(a,b,c);
+SELECT * FROM t_alter_partition_temp ORDER BY a,b,c;
+ a | b | c
+---+---+---
+ 2 | 1 | 2
+ | 0 | 1
+(2 rows)
+
+SELECT * FROM t_multi_keys_list_null PARTITION(p3) ORDER BY a,b,c;
+ a | b | c
+---+---+---
+ 2 | 1 | 0
+ 2 | 1 | 0
+(2 rows)
+
+ALTER TABLE t_multi_keys_list_null EXCHANGE PARTITION (p3) WITH TABLE t_alter_partition_temp UPDATE GLOBAL INDEX;
+SELECT * FROM t_alter_partition_temp ORDER BY a,b,c;
+ a | b | c
+---+---+---
+ 2 | 1 | 0
+ 2 | 1 | 0
+(2 rows)
+
+SELECT * FROM t_multi_keys_list_null PARTITION(p3) ORDER BY a,b,c;
+ a | b | c
+---+---+---
+ 2 | 1 | 2
+ | 0 | 1
+(2 rows)
+
+DROP TABLE IF EXISTS t_alter_partition_temp;
+ALTER TABLE t_multi_keys_list_null ADD PARTITION p5 VALUES (1);
+ERROR: Invalid partition values
+DETAIL: list partition values in "p5" does not match the number of partition keys
+ALTER TABLE t_multi_keys_list_null ADD PARTITION p5 VALUES ((2,1));
+ERROR: list boundary of adding partition MUST NOT overlap with existing partition
+ALTER TABLE t_multi_keys_list_null ADD PARTITION p5 VALUES ((2,1,1));
+ERROR: Invalid partition values
+DETAIL: list partition values in "p5" does not match the number of partition keys
+ALTER TABLE t_multi_keys_list_null ADD PARTITION p5 VALUES ((4,NULL),(2,2),(4,NULL));
+ERROR: list partition p5 has overlapped value
+ALTER TABLE t_multi_keys_list_null ADD PARTITION p5 VALUES ((4,4));
+INSERT INTO t_multi_keys_list_null VALUES(4,4,4);
+SELECT * FROM t_multi_keys_list_null PARTITION(p5) ORDER BY a,b;
+ a | b | c
+---+---+---
+ 4 | 4 | 4
+(1 row)
+
+ALTER TABLE t_multi_keys_list_null DROP PARTITION FOR (1);
+ERROR: number of boundary items NOT EQUAL to number of partition keys
+ALTER TABLE t_multi_keys_list_null DROP PARTITION FOR (1,5,8);
+ERROR: number of boundary items NOT EQUAL to number of partition keys
+ALTER TABLE t_multi_keys_list_null DROP PARTITION FOR (1,5);
+ERROR: The partition number is invalid or out-of-range
+ALTER TABLE t_multi_keys_list_null DROP PARTITION FOR (2,1) UPDATE GLOBAL INDEX;
+SELECT * FROM t_multi_keys_list_null PARTITION(p3) ORDER BY a,b;
+ERROR: partition "p3" of relation "t_multi_keys_list_null" does not exist
+ALTER TABLE t_multi_keys_list_null TRUNCATE PARTITION FOR (4,4) UPDATE GLOBAL INDEX;
+SELECT * FROM t_multi_keys_list_null PARTITION(p5) ORDER BY a,b;
+ a | b | c
+---+---+---
+(0 rows)
+
+ALTER TABLE t_multi_keys_list_null RENAME PARTITION FOR (0,NULL) TO p0;
+SELECT pg_get_tabledef('t_multi_keys_list_null'::regclass);
+ pg_get_tabledef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SET search_path = partition_b_db_schema; +
+ CREATE TABLE t_multi_keys_list_null ( +
+ a integer, +
+ b integer, +
+ c integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY LIST (a, b) +
+ ( +
+ PARTITION p2 VALUES ((0,1),(0,2),(0,3),(1,1),(1,2)) TABLESPACE pg_default, +
+ PARTITION p0 VALUES ((0,NULL)) TABLESPACE part_bdb_temp_tbspc, +
+ PARTITION p4 VALUES ((3,2),(NULL,NULL)) TABLESPACE pg_default, +
+ PARTITION p5 VALUES ((4,4)) TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT; +
+ CREATE INDEX test_multi_list_key_gi ON t_multi_keys_list_null USING btree (c) TABLESPACE pg_default; +
+ CREATE INDEX t_multi_keys_list_null_idx_l ON t_multi_keys_list_null USING btree (a, b, c) LOCAL(PARTITION p1_a_b_c_idx, PARTITION p2_a_b_c_idx, PARTITION p4_a_b_c_idx, PARTITION p5_a_b_c_idx) TABLESPACE pg_default;
+(1 row)
+
+-- test views
+SELECT table_name,partitioning_type,partition_count,partitioning_key_count,subpartitioning_type FROM MY_PART_TABLES ORDER BY 1;
+ERROR: relation "my_part_tables" does not exist on datanode1
+LINE 1: ...,partitioning_key_count,subpartitioning_type FROM MY_PART_TA...
+ ^
+SELECT table_name,partition_name,high_value,subpartition_count FROM MY_TAB_PARTITIONS ORDER BY 1,2;
+ERROR: relation "my_tab_partitions" does not exist on datanode1
+LINE 1: ...partition_name,high_value,subpartition_count FROM MY_TAB_PAR...
+ ^
+SELECT table_name,partition_name,subpartition_name,high_value,high_value_length FROM MY_TAB_SUBPARTITIONS ORDER BY 1,2,3;
+ERROR: relation "my_tab_subpartitions" does not exist on datanode1
+LINE 1: ...bpartition_name,high_value,high_value_length FROM MY_TAB_SUB...
+ ^
+SELECT table_name,index_name,partition_count,partitioning_key_count,partitioning_type,subpartitioning_type FROM MY_PART_INDEXES ORDER BY 1,2;
+ERROR: relation "my_part_indexes" does not exist on datanode1
+LINE 1: ...count,partitioning_type,subpartitioning_type FROM MY_PART_IN...
+ ^
+SELECT index_name,partition_name,high_value,high_value_length FROM MY_IND_PARTITIONS ORDER BY 1,2;
+ERROR: relation "my_ind_partitions" does not exist on datanode1
+LINE 1: ...,partition_name,high_value,high_value_length FROM MY_IND_PAR...
+ ^
+SELECT index_name,partition_name,subpartition_name,high_value,high_value_length FROM MY_IND_SUBPARTITIONS ORDER BY 1,2,3;
+ERROR: relation "my_ind_subpartitions" does not exist on datanode1
+LINE 1: ...bpartition_name,high_value,high_value_length FROM MY_IND_SUB...
+ ^
+DROP TABLE IF EXISTS t_multi_keys_list_null;
+DROP TABLESPACE part_bdb_temp_tbspc;
+-- test subpart
+CREATE TABLE t_keys_range_list (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) SUBPARTITION BY LIST (b,c)
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION sbp11 VALUES ((1,1)),
+ SUBPARTITION sbp12 VALUES ((1,2)),
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION sbp21 VALUES ((2,1)),
+ SUBPARTITION sbp22 VALUES ((2,2)),
+ ),
+ PARTITION p5 VALUES LESS THAN (MAXVALUE) (
+ SUBPARTITION sbp31 VALUES ((3,1)),
+ SUBPARTITION sbp32 VALUES ((3,2)),
+ )
+);
+ERROR: Un-support feature
+DETAIL: The partition key's length should be 1.
+\d+ t_keys_range_list
+SELECT pg_get_tabledef('t_keys_range_list'::regclass);
+ERROR: relation "t_keys_range_list" does not exist
+LINE 1: SELECT pg_get_tabledef('t_keys_range_list'::regclass);
+ ^
+CONTEXT: referenced column: pg_get_tabledef
+DROP TABLE IF EXISTS t_keys_range_list;
+NOTICE: table "t_keys_range_list" does not exist, skipping
+-- MAXVALUE in subpartiton
+CREATE TABLE range_011
+(
+co1 SMALLINT
+,co2 INTEGER
+,co3 BIGINT
+)
+PARTITION BY range COLUMNS(co1) PARTITIONS 3 SUBPARTITION BY range (co2)
+(
+PARTITION p_range_1 values less than (10)
+(
+SUBPARTITION p_range_1_1 values less than ( 20 ),
+SUBPARTITION p_range_1_2 values less than ( 100 ),
+SUBPARTITION p_range_1_5 values less than (MAXVALUE)
+),
+PARTITION p_range_2 values less than (20)
+(
+SUBPARTITION p_range_2_1 values less than ( 20 ),
+SUBPARTITION p_range_2_2 values less than ( 100 ),
+SUBPARTITION p_range_2_5 values less than (MAXVALUE)
+),
+PARTITION p_range_3 values less than MAXVALUE
+(
+SUBPARTITION p_range_3_1 values less than ( 20 ),
+SUBPARTITION p_range_3_2 values less than ( 100 ),
+SUBPARTITION p_range_3_5 values less than (MAXVALUE)
+)) ENABLE ROW MOVEMENT;
+DROP TABLE range_011;
+CREATE TABLE range_011
+(
+co1 SMALLINT
+,co2 INTEGER
+,co3 BIGINT
+)
+PARTITION BY range COLUMNS(co1) PARTITIONS 3 SUBPARTITION BY range (co2)
+(
+PARTITION p_range_1 values less than (10)
+(
+SUBPARTITION p_range_1_1 values less than ( 20 ),
+SUBPARTITION p_range_1_2 values less than ( 100 ),
+SUBPARTITION p_range_1_5 values less than (MAXVALUE)
+),
+PARTITION p_range_2 values less than (20)
+(
+SUBPARTITION p_range_2_1 values less than ( 20 ),
+SUBPARTITION p_range_2_2 values less than ( 100 ),
+SUBPARTITION p_range_2_5 values less than (MAXVALUE)
+),
+PARTITION p_range_3 values less than MAXVALUE
+(
+SUBPARTITION p_range_3_1 values less than ( 20 ),
+SUBPARTITION p_range_3_2 values less than ( 100 ),
+SUBPARTITION p_range_3_5 values less than MAXVALUE
+)) ENABLE ROW MOVEMENT; -- ERROR
+ERROR: syntax error at or near "MAXVALUE"
+LINE 25: SUBPARTITION p_range_3_5 values less than MAXVALUE
+ ^
+-- END
+DROP SCHEMA partition_b_db_schema CASCADE;
+\c regression
+drop database part_bdb;
diff --git a/src/test/regress/expected/hw_partition_hash_dml.out b/src/test/regress/expected/hw_partition_hash_dml.out
index 8bbab4c1a..ba8e85eae 100644
--- a/src/test/regress/expected/hw_partition_hash_dml.out
+++ b/src/test/regress/expected/hw_partition_hash_dml.out
@@ -187,18 +187,18 @@ insert into hw_hash_partition_dml_t2 values (28, 'EEE');
select * from hw_hash_partition_dml_t3;
id | name
----+------
- 27 | xiao
- 11 | he
1 | qin
+ 11 | he
+ 27 | xiao
(3 rows)
select * from hw_hash_partition_dml_t2;
id | name
----+------
- 27 | qi
- 11 | zhao
1 | xi
28 | EEE
+ 11 | zhao
+ 27 | qi
(4 rows)
delete from hw_hash_partition_dml_t3 using hw_hash_partition_dml_t2 where hw_hash_partition_dml_t3.id < hw_hash_partition_dml_t2.id and hw_hash_partition_dml_t2.id = 28;
diff --git a/src/test/regress/expected/hw_partition_hash_dql.out b/src/test/regress/expected/hw_partition_hash_dql.out
index a88e04a09..0dbddbef4 100644
--- a/src/test/regress/expected/hw_partition_hash_dql.out
+++ b/src/test/regress/expected/hw_partition_hash_dql.out
@@ -2104,7 +2104,7 @@ explain (verbose on, costs off)
-> Partitioned Seq Scan on fvt_compress.test_explain_format_on_part_table
Output: id
Filter: ((test_explain_format_on_part_table.id = 5) OR (test_explain_format_on_part_table.id = 25) OR (test_explain_format_on_part_table.id = 45) OR (test_explain_format_on_part_table.id = 65) OR (test_explain_format_on_part_table.id = 85))
- Selected Partitions: 3,5..7
+ Selected Partitions: 3..5,7
(7 rows)
-- two continous segments, non-text formast
@@ -2155,7 +2155,7 @@ explain (verbose on, costs off, FORMAT JSON)
"Alias": "test_explain_format_on_part_table", +
"Output": ["id"], +
"Filter": "((test_explain_format_on_part_table.id = 5) OR (test_explain_format_on_part_table.id = 25) OR (test_explain_format_on_part_table.id = 45) OR (test_explain_format_on_part_table.id = 65) OR (test_explain_format_on_part_table.id = 85))",+
- "Selected Partitions": "3,5..7" +
+ "Selected Partitions": "3..5,7" +
} +
] +
} +
diff --git a/src/test/regress/expected/hw_partition_list_ddl.out b/src/test/regress/expected/hw_partition_list_ddl.out
index 911c65615..7d473b30b 100644
--- a/src/test/regress/expected/hw_partition_list_ddl.out
+++ b/src/test/regress/expected/hw_partition_list_ddl.out
@@ -267,6 +267,34 @@ partition p81 values ( 81 ),
partition p82 values ( 82 )
);
drop table test_list;
+create table test_listkey_datatype
+(
+col_2 INT2,
+col_3 INT4,
+col_4 INT4,
+col_5 INT4,
+col_6 INT4,
+col_32 NUMERIC,
+col_33 VARCHAR(10),
+col_34 CHAR,
+col_35 BPCHAR,
+col_36 TIMESTAMP WITHOUT TIME ZONE,
+col_37 DATE
+) partition by list(col_5,col_4,col_6,col_37)
+(
+ partition p1 values ((2,1,2,'2022-02-03'),(6,3,6,'2022-02-07')),
+ partition p2 values ((5,4,5,'2022-02-08')),
+ partition p3 values ((7,6,7,'2022-02-09')),
+ partition p7 values (default)
+);
+insert into test_listkey_datatype(col_5,col_4,col_6,col_37) values(6,3,6,'2022-02-07');
+select col_5 from test_listkey_datatype partition (p1);
+ col_5
+-------
+ 6
+(1 row)
+
+drop table test_listkey_datatype;
drop schema FVT_COMPRESS_QWER cascade;
NOTICE: drop cascades to 5 other objects
DETAIL: drop cascades to table bmsql_order_line
diff --git a/src/test/regress/expected/hw_partition_parallel.out b/src/test/regress/expected/hw_partition_parallel.out
new file mode 100644
index 000000000..ece1cbae8
--- /dev/null
+++ b/src/test/regress/expected/hw_partition_parallel.out
@@ -0,0 +1,577 @@
+-- prepare
+DROP SCHEMA partition_parallel CASCADE;
+ERROR: schema "partition_parallel" does not exist
+CREATE SCHEMA partition_parallel;
+SET CURRENT_SCHEMA TO partition_parallel;
+--
+----range table----
+--
+--prepare
+CREATE TABLE range_sales
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 NOT NULL,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (time_id)
+(
+ PARTITION time_2008 VALUES LESS THAN ('2009-01-01'),
+ PARTITION time_2009 VALUES LESS THAN ('2010-01-01'),
+ PARTITION time_2010 VALUES LESS THAN ('2011-01-01'),
+ PARTITION time_2011 VALUES LESS THAN ('2012-01-01')
+);
+INSERT INTO range_sales SELECT generate_series(1,1000),
+ generate_series(1,1000),
+ date_pli('2008-01-01', generate_series(1,1000)),
+ generate_series(1,1000)%10,
+ generate_series(1,1000)%10,
+ generate_series(1,1000)%1000,
+ generate_series(1,1000);
+CREATE INDEX range_sales_idx1 ON range_sales(channel_id) LOCAL;
+CREATE INDEX range_sales_idx2 ON range_sales(customer_id) GLOBAL;
+--create a temp table to exchange
+CREATE TABLE range_temp
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 NOT NULL,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE);
+CREATE INDEX ON range_temp(channel_id);
+INSERT INTO range_temp SELECT * FROM range_sales WHERE time_id < '2009-01-01';
+--drop
+\parallel on
+ALTER TABLE range_sales DROP PARTITION time_2008 UPDATE GLOBAL INDEX;
+UPDATE range_sales SET channel_id = 'X' WHERE time_id = '2010-06-01';
+INSERT INTO range_sales VALUES(1,1,'2011-06-01', 'X',1,1,1);
+SELECT /*+ tablescan(range_sales)*/ COUNT(*) FROM range_sales PARTITION (time_2010);
+\parallel off
+ count
+-------
+ 270
+(1 row)
+
+SELECT COUNT(*) FROM range_sales WHERE channel_id = 'X';
+ count
+-------
+ 2
+(1 row)
+
+--split
+ALTER TABLE range_sales RENAME PARTITION time_2009 TO time_2009_temp;
+\parallel on
+ALTER TABLE range_sales SPLIT PARTITION time_2009_temp AT ('2009-01-01')
+ INTO (PARTITION time_2008, PARTITION time_2009) UPDATE GLOBAL INDEX;
+UPDATE range_sales PARTITION (time_2010) SET channel_id = '0' WHERE channel_id = 'X';
+DELETE FROM range_sales WHERE channel_id = 'X' AND time_id >= '2011-01-01';
+\parallel off
+SELECT COUNT(*) FROM range_sales WHERE channel_id = 'X';
+ count
+-------
+ 0
+(1 row)
+
+--truncate
+\parallel on
+ALTER TABLE range_sales TRUNCATE PARTITION time_2008 UPDATE GLOBAL INDEX;
+SELECT /*+ tablescan(range_sales)*/ COUNT(*) FROM range_sales PARTITION (time_2008);
+SELECT /*+ indexonlyscan(range_sales range_sales_idx1)*/ COUNT(channel_id) FROM range_sales PARTITION (time_2008);
+SELECT /*+ indexonlyscan(range_sales range_sales_idx2)*/ COUNT(customer_id) FROM range_sales;
+UPDATE range_sales SET channel_id = 'X' WHERE time_id = '2010-06-01';
+INSERT INTO range_sales VALUES(1,1,'2011-06-01', 'X',1,1,1);
+\parallel off
+ count
+-------
+--?.*
+(1 row)
+
+ count
+-------
+--?.*
+(1 row)
+
+ count
+-------
+--?.*
+(1 row)
+
+SELECT COUNT(*) FROM range_sales WHERE channel_id = 'X';
+ count
+-------
+ 2
+(1 row)
+
+--exchange
+\parallel on
+ALTER TABLE range_sales EXCHANGE PARTITION (time_2008) WITH TABLE range_temp UPDATE GLOBAL INDEX;
+SELECT /*+ tablescan(range_sales)*/ COUNT(*) FROM range_sales PARTITION (time_2008);
+SELECT /*+ indexonlyscan(range_sales range_sales_idx1)*/ COUNT(channel_id) FROM range_sales PARTITION (time_2008);
+SELECT /*+ indexonlyscan(range_sales range_sales_idx2)*/ COUNT(customer_id) FROM range_sales;
+UPDATE range_sales PARTITION (time_2010) SET channel_id = '0' WHERE channel_id = 'X';
+DELETE FROM range_sales WHERE channel_id = 'X' AND time_id >= '2011-01-01';
+\parallel off
+ count
+-------
+--?.*
+(1 row)
+
+ count
+-------
+--?.*
+(1 row)
+
+ count
+-------
+--?.*
+(1 row)
+
+SELECT COUNT(*) FROM range_sales WHERE channel_id = 'X';
+ count
+-------
+ 0
+(1 row)
+
+--merge
+\parallel on
+ALTER TABLE range_sales MERGE PARTITIONS time_2008, time_2009 INTO PARTITION time_2009 UPDATE GLOBAL INDEX;
+UPDATE range_sales SET channel_id = 'X' WHERE time_id = '2010-06-01';
+INSERT INTO range_sales VALUES(1,1,'2011-06-01', 'X',1,1,1);
+\parallel off
+SELECT COUNT(*) FROM range_sales WHERE channel_id = 'X';
+ count
+-------
+ 2
+(1 row)
+
+--finish
+DROP TABLE range_sales;
+DROP TABLE range_temp;
+--
+----list table----
+--
+--prepare
+CREATE TABLE list_sales
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 NOT NULL,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (channel_id)
+(
+ PARTITION channel1 VALUES ('0', '1', '2'),
+ PARTITION channel2 VALUES ('3', '4', '5'),
+ PARTITION channel3 VALUES ('6', '7'),
+ PARTITION channel4 VALUES ('8', '9')
+);
+INSERT INTO list_sales SELECT generate_series(1,1000),
+ generate_series(1,1000),
+ date_pli('2008-01-01', generate_series(1,1000)),
+ generate_series(1,1000)%10,
+ generate_series(1,1000)%10,
+ generate_series(1,1000)%1000,
+ generate_series(1,1000);
+CREATE INDEX list_sales_idx1 ON list_sales(channel_id) LOCAL;
+CREATE INDEX list_sales_idx2 ON list_sales(type_id) GLOBAL;
+--create a temp table to exchange
+CREATE TABLE list_temp
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 NOT NULL,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE);
+CREATE INDEX ON list_temp(channel_id);
+INSERT INTO list_temp SELECT * FROM list_sales WHERE channel_id in ('0', '1', '2');
+--drop
+\parallel on
+ALTER TABLE list_sales DROP PARTITION channel1 UPDATE GLOBAL INDEX;
+UPDATE list_sales SET type_id = -1 WHERE channel_id = '6';
+INSERT INTO list_sales VALUES(1,1,'2011-06-01', '8',-1,1,1);
+SELECT /*+ tablescan(list_sales)*/ COUNT(*) FROM list_sales PARTITION (channel3);
+\parallel off
+ count
+-------
+ 200
+(1 row)
+
+SELECT COUNT(*) FROM list_sales WHERE type_id = -1;
+ count
+-------
+ 101
+(1 row)
+
+--add
+\parallel on
+ALTER TABLE list_sales ADD PARTITION channel1 VALUES ('0', '1', '2');
+UPDATE list_sales PARTITION (channel3) SET type_id = 1 WHERE type_id = -1;
+DELETE FROM list_sales WHERE type_id = -1 AND channel_id in ('8', '9');
+\parallel off
+SELECT COUNT(*) FROM list_sales WHERE type_id = -1;
+ count
+-------
+ 0
+(1 row)
+
+--truncate
+\parallel on
+ALTER TABLE list_sales TRUNCATE PARTITION channel1 UPDATE GLOBAL INDEX;
+SELECT /*+ tablescan(list_sales)*/ COUNT(*) FROM list_sales PARTITION (channel1);
+SELECT /*+ indexonlyscan(list_sales list_sales_idx1)*/ COUNT(channel_id) FROM list_sales PARTITION (channel1);
+SELECT /*+ indexonlyscan(list_sales list_sales_idx2)*/ COUNT(type_id) FROM list_sales;
+UPDATE list_sales SET type_id = -1 WHERE channel_id = '6';
+INSERT INTO list_sales VALUES(1,1,'2011-06-01', '8',-1,1,1);
+\parallel off
+ count
+-------
+--?.*
+(1 row)
+
+ count
+-------
+--?.*
+(1 row)
+
+ count
+-------
+--?.*
+(1 row)
+
+SELECT COUNT(*) FROM list_sales WHERE type_id = -1;
+ count
+-------
+ 101
+(1 row)
+
+--exchange
+\parallel on
+ALTER TABLE list_sales EXCHANGE PARTITION (channel1) WITH TABLE list_temp UPDATE GLOBAL INDEX;
+SELECT /*+ tablescan(list_sales)*/ COUNT(*) FROM list_sales PARTITION (channel1);
+SELECT /*+ indexonlyscan(list_sales list_sales_idx1)*/ COUNT(channel_id) FROM list_sales PARTITION (channel1);
+SELECT /*+ indexonlyscan(list_sales list_sales_idx2)*/ COUNT(type_id) FROM list_sales;
+UPDATE list_sales PARTITION (channel3) SET type_id = 1 WHERE type_id = -1;
+DELETE FROM list_sales WHERE type_id = -1 AND channel_id in ('8', '9');
+\parallel off
+ count
+-------
+--?.*
+(1 row)
+
+ count
+-------
+--?.*
+(1 row)
+
+ count
+-------
+--?.*
+(1 row)
+
+SELECT COUNT(*) FROM list_sales WHERE type_id = -1;
+ count
+-------
+ 0
+(1 row)
+
+--finish
+DROP TABLE list_sales;
+DROP TABLE list_temp;
+--
+----interval table----
+--
+--prepare
+CREATE TABLE interval_sales
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 NOT NULL,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (time_id) INTERVAL ('1 year')
+(
+ PARTITION time_2008 VALUES LESS THAN ('2009-01-01'),
+ PARTITION time_2009 VALUES LESS THAN ('2010-01-01'),
+ PARTITION time_2010 VALUES LESS THAN ('2011-01-01'),
+ PARTITION time_2011 VALUES LESS THAN ('2012-01-01')
+);
+INSERT INTO interval_sales SELECT generate_series(1,1000),
+ generate_series(1,1000),
+ date_pli('2008-01-01', generate_series(1,1000)),
+ generate_series(1,1000)%10,
+ generate_series(1,1000)%10,
+ generate_series(1,1000)%1000,
+ generate_series(1,1000);
+CREATE INDEX interval_sales_idx1 ON interval_sales(channel_id) LOCAL;
+CREATE INDEX interval_sales_idx2 ON interval_sales(customer_id) GLOBAL;
+--create a temp table to exchange
+CREATE TABLE interval_temp
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 NOT NULL,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE);
+CREATE INDEX ON interval_temp(channel_id);
+INSERT INTO interval_temp SELECT * FROM interval_sales WHERE time_id < '2009-01-01';
+--drop
+\parallel on
+ALTER TABLE interval_sales DROP PARTITION time_2008 UPDATE GLOBAL INDEX;
+UPDATE interval_sales SET channel_id = 'X' WHERE time_id = '2010-06-01';
+INSERT INTO interval_sales VALUES(1,1,'2011-06-01', 'X',1,1,1);
+SELECT /*+ tablescan(interval_sales)*/ COUNT(*) FROM interval_sales PARTITION (time_2010);
+\parallel off
+ count
+-------
+ 270
+(1 row)
+
+SELECT COUNT(*) FROM interval_sales WHERE channel_id = 'X';
+ count
+-------
+ 2
+(1 row)
+
+--split
+ALTER TABLE interval_sales RENAME PARTITION time_2009 TO time_2009_temp;
+\parallel on
+ALTER TABLE interval_sales SPLIT PARTITION time_2009_temp AT ('2009-01-01')
+ INTO (PARTITION time_2008, PARTITION time_2009) UPDATE GLOBAL INDEX;
+UPDATE interval_sales PARTITION (time_2010) SET channel_id = '0' WHERE channel_id = 'X';
+DELETE FROM interval_sales WHERE channel_id = 'X' AND time_id >= '2011-01-01';
+\parallel off
+SELECT COUNT(*) FROM interval_sales WHERE channel_id = 'X';
+ count
+-------
+ 0
+(1 row)
+
+--truncate
+\parallel on
+ALTER TABLE interval_sales TRUNCATE PARTITION time_2008 UPDATE GLOBAL INDEX;
+SELECT /*+ tablescan(interval_sales)*/ COUNT(*) FROM interval_sales PARTITION (time_2008);
+SELECT /*+ indexonlyscan(interval_sales interval_sales_idx1)*/ COUNT(channel_id) FROM interval_sales PARTITION (time_2008);
+SELECT /*+ indexonlyscan(interval_sales interval_sales_idx2)*/ COUNT(customer_id) FROM interval_sales;
+UPDATE interval_sales SET channel_id = 'X' WHERE time_id = '2010-06-01';
+INSERT INTO interval_sales VALUES(1,1,'2011-06-01', 'X',1,1,1);
+\parallel off
+ count
+-------
+--?.*
+(1 row)
+
+ count
+-------
+--?.*
+(1 row)
+
+ count
+-------
+--?.*
+(1 row)
+
+SELECT COUNT(*) FROM interval_sales WHERE channel_id = 'X';
+ count
+-------
+ 2
+(1 row)
+
+--exchange
+\parallel on
+ALTER TABLE interval_sales EXCHANGE PARTITION (time_2008) WITH TABLE interval_temp UPDATE GLOBAL INDEX;
+SELECT /*+ tablescan(interval_sales)*/ COUNT(*) FROM interval_sales PARTITION (time_2008);
+SELECT /*+ indexonlyscan(interval_sales interval_sales_idx1)*/ COUNT(channel_id) FROM interval_sales PARTITION (time_2008);
+SELECT /*+ indexonlyscan(interval_sales interval_sales_idx2)*/ COUNT(customer_id) FROM interval_sales;
+UPDATE interval_sales PARTITION (time_2010) SET channel_id = '0' WHERE channel_id = 'X';
+DELETE FROM interval_sales WHERE channel_id = 'X' AND time_id >= '2011-01-01';
+\parallel off
+ count
+-------
+--?.*
+(1 row)
+
+ count
+-------
+--?.*
+(1 row)
+
+ count
+-------
+--?.*
+(1 row)
+
+SELECT COUNT(*) FROM interval_sales WHERE channel_id = 'X';
+ count
+-------
+ 0
+(1 row)
+
+--merge
+\parallel on
+ALTER TABLE interval_sales MERGE PARTITIONS time_2008, time_2009 INTO PARTITION time_2009 UPDATE GLOBAL INDEX;
+UPDATE interval_sales SET channel_id = 'X' WHERE time_id = '2010-06-01';
+INSERT INTO interval_sales VALUES(1,1,'2011-06-01', 'X',1,1,1);
+\parallel off
+SELECT COUNT(*) FROM interval_sales WHERE channel_id = 'X';
+ count
+-------
+ 2
+(1 row)
+
+--insert
+\parallel on
+INSERT INTO interval_sales VALUES (1,1,'2017-06-01','1',1,1);
+INSERT INTO interval_sales VALUES (1,1,'2017-07-01','1',1,1);
+INSERT INTO interval_sales VALUES (1,1,'2018-06-01','1',1,1);
+UPDATE interval_sales PARTITION (time_2010) SET channel_id = '0' WHERE channel_id = 'X';
+DELETE FROM interval_sales WHERE channel_id = 'X' AND time_id >= '2011-01-01';
+\parallel off
+SELECT COUNT(*) FROM interval_sales WHERE channel_id = 'X';
+ count
+-------
+ 0
+(1 row)
+
+--finish
+DROP TABLE interval_sales;
+DROP TABLE interval_temp;
+--
+----range-list table----
+--
+--prepare
+CREATE TABLE range_list_sales
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 NOT NULL,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id)
+(
+ PARTITION customer1 VALUES LESS THAN (200)
+ (
+ SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'),
+ SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'),
+ SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'),
+ SUBPARTITION customer1_channel4 VALUES (DEFAULT)
+ ),
+ PARTITION customer2 VALUES LESS THAN (500)
+ (
+ SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'),
+ SUBPARTITION customer2_channel2 VALUES (DEFAULT)
+ ),
+ PARTITION customer3 VALUES LESS THAN (800),
+ PARTITION customer4 VALUES LESS THAN (1200)
+ (
+ SUBPARTITION customer4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
+ )
+);
+INSERT INTO range_list_sales SELECT generate_series(1,1000),
+ generate_series(1,1000),
+ date_pli('2008-01-01', generate_series(1,1000)),
+ generate_series(1,1000)%10,
+ generate_series(1,1000)%10,
+ generate_series(1,1000)%1000,
+ generate_series(1,1000);
+CREATE INDEX range_list_sales_idx1 ON range_list_sales(customer_id) LOCAL;
+CREATE INDEX range_list_sales_idx2 ON range_list_sales(channel_id) GLOBAL;
+--create a temp table to exchange
+CREATE TABLE range_list_temp
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 NOT NULL,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE);
+CREATE INDEX ON range_list_temp(customer_id);
+INSERT INTO range_list_temp SELECT * FROM range_list_sales WHERE customer_id < 200 AND channel_id in ('0', '1', '2');
+--drop
+\parallel on
+ALTER TABLE range_list_sales DROP SUBPARTITION customer1_channel1 UPDATE GLOBAL INDEX;
+UPDATE range_list_sales SET type_id = -1 WHERE customer_id = 700;
+INSERT INTO range_list_sales VALUES(1,1000,'2011-06-01', '1',-1,1,1);
+SELECT /*+ tablescan(range_list_sales)*/ COUNT(*) FROM range_list_sales SUBPARTITION (customer1_channel4);
+\parallel off
+ count
+-------
+ 20
+(1 row)
+
+SELECT COUNT(*) FROM range_list_sales WHERE type_id = -1;
+ count
+-------
+ 2
+(1 row)
+
+--split
+\parallel on
+ALTER TABLE range_list_sales SPLIT SUBPARTITION customer1_channel4 VALUES ('0', '1', '2')
+ INTO (SUBPARTITION customer1_channel1, SUBPARTITION customer1_channel4_temp) UPDATE GLOBAL INDEX;
+UPDATE range_list_sales PARTITION (customer3) SET type_id = 1 WHERE customer_id = 700;
+DELETE FROM range_list_sales WHERE type_id = -1 AND customer_id >= 800;
+\parallel off
+SELECT COUNT(*) FROM range_list_sales WHERE type_id = -1;
+ count
+-------
+ 0
+(1 row)
+
+--truncate
+\parallel on
+ALTER TABLE range_list_sales TRUNCATE PARTITION customer1 UPDATE GLOBAL INDEX;
+SELECT /*+ tablescan(range_list_sales)*/ COUNT(*) FROM range_list_sales SUBPARTITION (customer1_channel2);
+SELECT /*+ indexonlyscan(range_list_sales range_list_sales_idx1)*/ COUNT(customer_id) FROM range_list_sales SUBPARTITION (customer1_channel3);
+SELECT /*+ indexonlyscan(range_list_sales range_list_sales_idx2)*/ COUNT(channel_id) FROM range_list_sales;
+UPDATE range_list_sales SET type_id = -1 WHERE customer_id = 700;
+INSERT INTO range_list_sales VALUES(1,1000,'2011-06-01', '1',-1,1,1);
+\parallel off
+ count
+-------
+--?.*
+(1 row)
+
+ count
+-------
+--?.*
+(1 row)
+
+ count
+-------
+--?.*
+(1 row)
+
+SELECT COUNT(*) FROM range_list_sales WHERE type_id = -1;
+ count
+-------
+ 2
+(1 row)
+
+--finish
+DROP TABLE range_list_sales;
+DROP TABLE range_list_temp;
+-- clean
+DROP SCHEMA partition_parallel CASCADE;
diff --git a/src/test/regress/expected/hw_partitionno.out b/src/test/regress/expected/hw_partitionno.out
new file mode 100644
index 000000000..3570d0a5f
--- /dev/null
+++ b/src/test/regress/expected/hw_partitionno.out
@@ -0,0 +1,505 @@
+-- prepare
+DROP SCHEMA partitionno CASCADE;
+ERROR: schema "partitionno" does not exist
+CREATE SCHEMA partitionno;
+SET CURRENT_SCHEMA TO partitionno;
+PREPARE partition_get_partitionno AS
+SELECT relname, partitionno, subpartitionno, boundaries
+FROM pg_partition
+WHERE parentid = (
+ SELECT c.oid
+ FROM pg_class c
+ JOIN pg_namespace n ON c.relnamespace = n.oid
+ WHERE c.relname = $1
+ AND n.nspname = CURRENT_SCHEMA
+)
+ORDER BY relname;
+PREPARE subpartition_get_partitionno AS
+WITH partition_oid AS (
+ SELECT oid
+ FROM pg_partition
+ WHERE parentid = (
+ SELECT c.oid
+ FROM pg_class c
+ JOIN pg_namespace n ON c.relnamespace = n.oid
+ WHERE c.relname = $1
+ AND n.nspname = CURRENT_SCHEMA
+ )
+ )
+SELECT relname, partitionno, subpartitionno, boundaries
+FROM pg_partition p
+ JOIN partition_oid part
+ ON p.oid = part.oid OR p.parentid = part.oid
+ORDER BY relname;
+--
+-- 1. test for range partition
+--
+CREATE TABLE range_sales
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 PRIMARY KEY,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+)
+PARTITION BY RANGE (time_id)
+(
+ PARTITION time_2008 VALUES LESS THAN ('2009-01-01'),
+ PARTITION time_2009 VALUES LESS THAN ('2010-01-01'),
+ PARTITION time_2010 VALUES LESS THAN ('2011-01-01'),
+ PARTITION time_2011 VALUES LESS THAN ('2012-01-01')
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_sales_pkey" for table "range_sales"
+CREATE INDEX range_sales_idx1 ON range_sales(product_id) LOCAL;
+CREATE INDEX range_sales_idx2 ON range_sales(time_id) GLOBAL;
+EXECUTE partition_get_partitionno('range_sales');
+ relname | partitionno | subpartitionno | boundaries
+-------------+-------------+----------------+--------------
+ range_sales | -4 | |
+ time_2008 | 1 | | {2009-01-01}
+ time_2009 | 2 | | {2010-01-01}
+ time_2010 | 3 | | {2011-01-01}
+ time_2011 | 4 | | {2012-01-01}
+(5 rows)
+
+-- add/drop partition
+ALTER TABLE range_sales ADD PARTITION time_default VALUES LESS THAN (MAXVALUE);
+ALTER TABLE range_sales DROP PARTITION time_2008;
+EXECUTE partition_get_partitionno('range_sales');
+ relname | partitionno | subpartitionno | boundaries
+--------------+-------------+----------------+--------------
+ range_sales | -5 | |
+ time_2009 | 2 | | {2010-01-01}
+ time_2010 | 3 | | {2011-01-01}
+ time_2011 | 4 | | {2012-01-01}
+ time_default | 5 | | {NULL}
+(5 rows)
+
+-- merge/split partition
+ALTER TABLE range_sales SPLIT PARTITION time_default AT ('2013-01-01') INTO (PARTITION time_2012, PARTITION time_default_temp);
+ALTER TABLE range_sales RENAME PARTITION time_default_temp TO time_default;
+ALTER TABLE range_sales MERGE PARTITIONS time_2009, time_2010 INTO PARTITION time_2010_old UPDATE GLOBAL INDEX;
+EXECUTE partition_get_partitionno('range_sales');
+ relname | partitionno | subpartitionno | boundaries
+---------------+-------------+----------------+------------------------------
+ range_sales | -7 | |
+ time_2010_old | 3 | | {"Sat Jan 01 00:00:00 2011"}
+ time_2011 | 4 | | {2012-01-01}
+ time_2012 | 6 | | {"Tue Jan 01 00:00:00 2013"}
+ time_default | 7 | | {NULL}
+(5 rows)
+
+-- truncate partition with gpi
+ALTER TABLE range_sales TRUNCATE PARTITION time_2011 UPDATE GLOBAL INDEX;
+EXECUTE partition_get_partitionno('range_sales');
+ relname | partitionno | subpartitionno | boundaries
+---------------+-------------+----------------+------------------------------
+ range_sales | -7 | |
+ time_2010_old | 3 | | {"Sat Jan 01 00:00:00 2011"}
+ time_2011 | 4 | | {2012-01-01}
+ time_2012 | 6 | | {"Tue Jan 01 00:00:00 2013"}
+ time_default | 7 | | {NULL}
+(5 rows)
+
+-- vacuum full
+VACUUM FULL range_sales;
+EXECUTE partition_get_partitionno('range_sales');
+ relname | partitionno | subpartitionno | boundaries
+---------------+-------------+----------------+------------------------------
+ range_sales | -4 | |
+ time_2010_old | 1 | | {"Sat Jan 01 00:00:00 2011"}
+ time_2011 | 2 | | {2012-01-01}
+ time_2012 | 3 | | {"Tue Jan 01 00:00:00 2013"}
+ time_default | 4 | | {NULL}
+(5 rows)
+
+--reset
+ALTER TABLE range_sales RESET PARTITION;
+EXECUTE partition_get_partitionno('range_sales');
+ relname | partitionno | subpartitionno | boundaries
+---------------+-------------+----------------+------------------------------
+ range_sales | -4 | |
+ time_2010_old | 1 | | {"Sat Jan 01 00:00:00 2011"}
+ time_2011 | 2 | | {2012-01-01}
+ time_2012 | 3 | | {"Tue Jan 01 00:00:00 2013"}
+ time_default | 4 | | {NULL}
+(5 rows)
+
+DROP TABLE range_sales;
+--
+-- 2. test for interval partition
+--
+CREATE TABLE interval_sales
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 PRIMARY KEY,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+)
+PARTITION BY RANGE (time_id) INTERVAL ('1 year')
+(
+ PARTITION time_2008 VALUES LESS THAN ('2009-01-01'),
+ PARTITION time_2009 VALUES LESS THAN ('2010-01-01'),
+ PARTITION time_2010 VALUES LESS THAN ('2011-01-01')
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "interval_sales_pkey" for table "interval_sales"
+CREATE INDEX interval_sales_idx1 ON interval_sales(product_id) LOCAL;
+CREATE INDEX interval_sales_idx2 ON interval_sales(time_id) GLOBAL;
+EXECUTE partition_get_partitionno('interval_sales');
+ relname | partitionno | subpartitionno | boundaries
+----------------+-------------+----------------+--------------
+ interval_sales | -3 | |
+ time_2008 | 1 | | {2009-01-01}
+ time_2009 | 2 | | {2010-01-01}
+ time_2010 | 3 | | {2011-01-01}
+(4 rows)
+
+-- add/drop partition
+INSERT INTO interval_sales VALUES (1,1,'2013-01-01','A',1,1,1);
+INSERT INTO interval_sales VALUES (2,2,'2012-01-01','B',2,2,2);
+ALTER TABLE interval_sales DROP PARTITION time_2008;
+EXECUTE partition_get_partitionno('interval_sales');
+ relname | partitionno | subpartitionno | boundaries
+----------------+-------------+----------------+------------------------------
+ interval_sales | -5 | |
+ sys_p1 | 4 | | {"Wed Jan 01 00:00:00 2014"}
+ sys_p2 | 5 | | {"Tue Jan 01 00:00:00 2013"}
+ time_2009 | 2 | | {2010-01-01}
+ time_2010 | 3 | | {2011-01-01}
+(5 rows)
+
+-- merge/split partition
+ALTER TABLE interval_sales SPLIT PARTITION time_2009 AT ('2009-01-01') INTO (PARTITION time_2008, PARTITION time_2009_temp);
+ALTER TABLE interval_sales RENAME PARTITION time_2009_temp TO time_2009;
+ALTER TABLE interval_sales MERGE PARTITIONS time_2009, time_2010 INTO PARTITION time_2010_old UPDATE GLOBAL INDEX;
+EXECUTE partition_get_partitionno('interval_sales');
+ relname | partitionno | subpartitionno | boundaries
+----------------+-------------+----------------+------------------------------
+ interval_sales | -7 | |
+ sys_p1 | 4 | | {"Wed Jan 01 00:00:00 2014"}
+ sys_p2 | 5 | | {"Tue Jan 01 00:00:00 2013"}
+ time_2008 | 6 | | {"Thu Jan 01 00:00:00 2009"}
+ time_2010_old | 3 | | {"Sat Jan 01 00:00:00 2011"}
+(5 rows)
+
+-- truncate partition with gpi
+ALTER TABLE interval_sales TRUNCATE PARTITION time_2008 UPDATE GLOBAL INDEX;
+EXECUTE partition_get_partitionno('interval_sales');
+ relname | partitionno | subpartitionno | boundaries
+----------------+-------------+----------------+------------------------------
+ interval_sales | -7 | |
+ sys_p1 | 4 | | {"Wed Jan 01 00:00:00 2014"}
+ sys_p2 | 5 | | {"Tue Jan 01 00:00:00 2013"}
+ time_2008 | 6 | | {"Thu Jan 01 00:00:00 2009"}
+ time_2010_old | 3 | | {"Sat Jan 01 00:00:00 2011"}
+(5 rows)
+
+-- vacuum full
+VACUUM FULL interval_sales;
+EXECUTE partition_get_partitionno('interval_sales');
+ relname | partitionno | subpartitionno | boundaries
+----------------+-------------+----------------+------------------------------
+ interval_sales | -4 | |
+ sys_p1 | 4 | | {"Wed Jan 01 00:00:00 2014"}
+ sys_p2 | 3 | | {"Tue Jan 01 00:00:00 2013"}
+ time_2008 | 1 | | {"Thu Jan 01 00:00:00 2009"}
+ time_2010_old | 2 | | {"Sat Jan 01 00:00:00 2011"}
+(5 rows)
+
+--reset
+ALTER TABLE interval_sales RESET PARTITION;
+EXECUTE partition_get_partitionno('interval_sales');
+ relname | partitionno | subpartitionno | boundaries
+----------------+-------------+----------------+------------------------------
+ interval_sales | -4 | |
+ sys_p1 | 4 | | {"Wed Jan 01 00:00:00 2014"}
+ sys_p2 | 3 | | {"Tue Jan 01 00:00:00 2013"}
+ time_2008 | 1 | | {"Thu Jan 01 00:00:00 2009"}
+ time_2010_old | 2 | | {"Sat Jan 01 00:00:00 2011"}
+(5 rows)
+
+DROP TABLE interval_sales;
+--
+-- 3. test for list partition
+--
+CREATE TABLE list_sales
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 PRIMARY KEY,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+)
+PARTITION BY LIST (channel_id)
+(
+ PARTITION channel1 VALUES ('0', '1', '2'),
+ PARTITION channel2 VALUES ('3', '4', '5'),
+ PARTITION channel3 VALUES ('6', '7'),
+ PARTITION channel4 VALUES ('8', '9')
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "list_sales_pkey" for table "list_sales"
+CREATE INDEX list_sales_idx1 ON list_sales(product_id) LOCAL;
+CREATE INDEX list_sales_idx2 ON list_sales(time_id) GLOBAL;
+EXECUTE partition_get_partitionno('list_sales');
+ relname | partitionno | subpartitionno | boundaries
+------------+-------------+----------------+------------
+ channel1 | 1 | | {0,1,2}
+ channel2 | 2 | | {3,4,5}
+ channel3 | 3 | | {6,7}
+ channel4 | 4 | | {8,9}
+ list_sales | -4 | |
+(5 rows)
+
+-- add/drop partition
+ALTER TABLE list_sales ADD PARTITION channel_default VALUES (DEFAULT);
+ALTER TABLE list_sales DROP PARTITION channel4;
+EXECUTE partition_get_partitionno('list_sales');
+ relname | partitionno | subpartitionno | boundaries
+-----------------+-------------+----------------+------------
+ channel1 | 1 | | {0,1,2}
+ channel2 | 2 | | {3,4,5}
+ channel3 | 3 | | {6,7}
+ channel_default | 5 | | {NULL}
+ list_sales | -5 | |
+(5 rows)
+
+-- truncate partition with gpi
+ALTER TABLE list_sales TRUNCATE PARTITION channel2 UPDATE GLOBAL INDEX;
+EXECUTE partition_get_partitionno('list_sales');
+ relname | partitionno | subpartitionno | boundaries
+-----------------+-------------+----------------+------------
+ channel1 | 1 | | {0,1,2}
+ channel2 | 2 | | {3,4,5}
+ channel3 | 3 | | {6,7}
+ channel_default | 5 | | {NULL}
+ list_sales | -5 | |
+(5 rows)
+
+-- vacuum full
+VACUUM FULL list_sales;
+EXECUTE partition_get_partitionno('list_sales');
+ relname | partitionno | subpartitionno | boundaries
+-----------------+-------------+----------------+------------
+ channel1 | 1 | | {0,1,2}
+ channel2 | 2 | | {3,4,5}
+ channel3 | 3 | | {6,7}
+ channel_default | 4 | | {NULL}
+ list_sales | -4 | |
+(5 rows)
+
+--reset
+ALTER TABLE list_sales RESET PARTITION;
+EXECUTE partition_get_partitionno('list_sales');
+ relname | partitionno | subpartitionno | boundaries
+-----------------+-------------+----------------+------------
+ channel1 | 1 | | {0,1,2}
+ channel2 | 2 | | {3,4,5}
+ channel3 | 3 | | {6,7}
+ channel_default | 4 | | {NULL}
+ list_sales | -4 | |
+(5 rows)
+
+DROP TABLE list_sales;
+--
+-- 4. test for list-range partition
+--
+CREATE TABLE list_range_sales
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 PRIMARY KEY,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+)
+PARTITION BY LIST (channel_id) SUBPARTITION BY RANGE (customer_id)
+(
+ PARTITION channel1 VALUES ('0', '1', '2')
+ (
+ SUBPARTITION channel1_customer1 VALUES LESS THAN (200),
+ SUBPARTITION channel1_customer2 VALUES LESS THAN (500),
+ SUBPARTITION channel1_customer3 VALUES LESS THAN (800),
+ SUBPARTITION channel1_customer4 VALUES LESS THAN (1200)
+ ),
+ PARTITION channel2 VALUES ('3', '4', '5')
+ (
+ SUBPARTITION channel2_customer1 VALUES LESS THAN (500),
+ SUBPARTITION channel2_customer2 VALUES LESS THAN (MAXVALUE)
+ ),
+ PARTITION channel3 VALUES ('6', '7'),
+ PARTITION channel4 VALUES ('8', '9')
+ (
+ SUBPARTITION channel4_customer1 VALUES LESS THAN (1200)
+ )
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "list_range_sales_pkey" for table "list_range_sales"
+CREATE INDEX list_range_sales_idx ON list_range_sales(product_id) GLOBAL;
+EXECUTE subpartition_get_partitionno('list_range_sales');
+ relname | partitionno | subpartitionno | boundaries
+--------------------------+-------------+----------------+------------
+ channel1 | 1 | -4 | {0,1,2}
+ channel1_customer1 | | 1 | {200}
+ channel1_customer2 | | 2 | {500}
+ channel1_customer3 | | 3 | {800}
+ channel1_customer4 | | 4 | {1200}
+ channel2 | 2 | -2 | {3,4,5}
+ channel2_customer1 | | 1 | {500}
+ channel2_customer2 | | 2 | {NULL}
+ channel3 | 3 | -1 | {6,7}
+ channel3_subpartdefault1 | | 1 | {NULL}
+ channel4 | 4 | -1 | {8,9}
+ channel4_customer1 | | 1 | {1200}
+ list_range_sales | -4 | |
+(13 rows)
+
+-- add/drop partition
+ALTER TABLE list_range_sales DROP PARTITION channel3;
+ALTER TABLE list_range_sales ADD PARTITION channel3 VALUES ('6', '7')
+(
+ SUBPARTITION channel3_customer1 VALUES LESS THAN (200),
+ SUBPARTITION channel3_customer2 VALUES LESS THAN (500),
+ SUBPARTITION channel3_customer3 VALUES LESS THAN (800)
+);
+ALTER TABLE list_range_sales ADD PARTITION channel5 VALUES (DEFAULT);
+ALTER TABLE list_range_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_customer2 VALUES LESS THAN (2000);
+EXECUTE subpartition_get_partitionno('list_range_sales');
+ relname | partitionno | subpartitionno | boundaries
+--------------------------+-------------+----------------+------------
+ channel1 | 1 | -4 | {0,1,2}
+ channel1_customer1 | | 1 | {200}
+ channel1_customer2 | | 2 | {500}
+ channel1_customer3 | | 3 | {800}
+ channel1_customer4 | | 4 | {1200}
+ channel2 | 2 | -2 | {3,4,5}
+ channel2_customer1 | | 1 | {500}
+ channel2_customer2 | | 2 | {NULL}
+ channel3 | 5 | -3 | {6,7}
+ channel3_customer1 | | 1 | {200}
+ channel3_customer2 | | 2 | {500}
+ channel3_customer3 | | 3 | {800}
+ channel4 | 4 | -2 | {8,9}
+ channel4_customer1 | | 1 | {1200}
+ channel4_customer2 | | 2 | {2000}
+ channel5 | 6 | -1 | {NULL}
+ channel5_subpartdefault1 | | 1 | {NULL}
+ list_range_sales | -6 | |
+(18 rows)
+
+-- merge/split partition
+ALTER TABLE list_range_sales SPLIT SUBPARTITION channel2_customer2 AT (800) INTO (SUBPARTITION channel2_customer3, SUBPARTITION channel2_customer4);
+EXECUTE subpartition_get_partitionno('list_range_sales');
+ relname | partitionno | subpartitionno | boundaries
+--------------------------+-------------+----------------+------------
+ channel1 | 1 | -4 | {0,1,2}
+ channel1_customer1 | | 1 | {200}
+ channel1_customer2 | | 2 | {500}
+ channel1_customer3 | | 3 | {800}
+ channel1_customer4 | | 4 | {1200}
+ channel2 | 2 | -4 | {3,4,5}
+ channel2_customer1 | | 1 | {500}
+ channel2_customer3 | | 3 | {800}
+ channel2_customer4 | | 4 | {NULL}
+ channel3 | 5 | -3 | {6,7}
+ channel3_customer1 | | 1 | {200}
+ channel3_customer2 | | 2 | {500}
+ channel3_customer3 | | 3 | {800}
+ channel4 | 4 | -2 | {8,9}
+ channel4_customer1 | | 1 | {1200}
+ channel4_customer2 | | 2 | {2000}
+ channel5 | 6 | -1 | {NULL}
+ channel5_subpartdefault1 | | 1 | {NULL}
+ list_range_sales | -6 | |
+(19 rows)
+
+-- truncate partition with gpi
+ALTER TABLE list_range_sales TRUNCATE PARTITION channel1 UPDATE GLOBAL INDEX;
+ALTER TABLE list_range_sales TRUNCATE SUBPARTITION channel4_customer1 UPDATE GLOBAL INDEX;
+EXECUTE subpartition_get_partitionno('list_range_sales');
+ relname | partitionno | subpartitionno | boundaries
+--------------------------+-------------+----------------+------------
+ channel1 | 1 | -4 | {0,1,2}
+ channel1_customer1 | | 1 | {200}
+ channel1_customer2 | | 2 | {500}
+ channel1_customer3 | | 3 | {800}
+ channel1_customer4 | | 4 | {1200}
+ channel2 | 2 | -4 | {3,4,5}
+ channel2_customer1 | | 1 | {500}
+ channel2_customer3 | | 3 | {800}
+ channel2_customer4 | | 4 | {NULL}
+ channel3 | 5 | -3 | {6,7}
+ channel3_customer1 | | 1 | {200}
+ channel3_customer2 | | 2 | {500}
+ channel3_customer3 | | 3 | {800}
+ channel4 | 4 | -2 | {8,9}
+ channel4_customer1 | | 1 | {1200}
+ channel4_customer2 | | 2 | {2000}
+ channel5 | 6 | -1 | {NULL}
+ channel5_subpartdefault1 | | 1 | {NULL}
+ list_range_sales | -6 | |
+(19 rows)
+
+-- vacuum full
+VACUUM FULL list_range_sales;
+EXECUTE subpartition_get_partitionno('list_range_sales');
+ relname | partitionno | subpartitionno | boundaries
+--------------------------+-------------+----------------+------------
+ channel1 | 1 | -4 | {0,1,2}
+ channel1_customer1 | | 1 | {200}
+ channel1_customer2 | | 2 | {500}
+ channel1_customer3 | | 3 | {800}
+ channel1_customer4 | | 4 | {1200}
+ channel2 | 2 | -3 | {3,4,5}
+ channel2_customer1 | | 1 | {500}
+ channel2_customer3 | | 2 | {800}
+ channel2_customer4 | | 3 | {NULL}
+ channel3 | 3 | -3 | {6,7}
+ channel3_customer1 | | 1 | {200}
+ channel3_customer2 | | 2 | {500}
+ channel3_customer3 | | 3 | {800}
+ channel4 | 4 | -2 | {8,9}
+ channel4_customer1 | | 1 | {1200}
+ channel4_customer2 | | 2 | {2000}
+ channel5 | 5 | -1 | {NULL}
+ channel5_subpartdefault1 | | 1 | {NULL}
+ list_range_sales | -5 | |
+(19 rows)
+
+--reset
+ALTER TABLE list_range_sales RESET PARTITION;
+EXECUTE subpartition_get_partitionno('list_range_sales');
+ relname | partitionno | subpartitionno | boundaries
+--------------------------+-------------+----------------+------------
+ channel1 | 1 | -4 | {0,1,2}
+ channel1_customer1 | | 1 | {200}
+ channel1_customer2 | | 2 | {500}
+ channel1_customer3 | | 3 | {800}
+ channel1_customer4 | | 4 | {1200}
+ channel2 | 2 | -3 | {3,4,5}
+ channel2_customer1 | | 1 | {500}
+ channel2_customer3 | | 2 | {800}
+ channel2_customer4 | | 3 | {NULL}
+ channel3 | 3 | -3 | {6,7}
+ channel3_customer1 | | 1 | {200}
+ channel3_customer2 | | 2 | {500}
+ channel3_customer3 | | 3 | {800}
+ channel4 | 4 | -2 | {8,9}
+ channel4_customer1 | | 1 | {1200}
+ channel4_customer2 | | 2 | {2000}
+ channel5 | 5 | -1 | {NULL}
+ channel5_subpartdefault1 | | 1 | {NULL}
+ list_range_sales | -5 | |
+(19 rows)
+
+DROP TABLE list_range_sales;
+-- clean
+DEALLOCATE partition_get_partitionno;
+DEALLOCATE subpartition_get_partitionno;
+DROP SCHEMA partitionno CASCADE;
diff --git a/src/test/regress/expected/hw_subpartition_add_drop_partition.out b/src/test/regress/expected/hw_subpartition_add_drop_partition.out
index b796355cc..70c66c2e6 100644
--- a/src/test/regress/expected/hw_subpartition_add_drop_partition.out
+++ b/src/test/regress/expected/hw_subpartition_add_drop_partition.out
@@ -94,7 +94,7 @@ ALTER TABLE range_range_sales MODIFY PARTITION customer4 ADD SUBPARTITION custom
ERROR: upper boundary of adding partition MUST overtop last existing partition
--fail, invalid format
ALTER TABLE range_range_sales MODIFY PARTITION customer2 ADD SUBPARTITION customer2_temp1 VALUES ('2015-01-01');
-ERROR: can not add none-range partition to range partition table
+ERROR: can not add none-range subpartition to range subpartition table
--check for ok after add
SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
FROM pg_class c1, pg_partition p1, pg_namespace n1
@@ -386,7 +386,7 @@ ALTER TABLE range_list_sales MODIFY PARTITION customer3 ADD SUBPARTITION custome
ERROR: list boundary of adding partition MUST NOT overlap with existing partition
--fail, invalid format
ALTER TABLE range_list_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('X');
-ERROR: can not add none-list partition to list partition table
+ERROR: can not add none-list subpartition to list subpartition table
--check for ok after add
SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
FROM pg_class c1, pg_partition p1, pg_namespace n1
@@ -655,7 +655,7 @@ LINE 1: ...MODIFY PARTITION customer1 ADD SUBPARTITION customer1_temp1;
^
--fail, invalid format
ALTER TABLE range_hash_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('X');
-ERROR: can not add hash partition
+ERROR: can not add hash subpartition
--check for ok after add
SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
FROM pg_class c1, pg_partition p1, pg_namespace n1
@@ -925,7 +925,7 @@ ALTER TABLE list_range_sales MODIFY PARTITION channel3 ADD SUBPARTITION channel3
ERROR: upper boundary of adding partition MUST overtop last existing partition
--fail, invalid format
ALTER TABLE list_range_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES (1500);
-ERROR: can not add none-range partition to range partition table
+ERROR: can not add none-range subpartition to range subpartition table
--check for ok after add
SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
FROM pg_class c1, pg_partition p1, pg_namespace n1
@@ -1216,7 +1216,7 @@ ALTER TABLE list_list_sales MODIFY PARTITION channel3 ADD SUBPARTITION channel3_
ERROR: list boundary of adding partition MUST NOT overlap with existing partition
--fail, invalid format
ALTER TABLE list_list_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES LESS THAN (1500);
-ERROR: can not add none-list partition to list partition table
+ERROR: can not add none-list subpartition to list subpartition table
--check for ok after add
SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
FROM pg_class c1, pg_partition p1, pg_namespace n1
@@ -1488,7 +1488,7 @@ LINE 1: ...s MODIFY PARTITION channel1 ADD SUBPARTITION channel1_temp1;
^
--fail, invalid format
ALTER TABLE list_hash_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES LESS THAN (1500);
-ERROR: can not add hash partition
+ERROR: can not add hash subpartition
--check for ok after add
SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
FROM pg_class c1, pg_partition p1, pg_namespace n1
@@ -1743,7 +1743,7 @@ ALTER TABLE hash_range_sales MODIFY PARTITION product2 ADD SUBPARTITION product2
ERROR: upper boundary of adding partition MUST overtop last existing partition
--fail, invalid format
ALTER TABLE hash_range_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_temp1 VALUES (DEFAULT);
-ERROR: can not add none-range partition to range partition table
+ERROR: can not add none-range subpartition to range subpartition table
--success, add 1 subpartition
ALTER TABLE hash_range_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_customer2 VALUES LESS THAN (MAXVALUE);
--check for ok after add
@@ -1997,7 +1997,7 @@ ALTER TABLE hash_list_sales MODIFY PARTITION product3 ADD SUBPARTITION product3_
ERROR: list boundary of adding partition MUST NOT overlap with existing partition
--fail, invalid format
ALTER TABLE hash_list_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_temp1 VALUES LESS THAN (MAXVALUE);
-ERROR: can not add none-list partition to list partition table
+ERROR: can not add none-list subpartition to list subpartition table
--success, add 1 subpartition
ALTER TABLE hash_list_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_channel2 VALUES (DEFAULT);
--check for ok after add
diff --git a/src/test/regress/expected/hw_subpartition_alter_table.out b/src/test/regress/expected/hw_subpartition_alter_table.out
index ba9a928f1..5ec6a1613 100644
--- a/src/test/regress/expected/hw_subpartition_alter_table.out
+++ b/src/test/regress/expected/hw_subpartition_alter_table.out
@@ -223,13 +223,16 @@ Command: ALTER TABLE SUBPARTITION
Description: change the definition of a subpartition
Syntax:
ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
- action [, ... ];
+ { action [, ... ] | reset_clause };
where action can be:
+ row_clause |
add_clause |
drop_clause |
split_clause |
truncate_clause
+where row_clause can be:
+{ ENABLE | DISABLE } ROW MOVEMENT
where add_clause can be:
ADD { partition_less_than_item | partition_list_item } [ ( subpartition_definition_list ) ]
MODIFY PARTITION partition_name ADD subpartition_definition
@@ -249,5 +252,6 @@ AT ( subpartition_value ) INTO ( SUBPARTITION subpartition_name [ TABLESPACE tab
VALUES ( subpartition_value ) INTO ( SUBPARTITION subpartition_name [ TABLESPACE tablespacename ] , SUBPARTITION subpartition_name [ TABLESPACE tablespacename ] )
where truncate_clause can be:
TRUNCATE SUBPARTITION { subpartition_name } [ UPDATE GLOBAL INDEX ]
-NOTICE: 'ALTER TABLE SUBPARTITION' is only avaliable in CENTRALIZED mode!
+where reset_clause can be:
+RESET PARTITION;
diff --git a/src/test/regress/expected/hw_subpartition_createtable.out b/src/test/regress/expected/hw_subpartition_createtable.out
index e623bdd59..a516dc261 100644
--- a/src/test/regress/expected/hw_subpartition_createtable.out
+++ b/src/test/regress/expected/hw_subpartition_createtable.out
@@ -73,9 +73,9 @@ insert into list_hash values('201903', '6', '1', 1);
select * from list_hash;
month_code | dept_code | user_no | sales_amt
------------+-----------+---------+-----------
+ 201902 | 1 | 1 | 1
201902 | 2 | 1 | 1
201902 | 3 | 1 | 1
- 201902 | 1 | 1 | 1
201903 | 4 | 1 | 1
201903 | 5 | 1 | 1
201903 | 6 | 1 | 1
@@ -187,12 +187,12 @@ insert into range_hash values('201903', '2', '1', 1);
select * from range_hash;
month_code | dept_code | user_no | sales_amt
------------+-----------+---------+-----------
+ 201902 | 1 | 1 | 1
+ 201902 | 1 | 1 | 1
201902 | 2 | 1 | 1
- 201902 | 1 | 1 | 1
- 201902 | 1 | 1 | 1
- 201903 | 2 | 1 | 1
- 201903 | 2 | 1 | 1
201903 | 1 | 1 | 1
+ 201903 | 2 | 1 | 1
+ 201903 | 2 | 1 | 1
(6 rows)
drop table range_hash;
@@ -263,12 +263,12 @@ insert into hash_list values('201903', '2', '1', 1);
select * from hash_list;
month_code | dept_code | user_no | sales_amt
------------+-----------+---------+-----------
- 201903 | 1 | 1 | 1
- 201903 | 2 | 1 | 1
- 201903 | 2 | 1 | 1
201901 | 1 | 1 | 1
201901 | 1 | 1 | 1
201901 | 2 | 1 | 1
+ 201903 | 1 | 1 | 1
+ 201903 | 2 | 1 | 1
+ 201903 | 2 | 1 | 1
(6 rows)
drop table hash_list;
@@ -301,12 +301,12 @@ insert into hash_hash values('201903', '2', '1', 1);
select * from hash_hash;
month_code | dept_code | user_no | sales_amt
------------+-----------+---------+-----------
- 201903 | 2 | 1 | 1
- 201903 | 2 | 1 | 1
- 201903 | 1 | 1 | 1
+ 201901 | 1 | 1 | 1
+ 201901 | 1 | 1 | 1
201901 | 2 | 1 | 1
- 201901 | 1 | 1 | 1
- 201901 | 1 | 1 | 1
+ 201903 | 1 | 1 | 1
+ 201903 | 2 | 1 | 1
+ 201903 | 2 | 1 | 1
(6 rows)
drop table hash_hash;
@@ -339,12 +339,12 @@ insert into hash_range values('201903', '2', '1', 1);
select * from hash_range;
month_code | dept_code | user_no | sales_amt
------------+-----------+---------+-----------
- 201903 | 1 | 1 | 1
- 201903 | 2 | 1 | 1
- 201903 | 2 | 1 | 1
201901 | 1 | 1 | 1
201901 | 1 | 1 | 1
201901 | 2 | 1 | 1
+ 201903 | 1 | 1 | 1
+ 201903 | 2 | 1 | 1
+ 201903 | 2 | 1 | 1
(6 rows)
drop table hash_range;
@@ -854,29 +854,6 @@ PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
);
drop table list_list;
--1.4 subpartition key check
--- 一级分区和二级分区分区键是同一列
-CREATE TABLE list_list
-(
- month_code VARCHAR2 ( 30 ) NOT NULL ,
- dept_code VARCHAR2 ( 30 ) NOT NULL ,
- user_no VARCHAR2 ( 30 ) NOT NULL ,
- sales_amt int
-)
-PARTITION BY LIST (month_code) SUBPARTITION BY LIST (month_code)
-(
- PARTITION p_201901 VALUES ( '201902' )
- (
- SUBPARTITION p_201901_a VALUES ( '1' ),
- SUBPARTITION p_201901_b VALUES ( '2' )
- ),
- PARTITION p_201902 VALUES ( '201903' )
- (
- SUBPARTITION p_201902_a VALUES ( '1' ),
- SUBPARTITION p_201902_b VALUES ( '2' )
- )
-);
-ERROR: The two partition keys of a subpartition partition table are the same.
-DETAIL: N/A
--二级分区的键值一样
CREATE TABLE list_list
(
@@ -1746,6 +1723,560 @@ create table t1(like range_list including partition);
ERROR: Un-support feature
DETAIL: Create Table like with subpartition only support range strategy.
drop table range_list;
+-- test the key of partition and subpartition is same column
+CREATE TABLE list_list
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (month_code)
+(
+ PARTITION p_201901 VALUES ( '201902' )
+ (
+ SUBPARTITION p_201901_a VALUES ( '201902' ),
+ SUBPARTITION p_201901_b VALUES ( '2' )
+ ),
+ PARTITION p_201902 VALUES ( '201903' )
+ (
+ SUBPARTITION p_201902_a VALUES ( '1' ),
+ SUBPARTITION p_201902_b VALUES ( '2' )
+ )
+);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+ERROR: inserted partition key does not map to any table partition
+insert into list_list values('2', '2', '1', 1);
+ERROR: inserted partition key does not map to any table partition
+EXPLAIN (costs false)
+SELECT * FROM list_list SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+ QUERY PLAN
+-----------------------------------------------
+ Sort
+ Sort Key: month_code, dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on list_list
+ Selected Partitions: 1
+ Selected Subpartitions: 1:1
+(7 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM list_list WHERE month_code = '201902' ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------------------
+ Sort
+ Sort Key: dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on list_list
+ Filter: ((month_code)::text = '201902'::text)
+ Selected Partitions: 1
+ Selected Subpartitions: 1:1
+(8 rows)
+
+drop table list_list;
+CREATE TABLE list_hash
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY HASH (month_code)
+(
+ PARTITION p_201901 VALUES ( '201902' )
+ (
+ SUBPARTITION p_201901_a,
+ SUBPARTITION p_201901_b
+ ),
+ PARTITION p_201902 VALUES ( '201903' )
+ (
+ SUBPARTITION p_201902_a,
+ SUBPARTITION p_201902_b
+ )
+);
+insert into list_hash values('201902', '1', '1', 1);
+insert into list_hash values('201902', '2', '1', 1);
+insert into list_hash values('201903', '5', '1', 1);
+insert into list_hash values('201903', '6', '1', 1);
+SELECT * FROM list_hash SUBPARTITION (p_201901_a) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+(0 rows)
+
+SELECT * FROM list_hash SUBPARTITION (p_201901_b) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902 | 1 | 1 | 1
+ 201902 | 2 | 1 | 1
+(2 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM list_hash SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+ QUERY PLAN
+-----------------------------------------------
+ Sort
+ Sort Key: month_code, dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on list_hash
+ Selected Partitions: 1
+ Selected Subpartitions: 1:1
+(7 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM list_hash WHERE month_code = '201903' ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------------------
+ Sort
+ Sort Key: dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on list_hash
+ Filter: ((month_code)::text = '201903'::text)
+ Selected Partitions: 2
+ Selected Subpartitions: 2:1
+(8 rows)
+
+drop table list_hash;
+CREATE TABLE list_range
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (month_code)
+(
+ PARTITION p_201901 VALUES ( '201902' )
+ (
+ SUBPARTITION p_201901_a values less than ('1'),
+ SUBPARTITION p_201901_b values less than ('2')
+ ),
+ PARTITION p_201902 VALUES ( '201903' )
+ (
+ SUBPARTITION p_201902_a values less than ('3'),
+ SUBPARTITION p_201902_b values less than ('4')
+ )
+);
+insert into list_range values('201902', '1', '1', 1);
+ERROR: inserted partition key does not map to any table partition
+insert into list_range values('201903', '2', '1', 1);
+SELECT * FROM list_range SUBPARTITION (p_201902_a) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201903 | 2 | 1 | 1
+(1 row)
+
+EXPLAIN (costs false)
+SELECT * FROM list_range SUBPARTITION FOR ('201903','201903') ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------
+ Sort
+ Sort Key: month_code, dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on list_range
+ Selected Partitions: 2
+ Selected Subpartitions: 2:1
+(7 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM list_range WHERE month_code = '201903' ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------------------
+ Sort
+ Sort Key: dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on list_range
+ Filter: ((month_code)::text = '201903'::text)
+ Selected Partitions: 2
+ Selected Subpartitions: 2:1
+(8 rows)
+
+drop table list_range;
+CREATE TABLE range_list
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (month_code)
+(
+ PARTITION p_201901 VALUES LESS THAN( '201903' )
+ (
+ SUBPARTITION p_201901_a values ('201901'),
+ SUBPARTITION p_201901_b values ('201902')
+ ),
+ PARTITION p_201902 VALUES LESS THAN( '201904' )
+ (
+ SUBPARTITION p_201902_a values ('201903'),
+ SUBPARTITION p_201902_b values ('201904')
+ )
+);
+insert into range_list values('201901', '1', '1', 1);
+insert into range_list values('201902', '2', '1', 1);
+insert into range_list values('201903', '1', '1', 1);
+insert into range_list values('201904', '2', '1', 1);
+ERROR: inserted partition key does not map to any table partition
+EXPLAIN (costs false)
+SELECT * FROM range_list SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------
+ Sort
+ Sort Key: month_code, dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on range_list
+ Selected Partitions: 1
+ Selected Subpartitions: 1:1
+(7 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM range_list WHERE month_code = '201903' ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------------------
+ Sort
+ Sort Key: dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on range_list
+ Filter: ((month_code)::text = '201903'::text)
+ Selected Partitions: 2
+ Selected Subpartitions: 2:1
+(8 rows)
+
+drop table range_list;
+CREATE TABLE range_hash
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (month_code)
+(
+ PARTITION p_201901 VALUES LESS THAN( '201903' )
+ (
+ SUBPARTITION p_201901_a,
+ SUBPARTITION p_201901_b
+ ),
+ PARTITION p_201902 VALUES LESS THAN( '201904' )
+ (
+ SUBPARTITION p_201902_a,
+ SUBPARTITION p_201902_b
+ )
+);
+insert into range_hash values('201901', '1', '1', 1);
+insert into range_hash values('201902', '2', '1', 1);
+insert into range_hash values('201903', '2', '1', 1);
+insert into range_hash values('20190322', '1', '1', 1);
+SELECT * FROM range_hash SUBPARTITION (p_201901_a) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201901 | 1 | 1 | 1
+(1 row)
+
+SELECT * FROM range_hash SUBPARTITION (p_201901_b) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902 | 2 | 1 | 1
+(1 row)
+
+EXPLAIN (costs false)
+SELECT * FROM range_hash SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------
+ Sort
+ Sort Key: month_code, dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on range_hash
+ Selected Partitions: 1
+ Selected Subpartitions: 1:1
+(7 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM range_hash WHERE month_code = '201903' ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------------------
+ Sort
+ Sort Key: dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on range_hash
+ Filter: ((month_code)::text = '201903'::text)
+ Selected Partitions: 2
+ Selected Subpartitions: 2:1
+(8 rows)
+
+drop table range_hash;
+CREATE TABLE range_range
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (month_code)
+(
+ PARTITION p_201901 VALUES LESS THAN( '201903' )
+ (
+ SUBPARTITION p_201901_a VALUES LESS THAN( '20190220' ),
+ SUBPARTITION p_201901_b VALUES LESS THAN( '20190230' )
+ ),
+ PARTITION p_201902 VALUES LESS THAN( '201904' )
+ (
+ SUBPARTITION p_201902_a VALUES LESS THAN( '20190320' ),
+ SUBPARTITION p_201902_b VALUES LESS THAN( '20190330' )
+ )
+);
+insert into range_range values('201902', '1', '1', 1);
+insert into range_range values('20190222', '2', '1', 1);
+insert into range_range values('201903', '2', '1', 1);
+insert into range_range values('20190322', '1', '1', 1);
+insert into range_range values('20190333', '2', '1', 1);
+ERROR: inserted partition key does not map to any table partition
+EXPLAIN (costs false)
+SELECT * FROM range_range SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------
+ Sort
+ Sort Key: month_code, dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on range_range
+ Selected Partitions: 1
+ Selected Subpartitions: 1:1
+(7 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM range_range WHERE month_code = '201903' ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------------------
+ Sort
+ Sort Key: dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on range_range
+ Filter: ((month_code)::text = '201903'::text)
+ Selected Partitions: 2
+ Selected Subpartitions: 2:1
+(8 rows)
+
+drop table range_range;
+CREATE TABLE hash_list
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+)
+PARTITION BY hash (month_code) SUBPARTITION BY LIST (month_code)
+(
+ PARTITION p_201901
+ (
+ SUBPARTITION p_201901_a VALUES ( '201901' ),
+ SUBPARTITION p_201901_b VALUES ( '201902' )
+ ),
+ PARTITION p_201902
+ (
+ SUBPARTITION p_201902_a VALUES ( '201901' ),
+ SUBPARTITION p_201902_b VALUES ( '201902' )
+ )
+);
+insert into hash_list values('201901', '1', '1', 1);
+insert into hash_list values('201901', '2', '1', 1);
+insert into hash_list values('201902', '1', '1', 1);
+insert into hash_list values('201902', '2', '1', 1);
+insert into hash_list values('201903', '2', '1', 1);
+ERROR: inserted partition key does not map to any table partition
+SELECT * FROM hash_list SUBPARTITION (p_201901_a) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201901 | 1 | 1 | 1
+ 201901 | 2 | 1 | 1
+(2 rows)
+
+SELECT * FROM hash_list SUBPARTITION (p_201901_b) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+(0 rows)
+
+SELECT * FROM hash_list SUBPARTITION (p_201902_a) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+(0 rows)
+
+SELECT * FROM hash_list SUBPARTITION (p_201902_b) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902 | 1 | 1 | 1
+ 201902 | 2 | 1 | 1
+(2 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM hash_list SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+ QUERY PLAN
+-----------------------------------------------
+ Sort
+ Sort Key: month_code, dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on hash_list
+ Selected Partitions: 2
+ Selected Subpartitions: 2:1
+(7 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM hash_list WHERE month_code = '201901' ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------------------
+ Sort
+ Sort Key: dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on hash_list
+ Filter: ((month_code)::text = '201901'::text)
+ Selected Partitions: 1
+ Selected Subpartitions: 1:1
+(8 rows)
+
+drop table hash_list;
+CREATE TABLE hash_hash
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+)
+PARTITION BY hash (month_code) SUBPARTITION BY hash (month_code)
+(
+ PARTITION p_201901
+ (
+ SUBPARTITION p_201901_a,
+ SUBPARTITION p_201901_b
+ ),
+ PARTITION p_201902
+ (
+ SUBPARTITION p_201902_a,
+ SUBPARTITION p_201902_b
+ )
+);
+insert into hash_hash values('201901', '1', '1', 1);
+insert into hash_hash values('201901', '2', '1', 1);
+insert into hash_hash values('201903', '1', '1', 1);
+insert into hash_hash values('201903', '2', '1', 1);
+EXPLAIN (costs false)
+SELECT * FROM hash_hash SUBPARTITION FOR ('201901','201901') ORDER BY 1,2;
+ QUERY PLAN
+-----------------------------------------------
+ Sort
+ Sort Key: month_code, dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on hash_hash
+ Selected Partitions: 1
+ Selected Subpartitions: 1:1
+(7 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM hash_hash WHERE month_code = '201903' ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------------------
+ Sort
+ Sort Key: dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on hash_hash
+ Filter: ((month_code)::text = '201903'::text)
+ Selected Partitions: 2
+ Selected Subpartitions: 2:1
+(8 rows)
+
+drop table hash_hash;
+CREATE TABLE hash_range
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+)
+PARTITION BY hash (month_code) SUBPARTITION BY range (month_code)
+(
+ PARTITION p_201901
+ (
+ SUBPARTITION p_201901_a VALUES LESS THAN ( '201902' ),
+ SUBPARTITION p_201901_b VALUES LESS THAN ( '201903' )
+ ),
+ PARTITION p_201902
+ (
+ SUBPARTITION p_201902_a VALUES LESS THAN ( '201902' ),
+ SUBPARTITION p_201902_b VALUES LESS THAN ( '201903' )
+ )
+);
+insert into hash_range values('201901', '1', '1', 1);
+insert into hash_range values('201901', '2', '1', 1);
+insert into hash_range values('201902', '1', '1', 1);
+insert into hash_range values('201902', '2', '1', 1);
+insert into hash_range values('201903', '2', '1', 1);
+ERROR: inserted partition key does not map to any table partition
+SELECT * FROM hash_range SUBPARTITION (p_201901_a) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201901 | 1 | 1 | 1
+ 201901 | 2 | 1 | 1
+(2 rows)
+
+SELECT * FROM hash_range SUBPARTITION (p_201901_b) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+(0 rows)
+
+SELECT * FROM hash_range SUBPARTITION (p_201902_a) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+(0 rows)
+
+SELECT * FROM hash_range SUBPARTITION (p_201902_b) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902 | 1 | 1 | 1
+ 201902 | 2 | 1 | 1
+(2 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM hash_range SUBPARTITION FOR ('201901','201901') ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------
+ Sort
+ Sort Key: month_code, dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on hash_range
+ Selected Partitions: 1
+ Selected Subpartitions: 1:1
+(7 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM hash_range WHERE month_code = '201902' ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------------------
+ Sort
+ Sort Key: dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on hash_range
+ Filter: ((month_code)::text = '201902'::text)
+ Selected Partitions: 2
+ Selected Subpartitions: 2:1
+(8 rows)
+
+drop table hash_range;
--clean
DROP SCHEMA subpartition_createtable CASCADE;
NOTICE: drop cascades to table range_range_02
@@ -1754,23 +2285,30 @@ Command: CREATE TABLE SUBPARTITION
Description: define a new table subpartition
Syntax:
CREATE TABLE [ IF NOT EXISTS ] subpartition_table_name
-( { column_name data_type [ COLLATE collation ] [ column_constraint [ ... ] ]
+( { column_name data_type [ CHARACTER SET | CHARSET charset ]
+ [ COLLATE collation ] [ column_constraint [ ... ] ]
| table_constraint
| LIKE source_table [ like_option [...] ] }
[, ... ]
)
[ AUTO_INCREMENT [ = ] value ]
+ [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ][ [ DEFAULT ] COLLATE [ = ] collation ]
[ WITH ( {storage_parameter = value} [, ... ] ) ]
[ COMPRESS | NOCOMPRESS ]
[ TABLESPACE tablespace_name ]
- PARTITION BY {RANGE | LIST | HASH} (partition_key) SUBPARTITION BY {RANGE | LIST | HASH} (subpartition_key)
+ PARTITION BY {RANGE [ COLUMNS ] | LIST [ COLUMNS ] | HASH | KEY} (partition_key) [ PARTITIONS integer ] SUBPARTITION BY {RANGE | LIST | HASH | KEY} (subpartition_key) [ SUBPARTITIONS integer ]
(
- PARTITION partition_name1 [ VALUES LESS THAN (val1) | VALUES (val1[, ...]) ] [ TABLESPACE tablespace ]
+ PARTITION partition_name1 [ VALUES LESS THAN { (val1 | MAXVALUE) | MAXVALUE } | VALUES [ IN ] (val1[, ...]) ] [ TABLESPACE [=] tablespace ]
(
- { SUBPARTITION subpartition_name1 [ VALUES LESS THAN (val1_1) | VALUES (val1_1[, ...])] [ TABLESPACE tablespace ] } [, ...]
+ { SUBPARTITION subpartition_name1 [ VALUES LESS THAN (val1_1) | VALUES (val1_1[, ...])] [ TABLESPACE [=] tablespace ] } [, ...]
)
[, ...]
) [ { ENABLE | DISABLE } ROW MOVEMENT ];
+NOTICE: [ COLUMNS ] is only available in B-format database!
+NOTICE: [ PARTITIONS integer ] in RANGE/LIST partition is only available in B-format database!
+NOTICE: [ IN ] is only available in B-format database!
+NOTICE: KEY is only available in B-format database!
+NOTICE: MAXVALUE without parentheses is only available in B-format database!
where column_constraint can be:
[ CONSTRAINT constraint_name ]
@@ -1778,13 +2316,14 @@ where column_constraint can be:
NULL |
CHECK ( expression ) |
DEFAULT default_expr |
- GENERATED ALWAYS AS ( generation_expr ) STORED |
+ GENERATED ALWAYS AS ( generation_expr ) [STORED] |
AUTO_INCREMENT |
- UNIQUE index_parameters |
+ UNIQUE [KEY] index_parameters |
PRIMARY KEY index_parameters |
REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
[ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
+NOTICE: 'UNIQUE KEY' in table_constraint is only available in CENTRALIZED mode and B-format database!
where table_constraint can be:
[ CONSTRAINT [ constraint_name ] ]
{ CHECK ( expression ) |
@@ -1799,10 +2338,12 @@ where index_parameters can be:
[ WITH ( {storage_parameter = value} [, ... ] ) ]
[ USING INDEX TABLESPACE tablespace_name ]
-NOTICE: 'CREATE TABLE SUBPARTITION' is only avaliable in CENTRALIZED mode!
+NOTICE: 'CREATE TABLE SUBPARTITION' is only available in CENTRALIZED mode!
NOTICE: '[ constraint_name ]' in table_constraint is optional in CENTRALIZED mode and B-format database, it is mandatory in other scenarios.
-NOTICE: '[ index_name ]' in table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: '[ USING method ]' in table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: '[ ASC | DESC ]' in table_constraint is only avaliable in CENTRALIZED mode and B-format database!
-NOTICE: 'AUTO_INCREMENT' is only avaliable in CENTRALIZED mode and B-format database!
+NOTICE: '[ index_name ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ USING method ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ ASC | DESC ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: 'AUTO_INCREMENT' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ CHARACTER SET | CHARSET ]' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ COLLATE [ = ] collation ]' is only available in CENTRALIZED mode and B-format database!
diff --git a/src/test/regress/expected/hw_subpartition_scan.out b/src/test/regress/expected/hw_subpartition_scan.out
index 7fcd11a4d..85c6a7823 100644
--- a/src/test/regress/expected/hw_subpartition_scan.out
+++ b/src/test/regress/expected/hw_subpartition_scan.out
@@ -803,7 +803,7 @@ explain (costs off) select * from list_list_02 where col_1=(select max(1) from l
QUERY PLAN
------------------------------------------------------------------------
Partition Iterator
- Iterations: PART
+ Iterations: 7, Sub Iterations: 16
InitPlan 2 (returns $3)
-> Result
InitPlan 1 (returns $2)
@@ -817,8 +817,9 @@ explain (costs off) select * from list_list_02 where col_1=(select max(1) from l
Selected Subpartitions: ALL
-> Partitioned Seq Scan on list_list_02
Filter: (col_1 = $3)
- Selected Partitions: PART
-(16 rows)
+ Selected Partitions: 1..7
+ Selected Subpartitions: ALL
+(17 rows)
select * from list_list_02 where col_1=(select max(1) from list_list_02);
col_1 | col_2 | col_3 | col_4
diff --git a/src/test/regress/expected/hw_subpartition_split.out b/src/test/regress/expected/hw_subpartition_split.out
index 0033543d9..b52ab4cff 100644
--- a/src/test/regress/expected/hw_subpartition_split.out
+++ b/src/test/regress/expected/hw_subpartition_split.out
@@ -143,6 +143,12 @@ alter table list_list split subpartition p_201902_a values (3) into
subpartition p_201902_ac
);
ERROR: Only the default boundary subpartition can be splited.
+alter table list_list split subpartition p_201902_c values ((2,3),(3,4)) into
+(
+ subpartition p_201902_c,
+ subpartition p_201902_d
+);
+ERROR: number of boundary items NOT EQUAL to number of partition keys
drop table list_list;
-- range subpartition
CREATE TABLE range_range
diff --git a/src/test/regress/expected/merge_where_col.out b/src/test/regress/expected/merge_where_col.out
index 9c5ef7c16..f8e081c3f 100644
--- a/src/test/regress/expected/merge_where_col.out
+++ b/src/test/regress/expected/merge_where_col.out
@@ -132,10 +132,34 @@ SELECT * FROM tb_b ORDER BY 1;
(4 rows)
ROLLBACK;
+create table col_com_base_1(
+col_int integer,
+col_double double precision,
+col_date date
+);
+create table col_com_base_2(
+col_int integer,
+col_double double precision,
+col_date date
+);
+MERGE INTO col_com_base_1 Table_004 USING col_com_base_2 Table_003
+ ON ( Table_003.col_double = Table_004.col_double )
+WHEN MATCHED THEN UPDATE SET col_date = col_date
+WHERE Table_004.col_int = ( select SUM(Table_004.col_int) from col_com_base_1);
+ERROR: cannot use aggregate function in UPDATE
+LINE 4: WHERE Table_004.col_int = ( select SUM(Table_004.col_int) f...
+ ^
+UPDATE col_com_base_1 Table_004 SET col_int = 2 where Table_004.col_int = ( select SUM(Table_004.col_int) from col_com_base_1);
+ERROR: cannot use aggregate function in UPDATE
+LINE 1: ...ET col_int = 2 where Table_004.col_int = ( select SUM(Table_...
+ ^
+UPDATE col_com_base_1 Table_004 SET col_int = 2 where Table_004.col_int = ( select SUM(col_int) from col_com_base_1);
-- clean up
DROP SCHEMA merge_where_col CASCADE;
-NOTICE: drop cascades to 4 other objects
+NOTICE: drop cascades to 6 other objects
DETAIL: drop cascades to table merge_nest_tab1
drop cascades to table dt2
drop cascades to table tb_a
drop cascades to table tb_b
+drop cascades to table col_com_base_1
+drop cascades to table col_com_base_2
diff --git a/src/test/regress/expected/multi_update.out b/src/test/regress/expected/multi_update.out
index a8804cb01..b6064358b 100644
--- a/src/test/regress/expected/multi_update.out
+++ b/src/test/regress/expected/multi_update.out
@@ -636,8 +636,8 @@ select * from t_p_mutil_t1;
select * from t_p_mutil_t2;
t1 | t2
----+----
- 2 | 6
3 | 6
+ 2 | 6
(2 rows)
-- subpartition
diff --git a/src/test/regress/expected/mysql_delimiter.out b/src/test/regress/expected/mysql_delimiter.out
index 36d9be132..232b3edb0 100644
--- a/src/test/regress/expected/mysql_delimiter.out
+++ b/src/test/regress/expected/mysql_delimiter.out
@@ -45,10 +45,24 @@ select 1//
delimiter ;//
--Test delimiter length
-delimiter aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
-ERROR: syntax error at or near "";""
-LINE 1: delimiter aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ";"
- ^
+delimiter ""
+ERROR: zero-length delimited identifier at or near """"
+LINE 1: delimiter "" ";"
+ ^
+delimiter ''
+ERROR: DELIMITER must be followed by a 'delimiter' character or string at or near "";""
+LINE 1: delimiter '' ";"
+ ^
+delimiter aaaaaaaaaaaaaaaa
+ERROR: 'delimiter' length should less than 16 at or near "";""
+LINE 1: delimiter aaaaaaaaaaaaaaaa ";"
+ ^
+delimiter "aaaaaaaaaaaaaaaa"
+ERROR: 'delimiter' length should less than 16 at or near "";""
+LINE 1: delimiter "aaaaaaaaaaaaaaaa" ";"
+ ^
+delimiter aaaaaaaaaaaaaaa
+delimiter ;
--Test delimiter %
delimiter %;
select 1%
diff --git a/src/test/regress/expected/partition_minmax.out b/src/test/regress/expected/partition_minmax.out
index c5cc80d8b..793b8103d 100644
--- a/src/test/regress/expected/partition_minmax.out
+++ b/src/test/regress/expected/partition_minmax.out
@@ -1455,7 +1455,7 @@ explain(costs off, verbose on) select * from test_list_lt t1 where a = (select m
--------------------------------------------------------------------------------------------------------------------------
Partition Iterator
Output: t1.a, t1.b
- Iterations: PART
+ Iterations: 3
InitPlan 2 (returns $3)
-> Result
Output: $2
@@ -1477,10 +1477,10 @@ explain(costs off, verbose on) select * from test_list_lt t1 where a = (select m
-> Partitioned Bitmap Heap Scan on partition_minmax.test_list_lt t1
Output: t1.a, t1.b
Recheck Cond: (t1.a = $3)
- Selected Partitions: PART
+ Selected Partitions: 1..3
-> Partitioned Bitmap Index Scan on idx_list_a
Index Cond: (t1.a = $3)
- Selected Partitions: PART
+ Selected Partitions: 1..3
(28 rows)
select * from test_list_lt t1 where a = (select min(t2.a) from test_list_lt t2);
diff --git a/src/test/regress/expected/partition_pruning.out b/src/test/regress/expected/partition_pruning.out
new file mode 100644
index 000000000..977d8bbcf
--- /dev/null
+++ b/src/test/regress/expected/partition_pruning.out
@@ -0,0 +1,110 @@
+DROP SCHEMA partition_pruning;
+ERROR: schema "partition_pruning" does not exist
+CREATE SCHEMA partition_pruning;
+SET CURRENT_SCHEMA TO partition_pruning;
+drop table test_range;
+ERROR: table "test_range" does not exist
+create table test_range (a int, b int, c int) WITH (STORAGE_TYPE=USTORE)
+partition by range(a)
+(
+ partition p1 values less than (2000),
+ partition p2 values less than (3000),
+ partition p3 values less than (4000),
+ partition p4 values less than (5000),
+ partition p5 values less than (maxvalue)
+)ENABLE ROW MOVEMENT;
+insert into test_range values(1,1,1);
+insert into test_range values(3001,1,1);
+prepare p1 as select * from test_range where ctid = '(0,1)' and a = $1;
+explain (costs off)execute p1(1);
+ QUERY PLAN
+------------------------------------
+ Partitioned Tid Scan on test_range
+ TID Cond: (ctid = '(0,1)'::tid)
+ Filter: (a = $1)
+ Selected Partitions: PART
+(4 rows)
+
+execute p1(1);
+ a | b | c
+---+---+---
+ 1 | 1 | 1
+(1 row)
+
+execute p1(3001);
+ a | b | c
+------+---+---
+ 3001 | 1 | 1
+(1 row)
+
+drop table test_range;
+drop table test_range_pt;
+ERROR: table "test_range_pt" does not exist
+create table test_range_pt (a int, b int, c int)
+partition by range(a)
+(
+ partition p1 values less than (2000),
+ partition p2 values less than (3000),
+ partition p3 values less than (4000),
+ partition p4 values less than (5000),
+ partition p5 values less than (maxvalue)
+)ENABLE ROW MOVEMENT;
+insert into test_range_pt values(1,1),(2001,2),(3001,3),(4001,4),(5001,5);
+deallocate p1;
+prepare p1 as select * from test_range_pt where a = $1 or a is null;
+explain (costs off)execute p1(2001);
+ QUERY PLAN
+---------------------------------------------
+ Partition Iterator
+ Iterations: PART
+ -> Partitioned Seq Scan on test_range_pt
+ Filter: ((a = $1) OR (a IS NULL))
+ Selected Partitions: PART
+(5 rows)
+
+execute p1(2001);
+ a | b | c
+------+---+---
+ 2001 | 2 |
+(1 row)
+
+deallocate p1;
+prepare p1 as select * from test_range_pt where a = $1 or a = $2;
+explain (costs off)execute p1(2001,3001);
+ QUERY PLAN
+---------------------------------------------
+ Partition Iterator
+ Iterations: PART
+ -> Partitioned Seq Scan on test_range_pt
+ Filter: ((a = $1) OR (a = $2))
+ Selected Partitions: PART
+(5 rows)
+
+execute p1(2001,3001);
+ a | b | c
+------+---+---
+ 2001 | 2 |
+ 3001 | 3 |
+(2 rows)
+
+deallocate p1;
+prepare p1 as select * from test_range_pt where a = $1 and a = $2;
+explain (costs off)execute p1(2001,3001);
+ QUERY PLAN
+---------------------------------------------------
+ Result
+ One-Time Filter: ($1 = $2)
+ -> Partition Iterator
+ Iterations: PART
+ -> Partitioned Seq Scan on test_range_pt
+ Filter: (a = $2)
+ Selected Partitions: PART
+(7 rows)
+
+execute p1(2001,3001);
+ a | b | c
+---+---+---
+(0 rows)
+
+drop table test_range_pt;
+DROP SCHEMA partition_pruning;
diff --git a/src/test/regress/expected/pl_debugger_client.out b/src/test/regress/expected/pl_debugger_client.out
index 58be1f660..ec849ff20 100755
--- a/src/test/regress/expected/pl_debugger_client.out
+++ b/src/test/regress/expected/pl_debugger_client.out
@@ -1286,7 +1286,7 @@ select * from dbe_pldebugger.set_var('test.c', '$$2021-07-31$$::timestamp'); --
(1 row)
select * from dbe_pldebugger.set_var('vrec', '(1,1,1)::test%rowtype'); -- not ok
-WARNING: Exception occurs when trying to set variable: syntax error at or near "rowtype"
+WARNING: Exception occurs when trying to set variable: cannot cast type record to test
set_var
---------
f
diff --git a/src/test/regress/expected/plan_hint.out b/src/test/regress/expected/plan_hint.out
index d1872878f..0b9196bf2 100755
--- a/src/test/regress/expected/plan_hint.out
+++ b/src/test/regress/expected/plan_hint.out
@@ -2428,6 +2428,127 @@ WARNING: LINE 1: unsupport distributed hint at ')'
Filter: (a = 10)
(3 rows)
+explain (costs off) select /*+indexscan(''')*/ 1;
+ERROR: memory is temporarily unavailable
+DETAIL: Failed on request of size 1024 bytes under queryid 0 in stringinfo.cpp:58.
+explain (costs off) select /*+indexscan(""")*/ 1;
+ERROR: memory is temporarily unavailable
+DETAIL: Failed on request of size 1024 bytes under queryid 0 in stringinfo.cpp:58.
+explain (costs off) select /*+indexscan($$$)*/ 1;
+ERROR: memory is temporarily unavailable
+DETAIL: Failed on request of size 1024 bytes under queryid 0 in stringinfo.cpp:58.
+create table subpartition_hash_hash (
+ c1 int,
+ c2 int,
+ c3 text,
+ c4 varchar(20),
+ c5 int generated always as(2 * c1) stored
+) partition by hash(c1) subpartition by hash(c2) (
+ partition p1 (
+ subpartition p1_1,
+ subpartition p1_2,
+ subpartition p1_3,
+ subpartition p1_4,
+ subpartition p1_5
+ ),
+ partition p2 (
+ subpartition p2_1,
+ subpartition p2_2,
+ subpartition p2_3,
+ subpartition p2_4,
+ subpartition p2_5
+ ),
+ partition p3 (
+ subpartition p3_1,
+ subpartition p3_2,
+ subpartition p3_3,
+ subpartition p3_4,
+ subpartition p3_5
+ ),
+ partition p4 (
+ subpartition p4_1,
+ subpartition p4_2,
+ subpartition p4_3,
+ subpartition p4_4,
+ subpartition p4_5
+ ),
+ partition p5 (
+ subpartition p5_1,
+ subpartition p5_2,
+ subpartition p5_3,
+ subpartition p5_4,
+ subpartition p5_5
+ )
+);
+create index subpartition_hash_hash_i1 on subpartition_hash_hash(c1) local;
+create index subpartition_hash_hash_i2 on subpartition_hash_hash(c2) local;
+create index subpartition_hash_hash_i3 on subpartition_hash_hash(c3) local;
+create index subpartition_hash_hash_i4 on subpartition_hash_hash(c4) local;
+create index subpartition_hash_hash_i5 on subpartition_hash_hash(c5) local;
+create table partition_range (c1 int, c2 int, c3 text, c4 varchar(20)) with(orientation = column) partition by range(c1, c2) (
+ partition p1
+ values less than(10000, 10000),
+ partition p2
+ values less than(20000, 20000),
+ partition p3
+ values less than(30000, 30000),
+ partition p4
+ values less than(40000, 40000),
+ partition p5
+ values less than(50000, 50000),
+ partition p6
+ values less than(60000, 60000),
+ partition p7
+ values less than(70000, 70000),
+ partition p8
+ values less than(80000, 80000),
+ partition p9
+ values less than(90000, 90000),
+ partition p10
+ values less than(MAXVALUE, MAXVALUE)
+);
+create index partition_range_i1 on partition_range using btree(c1) local;
+create index partition_range_i2 on partition_range using psort(c2) local;
+create index partition_range_i3 on partition_range using btree(c3) local;
+create index partition_range_i4 on partition_range using btree(c4) local;
+explain (analyse,timing off,costs off) create table tb_create_merge_append6 as (
+ select
+ /*+ indexscan(subpartition_hash_hash subpartition_hash_hash_i1)*/
+ subpartition_hash_hash.c1 c1,
+ subpartition_hash_hash.c3 c2,
+ partition_range.c1 c3
+ from subpartition_hash_hash
+ join partition_range on subpartition_hash_hash.c2 = partition_range.c2
+ and subpartition_hash_hash.c1 > 8888
+ and subpartition_hash_hash.c1 < 88888
+ order by subpartition_hash_hash.c1
+ limit 100 offset 10
+);
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------
+ Insert on tb_create_merge_append6 (actual rows=0 loops=1)
+ -> Limit (actual rows=0 loops=1)
+ -> Sort (actual rows=0 loops=1)
+ Sort Key: subpartition_hash_hash.c1
+ Sort Method: quicksort Memory: 25kB
+ -> Hash Join (actual rows=0 loops=1)
+ Hash Cond: (partition_range.c2 = subpartition_hash_hash.c2)
+ -> Partition Iterator (actual rows=0 loops=1)
+ Iterations: 10
+ -> Row Adapter (actual rows=0 loops=10)
+ -> Partitioned CStore Scan on partition_range (actual rows=0 loops=10)
+ Selected Partitions: 1..10
+ -> Hash (Actual time: never executed)
+ Buckets: 0 Batches: 0 Memory Usage: 0kB
+ -> Partition Iterator (Actual time: never executed)
+ Iterations: 5, Sub Iterations: 25
+ -> Partitioned Index Scan using subpartition_hash_hash_i1 on subpartition_hash_hash (Actual time: never executed)
+ Index Cond: ((c1 > 8888) AND (c1 < 88888))
+ Selected Partitions: 1..5
+ Selected Subpartitions: ALL
+--?.*
+(21 rows)
+
drop view hint_view_1;
drop view hint_view_2;
drop view hint_view_3;
@@ -2449,4 +2570,8 @@ drop table hint_t4;
drop table hint_t5;
drop table hint_vec;
drop schema plan_hint cascade;
-NOTICE: drop cascades to table src
+NOTICE: drop cascades to 4 other objects
+DETAIL: drop cascades to table src
+drop cascades to table subpartition_hash_hash
+drop cascades to table partition_range
+drop cascades to table tb_create_merge_append6
diff --git a/src/test/regress/expected/query_rewrite.out b/src/test/regress/expected/query_rewrite.out
index 611e9b6a5..c1a70b92c 100755
--- a/src/test/regress/expected/query_rewrite.out
+++ b/src/test/regress/expected/query_rewrite.out
@@ -278,13 +278,56 @@ select 1 from customer where c_birth_month not in (with tmp1 as (select 1 fro
----------
(0 rows)
+--fix bug: Error hint: TableScan(seq_t0), relation name "seq_t0" is not found.
+drop table if exists seq_t0;
+NOTICE: table "seq_t0" does not exist, skipping
+drop table if exists seq_t1;
+NOTICE: table "seq_t1" does not exist, skipping
+create table seq_t0(a int, b int8 );
+create table seq_t1(a int, b int8 );
+explain (costs off) select /*+ tablescan(seq_t0) */ b from seq_t0 union all select /*+ tablescan(seq_t1) */ b from seq_t1;
+ QUERY PLAN
+--------------------------------
+ Result
+ -> Append
+ -> Seq Scan on seq_t0
+ -> Seq Scan on seq_t1
+(4 rows)
+
+--test pulling up sublinks: in orclause.
+drop table if exists t1;
+drop table if exists t2;
+create table t1(c1 int, c2 int, c3 int);
+create table t2(c1 int, c2 int, c3 int);
+insert into t1 values(1,0),(2,0),(1,0),(2,1),(1,1),(1,0),(2,0),(1,0),(2,1),(1,1),(2,3),(2,1),(1,2);
+insert into t2 values(1,0,1),(2,0,2),(1,0,1),(2,1,1),(1,1,0),(1,0,1),(2,0,2),(1,0,1),(2,1,1),(1,1,0),(0,0,1);
+explain (verbose, costs off) select * from t2 where t2.c1 in (select t1.c1 from t1 group by t1.c1, t1.c2) or t2.c2 = 1;
+ QUERY PLAN
+------------------------------------------------
+ Hash Left Join
+ Output: t2.c1, t2.c2, t2.c3
+ Hash Cond: (t2.c1 = t1.c1)
+ Filter: ((t1.c1 IS NOT NULL) OR (t2.c2 = 1))
+ -> Seq Scan on query_rewrite.t2
+ Output: t2.c1, t2.c2, t2.c3
+ -> Hash
+ Output: t1.c1
+ -> HashAggregate
+ Output: t1.c1
+ Group By Key: t1.c1
+ -> Seq Scan on query_rewrite.t1
+ Output: t1.c1
+(13 rows)
+
+drop table if exists t1;
+drop table if exists t2;
drop schema query_rewrite cascade;
NOTICE: drop cascades to 7 other objects
-DETAIL: drop cascades to table t1
-drop cascades to table t2
-drop cascades to table t3
+DETAIL: drop cascades to table t3
drop cascades to table k1
drop cascades to table k2
drop cascades to table k3
drop cascades to table customer
+drop cascades to table seq_t0
+drop cascades to table seq_t1
reset current_schema;
diff --git a/src/test/regress/expected/row_partition_iterator_elimination.out b/src/test/regress/expected/row_partition_iterator_elimination.out
index 93b3d7686..b83470b18 100644
--- a/src/test/regress/expected/row_partition_iterator_elimination.out
+++ b/src/test/regress/expected/row_partition_iterator_elimination.out
@@ -126,7 +126,7 @@ explain(costs off, verbose on) select * from test_hash_ht where a = 30 order by
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Filter: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(7 rows)
explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
@@ -137,7 +137,7 @@ explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Filter: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(6 rows)
explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
@@ -148,7 +148,7 @@ explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Filter: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(6 rows)
explain(costs off, verbose on) select * from test_range_pt where a = 30 order by 1,2,3,4;
@@ -197,7 +197,7 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Filter: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partition Iterator
Output: t2.a, t2.b, t2.c, t2.d
Iterations: 4
@@ -217,7 +217,7 @@ explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Filter: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partition Iterator
Output: t2.a, t2.b, t2.c, t2.d
Iterations: 4
@@ -242,7 +242,7 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Filter: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(14 rows)
explain(costs off, verbose on) select count(t1.b) from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5 and t2.a = 5;
@@ -260,7 +260,7 @@ explain(costs off, verbose on) select count(t1.b) from test_hash_ht t1 join test
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Filter: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(13 rows)
select * from test_hash_ht where a = 30 order by 1,2,3,4;
@@ -343,7 +343,7 @@ explain(costs off, verbose on) select * from test_hash_ht where a = 30 order by
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Filter: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(7 rows)
explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
@@ -354,7 +354,7 @@ explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Filter: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(6 rows)
explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
@@ -365,7 +365,7 @@ explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Filter: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(6 rows)
explain(costs off, verbose on) select * from test_range_pt where a = 30 order by 1,2,3,4;
@@ -422,7 +422,7 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Filter: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(18 rows)
explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5;
@@ -444,7 +444,7 @@ explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b
Filter: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(17 rows)
explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5 and t2.a = 5 order by 1,2,3,4;
@@ -459,7 +459,7 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Filter: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Hash
Output: t2.a, t2.b, t2.c, t2.d
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_range_pt t2
@@ -479,7 +479,7 @@ explain(costs off, verbose on) select count(t1.b) from test_hash_ht t1 join test
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Filter: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Hash
Output: t2.b
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_range_pt t2
@@ -568,7 +568,7 @@ explain(costs off, verbose on) select * from test_hash_ht where a = 30 order by
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Filter: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(7 rows)
explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
@@ -579,7 +579,7 @@ explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Filter: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(6 rows)
explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
@@ -590,7 +590,7 @@ explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Filter: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(6 rows)
explain(costs off, verbose on) select * from test_range_pt where a = 30 order by 1,2,3,4;
@@ -642,7 +642,7 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Filter: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Sort
Output: t2.a, t2.b, t2.c, t2.d
Sort Key: t2.b
@@ -668,7 +668,7 @@ explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b
Filter: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Sort
Output: t2.b
Sort Key: t2.b
@@ -695,7 +695,7 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Filter: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Sort
Output: t2.a, t2.b, t2.c, t2.d
Sort Key: t2.b
@@ -719,7 +719,7 @@ explain(costs off, verbose on) select count(t1.b) from test_hash_ht t1 join test
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.b
Filter: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Sort
Output: t2.b
Sort Key: t2.b
@@ -809,7 +809,7 @@ explain(costs off, verbose on) select * from test_hash_ht where a = 30 order by
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(7 rows)
explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
@@ -820,7 +820,7 @@ explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
-> Partitioned Index Only Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht
Output: a
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(6 rows)
explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
@@ -831,7 +831,7 @@ explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(6 rows)
explain(costs off, verbose on) select * from test_range_pt where a = 30 order by 1,2,3,4;
@@ -886,7 +886,7 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(16 rows)
explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5;
@@ -906,7 +906,7 @@ explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(15 rows)
explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5 and t2.a = 5 order by 1,2,3,4;
@@ -925,7 +925,7 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(14 rows)
explain(costs off, verbose on) select count(t1.b) from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5 and t2.a = 5;
@@ -943,7 +943,7 @@ explain(costs off, verbose on) select count(t1.b) from test_hash_ht t1 join test
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(13 rows)
select * from test_hash_ht where a = 30 order by 1,2,3,4;
@@ -1026,7 +1026,7 @@ explain(costs off, verbose on) select * from test_hash_ht where a = 30 order by
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(7 rows)
explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
@@ -1037,7 +1037,7 @@ explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
-> Partitioned Index Only Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht
Output: a
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(6 rows)
explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
@@ -1048,7 +1048,7 @@ explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(6 rows)
explain(costs off, verbose on) select * from test_range_pt where a = 30 order by 1,2,3,4;
@@ -1105,7 +1105,7 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(18 rows)
explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5;
@@ -1127,7 +1127,7 @@ explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(17 rows)
explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5 and t2.a = 5 order by 1,2,3,4;
@@ -1142,7 +1142,7 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Hash
Output: t2.a, t2.b, t2.c, t2.d
-> Partitioned Index Scan using idx_range_local on row_partition_iterator_elimination.test_range_pt t2
@@ -1162,7 +1162,7 @@ explain(costs off, verbose on) select count(t1.b) from test_hash_ht t1 join test
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Hash
Output: t2.b
-> Partitioned Index Scan using idx_range_local on row_partition_iterator_elimination.test_range_pt t2
@@ -1251,7 +1251,7 @@ explain(costs off, verbose on) select * from test_hash_ht where a = 30 order by
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(7 rows)
explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
@@ -1262,7 +1262,7 @@ explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
-> Partitioned Index Only Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht
Output: a
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(6 rows)
explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
@@ -1273,7 +1273,7 @@ explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(6 rows)
explain(costs off, verbose on) select * from test_range_pt where a = 30 order by 1,2,3,4;
@@ -1325,7 +1325,7 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Sort
Output: t2.a, t2.b, t2.c, t2.d
Sort Key: t2.b
@@ -1351,7 +1351,7 @@ explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Sort
Output: t2.b
Sort Key: t2.b
@@ -1378,7 +1378,7 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Sort
Output: t2.a, t2.b, t2.c, t2.d
Sort Key: t2.b
@@ -1402,7 +1402,7 @@ explain(costs off, verbose on) select count(t1.b) from test_hash_ht t1 join test
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.b
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Sort
Output: t2.b
Sort Key: t2.b
@@ -1492,10 +1492,10 @@ explain(costs off, verbose on) select * from test_hash_ht where a = 30 order by
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Recheck Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(10 rows)
explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
@@ -1506,10 +1506,10 @@ explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Recheck Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(9 rows)
explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
@@ -1520,10 +1520,10 @@ explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Recheck Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(9 rows)
explain(costs off, verbose on) select * from test_range_pt where a = 30 order by 1,2,3,4;
@@ -1587,10 +1587,10 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Recheck Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(19 rows)
explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5;
@@ -1610,10 +1610,10 @@ explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Recheck Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(18 rows)
explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5 and t2.a = 5 order by 1,2,3,4;
@@ -1635,10 +1635,10 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Recheck Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(20 rows)
explain(costs off, verbose on) select count(t1.b) from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5 and t2.a = 5;
@@ -1659,10 +1659,10 @@ explain(costs off, verbose on) select count(t1.b) from test_hash_ht t1 join test
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Recheck Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(19 rows)
select * from test_hash_ht where a = 30 order by 1,2,3,4;
@@ -1745,10 +1745,10 @@ explain(costs off, verbose on) select * from test_hash_ht where a = 30 order by
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Recheck Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(10 rows)
explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
@@ -1759,10 +1759,10 @@ explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Recheck Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(9 rows)
explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
@@ -1773,10 +1773,10 @@ explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Recheck Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(9 rows)
explain(costs off, verbose on) select * from test_range_pt where a = 30 order by 1,2,3,4;
@@ -1842,10 +1842,10 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Recheck Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(21 rows)
explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5;
@@ -1867,10 +1867,10 @@ explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b
Recheck Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(20 rows)
explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5 and t2.a = 5 order by 1,2,3,4;
@@ -1885,10 +1885,10 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Recheck Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Hash
Output: t2.a, t2.b, t2.c, t2.d
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_range_pt t2
@@ -1911,10 +1911,10 @@ explain(costs off, verbose on) select count(t1.b) from test_hash_ht t1 join test
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Recheck Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Hash
Output: t2.b
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_range_pt t2
@@ -2006,10 +2006,10 @@ explain(costs off, verbose on) select * from test_hash_ht where a = 30 order by
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Recheck Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(10 rows)
explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
@@ -2020,10 +2020,10 @@ explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Recheck Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(9 rows)
explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
@@ -2034,10 +2034,10 @@ explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Recheck Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(9 rows)
explain(costs off, verbose on) select * from test_range_pt where a = 30 order by 1,2,3,4;
@@ -2098,10 +2098,10 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Recheck Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Sort
Output: t2.a, t2.b, t2.c, t2.d
Sort Key: t2.b
@@ -2127,10 +2127,10 @@ explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b
Recheck Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Sort
Output: t2.b
Sort Key: t2.b
@@ -2157,10 +2157,10 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Recheck Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Sort
Output: t2.a, t2.b, t2.c, t2.d
Sort Key: t2.b
@@ -2187,10 +2187,10 @@ explain(costs off, verbose on) select count(t1.b) from test_hash_ht t1 join test
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.b
Recheck Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Sort
Output: t2.b
Sort Key: t2.b
@@ -2288,7 +2288,7 @@ explain(costs off, verbose on) select * from test_hash_ht where a = 30 order by
Filter: (test_hash_ht.a = 30)
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
- Selected Partitions: 4
+ Selected Partitions: 17
(11 rows)
explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
@@ -2303,7 +2303,7 @@ explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
Filter: (test_hash_ht.a = 30)
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
- Selected Partitions: 4
+ Selected Partitions: 17
(10 rows)
explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
@@ -2318,7 +2318,7 @@ explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
Filter: (test_hash_ht.a = 30)
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
- Selected Partitions: 4
+ Selected Partitions: 17
(10 rows)
explain(costs off, verbose on) select * from test_range_pt where a = 30 order by 1,2,3,4;
@@ -2383,7 +2383,7 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
Filter: (t1.a = 5)
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
- Selected Partitions: 9
+ Selected Partitions: 12
-> Vector Partition Iterator
Output: t2.a, t2.b, t2.c, t2.d
Iterations: 4
@@ -2409,7 +2409,7 @@ explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test
Filter: (t1.a = 5)
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
- Selected Partitions: 9
+ Selected Partitions: 12
-> Vector Partition Iterator
Output: t2.a, t2.b, t2.c, t2.d
Iterations: 4
@@ -2442,7 +2442,7 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
Filter: (t1.a = 5)
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
- Selected Partitions: 9
+ Selected Partitions: 12
(20 rows)
explain(costs off, verbose on) select count(t1.b) from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5 and t2.a = 5;
@@ -2466,7 +2466,7 @@ explain(costs off, verbose on) select count(t1.b) from test_hash_ht t1 join test
Filter: (t1.a = 5)
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
- Selected Partitions: 9
+ Selected Partitions: 12
(19 rows)
select * from test_hash_ht where a = 30 order by 1,2,3,4;
@@ -2554,7 +2554,7 @@ explain(costs off, verbose on) select * from test_hash_ht where a = 30 order by
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(11 rows)
explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
@@ -2569,7 +2569,7 @@ explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
-> Partitioned Index Only Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht
Output: a
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(10 rows)
explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
@@ -2584,7 +2584,7 @@ explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(10 rows)
explain(costs off, verbose on) select * from test_range_pt where a = 30 order by 1,2,3,4;
@@ -2657,7 +2657,7 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(22 rows)
explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5;
@@ -2683,7 +2683,7 @@ explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(21 rows)
explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5 and t2.a = 5 order by 1,2,3,4;
@@ -2708,7 +2708,7 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(20 rows)
explain(costs off, verbose on) select count(t1.b) from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5 and t2.a = 5;
@@ -2732,7 +2732,7 @@ explain(costs off, verbose on) select count(t1.b) from test_hash_ht t1 join test
-> Partitioned Index Scan using idx_hash_local on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(19 rows)
select * from test_hash_ht where a = 30 order by 1,2,3,4;
@@ -2820,10 +2820,10 @@ explain(costs off, verbose on) select * from test_hash_ht where a = 30 order by
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Recheck Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(14 rows)
explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
@@ -2838,10 +2838,10 @@ explain(costs off, verbose on) select count(a) from test_hash_ht where a = 30;
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Recheck Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(13 rows)
explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
@@ -2856,10 +2856,10 @@ explain(costs off, verbose on) select count(b) from test_hash_ht where a = 30;
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht
Output: a, b, c, d
Recheck Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(13 rows)
explain(costs off, verbose on) select * from test_range_pt where a = 30 order by 1,2,3,4;
@@ -2941,10 +2941,10 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Recheck Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(25 rows)
explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5;
@@ -2970,10 +2970,10 @@ explain(costs off, verbose on) select count(t1.a) from test_hash_ht t1 join test
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Recheck Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(24 rows)
explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5 and t2.a = 5 order by 1,2,3,4;
@@ -3001,10 +3001,10 @@ explain(costs off, verbose on) select * from test_hash_ht t1 join test_range_pt
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Recheck Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(26 rows)
explain(costs off, verbose on) select count(t1.b) from test_hash_ht t1 join test_range_pt t2 on t1.b = t2.b where t1.a = 5 and t2.a = 5;
@@ -3031,10 +3031,10 @@ explain(costs off, verbose on) select count(t1.b) from test_hash_ht t1 join test
-> Partitioned Bitmap Heap Scan on row_partition_iterator_elimination.test_hash_ht t1
Output: t1.a, t1.b, t1.c, t1.d
Recheck Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Bitmap Index Scan on idx_hash_local
Index Cond: (t1.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
(25 rows)
select * from test_hash_ht where a = 30 order by 1,2,3,4;
@@ -3131,14 +3131,14 @@ explain(costs off, verbose on) select * from test_hash_ht where a = 30 and ctid
Output: row_partition_iterator_elimination.test_hash_ht.a, row_partition_iterator_elimination.test_hash_ht.b, row_partition_iterator_elimination.test_hash_ht.c, row_partition_iterator_elimination.test_hash_ht.d
TID Cond: (row_partition_iterator_elimination.test_hash_ht.ctid = $2)
Filter: (row_partition_iterator_elimination.test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
InitPlan 1 (returns $2)
-> Limit
Output: row_partition_iterator_elimination.test_hash_ht.ctid
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht
Output: row_partition_iterator_elimination.test_hash_ht.ctid
Filter: (row_partition_iterator_elimination.test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(12 rows)
explain(costs off, verbose on) select * from (select * from test_hash_ht where a = 5 and ctid = (select ctid from test_hash_ht where a = 5 limit 1)) t1 join (select * from test_range_pt where a = 5 and ctid = (select ctid from test_range_pt where a = 5 limit 1)) t2 on t1.a = t2.a where t1.a = 5;
@@ -3152,7 +3152,7 @@ explain(costs off, verbose on) select * from (select * from test_hash_ht where a
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht
Output: row_partition_iterator_elimination.test_hash_ht.ctid
Filter: (row_partition_iterator_elimination.test_hash_ht.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
InitPlan 2 (returns $5)
-> Limit
Output: row_partition_iterator_elimination.test_range_pt.ctid
@@ -3164,7 +3164,7 @@ explain(costs off, verbose on) select * from (select * from test_hash_ht where a
Output: row_partition_iterator_elimination.test_hash_ht.a, row_partition_iterator_elimination.test_hash_ht.b, row_partition_iterator_elimination.test_hash_ht.c, row_partition_iterator_elimination.test_hash_ht.d
TID Cond: (row_partition_iterator_elimination.test_hash_ht.ctid = $2)
Filter: (row_partition_iterator_elimination.test_hash_ht.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Tid Scan on row_partition_iterator_elimination.test_range_pt
Output: row_partition_iterator_elimination.test_range_pt.a, row_partition_iterator_elimination.test_range_pt.b, row_partition_iterator_elimination.test_range_pt.c, row_partition_iterator_elimination.test_range_pt.d
TID Cond: (row_partition_iterator_elimination.test_range_pt.ctid = $5)
@@ -3198,14 +3198,14 @@ explain(costs off, verbose on) select * from test_hash_ht where a = 30 and ctid
Output: row_partition_iterator_elimination.test_hash_ht.a, row_partition_iterator_elimination.test_hash_ht.b, row_partition_iterator_elimination.test_hash_ht.c, row_partition_iterator_elimination.test_hash_ht.d
TID Cond: (row_partition_iterator_elimination.test_hash_ht.ctid = $2)
Filter: (row_partition_iterator_elimination.test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
InitPlan 1 (returns $2)
-> Limit
Output: row_partition_iterator_elimination.test_hash_ht.ctid
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht
Output: row_partition_iterator_elimination.test_hash_ht.ctid
Filter: (row_partition_iterator_elimination.test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(12 rows)
explain(costs off, verbose on) select * from (select * from test_hash_ht where a = 5 and ctid = (select ctid from test_hash_ht where a = 5 limit 1)) t1 join (select * from test_range_pt where a = 5 and ctid = (select ctid from test_range_pt where a = 5 limit 1)) t2 on t1.a = t2.a where t1.a = 5;
@@ -3219,7 +3219,7 @@ explain(costs off, verbose on) select * from (select * from test_hash_ht where a
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht
Output: row_partition_iterator_elimination.test_hash_ht.ctid
Filter: (row_partition_iterator_elimination.test_hash_ht.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
InitPlan 2 (returns $5)
-> Limit
Output: row_partition_iterator_elimination.test_range_pt.ctid
@@ -3231,7 +3231,7 @@ explain(costs off, verbose on) select * from (select * from test_hash_ht where a
Output: row_partition_iterator_elimination.test_hash_ht.a, row_partition_iterator_elimination.test_hash_ht.b, row_partition_iterator_elimination.test_hash_ht.c, row_partition_iterator_elimination.test_hash_ht.d
TID Cond: (row_partition_iterator_elimination.test_hash_ht.ctid = $2)
Filter: (row_partition_iterator_elimination.test_hash_ht.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Tid Scan on row_partition_iterator_elimination.test_range_pt
Output: row_partition_iterator_elimination.test_range_pt.a, row_partition_iterator_elimination.test_range_pt.b, row_partition_iterator_elimination.test_range_pt.c, row_partition_iterator_elimination.test_range_pt.d
TID Cond: (row_partition_iterator_elimination.test_range_pt.ctid = $5)
@@ -3265,14 +3265,14 @@ explain(costs off, verbose on) select * from test_hash_ht where a = 30 and ctid
Output: row_partition_iterator_elimination.test_hash_ht.a, row_partition_iterator_elimination.test_hash_ht.b, row_partition_iterator_elimination.test_hash_ht.c, row_partition_iterator_elimination.test_hash_ht.d
TID Cond: (row_partition_iterator_elimination.test_hash_ht.ctid = $2)
Filter: (row_partition_iterator_elimination.test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
InitPlan 1 (returns $2)
-> Limit
Output: row_partition_iterator_elimination.test_hash_ht.ctid
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht
Output: row_partition_iterator_elimination.test_hash_ht.ctid
Filter: (row_partition_iterator_elimination.test_hash_ht.a = 30)
- Selected Partitions: 4
+ Selected Partitions: 17
(12 rows)
explain(costs off, verbose on) select * from (select * from test_hash_ht where a = 5 and ctid = (select ctid from test_hash_ht where a = 5 limit 1)) t1 join (select * from test_range_pt where a = 5 and ctid = (select ctid from test_range_pt where a = 5 limit 1)) t2 on t1.a = t2.a where t1.a = 5;
@@ -3286,7 +3286,7 @@ explain(costs off, verbose on) select * from (select * from test_hash_ht where a
-> Partitioned Seq Scan on row_partition_iterator_elimination.test_hash_ht
Output: row_partition_iterator_elimination.test_hash_ht.ctid
Filter: (row_partition_iterator_elimination.test_hash_ht.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
InitPlan 2 (returns $5)
-> Limit
Output: row_partition_iterator_elimination.test_range_pt.ctid
@@ -3298,7 +3298,7 @@ explain(costs off, verbose on) select * from (select * from test_hash_ht where a
Output: row_partition_iterator_elimination.test_hash_ht.a, row_partition_iterator_elimination.test_hash_ht.b, row_partition_iterator_elimination.test_hash_ht.c, row_partition_iterator_elimination.test_hash_ht.d
TID Cond: (row_partition_iterator_elimination.test_hash_ht.ctid = $2)
Filter: (row_partition_iterator_elimination.test_hash_ht.a = 5)
- Selected Partitions: 9
+ Selected Partitions: 12
-> Partitioned Tid Scan on row_partition_iterator_elimination.test_range_pt
Output: row_partition_iterator_elimination.test_range_pt.a, row_partition_iterator_elimination.test_range_pt.b, row_partition_iterator_elimination.test_range_pt.c, row_partition_iterator_elimination.test_range_pt.d
TID Cond: (row_partition_iterator_elimination.test_range_pt.ctid = $5)
diff --git a/src/test/regress/expected/segment_subpartition_add_drop_partition.out b/src/test/regress/expected/segment_subpartition_add_drop_partition.out
index 1822f1671..9e6168925 100644
--- a/src/test/regress/expected/segment_subpartition_add_drop_partition.out
+++ b/src/test/regress/expected/segment_subpartition_add_drop_partition.out
@@ -77,7 +77,7 @@ ALTER TABLE range_range_sales MODIFY PARTITION customer4 ADD SUBPARTITION custom
ERROR: upper boundary of adding partition MUST overtop last existing partition
--fail, invalid format
ALTER TABLE range_range_sales MODIFY PARTITION customer2 ADD SUBPARTITION customer2_temp1 VALUES ('2015-01-01');
-ERROR: can not add none-range partition to range partition table
+ERROR: can not add none-range subpartition to range subpartition table
--check for ok after add
SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
FROM pg_class c1, pg_partition p1, pg_namespace n1
@@ -353,7 +353,7 @@ ALTER TABLE range_list_sales MODIFY PARTITION customer3 ADD SUBPARTITION custome
ERROR: list boundary of adding partition MUST NOT overlap with existing partition
--fail, invalid format
ALTER TABLE range_list_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('X');
-ERROR: can not add none-list partition to list partition table
+ERROR: can not add none-list subpartition to list subpartition table
--check for ok after add
SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
FROM pg_class c1, pg_partition p1, pg_namespace n1
@@ -622,7 +622,7 @@ LINE 1: ...MODIFY PARTITION customer1 ADD SUBPARTITION customer1_temp1;
^
--fail, invalid format
ALTER TABLE range_hash_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('X');
-ERROR: can not add hash partition
+ERROR: can not add hash subpartition
--check for ok after add
SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
FROM pg_class c1, pg_partition p1, pg_namespace n1
@@ -883,7 +883,7 @@ ALTER TABLE list_range_sales MODIFY PARTITION channel3 ADD SUBPARTITION channel3
ERROR: upper boundary of adding partition MUST overtop last existing partition
--fail, invalid format
ALTER TABLE list_range_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES (1500);
-ERROR: can not add none-range partition to range partition table
+ERROR: can not add none-range subpartition to range subpartition table
--check for ok after add
SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
FROM pg_class c1, pg_partition p1, pg_namespace n1
@@ -1155,7 +1155,7 @@ ALTER TABLE list_list_sales MODIFY PARTITION channel3 ADD SUBPARTITION channel3_
ERROR: list boundary of adding partition MUST NOT overlap with existing partition
--fail, invalid format
ALTER TABLE list_list_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES LESS THAN (1500);
-ERROR: can not add none-list partition to list partition table
+ERROR: can not add none-list subpartition to list subpartition table
--check for ok after add
SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
FROM pg_class c1, pg_partition p1, pg_namespace n1
@@ -1424,7 +1424,7 @@ LINE 1: ...s MODIFY PARTITION channel1 ADD SUBPARTITION channel1_temp1;
^
--fail, invalid format
ALTER TABLE list_hash_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES LESS THAN (1500);
-ERROR: can not add hash partition
+ERROR: can not add hash subpartition
--check for ok after add
SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
FROM pg_class c1, pg_partition p1, pg_namespace n1
@@ -1679,7 +1679,7 @@ ALTER TABLE hash_range_sales MODIFY PARTITION product2 ADD SUBPARTITION product2
ERROR: upper boundary of adding partition MUST overtop last existing partition
--fail, invalid format
ALTER TABLE hash_range_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_temp1 VALUES (DEFAULT);
-ERROR: can not add none-range partition to range partition table
+ERROR: can not add none-range subpartition to range subpartition table
--success, add 1 subpartition
ALTER TABLE hash_range_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_customer2 VALUES LESS THAN (MAXVALUE);
--check for ok after add
@@ -1933,7 +1933,7 @@ ALTER TABLE hash_list_sales MODIFY PARTITION product3 ADD SUBPARTITION product3_
ERROR: list boundary of adding partition MUST NOT overlap with existing partition
--fail, invalid format
ALTER TABLE hash_list_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_temp1 VALUES LESS THAN (MAXVALUE);
-ERROR: can not add none-list partition to list partition table
+ERROR: can not add none-list subpartition to list subpartition table
--success, add 1 subpartition
ALTER TABLE hash_list_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_channel2 VALUES (DEFAULT);
--check for ok after add
diff --git a/src/test/regress/expected/segment_subpartition_createtable.out b/src/test/regress/expected/segment_subpartition_createtable.out
index 1d876f1b7..ef7b5bbcd 100644
--- a/src/test/regress/expected/segment_subpartition_createtable.out
+++ b/src/test/regress/expected/segment_subpartition_createtable.out
@@ -73,9 +73,9 @@ insert into list_hash values('201903', '6', '1', 1);
select * from list_hash;
month_code | dept_code | user_no | sales_amt
------------+-----------+---------+-----------
+ 201902 | 1 | 1 | 1
201902 | 2 | 1 | 1
201902 | 3 | 1 | 1
- 201902 | 1 | 1 | 1
201903 | 4 | 1 | 1
201903 | 5 | 1 | 1
201903 | 6 | 1 | 1
@@ -187,12 +187,12 @@ insert into range_hash values('201903', '2', '1', 1);
select * from range_hash;
month_code | dept_code | user_no | sales_amt
------------+-----------+---------+-----------
+ 201902 | 1 | 1 | 1
+ 201902 | 1 | 1 | 1
201902 | 2 | 1 | 1
- 201902 | 1 | 1 | 1
- 201902 | 1 | 1 | 1
- 201903 | 2 | 1 | 1
- 201903 | 2 | 1 | 1
201903 | 1 | 1 | 1
+ 201903 | 2 | 1 | 1
+ 201903 | 2 | 1 | 1
(6 rows)
drop table range_hash;
@@ -263,12 +263,12 @@ insert into hash_list values('201903', '2', '1', 1);
select * from hash_list;
month_code | dept_code | user_no | sales_amt
------------+-----------+---------+-----------
- 201903 | 1 | 1 | 1
- 201903 | 2 | 1 | 1
- 201903 | 2 | 1 | 1
201901 | 1 | 1 | 1
201901 | 1 | 1 | 1
201901 | 2 | 1 | 1
+ 201903 | 1 | 1 | 1
+ 201903 | 2 | 1 | 1
+ 201903 | 2 | 1 | 1
(6 rows)
drop table hash_list;
@@ -301,12 +301,12 @@ insert into hash_hash values('201903', '2', '1', 1);
select * from hash_hash;
month_code | dept_code | user_no | sales_amt
------------+-----------+---------+-----------
- 201903 | 2 | 1 | 1
- 201903 | 2 | 1 | 1
- 201903 | 1 | 1 | 1
+ 201901 | 1 | 1 | 1
+ 201901 | 1 | 1 | 1
201901 | 2 | 1 | 1
- 201901 | 1 | 1 | 1
- 201901 | 1 | 1 | 1
+ 201903 | 1 | 1 | 1
+ 201903 | 2 | 1 | 1
+ 201903 | 2 | 1 | 1
(6 rows)
drop table hash_hash;
@@ -339,12 +339,12 @@ insert into hash_range values('201903', '2', '1', 1);
select * from hash_range;
month_code | dept_code | user_no | sales_amt
------------+-----------+---------+-----------
- 201903 | 1 | 1 | 1
- 201903 | 2 | 1 | 1
- 201903 | 2 | 1 | 1
201901 | 1 | 1 | 1
201901 | 1 | 1 | 1
201901 | 2 | 1 | 1
+ 201903 | 1 | 1 | 1
+ 201903 | 2 | 1 | 1
+ 201903 | 2 | 1 | 1
(6 rows)
drop table hash_range;
@@ -854,29 +854,6 @@ PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
);
drop table list_list;
--1.4 subpartition key check
--- 一级分区和二级分区分区键是同一列
-CREATE TABLE list_list
-(
- month_code VARCHAR2 ( 30 ) NOT NULL ,
- dept_code VARCHAR2 ( 30 ) NOT NULL ,
- user_no VARCHAR2 ( 30 ) NOT NULL ,
- sales_amt int
-) WITH (SEGMENT=ON)
-PARTITION BY LIST (month_code) SUBPARTITION BY LIST (month_code)
-(
- PARTITION p_201901 VALUES ( '201902' )
- (
- SUBPARTITION p_201901_a VALUES ( '1' ),
- SUBPARTITION p_201901_b VALUES ( '2' )
- ),
- PARTITION p_201902 VALUES ( '201903' )
- (
- SUBPARTITION p_201902_a VALUES ( '1' ),
- SUBPARTITION p_201902_b VALUES ( '2' )
- )
-);
-ERROR: The two partition keys of a subpartition partition table are the same.
-DETAIL: N/A
--二级分区的键值一样
CREATE TABLE list_list
(
@@ -1552,6 +1529,659 @@ PARTITION BY HASH (col_19) SUBPARTITION BY RANGE (col_2)
PARTITION p_hash_7
) ENABLE ROW MOVEMENT;
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "hash_range_pkey" for table "hash_range"
+drop table hash_range;
+-- test create table like only support range_range in subpartition
+CREATE TABLE range_range
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+ PARTITION p_201901 VALUES LESS THAN( '201903' )
+ (
+ SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+ SUBPARTITION p_201901_b VALUES LESS THAN( '3' )
+ ),
+ PARTITION p_201902 VALUES LESS THAN( '201904' )
+ (
+ SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+ SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+ )
+);
+create table t1(like range_range including partition);
+insert into t1 values('201902', '1', '1', 1);
+insert into t1 values('201902', '2', '1', 1);
+insert into t1 values('201902', '1', '1', 1);
+insert into t1 values('201903', '2', '1', 1);
+insert into t1 values('201903', '1', '1', 1);
+insert into t1 values('201903', '2', '1', 1);
+explain (costs off) select * from t1;
+ QUERY PLAN
+--------------------------------------
+ Partition Iterator
+ Iterations: 2, Sub Iterations: 4
+ -> Partitioned Seq Scan on t1
+ Selected Partitions: 1..2
+ Selected Subpartitions: ALL
+(5 rows)
+
+select * from t1;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902 | 1 | 1 | 1
+ 201902 | 1 | 1 | 1
+ 201902 | 2 | 1 | 1
+ 201903 | 1 | 1 | 1
+ 201903 | 2 | 1 | 1
+ 201903 | 2 | 1 | 1
+(6 rows)
+
+drop table t1;
+drop table range_range;
+CREATE TABLE list_list
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+ PARTITION p_201901 VALUES ( '201902' )
+ (
+ SUBPARTITION p_201901_a VALUES ( '1' ),
+ SUBPARTITION p_201901_b VALUES ( '2' )
+ ),
+ PARTITION p_201902 VALUES ( '201903' )
+ (
+ SUBPARTITION p_201902_a VALUES ( '1' ),
+ SUBPARTITION p_201902_b VALUES ( '2' )
+ )
+);
+create table t1(like list_list including partition);
+ERROR: Un-support feature
+DETAIL: The Like feature is not supported currently for List and Hash.
+drop table list_list;
+CREATE TABLE range_list
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
+(
+ PARTITION p_201901 VALUES LESS THAN( '201903' )
+ (
+ SUBPARTITION p_201901_a values ('1'),
+ SUBPARTITION p_201901_b values ('2')
+ ),
+ PARTITION p_201902 VALUES LESS THAN( '201904' )
+ (
+ SUBPARTITION p_201902_a values ('1'),
+ SUBPARTITION p_201902_b values ('2')
+ )
+);
+create table t1(like range_list including partition);
+ERROR: Un-support feature
+DETAIL: Create Table like with subpartition only support range strategy.
+drop table range_list;
+-- test the key of partition and subpartition is same column
+CREATE TABLE list_list
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (month_code)
+(
+ PARTITION p_201901 VALUES ( '201902' )
+ (
+ SUBPARTITION p_201901_a VALUES ( '201902' ),
+ SUBPARTITION p_201901_b VALUES ( '2' )
+ ),
+ PARTITION p_201902 VALUES ( '201903' )
+ (
+ SUBPARTITION p_201902_a VALUES ( '1' ),
+ SUBPARTITION p_201902_b VALUES ( '2' )
+ )
+);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+ERROR: inserted partition key does not map to any table partition
+insert into list_list values('2', '2', '1', 1);
+ERROR: inserted partition key does not map to any table partition
+EXPLAIN (costs false)
+SELECT * FROM list_list SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+ QUERY PLAN
+-----------------------------------------------
+ Sort
+ Sort Key: month_code, dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on list_list
+ Selected Partitions: 1
+ Selected Subpartitions: 1:1
+(7 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM list_list WHERE month_code = '201902' ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------------------
+ Sort
+ Sort Key: dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on list_list
+ Filter: ((month_code)::text = '201902'::text)
+ Selected Partitions: 1
+ Selected Subpartitions: 1:1
+(8 rows)
+
+drop table list_list;
+CREATE TABLE list_hash
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY LIST (month_code) SUBPARTITION BY HASH (month_code)
+(
+ PARTITION p_201901 VALUES ( '201902' )
+ (
+ SUBPARTITION p_201901_a,
+ SUBPARTITION p_201901_b
+ ),
+ PARTITION p_201902 VALUES ( '201903' )
+ (
+ SUBPARTITION p_201902_a,
+ SUBPARTITION p_201902_b
+ )
+);
+insert into list_hash values('201902', '1', '1', 1);
+insert into list_hash values('201902', '2', '1', 1);
+insert into list_hash values('201903', '5', '1', 1);
+insert into list_hash values('201903', '6', '1', 1);
+SELECT * FROM list_hash SUBPARTITION (p_201901_a) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+(0 rows)
+
+SELECT * FROM list_hash SUBPARTITION (p_201901_b) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902 | 1 | 1 | 1
+ 201902 | 2 | 1 | 1
+(2 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM list_hash SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+ QUERY PLAN
+-----------------------------------------------
+ Sort
+ Sort Key: month_code, dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on list_hash
+ Selected Partitions: 1
+ Selected Subpartitions: 1:1
+(7 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM list_hash WHERE month_code = '201903' ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------------------
+ Sort
+ Sort Key: dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on list_hash
+ Filter: ((month_code)::text = '201903'::text)
+ Selected Partitions: 2
+ Selected Subpartitions: 2:1
+(8 rows)
+
+drop table list_hash;
+CREATE TABLE list_range
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (month_code)
+(
+ PARTITION p_201901 VALUES ( '201902' )
+ (
+ SUBPARTITION p_201901_a values less than ('1'),
+ SUBPARTITION p_201901_b values less than ('2')
+ ),
+ PARTITION p_201902 VALUES ( '201903' )
+ (
+ SUBPARTITION p_201902_a values less than ('3'),
+ SUBPARTITION p_201902_b values less than ('4')
+ )
+);
+insert into list_range values('201902', '1', '1', 1);
+ERROR: inserted partition key does not map to any table partition
+insert into list_range values('201903', '2', '1', 1);
+SELECT * FROM list_range SUBPARTITION (p_201902_a) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201903 | 2 | 1 | 1
+(1 row)
+
+EXPLAIN (costs false)
+SELECT * FROM list_range SUBPARTITION FOR ('201903','201903') ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------
+ Sort
+ Sort Key: month_code, dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on list_range
+ Selected Partitions: 2
+ Selected Subpartitions: 2:1
+(7 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM list_range WHERE month_code = '201903' ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------------------
+ Sort
+ Sort Key: dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on list_range
+ Filter: ((month_code)::text = '201903'::text)
+ Selected Partitions: 2
+ Selected Subpartitions: 2:1
+(8 rows)
+
+drop table list_range;
+CREATE TABLE range_list
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (month_code)
+(
+ PARTITION p_201901 VALUES LESS THAN( '201903' )
+ (
+ SUBPARTITION p_201901_a values ('201901'),
+ SUBPARTITION p_201901_b values ('201902')
+ ),
+ PARTITION p_201902 VALUES LESS THAN( '201904' )
+ (
+ SUBPARTITION p_201902_a values ('201903'),
+ SUBPARTITION p_201902_b values ('201904')
+ )
+);
+insert into range_list values('201901', '1', '1', 1);
+insert into range_list values('201902', '2', '1', 1);
+insert into range_list values('201903', '1', '1', 1);
+insert into range_list values('201904', '2', '1', 1);
+ERROR: inserted partition key does not map to any table partition
+EXPLAIN (costs false)
+SELECT * FROM range_list SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------
+ Sort
+ Sort Key: month_code, dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on range_list
+ Selected Partitions: 1
+ Selected Subpartitions: 1:1
+(7 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM range_list WHERE month_code = '201903' ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------------------
+ Sort
+ Sort Key: dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on range_list
+ Filter: ((month_code)::text = '201903'::text)
+ Selected Partitions: 2
+ Selected Subpartitions: 2:1
+(8 rows)
+
+drop table range_list;
+CREATE TABLE range_hash
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (month_code)
+(
+ PARTITION p_201901 VALUES LESS THAN( '201903' )
+ (
+ SUBPARTITION p_201901_a,
+ SUBPARTITION p_201901_b
+ ),
+ PARTITION p_201902 VALUES LESS THAN( '201904' )
+ (
+ SUBPARTITION p_201902_a,
+ SUBPARTITION p_201902_b
+ )
+);
+insert into range_hash values('201901', '1', '1', 1);
+insert into range_hash values('201902', '2', '1', 1);
+insert into range_hash values('201903', '2', '1', 1);
+insert into range_hash values('20190322', '1', '1', 1);
+SELECT * FROM range_hash SUBPARTITION (p_201901_a) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201901 | 1 | 1 | 1
+(1 row)
+
+SELECT * FROM range_hash SUBPARTITION (p_201901_b) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902 | 2 | 1 | 1
+(1 row)
+
+EXPLAIN (costs false)
+SELECT * FROM range_hash SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------
+ Sort
+ Sort Key: month_code, dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on range_hash
+ Selected Partitions: 1
+ Selected Subpartitions: 1:1
+(7 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM range_hash WHERE month_code = '201903' ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------------------
+ Sort
+ Sort Key: dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on range_hash
+ Filter: ((month_code)::text = '201903'::text)
+ Selected Partitions: 2
+ Selected Subpartitions: 2:1
+(8 rows)
+
+drop table range_hash;
+CREATE TABLE range_range
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (month_code)
+(
+ PARTITION p_201901 VALUES LESS THAN( '201903' )
+ (
+ SUBPARTITION p_201901_a VALUES LESS THAN( '20190220' ),
+ SUBPARTITION p_201901_b VALUES LESS THAN( '20190230' )
+ ),
+ PARTITION p_201902 VALUES LESS THAN( '201904' )
+ (
+ SUBPARTITION p_201902_a VALUES LESS THAN( '20190320' ),
+ SUBPARTITION p_201902_b VALUES LESS THAN( '20190330' )
+ )
+);
+insert into range_range values('201902', '1', '1', 1);
+insert into range_range values('20190222', '2', '1', 1);
+insert into range_range values('201903', '2', '1', 1);
+insert into range_range values('20190322', '1', '1', 1);
+insert into range_range values('20190333', '2', '1', 1);
+ERROR: inserted partition key does not map to any table partition
+EXPLAIN (costs false)
+SELECT * FROM range_range SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------
+ Sort
+ Sort Key: month_code, dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on range_range
+ Selected Partitions: 1
+ Selected Subpartitions: 1:1
+(7 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM range_range WHERE month_code = '201903' ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------------------
+ Sort
+ Sort Key: dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on range_range
+ Filter: ((month_code)::text = '201903'::text)
+ Selected Partitions: 2
+ Selected Subpartitions: 2:1
+(8 rows)
+
+drop table range_range;
+CREATE TABLE hash_list
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY hash (month_code) SUBPARTITION BY LIST (month_code)
+(
+ PARTITION p_201901
+ (
+ SUBPARTITION p_201901_a VALUES ( '201901' ),
+ SUBPARTITION p_201901_b VALUES ( '201902' )
+ ),
+ PARTITION p_201902
+ (
+ SUBPARTITION p_201902_a VALUES ( '201901' ),
+ SUBPARTITION p_201902_b VALUES ( '201902' )
+ )
+);
+insert into hash_list values('201901', '1', '1', 1);
+insert into hash_list values('201901', '2', '1', 1);
+insert into hash_list values('201902', '1', '1', 1);
+insert into hash_list values('201902', '2', '1', 1);
+insert into hash_list values('201903', '2', '1', 1);
+ERROR: inserted partition key does not map to any table partition
+SELECT * FROM hash_list SUBPARTITION (p_201901_a) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201901 | 1 | 1 | 1
+ 201901 | 2 | 1 | 1
+(2 rows)
+
+SELECT * FROM hash_list SUBPARTITION (p_201901_b) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+(0 rows)
+
+SELECT * FROM hash_list SUBPARTITION (p_201902_a) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+(0 rows)
+
+SELECT * FROM hash_list SUBPARTITION (p_201902_b) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902 | 1 | 1 | 1
+ 201902 | 2 | 1 | 1
+(2 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM hash_list SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+ QUERY PLAN
+-----------------------------------------------
+ Sort
+ Sort Key: month_code, dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on hash_list
+ Selected Partitions: 2
+ Selected Subpartitions: 2:1
+(7 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM hash_list WHERE month_code = '201901' ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------------------
+ Sort
+ Sort Key: dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on hash_list
+ Filter: ((month_code)::text = '201901'::text)
+ Selected Partitions: 1
+ Selected Subpartitions: 1:1
+(8 rows)
+
+drop table hash_list;
+CREATE TABLE hash_hash
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY hash (month_code) SUBPARTITION BY hash (month_code)
+(
+ PARTITION p_201901
+ (
+ SUBPARTITION p_201901_a,
+ SUBPARTITION p_201901_b
+ ),
+ PARTITION p_201902
+ (
+ SUBPARTITION p_201902_a,
+ SUBPARTITION p_201902_b
+ )
+);
+insert into hash_hash values('201901', '1', '1', 1);
+insert into hash_hash values('201901', '2', '1', 1);
+insert into hash_hash values('201903', '1', '1', 1);
+insert into hash_hash values('201903', '2', '1', 1);
+EXPLAIN (costs false)
+SELECT * FROM hash_hash SUBPARTITION FOR ('201901','201901') ORDER BY 1,2;
+ QUERY PLAN
+-----------------------------------------------
+ Sort
+ Sort Key: month_code, dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on hash_hash
+ Selected Partitions: 1
+ Selected Subpartitions: 1:1
+(7 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM hash_hash WHERE month_code = '201903' ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------------------
+ Sort
+ Sort Key: dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on hash_hash
+ Filter: ((month_code)::text = '201903'::text)
+ Selected Partitions: 2
+ Selected Subpartitions: 2:1
+(8 rows)
+
+drop table hash_hash;
+CREATE TABLE hash_range
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY hash (month_code) SUBPARTITION BY range (month_code)
+(
+ PARTITION p_201901
+ (
+ SUBPARTITION p_201901_a VALUES LESS THAN ( '201902' ),
+ SUBPARTITION p_201901_b VALUES LESS THAN ( '201903' )
+ ),
+ PARTITION p_201902
+ (
+ SUBPARTITION p_201902_a VALUES LESS THAN ( '201902' ),
+ SUBPARTITION p_201902_b VALUES LESS THAN ( '201903' )
+ )
+);
+insert into hash_range values('201901', '1', '1', 1);
+insert into hash_range values('201901', '2', '1', 1);
+insert into hash_range values('201902', '1', '1', 1);
+insert into hash_range values('201902', '2', '1', 1);
+insert into hash_range values('201903', '2', '1', 1);
+ERROR: inserted partition key does not map to any table partition
+SELECT * FROM hash_range SUBPARTITION (p_201901_a) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201901 | 1 | 1 | 1
+ 201901 | 2 | 1 | 1
+(2 rows)
+
+SELECT * FROM hash_range SUBPARTITION (p_201901_b) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+(0 rows)
+
+SELECT * FROM hash_range SUBPARTITION (p_201902_a) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+(0 rows)
+
+SELECT * FROM hash_range SUBPARTITION (p_201902_b) ORDER BY 1,2;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902 | 1 | 1 | 1
+ 201902 | 2 | 1 | 1
+(2 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM hash_range SUBPARTITION FOR ('201901','201901') ORDER BY 1,2;
+ QUERY PLAN
+------------------------------------------------
+ Sort
+ Sort Key: month_code, dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on hash_range
+ Selected Partitions: 1
+ Selected Subpartitions: 1:1
+(7 rows)
+
+EXPLAIN (costs false)
+SELECT * FROM hash_range WHERE month_code = '201902' ORDER BY 1,2;
+ QUERY PLAN
+-------------------------------------------------------------
+ Sort
+ Sort Key: dept_code
+ -> Partition Iterator
+ Iterations: 1, Sub Iterations: 1
+ -> Partitioned Seq Scan on hash_range
+ Filter: ((month_code)::text = '201902'::text)
+ Selected Partitions: 2
+ Selected Subpartitions: 2:1
+(8 rows)
+
drop table hash_range;
--clean
DROP SCHEMA segment_subpartition_createtable CASCADE;
diff --git a/src/test/regress/expected/set_transaction_test.out b/src/test/regress/expected/set_transaction_test.out
new file mode 100644
index 000000000..c29fa8e98
--- /dev/null
+++ b/src/test/regress/expected/set_transaction_test.out
@@ -0,0 +1,73 @@
+\h SET TRANSACTION
+Command: SET TRANSACTION
+Description: set the characteristics of the current transaction
+Syntax:
+{SET [ LOCAL | SESSION | GLOBAL ] TRANSACTION|SET SESSION CHARACTERISTICS AS TRANSACTION}
+ { ISOLATION LEVEL { READ COMMITTED | READ UNCOMMITTED }
+ | { READ WRITE | READ ONLY | SERIALIZABLE | REPEATABLE READ }
+ } [, ...]
+SET TRANSACTION SNAPSHOT snapshot_id;
+NOTICE: SET GLOBAL TRANSACTION is only available in CENTRALIZED mode and B-format database!
+
+SET GLOBAL TRANSACTION READ ONLY;
+ERROR: SET GLOBAL TRANSACTION is only supported in B_FORMAT.
+SET SESSION TRANSACTION READ ONLY;
+create database test_set_tran dbcompatibility 'b';
+\c test_set_tran
+SET SESSION TRANSACTION READ ONLY;
+CREATE DATABASE test_set_tran1;
+set b_format_behavior_compat_options = 'set_session_transaction';
+SET SESSION TRANSACTION READ ONLY;
+CREATE DATABASE test_set_tran2;
+ERROR: cannot execute CREATE DATABASE in a read-only transaction
+\c test_set_tran;
+CREATE DATABASE test_set_tran2;
+\c test_set_tran;
+SET GLOBAL TRANSACTION READ ONLY;
+CREATE DATABASE test_set_tran3;
+\c test_set_tran;
+CREATE DATABASE test_set_tran4;
+ERROR: cannot execute CREATE DATABASE in a read-only transaction
+SET GLOBAL TRANSACTION READ WRITE;
+CREATE DATABASE test_set_tran4;
+ERROR: cannot execute CREATE DATABASE in a read-only transaction
+\c test_set_tran;
+CREATE DATABASE test_set_tran4;
+SET GLOBAL TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+SET GLOBAL TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY;
+SET GLOBAL TRANSACTION ISOLATION LEVEL SERIALIZABLE READ WRITE;
+SET GLOBAL TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+SET GLOBAL TRANSACTION ISOLATION LEVEL REPEATABLE READ READ ONLY;
+SET GLOBAL TRANSACTION ISOLATION LEVEL REPEATABLE READ READ WRITE;
+SET GLOBAL TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SET GLOBAL TRANSACTION ISOLATION LEVEL READ UNCOMMITTED READ ONLY;
+SET GLOBAL TRANSACTION ISOLATION LEVEL READ UNCOMMITTED READ WRITE;
+SET GLOBAL TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SET GLOBAL TRANSACTION ISOLATION LEVEL READ COMMITTED READ ONLY;
+SET GLOBAL TRANSACTION ISOLATION LEVEL READ COMMITTED READ WRITE;
+\c test_set_tran;
+SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY;
+SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE READ WRITE;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ READ ONLY;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ READ WRITE;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED READ ONLY;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED READ WRITE;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED READ ONLY;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED READ WRITE;
+\c test_set_tran;
+CREATE USER newuser PASSWORD 'gauss@123';
+SET SESSION AUTHORIZATION newuser PASSWORD 'gauss@123';
+SET GLOBAL TRANSACTION ISOLATION LEVEL READ COMMITTED READ ONLY;
+ERROR: permission denied for database test_set_tran
+DETAIL: N/A
+RESET SESSION AUTHORIZATION;
+\c regression;
+DROP DATABASE test_set_tran;
+DROP DATABASE test_set_tran1;
+DROP DATABASE test_set_tran2;
+DROP DATABASE test_set_tran3;
+DROP DATABASE test_set_tran4;
diff --git a/src/test/regress/expected/setop_1.out b/src/test/regress/expected/setop_1.out
old mode 100644
new mode 100755
index 7ab8c86fa..11fa2b7ea
--- a/src/test/regress/expected/setop_1.out
+++ b/src/test/regress/expected/setop_1.out
@@ -224,10 +224,10 @@ explain (verbose on, costs off) select b, c from test_union_1 minus select b, c
(11 rows)
explain (verbose on, costs off) select b, substr(c, 1, 3), c from test_union_1 minus (select 1, t2.b::varchar(10), t1.c from (select a,b,case c when 1 then 1 else null end as c from test_union_2 where b<0) t1 right join test_union_2 t2 on t1.b=t2.c group by 1, 2, 3);
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------
HashSetOp Except
- Output: "*SELECT* 1".b, "*SELECT* 1".substr, (("*SELECT* 1".c)::numeric), (0)
+ Output: "*SELECT* 1".b, "*SELECT* 1".substr, "*SELECT* 1".c, (0)
-> Append
-> Subquery Scan on "*SELECT* 1"
Output: "*SELECT* 1".b, "*SELECT* 1".substr, "*SELECT* 1".c, 0
@@ -236,17 +236,17 @@ explain (verbose on, costs off) select b, substr(c, 1, 3), c from test_union_1
-> Subquery Scan on "*SELECT* 2"
Output: 1, ("*SELECT* 2".b)::text, "*SELECT* 2".c, 1
-> HashAggregate
- Output: (1), ((t2.b)::character varying(10)), (CASE test_union_2.c WHEN 1 THEN 1::numeric ELSE NULL::numeric END)
- Group By Key: 1, (t2.b)::character varying(10), (CASE test_union_2.c WHEN 1 THEN 1::numeric ELSE NULL::numeric END)
+ Output: (1), ((t2.b)::character varying(10)), (CASE test_union_2.c WHEN 1 THEN 1 ELSE NULL::integer END)
+ Group By Key: 1, (t2.b)::character varying(10), (CASE test_union_2.c WHEN 1 THEN 1 ELSE NULL::integer END)
-> Hash Left Join
- Output: 1, (t2.b)::character varying(10), (CASE test_union_2.c WHEN 1 THEN 1::numeric ELSE NULL::numeric END)
+ Output: 1, (t2.b)::character varying(10), (CASE test_union_2.c WHEN 1 THEN 1 ELSE NULL::integer END)
Hash Cond: (t2.c = test_union_2.b)
-> Seq Scan on distribute_setop_1.test_union_2 t2
Output: t2.a, t2.b, t2.c
-> Hash
- Output: test_union_2.b, (CASE test_union_2.c WHEN 1 THEN 1::numeric ELSE NULL::numeric END)
+ Output: test_union_2.b, (CASE test_union_2.c WHEN 1 THEN 1 ELSE NULL::integer END)
-> Seq Scan on distribute_setop_1.test_union_2
- Output: test_union_2.b, CASE test_union_2.c WHEN 1 THEN 1::numeric ELSE NULL::numeric END
+ Output: test_union_2.b, CASE test_union_2.c WHEN 1 THEN 1 ELSE NULL::integer END
Filter: (test_union_2.b < 0)
(22 rows)
diff --git a/src/test/regress/expected/single_node_enum.out b/src/test/regress/expected/single_node_enum.out
old mode 100644
new mode 100755
index 0a6098ddb..5a2e6ec81
--- a/src/test/regress/expected/single_node_enum.out
+++ b/src/test/regress/expected/single_node_enum.out
@@ -140,15 +140,15 @@ WHERE enumtypid = 'insenum'::regtype
ORDER BY enumsortorder;
enumlabel | so
-----------+----
- L1 | 1
- i1 | 2
- i2 | 3
- i3 | 4
- i4 | 5
- i5 | 6
- i6 | 7
- i7 | 8
- i8 | 9
+ L1 | 1
+ i1 | 2
+ i2 | 3
+ i3 | 4
+ i4 | 5
+ i5 | 6
+ i6 | 7
+ i7 | 8
+ i8 | 9
i9 | 10
i10 | 11
i11 | 12
@@ -160,18 +160,18 @@ ORDER BY enumsortorder;
i17 | 18
i18 | 19
i19 | 20
- i20 |
- i21 |
- i22 |
- i23 |
- i24 |
- i25 |
- i26 |
- i27 |
- i28 |
- i29 |
- i30 |
- L2 |
+ i20 |
+ i21 |
+ i22 |
+ i23 |
+ i24 |
+ i25 |
+ i26 |
+ i27 |
+ i28 |
+ i29 |
+ i30 |
+ L2 |
(32 rows)
--
diff --git a/src/test/regress/expected/single_node_update.out b/src/test/regress/expected/single_node_update.out
index 23bf7aadc..8f4a1ff93 100644
--- a/src/test/regress/expected/single_node_update.out
+++ b/src/test/regress/expected/single_node_update.out
@@ -425,11 +425,11 @@ alter table t4 alter b set default now();
alter table t4 modify b timestamp on update current_timestamp;
\d t4
- Table "public.t4"
- Column | Type | Modifiers
---------+-----------------------------+-------------------------------------------
+ Table "public.t4"
+ Column | Type | Modifiers
+--------+-----------------------------+-----------------------------
a | integer |
- b | timestamp without time zone | default now() on update pg_systimestamp()
+ b | timestamp without time zone | on update pg_systimestamp()
alter table t4 alter b drop default;
\d t4
@@ -482,11 +482,11 @@ alter table t4 alter b drop default;
alter table t4 modify b not null;
alter table t4 modify b timestamp on update current_timestamp;
\d t4
- Table "public.t4"
- Column | Type | Modifiers
---------+-----------------------------+--------------------------------------
+ Table "public.t4"
+ Column | Type | Modifiers
+--------+-----------------------------+-----------------------------
a | integer |
- b | timestamp without time zone | not null on update pg_systimestamp()
+ b | timestamp without time zone | on update pg_systimestamp()
alter table t4 modify b null;
alter table t4 modify b timestamp;
@@ -513,6 +513,30 @@ alter table t4 modify b timestamp on update localtimestamp;
a | integer |
b | timestamp without time zone | on update ('now'::text)::timestamp without time zone
+alter table t4 alter b set default now();
+\d t4;
+ Table "public.t4"
+ Column | Type | Modifiers
+--------+-----------------------------+--------------------------------------------------------------------
+ a | integer |
+ b | timestamp without time zone | default now() on update ('now'::text)::timestamp without time zone
+
+alter table t4 change b b1 timestamp on update current_timestamp;
+\d t4
+ Table "public.t4"
+ Column | Type | Modifiers
+--------+-----------------------------+-----------------------------
+ a | integer |
+ b1 | timestamp without time zone | on update pg_systimestamp()
+
+alter table t4 change b1 b2 timestamp not null default now() on update localtimestamp;
+\d t4
+ Table "public.t4"
+ Column | Type | Modifiers
+--------+-----------------------------+-----------------------------------------------------------------------------
+ a | integer |
+ b2 | timestamp without time zone | not null default now() on update ('now'::text)::timestamp without time zone
+
CREATE TABLE t5(id int, a timestamp default now() on update current_timestamp, b timestamp on update current_timestamp, c timestamp default now());
\d t5
Table "public.t5"
@@ -642,4 +666,4 @@ select * from t_dmpportal_common_intent;
(3 rows)
\c regression
-DROP database mysql;
\ No newline at end of file
+DROP database mysql;
diff --git a/src/test/regress/expected/smp.out b/src/test/regress/expected/smp.out
index ab77c90a4..779039e57 100644
--- a/src/test/regress/expected/smp.out
+++ b/src/test/regress/expected/smp.out
@@ -1204,6 +1204,48 @@ select * from (select a, rownum as row from (select a from t3) where rownum <= 1
10 | 10
(6 rows)
+create table col_table_001 (id int, name char[] ) with (orientation=column);
+create table col_table_002 (id int, aid int,name char[] ,apple char[]) with (orientation=column);
+insert into col_table_001 values(1, '{a,b,c}' );
+insert into col_table_001 values(2, '{b,b,b}' );
+insert into col_table_001 values(3, '{c,c,c}' );
+insert into col_table_001 values(4, '{a}' );
+insert into col_table_001 values(5, '{b}' );
+insert into col_table_001 values(6, '{c}' );
+insert into col_table_001 values(7, '{a,b,c}' );
+insert into col_table_001 values(8, '{b,c,a}' );
+insert into col_table_001 values(9, '{c,a,b}' );
+insert into col_table_001 values(10, '{c,a,b}' );
+insert into col_table_002 values(11, 1,'{a,s,d}' );
+insert into col_table_002 values(12, 1,'{b,n,m}' );
+insert into col_table_002 values(13, 2,'{c,v,b}' );
+insert into col_table_002 values(14, 1,'{a}' );
+insert into col_table_002 values(15, 1,'{b}' );
+insert into col_table_002 values(15, 2,'{c}' );
+insert into col_table_002 values(17, 1,'{a,s,d}','{a,b,c}' );
+insert into col_table_002 values(18, 1,'{b,n,m}','{a,b,c}' );
+insert into col_table_002 values(19, 2,'{c,v,b}','{a,b,c}');
+insert into col_table_002 values(20, 2,'{c,v,b}','{b,c,a}');
+insert into col_table_002 values(21, 21,'{c,c,b}','{b,c,a}');
+select * from col_table_001 where EXISTS (select * from col_table_002 where col_table_001.name[1] =col_table_002.apple[1]) order by id;
+ id | name
+----+---------
+ 1 | {a,b,c}
+ 2 | {b,b,b}
+ 4 | {a}
+ 5 | {b}
+ 7 | {a,b,c}
+ 8 | {b,c,a}
+(6 rows)
+
+select * from col_table_001 where EXISTS (select * from col_table_002 where col_table_001.name[1:3] =col_table_002.apple[1:3]) order by id;
+ id | name
+----+---------
+ 1 | {a,b,c}
+ 7 | {a,b,c}
+ 8 | {b,c,a}
+(3 rows)
+
CREATE TABLE bmsql_item (
i_id int NoT NULL,
i_name varchar(24),
@@ -1428,11 +1470,13 @@ where no_o_id not in ( with tmp as (select w_id from bmsql_warehouse where bmsql
--clean
set search_path=public;
drop schema test_smp cascade;
-NOTICE: drop cascades to 9 other objects
+NOTICE: drop cascades to 11 other objects
DETAIL: drop cascades to table test_smp.t1
drop cascades to table test_smp.t2
drop cascades to table test_smp.t3
drop cascades to table test_smp.t4
+drop cascades to table test_smp.col_table_001
+drop cascades to table test_smp.col_table_002
drop cascades to table test_smp.bmsql_item
drop cascades to table test_smp.bmsql_warehouse
drop cascades to function test_smp.f1(text)
diff --git a/src/test/regress/expected/sqlbypass_partition.out b/src/test/regress/expected/sqlbypass_partition.out
index effc12636..30e311822 100755
--- a/src/test/regress/expected/sqlbypass_partition.out
+++ b/src/test/regress/expected/sqlbypass_partition.out
@@ -2766,7 +2766,7 @@ explain(costs off, verbose on) select *from test_hash_ht where a = 2000 and b =
Output: a, b, c
Index Cond: (test_hash_ht.b = 1)
Filter: (test_hash_ht.a = 2000)
- Selected Partitions: 5
+ Selected Partitions: 4
(6 rows)
prepare p1 as select *from test_hash_ht where a = $1 and b = $2 order by c;
diff --git a/src/test/regress/expected/sw_bugfix-2.out b/src/test/regress/expected/sw_bugfix-2.out
index c5edc1552..33c1e7d4e 100755
--- a/src/test/regress/expected/sw_bugfix-2.out
+++ b/src/test/regress/expected/sw_bugfix-2.out
@@ -1947,3 +1947,87 @@ ORDER BY t.manager_id;
(1 row)
drop table swcb_employees;
+-- test start with has sub clause
+DROP TABLE IF EXISTS DAT_DEPARTMENT;
+CREATE TABLE DAT_DEPARTMENT(
+ stru_id nvarchar2(10) NOT NULL,
+ sup_stru nvarchar2(10),
+ stru_state nvarchar2(8)
+)
+WITH (orientation=row, compression=no);
+CREATE INDEX sup_stru_dat_department ON DAT_DEPARTMENT USING btree(sup_stru) TABLESPACE pg_default;
+CREATE INDEX idx_br_dept_stru_id ON DAT_DEPARTMENT USING btree(stru_id) TABLESPACE pg_default;
+insert into DAT_DEPARTMENT(stru_id,sup_stru,stru_state) values('01','02','2');
+insert into DAT_DEPARTMENT(stru_id,sup_stru,stru_state) values('02','01','2');
+SELECT A.STRU_ID DEPTID,LEVEL,CONNECT_BY_ISCYCLE
+FROM DAT_DEPARTMENT A
+START WITH A.STRU_ID IN
+(SELECT B.STRU_ID DEPTID
+FROM DAT_DEPARTMENT B
+WHERE B.SUP_STRU = '01' OR B.SUP_STRU='02'
+)
+CONNECT BY NOCYCLE PRIOR A.STRU_ID =A.SUP_STRU;
+ deptid | level | connect_by_iscycle
+--------+-------+--------------------
+ 01 | 1 | 0
+ 02 | 2 | 1
+ 02 | 1 | 0
+ 01 | 2 | 1
+(4 rows)
+
+DROP TABLE DAT_DEPARTMENT;
+-- test RTE_JOIN in start with
+DROP TABLE IF EXISTS zb_layer;
+DROP TABLE IF EXISTS rtms_dict;
+DROP TABLE IF EXISTS zb_model;
+CREATE TABLE zb_layer(
+ id character varying(20) NOT NULL,
+ zb_code character varying(20),
+ zb_name character varying(20),
+ zb_organ character varying(50),
+ zb_apply character varying(20),
+ zb_layer_standard character varying(20),
+ zb_threshold_value character varying(30),
+ zb_warning_value character varying(20)
+)
+WITH (orientation=row, compression=no);
+CREATE TABLE rtms_dict(
+ id character varying(10),
+ area character varying(20),
+ cn_area character varying(30),
+ code character varying(50),
+ cname character varying(50),
+ locale character varying(10)
+)
+WITH (orientation=row, compression=no);
+CREATE TABLE zb_model(
+ id character varying(10) NOT NULL,
+ zb_code character varying(20),
+ zb_name character varying(300),
+ zb_risk_area character varying(3),
+ zb_parent_id character varying(20),
+ zb_weight character varying(10),
+ zb_layer_flag character varying(3),
+ zb_status character varying(3)
+)
+WITH (orientation=row, compression=no);
+SELECT DISTINCT I.ZB_CODE,D.CNAME,DECODE(I.ZB_LAYER_FLAG,NULL,D.CNAME,I.ZB_NAME) ZBNAME
+FROM ZB_MODEL I
+LEFT JOIN ZB_LAYER N ON I.ZB_CODE = N.ZB_CODE
+LEFT JOIN RTMS_DICT D ON D.CODE = I.ZB_RISK_AREA AND D.AREA = 'RICK_AREA'
+WHERE NVL(I.ZB_STATUS,1) = 1
+AND I.ZB_CODE NOT IN
+(
+ SELECT T.ZB_CODE FROM ZB_MODEL T WHERE T.ZB_RISK_AREA = 2
+)
+CONNECT BY PRIOR I.ZB_CODE = I.ZB_PARENT_ID
+START WITH I.ZB_CODE IN
+(SELECT ZB_CODE FROM ZB_MODEL)
+ORDER BY I.ZB_CODE;
+ zb_code | cname | zbname
+---------+-------+--------
+(0 rows)
+
+DROP TABLE zb_layer;
+DROP TABLE rtms_dict;
+DROP TABLE zb_model;
diff --git a/src/test/regress/expected/sw_icbc.out b/src/test/regress/expected/sw_icbc.out
index 39e4baf9e..258ecb39e 100755
--- a/src/test/regress/expected/sw_icbc.out
+++ b/src/test/regress/expected/sw_icbc.out
@@ -425,53 +425,88 @@ START WITH tt.id = 1;
(7 rows)
--test correlated sublink in targetlist
-explain select b.id, (select count(a.id) from t1 a where a.pid = b.id) c from t1 b
+explain (costs off) select b.id, (select count(a.id) from t1 a where a.pid = b.id) c from t1 b
start with b.id=1 connect by prior b.id = b.pid;
- QUERY PLAN
--------------------------------------------------------------------------------------------------
- CTE Scan on tmp_reuslt (cost=18.22..122.64 rows=91 width=4)
+ QUERY PLAN
+--------------------------------------------------------------
+ CTE Scan on tmp_reuslt
CTE tmp_reuslt
- -> StartWith Operator (cost=0.00..18.22 rows=91 width=10)
+ -> StartWith Operator
Start With pseudo atts: RUITR, array_key_1
- -> Recursive Union (cost=0.00..18.22 rows=91 width=10)
- -> Seq Scan on t1 b (cost=0.00..1.11 rows=1 width=10)
+ -> Recursive Union
+ -> Seq Scan on t1 b
Filter: (id = 1)
- -> Hash Join (cost=0.33..1.53 rows=9 width=10)
+ -> Hash Join
Hash Cond: (b.pid = tmp_reuslt."b@id")
- -> Seq Scan on t1 b (cost=0.00..1.09 rows=9 width=10)
- -> Hash (cost=0.20..0.20 rows=10 width=4)
- -> WorkTable Scan on tmp_reuslt (cost=0.00..0.20 rows=10 width=4)
+ -> Seq Scan on t1 b
+ -> Hash
+ -> WorkTable Scan on tmp_reuslt
SubPlan 2
- -> Aggregate (cost=1.12..1.13 rows=1 width=12)
- -> Seq Scan on t1 a (cost=0.00..1.11 rows=2 width=4)
+ -> Aggregate
+ -> Seq Scan on t1 a
Filter: (pid = tmp_reuslt."b@id")
(16 rows)
-explain select * from t1 as test
+explain (costs off) select * from t1 as test
where not exists (select 1 from t1 where test.id = t1.id)
start with test.id = 1 connect by prior test.id = test.pid;
- QUERY PLAN
--------------------------------------------------------------------------------------------------
- CTE Scan on tmp_reuslt (cost=18.22..121.28 rows=46 width=40)
+ QUERY PLAN
+--------------------------------------------------------------------
+ CTE Scan on tmp_reuslt
Filter: (NOT (alternatives: SubPlan 2 or hashed SubPlan 3))
CTE tmp_reuslt
- -> StartWith Operator (cost=0.00..18.22 rows=91 width=10)
+ -> StartWith Operator
Start With pseudo atts: RUITR, array_key_1
- -> Recursive Union (cost=0.00..18.22 rows=91 width=10)
- -> Seq Scan on t1 test (cost=0.00..1.11 rows=1 width=10)
+ -> Recursive Union
+ -> Seq Scan on t1 test
Filter: (id = 1)
- -> Hash Join (cost=0.33..1.53 rows=9 width=10)
+ -> Hash Join
Hash Cond: (test.pid = tmp_reuslt."test@id")
- -> Seq Scan on t1 test (cost=0.00..1.09 rows=9 width=10)
- -> Hash (cost=0.20..0.20 rows=10 width=4)
- -> WorkTable Scan on tmp_reuslt (cost=0.00..0.20 rows=10 width=4)
+ -> Seq Scan on t1 test
+ -> Hash
+ -> WorkTable Scan on tmp_reuslt
SubPlan 2
- -> Seq Scan on t1 (cost=0.00..1.11 rows=1 width=0)
+ -> Seq Scan on t1
Filter: (tmp_reuslt."test@id" = id)
SubPlan 3
- -> Seq Scan on t1 (cost=0.00..1.09 rows=9 width=4)
+ -> Seq Scan on t1
(18 rows)
+--test start with in correlated sublink
+explain (costs off) select * from t1 where t1.name = 'test' and exists(select * from t2 where t1.id = id start with name = 'test' connect by prior id = pid);
+ QUERY PLAN
+-------------------------------------------------------------------------------------
+ Seq Scan on t1
+ Filter: ((name = 'test'::text) AND (alternatives: SubPlan 2 or hashed SubPlan 4))
+ SubPlan 2
+ -> CTE Scan on tmp_reuslt
+ Filter: (t1.id = "t2@id")
+ CTE tmp_reuslt
+ -> StartWith Operator
+ Start With pseudo atts: RUITR, array_key_1
+ -> Recursive Union
+ -> Seq Scan on t2
+ Filter: (name = 'test'::text)
+ -> Hash Join
+ Hash Cond: (swtest.t2.pid = tmp_reuslt."t2@id")
+ -> Seq Scan on t2
+ -> Hash
+ -> WorkTable Scan on tmp_reuslt
+ SubPlan 4
+ -> CTE Scan on tmp_reuslt
+ CTE tmp_reuslt
+ -> StartWith Operator
+ Start With pseudo atts: RUITR, array_key_1
+ -> Recursive Union
+ -> Seq Scan on t2
+ Filter: (name = 'test'::text)
+ -> Hash Join
+ Hash Cond: (swtest.t2.pid = tmp_reuslt."t2@id")
+ -> Seq Scan on t2
+ -> Hash
+ -> WorkTable Scan on tmp_reuslt
+(29 rows)
+
--multiple tables case
explain (costs off) select * from t1, t2 where t1.id = t2.id start with t1.id = t2.id and t1.id = 1 connect by prior t1.id = t1.pid;
QUERY PLAN
diff --git a/src/test/regress/expected/test_auto_increment.out b/src/test/regress/expected/test_auto_increment.out
index 51ad1d667..32038196c 100644
--- a/src/test/regress/expected/test_auto_increment.out
+++ b/src/test/regress/expected/test_auto_increment.out
@@ -43,10 +43,10 @@ DROP TABLE test_create_autoinc;
CREATE TABLE test_create_autoinc(
a int AUTO_INCREMENT UNIQUE KEY,
b varchar(32)
-); -- ERROR
-ERROR: syntax error at or near "KEY"
-LINE 2: a int AUTO_INCREMENT UNIQUE KEY,
- ^
+);
+NOTICE: CREATE TABLE will create implicit sequence "test_create_autoinc_a_seq" for serial column "test_create_autoinc.a"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_create_autoinc_a_key" for table "test_create_autoinc"
+DROP TABLE test_create_autoinc;
CREATE TABLE test_create_autoinc(
a int AUTO_INCREMENT UNIQUE,
b varchar(32)
@@ -132,6 +132,12 @@ ERROR: multiple default values specified for column "id" of table "test_create_
CREATE TABLE test_create_autoinc_err(id int auto_increment primary key GENERATED ALWAYS AS (a+1) STORED, name varchar(200),a int);
NOTICE: CREATE TABLE will create implicit sequence "test_create_autoinc_err_id_seq" for serial column "test_create_autoinc_err.id"
ERROR: both default and generation expression specified for column "id" of table "test_create_autoinc_err"
+CREATE TABLE test_create_autoinc_err(id int auto_increment primary key, name varchar(200),a int GENERATED ALWAYS AS (id+1) STORED);
+NOTICE: CREATE TABLE will create implicit sequence "test_create_autoinc_err_id_seq" for serial column "test_create_autoinc_err.id"
+ERROR: generated column cannot refer to auto_increment column
+CREATE TABLE test_create_autoinc_err(id int GENERATED ALWAYS AS (a+1) STORED, name varchar(200),a int auto_increment primary key);
+NOTICE: CREATE TABLE will create implicit sequence "test_create_autoinc_err_a_seq" for serial column "test_create_autoinc_err.a"
+ERROR: generated column cannot refer to auto_increment column
--auto_increment value error
CREATE TABLE test_create_autoinc_err(id int auto_increment primary key, name varchar(200),a int) auto_increment=-1;
ERROR: syntax error at or near "-"
@@ -161,25 +167,25 @@ CREATE TEMPORARY TABLE test_create_autoinc_err(id int auto_increment primary key
ERROR: invalid input syntax for type int16: "1.1"
DETAIL: text contain invalid character
-- datatype error
-CREATE TABLE test_create_autoinc_err(id SERIAL auto_increment primary key, name varchar(200),a int);
-NOTICE: CREATE TABLE will create implicit sequence "test_create_autoinc_err_id_seq" for serial column "test_create_autoinc_err.id"
+CREATE TABLE test_create_autoinc_err1(id SERIAL auto_increment primary key, name varchar(200),a int);
+NOTICE: CREATE TABLE will create implicit sequence "test_create_autoinc_err1_id_seq" for serial column "test_create_autoinc_err1.id"
ERROR: The datatype of column 'id' does not support auto_increment
-CREATE TABLE test_create_autoinc_err(id DECIMAL(10,4) auto_increment primary key, name varchar(200),a int);
-NOTICE: CREATE TABLE will create implicit sequence "test_create_autoinc_err_id_seq" for serial column "test_create_autoinc_err.id"
+CREATE TABLE test_create_autoinc_err1(id DECIMAL(10,4) auto_increment primary key, name varchar(200),a int);
+NOTICE: CREATE TABLE will create implicit sequence "test_create_autoinc_err1_id_seq" for serial column "test_create_autoinc_err1.id"
ERROR: The datatype of column 'id' does not support auto_increment
-CREATE TABLE test_create_autoinc_err(id NUMERIC(10,4) auto_increment primary key, name varchar(200),a int);
-NOTICE: CREATE TABLE will create implicit sequence "test_create_autoinc_err_id_seq" for serial column "test_create_autoinc_err.id"
+CREATE TABLE test_create_autoinc_err1(id NUMERIC(10,4) auto_increment primary key, name varchar(200),a int);
+NOTICE: CREATE TABLE will create implicit sequence "test_create_autoinc_err1_id_seq" for serial column "test_create_autoinc_err1.id"
ERROR: The datatype of column 'id' does not support auto_increment
-CREATE TABLE test_create_autoinc_err(id text auto_increment primary key, name varchar(200),a int);
-NOTICE: CREATE TABLE will create implicit sequence "test_create_autoinc_err_id_seq" for serial column "test_create_autoinc_err.id"
+CREATE TABLE test_create_autoinc_err1(id text auto_increment primary key, name varchar(200),a int);
+NOTICE: CREATE TABLE will create implicit sequence "test_create_autoinc_err1_id_seq" for serial column "test_create_autoinc_err1.id"
ERROR: The datatype of column 'id' does not support auto_increment
-CREATE TABLE test_create_autoinc_err(id oid auto_increment primary key, name varchar(200),a int);
-NOTICE: CREATE TABLE will create implicit sequence "test_create_autoinc_err_id_seq" for serial column "test_create_autoinc_err.id"
+CREATE TABLE test_create_autoinc_err1(id oid auto_increment primary key, name varchar(200),a int);
+NOTICE: CREATE TABLE will create implicit sequence "test_create_autoinc_err1_id_seq" for serial column "test_create_autoinc_err1.id"
ERROR: The datatype of column 'id' does not support auto_increment
-CREATE TABLE test_create_autoinc_err(id int[] auto_increment primary key, name varchar(200),a int);
-NOTICE: CREATE TABLE will create implicit sequence "test_create_autoinc_err_id_seq" for serial column "test_create_autoinc_err.id"
+CREATE TABLE test_create_autoinc_err1(id int[] auto_increment primary key, name varchar(200),a int);
+NOTICE: CREATE TABLE will create implicit sequence "test_create_autoinc_err1_id_seq" for serial column "test_create_autoinc_err1.id"
ERROR: The datatype of column 'id' does not support auto_increment
-CREATE TABLE test_create_autoinc_err(id int16 auto_increment, name varchar(200),a int, unique(id)) auto_increment=170141183460469231731687303715884105727;
+CREATE TABLE test_create_autoinc_err1(id int16 auto_increment, name varchar(200),a int, unique(id)) auto_increment=170141183460469231731687303715884105727;
ERROR: It's not supported to create int16 column
-- table type error
CREATE TABLE test_create_autoinc_err(id INTEGER auto_increment, name varchar(200),a int, primary key(id)) with (ORIENTATION=column);
@@ -448,6 +454,9 @@ ERROR: Incorrect table definition, there can be only one auto_increment column
CREATE TABLE test_create_autoinc_like_err(LIKE test_create_autoinc_source INCLUDING INDEXES) with (ORIENTATION=column);
ERROR: Un-supported feature
DETAIL: Orientation type column is not supported for auto_increment
+CREATE TABLE test_create_autoinc_like_err(LIKE test_create_autoinc_source INCLUDING INDEXES, a int GENERATED ALWAYS AS (id+1) STORED);
+NOTICE: CREATE TABLE will create implicit sequence "test_create_autoinc_like_err_id_seq" for serial column "test_create_autoinc_like_err.id"
+ERROR: generated column cannot refer to auto_increment column
--row table
CREATE TABLE test_create_autoinc_like(LIKE test_create_autoinc_source INCLUDING INDEXES);
NOTICE: CREATE TABLE will create implicit sequence "test_create_autoinc_like_id_seq" for serial column "test_create_autoinc_like.id"
@@ -561,10 +570,22 @@ ALTER TABLE test_alter_autoinc_col ADD COLUMN id int AUTO_INCREMENT primary key;
ERROR: Un-supported feature
DETAIL: Orientation type column is not supported for auto_increment
DROP TABLE test_alter_autoinc_col;
+-- auto_increment and generated column
+CREATE TABLE test_alter_autoinc(col int);
+ALTER TABLE test_alter_autoinc ADD COLUMN a int GENERATED ALWAYS AS (b+1), ADD COLUMN b int auto_increment primary key;
+NOTICE: ALTER TABLE will create implicit sequence "test_alter_autoinc_b_seq" for serial column "test_alter_autoinc.b"
+ERROR: column "b" does not exist
+ALTER TABLE test_alter_autoinc ADD COLUMN a int auto_increment primary key, ADD COLUMN b int GENERATED ALWAYS AS (a+1);
+NOTICE: ALTER TABLE will create implicit sequence "test_alter_autoinc_a_seq" for serial column "test_alter_autoinc.a"
+ERROR: generated column cannot refer to auto_increment column
+DROP TABLE test_alter_autoinc;
--astore with data
CREATE TABLE test_alter_autoinc(col int);
INSERT INTO test_alter_autoinc VALUES(1);
INSERT INTO test_alter_autoinc VALUES(2);
+ALTER TABLE test_alter_autoinc ADD COLUMN id int AUTO_INCREMENT; -- ERROR
+NOTICE: ALTER TABLE will create implicit sequence "test_alter_autoinc_id_seq" for serial column "test_alter_autoinc.id"
+ERROR: auto_increment column must be defined as a unique or primary key
ALTER TABLE test_alter_autoinc ADD COLUMN id int AUTO_INCREMENT primary key;
NOTICE: ALTER TABLE will create implicit sequence "test_alter_autoinc_id_seq" for serial column "test_alter_autoinc.id"
NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_alter_autoinc_pkey" for table "test_alter_autoinc"
@@ -663,7 +684,7 @@ SELECT pg_catalog.pg_get_tabledef('test_alter_autoinc');
-- error
ALTER TABLE test_alter_autoinc ADD COLUMN new_id int AUTO_INCREMENT;
NOTICE: ALTER TABLE will create implicit sequence "test_alter_autoinc_new_id_seq" for serial column "test_alter_autoinc.new_id"
-ERROR: Incorrect table definition, auto_increment column must be defined as a key
+ERROR: Incorrect column definition, there can be only one auto_increment column
ALTER TABLE test_alter_autoinc ADD COLUMN new_id int AUTO_INCREMENT UNIQUE;
NOTICE: ALTER TABLE will create implicit sequence "test_alter_autoinc_new_id_seq" for serial column "test_alter_autoinc.new_id"
ERROR: Incorrect column definition, there can be only one auto_increment column
@@ -673,7 +694,7 @@ ALTER TABLE test_alter_autoinc DROP COLUMN id, ADD new_id NUMERIC(10,4) AUTO_INC
NOTICE: ALTER TABLE will create implicit sequence "test_alter_autoinc_new_id_seq" for serial column "test_alter_autoinc.new_id"
ERROR: The datatype of column 'new_id' does not support auto_increment
ALTER TABLE test_alter_autoinc DROP CONSTRAINT test_alter_autoinc_pkey;
-ERROR: Incorrect table definition, auto_increment column must be defined as a key
+ERROR: auto_increment column must be defined as a unique or primary key
ALTER TABLE test_alter_autoinc auto_increment=-1;
ERROR: syntax error at or near "-"
LINE 1: ALTER TABLE test_alter_autoinc auto_increment=-1;
@@ -684,9 +705,6 @@ DETAIL: text exceeds the length of int16
ALTER TABLE test_alter_autoinc auto_increment=1.1;
ERROR: invalid input syntax for type int16: "1.1"
DETAIL: text contain invalid character
-ALTER TABLE test_alter_autoinc MODIFY id BIGINT;
-ERROR: Un-supported feature
-DETAIL: auto-increment column cannot be modified.
ALTER LARGE SEQUENCE test_alter_autoinc_id_seq1 RESTART;
ERROR: cannot alter sequence owned by auto_increment column
ALTER LARGE SEQUENCE test_alter_autoinc_id_seq1 maxvalue 90;
@@ -747,15 +765,66 @@ INSERT INTO test_alter_autoinc VALUES(2);
ALTER TABLE test_alter_autoinc ADD COLUMN id int AUTO_INCREMENT NULL UNIQUE;
NOTICE: ALTER TABLE will create implicit sequence "test_alter_autoinc_id_seq" for serial column "test_alter_autoinc.id"
NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_alter_autoinc_id_key" for table "test_alter_autoinc"
-INSERT INTO test_alter_autoinc VALUES(3,0);
SELECT id, col FROM test_alter_autoinc ORDER BY 1, 2;
id | col
----+-----
- 1 | 3
| 1
| 2
+(2 rows)
+
+INSERT INTO test_alter_autoinc VALUES(3,NULL);
+SELECT id, col FROM test_alter_autoinc ORDER BY 1, 2;
+ id | col
+----+-----
+ | 1
+ | 2
+ | 3
(3 rows)
+INSERT INTO test_alter_autoinc VALUES(4,0);
+SELECT id, col FROM test_alter_autoinc ORDER BY 1, 2;
+ id | col
+----+-----
+ 2 | 4
+ | 1
+ | 2
+ | 3
+(4 rows)
+
+DROP TABLE test_alter_autoinc;
+--test alter table add NULL AUTO_INCREMENT UNIQUE
+CREATE TABLE test_alter_autoinc(col int);
+INSERT INTO test_alter_autoinc VALUES(1);
+INSERT INTO test_alter_autoinc VALUES(2);
+ALTER TABLE test_alter_autoinc ADD COLUMN id int NULL AUTO_INCREMENT UNIQUE;
+NOTICE: ALTER TABLE will create implicit sequence "test_alter_autoinc_id_seq" for serial column "test_alter_autoinc.id"
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_alter_autoinc_id_key" for table "test_alter_autoinc"
+SELECT id, col FROM test_alter_autoinc ORDER BY 1, 2;
+ id | col
+----+-----
+ 1 | 1
+ 2 | 2
+(2 rows)
+
+INSERT INTO test_alter_autoinc VALUES(3,NULL);
+SELECT id, col FROM test_alter_autoinc ORDER BY 1, 2;
+ id | col
+----+-----
+ 1 | 1
+ 2 | 2
+ 3 | 3
+(3 rows)
+
+INSERT INTO test_alter_autoinc VALUES(4,0);
+SELECT id, col FROM test_alter_autoinc ORDER BY 1, 2;
+ id | col
+----+-----
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+(4 rows)
+
DROP TABLE test_alter_autoinc;
--local temp table with data
CREATE TEMPORARY TABLE test_alter_autoinc_ltemp(col int);
@@ -766,7 +835,7 @@ NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_alter_au
SELECT LAST_INSERT_ID();
last_insert_id
----------------
- 1
+ 4
(1 row)
SELECT id, col FROM test_alter_autoinc_ltemp ORDER BY 1, 2;
@@ -975,7 +1044,7 @@ ERROR: cannot alter auto_increment column "id" default
ALTER TABLE test_alter_autoinc_ltemp DROP COLUMN id, ADD new_id NUMERIC(10,4) AUTO_INCREMENT PRIMARY KEY;
ERROR: must have at least one column
ALTER TABLE test_alter_autoinc_ltemp DROP CONSTRAINT test_alter_autoinc_ltemp_u1;
-ERROR: Incorrect table definition, auto_increment column must be defined as a key
+ERROR: auto_increment column must be defined as a unique or primary key
ALTER TABLE test_alter_autoinc_ltemp auto_increment=-1;
ERROR: syntax error at or near "-"
LINE 1: ALTER TABLE test_alter_autoinc_ltemp auto_increment=-1;
@@ -986,9 +1055,6 @@ DETAIL: text exceeds the length of int16
ALTER TABLE test_alter_autoinc_ltemp auto_increment=1.1;
ERROR: invalid input syntax for type int16: "1.1"
DETAIL: text contain invalid character
-ALTER TABLE test_alter_autoinc_ltemp MODIFY id BIGINT;
-ERROR: Un-supported feature
-DETAIL: auto-increment column cannot be modified.
DROP TABLE test_alter_autoinc_ltemp;
--global temp table with data
CREATE GLOBAL TEMPORARY TABLE test_alter_autoinc_gtemp(col int);
@@ -1080,9 +1146,10 @@ CREATE TABLE test_alter_autoinc(
a int,
b varchar(32)
);
-ALTER TABLE test_alter_autoinc ADD COLUMN seq int AUTO_INCREMENT, ADD CONSTRAINT test_alter_autoinc_uk UNIQUE ((seq + 1), seq); -- ERROR
+ALTER TABLE test_alter_autoinc ADD COLUMN seq int AUTO_INCREMENT, ADD CONSTRAINT test_alter_autoinc_uk UNIQUE (a, seq); -- ERROR
NOTICE: ALTER TABLE will create implicit sequence "test_alter_autoinc_seq_seq" for serial column "test_alter_autoinc.seq"
-ERROR: Incorrect table definition, auto_increment column must be defined as a key
+NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_alter_autoinc_uk" for table "test_alter_autoinc"
+ERROR: auto_increment column must be defined as a unique or primary key
ALTER TABLE test_alter_autoinc ADD COLUMN seq int AUTO_INCREMENT, ADD CONSTRAINT test_alter_autoinc_uk UNIQUE (seq);
NOTICE: ALTER TABLE will create implicit sequence "test_alter_autoinc_seq_seq" for serial column "test_alter_autoinc.seq"
NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "test_alter_autoinc_uk" for table "test_alter_autoinc"
@@ -1102,12 +1169,12 @@ SELECT pg_get_tabledef('test_alter_autoinc'::regclass);
(1 row)
ALTER TABLE test_alter_autoinc DROP CONSTRAINT test_alter_autoinc_uk; -- ERROR
-ERROR: Incorrect table definition, auto_increment column must be defined as a key
+ERROR: auto_increment column must be defined as a unique or primary key
DROP INDEX test_alter_autoinc_idx1;
ALTER TABLE test_alter_autoinc DROP CONSTRAINT test_alter_autoinc_uk, ADD CONSTRAINT test_alter_autoinc_pk PRIMARY KEY (seq);
NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "test_alter_autoinc_pk" for table "test_alter_autoinc"
ALTER TABLE test_alter_autoinc DROP CONSTRAINT test_alter_autoinc_pk; -- ERROR
-ERROR: Incorrect table definition, auto_increment column must be defined as a key
+ERROR: auto_increment column must be defined as a unique or primary key
DROP TABLE test_alter_autoinc;
-- auto_increment in table with single column PRIMARY KEY
CREATE TABLE single_autoinc_pk(col int auto_increment PRIMARY KEY) AUTO_INCREMENT = 10;
@@ -1386,6 +1453,142 @@ SELECT pg_catalog.pg_get_tabledef('single_autoinc_uk');
WITH (orientation=row, compression=no);
(1 row)
+DROP TABLE single_autoinc_uk;
+-- auto_increment in table with single column NULL auto_increment UNIQUE
+CREATE TABLE single_autoinc_uk(col int NULL auto_increment UNIQUE KEY) AUTO_INCREMENT = 10;
+NOTICE: CREATE TABLE will create implicit sequence "single_autoinc_uk_col_seq" for serial column "single_autoinc_uk.col"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "single_autoinc_uk_col_key" for table "single_autoinc_uk"
+INSERT INTO single_autoinc_uk VALUES(NULL);
+SELECT LAST_INSERT_ID();
+ last_insert_id
+----------------
+ 10
+(1 row)
+
+SELECT col FROM single_autoinc_uk ORDER BY 1;
+ col
+-----
+ 10
+(1 row)
+
+INSERT INTO single_autoinc_uk VALUES(1 - 1);
+SELECT LAST_INSERT_ID();
+ last_insert_id
+----------------
+ 11
+(1 row)
+
+SELECT col FROM single_autoinc_uk ORDER BY 1;
+ col
+-----
+ 10
+ 11
+(2 rows)
+
+INSERT INTO single_autoinc_uk VALUES(100);
+SELECT col FROM single_autoinc_uk ORDER BY 1;
+ col
+-----
+ 10
+ 11
+ 100
+(3 rows)
+
+INSERT INTO single_autoinc_uk VALUES(DEFAULT);
+SELECT LAST_INSERT_ID();
+ last_insert_id
+----------------
+ 101
+(1 row)
+
+SELECT col FROM single_autoinc_uk ORDER BY 1;
+ col
+-----
+ 10
+ 11
+ 100
+ 101
+(4 rows)
+
+SELECT pg_catalog.pg_get_tabledef('single_autoinc_uk');
+ pg_get_tabledef
+-------------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE single_autoinc_uk ( +
+ col integer AUTO_INCREMENT NOT NULL, +
+ CONSTRAINT single_autoinc_uk_col_key UNIQUE (col)+
+ ) AUTO_INCREMENT = 102 +
+ WITH (orientation=row, compression=no);
+(1 row)
+
+DROP TABLE single_autoinc_uk;
+-- auto_increment in table with single column auto_increment UNIQUE
+CREATE TABLE single_autoinc_uk(col int auto_increment UNIQUE KEY) AUTO_INCREMENT = 10;
+NOTICE: CREATE TABLE will create implicit sequence "single_autoinc_uk_col_seq" for serial column "single_autoinc_uk.col"
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "single_autoinc_uk_col_key" for table "single_autoinc_uk"
+INSERT INTO single_autoinc_uk VALUES(NULL);
+SELECT LAST_INSERT_ID();
+ last_insert_id
+----------------
+ 10
+(1 row)
+
+SELECT col FROM single_autoinc_uk ORDER BY 1;
+ col
+-----
+ 10
+(1 row)
+
+INSERT INTO single_autoinc_uk VALUES(1 - 1);
+SELECT LAST_INSERT_ID();
+ last_insert_id
+----------------
+ 11
+(1 row)
+
+SELECT col FROM single_autoinc_uk ORDER BY 1;
+ col
+-----
+ 10
+ 11
+(2 rows)
+
+INSERT INTO single_autoinc_uk VALUES(100);
+SELECT col FROM single_autoinc_uk ORDER BY 1;
+ col
+-----
+ 10
+ 11
+ 100
+(3 rows)
+
+INSERT INTO single_autoinc_uk VALUES(DEFAULT);
+SELECT LAST_INSERT_ID();
+ last_insert_id
+----------------
+ 101
+(1 row)
+
+SELECT col FROM single_autoinc_uk ORDER BY 1;
+ col
+-----
+ 10
+ 11
+ 100
+ 101
+(4 rows)
+
+SELECT pg_catalog.pg_get_tabledef('single_autoinc_uk');
+ pg_get_tabledef
+-------------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE single_autoinc_uk ( +
+ col integer AUTO_INCREMENT NOT NULL, +
+ CONSTRAINT single_autoinc_uk_col_key UNIQUE (col)+
+ ) AUTO_INCREMENT = 102 +
+ WITH (orientation=row, compression=no);
+(1 row)
+
DROP TABLE single_autoinc_uk;
-- test auto_increment with rollback
CREATE TABLE single_autoinc_rollback(col int auto_increment PRIMARY KEY) AUTO_INCREMENT = 10;
@@ -5154,4 +5357,5 @@ SELECT col1,col2 FROM test_autoinc_batch_copy ORDER BY 1;
drop table test_autoinc_batch_copy;
\c regression
+clean connection to all force for database autoinc_b_db;
drop database if exists autoinc_b_db;
diff --git a/src/test/regress/expected/test_b_format_collate.out b/src/test/regress/expected/test_b_format_collate.out
new file mode 100644
index 000000000..708bdd196
--- /dev/null
+++ b/src/test/regress/expected/test_b_format_collate.out
@@ -0,0 +1,1773 @@
+create database test_collate_A dbcompatibility = 'A';
+create database test_collate_B dbcompatibility = 'B';
+\c test_collate_A
+-- test A format
+select 'abCdEf' = 'abcdef' collate "utf8mb4_general_ci";
+ERROR: Un-support feature
+LINE 1: select 'abCdEf' = 'abcdef' collate "utf8mb4_general_ci";
+ ^
+DETAIL: this collation only support in B-format database
+select 'AAaabb'::char = 'AAaABb'::char collate "utf8mb4_general_ci";
+ERROR: Un-support feature
+LINE 1: select 'AAaabb'::char = 'AAaABb'::char collate "utf8mb4_gene...
+ ^
+DETAIL: this collation only support in B-format database
+select 'abCdEf' = 'abcdef' collate "utf8mb4_unicode_ci";
+ERROR: Un-support feature
+LINE 1: select 'abCdEf' = 'abcdef' collate "utf8mb4_unicode_ci";
+ ^
+DETAIL: this collation only support in B-format database
+select 'AAaabb'::char = 'AAaABb'::char collate "utf8mb4_unicode_ci";
+ERROR: Un-support feature
+LINE 1: select 'AAaabb'::char = 'AAaABb'::char collate "utf8mb4_unic...
+ ^
+DETAIL: this collation only support in B-format database
+select 'abCdEf' = 'abcdef' collate "utf8mb4_bin";
+ERROR: Un-support feature
+LINE 1: select 'abCdEf' = 'abcdef' collate "utf8mb4_bin";
+ ^
+DETAIL: this collation only support in B-format database
+select 'AAaabb'::char = 'AAaABb'::char collate "utf8mb4_bin";
+ERROR: Un-support feature
+LINE 1: select 'AAaabb'::char = 'AAaABb'::char collate "utf8mb4_bin"...
+ ^
+DETAIL: this collation only support in B-format database
+drop table if exists t1;
+NOTICE: table "t1" does not exist, skipping
+create table t1(a varchar(10) collate "utf8mb4_general_ci");
+ERROR: Un-support feature
+LINE 1: create table t1(a varchar(10) collate "utf8mb4_general_ci");
+ ^
+DETAIL: this collation only support in B-format database
+drop table if exists t1;
+NOTICE: table "t1" does not exist, skipping
+create table t1(a text);
+create index idx_1 on t1(a collate "utf8mb4_unicode_ci");
+ERROR: Un-support feature
+DETAIL: this collation only support in B-format database
+create unique index idx_2 on t1(a collate "utf8mb4_unicode_ci");
+ERROR: Un-support feature
+DETAIL: this collation only support in B-format database
+-- test binary
+drop table if exists t1;
+create table t1(a blob collate binary);
+ERROR: collation "binary" for encoding "UTF8" does not exist
+LINE 1: create table t1(a blob collate binary);
+ ^
+create table t1(a blob collate utf8mb4_bin);
+ERROR: Un-support feature
+LINE 1: create table t1(a blob collate utf8mb4_bin);
+ ^
+DETAIL: this collation only support in B-format database
+create table t1(a blob);
+-- test B format
+\c test_collate_B
+-- test create table/alter table
+drop table if exists t_collate;
+NOTICE: table "t_collate" does not exist, skipping
+create table t_collate(id int, f1 text collate "utf8mb4_general_ci");
+alter table t_collate add column f2 text collate "utf8mb4_unicode_ci",add column f3 varchar collate "utf8mb4_general_ci";
+alter table t_collate alter f1 type text collate "utf8mb4_bin";
+\d+ t_collate
+ Table "public.t_collate"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-------------------+----------------------------+----------+--------------+-------------
+ id | integer | | plain | |
+ f1 | text | collate utf8mb4_bin | extended | |
+ f2 | text | collate utf8mb4_unicode_ci | extended | |
+ f3 | character varying | collate utf8mb4_general_ci | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+-- test create index
+insert into t_collate select generate_series(1,1000), repeat(chr(int4(random()*26)+65),4),repeat(chr(int4(random()*26)+97),4),repeat(chr(int4(random()*26)+97),4);
+create index idx_f1_default on t_collate(f3);
+explain (verbose, costs off) select * from t_collate where f3 in ('aaaa','bbbb');
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Bitmap Heap Scan on public.t_collate
+ Output: id, f1, f2, f3
+ Recheck Cond: ((t_collate.f3)::text = ANY ('{aaaa,bbbb}'::text[]))
+ -> Bitmap Index Scan on idx_f1_default
+ Index Cond: ((t_collate.f3)::text = ANY ('{aaaa,bbbb}'::text[]))
+(5 rows)
+
+drop index if exists idx_f1_default;
+create index idx_f1_utf8mb4 on t_collate(f3 collate "utf8mb4_general_ci");
+explain (verbose, costs off) select * from t_collate where f3 in ('aaaa','bbbb');
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Bitmap Heap Scan on public.t_collate
+ Output: id, f1, f2, f3
+ Recheck Cond: ((t_collate.f3)::text = ANY ('{aaaa,bbbb}'::text[]))
+ -> Bitmap Index Scan on idx_f1_utf8mb4
+ Index Cond: ((t_collate.f3)::text = ANY ('{aaaa,bbbb}'::text[]))
+(5 rows)
+
+drop index if exists idx_f1_utf8mb4;
+create index idx_f1_C on t_collate(f3 collate "C");
+explain (verbose, costs off) select * from t_collate where f3 in ('aaaa','bbbb');
+ QUERY PLAN
+----------------------------------------------------------------
+ Seq Scan on public.t_collate
+ Output: id, f1, f2, f3
+ Filter: ((t_collate.f3)::text = ANY ('{aaaa,bbbb}'::text[]))
+(3 rows)
+
+drop index if exists idx_f1_C;
+drop table if exists t_collate;
+--test unique/primary key
+drop table if exists t_uft8_general_text;
+NOTICE: table "t_uft8_general_text" does not exist, skipping
+create table t_uft8_general_text(f1 text unique collate "utf8mb4_general_ci");
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t_uft8_general_text_f1_key" for table "t_uft8_general_text"
+insert into t_uft8_general_text values('S');
+insert into t_uft8_general_text values('s'); -- fail
+ERROR: duplicate key value violates unique constraint "t_uft8_general_text_f1_key"
+DETAIL: Key (f1)=(s) already exists.
+insert into t_uft8_general_text values('ś'); -- fail
+ERROR: duplicate key value violates unique constraint "t_uft8_general_text_f1_key"
+DETAIL: Key (f1)=(ś) already exists.
+insert into t_uft8_general_text values('Š'); -- fail
+ERROR: duplicate key value violates unique constraint "t_uft8_general_text_f1_key"
+DETAIL: Key (f1)=(Š) already exists.
+drop table if exists t_uft8_general_char;
+NOTICE: table "t_uft8_general_char" does not exist, skipping
+create table t_uft8_general_char(f2 char(10) primary key collate "utf8mb4_general_ci");
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_uft8_general_char_pkey" for table "t_uft8_general_char"
+insert into t_uft8_general_char values('S');
+insert into t_uft8_general_char values('s'); -- fail
+ERROR: duplicate key value violates unique constraint "t_uft8_general_char_pkey"
+DETAIL: Key (f2)=(s ) already exists.
+insert into t_uft8_general_char values('ś'); -- fail
+ERROR: duplicate key value violates unique constraint "t_uft8_general_char_pkey"
+DETAIL: Key (f2)=(ś ) already exists.
+insert into t_uft8_general_char values('Š'); -- fail
+ERROR: duplicate key value violates unique constraint "t_uft8_general_char_pkey"
+DETAIL: Key (f2)=(Š ) already exists.
+drop table if exists t_uft8_unicode_text;
+NOTICE: table "t_uft8_unicode_text" does not exist, skipping
+create table t_uft8_unicode_text(f1 text unique collate "utf8mb4_unicode_ci");
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "t_uft8_unicode_text_f1_key" for table "t_uft8_unicode_text"
+insert into t_uft8_unicode_text values('S');
+insert into t_uft8_unicode_text values('s'); -- fail
+ERROR: duplicate key value violates unique constraint "t_uft8_unicode_text_f1_key"
+DETAIL: Key (f1)=(s) already exists.
+insert into t_uft8_unicode_text values('ś'); -- fail
+ERROR: duplicate key value violates unique constraint "t_uft8_unicode_text_f1_key"
+DETAIL: Key (f1)=(ś) already exists.
+insert into t_uft8_unicode_text values('Š'); -- fail
+ERROR: duplicate key value violates unique constraint "t_uft8_unicode_text_f1_key"
+DETAIL: Key (f1)=(Š) already exists.
+drop table if exists t_uft8_unicode_char;
+NOTICE: table "t_uft8_unicode_char" does not exist, skipping
+create table t_uft8_unicode_char(f2 char(10) primary key collate "utf8mb4_unicode_ci");
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_uft8_unicode_char_pkey" for table "t_uft8_unicode_char"
+insert into t_uft8_unicode_char values('S');
+insert into t_uft8_unicode_char values('s'); -- fail
+ERROR: duplicate key value violates unique constraint "t_uft8_unicode_char_pkey"
+DETAIL: Key (f2)=(s ) already exists.
+insert into t_uft8_unicode_char values('ś'); -- fail
+ERROR: duplicate key value violates unique constraint "t_uft8_unicode_char_pkey"
+DETAIL: Key (f2)=(ś ) already exists.
+insert into t_uft8_unicode_char values('Š'); -- fail
+ERROR: duplicate key value violates unique constraint "t_uft8_unicode_char_pkey"
+DETAIL: Key (f2)=(Š ) already exists.
+--
+-- test collate utf8mb4_general_ci
+--
+-- test collation used in expr
+select 'abCdEf' = 'abcdef' collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'abCdEf' != 'abcdef' collate "utf8mb4_general_ci";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'abCdEf' > 'abcdef' collate "utf8mb4_general_ci";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'abCdEf' < 'abcdef' collate "utf8mb4_general_ci";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'abCdEf'::character varying = 'abcdef'::character varying collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'abCdEf'::clob = 'abcdef'::clob collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'abCdEf'::bpchar = 'abcdef'::bpchar collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'abCdEf'::char(10) = 'abcdef'::char(10);
+ ?column?
+----------
+ f
+(1 row)
+
+select 'abCdEf'::char(10) = 'abcdef'::char(10) collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'abcdefg'::char(10) = 'abcdef'::char(10) collate "utf8mb4_general_ci";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'abCdEf'::char(10) != 'abcdef'::char(10) collate "utf8mb4_general_ci";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'abCdEf'::char(10) > 'abcdef'::char(10) collate "utf8mb4_general_ci";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'abCdEf'::char(10) < 'abcdef'::char(10) collate "utf8mb4_general_ci";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'abCdEf'::nchar(10) = 'abcdef'::nchar(10) collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'abcdefg'::nchar(10) = 'abcdef'::nchar(10) collate "utf8mb4_general_ci";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'abCdEf'::character(10) = 'abcdef'::character(10) collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'abcdefg'::character(10) = 'abcdef'::character(10) collate "utf8mb4_general_ci";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'ś' = 'Š' collate "utf8mb4_general_ci" , 'Š' = 's' collate "utf8mb4_general_ci";
+ ?column? | ?column?
+----------+----------
+ t | t
+(1 row)
+
+select 'ś' != 'Š' collate "utf8mb4_general_ci", 'Š' != 's' collate "utf8mb4_general_ci";
+ ?column? | ?column?
+----------+----------
+ f | f
+(1 row)
+
+select 'ŠSśs' = 'ssss' collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'ŠSśs'::character varying = 'ssss'::character varying collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'ŠSśs'::clob = 'ssss'::clob collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'ŠSśs'::bpchar = 'ssss'::bpchar collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 's'::char(3) = 'Š'::char(3) collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'ŠSśs'::char = 'ssss'::char collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'ŠSśs'::char(10) = 'ssss'::char(10) collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'ŠSśs'::nchar(10) = 'ssss'::nchar(10) collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'ŠSśs'::character(10) = 'ssss'::character(10) collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+-- compare between different types, expected success
+select 'ŠSśs'::character(10) = 'ssss'::varchar collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'ŠSśs'::clob = 'ssss'::char(10) collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+-- compare str with different collation, expected fail
+select 'abCdEf' collate "utf8mb4_general_ci" = 'abcdef' collate "utf8mb4_general_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'abCdEf' collate "utf8mb4_bin" = 'abcdef' collate "utf8mb4_general_ci";
+ERROR: collation mismatch between explicit collations "utf8mb4_bin" and "utf8mb4_general_ci"
+LINE 1: select 'abCdEf' collate "utf8mb4_bin" = 'abcdef' collate "u...
+ ^
+select 'abCdEf' collate "utf8mb4_bin" = 'abcdef' collate "C";
+ERROR: collation mismatch between explicit collations "utf8mb4_bin" and "C"
+LINE 1: select 'abCdEf' collate "utf8mb4_bin" = 'abcdef' collate "C...
+ ^
+-- types not support collation, expected fail
+select 100 > 50 collate "utf8mb4_general_ci";
+ERROR: collations are not supported by type integer
+LINE 1: select 100 > 50 collate "utf8mb4_general_ci";
+ ^
+select '0'::bool = '1'::bool collate "utf8mb4_general_ci";
+ERROR: collations are not supported by type boolean
+LINE 1: select '0'::bool = '1'::bool collate "utf8mb4_general_ci";
+ ^
+select '100'::money > '50'::money collate "utf8mb4_general_ci";
+ERROR: collations are not supported by type money
+LINE 1: select '100'::money > '50'::money collate "utf8mb4_general_c...
+ ^
+select '00:00:02'::time > '00:00:01'::time collate "utf8mb4_general_ci";
+ERROR: collations are not supported by type time without time zone
+LINE 1: select '00:00:02'::time > '00:00:01'::time collate "utf8mb4_...
+ ^
+-- test column collation
+drop table if exists column_collate;
+NOTICE: table "column_collate" does not exist, skipping
+create table column_collate(f1 text collate "utf8mb4_general_ci", f2 char(15) collate "utf8mb4_general_ci");
+insert into column_collate values('S','S'),('s','s'),('ś','ś'),('Š','Š'),('z','z'),('Z','Z'),('c','c'),('A','A'),('C','C');
+insert into column_collate values('AaA','AaA'),('bb','bb'),('aAA','aAA'),('Bb','Bb'),('dD','dd'),('Cc','Cc'),('AAA','AAA');
+insert into column_collate values('A1中文','A1中文'), ('b1中文','b1中文'), ('a2中文','a2中文'),
+('B2中文','B2中文'), ('中文d1','中文d1'), ('中文C1','中文C1'), ('中文A3','中文A3');
+-- test where clause
+select f1 from column_collate where f1 = 's';
+ f1
+----
+ S
+ s
+ ś
+ Š
+(4 rows)
+
+select f1 from column_collate where f1 = 'aaa';
+ f1
+-----
+ AaA
+ aAA
+ AAA
+(3 rows)
+
+select f2 from column_collate where f2 = 's';
+ f2
+-----------------
+ S
+ s
+ ś
+ Š
+(4 rows)
+
+select f2 from column_collate where f2 = 'aaa';
+ f2
+-----------------
+ AaA
+ aAA
+ AAA
+(3 rows)
+
+-- test order by clause
+select f1 from column_collate order by f1;
+ f1
+--------
+ A
+ A1中文
+ a2中文
+ AaA
+ AAA
+ aAA
+ b1中文
+ B2中文
+ Bb
+ bb
+ c
+ C
+ Cc
+ dD
+ S
+ s
+ ś
+ Š
+ Z
+ z
+ 中文A3
+ 中文C1
+ 中文d1
+(23 rows)
+
+select f2 from column_collate order by f2;
+ f2
+-----------------
+ A
+ A1中文
+ a2中文
+ AaA
+ AAA
+ aAA
+ b1中文
+ B2中文
+ Bb
+ bb
+ c
+ C
+ Cc
+ dd
+ S
+ s
+ ś
+ Š
+ Z
+ z
+ 中文A3
+ 中文C1
+ 中文d1
+(23 rows)
+
+-- test distinct clause
+insert into column_collate values ('AbcdEf','AbcdEf'), ('abcdEF','abcdEF'), ('中文AbCdEFG','中文AbCdEFG'),
+('中文abcdEFG','中文abcdEFG'), ('中文Ab','中文Ab'), ('中文ab','中文ab');
+select distinct f1 from column_collate;
+ f1
+-------------
+ a2中文
+ Cc
+ dD
+ 中文A3
+ B2中文
+ A
+ c
+ AbcdEf
+ 中文Ab
+ 中文AbCdEFG
+ 中文C1
+ b1中文
+ bb
+ S
+ 中文d1
+ z
+ A1中文
+ AaA
+(18 rows)
+
+select distinct f2 from column_collate;
+ f2
+-----------------
+ a2中文
+ Cc
+ dd
+ 中文A3
+ B2中文
+ A
+ c
+ AbcdEf
+ 中文Ab
+ 中文AbCdEFG
+ 中文C1
+ b1中文
+ bb
+ S
+ 中文d1
+ z
+ A1中文
+ AaA
+(18 rows)
+
+explain (verbose, costs off) select distinct (f1) from column_collate order by f1;
+ QUERY PLAN
+----------------------------------------------------------
+ Sort
+ Output: f1
+ Sort Key: column_collate.f1 COLLATE utf8mb4_general_ci
+ -> HashAggregate
+ Output: f1
+ Group By Key: column_collate.f1
+ -> Seq Scan on public.column_collate
+ Output: f1
+(8 rows)
+
+select distinct f1 from column_collate order by f1;
+ f1
+-------------
+ A
+ A1中文
+ a2中文
+ AaA
+ AbcdEf
+ b1中文
+ B2中文
+ bb
+ c
+ Cc
+ dD
+ S
+ z
+ 中文A3
+ 中文Ab
+ 中文AbCdEFG
+ 中文C1
+ 中文d1
+(18 rows)
+
+select distinct f2 from column_collate order by f2;
+ f2
+-----------------
+ A
+ A1中文
+ a2中文
+ AaA
+ AbcdEf
+ b1中文
+ B2中文
+ bb
+ c
+ Cc
+ dd
+ S
+ z
+ 中文A3
+ 中文Ab
+ 中文AbCdEFG
+ 中文C1
+ 中文d1
+(18 rows)
+
+--test unique node
+analyze column_collate;
+explain (verbose, costs off) select distinct (f1) from column_collate order by f1;
+ QUERY PLAN
+----------------------------------------------------------------
+ Unique
+ Output: f1
+ -> Sort
+ Output: f1
+ Sort Key: column_collate.f1 COLLATE utf8mb4_general_ci
+ -> Seq Scan on public.column_collate
+ Output: f1
+(7 rows)
+
+select distinct f1 from column_collate order by f1;
+ f1
+-------------
+ A
+ A1中文
+ a2中文
+ AAA
+ abcdEF
+ b1中文
+ B2中文
+ bb
+ C
+ Cc
+ dD
+ S
+ Z
+ 中文A3
+ 中文ab
+ 中文AbCdEFG
+ 中文C1
+ 中文d1
+(18 rows)
+
+select distinct f2 from column_collate order by f2;
+ f2
+-----------------
+ A
+ A1中文
+ a2中文
+ AAA
+ abcdEF
+ b1中文
+ B2中文
+ bb
+ C
+ Cc
+ dd
+ S
+ Z
+ 中文A3
+ 中文ab
+ 中文AbCdEFG
+ 中文C1
+ 中文d1
+(18 rows)
+
+-- test group by
+select count(f1),f1 from column_collate group by f1;
+ count | f1
+-------+-------------
+ 1 | Cc
+ 1 | A1中文
+ 1 | 中文C1
+ 1 | A
+ 1 | b1中文
+ 1 | a2中文
+ 2 | c
+ 1 | dD
+ 2 | AbcdEf
+ 2 | 中文Ab
+ 1 | 中文d1
+ 1 | 中文A3
+ 2 | bb
+ 1 | B2中文
+ 2 | z
+ 4 | S
+ 3 | AaA
+ 2 | 中文AbCdEFG
+(18 rows)
+
+select count(f2),f2 from column_collate group by f2;
+ count | f2
+-------+-----------------
+ 1 | Cc
+ 1 | A1中文
+ 1 | 中文C1
+ 1 | A
+ 1 | b1中文
+ 1 | a2中文
+ 2 | c
+ 1 | dd
+ 2 | AbcdEf
+ 2 | 中文Ab
+ 1 | 中文d1
+ 1 | 中文A3
+ 2 | bb
+ 1 | B2中文
+ 2 | z
+ 4 | S
+ 3 | AaA
+ 2 | 中文AbCdEFG
+(18 rows)
+
+-- test like
+select f1 from column_collate where f1 like 'A_%';
+ f1
+--------
+ AaA
+ aAA
+ AAA
+ A1中文
+ a2中文
+ AbcdEf
+ abcdEF
+(7 rows)
+
+select f1 from column_collate where f1 like '%s%';
+ f1
+----
+ S
+ s
+ ś
+ Š
+(4 rows)
+
+select f1 from column_collate where f1 like 'A%f';
+ f1
+--------
+ AbcdEf
+ abcdEF
+(2 rows)
+
+select f1 from column_collate where f1 like 'A__';
+ f1
+-----
+ AaA
+ aAA
+ AAA
+(3 rows)
+
+select f1 from column_collate where f1 like '\A__';
+ f1
+-----
+ AaA
+ aAA
+ AAA
+(3 rows)
+
+select f1 from column_collate where f1 like 'A%\'; -- error
+ERROR: LIKE pattern must not end with escape character
+select f1 from column_collate where f1 like 'A_\';-- error
+ERROR: LIKE pattern must not end with escape character
+select f2 from column_collate where f2 like 'A_%';
+ f2
+-----------------
+ A
+ AaA
+ aAA
+ AAA
+ A1中文
+ a2中文
+ AbcdEf
+ abcdEF
+(8 rows)
+
+select f2 from column_collate where f2 like 'A%\'; -- error
+ERROR: LIKE pattern must not end with escape character
+select f2 from column_collate where f2 like 'A_\';-- error
+ERROR: LIKE pattern must not end with escape character
+-- test notlike
+select f1 from column_collate where f1 not like 'A_%';
+ f1
+-------------
+ S
+ s
+ ś
+ Š
+ z
+ Z
+ c
+ A
+ C
+ bb
+ Bb
+ dD
+ Cc
+ b1中文
+ B2中文
+ 中文d1
+ 中文C1
+ 中文A3
+ 中文AbCdEFG
+ 中文abcdEFG
+ 中文Ab
+ 中文ab
+(22 rows)
+
+select f1 from column_collate where f1 not like '%s%';
+ f1
+-------------
+ z
+ Z
+ c
+ A
+ C
+ AaA
+ bb
+ aAA
+ Bb
+ dD
+ Cc
+ AAA
+ A1中文
+ b1中文
+ a2中文
+ B2中文
+ 中文d1
+ 中文C1
+ 中文A3
+ AbcdEf
+ abcdEF
+ 中文AbCdEFG
+ 中文abcdEFG
+ 中文Ab
+ 中文ab
+(25 rows)
+
+-- test hashjoin
+drop table if exists test_join1;
+NOTICE: table "test_join1" does not exist, skipping
+drop table if exists test_join2;
+NOTICE: table "test_join2" does not exist, skipping
+create table test_join1(f1 text collate "utf8mb4_general_ci", f2 char(15) collate "utf8mb4_general_ci");
+insert into test_join1 values('S','S'),('s','s'),('ś','ś'),('Š','Š');
+create table test_join2(f1 text collate "utf8mb4_general_ci", f2 char(15) collate "utf8mb4_general_ci");
+insert into test_join2 values('S','S');
+create table test_join3(f1 text collate "utf8mb4_unicode_ci", f2 char(15) collate "utf8mb4_unicode_ci");
+insert into test_join3 values('S','S');
+explain (verbose, costs off) select tab1.f1, tab2.f1 from test_join1 as tab1, test_join2 as tab2 where tab1.f1 = tab2.f1;
+ QUERY PLAN
+------------------------------------------------
+ Hash Join
+ Output: tab1.f1, tab2.f1
+ Hash Cond: (tab1.f1 = tab2.f1)
+ -> Seq Scan on public.test_join1 tab1
+ Output: tab1.f1, tab1.f2
+ -> Hash
+ Output: tab2.f1
+ -> Seq Scan on public.test_join2 tab2
+ Output: tab2.f1
+(9 rows)
+
+select tab1.f1, tab2.f1 from test_join1 as tab1, test_join2 as tab2 where tab1.f1 = tab2.f1;
+ f1 | f1
+----+----
+ S | S
+ s | S
+ ś | S
+ Š | S
+(4 rows)
+
+select tab1.f1, tab2.f1 from test_join1 as tab1, test_join2 as tab2 where tab1.f1 = tab2.f1 collate "C";
+ f1 | f1
+----+----
+ S | S
+(1 row)
+
+select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1 collate "utf8mb4_bin"
+select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1 collate "utf8mb4_general_ci";
+ERROR: syntax error at or near "select"
+LINE 2: select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 ...
+ ^
+select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1; --fail
+ f1 | f1
+----+----
+ S | S
+(1 row)
+
+-- test nestloop
+set enable_hashjoin=off;
+set enable_nestloop=on;
+set enable_mergejoin=off;
+explain (verbose, costs off) select tab1.f1, tab2.f1 from test_join1 as tab1, test_join2 as tab2 where tab1.f1 = tab2.f1;
+ QUERY PLAN
+------------------------------------------------
+ Nested Loop
+ Output: tab1.f1, tab2.f1
+ Join Filter: (tab1.f1 = tab2.f1)
+ -> Seq Scan on public.test_join1 tab1
+ Output: tab1.f1, tab1.f2
+ -> Materialize
+ Output: tab2.f1
+ -> Seq Scan on public.test_join2 tab2
+ Output: tab2.f1
+(9 rows)
+
+select tab1.f1, tab2.f1 from test_join1 as tab1, test_join2 as tab2 where tab1.f1 = tab2.f1;
+ f1 | f1
+----+----
+ S | S
+ s | S
+ ś | S
+ Š | S
+(4 rows)
+
+select tab1.f1, tab2.f1 from test_join1 as tab1, test_join2 as tab2 where tab1.f1 = tab2.f1 collate "C";
+ f1 | f1
+----+----
+ S | S
+(1 row)
+
+select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1 collate "utf8mb4_bin"
+select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1 collate "utf8mb4_general_ci";
+ERROR: syntax error at or near "select"
+LINE 2: select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 ...
+ ^
+select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1; --fail
+ f1 | f1
+----+----
+ S | S
+(1 row)
+
+-- test mergejoin
+set enable_hashjoin=off;
+set enable_nestloop=off;
+set enable_mergejoin=on;
+explain (verbose, costs off) select tab1.f1, tab2.f1 from test_join1 as tab1, test_join2 as tab2 where tab1.f1 = tab2.f1;
+ QUERY PLAN
+------------------------------------------------------
+ Merge Join
+ Output: tab1.f1, tab2.f1
+ Merge Cond: (tab1.f1 = tab2.f1)
+ -> Sort
+ Output: tab1.f1
+ Sort Key: tab1.f1 COLLATE utf8mb4_general_ci
+ -> Seq Scan on public.test_join1 tab1
+ Output: tab1.f1
+ -> Sort
+ Output: tab2.f1
+ Sort Key: tab2.f1 COLLATE utf8mb4_general_ci
+ -> Seq Scan on public.test_join2 tab2
+ Output: tab2.f1
+(13 rows)
+
+select tab1.f1, tab2.f1 from test_join1 as tab1, test_join2 as tab2 where tab1.f1 = tab2.f1;
+ f1 | f1
+----+----
+ S | S
+ s | S
+ ś | S
+ Š | S
+(4 rows)
+
+select tab1.f1, tab2.f1 from test_join1 as tab1, test_join2 as tab2 where tab1.f1 = tab2.f1 collate "C";
+ f1 | f1
+----+----
+ S | S
+(1 row)
+
+select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1 collate "utf8mb4_bin";
+ f1 | f1
+----+----
+ S | S
+(1 row)
+
+select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1 collate "utf8mb4_general_ci";
+ f1 | f1
+----+----
+ S | S
+ s | S
+ ś | S
+ Š | S
+(4 rows)
+
+select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1; --fail
+ERROR: could not determine which collation to use for string comparison
+HINT: Use the COLLATE clause to set the collation explicitly.
+-- test union
+drop table if exists test_sep_option1;
+NOTICE: table "test_sep_option1" does not exist, skipping
+drop table if exists test_sep_option2;
+NOTICE: table "test_sep_option2" does not exist, skipping
+drop table if exists test_sep_option3;
+NOTICE: table "test_sep_option3" does not exist, skipping
+drop table if exists test_sep_option4;
+NOTICE: table "test_sep_option4" does not exist, skipping
+create table test_sep_option1(f1 text collate "utf8mb4_general_ci", f2 text collate "utf8mb4_general_ci");
+create table test_sep_option2(f1 text collate "utf8mb4_general_ci", f2 text collate "utf8mb4_general_ci");
+create table test_sep_option3(f1 text collate "utf8mb4_bin", f2 text collate "utf8mb4_bin");
+create table test_sep_option4(f1 text collate "utf8mb4_bin", f2 text collate "utf8mb4_bin");
+insert into test_sep_option1 values ('s','s'),('ś','ś'),('Š','Š');
+insert into test_sep_option2 values ('S','S');
+insert into test_sep_option3 values ('s','s'),('ś','ś'),('Š','Š');
+insert into test_sep_option4 values ('S','S');
+select * from test_sep_option1 union select * from test_sep_option2;
+ f1 | f2
+----+----
+ s | s
+(1 row)
+
+select * from test_sep_option3 union select * from test_sep_option4;
+ f1 | f2
+----+----
+ S | S
+ ś | ś
+ s | s
+ Š | Š
+(4 rows)
+
+select * from test_sep_option1 union select * from test_sep_option3; -- fail
+ERROR: collation mismatch between implicit collations "utf8mb4_general_ci" and "utf8mb4_bin"
+LINE 1: select * from test_sep_option1 union select * from test_sep_...
+ ^
+HINT: You can choose the collation by applying the COLLATE clause to one or both expressions.
+-- test setop
+drop table if exists test_sep_option1;
+drop table if exists test_sep_option2;
+drop table if exists test_sep_option3;
+drop table if exists test_sep_option4;
+create table test_sep_option1(f1 text collate "utf8mb4_general_ci", f2 text collate "utf8mb4_general_ci");
+create table test_sep_option2(f1 text collate "utf8mb4_general_ci", f2 text collate "utf8mb4_general_ci");
+create table test_sep_option3(f1 text collate "utf8mb4_bin", f2 text collate "utf8mb4_bin");
+create table test_sep_option4(f1 text collate "utf8mb4_bin", f2 text collate "utf8mb4_bin");
+insert into test_sep_option1 values ('s','s'),('ś','ś'),('Š','Š');
+insert into test_sep_option2 values ('S','S');
+insert into test_sep_option3 values ('s','s'),('ś','ś'),('Š','Š');
+insert into test_sep_option4 values ('S','S');
+-- test constraint
+drop table if exists test_primary_key;
+NOTICE: table "test_primary_key" does not exist, skipping
+create table test_primary_key(f1 text primary key collate "utf8mb4_general_ci");
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_primary_key_pkey" for table "test_primary_key"
+insert into test_primary_key values ('a');
+insert into test_primary_key values ('A'); -- fail
+ERROR: duplicate key value violates unique constraint "test_primary_key_pkey"
+DETAIL: Key (f1)=(A) already exists.
+drop table if exists test_unique;
+NOTICE: table "test_unique" does not exist, skipping
+create table test_unique(f1 text unique collate "utf8mb4_general_ci");
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_unique_f1_key" for table "test_unique"
+insert into test_unique values ('a');
+insert into test_unique values ('A'); -- fail
+ERROR: duplicate key value violates unique constraint "test_unique_f1_key"
+DETAIL: Key (f1)=(A) already exists.
+drop table if exists test_unique;
+create table test_unique(f1 text collate "utf8mb4_general_ci");
+insert into test_unique values('aaa'), ('AaA');
+create unique index u_idx_1 on test_unique(f1); -- fail
+ERROR: could not create unique index "u_idx_1"
+DETAIL: Key (f1)=(aaa) is duplicated.
+drop table if exists test_constraunt;
+NOTICE: table "test_constraunt" does not exist, skipping
+create table test_constraunt (f1 text);
+alter table test_constraunt add column f text collate "utf8mb4_general_ci"; --success
+--
+-- test ustore with collation utf8mb4_general_ci
+--
+drop table if exists ustore_column_collate;
+NOTICE: table "ustore_column_collate" does not exist, skipping
+create table ustore_column_collate(f1 text collate "utf8mb4_general_ci", f2 char(15) collate "utf8mb4_general_ci") with (storage_type=ustore);
+-- create table column_collate(f1 text collate "utf8mb4_unicode_ci", f2 char(10) collate "utf8mb4_unicode_ci");
+insert into ustore_column_collate values('S','S'),('s','s'),('ś','ś'),('Š','Š'),('z','z'),('Z','Z'),('c','c'),('A','A'),('C','C');
+insert into ustore_column_collate values('AaA','AaA'),('bb','bb'),('aAA','aAA'),('Bb','Bb'),('dD','dd'),('Cc','Cc'),('AAA','AAA');
+insert into ustore_column_collate values('A1中文','A1中文'), ('b1中文','b1中文'), ('a2中文','a2中文'),
+('B2中文','B2中文'), ('中文d1','中文d1'), ('中文C1','中文C1'), ('中文A3','中文A3');
+-- test where clause
+select f1 from ustore_column_collate where f1 = 'aaa';
+ f1
+-----
+ AaA
+ aAA
+ AAA
+(3 rows)
+
+select f2 from ustore_column_collate where f2 = 'aaa';
+ f2
+-----------------
+ AaA
+ aAA
+ AAA
+(3 rows)
+
+-- test order by clause
+select f1 from ustore_column_collate order by f1;
+ f1
+--------
+ A
+ A1中文
+ a2中文
+ AaA
+ AAA
+ aAA
+ b1中文
+ B2中文
+ Bb
+ bb
+ c
+ C
+ Cc
+ dD
+ S
+ s
+ ś
+ Š
+ Z
+ z
+ 中文A3
+ 中文C1
+ 中文d1
+(23 rows)
+
+select f2 from ustore_column_collate order by f2;
+ f2
+-----------------
+ A
+ A1中文
+ a2中文
+ AaA
+ AAA
+ aAA
+ b1中文
+ B2中文
+ Bb
+ bb
+ c
+ C
+ Cc
+ dd
+ S
+ s
+ ś
+ Š
+ Z
+ z
+ 中文A3
+ 中文C1
+ 中文d1
+(23 rows)
+
+-- test distinct clause
+insert into ustore_column_collate values ('AbcdEf','AbcdEf'), ('abcdEF','abcdEF'), ('中文AbCdEFG','中文AbCdEFG'),
+('中文abcdEFG','中文abcdEFG'), ('中文Ab','中文Ab'), ('中文ab','中文ab');
+select distinct f1 from ustore_column_collate;
+ f1
+-------------
+ a2中文
+ Cc
+ dD
+ 中文A3
+ B2中文
+ A
+ c
+ AbcdEf
+ 中文Ab
+ 中文AbCdEFG
+ 中文C1
+ b1中文
+ bb
+ S
+ 中文d1
+ z
+ A1中文
+ AaA
+(18 rows)
+
+select distinct f2 from ustore_column_collate;
+ f2
+-----------------
+ a2中文
+ Cc
+ dd
+ 中文A3
+ B2中文
+ A
+ c
+ AbcdEf
+ 中文Ab
+ 中文AbCdEFG
+ 中文C1
+ b1中文
+ bb
+ S
+ 中文d1
+ z
+ A1中文
+ AaA
+(18 rows)
+
+select distinct f1 from ustore_column_collate order by f1;
+ f1
+-------------
+ A
+ A1中文
+ a2中文
+ AaA
+ AbcdEf
+ b1中文
+ B2中文
+ bb
+ c
+ Cc
+ dD
+ S
+ z
+ 中文A3
+ 中文Ab
+ 中文AbCdEFG
+ 中文C1
+ 中文d1
+(18 rows)
+
+select distinct f2 from ustore_column_collate order by f2;
+ f2
+-----------------
+ A
+ A1中文
+ a2中文
+ AaA
+ AbcdEf
+ b1中文
+ B2中文
+ bb
+ c
+ Cc
+ dd
+ S
+ z
+ 中文A3
+ 中文Ab
+ 中文AbCdEFG
+ 中文C1
+ 中文d1
+(18 rows)
+
+-- test group by
+select count(f1),f1 from ustore_column_collate group by f1;
+ count | f1
+-------+-------------
+ 1 | a2中文
+ 1 | Cc
+ 1 | dD
+ 1 | 中文A3
+ 1 | B2中文
+ 1 | A
+ 2 | c
+ 2 | AbcdEf
+ 2 | 中文Ab
+ 2 | 中文AbCdEFG
+ 1 | 中文C1
+ 1 | b1中文
+ 2 | bb
+ 4 | S
+ 1 | 中文d1
+ 2 | z
+ 1 | A1中文
+ 3 | AaA
+(18 rows)
+
+select count(f2),f2 from ustore_column_collate group by f2;
+ count | f2
+-------+-----------------
+ 1 | a2中文
+ 1 | Cc
+ 1 | dd
+ 1 | 中文A3
+ 1 | B2中文
+ 1 | A
+ 2 | c
+ 2 | AbcdEf
+ 2 | 中文Ab
+ 2 | 中文AbCdEFG
+ 1 | 中文C1
+ 1 | b1中文
+ 2 | bb
+ 4 | S
+ 1 | 中文d1
+ 2 | z
+ 1 | A1中文
+ 3 | AaA
+(18 rows)
+
+-- test like
+select f1 from ustore_column_collate where f1 like 'A_%';
+ f1
+--------
+ AaA
+ aAA
+ AAA
+ A1中文
+ a2中文
+ AbcdEf
+ abcdEF
+(7 rows)
+
+select f1 from ustore_column_collate where f1 like 'A%f';
+ f1
+--------
+ AbcdEf
+ abcdEF
+(2 rows)
+
+select f1 from ustore_column_collate where f1 like 'A__';
+ f1
+-----
+ AaA
+ aAA
+ AAA
+(3 rows)
+
+select f1 from ustore_column_collate where f1 like '\A__';
+ f1
+-----
+ AaA
+ aAA
+ AAA
+(3 rows)
+
+select f1 from ustore_column_collate where f1 like 'A%\'; -- error
+ERROR: LIKE pattern must not end with escape character
+select f1 from ustore_column_collate where f1 like 'A_\'; -- error
+ERROR: LIKE pattern must not end with escape character
+select f2 from ustore_column_collate where f2 like 'A_%';
+ f2
+-----------------
+ A
+ AaA
+ aAA
+ AAA
+ A1中文
+ a2中文
+ AbcdEf
+ abcdEF
+(8 rows)
+
+select f2 from ustore_column_collate where f2 like 'A%f';
+ f2
+----
+(0 rows)
+
+select f2 from ustore_column_collate where f2 like 'A__';
+ f2
+----
+(0 rows)
+
+select f2 from ustore_column_collate where f2 like '\A__';
+ f2
+----
+(0 rows)
+
+select f2 from ustore_column_collate where f2 like 'A%\'; -- error
+ERROR: LIKE pattern must not end with escape character
+select f2 from ustore_column_collate where f2 like 'A_\'; -- error
+ERROR: LIKE pattern must not end with escape character
+-- test grouping sets
+create table date_dim(d_year int, d_moy int, d_date_sk int);
+create table store_sales(ss_sold_date_sk int, ss_item_sk int, ss_ext_sales_price int );
+create table item(i_category text, i_item_sk int ,i_manager_id int );
+insert into date_dim values(2000, 11, 1);
+insert into store_sales values(1, 1, 1000);
+insert into item values('Music', 1, 1);
+select dt.d_year, ss_ext_sales_price, item.i_category, grouping(dt.d_year), grouping(ss_ext_sales_price), grouping(item.i_category)
+from date_dim dt, store_sales, item
+where dt.d_date_sk = store_sales.ss_sold_date_sk and store_sales.ss_item_sk = item.i_item_sk and item.i_manager_id = 1 and dt.d_moy = 11 and dt.d_year = 2000 and i_category = 'Music'
+group by grouping sets(dt.d_year,ss_ext_sales_price),item.i_category having grouping(i_category) = 0 order by 1,2,3,4,5,6;
+ d_year | ss_ext_sales_price | i_category | grouping | grouping | grouping
+--------+--------------------+------------+----------+----------+----------
+ 2000 | | Music | 0 | 1 | 0
+ | 1000 | Music | 1 | 0 | 0
+(2 rows)
+
+-- test collate utf8mb4_unicode_ci
+-- test collation used in expr
+select 'abCdEf' = 'abcdef' collate "utf8mb4_unicode_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'abCdEf' != 'abcdef' collate "utf8mb4_unicode_ci";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'abCdEf' > 'abcdef' collate "utf8mb4_unicode_ci";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'abCdEf' < 'abcdef' collate "utf8mb4_unicode_ci";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'AAaabb'::char = 'AAaABb'::char collate "utf8mb4_unicode_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'AAaabb'::char != 'AAaABb'::char collate "utf8mb4_unicode_ci";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'AAaabb'::char > 'AAaABb'::char collate "utf8mb4_unicode_ci";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'AAaabb'::char < 'AAaABb'::char collate "utf8mb4_unicode_ci";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'ś' = 'Š' collate "utf8mb4_unicode_ci" , 'Š' = 's' collate "utf8mb4_unicode_ci";
+ ?column? | ?column?
+----------+----------
+ t | t
+(1 row)
+
+select 'ŠSśs' = 'ssss' collate "utf8mb4_unicode_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 's'::char(3) = 'Š'::char(3) collate "utf8mb4_unicode_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'ŠSśs'::char(10) = 'ssss'::char(10) collate "utf8mb4_unicode_ci";
+ ?column?
+----------
+ t
+(1 row)
+
+-- test collate utf8mb4_bin
+select 'abCdEf' = 'abcdef' collate "utf8mb4_bin";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'abCdEf' > 'abcdef' collate "utf8mb4_bin";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'abCdEf' < 'abcdef' collate "utf8mb4_bin";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'abCdEf' = 'ab' collate "utf8mb4_bin";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'abCdEf' > 'ab' collate "utf8mb4_bin";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'abCdEf' < 'ab' collate "utf8mb4_bin";
+ ?column?
+----------
+ f
+(1 row)
+
+select 'a' > 'A' collate "utf8mb4_bin", 'B' > 'A' collate "utf8mb4_bin", 'a' > 'B' collate "utf8mb4_bin",'b' > 'a' collate "utf8mb4_bin";
+ ?column? | ?column? | ?column? | ?column?
+----------+----------+----------+----------
+ t | t | t | t
+(1 row)
+
+-- test binary
+create table t1(a blob collate utf8mb4_bin);
+ERROR: binary collation only support binary type in B format
+create table t1(a blob collate "C");
+ERROR: binary collation only support binary type in B format
+drop table if exists t1;
+NOTICE: table "t1" does not exist, skipping
+create table t1(a blob collate binary);
+-- test partition table
+drop table if exists test_part_collate;
+NOTICE: table "test_part_collate" does not exist, skipping
+create table test_part_collate (
+f1 int,
+f2 text collate utf8mb4_general_ci,
+f3 text collate utf8mb4_bin
+) partition by range(f1) (
+partition p1 values less than (5),
+partition p2 values less than (10),
+partition p3 values less than MAXVALUE
+);
+insert into test_part_collate values(1, 'bbb', 'a');
+insert into test_part_collate values(2, 'aba', 'A');
+insert into test_part_collate values(6, 'Bbb', 'b');
+insert into test_part_collate values(15, 'BBB', 'B');
+insert into test_part_collate values(3, 'ccc', 'C');
+select * from test_part_collate order by f2;
+ f1 | f2 | f3
+----+-----+----
+ 2 | aba | A
+ 1 | bbb | a
+ 6 | Bbb | b
+ 15 | BBB | B
+ 3 | ccc | C
+(5 rows)
+
+select * from test_part_collate order by f3;
+ f1 | f2 | f3
+----+-----+----
+ 2 | aba | A
+ 15 | BBB | B
+ 3 | ccc | C
+ 1 | bbb | a
+ 6 | Bbb | b
+(5 rows)
+
+select distinct f2 from test_part_collate order by f2;
+ f2
+-----
+ aba
+ bbb
+ ccc
+(3 rows)
+
+select distinct f3 from test_part_collate order by f3;
+ f3
+----
+ A
+ B
+ C
+ a
+ b
+(5 rows)
+
+select * from test_part_collate where f2 = 'bbb';
+ f1 | f2 | f3
+----+-----+----
+ 1 | bbb | a
+ 6 | Bbb | b
+ 15 | BBB | B
+(3 rows)
+
+select * from test_part_collate where f3 = 'b';
+ f1 | f2 | f3
+----+-----+----
+ 6 | Bbb | b
+(1 row)
+
+select f2,count(*) from test_part_collate group by f2;
+ f2 | count
+-----+-------
+ aba | 1
+ bbb | 3
+ ccc | 1
+(3 rows)
+
+select f3,count(*) from test_part_collate group by f3;
+ f3 | count
+----+-------
+ b | 1
+ A | 1
+ C | 1
+ B | 1
+ a | 1
+(5 rows)
+
+-- test table collate
+drop table if exists test_table_collate;
+NOTICE: table "test_table_collate" does not exist, skipping
+create table test_table_collate (a text, b char(10),c character(10) collate "utf8mb4_bin") collate = utf8mb4_general_ci;
+insert into test_table_collate values('bb','bb','bb');
+insert into test_table_collate values('bB','bB','bB');
+insert into test_table_collate values('BB','BB','BB');
+insert into test_table_collate values('ba','ba','ba');
+select * from test_table_collate where b = 'bb';
+ a | b | c
+----+------------+------------
+ bb | bb | bb
+ bB | bB | bB
+ BB | BB | BB
+(3 rows)
+
+select * from test_table_collate where b = 'bb' collate "utf8mb4_bin";
+ a | b | c
+----+------------+------------
+ bb | bb | bb
+(1 row)
+
+select * from test_table_collate where c = 'bb';
+ a | b | c
+----+------------+------------
+ bb | bb | bb
+(1 row)
+
+select * from test_table_collate where c = 'bb' collate "utf8mb4_general_ci";
+ a | b | c
+----+------------+------------
+ bb | bb | bb
+ bB | bB | bB
+ BB | BB | BB
+(3 rows)
+
+select 'a' > 'A' collate utf8mb4_bin;
+ ?column?
+----------
+ t
+(1 row)
+
+select 'a' > 'A' collate 'utf8mb4_bin';
+ ?column?
+----------
+ t
+(1 row)
+
+select 'a' > 'A' collate "utf8mb4_bin";
+ ?column?
+----------
+ t
+(1 row)
+
+create table test1(a text charset utf8mb4 collate utf8mb4_bin);
+create table test2(a text charset 'utf8mb4' collate 'utf8mb4_bin');
+create table test3(a text charset "utf8mb4" collate 'utf8mb4_bin');
+-- test table charset binary
+create table test4(a text) charset "binary";
+alter table test4 charset utf8mb4;
+alter table test4 add a2 varchar(20);
+alter table test4 add a3 varchar(20) collate 'utf8mb4_bin';
+select pg_get_tabledef('test4');
+ pg_get_tabledef
+-------------------------------------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE test4 ( +
+ a blob, +
+ a2 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci,+
+ a3 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_bin +
+ ) +
+ CHARACTER SET = "UTF8" COLLATE = "utf8mb4_general_ci" +
+ WITH (orientation=row, compression=no);
+(1 row)
+
+create table test5(a blob charset "binary");
+ERROR: type blob not support set charset
+create table test6(a int charset "binary");
+ERROR: type integer not support set charset
+create table test6(a float charset "binary");
+ERROR: type double precision not support set charset
+select 'a' > 'A' collate UTF8MB4_BIN;
+ ?column?
+----------
+ t
+(1 row)
+
+select 'a' > 'A' collate 'UTF8MB4_BIN';
+ ?column?
+----------
+ t
+(1 row)
+
+select 'a' > 'A' collate "UTF8MB4_BIN";
+ ?column?
+----------
+ t
+(1 row)
+
+select 'a' > 'A' collate "UTF8MB4_bin";
+ ?column?
+----------
+ t
+(1 row)
+
+create table test7(a text charset 'UTF8MB4' collate 'UTF8MB4_BIN');
+create table test8(a text) charset 'UTF8MB4' collate 'UTF8MB4_bin';
+create table test9(a text collate 'UTF8MB4_BIN');
+create table test10(a text charset 'UTF8MB4');
+create table test11(a text charset 'aaa' collate 'UTF8MB4_BIN');
+ERROR: aaa is not a valid encoding name
+LINE 1: create table test11(a text charset 'aaa' collate 'UTF8MB4_BI...
+ ^
+create table test12(a text collate 'utf8mb4_bin.utf8');
+ERROR: collation "utf8mb4_bin.utf8" does not exist
+create table test13(a text collate utf8mb4_bin.utf8);
+ERROR: error schema name for collate
+create table test14(a text collate 'pg_catalog.utf8mb4_bin');
+ERROR: collation "pg_catalog.utf8mb4_bin" does not exist
+create table test15(a text collate pg_catalog.utf8mb4_bin); -- ok
+create table test16(a text collate 'aa_DJ.utf8'); -- ok
+create table test17(a text collate aa_DJ.utf8);
+ERROR: error schema name for collate
+create table test18(a text collate 'pg_catalog.aa_DJ.utf8');
+ERROR: collation "pg_catalog.aa_DJ.utf8" does not exist
+create table test19(a text collate pg_catalog.aa_DJ.utf8);
+ERROR: error schema name for collate
+create table test20(a text collate pg_catalog.utf8);
+ERROR: collation "utf8" does not exist
+
+-- test create table as
+create table test21(a text collate utf8mb4_bin, b text collate utf8mb4_general_ci, c text);
+create table test22 as select * from test21;
+select * from pg_get_tabledef('test22');
+ pg_get_tabledef
+-------------------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE test22 ( +
+ a text CHARACTER SET "UTF8" COLLATE utf8mb4_bin, +
+ b text CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci,+
+ c text +
+ ) +
+ WITH (orientation=row, compression=no);
+(1 row)
+
+create table test23 as select a, c from test21;
+select * from pg_get_tabledef('test23');
+ pg_get_tabledef
+------------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE test23 ( +
+ a text CHARACTER SET "UTF8" COLLATE utf8mb4_bin,+
+ c text +
+ ) +
+ WITH (orientation=row, compression=no);
+(1 row)
+
+set b_format_behavior_compat_options = enable_set_variables;
+set @v1 = 'aa', @v2 = 'bb';
+create table test24 as select @v1 collate 'utf8mb4_bin';
+select * from pg_get_tabledef('test24');
+ pg_get_tabledef
+---------------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE test24 ( +
+ "@v1" text CHARACTER SET "UTF8" COLLATE utf8mb4_bin+
+ ) +
+ WITH (orientation=row, compression=no);
+(1 row)
+
+create table test25 as select @v1 collate 'utf8mb4_bin', @v2;
+select * from pg_get_tabledef('test25');
+ pg_get_tabledef
+----------------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE test25 ( +
+ "@v1" text CHARACTER SET "UTF8" COLLATE utf8mb4_bin,+
+ "@v2" text +
+ ) +
+ WITH (orientation=row, compression=no);
+(1 row)
+
+\c regression
+clean connection to all force for database test_collate_A;
+clean connection to all force for database test_collate_B;
+DROP DATABASE IF EXISTS test_collate_A;
+DROP DATABASE IF EXISTS test_collate_B;
diff --git a/src/test/regress/expected/ustore_subpartition_add_drop_partition.out b/src/test/regress/expected/ustore_subpartition_add_drop_partition.out
index cbdc2b95e..f22d70664 100644
--- a/src/test/regress/expected/ustore_subpartition_add_drop_partition.out
+++ b/src/test/regress/expected/ustore_subpartition_add_drop_partition.out
@@ -77,7 +77,7 @@ ALTER TABLE range_range_sales MODIFY PARTITION customer4 ADD SUBPARTITION custom
ERROR: upper boundary of adding partition MUST overtop last existing partition
--fail, invalid format
ALTER TABLE range_range_sales MODIFY PARTITION customer2 ADD SUBPARTITION customer2_temp1 VALUES ('2015-01-01');
-ERROR: can not add none-range partition to range partition table
+ERROR: can not add none-range subpartition to range subpartition table
--check for ok after add
SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
FROM pg_class c1, pg_partition p1, pg_namespace n1
@@ -353,7 +353,7 @@ ALTER TABLE range_list_sales MODIFY PARTITION customer3 ADD SUBPARTITION custome
ERROR: list boundary of adding partition MUST NOT overlap with existing partition
--fail, invalid format
ALTER TABLE range_list_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('X');
-ERROR: can not add none-list partition to list partition table
+ERROR: can not add none-list subpartition to list subpartition table
--check for ok after add
SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
FROM pg_class c1, pg_partition p1, pg_namespace n1
@@ -622,7 +622,7 @@ LINE 1: ...MODIFY PARTITION customer1 ADD SUBPARTITION customer1_temp1;
^
--fail, invalid format
ALTER TABLE range_hash_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('X');
-ERROR: can not add hash partition
+ERROR: can not add hash subpartition
--check for ok after add
SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
FROM pg_class c1, pg_partition p1, pg_namespace n1
@@ -883,7 +883,7 @@ ALTER TABLE list_range_sales MODIFY PARTITION channel3 ADD SUBPARTITION channel3
ERROR: upper boundary of adding partition MUST overtop last existing partition
--fail, invalid format
ALTER TABLE list_range_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES (1500);
-ERROR: can not add none-range partition to range partition table
+ERROR: can not add none-range subpartition to range subpartition table
--check for ok after add
SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
FROM pg_class c1, pg_partition p1, pg_namespace n1
@@ -1155,7 +1155,7 @@ ALTER TABLE list_list_sales MODIFY PARTITION channel3 ADD SUBPARTITION channel3_
ERROR: list boundary of adding partition MUST NOT overlap with existing partition
--fail, invalid format
ALTER TABLE list_list_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES LESS THAN (1500);
-ERROR: can not add none-list partition to list partition table
+ERROR: can not add none-list subpartition to list subpartition table
--check for ok after add
SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
FROM pg_class c1, pg_partition p1, pg_namespace n1
@@ -1424,7 +1424,7 @@ LINE 1: ...s MODIFY PARTITION channel1 ADD SUBPARTITION channel1_temp1;
^
--fail, invalid format
ALTER TABLE list_hash_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES LESS THAN (1500);
-ERROR: can not add hash partition
+ERROR: can not add hash subpartition
--check for ok after add
SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
FROM pg_class c1, pg_partition p1, pg_namespace n1
@@ -1679,7 +1679,7 @@ ALTER TABLE hash_range_sales MODIFY PARTITION product2 ADD SUBPARTITION product2
ERROR: upper boundary of adding partition MUST overtop last existing partition
--fail, invalid format
ALTER TABLE hash_range_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_temp1 VALUES (DEFAULT);
-ERROR: can not add none-range partition to range partition table
+ERROR: can not add none-range subpartition to range subpartition table
--success, add 1 subpartition
ALTER TABLE hash_range_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_customer2 VALUES LESS THAN (MAXVALUE);
--check for ok after add
@@ -1933,7 +1933,7 @@ ALTER TABLE hash_list_sales MODIFY PARTITION product3 ADD SUBPARTITION product3_
ERROR: list boundary of adding partition MUST NOT overlap with existing partition
--fail, invalid format
ALTER TABLE hash_list_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_temp1 VALUES LESS THAN (MAXVALUE);
-ERROR: can not add none-list partition to list partition table
+ERROR: can not add none-list subpartition to list subpartition table
--success, add 1 subpartition
ALTER TABLE hash_list_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_channel2 VALUES (DEFAULT);
--check for ok after add
diff --git a/src/test/regress/expected/ustore_subpartition_createtable.out b/src/test/regress/expected/ustore_subpartition_createtable.out
index c373b4129..2bd6f79d0 100644
--- a/src/test/regress/expected/ustore_subpartition_createtable.out
+++ b/src/test/regress/expected/ustore_subpartition_createtable.out
@@ -69,8 +69,8 @@ insert into list_hash values('201903', '5', '1', 1);
select * from list_hash;
month_code | dept_code | user_no | sales_amt
------------+-----------+---------+-----------
- 201902 | 2 | 1 | 1
201902 | 1 | 1 | 1
+ 201902 | 2 | 1 | 1
201903 | 4 | 1 | 1
201903 | 5 | 1 | 1
(4 rows)
@@ -140,11 +140,11 @@ insert into hash_list values('201903', '1', '1', 1);
select * from hash_list;
month_code | dept_code | user_no | sales_amt
------------+-----------+---------+-----------
- 201903 | 1 | 1 | 1
- 201903 | 2 | 1 | 1
201901 | 1 | 1 | 1
201901 | 1 | 1 | 1
201901 | 2 | 1 | 1
+ 201903 | 1 | 1 | 1
+ 201903 | 2 | 1 | 1
(5 rows)
drop table hash_list;
@@ -176,11 +176,11 @@ insert into hash_hash values('201903', '1', '1', 1);
select * from hash_hash;
month_code | dept_code | user_no | sales_amt
------------+-----------+---------+-----------
- 201903 | 2 | 1 | 1
- 201903 | 1 | 1 | 1
+ 201901 | 1 | 1 | 1
+ 201901 | 1 | 1 | 1
201901 | 2 | 1 | 1
- 201901 | 1 | 1 | 1
- 201901 | 1 | 1 | 1
+ 201903 | 1 | 1 | 1
+ 201903 | 2 | 1 | 1
(5 rows)
drop table hash_hash;
@@ -212,11 +212,11 @@ insert into hash_range values('201903', '1', '1', 1);
select * from hash_range;
month_code | dept_code | user_no | sales_amt
------------+-----------+---------+-----------
- 201903 | 1 | 1 | 1
- 201903 | 2 | 1 | 1
201901 | 1 | 1 | 1
201901 | 1 | 1 | 1
201901 | 2 | 1 | 1
+ 201903 | 1 | 1 | 1
+ 201903 | 2 | 1 | 1
(5 rows)
drop table hash_range;
@@ -726,29 +726,6 @@ PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
);
drop table list_list;
--1.4 subpartition key check
--- 一级分区和二级分区分区键是同一列
-CREATE TABLE list_list
-(
- month_code VARCHAR2 ( 30 ) NOT NULL ,
- dept_code VARCHAR2 ( 30 ) NOT NULL ,
- user_no VARCHAR2 ( 30 ) NOT NULL ,
- sales_amt int
-) WITH (STORAGE_TYPE=USTORE)
-PARTITION BY LIST (month_code) SUBPARTITION BY LIST (month_code)
-(
- PARTITION p_201901 VALUES ( '201902' )
- (
- SUBPARTITION p_201901_a VALUES ( '1' ),
- SUBPARTITION p_201901_b VALUES ( '2' )
- ),
- PARTITION p_201902 VALUES ( '201903' )
- (
- SUBPARTITION p_201902_a VALUES ( '1' ),
- SUBPARTITION p_201902_b VALUES ( '2' )
- )
-);
-ERROR: The two partition keys of a subpartition partition table are the same.
-DETAIL: N/A
--二级分区的键值一样
CREATE TABLE list_list
(
diff --git a/src/test/regress/expected/xc_rownum.out b/src/test/regress/expected/xc_rownum.out
index 30e28efaf..d16d9c6ab 100755
--- a/src/test/regress/expected/xc_rownum.out
+++ b/src/test/regress/expected/xc_rownum.out
@@ -2233,23 +2233,211 @@ insert into partition_hash values(40,'forty-three',43);
select rownum,* from partition_hash;
rownum | id | name | age
--------+----+--------------+-----
- 1 | 30 | thirty | 30
- 2 | 30 | Thirty-three | 33
- 3 | 40 | forty | 40
- 4 | 40 | forty-three | 43
- 5 | 20 | twenty | 20
- 6 | 20 | twenty-three | 23
- 7 | 10 | ten | 10
- 8 | 10 | thirteen | 13
+ 1 | 10 | ten | 10
+ 2 | 10 | thirteen | 13
+ 3 | 20 | twenty | 20
+ 4 | 20 | twenty-three | 23
+ 5 | 30 | thirty | 30
+ 6 | 30 | Thirty-three | 33
+ 7 | 40 | forty | 40
+ 8 | 40 | forty-three | 43
(8 rows)
select * from partition_hash where rownum < 5;
id | name | age
----+--------------+-----
- 30 | thirty | 30
- 30 | Thirty-three | 33
- 40 | forty | 40
- 40 | forty-three | 43
+ 10 | ten | 10
+ 10 | thirteen | 13
+ 20 | twenty | 20
+ 20 | twenty-three | 23
(4 rows)
drop table partition_hash;
+create table test_rownum_subquery
+(
+ pk integer,
+ no varchar2
+);
+insert into test_rownum_subquery values (1,'1');
+insert into test_rownum_subquery values (2,'2');
+insert into test_rownum_subquery values (3,'3');
+insert into test_rownum_subquery values (4,'4');
+insert into test_rownum_subquery values (5,'5');
+select * from test_rownum_subquery;
+ pk | no
+----+----
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+ 5 | 5
+(5 rows)
+
+update test_rownum_subquery t set t.no = to_char(100 - 1 + (
+ select vou_no from (
+ select rownum as vou_no, no from (
+ select distinct no from test_rownum_subquery b order by 1
+ )
+ ) where nvl(no, 0) = nvl(t.no, 0)
+));
+select * from test_rownum_subquery;
+ pk | no
+----+-----
+ 1 | 100
+ 2 | 101
+ 3 | 102
+ 4 | 103
+ 5 | 104
+(5 rows)
+
+drop table test_rownum_subquery;
+create table test_rownum_push_qual(id int);
+insert into test_rownum_push_qual values(generate_series(1, 20));
+-- having qual should not be pushed if accompanied by rownum reference
+explain (verbose on, costs off) select rownum, * from test_rownum_push_qual group by id,rownum having ROWNUM < 10 and id between 10 and 20 order by 1;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------
+ Group
+ Output: id
+ Group By Key: (ROWNUM), test_rownum_push_qual.id
+ Filter: (((ROWNUM) < 10) AND (test_rownum_push_qual.id >= 10) AND (test_rownum_push_qual.id <= 20))
+ -> Sort
+ Output: (ROWNUM), id
+ Sort Key: (ROWNUM), test_rownum_push_qual.id
+ -> Seq Scan on public.test_rownum_push_qual
+ Output: ROWNUM, id
+(9 rows)
+
+select rownum, * from test_rownum_push_qual group by id,rownum having ROWNUM < 10 and id between 10 and 20 order by 1; -- expect 0 rows
+ rownum | id
+--------+----
+(0 rows)
+
+explain (verbose on, costs off) select rownum, * from test_rownum_push_qual group by id,rownum having ROWNUM < 10 or id between 10 and 20 order by 1;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------
+ Group
+ Output: id
+ Group By Key: (ROWNUM), test_rownum_push_qual.id
+ Filter: (((ROWNUM) < 10) OR ((test_rownum_push_qual.id >= 10) AND (test_rownum_push_qual.id <= 20)))
+ -> Sort
+ Output: (ROWNUM), id
+ Sort Key: (ROWNUM), test_rownum_push_qual.id
+ -> Seq Scan on public.test_rownum_push_qual
+ Output: ROWNUM, id
+(9 rows)
+
+select rownum, * from test_rownum_push_qual group by id,rownum having ROWNUM < 10 or id between 10 and 20 order by 1; -- expect 20 rows
+ rownum | id
+--------+----
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+ 5 | 5
+ 6 | 6
+ 7 | 7
+ 8 | 8
+ 9 | 9
+ 10 | 10
+ 11 | 11
+ 12 | 12
+ 13 | 13
+ 14 | 14
+ 15 | 15
+ 16 | 16
+ 17 | 17
+ 18 | 18
+ 19 | 19
+ 20 | 20
+(20 rows)
+
+explain (verbose on, costs off) select rownum, * from test_rownum_push_qual group by id,rownum having case when ROWNUM < 10 then 'true'::boolean else 'false'::boolean end and id between 10 and 20 order by 1;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------
+ Group
+ Output: id
+ Group By Key: (ROWNUM), test_rownum_push_qual.id
+ Filter: (CASE WHEN ((ROWNUM) < 10) THEN true ELSE false END AND (test_rownum_push_qual.id >= 10) AND (test_rownum_push_qual.id <= 20))
+ -> Sort
+ Output: (ROWNUM), id
+ Sort Key: (ROWNUM), test_rownum_push_qual.id
+ -> Seq Scan on public.test_rownum_push_qual
+ Output: ROWNUM, id
+(9 rows)
+
+select rownum, * from test_rownum_push_qual group by id,rownum having case when ROWNUM < 10 then 'true'::boolean else 'false'::boolean end and id between 10 and 20 order by 1; -- expect 0 rows
+ rownum | id
+--------+----
+(0 rows)
+
+-- do not transform rownum op const to limit const -1, if limit clause is stated
+explain (verbose on, costs off) select rownum, * from test_rownum_push_qual where rownum < 10 limit 10 offset 10;
+ QUERY PLAN
+------------------------------------------------
+ Limit
+ Output: id
+ -> Seq Scan on public.test_rownum_push_qual
+ Output: id
+ Filter: (ROWNUM < 10)
+(5 rows)
+
+select rownum, * from test_rownum_push_qual where rownum < 10 limit 10 offset 10; -- expected 0 rows
+ rownum | id
+--------+----
+(0 rows)
+
+explain (verbose on, costs off) select rownum, * from test_rownum_push_qual where rownum > 10 limit 10 offset 10;
+ QUERY PLAN
+------------------------------------------------
+ Limit
+ Output: id
+ -> Seq Scan on public.test_rownum_push_qual
+ Output: id
+ Filter: (ROWNUM > 10)
+(5 rows)
+
+select rownum, * from test_rownum_push_qual where rownum > 10 limit 10 offset 10; -- expected 0 rows
+ rownum | id
+--------+----
+(0 rows)
+
+explain (verbose on, costs off) select rownum, * from test_rownum_push_qual where rownum < 15 limit 10 offset 10;
+ QUERY PLAN
+------------------------------------------------
+ Limit
+ Output: id
+ -> Seq Scan on public.test_rownum_push_qual
+ Output: id
+ Filter: (ROWNUM < 15)
+(5 rows)
+
+select rownum, * from test_rownum_push_qual where rownum < 15 limit 10 offset 10; -- expected 4 rows
+ rownum | id
+--------+----
+ 11 | 11
+ 12 | 12
+ 13 | 13
+ 14 | 14
+(4 rows)
+
+explain (verbose on, costs off) select rownum, * from (select * from test_rownum_push_qual order by 1) where rownum < 10 limit 10 offset 10;
+ QUERY PLAN
+------------------------------------------------------------
+ Limit
+ Output: __unnamed_subquery__.id
+ -> Subquery Scan on __unnamed_subquery__
+ Output: __unnamed_subquery__.id
+ Filter: (ROWNUM < 10)
+ -> Sort
+ Output: test_rownum_push_qual.id
+ Sort Key: test_rownum_push_qual.id
+ -> Seq Scan on public.test_rownum_push_qual
+ Output: test_rownum_push_qual.id
+(10 rows)
+
+select rownum, * from (select * from test_rownum_push_qual order by 1) where rownum < 10 limit 10 offset 10; -- expected 0 rows
+ rownum | id
+--------+----
+(0 rows)
+
diff --git a/src/test/regress/input/charset_b_format.source b/src/test/regress/input/charset_b_format.source
new file mode 100644
index 000000000..e7f1db5e6
--- /dev/null
+++ b/src/test/regress/input/charset_b_format.source
@@ -0,0 +1,228 @@
+-- in A format, should report error.
+create database d_charset dbcompatibility 'A';
+\c d_charset;
+create schema s_charset_1 charset utf8mb4 collate utf8mb4_unicode_ci;
+create table t_charset_1 (c1 varchar(20)) charset utf8mb4 collate utf8mb4_unicode_ci;
+create table a_charset_1 (a1 varchar(20) charset utf8mb4 collate utf8mb4_general_ci);
+create table t_charset_2 (c1 varchar(20)) with(collate = 1537); -- error
+\c regression;
+drop database d_charset;
+create database d_charset dbcompatibility 'B';
+create database d_charset_bak dbcompatibility 'B';
+\c d_charset;
+create table t_charset_0 (c1 varchar(20));
+select pg_get_tabledef('t_charset_0');
+set b_format_behavior_compat_options = 'default_collation';
+-- schema level charset and collate
+create schema s_charset_1 charset utf8mb4 collate utf8mb4_unicode_ci;
+create schema s_charset_2 charset = utf8mb4 collate = utf8mb4_unicode_ci;
+create schema s_charset_3 charset = utf8mb4 collate = utf8mb4_general_ci;
+create schema s_charset_4 charset = utf8mb4;
+create schema s_charset_5 charset = utf8;
+create schema s_charset_6 charset = gbk; -- error
+create schema s_charset_6;
+create schema s_charset_7 default charset = utf8mb4 default collate = utf8mb4_unicode_ci;
+create schema s_charset_8 charset = "binary";
+create schema s_charset_9 character set = utf8mb4;
+create schema s_charset_10 collate = utf8mb4_general_ci;
+create schema s_charset_11 collate = utf8mb4_bin;
+create schema s_charset_12 collate = binary;
+create schema s_charset_13 collate = "binary";
+create schema s_charset_14 charset = binary;
+create schema s_charset_16 charset = gbk collate = utf8mb4_general_ci; -- error
+create schema s_charset_16 default charset utf8mb4 default collate utf8mb4_unicode_ci;
+create schema s_charset_17 CHARSET = utf8mb4 COLLATE = utf8mb4_unicode_ci;
+create schema s_charset_18 collate = utf8mb4_unicode_ci charset = utf8mb4;
+create schema s_charset_19 collate = utf8mb4_unicode_ci collate = utf8mb4_general_ci;
+create schema s_charset_20 charset = gbk charset = utf8mb4;
+create schema s_charset_21 collate = utf8mb4_unicode_ci charset = utf8mb4 collate = utf8mb4_general_ci;
+create schema s_charset_22 collate = "zh_CN.gbk" charset = utf8mb4 collate = utf8mb4_general_ci;
+create schema s_charset_23 charset utf8mb4 collate "aa_DJ.utf8"; -- error
+create schema s_charset_23 collate "aa_DJ"; -- error
+select * from pg_namespace where nspname like 's_charset_%' order by 1;
+create schema s_charset_14 collate = "zh_CN.gbk"; -- error
+create schema s_charset_15 charset = gbk collate = "zh_CN.gbk"; -- error
+alter schema s_charset_1 charset utf8mb4 collate utf8mb4_general_ci;
+alter schema s_charset_2 charset = utf8mb4 collate = utf8mb4_general_ci;
+alter schema s_charset_3 collate = utf8mb4_unicode_ci;
+alter schema s_charset_5 charset = gbk; -- error
+alter schema s_charset_5 charset = gbk collate = "zh_CN.gbk"; -- error
+alter schema s_charset_9 character set = utf8 collate = utf8mb4_unicode_ci;
+select * from pg_namespace where nspname like 's_charset_%' order by 1;
+
+-- relation level charset and collate
+create table t_charset_1 (c1 varchar(20)) charset utf8mb4 collate utf8mb4_unicode_ci;
+create table t_charset_2 (c1 varchar(20)) charset = utf8mb4 collate = utf8mb4_unicode_ci;
+create table t_charset_3 (c1 varchar(20)) charset = utf8mb4 collate = utf8mb4_general_ci;
+create table t_charset_4 (c1 varchar(20)) charset = utf8mb4;
+create table t_charset_5 (c1 varchar(20)) charset = utf8;
+create table t_charset_6 (c1 varchar(20)) charset = gbk; -- error
+create table t_charset_6 (c1 varchar(20));
+create table t_charset_7 (c1 varchar(20)) default charset = utf8mb4 default collate = utf8mb4_unicode_ci;
+create table t_charset_8 (c1 varchar(20)) charset = binary; -- error
+create table t_charset_8 (c1 text) charset = binary;
+select pg_get_tabledef('t_charset_8');
+create table t_charset_9 (c1 varchar(20)) character set = utf8mb4;
+create table t_charset_10(c1 varchar(20)) collate = utf8mb4_general_ci;
+create table t_charset_11(c1 varchar(20)) collate = utf8mb4_bin;
+create table t_charset_12(c1 varchar(20)) collate = binary;
+create table t_charset_12(c1 varchar(20)) default charset utf8mb4 default collate utf8mb4_unicode_ci;
+create table t_charset_13(c1 varchar(20)) collate = "binary";
+create table t_charset_16(c1 varchar(20)) charset = gbk collate = utf8mb4_general_ci; -- error
+create table t_charset_17(c1 varchar(20)) CHARSET = utf8mb4 COLLATE = utf8mb4_unicode_ci;
+create table t_charset_18(c1 varchar(20)) collate = utf8mb4_unicode_ci charset = utf8mb4;
+create table t_charset_19(c1 varchar(20)) collate = utf8mb4_unicode_ci collate = utf8mb4_general_ci;
+create table t_charset_20(c1 varchar(20)) charset = gbk charset = utf8mb4;
+create table t_charset_21(c1 varchar(20)) collate = utf8mb4_unicode_ci charset = utf8mb4 collate = utf8mb4_general_ci;
+create table t_charset_22(c1 varchar(20)) collate = "zh_CN.gbk" charset = utf8mb4 collate = utf8mb4_general_ci;
+create table t_charset_23(like t_charset_22);
+select r.relname,r.reloptions,a.attcollation from pg_class r,pg_attribute a where r.oid=a.attrelid and r.relname='t_charset_23';
+create table t_charset_24(c1 varchar(20) character set binary); -- error
+create table t_charset_24(c1 varchar(20) character set "binary"); -- error
+create table t_charset_25(c1 varchar(20)) with(collate = 7);
+create table t_charset_26(c1 varchar(20)) charset utf8mb4 collate "aa_DJ.utf8"; -- error
+create table t_charset_26(c1 varchar(20)) collate "aa_DJ"; -- error
+select relname, reloptions from pg_class where relname like 't_charset_%' order by 1;
+alter table t_charset_1 convert to charset binary; -- error
+alter table t_charset_1 convert to charset utf8mb4;
+alter table t_charset_1 convert to character set utf8mb4;
+alter table t_charset_1 convert to character set utf8mb4 collate utf8mb4_general_ci;
+select relname, reloptions from pg_class where relname = 't_charset_1';
+alter table t_charset_1 convert to character set default collate utf8mb4_unicode_ci;
+select relname, reloptions from pg_class where relname = 't_charset_1';
+alter table t_charset_1 charset utf8mb4;
+alter table t_charset_1 character set utf8mb4;
+alter table t_charset_1 character set utf8mb4 collate utf8mb4_bin;
+select relname, reloptions from pg_class where relname = 't_charset_1';
+alter table t_charset_1 collate utf8mb4_unicode_ci;
+select relname, reloptions from pg_class where relname = 't_charset_1';
+alter table t_charset_1 change c1 c2 varchar(30) charset gbk collate utf8mb4_bin; -- error
+alter table t_charset_1 change c1 c2 varchar(30) charset utf8mb4 collate utf8mb4_bin;
+select pg_get_tabledef('t_charset_1');
+
+-- attribute level charset and collate
+create table a_charset_1 (
+a1 varchar(20) charset utf8mb4 collate utf8mb4_general_ci,
+a2 varchar(20) charset utf8mb4 collate utf8mb4_unicode_ci,
+a3 varchar(20) charset utf8mb4 collate utf8mb4_bin
+)
+charset utf8mb4 collate utf8mb4_general_ci;
+insert into a_charset_1 values('中国','中国','中国');
+insert into a_charset_1 select a2,a1 from a_charset_1;
+select *,rawtohex(a1),rawtohex(a2),length(a1),length(a2),length(a3),lengthb(a1),lengthb(a2),lengthb(a3) from a_charset_1;
+alter table a_charset_1 convert to charset gbk collate "zh_CN.gbk";
+select rawtohex(a1),rawtohex(a2),rawtohex(a3) from a_charset_1;
+alter table a_charset_1 convert to charset utf8mb4;
+select rawtohex(a1),rawtohex(a2),rawtohex(a3) from a_charset_1;
+
+create table a_charset_2(
+a1 character(20) charset utf8mb4 collate utf8mb4_general_ci,
+a2 char(20) charset utf8mb4 collate utf8mb4_general_ci,
+a3 nchar(20) charset utf8mb4 collate utf8mb4_general_ci,
+a4 varchar(20) charset utf8mb4 collate utf8mb4_general_ci,
+a5 character varying(20) charset utf8mb4 collate utf8mb4_general_ci,
+a6 varchar2(20) charset utf8mb4 collate utf8mb4_general_ci,
+a7 nvarchar2(20) charset utf8mb4 collate utf8mb4_general_ci,
+a8 text,
+a9 blob
+) charset binary;
+alter table a_charset_2 add a8 varchar(20) charset utf8mb4;
+alter table a_charset_2 add a9 varchar(20) character set utf8mb4;
+alter table a_charset_2 add a10 varchar(20) character set utf8mb4 collate utf8mb4_unicode_ci;
+alter table a_charset_2 add a11 varchar(20) collate utf8mb4_bin;
+alter table a_charset_2 add a12 varchar(20);
+alter table a_charset_2 add a13 int;
+alter table a_charset_2 add a14 varchar(20) charset utf8mb4 collate "aa_DJ.utf8";
+alter table a_charset_2 add a15 varchar(20) collate "aa_DJ.utf8";
+alter table a_charset_2 add a16 varchar(20) collate "aa_DJ";
+alter table a_charset_2 add a17 text charset utf8mb4 collate utf8mb4_general_ci;
+alter table a_charset_2 add a18 clob charset utf8mb4 collate utf8mb4_general_ci; -- error
+alter table a_charset_2 add a19 name charset utf8mb4 collate utf8mb4_general_ci; -- error
+alter table a_charset_2 add a20 "char" charset utf8mb4 collate utf8mb4_general_ci; -- error
+alter table a_charset_2 add a21 BLOB charset utf8mb4 collate utf8mb4_general_ci; -- error
+alter table a_charset_2 add a22 RAW charset utf8mb4 collate utf8mb4_general_ci; -- error
+alter table a_charset_2 add a23 BYTEA charset utf8mb4 collate utf8mb4_general_ci; -- error
+alter table a_charset_2 add a24 varchar(20) collate "zh_CN.gbk"; -- error;
+select pg_get_tabledef('a_charset_2');
+alter table a_charset_2 add a8 varchar(20) charset utf8mb4 charset utf8mb4; -- error
+alter table a_charset_2 modify a1 int;
+alter table a_charset_2 modify a9 varchar(20) character set gbk; -- error
+alter table a_charset_2 modify a10 varchar(20);
+alter table a_charset_2 modify a11 varchar(20) collate utf8mb4_unicode_ci;
+alter table a_charset_2 modify a12 varchar(20) charset utf8mb4;
+select pg_get_tabledef('a_charset_2');
+
+create table a_charset_3(
+a1 varchar(20) collate "C",
+a2 varchar(20) collate "default",
+a3 varchar(20) collate "POSIX"
+);
+create table a_charset_4(a1 blob);
+
+-- divergence test
+\h create schema;
+\h alter schema;
+\h create table;
+\h create table partition;
+\h create table subpartition;
+\h alter table;
+alter session set current_schema = s_charset_1;
+create table s_t_charset_1(s1 varchar(20));
+select pg_get_tabledef('s_t_charset_1');
+create table s_t_charset_2(s1 varchar(20));
+select pg_get_tabledef('s_t_charset_2');
+create table s_t_charset_3 (like s_t_charset_1);
+select pg_get_tabledef('s_t_charset_3');
+create table s_t_charset_4(s1 varchar(20) charset utf8mb4 collate "aa_DJ");
+create table s_t_charset_5(s1 varchar(20) collate "aa_DJ");
+create table s_t_charset_6(s1 int);
+alter table s_t_charset_6 charset binary;
+alter table s_t_charset_6 convert to charset default collate binary; -- error
+alter table s_t_charset_6 convert to charset default collate utf8mb4_bin;
+select pg_get_tabledef('s_t_charset_6');
+create table s_t_charset_7 as table s_t_charset_1;
+\d+ s_t_charset_7;
+create table s_t_charset_8 as select '123';
+\d+ s_t_charset_8;
+alter session set current_schema = s_charset_12;
+create table s_t_charset_9(s1 varchar(20) charset utf8mb4);
+alter table s_t_charset_9 convert to charset default collate utf8mb4_bin; -- error
+alter session set current_schema = s_charset_1;
+
+-- partition table
+create table p_charset_1(c1 varchar(20),c2 varchar(20),c3 int)
+character set = utf8mb4 collate = utf8mb4_general_ci
+partition by hash(c1)
+(
+partition p1,
+partition p2
+);
+select * from pg_get_tabledef('p_charset_1');
+alter table p_charset_1 convert to character set utf8mb4;
+alter table p_charset_1 collate utf8mb4_unicode_ci;
+insert into p_charset_1 values('a中国a');
+select * from p_charset_1;
+\d+ p_charset_1;
+
+-- temporary table
+create temporary table tem_charset_1(c1 varchar(20),c2 varchar(20),c3 int) character set = utf8mb4;
+select r.relname,r.reloptions,a.attcollation from pg_class r,pg_attribute a where r.oid=a.attrelid and r.relname='tem_charset_1';
+alter table tem_charset_1 convert to character set utf8mb4;
+alter table tem_charset_1 collate utf8mb4_unicode_ci;
+insert into tem_charset_1 values('a中国a');
+select r.relname,r.reloptions,a.attcollation from pg_class r,pg_attribute a where r.oid=a.attrelid and r.relname='tem_charset_1';
+
+\! @abs_bindir@/gs_dump d_charset -p @portstring@ -f @abs_bindir@/d_charset.tar -F t >/dev/null 2>&1; echo $?
+\! @abs_bindir@/gs_restore -d d_charset_bak -p @portstring@ @abs_bindir@/d_charset.tar >/dev/null 2>&1; echo $?
+\c d_charset_bak;
+
+select * from pg_namespace where nspname like 's_charset_%' order by 1;
+select relname, reloptions from pg_class where relname like 't_charset_%' order by 1;
+select *,rawtohex(a1),rawtohex(a2),length(a1),length(a2),lengthb(a1),lengthb(a2) from a_charset_1;
+select pg_get_tabledef('a_charset_2');
+
+set b_format_behavior_compat_options = '';
+
+\c regression
+drop database if exists d_charset_bak;
+drop database if exists d_charset;
\ No newline at end of file
diff --git a/src/test/regress/input/dump_auto_increment.source b/src/test/regress/input/dump_auto_increment.source
index a24488a32..d11846353 100644
--- a/src/test/regress/input/dump_auto_increment.source
+++ b/src/test/regress/input/dump_auto_increment.source
@@ -22,9 +22,6 @@ INSERT INTO test_dump_autoinc_gtmp VALUES(0,0);
CREATE UNLOGGED TABLE test_dump_autoinc_unlog(col1 int auto_increment NULL UNIQUE, col2 int) AUTO_INCREMENT = 100000;
INSERT INTO test_dump_autoinc_unlog VALUES(0,0);
INSERT INTO test_dump_autoinc_unlog VALUES(0,0);
-CREATE TABLE test_dump_autoinc_pk_gencol(col1 int auto_increment PRIMARY KEY, col2 int generated always as(2*col1) stored) AUTO_INCREMENT = 1000000;
-INSERT INTO test_dump_autoinc_pk_gencol VALUES(0);
-INSERT INTO test_dump_autoinc_pk_gencol VALUES(0);
CREATE TABLE test_dump_autoinc_range_list
(
col_1 int auto_increment primary key,
@@ -102,10 +99,6 @@ select col1,col2 from test_dump_autoinc_gtmp order by 1,2;
INSERT INTO test_dump_autoinc_unlog VALUES(0,0);
select col1,col2 from test_dump_autoinc_unlog order by 1,2;
-\d+ test_dump_autoinc_pk_gencol
-INSERT INTO test_dump_autoinc_pk_gencol VALUES(0);
-select col1,col2 from test_dump_autoinc_pk_gencol order by 1,2;
-
\d+ test_dump_autoinc_range_list
INSERT INTO test_dump_autoinc_range_list VALUES(0,16,'16');
select col_1,col_2,col_3,col_4 from test_dump_autoinc_range_list order by 1,2,3,4;
diff --git a/src/test/regress/input/dump_partition_b_db.source b/src/test/regress/input/dump_partition_b_db.source
new file mode 100644
index 000000000..44b5f5462
--- /dev/null
+++ b/src/test/regress/input/dump_partition_b_db.source
@@ -0,0 +1,78 @@
+drop database if exists dump_partition_db;
+drop database if exists restore_partition_db;
+
+create database dump_partition_db with dbcompatibility = 'B';
+create database restore_partition_db with dbcompatibility = 'B';
+
+\c dump_partition_db
+CREATE TABLE t_single_key_list (a int, b int, c int)
+PARTITION BY list(a)
+(
+ PARTITION p1 VALUES (100),
+ PARTITION p2 VALUES (200),
+ PARTITION p3 VALUES (300),
+ PARTITION p4 VALUES (400)
+);
+SELECT pg_get_tabledef('t_single_key_list'::regclass);
+
+CREATE TABLE t_multi_keys_list_null (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b)
+(
+ PARTITION p1 VALUES IN ( (0,NULL) ),
+ PARTITION p2 VALUES IN ( (0,1), (0,2), (0,3), (1,1), (1,2) ),
+ PARTITION p3 VALUES IN ( (NULL,0), (2,1) ),
+ PARTITION p4 VALUES IN ( (3,2), (NULL,NULL) )
+);
+SELECT pg_get_tabledef('t_multi_keys_list_null'::regclass);
+
+CREATE TABLE t_multi_keys_list (a varchar(8), b int, c DATE, d int DEFAULT 0)
+PARTITION BY LIST COLUMNS(a,b,c)
+(
+ PARTITION p1 VALUES IN ( ('0',0,'2022-12-31')),
+ PARTITION p2 VALUES IN ( ('{',1,'2022-12-31'), ('''',2,'2022-12-31'), ('0',3,'2022-12-31'), (',',1,'2022-12-31'), (NULL,2,'2022-12-31') ),
+ PARTITION p3 VALUES IN ( ('NULL',0,'2022-12-31'), ('}',1,'2022-12-31') ),
+ PARTITION p4 VALUES IN ( ('{',2,'2022-12-31'), ('3',3,'2022-12-31') ),
+ PARTITION pd VALUES IN (DEFAULT)
+);
+SELECT pg_get_tabledef('t_multi_keys_list'::regclass);
+INSERT INTO t_multi_keys_list VALUES('{',1,'2022-12-31');
+INSERT INTO t_multi_keys_list VALUES(',',1,'2022-12-31');
+SELECT * FROM t_multi_keys_list PARTITION(p2) ORDER BY a,b,c;
+
+CREATE TABLE t_part_by_key_num (a int, b int, c int)
+PARTITION BY KEY(a) PARTITIONS 5 SUBPARTITION BY KEY(c) SUBPARTITIONS 3;
+
+CREATE TABLE t_multi_keys_list_tmtz (a DATE, b timestamp with time zone, c int, d int DEFAULT 0)
+PARTITION BY LIST (a,b,c)
+(
+ PARTITION p1 VALUES ( ('2022-01-01','2022-01-01 12:00:00 pst',1)),
+ PARTITION p2 VALUES ( ('2022-02-01','2022-02-01 12:00:00 pst',2), ('2022-02-02','2022-02-02 12:00:00 pst',2), ('2022-02-03','2022-02-03 12:00:00 pst',2)),
+ PARTITION p3 VALUES ( ('2022-03-01','2022-03-01 12:00:00 pst',3), ('2022-03-02','2022-03-02 12:00:00 pst',3) ),
+ PARTITION pd VALUES (DEFAULT)
+);
+SELECT pg_get_tabledef('t_multi_keys_list_tmtz'::regclass);
+INSERT INTO t_multi_keys_list_tmtz VALUES('2022-01-01','2022-01-01 12:00:00 pst',1);
+INSERT INTO t_multi_keys_list_tmtz VALUES('2022-02-01','2022-02-01 12:00:00 pst',2);
+INSERT INTO t_multi_keys_list_tmtz VALUES('2022-02-02','2022-02-02 12:00:00 pst',2);
+INSERT INTO t_multi_keys_list_tmtz VALUES('2022-02-03','2022-02-03 12:00:00 pst',2);
+INSERT INTO t_multi_keys_list_tmtz VALUES('2022-03-01','2022-03-01 12:00:00 pst',3);
+INSERT INTO t_multi_keys_list_tmtz VALUES('2022-03-02','2022-03-02 12:00:00 pst',3);
+SELECT * FROM t_multi_keys_list_tmtz PARTITION(p2) ORDER BY a,b,c;
+
+\! @abs_bindir@/gs_dump dump_partition_db -p @portstring@ -f @abs_bindir@/dump_listpart_test.tar -F t >/dev/null 2>&1; echo $?
+\! @abs_bindir@/gs_restore -d restore_partition_db -p @portstring@ @abs_bindir@/dump_listpart_test.tar >/dev/null 2>&1; echo $?
+
+\c restore_partition_db
+
+\d+ t_multi_keys_list
+SELECT pg_get_tabledef('t_multi_keys_list'::regclass);
+INSERT INTO t_multi_keys_list VALUES('''',2,'2022-12-31');
+SELECT * FROM t_multi_keys_list PARTITION(p2) ORDER BY b,a,c;
+
+SELECT pg_get_tabledef('t_part_by_key_num'::regclass);
+
+SELECT pg_get_tabledef('t_multi_keys_list_tmtz'::regclass);
+SELECT * FROM t_multi_keys_list_tmtz PARTITION(p2) ORDER BY a,b,c;
+\c regression
+drop database if exists restore_partition_db;
+drop database if exists dump_partition_db;
diff --git a/src/test/regress/input/event_dump_audit.source b/src/test/regress/input/event_dump_audit.source
new file mode 100644
index 000000000..b34dcc380
--- /dev/null
+++ b/src/test/regress/input/event_dump_audit.source
@@ -0,0 +1,46 @@
+--audit test
+
+drop database if exists event_audit_b;
+create database event_audit_b with dbcompatibility 'b';
+\c event_audit_b
+
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=268435455" > /dev/null 2>&1
+\! sleep 1s
+
+drop event if exists e;
+show audit_system_object;
+create event e on schedule at '3000-01-01 00:00:00' disable do select 1;
+select pg_sleep(10);
+select detail_info from pg_query_audit(trunc((localtimestamp - interval '1' minute), 'second'), trunc(localtimestamp, 'second'))
+where database = 'event_audit_b' AND type='ddl_event';
+drop event if exists e;
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object" > /dev/null 2>&1
+
+\c regression
+drop database if exists event_audit_b;
+
+--gs_dump
+drop database if exists dump_ev;
+create database dump_ev with dbcompatibility 'b';
+\c dump_ev
+create user event_dump_a with sysadmin password 'Test_event1';
+set role event_dump_a password 'Test_event1';
+create event e on schedule at '3000-01-01 00:00:00' disable do select 1;
+create event public.ea on schedule at '3000-01-01 00:00:00' disable do select 1;
+select job_name from pg_job where dbname='dump_ev';
+
+\c dump_ev
+\! @abs_bindir@/gs_dump dump_ev -p @portstring@ -f @abs_bindir@/dump_ev.tar -n public -F t >/dev/null 2>&1; echo $?
+
+drop database if exists restore_event_dump_db;
+create database restore_event_dump_db with dbcompatibility 'b';
+
+\! @abs_bindir@/gs_restore -d restore_event_dump_db -p @portstring@ @abs_bindir@/dump_ev.tar >/dev/null 2>&1; echo $?
+
+\c restore_event_dump_db
+select job_name, job_status,failure_msg from pg_job where dbname='restore_event_dump_db';
+
+\c regression
+
+drop database if exists dump_ev;
+drop database if exists restore_event_dump_db;
diff --git a/src/test/regress/input/gs_global_config_audit.source b/src/test/regress/input/gs_global_config_audit.source
index d75867c7d..1b454e245 100644
--- a/src/test/regress/input/gs_global_config_audit.source
+++ b/src/test/regress/input/gs_global_config_audit.source
@@ -1,12 +1,12 @@
\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_enabled=on" > /dev/null 2>&1
\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=33554431" > /dev/null 2>&1
-SELECT object_name,detail_info FROM pg_query_audit('2022-02-01 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_globalconfig';
-select * from gs_global_config;
+SELECT * FROM pg_delete_audit('2022-02-01 9:30:00', '2031-12-12 22:00:00');
ALTER GLOBAL CONFIGURATION with(lockwait_timeout=2000, lockwait_interval=2);
ALTER GLOBAL CONFIGURATION with(last_catchup_threshold=5000);
-select * from gs_global_config;
+select * from gs_global_config where name like '%lockwait%' or name like '%last_catchup_threshold%';
DROP GLOBAL CONFIGURATION lockwait_timeout;
DROP GLOBAL CONFIGURATION last_catchup_threshold, lockwait_interval;
-select * from gs_global_config;
-SELECT object_name,detail_info FROM pg_query_audit('2022-02-01 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_globalconfig';
+select * from gs_global_config where name like '%lockwait%' or name like '%last_catchup_threshold%';
+SELECT object_name,detail_info FROM pg_query_audit('2022-02-01 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_globalconfig' and (detail_info like '%lockwait%' or detail_info like '%last_catchup_threshold%');
\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "reset audit_system_object" > /dev/null 2>&1
+
diff --git a/src/test/regress/input/hw_audit_client.source b/src/test/regress/input/hw_audit_client.source
new file mode 100644
index 000000000..5de23e325
--- /dev/null
+++ b/src/test/regress/input/hw_audit_client.source
@@ -0,0 +1,110 @@
+CREATE DATABASE db_audit_client;
+\c db_audit_client
+
+-- set guc parameter
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state_select=1" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state=1" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=134217727" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "no_audit_client" > /dev/null 2>&1
+
+-- clear audit log
+SELECT pg_delete_audit(current_date, current_date + interval '24 hours');
+
+-- set no audit client
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "no_audit_client ='test_gsql@[local], gsql@[local]_test,test_gsql@[local]_test'" > /dev/null 2>&1
+\! sleep 1
+
+-- crerate table
+DROP TABLE IF EXISTS t_audit_client;
+CREATE TABLE t_audit_client (id INTEGER, col1 VARCHAR(20));
+
+-- query audit log, count > 0
+SELECT (SELECT count(detail_info) FROM pg_query_audit(current_date,current_date + interval '24 hours') WHERE client_conninfo = 'gsql@[local]') > 0 AS count_gsql;
+
+-- set no audit client
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "no_audit_client =',, gsql@[local] , ,'" > /dev/null 2>&1
+\! sleep 1
+-- clear audit log
+SELECT pg_delete_audit(current_date, current_date + interval '24 hours');
+
+-- generate audit logs
+DO $$DECLARE i record;
+BEGIN
+ FOR i IN 1..10
+ LOOP
+ execute 'INSERT INTO t_audit_client VALUES (' || i || ', ''audit'');';
+ execute 'SELECT * FROM t_audit_client;';
+ END LOOP;
+END$$;
+
+-- query audit log, count = 0
+SELECT (SELECT count(detail_info) FROM pg_query_audit(current_date,current_date + interval '24 hours') WHERE client_conninfo = 'gsql@[local]') > 0 AS count_gsql;
+
+-- set no_audit_client
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "no_audit_client ='audit gsql@[local]'" > /dev/null 2>&1
+\! sleep 1
+
+-- change current application name
+SET application_name TO 'audit gsql';
+SELECT pg_delete_audit(current_date, current_date + interval '24 hours');
+
+-- generate audit logs
+DO $$DECLARE i record;
+BEGIN
+ FOR i IN 1..10
+ LOOP
+ execute 'INSERT INTO t_audit_client VALUES (' || i || ', ''audit'');';
+ execute 'SELECT * FROM t_audit_client;';
+ END LOOP;
+END$$;
+
+-- query audit log, count = 0
+SELECT (SELECT count(detail_info) FROM pg_query_audit(current_date,current_date + interval '24 hours') WHERE client_conninfo = 'audit gsql@[local]') > 0 AS count_gsql;
+
+-- change current application name
+SET application_name TO 'gsql';
+SELECT pg_delete_audit(current_date, current_date + interval '24 hours');
+
+-- generate audit logs
+DO $$DECLARE i record;
+BEGIN
+ FOR i IN 1..10
+ LOOP
+ execute 'INSERT INTO t_audit_client VALUES (' || i || ', ''audit'');';
+ execute 'SELECT * FROM t_audit_client;';
+ END LOOP;
+END$$;
+
+-- query audit log, count > 0
+SELECT (SELECT count(detail_info) FROM pg_query_audit(current_date,current_date + interval '24 hours') WHERE client_conninfo = 'gsql@[local]') > 0 AS count_gsql;
+
+-- change current application name
+SET application_name TO audit;
+SELECT pg_delete_audit(current_date, current_date + interval '24 hours');
+
+-- generate audit logs
+DO $$DECLARE i record;
+BEGIN
+ FOR i IN 1..10
+ LOOP
+ execute 'INSERT INTO t_audit_client VALUES (' || i || ', ''audit'');';
+ execute 'SELECT * FROM t_audit_client;';
+ END LOOP;
+END$$;
+
+-- query audit log, count > 0
+SELECT (SELECT count(detail_info) FROM pg_query_audit(current_date,current_date + interval '24 hours') WHERE client_conninfo = 'audit@[local]') > 0 AS count_gsql;
+
+--reset guc parameter
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state_select" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=511" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "no_audit_client" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "no_audit_client" > /dev/null 2>&1
+
+-- clean env
+DROP TABLE IF EXISTS t_audit_client_client;
+
+\c regression
+CLEAN CONNECTION TO ALL FORCE FOR DATABASE db_audit_client;
+DROP DATABASE db_audit_client;
\ No newline at end of file
diff --git a/src/test/regress/input/hw_audit_full.source b/src/test/regress/input/hw_audit_full.source
new file mode 100644
index 000000000..16272506c
--- /dev/null
+++ b/src/test/regress/input/hw_audit_full.source
@@ -0,0 +1,129 @@
+CREATE DATABASE db_audit_full;
+\c db_audit_full
+
+-- close all the audit options
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_login_logout=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_database_process=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_database_process=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_user_locked=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_user_violation=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_grant_revoke=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_copy_exec=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_function_exec=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_process_set_parameter=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state_select=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "enable_copy_server_files=1" > /dev/null 2>&1
+\! users="user_audit1, test_user_audit2, user_audit2_test test_user_audit2_test , user_audit3 , $USER, user_audit4 user5" && cmd="full_audit_users='$users'" && @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "$cmd" > /dev/null 2>&1
+\! sleep 1
+
+-- clear audit log
+SELECT pg_delete_audit(current_date, current_date + interval '24 hours');
+
+-- superuser, create table
+CREATE TABLE t_audit_super (id INTEGER, col1 VARCHAR(20));
+DO $$DECLARE i record;
+BEGIN
+ FOR i IN 1..100
+ LOOP
+ execute 'INSERT INTO t_audit_super VALUES (' || i || ', ''audit'');';
+ END LOOP;
+END$$;
+
+-- superuser, create user
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP USER IF EXISTS user_audit1 CASCADE; CREATE USER user_audit1 identified by 'audit@2023'; GRANT ALL PRIVILEGES TO user_audit1;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP USER IF EXISTS user_audit2 CASCADE; CREATE USER user_audit2 identified by 'audit@2023'; GRANT ALL PRIVILEGES TO user_audit2;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP USER IF EXISTS user_audit3 CASCADE; CREATE USER user_audit3 identified by 'audit@2023';" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP USER IF EXISTS user_audit4 CASCADE; CREATE USER user_audit4 identified by 'audit@2023'; GRANT ALL PRIVILEGES TO user_audit4;" > /dev/null 2>&1
+
+-- user1, do sql execution
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -U user_audit1 -W audit@2023 -C -f @abs_srcdir@/data/audit_full_execute.sql > /dev/null 2>&1
+
+-- user_audit2, do sql execution
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -U user_audit2 -W audit@2023 -C -f @abs_srcdir@/data/audit_full_execute.sql > /dev/null 2>&1
+
+-- user_audit4, do sql execution
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -U user_audit4 -W audit@2023 -C -f @abs_srcdir@/data/audit_full_execute.sql > /dev/null 2>&1
+
+-- 用户登录、注销审计 audit_login_logout
+-- login_success
+-- user_logout
+-- user_audit3, login_failed
+\! @abs_bindir@/gsql -d postgres -p @portstring@ -U user_audit3 -W audit@2022 -c "CREATE TABLE t_audit_login (id INTEGER, col1 VARCHAR(20));"
+
+-- 用户访问越权审计 audit_user_violation
+-- user_audit3, user_violation
+\! @abs_bindir@/gsql -d postgres -p @portstring@ -U user_audit3 -W audit@2023 -c "select * from pg_query_audit(current_date,current_date + interval '24 hours');"
+
+-- superuser only, ddl_directory
+CREATE OR REPLACE DIRECTORY dir as '/tmp/';
+
+-- superuser only, ddl_globalconfig
+ALTER GLOBAL CONFIGURATION with(audit_xid_info=1);
+ALTER GLOBAL CONFIGURATION with(audit_xid_info=0);
+
+-- COPY审计 audit_copy_exec
+-- superuser only, copy_to
+COPY t_audit_super TO '@abs_srcdir@/data/t_audit.data';
+-- superuser only, copy_from
+CREATE TABLE t_audit_super_copy (id INTEGER, col1 VARCHAR(20));
+COPY t_audit_super_copy FROM '@abs_srcdir@/data/t_audit.data';
+
+-- 数据库启动、停止、恢复和切换审计 audit_database_process superuser only
+-- null user, system_stop
+\! @abs_bindir@/gs_ctl stop -D @abs_srcdir@/tmp_check/datanode1/ > /dev/null 2>&1
+-- null user, system_start
+\! @abs_bindir@/gs_ctl start -D @abs_srcdir@/tmp_check/datanode1/ > /dev/null 2>&1
+-- null user, system_recover
+-- null user, system_switch
+--\! @abs_bindir@/gs_ctl switchover -f -m fast
+
+-- superuser, create query function and scale
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -f @abs_srcdir@/data/audit_full_superuser.sql > /dev/null 2>&1
+
+-- audit query
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "SELECT func_count_audit(content, 'user_audit1', 49) AS is_audit_type_exist_user_audit1 FROM t_audit_type WHERE id = 1;"
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "SELECT func_count_audit(content, 'user_audit2', 49) AS is_audit_type_exist_user_audit2 FROM t_audit_type WHERE id = 1;"
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "SELECT func_count_audit(content, 'user_audit4', 49) AS is_audit_type_exist_user_audit4 FROM t_audit_type WHERE id = 1;"
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "SELECT func_count_audit(content, 'user_audit3', 2) AS is_audit_type_exist_user_audit3 FROM t_audit_type WHERE id = 2;"
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "SELECT func_count_audit(content, (select current_user), 11) AS is_audit_type_exist_superuser FROM t_audit_type WHERE id = 3;"
+
+-- rename user_audit1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "ALTER USER user_audit1 RENAME TO user_audit1_new"
+
+-- user_audit1_new do sql execution
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -U user_audit1_new -W audit@2023 -C -f @abs_srcdir@/data/audit_full_execute.sql > /dev/null 2>&1
+
+-- audit query
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "SELECT func_count_audit(content, 'user_audit1_new', 49) AS is_audit_type_exist_user_audit1_new FROM t_audit_type WHERE id = 1;"
+
+--reset guc parameter
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_login_logout" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_database_process" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_database_process" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_user_locked" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_user_violation=1" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_grant_revoke" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_copy_exec" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_function_exec" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_process_set_parameter=1" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state=1" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state_select" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "enable_copy_server_files" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "full_audit_users" > /dev/null 2>&1
+
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP USER IF EXISTS user_audit1 CASCADE;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP USER IF EXISTS user_audit1_new CASCADE;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP USER IF EXISTS user_audit2 CASCADE;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP USER IF EXISTS user_audit3 CASCADE;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP USER IF EXISTS user_audit4 CASCADE;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP TABLE IF EXISTS t_audit_type;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP TABLE IF EXISTS t_audit_super;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP TABLE IF EXISTS t_audit_super_copy;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP FUNCTION IF EXISTS func_count_audit;" > /dev/null 2>&1
+
+\c regression
+CLEAN CONNECTION TO ALL FORCE FOR DATABASE db_audit_full;
+DROP DATABASE db_audit_full;
\ No newline at end of file
diff --git a/src/test/regress/input/hw_audit_system_func.source b/src/test/regress/input/hw_audit_system_func.source
new file mode 100644
index 000000000..07df21201
--- /dev/null
+++ b/src/test/regress/input/hw_audit_system_func.source
@@ -0,0 +1,134 @@
+CREATE DATABASE db_audit_system_func;
+\c db_audit_system_func
+
+-- set guc parameter
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=134217727" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_function_exec=1" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_function_exec=1" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "enable_cbm_tracking=on" > /dev/null 2>&1
+
+-- 系统管理函数 配置设置函数
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+SELECT set_config('log_statement_stats', 'off', false);
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%set_config%';
+
+-- 系统管理函数 服务器信号函数
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+SELECT pg_cancel_backend(139989266462464);
+SELECT pg_cancel_session(139856237819648, 139856237819648);
+SELECT pg_reload_conf();
+SELECT pg_rotate_logfile();
+SELECT pg_terminate_session(139855736600320, 139855736600320);
+SELECT pg_terminate_backend(140298793514752);
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_cancel_backend%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_cancel_session%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_reload_conf%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_rotate_logfile%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_terminate_session%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_terminate_backend%';
+
+-- 系统管理函数 备份恢复控制函数
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_system_func -C -c "SELECT pg_create_restore_point('restore_audit');" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_system_func -C -c "SELECT pg_start_backup('restore_audit');" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_system_func -C -c "SELECT pg_stop_backup();" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_system_func -C -c "SELECT pg_switch_xlog();" > /dev/null 2>&1
+
+SELECT pg_cbm_get_merged_file('0/0', '0/0');
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_system_func -C -c "SELECT gs_roach_switch_xlog();" > /dev/null 2>&1
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_create_restore_point%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_start_backup%';
+SELECT type, object_name from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_stop_backup%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_switch_xlog%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_cbm_get_merged_file%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%gs_roach_switch_xlog%';
+
+-- 系统管理函数 恢复控制函数
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+SELECT pg_last_xlog_receive_location();
+SELECT gs_pitr_clean_history_global_barriers('1489739011');
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_last_xlog_receive_location%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%gs_pitr_clean_history_global_barriers%';
+
+-- 系统管理函数 双集群容灾控制函数
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+\! echo $GAUSSHOME | sed 's/^.*tmp_check\/install\///g' | xargs -I{} rm -f {}/bin/obsserver.key.cipher
+\! echo $GAUSSHOME | sed 's/^.*tmp_check\/install\///g' | xargs -I{} rm -f {}/bin/obsserver.key.rand
+\! echo $GAUSSHOME | sed 's/^.*tmp_check\/install\///g' | xargs -I{} @abs_bindir@/gs_guc generate -S 123456@pwd -D {}/bin -o obsserver > /dev/null 2>&1
+SELECT * from pg_create_physical_replication_slot_extern('prs_audit', false, 'NAS;/data/nas/media/openGauss_uuid/dn1;0;0', false);
+SELECT gs_set_obs_delete_location('0/54000000');
+SELECT gs_hadr_do_switchover();
+SELECT gs_set_obs_delete_location_with_slotname('0/0', '0/0');
+SELECT gs_streaming_dr_in_switchover();
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_create_physical_replication_slot_extern%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%gs_set_obs_delete_location%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%gs_hadr_do_switchover%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%gs_set_obs_delete_location_with_slotname%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%gs_streaming_dr_in_switchover%';
+
+-- 系统管理函数 双集群容灾查询函数
+
+-- 系统管理函数 咨询锁函数
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+SELECT pg_advisory_lock(123);
+SELECT pg_advisory_lock_shared(123);
+SELECT pg_advisory_unlock(123);
+SELECT pg_advisory_unlock_shared(123);
+SELECT pg_advisory_unlock_all();
+SELECT pg_advisory_xact_lock(123);
+SELECT pg_advisory_xact_lock_shared(123);
+SELECT pg_try_advisory_lock(123);
+SELECT pg_try_advisory_lock_shared(123);
+SELECT pg_try_advisory_xact_lock(123);
+SELECT pg_try_advisory_xact_lock_shared(123);
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_advisory_lock%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_advisory_lock_shared%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_advisory_unlock%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_advisory_unlock_shared%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_advisory_unlock_all%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_advisory_xact_lock%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_advisory_xact_lock_shared%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_try_advisory_lock%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_try_advisory_lock_shared%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_try_advisory_xact_lock%';
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_try_advisory_xact_lock_shared%';
+
+-- 系统管理函数 段页式存储函数
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+CREATE TABLESPACE tsp_audit_sysfunc RELATIVE LOCATION 'audit_tablespace/audit_tablespace_1';
+SELECT local_space_shrink('tsp_audit_sysfunc', (SELECT current_database()));
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%local_space_shrink%';
+
+-- 故障注入系统函数
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+SELECT gs_fault_inject(1,'1','1','1','1','1');
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%gs_fault_inject%';
+
+-- 数据损坏检测修复函数
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+SELECT * from local_clear_bad_block_info();
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%local_clear_bad_block_info%';
+
+-- 非白名单系统函数 不审计
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+SELECT current_setting('audit_thread_num');
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_system_func -C -c "SELECT pg_current_xlog_location();" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_system_func -C -c "SELECT pg_database_size('db_audit_system_func');" > /dev/null 2>&1
+
+SELECT count(*) from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec';
+
+--reset guc parameter
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_function_exec" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_function_exec" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "wal_level" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "enable_cbm_tracking" > /dev/null 2>&1
+
+--clean env
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_system_func -C -c "DROP TABLESPACE IF EXISTS tsp_audit_sysfunc;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_system_func -C -c "SELECT * from pg_drop_replication_slot('prs_audit');" > /dev/null 2>&1
+
+\c regression
+CLEAN CONNECTION TO ALL FORCE FOR DATABASE db_audit_system_func;
+DROP DATABASE db_audit_system_func;
\ No newline at end of file
diff --git a/src/test/regress/input/hw_subpartition_tablespace.source b/src/test/regress/input/hw_subpartition_tablespace.source
index afbd21d77..7e5303744 100644
--- a/src/test/regress/input/hw_subpartition_tablespace.source
+++ b/src/test/regress/input/hw_subpartition_tablespace.source
@@ -884,6 +884,84 @@ ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH4 ADD SUBPARTITION P_HASH4_5 VAL
SELECT pg_get_tabledef('t_hash_list4');
DROP TABLE t_hash_list4;
+--
+----test create index with tablespace----
+--
+CREATE TABLE t_range_list(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1
+PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2)
+(
+ PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1
+ (
+ SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1,
+ SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2,
+ SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3,
+ SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20)
+ ),
+ PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2
+ (
+ SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1,
+ SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2,
+ SUBPARTITION P_RANGE2_3 VALUES (DEFAULT)
+ ),
+ PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3
+ (
+ SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1,
+ SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2,
+ SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3,
+ SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20)
+ ),
+ PARTITION P_RANGE4 VALUES LESS THAN (20)
+ (
+ SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1,
+ SUBPARTITION P_RANGE4_2 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts2
+ ),
+ PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3,
+ PARTITION P_RANGE6 VALUES LESS THAN (30)
+);
+
+CREATE INDEX t_range_list_idx ON t_range_list(c1,c2) LOCAL
+(
+ PARTITION idx_p1(
+ SUBPARTITION idx_p1_1 TABLESPACE hw_subpartition_tablespace_ts1,
+ SUBPARTITION idx_p1_2 TABLESPACE hw_subpartition_tablespace_ts2,
+ SUBPARTITION idx_p1_3 TABLESPACE hw_subpartition_tablespace_ts3,
+ SUBPARTITION idx_p1_4
+ ),
+ PARTITION idx_p2 TABLESPACE hw_subpartition_tablespace_ts2(
+ SUBPARTITION idx_p2_1,
+ SUBPARTITION idx_p2_2,
+ SUBPARTITION idx_p2_3
+ ),
+ PARTITION idx_p3 TABLESPACE hw_subpartition_tablespace_ts2(
+ SUBPARTITION idx_p3_1 TABLESPACE hw_subpartition_tablespace_ts1,
+ SUBPARTITION idx_p3_2 TABLESPACE hw_subpartition_tablespace_ts2,
+ SUBPARTITION idx_p3_3 TABLESPACE hw_subpartition_tablespace_ts3,
+ SUBPARTITION idx_p3_4
+ ),
+ PARTITION idx_p4(
+ SUBPARTITION idx_p4_1,
+ SUBPARTITION idx_p4_2 TABLESPACE hw_subpartition_tablespace_ts2
+ ),
+ PARTITION idx_p5 TABLESPACE hw_subpartition_tablespace_ts3(
+ SUBPARTITION idx_p5_1
+ ),
+ PARTITION idx_p6(
+ SUBPARTITION idx_p6_1 TABLESPACE hw_subpartition_tablespace_ts2
+ )
+) TABLESPACE hw_subpartition_tablespace_ts1;
+
+SELECT p.relname, t.spcname FROM pg_partition p, pg_class c, pg_namespace n, pg_tablespace t
+WHERE p.parentid = c.oid
+ AND c.relname='t_range_list_idx'
+ AND c.relnamespace=n.oid
+ AND n.nspname=CURRENT_SCHEMA
+ AND p.reltablespace = t.oid
+ORDER BY p.relname;
+
+SELECT pg_get_indexdef('hw_subpartition_tablespace.t_range_list_idx'::regclass);
+
+DROP TABLE t_range_list;
+
--finish
drop tablespace hw_subpartition_tablespace_ts1;
drop tablespace hw_subpartition_tablespace_ts2;
diff --git a/src/test/regress/input/postgres_fdw.source b/src/test/regress/input/postgres_fdw.source
deleted file mode 100644
index 95c31f09f..000000000
--- a/src/test/regress/input/postgres_fdw.source
+++ /dev/null
@@ -1,843 +0,0 @@
--- ===================================================================
--- create FDW objects
--- ===================================================================
-
-CREATE EXTENSION postgres_fdw;
-
-CREATE SERVER testserver1 FOREIGN DATA WRAPPER postgres_fdw;
-CREATE SERVER loopback FOREIGN DATA WRAPPER postgres_fdw
- OPTIONS (dbname 'regression', port '@portstring@');
-
-CREATE USER MAPPING FOR public SERVER testserver1
- OPTIONS (user 'value', password 'value');
-CREATE USER MAPPING FOR CURRENT_USER SERVER loopback;
-
--- ===================================================================
--- create objects used through FDW loopback server
--- ===================================================================
-CREATE TYPE user_enum AS ENUM ('foo', 'bar', 'buz');
-CREATE SCHEMA "S 1";
-CREATE TABLE "S 1"."T 1" (
- "C 1" int NOT NULL,
- c2 int NOT NULL,
- c3 text,
- c4 timestamptz,
- c5 timestamp,
- c6 varchar(10),
- c7 char(10),
- c8 user_enum,
- CONSTRAINT t1_pkey PRIMARY KEY ("C 1")
-);
-CREATE TABLE "S 1"."T 2" (
- c1 int NOT NULL,
- c2 text,
- CONSTRAINT t2_pkey PRIMARY KEY (c1)
-);
-CREATE TABLE "S 1"."T 3" (
- c1 int NOT NULL,
- c2 int NOT NULL,
- c3 text,
- CONSTRAINT t3_pkey PRIMARY KEY (c1)
-);
-CREATE TABLE "S 1"."T 4" (
- c1 int NOT NULL,
- c2 int NOT NULL,
- c3 text,
- CONSTRAINT t4_pkey PRIMARY KEY (c1)
-);
-
-INSERT INTO "S 1"."T 1"
- SELECT id,
- id % 10,
- to_char(id, 'FM00000'),
- '1970-01-01'::timestamptz + ((id % 100) || ' days')::interval,
- '1970-01-01'::timestamp + ((id % 100) || ' days')::interval,
- id % 10,
- id % 10,
- 'foo'::user_enum
- FROM generate_series(1, 1000) id;
-INSERT INTO "S 1"."T 2"
- SELECT id,
- 'AAA' || to_char(id, 'FM000')
- FROM generate_series(1, 100) id;
-INSERT INTO "S 1"."T 3"
- SELECT id,
- id + 1,
- 'AAA' || to_char(id, 'FM000')
- FROM generate_series(1, 100) id;
-DELETE FROM "S 1"."T 3" WHERE c1 % 2 != 0; -- delete for outer join tests
-INSERT INTO "S 1"."T 4"
- SELECT id,
- id + 1,
- 'AAA' || to_char(id, 'FM000')
- FROM generate_series(1, 100) id;
-DELETE FROM "S 1"."T 4" WHERE c1 % 3 != 0; -- delete for outer join tests
-
-ANALYZE "S 1"."T 1";
-ANALYZE "S 1"."T 2";
-ANALYZE "S 1"."T 3";
-ANALYZE "S 1"."T 4";
-
--- ===================================================================
--- create local tables to check whether the grammer is support
--- ===================================================================
-CREATE TABLE local_ft1 (
- c1 int NOT NULL,
- "C 1" int NOT NULL,
- c2 int NOT NULL,
- c3 text,
- c4 timestamptz,
- c5 timestamp,
- c6 varchar(10),
- c7 char(10) default 'ft1',
- c8 user_enum
-);
-
-CREATE TABLE local_ft2 (
- c1 int NOT NULL,
- "C 1" int NOT NULL,
- c2 int NOT NULL,
- c3 text,
- c4 timestamptz,
- c5 timestamp,
- c6 varchar(10),
- c7 char(10) default 'ft2',
- c8 user_enum
-);
--- ===================================================================
--- create foreign tables
--- ===================================================================
-CREATE FOREIGN TABLE ft1 (
- c0 int,
- c1 int NOT NULL,
- c2 int NOT NULL,
- c3 text,
- c4 timestamptz,
- c5 timestamp,
- c6 varchar(10),
- c7 char(10) default 'ft1',
- c8 user_enum
-) SERVER loopback;
-ALTER FOREIGN TABLE ft1 DROP COLUMN c0;
-
-CREATE FOREIGN TABLE ft2 (
- c1 int NOT NULL,
- c2 int NOT NULL,
- cx int,
- c3 text,
- c4 timestamptz,
- c5 timestamp,
- c6 varchar(10),
- c7 char(10) default 'ft2',
- c8 user_enum
-) SERVER loopback;
-ALTER FOREIGN TABLE ft2 DROP COLUMN cx;
-
-CREATE FOREIGN TABLE ft4 (
- c1 int NOT NULL,
- c2 int NOT NULL,
- c3 text
-) SERVER loopback OPTIONS (schema_name 'S 1', table_name 'T 3');
-
-CREATE FOREIGN TABLE ft5 (
- c1 int NOT NULL,
- c2 int NOT NULL,
- c3 text
-) SERVER loopback OPTIONS (schema_name 'S 1', table_name 'T 4');
-
--- ===================================================================
--- tests for validator
--- ===================================================================
--- requiressl and some other parameters are omitted because
--- valid values for them depend on configure options
-ALTER SERVER testserver1 OPTIONS (
- use_remote_estimate 'false',
- updatable 'true',
- fdw_startup_cost '123.456',
- fdw_tuple_cost '0.123',
- service 'value',
- connect_timeout 'value',
- dbname 'value',
- host 'value',
- hostaddr 'value',
- port 'value',
- --client_encoding 'value',
- application_name 'value',
- --fallback_application_name 'value',
- keepalives 'value',
- keepalives_idle 'value',
- keepalives_interval 'value',
- -- requiressl 'value',
- sslcompression 'value',
- sslmode 'value',
- sslcert 'value',
- sslkey 'value',
- sslrootcert 'value',
- sslcrl 'value',
- --requirepeer 'value',
- krbsrvname 'value'
- --gsslib 'value'
- --replication 'value'
-);
-ALTER USER MAPPING FOR public SERVER testserver1
- OPTIONS (DROP user, DROP password);
-ALTER FOREIGN TABLE ft1 OPTIONS (schema_name 'S 1', table_name 'T 1');
-ALTER FOREIGN TABLE ft2 OPTIONS (schema_name 'S 1', table_name 'T 1');
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 OPTIONS (column_name 'C 1');
-ALTER FOREIGN TABLE ft2 ALTER COLUMN c1 OPTIONS (column_name 'C 1');
-\det+
-
--- Test that alteration of server options causes reconnection
--- Remote's errors might be non-English, so hide them to ensure stable results
-\set VERBOSITY terse
-SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work
-ALTER SERVER loopback OPTIONS (SET dbname 'no such database');
-SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail
-DO $d$
- BEGIN
- EXECUTE $$ALTER SERVER loopback
- OPTIONS (SET dbname '$$||current_database()||$$')$$;
- END;
-$d$;
-SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work again
-
--- Test that alteration of user mapping options causes reconnection
-ALTER USER MAPPING FOR CURRENT_USER SERVER loopback
- OPTIONS (ADD user 'no such user');
-SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail
-ALTER USER MAPPING FOR CURRENT_USER SERVER loopback
- OPTIONS (DROP user);
-SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work again
-\set VERBOSITY default
-
--- Now we should be able to run ANALYZE.
--- To exercise multiple code paths, we use local stats on ft1
--- and remote-estimate mode on ft2.
-ANALYZE ft1;
-ANALYZE ft4;
-ANALYZE ft5;
-ALTER FOREIGN TABLE ft2 OPTIONS (use_remote_estimate 'true');
-
--- ===================================================================
--- simple queries
--- ===================================================================
--- single table, with/without alias
-EXPLAIN (COSTS false) SELECT * FROM ft1 ORDER BY c3, c1 OFFSET 100 LIMIT 10;
-SELECT * FROM ft1 ORDER BY c3, c1 OFFSET 100 LIMIT 10;
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
-SELECT * FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
--- whole-row reference
-EXPLAIN (VERBOSE, COSTS false) SELECT t1 FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
-SELECT t1 FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
--- empty result
-SELECT * FROM ft1 WHERE false;
--- with WHERE clause
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1';
-SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1';
--- with FOR UPDATE/SHARE
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE;
-SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE;
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = 102 FOR SHARE;
-SELECT * FROM ft1 t1 WHERE c1 = 102 FOR SHARE;
--- aggregate
-SELECT COUNT(*) FROM ft1 t1;
--- join two tables
-SELECT t1.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
--- subquery
-SELECT * FROM ft1 t1 WHERE t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 <= 10) ORDER BY c1;
--- subquery+MAX
-SELECT * FROM ft1 t1 WHERE t1.c3 = (SELECT MAX(c3) FROM ft2 t2) ORDER BY c1;
--- used in CTE
-WITH t1 AS (SELECT * FROM ft1 WHERE c1 <= 10) SELECT t2.c1, t2.c2, t2.c3, t2.c4 FROM t1, ft2 t2 WHERE t1.c1 = t2.c1 ORDER BY t1.c1;
--- fixed values
-SELECT 'fixed', NULL FROM ft1 t1 WHERE c1 = 1;
--- user-defined operator/function
-CREATE FUNCTION postgres_fdw_abs(int) RETURNS int AS $$
-BEGIN
-RETURN abs($1);
-END
-$$ LANGUAGE plpgsql IMMUTABLE;
-CREATE OPERATOR === (
- LEFTARG = int,
- RIGHTARG = int,
- PROCEDURE = int4eq,
- COMMUTATOR = ===,
- NEGATOR = !==
-);
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = postgres_fdw_abs(t1.c2);
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2;
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = abs(t1.c2);
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = t1.c2;
-
--- ===================================================================
--- WHERE with remotely-executable conditions
--- ===================================================================
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = 1; -- Var, OpExpr(b), Const
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = 100 AND t1.c2 = 0; -- BoolExpr
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 IS NULL; -- NullTest
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 IS NOT NULL; -- NullTest
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE round(abs(c1), 0) = 1; -- FuncExpr
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = -c1; -- OpExpr(l)
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE 1 = c1!; -- OpExpr(r)
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE (c1 IS NOT NULL) IS DISTINCT FROM (c1 IS NOT NULL); -- DistinctExpr
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = ANY(ARRAY[c2, 1, c1 + 0]); -- ScalarArrayOpExpr
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = (ARRAY[c1,c2,3])[1]; -- ArrayRef
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c6 = E'foo''s\\bar'; -- check special chars
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c8 = 'foo'; -- can't be sent to remote
--- parameterized remote path
-EXPLAIN (VERBOSE, COSTS false)
- SELECT * FROM ft2 a, ft2 b WHERE a.c1 = 47 AND b.c1 = a.c2;
-SELECT * FROM ft2 a, ft2 b WHERE a.c1 = 47 AND b.c1 = a.c2;
--- check both safe and unsafe join conditions
-EXPLAIN (VERBOSE, COSTS false)
- SELECT * FROM ft2 a, ft2 b
- WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = upper(a.c7);
-SELECT * FROM ft2 a, ft2 b
-WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = upper(a.c7);
--- bug before 9.3.5 due to sloppy handling of remote-estimate parameters
-SELECT * FROM ft1 WHERE c1 = ANY (ARRAY(SELECT c1 FROM ft2 WHERE c1 < 5));
-SELECT * FROM ft2 WHERE c1 = ANY (ARRAY(SELECT c1 FROM ft1 WHERE c1 < 5));
-
--- bug #15613: bad plan for foreign table scan with lateral reference
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT ref_0.c2, subq_1.*
-FROM
- "S 1"."T 1" AS ref_0,
- LATERAL (
- SELECT ref_0."C 1" c1, subq_0.*
- FROM (SELECT ref_0.c2, ref_1.c3
- FROM ft1 AS ref_1) AS subq_0
- RIGHT JOIN ft2 AS ref_3 ON (subq_0.c3 = ref_3.c3)
- ) AS subq_1
-WHERE ref_0."C 1" < 10 AND subq_1.c3 = '00001'
-ORDER BY ref_0."C 1";
-
--- use local table to check whether this sql supported
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT ref_0.c2, subq_1.*
-FROM
- "S 1"."T 1" AS ref_0,
- LATERAL (
- SELECT ref_0."C 1" c1, subq_0.*
- FROM (SELECT ref_0.c2, ref_1.c3
- FROM local_ft1 AS ref_1) AS subq_0
- RIGHT JOIN local_ft2 AS ref_3 ON (subq_0.c3 = ref_3.c3)
- ) AS subq_1
-WHERE ref_0."C 1" < 10 AND subq_1.c3 = '00001'
-ORDER BY ref_0."C 1";
-
-SELECT ref_0.c2, subq_1.*
-FROM
- "S 1"."T 1" AS ref_0,
- LATERAL (
- SELECT ref_0."C 1" c1, subq_0.*
- FROM (SELECT ref_0.c2, ref_1.c3
- FROM ft1 AS ref_1) AS subq_0
- RIGHT JOIN ft2 AS ref_3 ON (subq_0.c3 = ref_3.c3)
- ) AS subq_1
-WHERE ref_0."C 1" < 10 AND subq_1.c3 = '00001'
-ORDER BY ref_0."C 1";
-
--- use local table to check whether this sql supported
-SELECT ref_0.c2, subq_1.*
-FROM
- "S 1"."T 1" AS ref_0,
- LATERAL (
- SELECT ref_0."C 1" c1, subq_0.*
- FROM (SELECT ref_0.c2, ref_1.c3
- FROM local_ft1 AS ref_1) AS subq_0
- RIGHT JOIN local_ft2 AS ref_3 ON (subq_0.c3 = ref_3.c3)
- ) AS subq_1
-WHERE ref_0."C 1" < 10 AND subq_1.c3 = '00001'
-ORDER BY ref_0."C 1";
-
--- ===================================================================
--- parameterized queries
--- ===================================================================
--- simple join
-PREPARE st1(int, int) AS SELECT t1.c3, t2.c3 FROM ft1 t1, ft2 t2 WHERE t1.c1 = $1 AND t2.c1 = $2;
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st1(1, 2);
-EXECUTE st1(1, 1);
-EXECUTE st1(101, 101);
--- subquery using stable function (can't be sent to remote)
-PREPARE st2(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND (c4) = '1970-01-17'::date) ORDER BY c1;
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st2(10, 20);
-EXECUTE st2(10, 20);
-EXECUTE st2(101, 121);
--- subquery using immutable function (can be sent to remote)
-PREPARE st3(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND (c5) = '1970-01-17'::date) ORDER BY c1;
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st3(10, 20);
-EXECUTE st3(10, 20);
-EXECUTE st3(20, 30);
--- custom plan should be chosen initially
-PREPARE st4(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 = $1;
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
--- once we try it enough times, should switch to generic plan
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
--- value of $1 should not be sent to remote
-PREPARE st5(user_enum,int) AS SELECT * FROM ft1 t1 WHERE c8 = $1 and c1 = $2;
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
-EXECUTE st5('foo', 1);
-
--- altering FDW options requires replanning
-PREPARE st6 AS SELECT * FROM ft1 t1 WHERE t1.c1 = t1.c2;
-EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st6;
-PREPARE st7 AS INSERT INTO ft1 (c1,c2,c3) VALUES (1001,101,'foo');
-EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st7;
-ALTER TABLE "S 1"."T 1" RENAME TO "T 0";
-ALTER FOREIGN TABLE ft1 OPTIONS (SET table_name 'T 0');
-EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st6;
-EXECUTE st6;
-EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st7;
-ALTER TABLE "S 1"."T 0" RENAME TO "T 1";
-ALTER FOREIGN TABLE ft1 OPTIONS (SET table_name 'T 1');
-
--- cleanup
-DEALLOCATE st1;
-DEALLOCATE st2;
-DEALLOCATE st3;
-DEALLOCATE st4;
-DEALLOCATE st5;
-DEALLOCATE st6;
-DEALLOCATE st7;
-
--- System columns, except ctid, should not be sent to remote
-EXPLAIN (VERBOSE, COSTS false)
-SELECT * FROM ft1 t1 WHERE t1.tableoid = 'pg_class'::regclass LIMIT 1;
-SELECT * FROM ft1 t1 WHERE t1.tableoid = 'ft1'::regclass LIMIT 1;
-EXPLAIN (VERBOSE, COSTS false)
-SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1;
-SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1;
-EXPLAIN (VERBOSE, COSTS false)
-SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)';
-SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)';
-EXPLAIN (VERBOSE, COSTS false)
-SELECT ctid, * FROM ft1 t1 LIMIT 1;
-SELECT ctid, * FROM ft1 t1 LIMIT 1;
-
--- ===================================================================
--- used in pl/pgsql function
--- ===================================================================
-CREATE OR REPLACE FUNCTION f_test(p_c1 int) RETURNS int AS $$
-DECLARE
- v_c1 int;
-BEGIN
- SELECT c1 INTO v_c1 FROM ft1 WHERE c1 = p_c1 LIMIT 1;
- PERFORM c1 FROM ft1 WHERE c1 = p_c1 AND p_c1 = v_c1 LIMIT 1;
- RETURN v_c1;
-END;
-$$ LANGUAGE plpgsql;
-SELECT f_test(100);
-DROP FUNCTION f_test(int);
-
--- ===================================================================
--- conversion error
--- ===================================================================
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE int;
-SELECT * FROM ft1 WHERE c1 = 1; -- ERROR
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE user_enum;
-
--- ===================================================================
--- subtransaction
--- + local/remote error doesn't break cursor
--- ===================================================================
-BEGIN;
-DECLARE c CURSOR FOR SELECT * FROM ft1 ORDER BY c1;
-FETCH c;
-SAVEPOINT s;
-ERROR OUT; -- ERROR
-ROLLBACK TO s;
-FETCH c;
-SAVEPOINT s;
-SELECT * FROM ft1 WHERE 1 / (c1 - 1) > 0; -- ERROR
-ROLLBACK TO s;
-FETCH c;
-SELECT * FROM ft1 ORDER BY c1 LIMIT 1;
-COMMIT;
-
--- ===================================================================
--- test handling of collations
--- ===================================================================
-create table loct3 (f1 text collate "C" unique, f2 text, f3 varchar(10) unique);
-create foreign table ft3 (f1 text collate "C", f2 text, f3 varchar(10))
- server loopback options (table_name 'loct3', use_remote_estimate 'true');
-
--- can be sent to remote
-explain (verbose, costs off) select * from ft3 where f1 = 'foo';
-explain (verbose, costs off) select * from ft3 where f1 COLLATE "C" = 'foo';
-explain (verbose, costs off) select * from ft3 where f2 = 'foo';
-explain (verbose, costs off) select * from ft3 where f3 = 'foo';
-explain (verbose, costs off) select * from ft3 f, loct3 l
- where f.f3 = l.f3 and l.f1 = 'foo';
--- can't be sent to remote
-explain (verbose, costs off) select * from ft3 where f1 COLLATE "POSIX" = 'foo';
-explain (verbose, costs off) select * from ft3 where f1 = 'foo' COLLATE "C";
-explain (verbose, costs off) select * from ft3 where f2 COLLATE "C" = 'foo';
-explain (verbose, costs off) select * from ft3 where f2 = 'foo' COLLATE "C";
-explain (verbose, costs off) select * from ft3 f, loct3 l
- where f.f3 = l.f3 COLLATE "POSIX" and l.f1 = 'foo';
-
--- ===================================================================
--- test writable foreign table stuff
--- ===================================================================
-EXPLAIN (verbose, costs off)
-INSERT INTO ft2 (c1,c2,c3) SELECT c1+1000,c2+100, c3 || c3 FROM ft2 LIMIT 20;
-INSERT INTO ft2 (c1,c2,c3) SELECT c1+1000,c2+100, c3 || c3 FROM ft2 LIMIT 20;
-INSERT INTO ft2 (c1,c2,c3)
- VALUES (1101,201,'aaa'), (1102,202,'bbb'), (1103,203,'ccc') RETURNING *;
-INSERT INTO ft2 (c1,c2,c3) VALUES (1104,204,'ddd'), (1105,205,'eee');
-UPDATE ft2 SET c2 = c2 + 300, c3 = c3 || '_update3' WHERE c1 % 10 = 3;
-UPDATE ft2 SET c2 = c2 + 400, c3 = c3 || '_update7' WHERE c1 % 10 = 7 RETURNING *;
-EXPLAIN (verbose, costs off)
-UPDATE ft2 SET c2 = ft2.c2 + 500, c3 = ft2.c3 || '_update9', c7 = DEFAULT
- FROM ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 9;
-UPDATE ft2 SET c2 = ft2.c2 + 500, c3 = ft2.c3 || '_update9', c7 = DEFAULT
- FROM ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 9;
-EXPLAIN (verbose, costs off)
- DELETE FROM ft2 WHERE c1 % 10 = 5 RETURNING c1, c4;
-DELETE FROM ft2 WHERE c1 % 10 = 5 RETURNING c1, c4;
-EXPLAIN (verbose, costs off)
-DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2;
-DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2;
-SELECT c1,c2,c3,c4 FROM ft2 ORDER BY c1;
-EXPLAIN (verbose, costs off)
-INSERT INTO ft2 (c1,c2,c3) VALUES (9999,999,'foo') RETURNING tableoid::regclass;
-INSERT INTO ft2 (c1,c2,c3) VALUES (9999,999,'foo') RETURNING tableoid::regclass;
-EXPLAIN (verbose, costs off)
-UPDATE ft2 SET c3 = 'bar' WHERE c1 = 9999 RETURNING tableoid::regclass;
-UPDATE ft2 SET c3 = 'bar' WHERE c1 = 9999 RETURNING tableoid::regclass;
-EXPLAIN (verbose, costs off)
-DELETE FROM ft2 WHERE c1 = 9999 RETURNING tableoid::regclass;
-DELETE FROM ft2 WHERE c1 = 9999 RETURNING tableoid::regclass;
-
--- Test UPDATE/DELETE with RETURNING on a three-table join
-INSERT INTO ft2 (c1,c2,c3)
- SELECT id, id - 1200, to_char(id, 'FM00000') FROM generate_series(1201, 1300) id;
-EXPLAIN (verbose, costs off)
-UPDATE ft2 SET c3 = 'foo'
- FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1)
- WHERE ft2.c1 > 1200 AND ft2.c2 = ft4.c1
- RETURNING ft2, ft2.*, ft4, ft4.*; -- can be pushed down
-UPDATE ft2 SET c3 = 'foo'
- FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1)
- WHERE ft2.c1 > 1200 AND ft2.c2 = ft4.c1
- RETURNING ft2, ft2.*, ft4, ft4.*;
-EXPLAIN (verbose, costs off)
-DELETE FROM ft2
- USING ft4 LEFT JOIN ft5 ON (ft4.c1 = ft5.c1)
- WHERE ft2.c1 > 1200 AND ft2.c1 % 10 = 0 AND ft2.c2 = ft4.c1
- RETURNING 100; -- can be pushed down
-DELETE FROM ft2
- USING ft4 LEFT JOIN ft5 ON (ft4.c1 = ft5.c1)
- WHERE ft2.c1 > 1200 AND ft2.c1 % 10 = 0 AND ft2.c2 = ft4.c1
- RETURNING 100;
-DELETE FROM ft2 WHERE ft2.c1 > 1200;
-
--- Test that trigger on remote table works as expected
-CREATE OR REPLACE FUNCTION "S 1".F_BRTRIG() RETURNS trigger AS $$
-BEGIN
- NEW.c3 = NEW.c3 || '_trig_update';
- RETURN NEW;
-END;
-$$ LANGUAGE plpgsql;
-CREATE TRIGGER t1_br_insert BEFORE INSERT OR UPDATE
- ON "S 1"."T 1" FOR EACH ROW EXECUTE PROCEDURE "S 1".F_BRTRIG();
-
-INSERT INTO ft2 (c1,c2,c3) VALUES (1208, 818, 'fff') RETURNING *;
-INSERT INTO ft2 (c1,c2,c3,c6) VALUES (1218, 818, 'ggg', '(--;') RETURNING *;
-UPDATE ft2 SET c2 = c2 + 600 WHERE c1 % 10 = 8 AND c1 < 1200 RETURNING *;
-
--- Test errors thrown on remote side during update
-ALTER TABLE "S 1"."T 1" ADD CONSTRAINT c2positive CHECK (c2 >= 0);
-
-INSERT INTO ft1(c1, c2) VALUES(11, 12); -- duplicate key
-INSERT INTO ft1(c1, c2) VALUES(1111, -2); -- c2positive
-UPDATE ft1 SET c2 = -c2 WHERE c1 = 1; -- c2positive
-
--- Test savepoint/rollback behavior
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
-select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
-begin;
-update ft2 set c2 = 42 where c2 = 0;
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
-savepoint s1;
-update ft2 set c2 = 44 where c2 = 4;
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
-release savepoint s1;
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
-savepoint s2;
-update ft2 set c2 = 46 where c2 = 6;
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
-rollback to savepoint s2;
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
-release savepoint s2;
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
-savepoint s3;
-update ft2 set c2 = -2 where c2 = 42 and c1 = 10; -- fail on remote side
-rollback to savepoint s3;
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
-release savepoint s3;
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
--- none of the above is committed yet remotely
-select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
-commit;
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
-select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
-
--- ===================================================================
--- test copy
--- ===================================================================
-select count(*) from ft2;
-select * from ft2 order by c1 limit 10;
-\! rm -f ./foreigntable_ft2.data
-\COPY (select * from ft2) to './foreigntable_ft2.data';
-delete from ft2;
-select * from ft2 order by c1 limit 10;
-\COPY ft2 from './foreigntable_ft2.data';
-select * from ft2 order by c1 limit 10;
-select count(*) from ft2;
-\! rm -f ./foreigntable_ft2.data
-
--- ===================================================================
--- test serial columns (ie, sequence-based defaults)
--- ===================================================================
-create table loc1 (f1 serial, f2 text);
-create foreign table rem1 (f1 serial, f2 text)
- server loopback options(table_name 'loc1');
-select pg_catalog.setval('rem1_f1_seq', 10, false);
-insert into loc1(f2) values('hi');
-insert into rem1(f2) values('hi remote');
-insert into loc1(f2) values('bye');
-insert into rem1(f2) values('bye remote');
-select * from loc1;
-select * from rem1;
-
--- ===================================================================
--- test local triggers
--- ===================================================================
-
--- Trigger functions "borrowed" from triggers regress test.
-CREATE FUNCTION trigger_func() RETURNS trigger LANGUAGE plpgsql AS $$
-BEGIN
- RAISE NOTICE 'trigger_func(%) called: action = %, when = %, level = %',
- TG_ARGV[0], TG_OP, TG_WHEN, TG_LEVEL;
- RETURN NULL;
-END;$$;
-
-CREATE TRIGGER trig_stmt_before BEFORE DELETE OR INSERT OR UPDATE ON rem1
- FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
-CREATE TRIGGER trig_stmt_after AFTER DELETE OR INSERT OR UPDATE ON rem1
- FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
-
-CREATE OR REPLACE FUNCTION trigger_data() RETURNS trigger
-LANGUAGE plpgsql AS $$
-
-declare
- oldnew text[];
- relid text;
- argstr text;
-begin
-
- relid := TG_relid::regclass;
- argstr := '';
- for i in 0 .. TG_nargs - 1 loop
- if i > 0 then
- argstr := argstr || ', ';
- end if;
- argstr := argstr || TG_argv[i];
- end loop;
-
- RAISE NOTICE '%(%) % % % ON %',
- tg_name, argstr, TG_when, TG_level, TG_OP, relid;
- oldnew := '{}'::text[];
- if TG_OP != 'INSERT' then
- oldnew := array_append(oldnew, format('OLD: %s', OLD));
- end if;
-
- if TG_OP != 'DELETE' then
- oldnew := array_append(oldnew, format('NEW: %s', NEW));
- end if;
-
- RAISE NOTICE '%', array_to_string(oldnew, ',');
-
- if TG_OP = 'DELETE' then
- return OLD;
- else
- return NEW;
- end if;
-end;
-$$;
-
--- Test basic functionality
-CREATE TRIGGER trig_row_before
-BEFORE INSERT OR UPDATE OR DELETE ON rem1
-FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
-
-CREATE TRIGGER trig_row_after
-AFTER INSERT OR UPDATE OR DELETE ON rem1
-FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
-
-delete from rem1;
-insert into rem1 values(1,'insert');
-update rem1 set f2 = 'update' where f1 = 1;
-update rem1 set f2 = f2 || f2;
-
-
--- cleanup
-DROP TRIGGER trig_row_before ON rem1;
-DROP TRIGGER trig_row_after ON rem1;
-DROP TRIGGER trig_stmt_before ON rem1;
-DROP TRIGGER trig_stmt_after ON rem1;
-
-DELETE from rem1;
-
-
--- Test WHEN conditions
-
-CREATE TRIGGER trig_row_before_insupd
-BEFORE INSERT OR UPDATE ON rem1
-FOR EACH ROW
-WHEN (NEW.f2 like '%update%')
-EXECUTE PROCEDURE trigger_data(23,'skidoo');
-
-CREATE TRIGGER trig_row_after_insupd
-AFTER INSERT OR UPDATE ON rem1
-FOR EACH ROW
-WHEN (NEW.f2 like '%update%')
-EXECUTE PROCEDURE trigger_data(23,'skidoo');
-
--- Insert or update not matching: nothing happens
-INSERT INTO rem1 values(1, 'insert');
-UPDATE rem1 set f2 = 'test';
-
--- Insert or update matching: triggers are fired
-INSERT INTO rem1 values(2, 'update');
-UPDATE rem1 set f2 = 'update update' where f1 = '2';
-
-CREATE TRIGGER trig_row_before_delete
-BEFORE DELETE ON rem1
-FOR EACH ROW
-WHEN (OLD.f2 like '%update%')
-EXECUTE PROCEDURE trigger_data(23,'skidoo');
-
-CREATE TRIGGER trig_row_after_delete
-AFTER DELETE ON rem1
-FOR EACH ROW
-WHEN (OLD.f2 like '%update%')
-EXECUTE PROCEDURE trigger_data(23,'skidoo');
-
--- Trigger is fired for f1=2, not for f1=1
-DELETE FROM rem1;
-
--- cleanup
-DROP TRIGGER trig_row_before_insupd ON rem1;
-DROP TRIGGER trig_row_after_insupd ON rem1;
-DROP TRIGGER trig_row_before_delete ON rem1;
-DROP TRIGGER trig_row_after_delete ON rem1;
-
-
--- Test various RETURN statements in BEFORE triggers.
-
-CREATE FUNCTION trig_row_before_insupdate() RETURNS TRIGGER AS $$
- BEGIN
- NEW.f2 := NEW.f2 || ' triggered !';
- RETURN NEW;
- END
-$$ language plpgsql;
-
-CREATE TRIGGER trig_row_before_insupd
-BEFORE INSERT OR UPDATE ON rem1
-FOR EACH ROW EXECUTE PROCEDURE trig_row_before_insupdate();
-
--- The new values should have 'triggered' appended
-INSERT INTO rem1 values(1, 'insert');
-SELECT * from loc1;
-INSERT INTO rem1 values(2, 'insert') RETURNING f2;
-SELECT * from loc1;
-UPDATE rem1 set f2 = '';
-SELECT * from loc1;
-UPDATE rem1 set f2 = 'skidoo' RETURNING f2;
-SELECT * from loc1;
-
-EXPLAIN (verbose, costs off)
-UPDATE rem1 set f1 = 10; -- all columns should be transmitted
-UPDATE rem1 set f1 = 10;
-SELECT * from loc1;
-
-DELETE FROM rem1;
-
--- Add a second trigger, to check that the changes are propagated correctly
--- from trigger to trigger
-CREATE TRIGGER trig_row_before_insupd2
-BEFORE INSERT OR UPDATE ON rem1
-FOR EACH ROW EXECUTE PROCEDURE trig_row_before_insupdate();
-
-INSERT INTO rem1 values(1, 'insert');
-SELECT * from loc1;
-INSERT INTO rem1 values(2, 'insert') RETURNING f2;
-SELECT * from loc1;
-UPDATE rem1 set f2 = '';
-SELECT * from loc1;
-UPDATE rem1 set f2 = 'skidoo' RETURNING f2;
-SELECT * from loc1;
-
-DROP TRIGGER trig_row_before_insupd ON rem1;
-DROP TRIGGER trig_row_before_insupd2 ON rem1;
-
-DELETE from rem1;
-
-INSERT INTO rem1 VALUES (1, 'test');
-
--- Test with a trigger returning NULL
-CREATE FUNCTION trig_null() RETURNS TRIGGER AS $$
- BEGIN
- RETURN NULL;
- END
-$$ language plpgsql;
-
-CREATE TRIGGER trig_null
-BEFORE INSERT OR UPDATE OR DELETE ON rem1
-FOR EACH ROW EXECUTE PROCEDURE trig_null();
-
--- Nothing should have changed.
-INSERT INTO rem1 VALUES (2, 'test2');
-
-SELECT * from loc1;
-
-UPDATE rem1 SET f2 = 'test2';
-
-SELECT * from loc1;
-
-DELETE from rem1;
-
-SELECT * from loc1;
-
-DROP TRIGGER trig_null ON rem1;
-DELETE from rem1;
-
--- Test a combination of local and remote triggers
-CREATE TRIGGER trig_row_before
-BEFORE INSERT OR UPDATE OR DELETE ON rem1
-FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
-
-CREATE TRIGGER trig_row_after
-AFTER INSERT OR UPDATE OR DELETE ON rem1
-FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
-
-CREATE TRIGGER trig_local_before BEFORE INSERT OR UPDATE ON loc1
-FOR EACH ROW EXECUTE PROCEDURE trig_row_before_insupdate();
-
-INSERT INTO rem1(f2) VALUES ('test');
-UPDATE rem1 SET f2 = 'testo';
-
--- Test returning a system attribute
-INSERT INTO rem1(f2) VALUES ('test') RETURNING ctid;
diff --git a/src/test/regress/input/select_into_file.source b/src/test/regress/input/select_into_file.source
new file mode 100644
index 000000000..bbbb43c48
--- /dev/null
+++ b/src/test/regress/input/select_into_file.source
@@ -0,0 +1,30 @@
+create database test_select_into_file dbcompatibility 'b';
+\c test_select_into_file;
+CREATE TYPE my_enum AS enum('ENUM1','ENUM2');
+create table t(id int, a char(2), b text, c my_enum, d blob, e raw, f bytea);
+insert into t values(1, 'c1', 'text1', 'ENUM1', '01', HEXTORAW('DEADBEEF'), E'\\xDEADBEEF');
+insert into t values(2, 'c2', 'text2', 'ENUM2', '11', HEXTORAW('DEADBEEE'), E'\\xDEADBEEE');
+select * from t;
+select * from t into outfile '@abs_srcdir@/data/select_into_file.data';
+\! cat @abs_srcdir@/data/select_into_file.data
+select * from t into outfile '@abs_srcdir@/data/select_into_file.data' FIELDS TERMINATED BY '~~';
+\! cat @abs_srcdir@/data/select_into_file.data
+select * from t into outfile '@abs_srcdir@/data/select_into_file.data' FIELDS ENCLOSED BY '^';
+\! cat @abs_srcdir@/data/select_into_file.data
+select * from t into outfile '@abs_srcdir@/data/select_into_file.data' FIELDS OPTIONALLY ENCLOSED BY '^';
+\! cat @abs_srcdir@/data/select_into_file.data
+select * from t into outfile '@abs_srcdir@/data/select_into_file.data' FIELDS ENCLOSED BY 't' ESCAPED BY '^';
+\! cat @abs_srcdir@/data/select_into_file.data
+select * from t into outfile '@abs_srcdir@/data/select_into_file.data' LINES STARTING BY '$';
+\! cat @abs_srcdir@/data/select_into_file.data
+select * from t into outfile '@abs_srcdir@/data/select_into_file.data' LINES TERMINATED BY '&\n';
+\! cat @abs_srcdir@/data/select_into_file.data
+select * from t into outfile '@abs_srcdir@/data/select_into_file.data' FIELDS TERMINATED BY '~' ENCLOSED BY 't' ESCAPED BY '^' LINES STARTING BY '$' TERMINATED BY '&\n';
+\! cat @abs_srcdir@/data/select_into_file.data
+--error dumpfile more than one row
+select * from t into dumpfile '@abs_srcdir@/data/select_into_file.data';
+select * from t limit 1 into dumpfile '@abs_srcdir@/data/select_into_file.data';
+\! cat @abs_srcdir@/data/select_into_file.data
+\c regression;
+drop database test_select_into_file;
+\! rm @abs_srcdir@/data/select_into_file.data
\ No newline at end of file
diff --git a/src/test/regress/input/select_into_user_defined_variables.source b/src/test/regress/input/select_into_user_defined_variables.source
index 6dd46b49a..3a38fbf8c 100644
--- a/src/test/regress/input/select_into_user_defined_variables.source
+++ b/src/test/regress/input/select_into_user_defined_variables.source
@@ -1,15 +1,13 @@
-- error
select 10 into @aa;
-\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_set_variable_b_format=on" >/dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "b_format_behavior_compat_options='enable_set_variables'" >/dev/null 2>&1
\! sleep 1
-- error
select 10 into @aa;
-create database test dbcompatibility 'b';
-\c test
-show enable_set_variable_b_format;
-
+create database test_select_into_var dbcompatibility 'b';
+\c test_select_into_var
drop table if exists t;
create table t(i int, t text, b bool, f float, bi bit(3), vbi bit varying(5));
insert into t(i, t, b, f, bi, vbi)
@@ -57,6 +55,14 @@ select @aa,@bb,@cc,@dd,@ee,@ff;
select * from t where i=100 into @aa,@bb,@cc,@dd,@ee,@ff;
select @aa,@bb,@cc,@dd,@ee,@ff;
+create or replace procedure my_pro()
+as
+declare outfile int default 0;
+begin
+select 10 into outfile;
+end;
+/
+call my_pro();
--procedure stmt 1
create or replace procedure my_pro()
as
@@ -195,8 +201,44 @@ select @num;
drop trigger tri_delete_after on t1;
-\c regression
-drop database if exists test;
-\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_set_variable_b_format=off" >/dev/null 2>&1
+select 10,@v1/2 into @v1,@v2;
+select @v1,@v2;
+
+select 10,@v1/2 into @v1,@v2;
+select @v1,@v2;
+
+select 10 into @value;
+select @value;
+select sha(@value) into @sha_value;
+select @sha_value;
+
+select -1,'hello' into @v1, @v2;
+select @v1, @v2;
+select @v1 + 1, abs(@v1), concat(@v2, ' world!') into @v3, @abs, @concat;
+select @v3, @abs, @concat;
+
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "b_format_behavior_compat_options=''" >/dev/null 2>&1
\! sleep 1
-show enable_set_variable_b_format;
+create table t2(a int, b int);
+set b_format_behavior_compat_options="enable_set_variables";
+create or replace procedure test(a int, b int) as
+declare
+ num3 int := a;
+ num4 int := b;
+ pragma autonomous_transaction;
+
+begin
+ set b_format_behavior_compat_options="enable_set_variables";
+ select num3,num4 into @v1,@v2;
+ commit;
+ insert into t2 values(@v1, @v2);
+ rollback;
+ set b_format_behavior_compat_options="enable_set_variables";
+ insert into t2 values(@v1-1, @v2+1);
+end;
+/
+call test(1,1);
+select * from t2;
+
+\c regression
+drop database if exists test_select_into_var;
\ No newline at end of file
diff --git a/src/test/regress/input/set_system_variables_test.source b/src/test/regress/input/set_system_variables_test.source
index f9ed41f55..e05b69ac9 100644
--- a/src/test/regress/input/set_system_variables_test.source
+++ b/src/test/regress/input/set_system_variables_test.source
@@ -284,6 +284,14 @@ raise info 'v2:%', v2;
end;
/
+-- test b_format_behavior_compat_options param
+\c test_set
+show b_format_behavior_compat_options;
+set b_format_behavior_compat_options = 'enable_set_variables, enable_set_variables';
+show b_format_behavior_compat_options;
+set @v1 = 1;
+select @v1;
+
\c regression
drop database if exists test_set;
diff --git a/src/test/regress/input/set_user_defined_variables_test.source b/src/test/regress/input/set_user_defined_variables_test.source
index c84bd2097..212395c20 100644
--- a/src/test/regress/input/set_user_defined_variables_test.source
+++ b/src/test/regress/input/set_user_defined_variables_test.source
@@ -585,6 +585,50 @@ set enable_set_variable_b_format = on;
set @v1 := 1, @v2 := 2, @v3 := @v4 := 3;
select @v1, @v2, @v3, @v4;
+\c test_set
+set @v2 := 'aaa';
+set @V4 :=(SELECT @v2 + 1);
+select @v2, @v4;
+set @aa = 10;
+set @bb = (select sha(@aa));
+select @aa, @bb;
+
+\c test_set
+set b_format_behavior_compat_options="enable_set_variables";
+drop table if exists t2;
+create table t2(a int, b int);
+insert into t2 values(1,2);
+
+create or replace procedure test(a int, b int) as
+declare
+ num3 int := a;
+ num4 int := b;
+ pragma autonomous_transaction;
+
+begin
+ set b_format_behavior_compat_options="enable_set_variables";
+ set @v1 := num3, @v2 := num4;
+ insert into t2 values(@v1, @v2);
+ rollback;
+ set b_format_behavior_compat_options="enable_set_variables";
+ insert into t2 values(@v1-1, @v2+1);
+end;
+/
+
+call test(1,1);
+
+\c test_set
+set @a := 1, @b := @a;
+select @a, @b;
+set @a := @c := 2, @b := @d := @a;
+select @a, @b, @c, @d;
+
+\c test_set
+set @a := 1, @b := @a;
+select @a, @b;
+set @a := @c := 2, @b := @d := @a, @@session_timeout = 700, @e := @f := @a;
+select @a, @b, @c, @d, @e, @f, @@session_timeout;
+
\c regression
drop database if exists test_set;
diff --git a/src/test/regress/input/single_node_user_mapping.source b/src/test/regress/input/single_node_user_mapping.source
index bcde6742e..f30007ce2 100644
--- a/src/test/regress/input/single_node_user_mapping.source
+++ b/src/test/regress/input/single_node_user_mapping.source
@@ -1,6 +1,9 @@
--clear audit log
SELECT pg_delete_audit('1012-11-10', '3012-11-11');
--- prepare
+\! echo $GAUSSHOME | xargs -I{} rm -f {}/bin/usermapping.key.cipher
+\! echo $GAUSSHOME | xargs -I{} rm -f {}/bin/usermapping.key.rand
+\! echo $GAUSSHOME | xargs -I{} @abs_bindir@/gs_guc generate -S 123456@pwd -D {}/bin -o usermapping > /dev/null 2>&1 ; echo $?
\! echo $OLDGAUSSHOME | xargs -I{} rm -f {}/bin/usermapping.key.cipher
\! echo $OLDGAUSSHOME | xargs -I{} rm -f {}/bin/usermapping.key.rand
\! echo $OLDGAUSSHOME | xargs -I{} @abs_bindir@/gs_guc generate -S 123456@pwd -D {}/bin -o usermapping > /dev/null 2>&1 ; echo $?
@@ -13,13 +16,23 @@ CREATE SERVER dummy_srv2 FOREIGN DATA WRAPPER dummy;
CREATE USER MAPPING FOR current_user SERVER dummy_srv OPTIONS(username 'test', password 'shouldBeEncrypt');
CREATE USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(username 'test');
-ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(ADD password 'shouldBeEncrypt');
-ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(SET password 'shouldBeEncrypt2');
+ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(ADD password 'shouldBe''''''''''''''''''''''''''''''Encrypt');
+ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(SET password 'shouldBe''''''''''''''''''''''''''''''Encrypt2');
ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(DROP password);
DROP USER MAPPING FOR current_user SERVER dummy_srv2;
DROP USER MAPPING FOR current_user SERVER dummy_srv;
+-- test with password keyword double-quote
+CREATE USER MAPPING FOR current_user SERVER dummy_srv OPTIONS(username 'test', "password" 'shouldBe''''''''''''''''''''''''''''''Encrypt_tmp');
+CREATE USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(username 'test');
+ALTER USER MAPPING FOR current_user SERVER dummy_srv OPTIONS(SET "password" 'shouldBe''''''''''''''''''''''''''''''Encrypt_tmp2');
+ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(ADD "password" 'shouldBe''''''''''''''''''''''''''''''Encrypt_tmp');
+ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(SET "password" 'shouldBe''''''''''''''''''''''''''''''Encrypt_tmp2');
+
+DROP USER MAPPING FOR current_user SERVER dummy_srv2;
+DROP USER MAPPING FOR current_user SERVER dummy_srv;
+
DROP SERVER dummy_srv;
DROP SERVER dummy_srv2;
diff --git a/src/test/regress/input/slow_sql.source b/src/test/regress/input/slow_sql.source
new file mode 100644
index 000000000..695c3787a
--- /dev/null
+++ b/src/test/regress/input/slow_sql.source
@@ -0,0 +1,45 @@
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "instr_unique_sql_count=10000"
+\c postgres
+
+create schema slow_sql;
+SET search_path = slow_sql, public;
+create table slow_sql.test(col1 int, col2 numeric, col3 text);
+create index index1 on slow_sql.test(col1);
+create index index2 on slow_sql.test(col2);
+create index index3 on slow_sql.test(col3);
+insert into slow_sql.test values (generate_series(1,100), generate_series(101,200), generate_series(201,300));
+delete from statement_history;
+set track_stmt_stat_level='L0,L0';
+-- test col1(integer)
+select col1 from slow_sql.test where col1 = 12;
+select col2 from slow_sql.test where col1 = 123456781234567812345678;
+select col3 from slow_sql.test where col1 = '12';
+select * from slow_sql.test where col1::numeric = 12;
+select * from slow_sql.test where col1::text = '12';
+-- test col2(numeric)
+select col1 from slow_sql.test where col2 = 123456781234567812345678;
+select col2 from slow_sql.test where col2 = 12;
+select col3 from slow_sql.test where col2 = '123456781234567812345678';
+select * from slow_sql.test where col2::integer = 123456781234567812345678;
+select * from slow_sql.test where col2::text = '123456781234567812345678';
+-- test col3(text)
+select col1 from slow_sql.test where col3 = '12';
+select col2 from slow_sql.test where col3 = 12;
+select col3 from slow_sql.test where col3 = 123456781234567812345678;
+select * from slow_sql.test where col3::integer = 12;
+select * from slow_sql.test where col3::numeric = 12;
+-- test limit
+select col1 from slow_sql.test where col1 = 12 limit 4999;
+select col2 from slow_sql.test where col1 = 12 limit 5000;
+select col3 from slow_sql.test where col1 = 12 limit 5001;
+-- test all
+select col1,col2 from slow_sql.test where col1 = 12 limit 4999;
+select col1,col3 from slow_sql.test where col1 = 123456781234567812345678 limit 4999;
+select col2,col3 from slow_sql.test where col1 = 12 limit 5000;
+select col1,col2,col3 from slow_sql.test where col1 = 123456781234567812345678 limit 5000;
+set track_stmt_stat_level='OFF,L0';
+--wait insert history
+insert into slow_sql.test values (generate_series(1,10000), generate_series(10001,20000), generate_series(20001,30000));
+select schema_name, query, advise from statement_history order by start_time;
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "instr_unique_sql_count=100"
+
diff --git a/src/test/regress/input/subscription.source b/src/test/regress/input/subscription.source
index a4be16dee..3d43b99d4 100644
--- a/src/test/regress/input/subscription.source
+++ b/src/test/regress/input/subscription.source
@@ -11,6 +11,9 @@ SELECT pg_delete_audit('1012-11-10', '3012-11-11');
\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=16777215" > /dev/null 2>&1
select pg_sleep(1);
--- prepare
+\! echo $GAUSSHOME | xargs -I{} rm -f {}/bin/subscription.key.cipher
+\! echo $GAUSSHOME | xargs -I{} rm -f {}/bin/subscription.key.rand
+\! echo $GAUSSHOME | xargs -I{} @abs_bindir@/gs_guc generate -S 123456@pwd -D {}/bin -o subscription > /dev/null 2>&1 ; echo $?
\! echo $OLDGAUSSHOME | xargs -I{} rm -f {}/bin/subscription.key.cipher
\! echo $OLDGAUSSHOME | xargs -I{} rm -f {}/bin/subscription.key.rand
\! echo $OLDGAUSSHOME | xargs -I{} @abs_bindir@/gs_guc generate -S 123456@pwd -D {}/bin -o subscription > /dev/null 2>&1 ; echo $?
@@ -85,9 +88,22 @@ COMMIT;
BEGIN;
ALTER SUBSCRIPTION testsub_rename REFRESH PUBLICATION;
COMMIT;
+
+-- success, password len with 999
+CREATE SUBSCRIPTION sub_len_999 CONNECTION 'host=192.16''''8.1.50 port=5432 user=foo dbname=foodb password=xxin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*lei' PUBLICATION insert_only WITH (connect = false);
+
+-- fail, password len with 1000
+CREATE SUBSCRIPTION sub_len_1000 CONNECTION 'host=192.16''''8.1.50 port=5432 user=foo dbname=foodb password=xxin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leid' PUBLICATION insert_only WITH (enabled = false);
+
+-- fail, set password len with 1000
+ALTER SUBSCRIPTION sub_len_999 SET (conninfo='host=192.16''''8.1.50 port=5432 user=foo dbname=foodb password=xxin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leid');
+-- fail, set password len with 1000
+ALTER SUBSCRIPTION sub_len_999 CONNECTION 'host=192.16''''8.1.50 port=5432 user=foo dbname=foodb password=xxin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leid';
+
--- drop subscription
DROP SUBSCRIPTION IF EXISTS testsub_rename;
DROP SUBSCRIPTION IF EXISTS testsub_maskconninfo;
+DROP SUBSCRIPTION IF EXISTS sub_len_999;
--- cleanup
RESET SESSION AUTHORIZATION;
DROP ROLE regress_subscription_user;
diff --git a/src/test/regress/output/area.source b/src/test/regress/output/area.source
index ef50cef10..aa6ba522e 100644
--- a/src/test/regress/output/area.source
+++ b/src/test/regress/output/area.source
@@ -36,14 +36,14 @@ END;
$$
;
call decode_area_proc('mppdb_decoding');
- decode_area_proc
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+--?.*
+--?.*
{"table_name":"public.area_example1","op_type":"INSERT","columns_name":["id","somedata","text"],"columns_type":["integer","integer","character varying"],"columns_val":["1","1","'1'"],"old_keys_name":[],"old_keys_type":[],"old_keys_val":[]}
{"table_name":"public.area_example1","op_type":"INSERT","columns_name":["id","somedata","text"],"columns_type":["integer","integer","character varying"],"columns_val":["2","1","'2'"],"old_keys_name":[],"old_keys_type":[],"old_keys_val":[]}
- {"table_name":"public.area_example1","op_type":"UPDATE","columns_name":["id","somedata","text"],"columns_type":["integer","integer","character varying"],"columns_val":["1","10","'1'"],"old_keys_name":["id","somedata","text"],"old_keys_type":["integer","integer","character varying"],"old_keys_val":["1","1","'1'"]}
- {"table_name":"public.area_example1","op_type":"UPDATE","columns_name":["id","somedata","text"],"columns_type":["integer","integer","character varying"],"columns_val":["2","10","'2'"],"old_keys_name":["id","somedata","text"],"old_keys_type":["integer","integer","character varying"],"old_keys_val":["2","1","'2'"]}
- {"table_name":"public.area_example1","op_type":"DELETE","columns_name":[],"columns_type":[],"columns_val":[],"old_keys_name":["id","somedata","text"],"old_keys_type":["integer","integer","character varying"],"old_keys_val":["1","10","'1'"]}
- {"table_name":"public.area_example1","op_type":"DELETE","columns_name":[],"columns_type":[],"columns_val":[],"old_keys_name":["id","somedata","text"],"old_keys_type":["integer","integer","character varying"],"old_keys_val":["2","10","'2'"]}
+ {"table_name":"public.area_example1","op_type":"UPDATE","columns_name":["id","somedata","text"],"columns_type":["integer","integer","character varying"],"columns_val":["1","10","'1'"],"old_keys_name":["id"],"old_keys_type":["integer"],"old_keys_val":["1"]}
+ {"table_name":"public.area_example1","op_type":"UPDATE","columns_name":["id","somedata","text"],"columns_type":["integer","integer","character varying"],"columns_val":["2","10","'2'"],"old_keys_name":["id"],"old_keys_type":["integer"],"old_keys_val":["2"]}
+ {"table_name":"public.area_example1","op_type":"DELETE","columns_name":[],"columns_type":[],"columns_val":[],"old_keys_name":["id"],"old_keys_type":["integer"],"old_keys_val":["1"]}
+ {"table_name":"public.area_example1","op_type":"DELETE","columns_name":[],"columns_type":[],"columns_val":[],"old_keys_name":["id"],"old_keys_type":["integer"],"old_keys_val":["2"]}
{"table_name":"public.area_example2","op_type":"INSERT","columns_name":["a","b","c"],"columns_type":["integer","integer","integer"],"columns_val":["1","1","1"],"old_keys_name":[],"old_keys_type":[],"old_keys_val":[]}
{"table_name":"public.area_example2","op_type":"INSERT","columns_name":["a","b","c"],"columns_type":["integer","integer","integer"],"columns_val":["2","2","2"],"old_keys_name":[],"old_keys_type":[],"old_keys_val":[]}
{"table_name":"public.area_example2","op_type":"UPDATE","columns_name":["a","b","c"],"columns_type":["integer","integer","integer"],"columns_val":["1","10","1"],"old_keys_name":["a"],"old_keys_type":["integer"],"old_keys_val":["1"]}
@@ -55,14 +55,14 @@ call decode_area_proc('mppdb_decoding');
(14 rows)
call decode_area_proc('sql_decoding');
- decode_area_proc
--------------------------------------------------------------------------------------------------------------------------------------
+--?.*
+--?.*
insert into public.area_example1 values (3, 1, '1');
insert into public.area_example1 values (4, 1, '2');
- delete from public.area_example1 where id = 3 and somedata = 1 and text = '1';insert into public.area_example1 values (3, 10, '1');
- delete from public.area_example1 where id = 4 and somedata = 1 and text = '2';insert into public.area_example1 values (4, 10, '2');
- delete from public.area_example1 where id = 3 and somedata = 10 and text = '1';
- delete from public.area_example1 where id = 4 and somedata = 10 and text = '2';
+ delete from public.area_example1 where id = 3;insert into public.area_example1 values (3, 10, '1');
+ delete from public.area_example1 where id = 4;insert into public.area_example1 values (4, 10, '2');
+ delete from public.area_example1 where id = 3;
+ delete from public.area_example1 where id = 4;
insert into public.area_example2 values (1, 1, 1);
insert into public.area_example2 values (2, 2, 2);
delete from public.area_example2 where a = 1;insert into public.area_example2 values (1, 10, 1);
diff --git a/src/test/regress/output/charset_b_format.source b/src/test/regress/output/charset_b_format.source
new file mode 100755
index 000000000..01034c5c7
--- /dev/null
+++ b/src/test/regress/output/charset_b_format.source
@@ -0,0 +1,1019 @@
+-- in A format, should report error.
+create database d_charset dbcompatibility 'A';
+\c d_charset;
+create schema s_charset_1 charset utf8mb4 collate utf8mb4_unicode_ci;
+ERROR: specifying character sets and collations is supported only in B-format database
+create table t_charset_1 (c1 varchar(20)) charset utf8mb4 collate utf8mb4_unicode_ci;
+ERROR: specifying character sets and collations is supported only in B-format database
+create table a_charset_1 (a1 varchar(20) charset utf8mb4 collate utf8mb4_general_ci);
+ERROR: specifying character sets and collations is supported only in B-format database
+create table t_charset_2 (c1 varchar(20)) with(collate = 1537); -- error
+ERROR: Un-support feature
+DETAIL: Forbid to set or change "collate" in non-B format
+\c regression;
+drop database d_charset;
+create database d_charset dbcompatibility 'B';
+create database d_charset_bak dbcompatibility 'B';
+\c d_charset;
+create table t_charset_0 (c1 varchar(20));
+select pg_get_tabledef('t_charset_0');
+ pg_get_tabledef
+-----------------------------------------
+ SET search_path = public; +
+ CREATE TABLE t_charset_0 ( +
+ c1 character varying(20) +
+ ) +
+ WITH (orientation=row, compression=no);
+(1 row)
+
+set b_format_behavior_compat_options = 'default_collation';
+-- schema level charset and collate
+create schema s_charset_1 charset utf8mb4 collate utf8mb4_unicode_ci;
+create schema s_charset_2 charset = utf8mb4 collate = utf8mb4_unicode_ci;
+create schema s_charset_3 charset = utf8mb4 collate = utf8mb4_general_ci;
+create schema s_charset_4 charset = utf8mb4;
+create schema s_charset_5 charset = utf8;
+create schema s_charset_6 charset = gbk; -- error
+ERROR: default collation for encoding "GBK" does not exist
+create schema s_charset_6;
+create schema s_charset_7 default charset = utf8mb4 default collate = utf8mb4_unicode_ci;
+create schema s_charset_8 charset = "binary";
+create schema s_charset_9 character set = utf8mb4;
+create schema s_charset_10 collate = utf8mb4_general_ci;
+create schema s_charset_11 collate = utf8mb4_bin;
+create schema s_charset_12 collate = binary;
+create schema s_charset_13 collate = "binary";
+create schema s_charset_14 charset = binary;
+create schema s_charset_16 charset = gbk collate = utf8mb4_general_ci; -- error
+ERROR: collation "utf8mb4_general_ci" for encoding "GBK" does not exist
+create schema s_charset_16 default charset utf8mb4 default collate utf8mb4_unicode_ci;
+create schema s_charset_17 CHARSET = utf8mb4 COLLATE = utf8mb4_unicode_ci;
+create schema s_charset_18 collate = utf8mb4_unicode_ci charset = utf8mb4;
+create schema s_charset_19 collate = utf8mb4_unicode_ci collate = utf8mb4_general_ci;
+create schema s_charset_20 charset = gbk charset = utf8mb4;
+create schema s_charset_21 collate = utf8mb4_unicode_ci charset = utf8mb4 collate = utf8mb4_general_ci;
+create schema s_charset_22 collate = "zh_CN.gbk" charset = utf8mb4 collate = utf8mb4_general_ci;
+create schema s_charset_23 charset utf8mb4 collate "aa_DJ.utf8"; -- error
+ERROR: this collation only cannot be specified here
+create schema s_charset_23 collate "aa_DJ"; -- error
+ERROR: there is more than one collation "aa_DJ" with the same name
+select * from pg_namespace where nspname like 's_charset_%' order by 1;
+ nspname | nspowner | nsptimeline | nspacl | in_redistribution | nspblockchain | nspcollation
+--------------+----------+-------------+--------+-------------------+---------------+--------------
+ s_charset_1 | 10 | 0 | | n | f | 1538
+ s_charset_10 | 10 | 0 | | n | f | 1537
+ s_charset_11 | 10 | 0 | | n | f | 1539
+ s_charset_12 | 10 | 0 | | n | f | 1026
+ s_charset_13 | 10 | 0 | | n | f | 1026
+ s_charset_14 | 10 | 0 | | n | f | 1026
+ s_charset_16 | 10 | 0 | | n | f | 1538
+ s_charset_17 | 10 | 0 | | n | f | 1538
+ s_charset_18 | 10 | 0 | | n | f | 1538
+ s_charset_19 | 10 | 0 | | n | f | 1537
+ s_charset_2 | 10 | 0 | | n | f | 1538
+ s_charset_20 | 10 | 0 | | n | f | 1537
+ s_charset_21 | 10 | 0 | | n | f | 1537
+ s_charset_22 | 10 | 0 | | n | f | 1537
+ s_charset_3 | 10 | 0 | | n | f | 1537
+ s_charset_4 | 10 | 0 | | n | f | 1537
+ s_charset_5 | 10 | 0 | | n | f | 1537
+ s_charset_6 | 10 | 0 | | n | f |
+ s_charset_7 | 10 | 0 | | n | f | 1538
+ s_charset_8 | 10 | 0 | | n | f | 1026
+ s_charset_9 | 10 | 0 | | n | f | 1537
+(21 rows)
+
+create schema s_charset_14 collate = "zh_CN.gbk"; -- error
+ERROR: this collation only cannot be specified here
+create schema s_charset_15 charset = gbk collate = "zh_CN.gbk"; -- error
+ERROR: this collation only cannot be specified here
+alter schema s_charset_1 charset utf8mb4 collate utf8mb4_general_ci;
+alter schema s_charset_2 charset = utf8mb4 collate = utf8mb4_general_ci;
+alter schema s_charset_3 collate = utf8mb4_unicode_ci;
+alter schema s_charset_5 charset = gbk; -- error
+ERROR: default collation for encoding "GBK" does not exist
+alter schema s_charset_5 charset = gbk collate = "zh_CN.gbk"; -- error
+ERROR: this collation only cannot be specified here
+alter schema s_charset_9 character set = utf8 collate = utf8mb4_unicode_ci;
+select * from pg_namespace where nspname like 's_charset_%' order by 1;
+ nspname | nspowner | nsptimeline | nspacl | in_redistribution | nspblockchain | nspcollation
+--------------+----------+-------------+--------+-------------------+---------------+--------------
+ s_charset_1 | 10 | 0 | | n | f | 1537
+ s_charset_10 | 10 | 0 | | n | f | 1537
+ s_charset_11 | 10 | 0 | | n | f | 1539
+ s_charset_12 | 10 | 0 | | n | f | 1026
+ s_charset_13 | 10 | 0 | | n | f | 1026
+ s_charset_14 | 10 | 0 | | n | f | 1026
+ s_charset_16 | 10 | 0 | | n | f | 1538
+ s_charset_17 | 10 | 0 | | n | f | 1538
+ s_charset_18 | 10 | 0 | | n | f | 1538
+ s_charset_19 | 10 | 0 | | n | f | 1537
+ s_charset_2 | 10 | 0 | | n | f | 1537
+ s_charset_20 | 10 | 0 | | n | f | 1537
+ s_charset_21 | 10 | 0 | | n | f | 1537
+ s_charset_22 | 10 | 0 | | n | f | 1537
+ s_charset_3 | 10 | 0 | | n | f | 1538
+ s_charset_4 | 10 | 0 | | n | f | 1537
+ s_charset_5 | 10 | 0 | | n | f | 1537
+ s_charset_6 | 10 | 0 | | n | f |
+ s_charset_7 | 10 | 0 | | n | f | 1538
+ s_charset_8 | 10 | 0 | | n | f | 1026
+ s_charset_9 | 10 | 0 | | n | f | 1538
+(21 rows)
+
+-- relation level charset and collate
+create table t_charset_1 (c1 varchar(20)) charset utf8mb4 collate utf8mb4_unicode_ci;
+create table t_charset_2 (c1 varchar(20)) charset = utf8mb4 collate = utf8mb4_unicode_ci;
+create table t_charset_3 (c1 varchar(20)) charset = utf8mb4 collate = utf8mb4_general_ci;
+create table t_charset_4 (c1 varchar(20)) charset = utf8mb4;
+create table t_charset_5 (c1 varchar(20)) charset = utf8;
+create table t_charset_6 (c1 varchar(20)) charset = gbk; -- error
+ERROR: default collation for encoding "GBK" does not exist
+create table t_charset_6 (c1 varchar(20));
+create table t_charset_7 (c1 varchar(20)) default charset = utf8mb4 default collate = utf8mb4_unicode_ci;
+create table t_charset_8 (c1 varchar(20)) charset = binary; -- error
+ERROR: Un-support feature
+DETAIL: type varchar cannot be set to binary collation currently
+create table t_charset_8 (c1 text) charset = binary;
+select pg_get_tabledef('t_charset_8');
+ pg_get_tabledef
+------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE t_charset_8 ( +
+ c1 blob +
+ ) +
+ CHARACTER SET = "SQL_ASCII" COLLATE = "binary"+
+ WITH (orientation=row, compression=no);
+(1 row)
+
+create table t_charset_9 (c1 varchar(20)) character set = utf8mb4;
+create table t_charset_10(c1 varchar(20)) collate = utf8mb4_general_ci;
+create table t_charset_11(c1 varchar(20)) collate = utf8mb4_bin;
+create table t_charset_12(c1 varchar(20)) collate = binary;
+ERROR: Un-support feature
+DETAIL: type varchar cannot be set to binary collation currently
+create table t_charset_12(c1 varchar(20)) default charset utf8mb4 default collate utf8mb4_unicode_ci;
+create table t_charset_13(c1 varchar(20)) collate = "binary";
+ERROR: Un-support feature
+DETAIL: type varchar cannot be set to binary collation currently
+create table t_charset_16(c1 varchar(20)) charset = gbk collate = utf8mb4_general_ci; -- error
+ERROR: collation "utf8mb4_general_ci" for encoding "GBK" does not exist
+create table t_charset_17(c1 varchar(20)) CHARSET = utf8mb4 COLLATE = utf8mb4_unicode_ci;
+create table t_charset_18(c1 varchar(20)) collate = utf8mb4_unicode_ci charset = utf8mb4;
+create table t_charset_19(c1 varchar(20)) collate = utf8mb4_unicode_ci collate = utf8mb4_general_ci;
+create table t_charset_20(c1 varchar(20)) charset = gbk charset = utf8mb4;
+create table t_charset_21(c1 varchar(20)) collate = utf8mb4_unicode_ci charset = utf8mb4 collate = utf8mb4_general_ci;
+create table t_charset_22(c1 varchar(20)) collate = "zh_CN.gbk" charset = utf8mb4 collate = utf8mb4_general_ci;
+create table t_charset_23(like t_charset_22);
+select r.relname,r.reloptions,a.attcollation from pg_class r,pg_attribute a where r.oid=a.attrelid and r.relname='t_charset_23';
+ relname | reloptions | attcollation
+--------------+-----------------------------------------------+--------------
+ t_charset_23 | {orientation=row,compression=no,collate=1537} | 0
+ t_charset_23 | {orientation=row,compression=no,collate=1537} | 0
+ t_charset_23 | {orientation=row,compression=no,collate=1537} | 0
+ t_charset_23 | {orientation=row,compression=no,collate=1537} | 0
+ t_charset_23 | {orientation=row,compression=no,collate=1537} | 0
+ t_charset_23 | {orientation=row,compression=no,collate=1537} | 0
+ t_charset_23 | {orientation=row,compression=no,collate=1537} | 0
+ t_charset_23 | {orientation=row,compression=no,collate=1537} | 1537
+(8 rows)
+
+create table t_charset_24(c1 varchar(20) character set binary); -- error
+ERROR: Un-support feature
+DETAIL: type varchar cannot be set to binary collation currently
+create table t_charset_24(c1 varchar(20) character set "binary"); -- error
+ERROR: Un-support feature
+DETAIL: type varchar cannot be set to binary collation currently
+create table t_charset_25(c1 varchar(20)) with(collate = 7);
+ERROR: this collation only cannot be specified here
+create table t_charset_26(c1 varchar(20)) charset utf8mb4 collate "aa_DJ.utf8"; -- error
+ERROR: this collation only cannot be specified here
+create table t_charset_26(c1 varchar(20)) collate "aa_DJ"; -- error
+ERROR: there is more than one collation "aa_DJ" with the same name
+select relname, reloptions from pg_class where relname like 't_charset_%' order by 1;
+ relname | reloptions
+--------------+-----------------------------------------------
+ t_charset_0 | {orientation=row,compression=no}
+ t_charset_1 | {orientation=row,compression=no,collate=1538}
+ t_charset_10 | {orientation=row,compression=no,collate=1537}
+ t_charset_11 | {orientation=row,compression=no,collate=1539}
+ t_charset_12 | {orientation=row,compression=no,collate=1538}
+ t_charset_17 | {orientation=row,compression=no,collate=1538}
+ t_charset_18 | {orientation=row,compression=no,collate=1538}
+ t_charset_19 | {orientation=row,compression=no,collate=1537}
+ t_charset_2 | {orientation=row,compression=no,collate=1538}
+ t_charset_20 | {orientation=row,compression=no,collate=1537}
+ t_charset_21 | {orientation=row,compression=no,collate=1537}
+ t_charset_22 | {orientation=row,compression=no,collate=1537}
+ t_charset_23 | {orientation=row,compression=no,collate=1537}
+ t_charset_3 | {orientation=row,compression=no,collate=1537}
+ t_charset_4 | {orientation=row,compression=no,collate=1537}
+ t_charset_5 | {orientation=row,compression=no,collate=1537}
+ t_charset_6 | {orientation=row,compression=no,collate=1537}
+ t_charset_7 | {orientation=row,compression=no,collate=1538}
+ t_charset_8 | {orientation=row,compression=no,collate=1026}
+ t_charset_9 | {orientation=row,compression=no,collate=1537}
+(20 rows)
+
+alter table t_charset_1 convert to charset binary; -- error
+ERROR: Un-support feature
+DETAIL: type varchar cannot be set to binary collation currently
+alter table t_charset_1 convert to charset utf8mb4;
+alter table t_charset_1 convert to character set utf8mb4;
+alter table t_charset_1 convert to character set utf8mb4 collate utf8mb4_general_ci;
+select relname, reloptions from pg_class where relname = 't_charset_1';
+ relname | reloptions
+-------------+-----------------------------------------------
+ t_charset_1 | {orientation=row,compression=no,collate=1537}
+(1 row)
+
+alter table t_charset_1 convert to character set default collate utf8mb4_unicode_ci;
+select relname, reloptions from pg_class where relname = 't_charset_1';
+ relname | reloptions
+-------------+-----------------------------------------------
+ t_charset_1 | {orientation=row,compression=no,collate=1538}
+(1 row)
+
+alter table t_charset_1 charset utf8mb4;
+alter table t_charset_1 character set utf8mb4;
+alter table t_charset_1 character set utf8mb4 collate utf8mb4_bin;
+select relname, reloptions from pg_class where relname = 't_charset_1';
+ relname | reloptions
+-------------+-----------------------------------------------
+ t_charset_1 | {orientation=row,compression=no,collate=1539}
+(1 row)
+
+alter table t_charset_1 collate utf8mb4_unicode_ci;
+select relname, reloptions from pg_class where relname = 't_charset_1';
+ relname | reloptions
+-------------+-----------------------------------------------
+ t_charset_1 | {orientation=row,compression=no,collate=1538}
+(1 row)
+
+alter table t_charset_1 change c1 c2 varchar(30) charset gbk collate utf8mb4_bin; -- error
+ERROR: collation "utf8mb4_bin" for encoding "GBK" does not exist
+alter table t_charset_1 change c1 c2 varchar(30) charset utf8mb4 collate utf8mb4_bin;
+select pg_get_tabledef('t_charset_1');
+ pg_get_tabledef
+-----------------------------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE t_charset_1 ( +
+ c2 character varying(30) CHARACTER SET "UTF8" COLLATE utf8mb4_bin+
+ ) +
+ CHARACTER SET = "UTF8" COLLATE = "utf8mb4_unicode_ci" +
+ WITH (orientation=row, compression=no);
+(1 row)
+
+-- attribute level charset and collate
+create table a_charset_1 (
+a1 varchar(20) charset utf8mb4 collate utf8mb4_general_ci,
+a2 varchar(20) charset utf8mb4 collate utf8mb4_unicode_ci,
+a3 varchar(20) charset utf8mb4 collate utf8mb4_bin
+)
+charset utf8mb4 collate utf8mb4_general_ci;
+insert into a_charset_1 values('中国','中国','中国');
+insert into a_charset_1 select a2,a1 from a_charset_1;
+select *,rawtohex(a1),rawtohex(a2),length(a1),length(a2),length(a3),lengthb(a1),lengthb(a2),lengthb(a3) from a_charset_1;
+ a1 | a2 | a3 | rawtohex | rawtohex | length | length | length | lengthb | lengthb | lengthb
+------+------+------+--------------+--------------+--------+--------+--------+---------+---------+---------
+ 中国 | 中国 | 中国 | e4b8ade59bbd | e4b8ade59bbd | 2 | 2 | 2 | 6 | 6 | 6
+ 中国 | 中国 | | e4b8ade59bbd | e4b8ade59bbd | 2 | 2 | | 6 | 6 |
+(2 rows)
+
+alter table a_charset_1 convert to charset gbk collate "zh_CN.gbk";
+ERROR: this collation only cannot be specified here
+select rawtohex(a1),rawtohex(a2),rawtohex(a3) from a_charset_1;
+ rawtohex | rawtohex | rawtohex
+--------------+--------------+--------------
+ e4b8ade59bbd | e4b8ade59bbd | e4b8ade59bbd
+ e4b8ade59bbd | e4b8ade59bbd |
+(2 rows)
+
+alter table a_charset_1 convert to charset utf8mb4;
+select rawtohex(a1),rawtohex(a2),rawtohex(a3) from a_charset_1;
+ rawtohex | rawtohex | rawtohex
+--------------+--------------+--------------
+ e4b8ade59bbd | e4b8ade59bbd | e4b8ade59bbd
+ e4b8ade59bbd | e4b8ade59bbd |
+(2 rows)
+
+create table a_charset_2(
+a1 character(20) charset utf8mb4 collate utf8mb4_general_ci,
+a2 char(20) charset utf8mb4 collate utf8mb4_general_ci,
+a3 nchar(20) charset utf8mb4 collate utf8mb4_general_ci,
+a4 varchar(20) charset utf8mb4 collate utf8mb4_general_ci,
+a5 character varying(20) charset utf8mb4 collate utf8mb4_general_ci,
+a6 varchar2(20) charset utf8mb4 collate utf8mb4_general_ci,
+a7 nvarchar2(20) charset utf8mb4 collate utf8mb4_general_ci,
+a8 text,
+a9 blob
+) charset binary;
+alter table a_charset_2 add a8 varchar(20) charset utf8mb4;
+ERROR: column "a8" of relation "a_charset_2" already exists
+alter table a_charset_2 add a9 varchar(20) character set utf8mb4;
+ERROR: column "a9" of relation "a_charset_2" already exists
+alter table a_charset_2 add a10 varchar(20) character set utf8mb4 collate utf8mb4_unicode_ci;
+alter table a_charset_2 add a11 varchar(20) collate utf8mb4_bin;
+alter table a_charset_2 add a12 varchar(20);
+ERROR: Un-support feature
+DETAIL: type varchar cannot be set to binary collation currently
+alter table a_charset_2 add a13 int;
+alter table a_charset_2 add a14 varchar(20) charset utf8mb4 collate "aa_DJ.utf8";
+alter table a_charset_2 add a15 varchar(20) collate "aa_DJ.utf8";
+alter table a_charset_2 add a16 varchar(20) collate "aa_DJ";
+alter table a_charset_2 add a17 text charset utf8mb4 collate utf8mb4_general_ci;
+alter table a_charset_2 add a18 clob charset utf8mb4 collate utf8mb4_general_ci; -- error
+ERROR: type clob not support set charset
+alter table a_charset_2 add a19 name charset utf8mb4 collate utf8mb4_general_ci; -- error
+ERROR: type name not support set charset
+alter table a_charset_2 add a20 "char" charset utf8mb4 collate utf8mb4_general_ci; -- error
+ERROR: type "char" not support set charset
+alter table a_charset_2 add a21 BLOB charset utf8mb4 collate utf8mb4_general_ci; -- error
+ERROR: type blob not support set charset
+alter table a_charset_2 add a22 RAW charset utf8mb4 collate utf8mb4_general_ci; -- error
+ERROR: type raw not support set charset
+alter table a_charset_2 add a23 BYTEA charset utf8mb4 collate utf8mb4_general_ci; -- error
+ERROR: type bytea not support set charset
+alter table a_charset_2 add a24 varchar(20) collate "zh_CN.gbk"; -- error;
+ERROR: difference between the charset and the database encoding has not supported
+select pg_get_tabledef('a_charset_2');
+ pg_get_tabledef
+--------------------------------------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE a_charset_2 ( +
+ a1 character(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a2 character(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a3 character(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a4 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a5 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a6 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a7 nvarchar2(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a8 blob, +
+ a9 blob, +
+ a10 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_unicode_ci,+
+ a11 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_bin, +
+ a13 integer, +
+ a14 character varying(20) CHARACTER SET "UTF8" COLLATE "aa_DJ.utf8", +
+ a15 character varying(20) CHARACTER SET "UTF8" COLLATE "aa_DJ.utf8", +
+ a16 character varying(20) CHARACTER SET "UTF8" COLLATE "aa_DJ", +
+ a17 text CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci +
+ ) +
+ CHARACTER SET = "SQL_ASCII" COLLATE = "binary" +
+ WITH (orientation=row, compression=no);
+(1 row)
+
+alter table a_charset_2 add a8 varchar(20) charset utf8mb4 charset utf8mb4; -- error
+ERROR: syntax error at or near "charset"
+LINE 1: ...le a_charset_2 add a8 varchar(20) charset utf8mb4 charset ut...
+ ^
+alter table a_charset_2 modify a1 int;
+alter table a_charset_2 modify a9 varchar(20) character set gbk; -- error
+ERROR: default collation for encoding "GBK" does not exist
+alter table a_charset_2 modify a10 varchar(20);
+ERROR: Un-support feature
+DETAIL: type varchar cannot be set to binary collation currently
+alter table a_charset_2 modify a11 varchar(20) collate utf8mb4_unicode_ci;
+alter table a_charset_2 modify a12 varchar(20) charset utf8mb4;
+ERROR: column "a12" of relation "a_charset_2" does not exist
+select pg_get_tabledef('a_charset_2');
+ pg_get_tabledef
+--------------------------------------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE a_charset_2 ( +
+ a1 integer, +
+ a2 character(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a3 character(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a4 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a5 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a6 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a7 nvarchar2(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a8 blob, +
+ a9 blob, +
+ a10 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_unicode_ci,+
+ a11 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_unicode_ci,+
+ a13 integer, +
+ a14 character varying(20) CHARACTER SET "UTF8" COLLATE "aa_DJ.utf8", +
+ a15 character varying(20) CHARACTER SET "UTF8" COLLATE "aa_DJ.utf8", +
+ a16 character varying(20) CHARACTER SET "UTF8" COLLATE "aa_DJ", +
+ a17 text CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci +
+ ) +
+ CHARACTER SET = "SQL_ASCII" COLLATE = "binary" +
+ WITH (orientation=row, compression=no);
+(1 row)
+
+create table a_charset_3(
+a1 varchar(20) collate "C",
+a2 varchar(20) collate "default",
+a3 varchar(20) collate "POSIX"
+);
+create table a_charset_4(a1 blob);
+-- divergence test
+\h create schema;
+Command: CREATE SCHEMA
+Description: define a new schema
+Syntax:
+CREATE SCHEMA [ IF NOT EXISTS ] schema_name
+ [ AUTHORIZATION user_name ] [WITH BLOCKCHAIN] [ schema_element [ ... ] ];
+CREATE SCHEMA schema_name
+ [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ];
+NOTICE: '[ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ]' is only available in CENTRALIZED mode and B-format database!
+
+\h alter schema;
+Command: ALTER SCHEMA
+Description: change the definition of a schema
+Syntax:
+ALTER SCHEMA schema_name
+ RENAME TO new_name;
+ALTER SCHEMA schema_name
+ OWNER TO new_owner;
+ALTER SCHEMA schema_name {WITH | WITHOUT} BLOCKCHAIN;
+ALTER SCHEMA schema_name
+ [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ];
+NOTICE: '[ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ]' is only available in CENTRALIZED mode and B-format database!
+
+\h create table;
+Command: CREATE TABLE
+Description: define a new table
+Syntax:
+CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXISTS ] table_name
+ ( { column_name data_type [ CHARACTER SET | CHARSET charset ]
+ [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ]
+ | table_constraint
+ | LIKE source_table [ like_option [...] ] }
+ [, ... ])
+ [ AUTO_INCREMENT [ = ] value ]
+ [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ]
+ [ WITH ( {storage_parameter = value} [, ... ] ) ]
+ [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ]
+ [ COMPRESS | NOCOMPRESS ]
+ [ TABLESPACE tablespace_name ];
+
+where column_constraint can be:
+[ CONSTRAINT constraint_name ]
+{ NOT NULL |
+ NULL |
+ CHECK ( expression ) |
+ DEFAULT default_expr |
+ GENERATED ALWAYS AS ( generation_expr ) [STORED] |
+ AUTO_INCREMENT |
+ UNIQUE [KEY] index_parameters |
+ PRIMARY KEY index_parameters |
+ ENCRYPTED WITH ( COLUMN_ENCRYPTION_KEY = column_encryption_key, ENCRYPTION_TYPE = encryption_type_value ) |
+ REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
+ [ ON DELETE action ] [ ON UPDATE action ] }
+[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
+NOTICE: 'UNIQUE KEY' in table_constraint is only available in CENTRALIZED mode and B-format database!
+where table_constraint can be:
+[ CONSTRAINT [ constraint_name ] ]
+{ CHECK ( expression ) |
+ UNIQUE [ index_name ] [ USING method ] ( { { column_name | ( expression ) } [ ASC | DESC ] } [, ... ] ) index_parameters [ VISIBLE | INVISIBLE ] |
+ PRIMARY KEY [ USING method ] ( { column_name [ ASC | DESC ] } [, ... ] ) index_parameters [ VISIBLE | INVISIBLE ] |
+ PARTIAL CLUSTER KEY ( column_name [, ... ] ) |
+ FOREIGN KEY [ index_name ] ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ]
+ [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] }
+[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
+where compress_mode can be:
+{ DELTA | PREFIX | DICTIONARY | NUMSTR | NOCOMPRESS }
+where like_option can be:
+{ INCLUDING | EXCLUDING } { DEFAULTS | GENERATED | CONSTRAINTS | INDEXES | STORAGE | COMMENTS | PARTITION | RELOPTIONS | DISTRIBUTION | ALL }
+where index_parameters can be:
+[ WITH ( {storage_parameter = value} [, ... ] ) ]
+[ USING INDEX TABLESPACE tablespace_name ]
+where range_distribution_rules can be:
+[ ( SLICE name VALUES LESS THAN (expression | MAXVALUE [, ... ]) [DATANODE datanode_name]
+ [, ... ] ) |
+ ( SLICE name START (expression) END (expression) EVERY (expression) [DATANODE datanode_name]
+ [, ... ] ) |
+ SLICE REFERENCES table_name
+]
+where list_distribution_rules can be:
+[ ( SLICE name VALUES (expression [, ... ]) [DATANODE datanode_name]
+ [, ... ] ) |
+ ( SLICE name VALUES (DEFAULT) [DATANODE datanode_name] ) |
+ SLICE REFERENCES table_name
+]
+
+NOTICE: '[ constraint_name ]' in table_constraint is optional in CENTRALIZED mode and B-format database, it is mandatory in other scenarios.
+NOTICE: '[ index_name ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ USING method ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ ASC | DESC ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '( expression )' in 'UNIQUE' clause of table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: 'AUTO_INCREMENT' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ CHARACTER SET | CHARSET ]' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ COLLATE [ = ] collation ]' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ VISIBLE | INVISIBLE ]' is only avaliable in CENTRALIZED mode and B-format database!
+
+\h create table partition;
+Command: CREATE TABLE PARTITION
+Description: define a new table partition
+Syntax:
+CREATE TABLE [ IF NOT EXISTS ] partition_table_name
+( [
+ { column_name data_type [ CHARACTER SET | CHARSET charset ]
+ [ COLLATE collation ] [ column_constraint [ ... ] ]
+ | table_constraint
+ | LIKE source_table [ like_option [...] ] }
+ [, ... ]
+] )
+ [ AUTO_INCREMENT [ = ] value ]
+ [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ][ [ DEFAULT ] COLLATE [ = ] collation ]
+ [ WITH ( {storage_parameter = value} [, ... ] ) ]
+ [ COMPRESS | NOCOMPRESS ]
+ [ TABLESPACE tablespace_name ]
+ [ DISTRIBUTE BY { REPLICATION | { [ HASH ] ( column_name ) } } ]
+ NOTICE: DISTRIBUTE BY is only avaliable in DISTRIBUTED mode!
+ [ TO { GROUP groupname | NODE ( nodename [, ... ] ) } ]
+ PARTITION BY {
+ {VALUES (partition_key)} |
+ {RANGE [ COLUMNS ] (partition_key) [ INTERVAL ('interval_expr') [ STORE IN ( tablespace_name [, ...] ) ] ] [ PARTITIONS integer ] ( partition_less_than_item [, ... ] )} |
+ {RANGE [ COLUMNS ] (partition_key) [ INTERVAL ('interval_expr') [ STORE IN ( tablespace_name [, ...] ) ] ] [ PARTITIONS integer ] ( partition_start_end_item [, ... ] )} |
+ {{{LIST [ COLUMNS ]} | HASH | KEY} (partition_key) [ PARTITIONS integer ] (PARTITION partition_name [ VALUES [ IN ] (list_values_clause) ] opt_table_space ) }
+ } [ { ENABLE | DISABLE } ROW MOVEMENT ];
+
+NOTICE: [ COLUMNS ] is only available in B-format database!
+NOTICE: [ PARTITIONS integer ] in RANGE/LIST partition is only available in B-format database!
+NOTICE: [ IN ] is only available in B-format database!
+NOTICE: KEY is only available in B-format database!
+
+where column_constraint can be:
+[ CONSTRAINT constraint_name ]
+{ NOT NULL |
+ NULL |
+ CHECK ( expression ) |
+ DEFAULT default_expr |
+ GENERATED ALWAYS AS ( generation_expr ) [STORED] |
+ AUTO_INCREMENT |
+ UNIQUE [KEY] index_parameters |
+ PRIMARY KEY index_parameters |
+ REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
+ [ ON DELETE action ] [ ON UPDATE action ] }
+[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
+NOTICE: 'UNIQUE KEY' in table_constraint is only available in CENTRALIZED mode and B-format database!
+where table_constraint can be:
+[ CONSTRAINT [ constraint_name ] ]
+{ CHECK ( expression ) |
+ UNIQUE [ index_name ] [ USING method ] ( { column_name [ ASC | DESC ] } [, ... ] ) index_parameters |
+ PRIMARY KEY [ USING method ] ( { column_name [ ASC | DESC ] } [, ... ] ) index_parameters |
+ FOREIGN KEY [ index_name ] ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ]
+ [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] }
+[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
+where index_parameters can be:
+[ WITH ( {storage_parameter = value} [, ... ] ) ]
+[ USING INDEX TABLESPACE tablespace_name ]
+where like_option can be:
+{ INCLUDING | EXCLUDING } { DEFAULTS | GENERATED | CONSTRAINTS | INDEXES | STORAGE | COMMENTS | RELOPTIONS | DISTRIBUTION | ALL }
+where partition_less_than_item can be:
+PARTITION partition_name VALUES LESS THAN { ( { partition_value | MAXVALUE } [, ... ] ) | MAXVALUE } [TABLESPACE [=] tablespace_name]
+NOTICE: MAXVALUE without parentheses is only available in B-format database!
+where partition_start_end_item can be:
+PARTITION partition_name {
+ {START(partition_value) END (partition_value) EVERY (interval_value)} |
+ {START(partition_value) END ({partition_value | MAXVALUE})} |
+ {START(partition_value)} |
+ {END({partition_value | MAXVALUE})}
+} [TABLESPACE tablespace_name]
+
+NOTICE: '[ constraint_name ]' in table_constraint is optional in CENTRALIZED mode and B-format database, it is mandatory in other scenarios.
+NOTICE: '[ index_name ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ USING method ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ ASC | DESC ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: 'AUTO_INCREMENT' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ CHARACTER SET | CHARSET ]' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ COLLATE [ = ] collation ]' is only available in CENTRALIZED mode and B-format database!
+
+\h create table subpartition;
+Command: CREATE TABLE SUBPARTITION
+Description: define a new table subpartition
+Syntax:
+CREATE TABLE [ IF NOT EXISTS ] subpartition_table_name
+( { column_name data_type [ CHARACTER SET | CHARSET charset ]
+ [ COLLATE collation ] [ column_constraint [ ... ] ]
+ | table_constraint
+ | LIKE source_table [ like_option [...] ] }
+ [, ... ]
+)
+ [ AUTO_INCREMENT [ = ] value ]
+ [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ][ [ DEFAULT ] COLLATE [ = ] collation ]
+ [ WITH ( {storage_parameter = value} [, ... ] ) ]
+ [ COMPRESS | NOCOMPRESS ]
+ [ TABLESPACE tablespace_name ]
+ PARTITION BY {RANGE [ COLUMNS ] | LIST [ COLUMNS ] | HASH | KEY} (partition_key) [ PARTITIONS integer ] SUBPARTITION BY {RANGE | LIST | HASH | KEY} (subpartition_key) [ SUBPARTITIONS integer ]
+ (
+ PARTITION partition_name1 [ VALUES LESS THAN { (val1 | MAXVALUE) | MAXVALUE } | VALUES [ IN ] (val1[, ...]) ] [ TABLESPACE [=] tablespace ]
+ (
+ { SUBPARTITION subpartition_name1 [ VALUES LESS THAN (val1_1) | VALUES (val1_1[, ...])] [ TABLESPACE [=] tablespace ] } [, ...]
+ )
+ [, ...]
+ ) [ { ENABLE | DISABLE } ROW MOVEMENT ];
+NOTICE: [ COLUMNS ] is only available in B-format database!
+NOTICE: [ PARTITIONS integer ] in RANGE/LIST partition is only available in B-format database!
+NOTICE: [ IN ] is only available in B-format database!
+NOTICE: KEY is only available in B-format database!
+NOTICE: MAXVALUE without parentheses is only available in B-format database!
+
+where column_constraint can be:
+[ CONSTRAINT constraint_name ]
+{ NOT NULL |
+ NULL |
+ CHECK ( expression ) |
+ DEFAULT default_expr |
+ GENERATED ALWAYS AS ( generation_expr ) [STORED] |
+ AUTO_INCREMENT |
+ UNIQUE [KEY] index_parameters |
+ PRIMARY KEY index_parameters |
+ REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
+ [ ON DELETE action ] [ ON UPDATE action ] }
+[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
+NOTICE: 'UNIQUE KEY' in table_constraint is only available in CENTRALIZED mode and B-format database!
+where table_constraint can be:
+[ CONSTRAINT [ constraint_name ] ]
+{ CHECK ( expression ) |
+ UNIQUE [ index_name ] [ USING method ] ( { column_name [ ASC | DESC ] } [, ... ] ) index_parameters |
+ PRIMARY KEY [ USING method ] ( { column_name [ ASC | DESC ] } [, ... ] ) index_parameters |
+ FOREIGN KEY [ index_name ] ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ]
+ [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] }
+[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
+where like_option can be:
+{ INCLUDING | EXCLUDING } { DEFAULTS | GENERATED | CONSTRAINTS | INDEXES | STORAGE | COMMENTS | RELOPTIONS | ALL }
+where index_parameters can be:
+[ WITH ( {storage_parameter = value} [, ... ] ) ]
+[ USING INDEX TABLESPACE tablespace_name ]
+
+NOTICE: 'CREATE TABLE SUBPARTITION' is only available in CENTRALIZED mode!
+NOTICE: '[ constraint_name ]' in table_constraint is optional in CENTRALIZED mode and B-format database, it is mandatory in other scenarios.
+NOTICE: '[ index_name ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ USING method ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ ASC | DESC ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: 'AUTO_INCREMENT' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ CHARACTER SET | CHARSET ]' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ COLLATE [ = ] collation ]' is only available in CENTRALIZED mode and B-format database!
+
+\h alter table;
+Command: ALTER TABLE
+Description: change the definition of a table
+Syntax:
+ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
+ action [, ... ];
+ALTER TABLE [ IF EXISTS ] table_name
+ ADD ( { column_name data_type [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ]} [, ...] );
+ALTER TABLE [ IF EXISTS ] table_name
+ MODIFY ( { column_name data_type | column_name [ CONSTRAINT constraint_name ] NOT NULL [ ENABLE ] | column_name [ CONSTRAINT constraint_name ] NULL } [, ...] );
+ALTER TABLE [ IF EXISTS ] table_name
+ RENAME TO new_table_name;
+ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
+ RENAME [ COLUMN ] column_name TO new_column_name;
+ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
+ RENAME CONSTRAINT constraint_name TO new_constraint_name;
+ALTER TABLE [ IF EXISTS ] table_name
+ SET SCHEMA new_schema;
+
+where action can be:
+column_clause
+ | ADD table_constraint [ NOT VALID ]
+ | ADD table_constraint_using_index
+ | VALIDATE CONSTRAINT constraint_name
+ | DROP CONSTRAINT [ IF EXISTS ] constraint_name [ RESTRICT | CASCADE ]
+ | CLUSTER ON index_name
+ | SET WITHOUT CLUSTER
+ | SET ( {storage_parameter = value} [, ... ] )
+ | RESET ( storage_parameter [, ... ] )
+ | OWNER TO new_owner
+ | SET TABLESPACE new_tablespace
+ | SET {COMPRESS|NOCOMPRESS}
+ | TO { GROUP groupname | NODE ( nodename [, ... ] ) }
+ | ADD NODE ( nodename [, ... ] )
+ | DELETE NODE ( nodename [, ... ] )
+ | UPDATE SLICE LIKE table_name
+ | DISABLE TRIGGER [ trigger_name | ALL | USER ]
+ | ENABLE TRIGGER [ trigger_name | ALL | USER ]
+ | ENABLE REPLICA TRIGGER trigger_name
+ | ENABLE ALWAYS TRIGGER trigger_name
+ | ENABLE ROW LEVEL SECURITY
+ | DISABLE ROW LEVEL SECURITY
+ | FORCE ROW LEVEL SECURITY
+ | NO FORCE ROW LEVEL SECURITY
+ | ENCRYPTION KEY ROTATION
+ | AUTO_INCREMENT [ = ] value
+ | ALTER INDEX index_name [ VISBLE | INVISIBLE ]
+ | [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ]
+ | CONVERT TO CHARACTER SET | CHARSET charset [ COLLATE collation ]
+NOTICE: '[ CHARACTER SET | CHARSET ]' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ COLLATE ]' is only available in CENTRALIZED mode and B-format database!
+where column_clause can be:
+ADD [ COLUMN ] column_name data_type [ CHARACTER SET | CHARSET charset ] [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ] [ FIRST | AFTER column_name ]
+ | MODIFY column_name data_type
+ | MODIFY column_name [ CONSTRAINT constraint_name ] NOT NULL [ ENABLE ]
+ | MODIFY column_name [ CONSTRAINT constraint_name ] NULL
+ | MODIFY [ COLUMN ] column_name data_type [ CHARACTER SET | CHARSET charset ] [ COLLATE collation ] [ column_constraint [ ... ] ] [FIRST | AFTER column_name]
+ | CHANGE [ COLUMN ] column_name new_column_name data_type [ CHARACTER SET | CHARSET charset ] [ COLLATE collation ] [ column_constraint [ ... ] ] [FIRST | AFTER column_name]
+ | DROP [ COLUMN ] [ IF EXISTS ] column_name [ RESTRICT | CASCADE ]
+ | ALTER [ COLUMN ] column_name [ SET DATA ] TYPE data_type [ COLLATE collation ] [ USING expression ]
+ | ALTER [ COLUMN ] column_name { SET DEFAULT expression | DROP DEFAULT }
+ | ALTER [ COLUMN ] column_name { SET | DROP } NOT NULL
+ | ALTER [ COLUMN ] column_name SET STATISTICS [PERCENT] integer
+ | ADD STATISTICS (( column_1_name, column_2_name [, ...] ))
+ | DELETE STATISTICS (( column_1_name, column_2_name [, ...] ))
+ | ALTER [ COLUMN ] column_name SET ( {attribute_option = value} [, ... ] )
+ | ALTER [ COLUMN ] column_name RESET ( attribute_option [, ... ] )
+ | ALTER [ COLUMN ] column_name SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN }
+NOTICE: 'MODIFY [ COLUMN ] ...' action is only available in CENTRALIZED mode and B-format database!
+NOTICE: 'CHANGE [ COLUMN ] ...' action is only available in CENTRALIZED mode and B-format database!
+where column_constraint can be:
+[ CONSTRAINT constraint_name ]
+ { NOT NULL |
+ NULL |
+ CHECK ( expression ) |
+ DEFAULT default_expr |
+ GENERATED ALWAYS AS ( generation_expr ) [STORED] |
+ AUTO_INCREMENT |
+ UNIQUE [KEY] index_parameters |
+ PRIMARY KEY index_parameters |
+ ENCRYPTED WITH ( COLUMN_ENCRYPTION_KEY = column_encryption_key, ENCRYPTION_TYPE = encryption_type_value ) |
+ REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
+ [ ON DELETE action ] [ ON UPDATE action ] }
+ [ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
+NOTICE: 'UNIQUE KEY' in table_constraint is only available in CENTRALIZED mode and B-format database!
+where compress_mode can be:
+{ DELTA | PREFIX | DICTIONARY | NUMSTR | NOCOMPRESS }
+where table_constraint can be:
+[ CONSTRAINT [ constraint_name ] ]
+ { CHECK ( expression ) |
+ UNIQUE [ idx_name ] [ USING method ] ( { { column_name | ( expression ) } [ ASC | DESC ] } [, ... ] ) index_parameters [ VISIBLE | INVISIBLE ] |
+ PRIMARY KEY [ USING method ] ( { column_name [ ASC | DESC ] }[, ... ] ) index_parameters [ VISIBLE | INVISIBLE ] |
+ PARTIAL CLUSTER KEY ( column_name [, ... ] ) |
+ FOREIGN KEY [ idx_name ] ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ]
+ [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] }
+ [ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
+where index_parameters can be:
+[ WITH ( {storage_parameter = value} [, ... ] ) ]
+ [ USING INDEX TABLESPACE tablespace_name ]
+where table_constraint_using_index can be:
+[ CONSTRAINT constraint_name ]
+ { UNIQUE | PRIMARY KEY } USING INDEX index_name
+ [ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
+
+NOTICE: '[ constraint_name ]' in table_constraint is optional in CENTRALIZED mode and B-format database, it is mandatory in other scenarios.
+NOTICE: '[ index_name ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ USING method ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ ASC | DESC ]' in table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: '( expression )' in 'UNIQUE' clause of table_constraint is only available in CENTRALIZED mode and B-format database!
+NOTICE: 'AUTO_INCREMENT' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ FIRST | AFTER column_name ]' clause is only available in CENTRALIZED mode!
+NOTICE: '[ FIRST | AFTER column_name ]' in 'MODIFY | CHANGE [ COLUMN ] ...' clause is only available in B-format database!
+NOTICE: '[ CHARACTER SET | CHARSET charset ]' is only available in CENTRALIZED mode and B-format database!
+NOTICE: '[ VISIBLE | INVISIBLE ]' is only avaliable in CENTRALIZED mode and B-format database!
+
+alter session set current_schema = s_charset_1;
+create table s_t_charset_1(s1 varchar(20));
+select pg_get_tabledef('s_t_charset_1');
+ pg_get_tabledef
+------------------------------------------------------------------------------
+ SET search_path = s_charset_1; +
+ CREATE TABLE s_t_charset_1 ( +
+ s1 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci+
+ ) +
+ CHARACTER SET = "UTF8" COLLATE = "utf8mb4_general_ci" +
+ WITH (orientation=row, compression=no);
+(1 row)
+
+create table s_t_charset_2(s1 varchar(20));
+select pg_get_tabledef('s_t_charset_2');
+ pg_get_tabledef
+------------------------------------------------------------------------------
+ SET search_path = s_charset_1; +
+ CREATE TABLE s_t_charset_2 ( +
+ s1 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci+
+ ) +
+ CHARACTER SET = "UTF8" COLLATE = "utf8mb4_general_ci" +
+ WITH (orientation=row, compression=no);
+(1 row)
+
+create table s_t_charset_3 (like s_t_charset_1);
+select pg_get_tabledef('s_t_charset_3');
+ pg_get_tabledef
+------------------------------------------------------------------------------
+ SET search_path = s_charset_1; +
+ CREATE TABLE s_t_charset_3 ( +
+ s1 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci+
+ ) +
+ CHARACTER SET = "UTF8" COLLATE = "utf8mb4_general_ci" +
+ WITH (orientation=row, compression=no);
+(1 row)
+
+create table s_t_charset_4(s1 varchar(20) charset utf8mb4 collate "aa_DJ");
+create table s_t_charset_5(s1 varchar(20) collate "aa_DJ");
+create table s_t_charset_6(s1 int);
+alter table s_t_charset_6 charset binary;
+alter table s_t_charset_6 convert to charset default collate binary; -- error
+ERROR: collation "binary" for encoding "UTF8" does not exist
+alter table s_t_charset_6 convert to charset default collate utf8mb4_bin;
+select pg_get_tabledef('s_t_charset_6');
+ pg_get_tabledef
+------------------------------------------------
+ SET search_path = s_charset_1; +
+ CREATE TABLE s_t_charset_6 ( +
+ s1 integer +
+ ) +
+ CHARACTER SET = "UTF8" COLLATE = "utf8mb4_bin"+
+ WITH (orientation=row, compression=no);
+(1 row)
+
+create table s_t_charset_7 as table s_t_charset_1;
+\d+ s_t_charset_7;
+ Table "s_charset_1.s_t_charset_7"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------+----------------------------+----------+--------------+-------------
+ s1 | character varying(20) | collate utf8mb4_general_ci | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no, collate=1537
+
+create table s_t_charset_8 as select '123';
+\d+ s_t_charset_8;
+ Table "s_charset_1.s_t_charset_8"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+------+-----------+----------+--------------+-------------
+ ?column? | text | | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no, collate=1537
+
+alter session set current_schema = s_charset_12;
+create table s_t_charset_9(s1 varchar(20) charset utf8mb4);
+alter table s_t_charset_9 convert to charset default collate utf8mb4_bin; -- error
+ERROR: collation "utf8mb4_bin" for encoding "SQL_ASCII" does not exist
+alter session set current_schema = s_charset_1;
+-- partition table
+create table p_charset_1(c1 varchar(20),c2 varchar(20),c3 int)
+character set = utf8mb4 collate = utf8mb4_general_ci
+partition by hash(c1)
+(
+partition p1,
+partition p2
+);
+select * from pg_get_tabledef('p_charset_1');
+ pg_get_tabledef
+-------------------------------------------------------------------------------
+ SET search_path = s_charset_1; +
+ CREATE TABLE p_charset_1 ( +
+ c1 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci,+
+ c2 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci,+
+ c3 integer +
+ ) +
+ CHARACTER SET = "UTF8" COLLATE = "utf8mb4_general_ci" +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY HASH (c1) +
+ ( +
+ PARTITION p1 TABLESPACE pg_default, +
+ PARTITION p2 TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+alter table p_charset_1 convert to character set utf8mb4;
+alter table p_charset_1 collate utf8mb4_unicode_ci;
+insert into p_charset_1 values('a中国a');
+select * from p_charset_1;
+ c1 | c2 | c3
+--------+----+----
+ a中国a | |
+(1 row)
+
+\d+ p_charset_1;
+ Table "s_charset_1.p_charset_1"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+-----------------------+----------------------------+----------+--------------+-------------
+ c1 | character varying(20) | collate utf8mb4_general_ci | extended | |
+ c2 | character varying(20) | collate utf8mb4_general_ci | extended | |
+ c3 | integer | | plain | |
+Partition By HASH(c1)
+Number of partitions: 2 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no, collate=1538
+
+-- temporary table
+create temporary table tem_charset_1(c1 varchar(20),c2 varchar(20),c3 int) character set = utf8mb4;
+select r.relname,r.reloptions,a.attcollation from pg_class r,pg_attribute a where r.oid=a.attrelid and r.relname='tem_charset_1';
+ relname | reloptions | attcollation
+---------------+-----------------------------------------------+--------------
+ tem_charset_1 | {orientation=row,compression=no,collate=1537} | 0
+ tem_charset_1 | {orientation=row,compression=no,collate=1537} | 0
+ tem_charset_1 | {orientation=row,compression=no,collate=1537} | 0
+ tem_charset_1 | {orientation=row,compression=no,collate=1537} | 0
+ tem_charset_1 | {orientation=row,compression=no,collate=1537} | 0
+ tem_charset_1 | {orientation=row,compression=no,collate=1537} | 0
+ tem_charset_1 | {orientation=row,compression=no,collate=1537} | 0
+ tem_charset_1 | {orientation=row,compression=no,collate=1537} | 1537
+ tem_charset_1 | {orientation=row,compression=no,collate=1537} | 1537
+ tem_charset_1 | {orientation=row,compression=no,collate=1537} | 0
+(10 rows)
+
+alter table tem_charset_1 convert to character set utf8mb4;
+alter table tem_charset_1 collate utf8mb4_unicode_ci;
+insert into tem_charset_1 values('a中国a');
+select r.relname,r.reloptions,a.attcollation from pg_class r,pg_attribute a where r.oid=a.attrelid and r.relname='tem_charset_1';
+ relname | reloptions | attcollation
+---------------+-----------------------------------------------+--------------
+ tem_charset_1 | {orientation=row,compression=no,collate=1538} | 0
+ tem_charset_1 | {orientation=row,compression=no,collate=1538} | 0
+ tem_charset_1 | {orientation=row,compression=no,collate=1538} | 0
+ tem_charset_1 | {orientation=row,compression=no,collate=1538} | 0
+ tem_charset_1 | {orientation=row,compression=no,collate=1538} | 0
+ tem_charset_1 | {orientation=row,compression=no,collate=1538} | 0
+ tem_charset_1 | {orientation=row,compression=no,collate=1538} | 0
+ tem_charset_1 | {orientation=row,compression=no,collate=1538} | 1537
+ tem_charset_1 | {orientation=row,compression=no,collate=1538} | 1537
+ tem_charset_1 | {orientation=row,compression=no,collate=1538} | 0
+(10 rows)
+
+\! @abs_bindir@/gs_dump d_charset -p @portstring@ -f @abs_bindir@/d_charset.tar -F t >/dev/null 2>&1; echo $?
+0
+\! @abs_bindir@/gs_restore -d d_charset_bak -p @portstring@ @abs_bindir@/d_charset.tar >/dev/null 2>&1; echo $?
+0
+\c d_charset_bak;
+select * from pg_namespace where nspname like 's_charset_%' order by 1;
+ nspname | nspowner | nsptimeline | nspacl | in_redistribution | nspblockchain | nspcollation
+--------------+----------+-------------+--------+-------------------+---------------+--------------
+ s_charset_1 | 10 | 0 | | n | f | 1537
+ s_charset_10 | 10 | 0 | | n | f | 1537
+ s_charset_11 | 10 | 0 | | n | f | 1539
+ s_charset_12 | 10 | 0 | | n | f | 1026
+ s_charset_13 | 10 | 0 | | n | f | 1026
+ s_charset_14 | 10 | 0 | | n | f | 1026
+ s_charset_16 | 10 | 0 | | n | f | 1538
+ s_charset_17 | 10 | 0 | | n | f | 1538
+ s_charset_18 | 10 | 0 | | n | f | 1538
+ s_charset_19 | 10 | 0 | | n | f | 1537
+ s_charset_2 | 10 | 0 | | n | f | 1537
+ s_charset_20 | 10 | 0 | | n | f | 1537
+ s_charset_21 | 10 | 0 | | n | f | 1537
+ s_charset_22 | 10 | 0 | | n | f | 1537
+ s_charset_3 | 10 | 0 | | n | f | 1538
+ s_charset_4 | 10 | 0 | | n | f | 1537
+ s_charset_5 | 10 | 0 | | n | f | 1537
+ s_charset_6 | 10 | 0 | | n | f |
+ s_charset_7 | 10 | 0 | | n | f | 1538
+ s_charset_8 | 10 | 0 | | n | f | 1026
+ s_charset_9 | 10 | 0 | | n | f | 1538
+(21 rows)
+
+select relname, reloptions from pg_class where relname like 't_charset_%' order by 1;
+ relname | reloptions
+--------------+-----------------------------------------------
+ t_charset_0 | {orientation=row,compression=no}
+ t_charset_1 | {orientation=row,compression=no,collate=1538}
+ t_charset_10 | {orientation=row,compression=no,collate=1537}
+ t_charset_11 | {orientation=row,compression=no,collate=1539}
+ t_charset_12 | {orientation=row,compression=no,collate=1538}
+ t_charset_17 | {orientation=row,compression=no,collate=1538}
+ t_charset_18 | {orientation=row,compression=no,collate=1538}
+ t_charset_19 | {orientation=row,compression=no,collate=1537}
+ t_charset_2 | {orientation=row,compression=no,collate=1538}
+ t_charset_20 | {orientation=row,compression=no,collate=1537}
+ t_charset_21 | {orientation=row,compression=no,collate=1537}
+ t_charset_22 | {orientation=row,compression=no,collate=1537}
+ t_charset_23 | {orientation=row,compression=no,collate=1537}
+ t_charset_3 | {orientation=row,compression=no,collate=1537}
+ t_charset_4 | {orientation=row,compression=no,collate=1537}
+ t_charset_5 | {orientation=row,compression=no,collate=1537}
+ t_charset_6 | {orientation=row,compression=no,collate=1537}
+ t_charset_7 | {orientation=row,compression=no,collate=1538}
+ t_charset_8 | {orientation=row,compression=no,collate=1026}
+ t_charset_9 | {orientation=row,compression=no,collate=1537}
+(20 rows)
+
+select *,rawtohex(a1),rawtohex(a2),length(a1),length(a2),lengthb(a1),lengthb(a2) from a_charset_1;
+ a1 | a2 | a3 | rawtohex | rawtohex | length | length | lengthb | lengthb
+------+------+------+--------------+--------------+--------+--------+---------+---------
+ 中国 | 中国 | 中国 | e4b8ade59bbd | e4b8ade59bbd | 2 | 2 | 6 | 6
+ 中国 | 中国 | | e4b8ade59bbd | e4b8ade59bbd | 2 | 2 | 6 | 6
+(2 rows)
+
+select pg_get_tabledef('a_charset_2');
+ pg_get_tabledef
+--------------------------------------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE a_charset_2 ( +
+ a1 integer, +
+ a2 character(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a3 character(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a4 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a5 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a6 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a7 nvarchar2(20) CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci, +
+ a8 blob, +
+ a9 blob, +
+ a10 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_unicode_ci,+
+ a11 character varying(20) CHARACTER SET "UTF8" COLLATE utf8mb4_unicode_ci,+
+ a13 integer, +
+ a14 character varying(20) CHARACTER SET "UTF8" COLLATE "aa_DJ.utf8", +
+ a15 character varying(20) CHARACTER SET "UTF8" COLLATE "aa_DJ.utf8", +
+ a16 character varying(20) CHARACTER SET "UTF8" COLLATE "aa_DJ", +
+ a17 text CHARACTER SET "UTF8" COLLATE utf8mb4_general_ci +
+ ) +
+ CHARACTER SET = "SQL_ASCII" COLLATE = "binary" +
+ WITH (orientation=row, compression=no);
+(1 row)
+
+set b_format_behavior_compat_options = '';
+\c regression
+drop database if exists d_charset_bak;
+drop database if exists d_charset;
diff --git a/src/test/regress/output/dump_auto_increment.source b/src/test/regress/output/dump_auto_increment.source
index d31afbaf2..f986bbdc6 100644
--- a/src/test/regress/output/dump_auto_increment.source
+++ b/src/test/regress/output/dump_auto_increment.source
@@ -30,11 +30,6 @@ NOTICE: CREATE TABLE will create implicit sequence "test_dump_autoinc_unlog_col
NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_dump_autoinc_unlog_col1_key" for table "test_dump_autoinc_unlog"
INSERT INTO test_dump_autoinc_unlog VALUES(0,0);
INSERT INTO test_dump_autoinc_unlog VALUES(0,0);
-CREATE TABLE test_dump_autoinc_pk_gencol(col1 int auto_increment PRIMARY KEY, col2 int generated always as(2*col1) stored) AUTO_INCREMENT = 1000000;
-NOTICE: CREATE TABLE will create implicit sequence "test_dump_autoinc_pk_gencol_col1_seq" for serial column "test_dump_autoinc_pk_gencol.col1"
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_dump_autoinc_pk_gencol_pkey" for table "test_dump_autoinc_pk_gencol"
-INSERT INTO test_dump_autoinc_pk_gencol VALUES(0);
-INSERT INTO test_dump_autoinc_pk_gencol VALUES(0);
CREATE TABLE test_dump_autoinc_range_list
(
col_1 int auto_increment primary key,
@@ -195,26 +190,6 @@ select col1,col2 from test_dump_autoinc_unlog order by 1,2;
100002 | 0
(3 rows)
-\d+ test_dump_autoinc_pk_gencol
- Table "public.test_dump_autoinc_pk_gencol"
- Column | Type | Modifiers | Storage | Stats target | Description
---------+---------+-----------------------------------------+---------+--------------+-------------
- col1 | integer | not null AUTO_INCREMENT | plain | |
- col2 | integer | generated always as ((2 * col1)) stored | plain | |
-Indexes:
- "test_dump_autoinc_pk_gencol_pkey" PRIMARY KEY, btree (col1) TABLESPACE pg_default
-Has OIDs: no
-Options: orientation=row, compression=no
-
-INSERT INTO test_dump_autoinc_pk_gencol VALUES(0);
-select col1,col2 from test_dump_autoinc_pk_gencol order by 1,2;
- col1 | col2
----------+---------
- 1000000 | 2000000
- 1000001 | 2000002
- 1000002 | 0
-(3 rows)
-
\d+ test_dump_autoinc_range_list
Table "public.test_dump_autoinc_range_list"
Column | Type | Modifiers | Storage | Stats target | Description
diff --git a/src/test/regress/output/dump_partition_b_db.source b/src/test/regress/output/dump_partition_b_db.source
new file mode 100644
index 000000000..d045df184
--- /dev/null
+++ b/src/test/regress/output/dump_partition_b_db.source
@@ -0,0 +1,275 @@
+drop database if exists dump_partition_db;
+NOTICE: database "dump_partition_db" does not exist, skipping
+drop database if exists restore_partition_db;
+NOTICE: database "restore_partition_db" does not exist, skipping
+create database dump_partition_db with dbcompatibility = 'B';
+create database restore_partition_db with dbcompatibility = 'B';
+\c dump_partition_db
+CREATE TABLE t_single_key_list (a int, b int, c int)
+PARTITION BY list(a)
+(
+ PARTITION p1 VALUES (100),
+ PARTITION p2 VALUES (200),
+ PARTITION p3 VALUES (300),
+ PARTITION p4 VALUES (400)
+);
+SELECT pg_get_tabledef('t_single_key_list'::regclass);
+ pg_get_tabledef
+------------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE t_single_key_list ( +
+ a integer, +
+ b integer, +
+ c integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY LIST (a) +
+ ( +
+ PARTITION p1 VALUES (100) TABLESPACE pg_default,+
+ PARTITION p2 VALUES (200) TABLESPACE pg_default,+
+ PARTITION p3 VALUES (300) TABLESPACE pg_default,+
+ PARTITION p4 VALUES (400) TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+CREATE TABLE t_multi_keys_list_null (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b)
+(
+ PARTITION p1 VALUES IN ( (0,NULL) ),
+ PARTITION p2 VALUES IN ( (0,1), (0,2), (0,3), (1,1), (1,2) ),
+ PARTITION p3 VALUES IN ( (NULL,0), (2,1) ),
+ PARTITION p4 VALUES IN ( (3,2), (NULL,NULL) )
+);
+SELECT pg_get_tabledef('t_multi_keys_list_null'::regclass);
+ pg_get_tabledef
+--------------------------------------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE t_multi_keys_list_null ( +
+ a integer, +
+ b integer, +
+ c integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY LIST (a, b) +
+ ( +
+ PARTITION p2 VALUES ((0,1),(0,2),(0,3),(1,1),(1,2)) TABLESPACE pg_default,+
+ PARTITION p1 VALUES ((0,NULL)) TABLESPACE pg_default, +
+ PARTITION p4 VALUES ((3,2),(NULL,NULL)) TABLESPACE pg_default, +
+ PARTITION p3 VALUES ((NULL,0),(2,1)) TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+CREATE TABLE t_multi_keys_list (a varchar(8), b int, c DATE, d int DEFAULT 0)
+PARTITION BY LIST COLUMNS(a,b,c)
+(
+ PARTITION p1 VALUES IN ( ('0',0,'2022-12-31')),
+ PARTITION p2 VALUES IN ( ('{',1,'2022-12-31'), ('''',2,'2022-12-31'), ('0',3,'2022-12-31'), (',',1,'2022-12-31'), (NULL,2,'2022-12-31') ),
+ PARTITION p3 VALUES IN ( ('NULL',0,'2022-12-31'), ('}',1,'2022-12-31') ),
+ PARTITION p4 VALUES IN ( ('{',2,'2022-12-31'), ('3',3,'2022-12-31') ),
+ PARTITION pd VALUES IN (DEFAULT)
+);
+SELECT pg_get_tabledef('t_multi_keys_list'::regclass);
+ pg_get_tabledef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE t_multi_keys_list ( +
+ a character varying(8), +
+ b integer, +
+ c date, +
+ d integer DEFAULT 0 +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY LIST (a, b, c) +
+ ( +
+ PARTITION p1 VALUES (('0',0,'2022-12-31')) TABLESPACE pg_default, +
+ PARTITION p2 VALUES (('{',1,'2022-12-31'),('''',2,'2022-12-31'),('0',3,'2022-12-31'),(',',1,'2022-12-31'),(NULL,2,'2022-12-31')) TABLESPACE pg_default,+
+ PARTITION p4 VALUES (('{',2,'2022-12-31'),('3',3,'2022-12-31')) TABLESPACE pg_default, +
+ PARTITION pd VALUES (DEFAULT) TABLESPACE pg_default, +
+ PARTITION p3 VALUES (('NULL',0,'2022-12-31'),('}',1,'2022-12-31')) TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+INSERT INTO t_multi_keys_list VALUES('{',1,'2022-12-31');
+INSERT INTO t_multi_keys_list VALUES(',',1,'2022-12-31');
+SELECT * FROM t_multi_keys_list PARTITION(p2) ORDER BY a,b,c;
+ a | b | c | d
+---+---+------------+---
+ , | 1 | 12-31-2022 | 0
+ { | 1 | 12-31-2022 | 0
+(2 rows)
+
+CREATE TABLE t_part_by_key_num (a int, b int, c int)
+PARTITION BY KEY(a) PARTITIONS 5 SUBPARTITION BY KEY(c) SUBPARTITIONS 3;
+CREATE TABLE t_multi_keys_list_tmtz (a DATE, b timestamp with time zone, c int, d int DEFAULT 0)
+PARTITION BY LIST (a,b,c)
+(
+ PARTITION p1 VALUES ( ('2022-01-01','2022-01-01 12:00:00 pst',1)),
+ PARTITION p2 VALUES ( ('2022-02-01','2022-02-01 12:00:00 pst',2), ('2022-02-02','2022-02-02 12:00:00 pst',2), ('2022-02-03','2022-02-03 12:00:00 pst',2)),
+ PARTITION p3 VALUES ( ('2022-03-01','2022-03-01 12:00:00 pst',3), ('2022-03-02','2022-03-02 12:00:00 pst',3) ),
+ PARTITION pd VALUES (DEFAULT)
+);
+SELECT pg_get_tabledef('t_multi_keys_list_tmtz'::regclass);
+ pg_get_tabledef
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE t_multi_keys_list_tmtz ( +
+ a date, +
+ b timestamp with time zone, +
+ c integer, +
+ d integer DEFAULT 0 +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY LIST (a, b, c) +
+ ( +
+ PARTITION p1 VALUES (('2022-01-01','2022-01-01 12:00:00 pst',1)) TABLESPACE pg_default, +
+ PARTITION p2 VALUES (('2022-02-01','2022-02-01 12:00:00 pst',2),('2022-02-02','2022-02-02 12:00:00 pst',2),('2022-02-03','2022-02-03 12:00:00 pst',2)) TABLESPACE pg_default,+
+ PARTITION p3 VALUES (('2022-03-01','2022-03-01 12:00:00 pst',3),('2022-03-02','2022-03-02 12:00:00 pst',3)) TABLESPACE pg_default, +
+ PARTITION pd VALUES (DEFAULT) TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+INSERT INTO t_multi_keys_list_tmtz VALUES('2022-01-01','2022-01-01 12:00:00 pst',1);
+INSERT INTO t_multi_keys_list_tmtz VALUES('2022-02-01','2022-02-01 12:00:00 pst',2);
+INSERT INTO t_multi_keys_list_tmtz VALUES('2022-02-02','2022-02-02 12:00:00 pst',2);
+INSERT INTO t_multi_keys_list_tmtz VALUES('2022-02-03','2022-02-03 12:00:00 pst',2);
+INSERT INTO t_multi_keys_list_tmtz VALUES('2022-03-01','2022-03-01 12:00:00 pst',3);
+INSERT INTO t_multi_keys_list_tmtz VALUES('2022-03-02','2022-03-02 12:00:00 pst',3);
+SELECT * FROM t_multi_keys_list_tmtz PARTITION(p2) ORDER BY a,b,c;
+ a | b | c | d
+------------+------------------------------+---+---
+ 02-01-2022 | Tue Feb 01 12:00:00 2022 PST | 2 | 0
+ 02-02-2022 | Wed Feb 02 12:00:00 2022 PST | 2 | 0
+ 02-03-2022 | Thu Feb 03 12:00:00 2022 PST | 2 | 0
+(3 rows)
+
+\! @abs_bindir@/gs_dump dump_partition_db -p @portstring@ -f @abs_bindir@/dump_listpart_test.tar -F t >/dev/null 2>&1; echo $?
+0
+\! @abs_bindir@/gs_restore -d restore_partition_db -p @portstring@ @abs_bindir@/dump_listpart_test.tar >/dev/null 2>&1; echo $?
+0
+\c restore_partition_db
+\d+ t_multi_keys_list
+ Table "public.t_multi_keys_list"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ a | character varying(8) | | extended | |
+ b | integer | | plain | |
+ c | date | | plain | |
+ d | integer | default 0 | plain | |
+Partition By LIST(a, b, c)
+Number of partitions: 5 (View pg_partition to check each partition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+SELECT pg_get_tabledef('t_multi_keys_list'::regclass);
+ pg_get_tabledef
+-------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE t_multi_keys_list ( +
+ a character varying(8), +
+ b integer, +
+ c date, +
+ d integer DEFAULT 0 +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY LIST (a, b, c) +
+ ( +
+ PARTITION p1 VALUES (('0',0,'2022-12-31')) TABLESPACE pg_default, +
+ PARTITION p2 VALUES (('{',1,'2022-12-31'),('''',2,'2022-12-31'),('0',3,'2022-12-31'),(',',1,'2022-12-31'),(NULL,2,'2022-12-31')) TABLESPACE pg_default,+
+ PARTITION p4 VALUES (('{',2,'2022-12-31'),('3',3,'2022-12-31')) TABLESPACE pg_default, +
+ PARTITION pd VALUES (DEFAULT) TABLESPACE pg_default, +
+ PARTITION p3 VALUES (('NULL',0,'2022-12-31'),('}',1,'2022-12-31')) TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+INSERT INTO t_multi_keys_list VALUES('''',2,'2022-12-31');
+SELECT * FROM t_multi_keys_list PARTITION(p2) ORDER BY b,a,c;
+ a | b | c | d
+---+---+------------+---
+ , | 1 | 12-31-2022 | 0
+ { | 1 | 12-31-2022 | 0
+ ' | 2 | 12-31-2022 | 0
+(3 rows)
+
+SELECT pg_get_tabledef('t_part_by_key_num'::regclass);
+ pg_get_tabledef
+---------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE t_part_by_key_num ( +
+ a integer, +
+ b integer, +
+ c integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY HASH (a) SUBPARTITION BY HASH (c) +
+ ( +
+ PARTITION p0 TABLESPACE pg_default +
+ ( +
+ SUBPARTITION p0sp0 TABLESPACE pg_default,+
+ SUBPARTITION p0sp1 TABLESPACE pg_default,+
+ SUBPARTITION p0sp2 TABLESPACE pg_default +
+ ), +
+ PARTITION p1 TABLESPACE pg_default +
+ ( +
+ SUBPARTITION p1sp0 TABLESPACE pg_default,+
+ SUBPARTITION p1sp1 TABLESPACE pg_default,+
+ SUBPARTITION p1sp2 TABLESPACE pg_default +
+ ), +
+ PARTITION p2 TABLESPACE pg_default +
+ ( +
+ SUBPARTITION p2sp0 TABLESPACE pg_default,+
+ SUBPARTITION p2sp1 TABLESPACE pg_default,+
+ SUBPARTITION p2sp2 TABLESPACE pg_default +
+ ), +
+ PARTITION p3 TABLESPACE pg_default +
+ ( +
+ SUBPARTITION p3sp0 TABLESPACE pg_default,+
+ SUBPARTITION p3sp1 TABLESPACE pg_default,+
+ SUBPARTITION p3sp2 TABLESPACE pg_default +
+ ), +
+ PARTITION p4 TABLESPACE pg_default +
+ ( +
+ SUBPARTITION p4sp0 TABLESPACE pg_default,+
+ SUBPARTITION p4sp1 TABLESPACE pg_default,+
+ SUBPARTITION p4sp2 TABLESPACE pg_default +
+ ) +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+SELECT pg_get_tabledef('t_multi_keys_list_tmtz'::regclass);
+ pg_get_tabledef
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SET search_path = public; +
+ CREATE TABLE t_multi_keys_list_tmtz ( +
+ a date, +
+ b timestamp with time zone, +
+ c integer, +
+ d integer DEFAULT 0 +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY LIST (a, b, c) +
+ ( +
+ PARTITION p1 VALUES (('2022-01-01','2022-01-01 12:00:00 pst',1)) TABLESPACE pg_default, +
+ PARTITION p2 VALUES (('2022-02-01','2022-02-01 12:00:00 pst',2),('2022-02-02','2022-02-02 12:00:00 pst',2),('2022-02-03','2022-02-03 12:00:00 pst',2)) TABLESPACE pg_default,+
+ PARTITION p3 VALUES (('2022-03-01','2022-03-01 12:00:00 pst',3),('2022-03-02','2022-03-02 12:00:00 pst',3)) TABLESPACE pg_default, +
+ PARTITION pd VALUES (DEFAULT) TABLESPACE pg_default +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+SELECT * FROM t_multi_keys_list_tmtz PARTITION(p2) ORDER BY a,b,c;
+ a | b | c | d
+------------+------------------------------+---+---
+ 02-01-2022 | Tue Feb 01 12:00:00 2022 PST | 2 | 0
+ 02-02-2022 | Wed Feb 02 12:00:00 2022 PST | 2 | 0
+ 02-03-2022 | Thu Feb 03 12:00:00 2022 PST | 2 | 0
+(3 rows)
+
+\c regression
+drop database if exists restore_partition_db;
+drop database if exists dump_partition_db;
diff --git a/src/test/regress/output/event_dump_audit.source b/src/test/regress/output/event_dump_audit.source
new file mode 100644
index 000000000..fd960c507
--- /dev/null
+++ b/src/test/regress/output/event_dump_audit.source
@@ -0,0 +1,68 @@
+--audit test
+drop database if exists event_audit_b;
+NOTICE: database "event_audit_b" does not exist, skipping
+create database event_audit_b with dbcompatibility 'b';
+\c event_audit_b
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=268435455" > /dev/null 2>&1
+\! sleep 1s
+drop event if exists e;
+NOTICE: event "e" is not exists, skipping
+show audit_system_object;
+ audit_system_object
+---------------------
+ 268435455
+(1 row)
+
+create event e on schedule at '3000-01-01 00:00:00' disable do select 1;
+select pg_sleep(10);
+ pg_sleep
+----------
+
+(1 row)
+
+select detail_info from pg_query_audit(trunc((localtimestamp - interval '1' minute), 'second'), trunc(localtimestamp, 'second'))
+where database = 'event_audit_b' AND type='ddl_event';
+ detail_info
+--------------------------------------------------------------------------
+ drop event if exists e;
+ create event e on schedule at '3000-01-01 00:00:00' disable do select 1;
+(2 rows)
+
+drop event if exists e;
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object" > /dev/null 2>&1
+\c regression
+drop database if exists event_audit_b;
+--gs_dump
+drop database if exists dump_ev;
+NOTICE: database "dump_ev" does not exist, skipping
+create database dump_ev with dbcompatibility 'b';
+\c dump_ev
+create user event_dump_a with sysadmin password 'Test_event1';
+set role event_dump_a password 'Test_event1';
+create event e on schedule at '3000-01-01 00:00:00' disable do select 1;
+create event public.ea on schedule at '3000-01-01 00:00:00' disable do select 1;
+select job_name from pg_job where dbname='dump_ev';
+ job_name
+----------
+ e
+ ea
+(2 rows)
+
+\c dump_ev
+\! @abs_bindir@/gs_dump dump_ev -p @portstring@ -f @abs_bindir@/dump_ev.tar -n public -F t >/dev/null 2>&1; echo $?
+0
+drop database if exists restore_event_dump_db;
+NOTICE: database "restore_event_dump_db" does not exist, skipping
+create database restore_event_dump_db with dbcompatibility 'b';
+\! @abs_bindir@/gs_restore -d restore_event_dump_db -p @portstring@ @abs_bindir@/dump_ev.tar >/dev/null 2>&1; echo $?
+0
+\c restore_event_dump_db
+select job_name, job_status,failure_msg from pg_job where dbname='restore_event_dump_db';
+ job_name | job_status | failure_msg
+----------+------------+-------------
+--?.*
+(1 row)
+
+\c regression
+drop database if exists dump_ev;
+drop database if exists restore_event_dump_db;
diff --git a/src/test/regress/output/gs_global_config_audit.source b/src/test/regress/output/gs_global_config_audit.source
index 60bb738cb..128fd0d2b 100644
--- a/src/test/regress/output/gs_global_config_audit.source
+++ b/src/test/regress/output/gs_global_config_audit.source
@@ -1,36 +1,29 @@
\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_enabled=on" > /dev/null 2>&1
\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=33554431" > /dev/null 2>&1
-SELECT object_name,detail_info FROM pg_query_audit('2022-02-01 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_globalconfig';
- object_name | detail_info
--------------+-------------
-(0 rows)
-
-select * from gs_global_config;
- name | value
--------------+-------
- buckets_len | 16384
+SELECT * FROM pg_delete_audit('2022-02-01 9:30:00', '2031-12-12 22:00:00');
+ pg_delete_audit
+-----------------
+
(1 row)
ALTER GLOBAL CONFIGURATION with(lockwait_timeout=2000, lockwait_interval=2);
ALTER GLOBAL CONFIGURATION with(last_catchup_threshold=5000);
-select * from gs_global_config;
+select * from gs_global_config where name like '%lockwait%' or name like '%last_catchup_threshold%';
name | value
------------------------+-------
- buckets_len | 16384
lockwait_timeout | 2000
lockwait_interval | 2
last_catchup_threshold | 5000
-(4 rows)
+(3 rows)
DROP GLOBAL CONFIGURATION lockwait_timeout;
DROP GLOBAL CONFIGURATION last_catchup_threshold, lockwait_interval;
-select * from gs_global_config;
- name | value
--------------+-------
- buckets_len | 16384
-(1 row)
+select * from gs_global_config where name like '%lockwait%' or name like '%last_catchup_threshold%';
+ name | value
+------+-------
+(0 rows)
-SELECT object_name,detail_info FROM pg_query_audit('2022-02-01 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_globalconfig';
+SELECT object_name,detail_info FROM pg_query_audit('2022-02-01 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_globalconfig' and (detail_info like '%lockwait%' or detail_info like '%last_catchup_threshold%');
object_name | detail_info
------------------------+------------------------------------------------------------------------------
lockwait_timeout | ALTER GLOBAL CONFIGURATION with(lockwait_timeout=2000, lockwait_interval=2);
diff --git a/src/test/regress/output/hw_audit_client.source b/src/test/regress/output/hw_audit_client.source
new file mode 100644
index 000000000..fc1f54700
--- /dev/null
+++ b/src/test/regress/output/hw_audit_client.source
@@ -0,0 +1,141 @@
+CREATE DATABASE db_audit_client;
+\c db_audit_client
+-- set guc parameter
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state_select=1" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state=1" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=134217727" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "no_audit_client" > /dev/null 2>&1
+-- clear audit log
+SELECT pg_delete_audit(current_date, current_date + interval '24 hours');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+-- set no audit client
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "no_audit_client ='test_gsql@[local], gsql@[local]_test,test_gsql@[local]_test'" > /dev/null 2>&1
+\! sleep 1
+-- crerate table
+DROP TABLE IF EXISTS t_audit_client;
+NOTICE: table "t_audit_client" does not exist, skipping
+CREATE TABLE t_audit_client (id INTEGER, col1 VARCHAR(20));
+-- query audit log, count > 0
+SELECT (SELECT count(detail_info) FROM pg_query_audit(current_date,current_date + interval '24 hours') WHERE client_conninfo = 'gsql@[local]') > 0 AS count_gsql;
+ count_gsql
+------------
+ t
+(1 row)
+
+-- set no audit client
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "no_audit_client =',, gsql@[local] , ,'" > /dev/null 2>&1
+\! sleep 1
+-- clear audit log
+SELECT pg_delete_audit(current_date, current_date + interval '24 hours');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+-- generate audit logs
+DO $$DECLARE i record;
+BEGIN
+ FOR i IN 1..10
+ LOOP
+ execute 'INSERT INTO t_audit_client VALUES (' || i || ', ''audit'');';
+ execute 'SELECT * FROM t_audit_client;';
+ END LOOP;
+END$$;
+-- query audit log, count = 0
+SELECT (SELECT count(detail_info) FROM pg_query_audit(current_date,current_date + interval '24 hours') WHERE client_conninfo = 'gsql@[local]') > 0 AS count_gsql;
+ count_gsql
+------------
+ f
+(1 row)
+
+-- set no_audit_client
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "no_audit_client ='audit gsql@[local]'" > /dev/null 2>&1
+\! sleep 1
+-- change current application name
+SET application_name TO 'audit gsql';
+SELECT pg_delete_audit(current_date, current_date + interval '24 hours');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+-- generate audit logs
+DO $$DECLARE i record;
+BEGIN
+ FOR i IN 1..10
+ LOOP
+ execute 'INSERT INTO t_audit_client VALUES (' || i || ', ''audit'');';
+ execute 'SELECT * FROM t_audit_client;';
+ END LOOP;
+END$$;
+-- query audit log, count = 0
+SELECT (SELECT count(detail_info) FROM pg_query_audit(current_date,current_date + interval '24 hours') WHERE client_conninfo = 'audit gsql@[local]') > 0 AS count_gsql;
+ count_gsql
+------------
+ f
+(1 row)
+
+-- change current application name
+SET application_name TO 'gsql';
+SELECT pg_delete_audit(current_date, current_date + interval '24 hours');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+-- generate audit logs
+DO $$DECLARE i record;
+BEGIN
+ FOR i IN 1..10
+ LOOP
+ execute 'INSERT INTO t_audit_client VALUES (' || i || ', ''audit'');';
+ execute 'SELECT * FROM t_audit_client;';
+ END LOOP;
+END$$;
+-- query audit log, count > 0
+SELECT (SELECT count(detail_info) FROM pg_query_audit(current_date,current_date + interval '24 hours') WHERE client_conninfo = 'gsql@[local]') > 0 AS count_gsql;
+ count_gsql
+------------
+ t
+(1 row)
+
+-- change current application name
+SET application_name TO audit;
+SELECT pg_delete_audit(current_date, current_date + interval '24 hours');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+-- generate audit logs
+DO $$DECLARE i record;
+BEGIN
+ FOR i IN 1..10
+ LOOP
+ execute 'INSERT INTO t_audit_client VALUES (' || i || ', ''audit'');';
+ execute 'SELECT * FROM t_audit_client;';
+ END LOOP;
+END$$;
+-- query audit log, count > 0
+SELECT (SELECT count(detail_info) FROM pg_query_audit(current_date,current_date + interval '24 hours') WHERE client_conninfo = 'audit@[local]') > 0 AS count_gsql;
+ count_gsql
+------------
+ t
+(1 row)
+
+--reset guc parameter
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state_select" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=511" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "no_audit_client" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "no_audit_client" > /dev/null 2>&1
+-- clean env
+DROP TABLE IF EXISTS t_audit_client_client;
+NOTICE: table "t_audit_client_client" does not exist, skipping
+\c regression
+CLEAN CONNECTION TO ALL FORCE FOR DATABASE db_audit_client;
+DROP DATABASE db_audit_client;
diff --git a/src/test/regress/output/hw_audit_full.source b/src/test/regress/output/hw_audit_full.source
new file mode 100644
index 000000000..3e047c23e
--- /dev/null
+++ b/src/test/regress/output/hw_audit_full.source
@@ -0,0 +1,188 @@
+CREATE DATABASE db_audit_full;
+\c db_audit_full
+-- close all the audit options
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_login_logout=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_database_process=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_database_process=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_user_locked=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_user_violation=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_grant_revoke=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_copy_exec=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_function_exec=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_process_set_parameter=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state_select=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "enable_copy_server_files=1" > /dev/null 2>&1
+\! users="user_audit1, test_user_audit2, user_audit2_test test_user_audit2_test , user_audit3 , $USER, user_audit4 user5" && cmd="full_audit_users='$users'" && @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "$cmd" > /dev/null 2>&1
+\! sleep 1
+-- clear audit log
+SELECT pg_delete_audit(current_date, current_date + interval '24 hours');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+
+-- superuser, create table
+CREATE TABLE t_audit_super (id INTEGER, col1 VARCHAR(20));
+DO $$DECLARE i record;
+BEGIN
+ FOR i IN 1..100
+ LOOP
+ execute 'INSERT INTO t_audit_super VALUES (' || i || ', ''audit'');';
+ END LOOP;
+END$$;
+-- superuser, create user
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP USER IF EXISTS user_audit1 CASCADE; CREATE USER user_audit1 identified by 'audit@2023'; GRANT ALL PRIVILEGES TO user_audit1;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP USER IF EXISTS user_audit2 CASCADE; CREATE USER user_audit2 identified by 'audit@2023'; GRANT ALL PRIVILEGES TO user_audit2;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP USER IF EXISTS user_audit3 CASCADE; CREATE USER user_audit3 identified by 'audit@2023';" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP USER IF EXISTS user_audit4 CASCADE; CREATE USER user_audit4 identified by 'audit@2023'; GRANT ALL PRIVILEGES TO user_audit4;" > /dev/null 2>&1
+
+-- user1, do sql execution
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -U user_audit1 -W audit@2023 -C -f @abs_srcdir@/data/audit_full_execute.sql > /dev/null 2>&1
+
+-- user_audit2, do sql execution
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -U user_audit2 -W audit@2023 -C -f @abs_srcdir@/data/audit_full_execute.sql > /dev/null 2>&1
+-- user_audit4, do sql execution
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -U user_audit4 -W audit@2023 -C -f @abs_srcdir@/data/audit_full_execute.sql > /dev/null 2>&1
+-- 用户登录、注销审计 audit_login_logout
+-- login_success
+-- user_logout
+-- user_audit3, login_failed
+\! @abs_bindir@/gsql -d postgres -p @portstring@ -U user_audit3 -W audit@2022 -c "CREATE TABLE t_audit_login (id INTEGER, col1 VARCHAR(20));"
+gsql: FATAL: Invalid username/password,login denied.
+
+-- 用户访问越权审计 audit_user_violation
+-- user_audit3, user_violation
+\! @abs_bindir@/gsql -d postgres -p @portstring@ -U user_audit3 -W audit@2023 -c "select * from pg_query_audit(current_date,current_date + interval '24 hours');"
+ERROR: permission denied to query audit
+
+-- superuser only, ddl_directory
+CREATE OR REPLACE DIRECTORY dir as '/tmp/';
+
+-- superuser only, ddl_globalconfig
+ALTER GLOBAL CONFIGURATION with(audit_xid_info=1);
+ALTER GLOBAL CONFIGURATION with(audit_xid_info=0);
+-- COPY审计 audit_copy_exec
+-- superuser only, copy_to
+COPY t_audit_super TO '@abs_srcdir@/data/t_audit.data';
+-- superuser only, copy_from
+CREATE TABLE t_audit_super_copy (id INTEGER, col1 VARCHAR(20));
+COPY t_audit_super_copy FROM '@abs_srcdir@/data/t_audit.data';
+-- 数据库启动、停止、恢复和切换审计 audit_database_process superuser only
+-- null user, system_stop
+\! @abs_bindir@/gs_ctl stop -D @abs_srcdir@/tmp_check/datanode1/ > /dev/null 2>&1
+-- null user, system_start
+\! @abs_bindir@/gs_ctl start -D @abs_srcdir@/tmp_check/datanode1/ > /dev/null 2>&1
+-- null user, system_recover
+-- null user, system_switch
+--\! @abs_bindir@/gs_ctl switchover -f -m fast
+
+-- superuser, create query function and scale
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -f @abs_srcdir@/data/audit_full_superuser.sql > /dev/null 2>&1
+-- audit query
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "SELECT func_count_audit(content, 'user_audit1', 49) AS is_audit_type_exist_user_audit1 FROM t_audit_type WHERE id = 1;"
+NOTICE: table "t_result" does not exist, skipping
+CONTEXT: SQL statement "DROP TABLE IF EXISTS t_result"
+PL/pgSQL function func_count_audit(text[],text,integer) line 5 at SQL statement
+referenced column: is_audit_type_exist_user_audit1
+ is_audit_type_exist_user_audit1
+---------------------------------
+ (ddl_database,t)
+ (ddl_datasource,t)
+ (ddl_function,t)
+ (ddl_index,t)
+ (ddl_key,t)
+ (ddl_model,t)
+ (ddl_package,t)
+ (ddl_rowlevelsecurity,t)
+ (ddl_schema,t)
+ (ddl_sequence,t)
+ (ddl_serverforhadoop,t)
+ (ddl_sql_patch,t)
+ (ddl_synonym,t)
+ (ddl_table,t)
+ (ddl_tablespace,t)
+ (ddl_textsearch,t)
+ (ddl_trigger,t)
+ (ddl_type,t)
+ (ddl_user,t)
+ (ddl_view,t)
+ (dml_action,t)
+ (dml_action_select,t)
+ (function_exec,t)
+ (grant_role,t)
+ (lock_user,t)
+ (login_success,t)
+ (revoke_role,t)
+ (set_parameter,t)
+ (unlock_user,t)
+ (user_logout,t)
+(30 rows)
+
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "SELECT func_count_audit(content, 'user_audit2', 49) AS is_audit_type_exist_user_audit2 FROM t_audit_type WHERE id = 1;"
+ is_audit_type_exist_user_audit2
+---------------------------------
+(0 rows)
+
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "SELECT func_count_audit(content, 'user_audit4', 49) AS is_audit_type_exist_user_audit4 FROM t_audit_type WHERE id = 1;"
+ is_audit_type_exist_user_audit4
+---------------------------------
+(0 rows)
+
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "SELECT func_count_audit(content, 'user_audit3', 2) AS is_audit_type_exist_user_audit3 FROM t_audit_type WHERE id = 2;"
+ is_audit_type_exist_user_audit3
+---------------------------------
+ (login_failed,t)
+ (user_violation,t)
+(2 rows)
+
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "SELECT func_count_audit(content, (select current_user), 11) AS is_audit_type_exist_superuser FROM t_audit_type WHERE id = 3;"
+ is_audit_type_exist_superuser
+-------------------------------
+ (copy_from,t)
+ (copy_to,t)
+ (ddl_directory,t)
+ (ddl_globalconfig,t)
+ (internal_event,t)
+(5 rows)
+
+-- rename user_audit1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "ALTER USER user_audit1 RENAME TO user_audit1_new"
+ALTER ROLE
+-- user_audit1_new do sql execution
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -U user_audit1_new -W audit@2023 -C -f @abs_srcdir@/data/audit_full_execute.sql > /dev/null 2>&1
+-- audit query
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "SELECT func_count_audit(content, 'user_audit1_new', 49) AS is_audit_type_exist_user_audit1_new FROM t_audit_type WHERE id = 1;"
+ is_audit_type_exist_user_audit1_new
+-------------------------------------
+(0 rows)
+
+--reset guc parameter
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_login_logout" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_database_process" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_database_process" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_user_locked" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_user_violation=1" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_grant_revoke" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_copy_exec" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_function_exec" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_process_set_parameter=1" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state=1" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state_select" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "enable_copy_server_files" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "full_audit_users" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP USER IF EXISTS user_audit1 CASCADE;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP USER IF EXISTS user_audit1_new CASCADE;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP USER IF EXISTS user_audit2 CASCADE;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP USER IF EXISTS user_audit3 CASCADE;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP USER IF EXISTS user_audit4 CASCADE;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP TABLE IF EXISTS t_audit_type;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP TABLE IF EXISTS t_audit_super;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP TABLE IF EXISTS t_audit_super_copy;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_full -C -c "DROP FUNCTION IF EXISTS func_count_audit;" > /dev/null 2>&1
+\c regression
+CLEAN CONNECTION TO ALL FORCE FOR DATABASE db_audit_full;
+DROP DATABASE db_audit_full;
diff --git a/src/test/regress/output/hw_audit_system_func.source b/src/test/regress/output/hw_audit_system_func.source
new file mode 100644
index 000000000..c1ada51f9
--- /dev/null
+++ b/src/test/regress/output/hw_audit_system_func.source
@@ -0,0 +1,507 @@
+CREATE DATABASE db_audit_system_func;
+\c db_audit_system_func
+-- set guc parameter
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=134217727" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_function_exec=1" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_function_exec=1" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "enable_cbm_tracking=on" > /dev/null 2>&1
+-- 系统管理函数 配置设置函数
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+SELECT set_config('log_statement_stats', 'off', false);
+ set_config
+------------
+ off
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%set_config%';
+ type | object_name | detail_info
+----------------------+-------------------------------+-----------------------------------------------------------------------------
+ system_function_exec | set_config(text,text,boolean) | Execute system function(oid = 2078). args = (log_statement_stats,off,false)
+(1 row)
+
+-- 系统管理函数 服务器信号函数
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+SELECT pg_cancel_backend(139989266462464);
+WARNING: PID 139989266462464 is not a gaussdb server thread
+CONTEXT: referenced column: pg_cancel_backend
+ pg_cancel_backend
+-------------------
+ f
+(1 row)
+
+SELECT pg_cancel_session(139856237819648, 139856237819648);
+WARNING: PID 139856237819648 is not a gaussdb server thread
+CONTEXT: referenced column: pg_cancel_session
+ pg_cancel_session
+-------------------
+ f
+(1 row)
+
+SELECT pg_reload_conf();
+ pg_reload_conf
+----------------
+ t
+(1 row)
+
+SELECT pg_rotate_logfile();
+ pg_rotate_logfile
+-------------------
+ t
+(1 row)
+
+SELECT pg_terminate_session(139855736600320, 139855736600320);
+WARNING: PID 139855736600320 is not a gaussdb server thread
+CONTEXT: referenced column: pg_terminate_session
+ pg_terminate_session
+----------------------
+ f
+(1 row)
+
+SELECT pg_terminate_backend(140298793514752);
+WARNING: PID 140298793514752 is not a gaussdb server thread
+CONTEXT: referenced column: pg_terminate_backend
+ pg_terminate_backend
+----------------------
+ f
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_cancel_backend%';
+ type | object_name | detail_info
+----------------------+---------------------------+---------------------------------------------------------------
+ system_function_exec | pg_cancel_backend(bigint) | Execute system function(oid = 2171). args = (139989266462464)
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_cancel_session%';
+ type | object_name | detail_info
+----------------------+----------------------------------+-------------------------------------------------------------------------------
+ system_function_exec | pg_cancel_session(bigint,bigint) | Execute system function(oid = 3991). args = (139856237819648,139856237819648)
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_reload_conf%';
+ type | object_name | detail_info
+----------------------+------------------+------------------------------------------------
+ system_function_exec | pg_reload_conf() | Execute system function(oid = 2621). args = ()
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_rotate_logfile%';
+ type | object_name | detail_info
+----------------------+---------------------+------------------------------------------------
+ system_function_exec | pg_rotate_logfile() | Execute system function(oid = 2622). args = ()
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_terminate_session%';
+ type | object_name | detail_info
+----------------------+-------------------------------------+-------------------------------------------------------------------------------
+ system_function_exec | pg_terminate_session(bigint,bigint) | Execute system function(oid = 2099). args = (139855736600320,139855736600320)
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_terminate_backend%';
+ type | object_name | detail_info
+----------------------+------------------------------+---------------------------------------------------------------
+ system_function_exec | pg_terminate_backend(bigint) | Execute system function(oid = 2096). args = (140298793514752)
+(1 row)
+
+-- 系统管理函数 备份恢复控制函数
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_system_func -C -c "SELECT pg_create_restore_point('restore_audit');" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_system_func -C -c "SELECT pg_start_backup('restore_audit');" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_system_func -C -c "SELECT pg_stop_backup();" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_system_func -C -c "SELECT pg_switch_xlog();" > /dev/null 2>&1
+SELECT pg_cbm_get_merged_file('0/0', '0/0');
+WARNING: Start lsn equals end lsn, nothing to merge.
+CONTEXT: referenced column: pg_cbm_get_merged_file
+ pg_cbm_get_merged_file
+------------------------
+
+(1 row)
+
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_system_func -C -c "SELECT gs_roach_switch_xlog();" > /dev/null 2>&1
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_create_restore_point%';
+ type | object_name | detail_info
+----------------------+-------------------------------+-------------------------------------------------------------
+ system_function_exec | pg_create_restore_point(text) | Execute system function(oid = 3098). args = (restore_audit)
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_start_backup%';
+ type | object_name | detail_info
+----------------------+-------------------------------+-------------------------------------------------------------------
+ system_function_exec | pg_start_backup(text,boolean) | Execute system function(oid = 2172). args = (restore_audit,false)
+(1 row)
+
+SELECT type, object_name from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_stop_backup%';
+ type | object_name
+----------------------+------------------
+ system_function_exec | pg_stop_backup()
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_switch_xlog%';
+ type | object_name | detail_info
+----------------------+------------------+------------------------------------------------
+ system_function_exec | pg_switch_xlog() | Execute system function(oid = 2848). args = ()
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_cbm_get_merged_file%';
+ type | object_name | detail_info
+----------------------+-----------------------------------+-------------------------------------------------------
+ system_function_exec | pg_cbm_get_merged_file(text,text) | Execute system function(oid = 4652). args = (0/0,0/0)
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%gs_roach_switch_xlog%';
+ type | object_name | detail_info
+------+-------------+-------------
+(0 rows)
+
+-- 系统管理函数 恢复控制函数
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+SELECT pg_last_xlog_receive_location();
+ pg_last_xlog_receive_location
+-------------------------------
+
+(1 row)
+
+SELECT gs_pitr_clean_history_global_barriers('1489739011');
+ gs_pitr_clean_history_global_barriers
+---------------------------------------
+ NULL
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_last_xlog_receive_location%';
+ type | object_name | detail_info
+----------------------+---------------------------------+------------------------------------------------
+ system_function_exec | pg_last_xlog_receive_location() | Execute system function(oid = 3820). args = ()
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%gs_pitr_clean_history_global_barriers%';
+ type | object_name | detail_info
+----------------------+------------------------------------------------+----------------------------------------------------------
+ system_function_exec | gs_pitr_clean_history_global_barriers(cstring) | Execute system function(oid = 4581). args = (1489739011)
+(1 row)
+
+-- 系统管理函数 双集群容灾控制函数
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+\! echo $GAUSSHOME | sed 's/^.*tmp_check\/install\///g' | xargs -I{} rm -f {}/bin/obsserver.key.cipher
+\! echo $GAUSSHOME | sed 's/^.*tmp_check\/install\///g' | xargs -I{} rm -f {}/bin/obsserver.key.rand
+\! echo $GAUSSHOME | sed 's/^.*tmp_check\/install\///g' | xargs -I{} @abs_bindir@/gs_guc generate -S 123456@pwd -D {}/bin -o obsserver > /dev/null 2>&1
+SELECT * from pg_create_physical_replication_slot_extern('prs_audit', false, 'NAS;/data/nas/media/openGauss_uuid/dn1;0;0', false);
+ slotname | xlog_position
+-----------+---------------
+ prs_audit |
+(1 row)
+
+SELECT gs_set_obs_delete_location('0/54000000');
+ gs_set_obs_delete_location
+-----------------------------
+ 000000010000000000000054_00
+(1 row)
+
+SELECT gs_hadr_do_switchover();
+ gs_hadr_do_switchover
+-----------------------
+ f
+(1 row)
+
+SELECT gs_set_obs_delete_location_with_slotname('0/0', '0/0');
+ gs_set_obs_delete_location_with_slotname
+------------------------------------------
+ 000000010000000000000000_00
+(1 row)
+
+SELECT gs_streaming_dr_in_switchover();
+ gs_streaming_dr_in_switchover
+-------------------------------
+ f
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_create_physical_replication_slot_extern%';
+ type | object_name | detail_info
+----------------------+-----------------------------------------------------------------------+--------------------------------------------------------
+ system_function_exec | pg_create_physical_replication_slot_extern(name,boolean,text,boolean) | Execute system function(oid = 3790). args = (********)
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%gs_set_obs_delete_location%';
+ type | object_name | detail_info
+----------------------+-----------------------------------------------------------+----------------------------------------------------------
+ system_function_exec | gs_set_obs_delete_location(text) | Execute system function(oid = 9031). args = (0/54000000)
+ system_function_exec | gs_set_obs_delete_location_with_slotname(cstring,cstring) | Execute system function(oid = 9035). args = (0/0,0/0)
+(2 rows)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%gs_hadr_do_switchover%';
+ type | object_name | detail_info
+----------------------+-------------------------+------------------------------------------------
+ system_function_exec | gs_hadr_do_switchover() | Execute system function(oid = 9136). args = ()
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%gs_set_obs_delete_location_with_slotname%';
+ type | object_name | detail_info
+----------------------+-----------------------------------------------------------+-------------------------------------------------------
+ system_function_exec | gs_set_obs_delete_location_with_slotname(cstring,cstring) | Execute system function(oid = 9035). args = (0/0,0/0)
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%gs_streaming_dr_in_switchover%';
+ type | object_name | detail_info
+----------------------+---------------------------------+------------------------------------------------
+ system_function_exec | gs_streaming_dr_in_switchover() | Execute system function(oid = 9140). args = ()
+(1 row)
+
+-- 系统管理函数 双集群容灾查询函数
+-- 系统管理函数 咨询锁函数
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+SELECT pg_advisory_lock(123);
+ pg_advisory_lock
+------------------
+
+(1 row)
+
+SELECT pg_advisory_lock_shared(123);
+ pg_advisory_lock_shared
+-------------------------
+
+(1 row)
+
+SELECT pg_advisory_unlock(123);
+ pg_advisory_unlock
+--------------------
+ t
+(1 row)
+
+SELECT pg_advisory_unlock_shared(123);
+ pg_advisory_unlock_shared
+---------------------------
+ t
+(1 row)
+
+SELECT pg_advisory_unlock_all();
+ pg_advisory_unlock_all
+------------------------
+
+(1 row)
+
+SELECT pg_advisory_xact_lock(123);
+ pg_advisory_xact_lock
+-----------------------
+
+(1 row)
+
+SELECT pg_advisory_xact_lock_shared(123);
+ pg_advisory_xact_lock_shared
+------------------------------
+
+(1 row)
+
+SELECT pg_try_advisory_lock(123);
+ pg_try_advisory_lock
+----------------------
+ t
+(1 row)
+
+SELECT pg_try_advisory_lock_shared(123);
+ pg_try_advisory_lock_shared
+-----------------------------
+ t
+(1 row)
+
+SELECT pg_try_advisory_xact_lock(123);
+ pg_try_advisory_xact_lock
+---------------------------
+ t
+(1 row)
+
+SELECT pg_try_advisory_xact_lock_shared(123);
+ pg_try_advisory_xact_lock_shared
+----------------------------------
+ t
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_advisory_lock%';
+ type | object_name | detail_info
+----------------------+---------------------------------+---------------------------------------------------
+ system_function_exec | pg_advisory_lock(bigint) | Execute system function(oid = 2880). args = (123)
+ system_function_exec | pg_advisory_lock_shared(bigint) | Execute system function(oid = 2881). args = (123)
+(2 rows)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_advisory_lock_shared%';
+ type | object_name | detail_info
+----------------------+---------------------------------+---------------------------------------------------
+ system_function_exec | pg_advisory_lock_shared(bigint) | Execute system function(oid = 2881). args = (123)
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_advisory_unlock%';
+ type | object_name | detail_info
+----------------------+-----------------------------------+---------------------------------------------------
+ system_function_exec | pg_advisory_unlock(bigint) | Execute system function(oid = 2884). args = (123)
+ system_function_exec | pg_advisory_unlock_shared(bigint) | Execute system function(oid = 2885). args = (123)
+ system_function_exec | pg_advisory_unlock_all() | Execute system function(oid = 2892). args = ()
+(3 rows)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_advisory_unlock_shared%';
+ type | object_name | detail_info
+----------------------+-----------------------------------+---------------------------------------------------
+ system_function_exec | pg_advisory_unlock_shared(bigint) | Execute system function(oid = 2885). args = (123)
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_advisory_unlock_all%';
+ type | object_name | detail_info
+----------------------+--------------------------+------------------------------------------------
+ system_function_exec | pg_advisory_unlock_all() | Execute system function(oid = 2892). args = ()
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_advisory_xact_lock%';
+ type | object_name | detail_info
+----------------------+--------------------------------------+---------------------------------------------------
+ system_function_exec | pg_advisory_xact_lock(bigint) | Execute system function(oid = 3089). args = (123)
+ system_function_exec | pg_advisory_xact_lock_shared(bigint) | Execute system function(oid = 3090). args = (123)
+(2 rows)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_advisory_xact_lock_shared%';
+ type | object_name | detail_info
+----------------------+--------------------------------------+---------------------------------------------------
+ system_function_exec | pg_advisory_xact_lock_shared(bigint) | Execute system function(oid = 3090). args = (123)
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_try_advisory_lock%';
+ type | object_name | detail_info
+----------------------+-------------------------------------+---------------------------------------------------
+ system_function_exec | pg_try_advisory_lock(bigint) | Execute system function(oid = 2882). args = (123)
+ system_function_exec | pg_try_advisory_lock_shared(bigint) | Execute system function(oid = 2883). args = (123)
+(2 rows)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_try_advisory_lock_shared%';
+ type | object_name | detail_info
+----------------------+-------------------------------------+---------------------------------------------------
+ system_function_exec | pg_try_advisory_lock_shared(bigint) | Execute system function(oid = 2883). args = (123)
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_try_advisory_xact_lock%';
+ type | object_name | detail_info
+----------------------+------------------------------------------+---------------------------------------------------
+ system_function_exec | pg_try_advisory_xact_lock(bigint) | Execute system function(oid = 3091). args = (123)
+ system_function_exec | pg_try_advisory_xact_lock_shared(bigint) | Execute system function(oid = 3092). args = (123)
+(2 rows)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%pg_try_advisory_xact_lock_shared%';
+ type | object_name | detail_info
+----------------------+------------------------------------------+---------------------------------------------------
+ system_function_exec | pg_try_advisory_xact_lock_shared(bigint) | Execute system function(oid = 3092). args = (123)
+(1 row)
+
+-- 系统管理函数 段页式存储函数
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+CREATE TABLESPACE tsp_audit_sysfunc RELATIVE LOCATION 'audit_tablespace/audit_tablespace_1';
+SELECT local_space_shrink('tsp_audit_sysfunc', (SELECT current_database()));
+ local_space_shrink
+--------------------
+ 0
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%local_space_shrink%';
+ type | object_name | detail_info
+----------------------+-------------------------------+--------------------------------------------------------------------------------------
+ system_function_exec | local_space_shrink(text,text) | Execute system function(oid = 7006). args = (tsp_audit_sysfunc,db_audit_system_func)
+(1 row)
+
+-- 故障注入系统函数
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+SELECT gs_fault_inject(1,'1','1','1','1','1');
+ gs_fault_inject
+-----------------
+ 0
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%gs_fault_inject%';
+ type | object_name | detail_info
+----------------------+--------------------------------------------------+-----------------------------------------------------------
+ system_function_exec | gs_fault_inject(bigint,text,text,text,text,text) | Execute system function(oid = 4000). args = (1,1,1,1,1,1)
+(1 row)
+
+-- 数据损坏检测修复函数
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+SELECT * from local_clear_bad_block_info();
+ result
+--------
+ t
+(1 row)
+
+SELECT type, object_name, detail_info from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec' and object_name like '%local_clear_bad_block_info%';
+ type | object_name | detail_info
+----------------------+------------------------------+------------------------------------------------
+ system_function_exec | local_clear_bad_block_info() | Execute system function(oid = 4568). args = ()
+(1 row)
+
+-- 非白名单系统函数 不审计
+SELECT * from pg_delete_audit(current_date,current_date + interval '24 hours');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+SELECT current_setting('audit_thread_num');
+ current_setting
+-----------------
+ 1
+(1 row)
+
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_system_func -C -c "SELECT pg_current_xlog_location();" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_system_func -C -c "pg_database_size('db_audit_system_func');" > /dev/null 2>&1
+SELECT count(*) from pg_query_audit(current_date, current_date + interval '24 hours') where type='system_function_exec';
+ count
+-------
+ 0
+(1 row)
+
+--reset guc parameter
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_function_exec=1" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_function_exec" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "wal_level" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "enable_cbm_tracking=on" > /dev/null 2>&1
+--clean env
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_system_func -C -c "DROP TABLESPACE IF EXISTS tsp_audit_sysfunc;" > /dev/null 2>&1
+\! @abs_bindir@/gsql -r -p @portstring@ -d db_audit_system_func -C -c "SELECT * from pg_drop_replication_slot('prs_audit');" > /dev/null 2>&1
+\c regression
+CLEAN CONNECTION TO ALL FORCE FOR DATABASE db_audit_system_func;
+DROP DATABASE db_audit_system_func;
diff --git a/src/test/regress/output/hw_partition_hash_exchange.source b/src/test/regress/output/hw_partition_hash_exchange.source
index 9c08f7b2f..84164dc26 100755
--- a/src/test/regress/output/hw_partition_hash_exchange.source
+++ b/src/test/regress/output/hw_partition_hash_exchange.source
@@ -827,10 +827,10 @@ explain(verbose on, costs off) select a from test_exchange_index_ht where a=1;
Partitioned Bitmap Heap Scan on public.test_exchange_index_ht
Output: a
Recheck Cond: (test_exchange_index_ht.a = 1)
- Selected Partitions: 1
+ Selected Partitions: 2
-> Partitioned Bitmap Index Scan on test_exchange_index_ht_a
Index Cond: (test_exchange_index_ht.a = 1)
- Selected Partitions: 1
+ Selected Partitions: 2
(7 rows)
select a from test_exchange_index_ht where a=1;
@@ -878,10 +878,10 @@ explain(verbose on, costs off) select a from test_exchange_index_ht where a=3;
Partitioned Bitmap Heap Scan on public.test_exchange_index_ht
Output: a
Recheck Cond: (test_exchange_index_ht.a = 3)
- Selected Partitions: 2
+ Selected Partitions: 1
-> Partitioned Bitmap Index Scan on test_exchange_index_ht_a
Index Cond: (test_exchange_index_ht.a = 3)
- Selected Partitions: 2
+ Selected Partitions: 1
(7 rows)
select a from test_exchange_index_ht where a=3;
@@ -1112,10 +1112,10 @@ explain(ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) sel
-----------------------------------------------------------------------
Partitioned Bitmap Heap Scan on test_exchange_verbose_ht
Recheck Cond: (b = 5)
- Selected Partitions: 1
+ Selected Partitions: 3
-> Partitioned Bitmap Index Scan on test_exchange_verbose_ht_index
Index Cond: (b = 5)
- Selected Partitions: 1
+ Selected Partitions: 3
(6 rows)
select b from test_exchange_verbose_ht where b=5 order by 1;
@@ -1139,10 +1139,10 @@ explain(ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) sel
-----------------------------------------------------------------------
Partitioned Bitmap Heap Scan on test_exchange_verbose_ht
Recheck Cond: (b = 1)
- Selected Partitions: 3
+ Selected Partitions: 1
-> Partitioned Bitmap Index Scan on test_exchange_verbose_ht_index
Index Cond: (b = 1)
- Selected Partitions: 3
+ Selected Partitions: 1
(6 rows)
select b from test_exchange_verbose_ht where b=1 order by 1;
@@ -1174,10 +1174,10 @@ explain(ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) sel
-----------------------------------------------------------------------
Partitioned Bitmap Heap Scan on test_exchange_verbose_ht
Recheck Cond: (b = 5)
- Selected Partitions: 1
+ Selected Partitions: 3
-> Partitioned Bitmap Index Scan on test_exchange_verbose_ht_index
Index Cond: (b = 5)
- Selected Partitions: 1
+ Selected Partitions: 3
(6 rows)
select b from test_exchange_verbose_ht where b=5 order by 1;
@@ -1206,10 +1206,10 @@ explain(ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) sel
-----------------------------------------------------------------------
Partitioned Bitmap Heap Scan on test_exchange_verbose_ht
Recheck Cond: (b = 3)
- Selected Partitions: 1
+ Selected Partitions: 3
-> Partitioned Bitmap Index Scan on test_exchange_verbose_ht_index
Index Cond: (b = 3)
- Selected Partitions: 1
+ Selected Partitions: 3
(6 rows)
select b from test_exchange_verbose_ht where b=3 order by 1;
@@ -1222,10 +1222,10 @@ explain(ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) sel
-----------------------------------------------------------------------
Partitioned Bitmap Heap Scan on test_exchange_verbose_ht
Recheck Cond: (b = 4)
- Selected Partitions: 3
+ Selected Partitions: 1
-> Partitioned Bitmap Index Scan on test_exchange_verbose_ht_index
Index Cond: (b = 4)
- Selected Partitions: 3
+ Selected Partitions: 1
(6 rows)
select b from test_exchange_verbose_ht where b=4 order by 1;
@@ -1667,8 +1667,8 @@ select * from test_exchange_func_ord;
select * from test_exchange_func_ht;
a
---
- 2
3
+ 2
(2 rows)
select * from test_exchange_func_ht partition (p1);
diff --git a/src/test/regress/output/hw_partition_interval_dump_restore.source b/src/test/regress/output/hw_partition_interval_dump_restore.source
old mode 100644
new mode 100755
index f1ff322ba..2c4688ec6
--- a/src/test/regress/output/hw_partition_interval_dump_restore.source
+++ b/src/test/regress/output/hw_partition_interval_dump_restore.source
@@ -66,6 +66,7 @@ SET
SET
SET
SET
+SET
CREATE TABLE
ALTER TABLE
CREATE INDEX
@@ -142,6 +143,7 @@ SET
SET
SET
SET
+SET
CREATE TABLE
ALTER TABLE
CREATE INDEX
@@ -237,6 +239,7 @@ SET
SET
SET
SET
+SET
CREATE TABLE
ALTER TABLE
CREATE INDEX
diff --git a/src/test/regress/output/hw_partition_list_exchange.source b/src/test/regress/output/hw_partition_list_exchange.source
index 247b6375d..388fc9c47 100755
--- a/src/test/regress/output/hw_partition_list_exchange.source
+++ b/src/test/regress/output/hw_partition_list_exchange.source
@@ -652,7 +652,7 @@ partition by list(a)
insert into test_exchange_validation_ord values (1), (10);
--ERROR
alter table test_exchange_validation_lt exchange partition (p1) with table test_exchange_validation_ord;
-ERROR: some rows in table do not qualify for specified partition
+ERROR: Can't find list partition oid when checking tuple is in the partition.
drop table test_exchange_validation_ord;
drop table test_exchange_validation_lt;
--b.with validation
@@ -666,7 +666,7 @@ partition by list(a)
insert into test_exchange_validation_ord values (1), (10);
--ERROR
alter table test_exchange_validation_lt exchange partition (p1) with table test_exchange_validation_ord with validation;
-ERROR: some rows in table do not qualify for specified partition
+ERROR: Can't find list partition oid when checking tuple is in the partition.
drop table test_exchange_validation_ord;
drop table test_exchange_validation_lt;
--c.without validation
@@ -1014,7 +1014,7 @@ insert into test_exchange_verbose_ord values(generate_series(1, 6));
insert into test_exchange_verbose_ord values(7);
--ERROR
alter table test_exchange_verbose_lt exchange partition (test_exchange_verbose_lt_p1) with table test_exchange_verbose_ord verbose;
-ERROR: inserted partition key does not map to any table partition
+ERROR: Can't find list partition oid when checking tuple is in the partition.
drop table test_exchange_verbose_ord;
drop table test_exchange_verbose_lt;
-- c.index
diff --git a/src/test/regress/output/hw_subpartition_tablespace.source b/src/test/regress/output/hw_subpartition_tablespace.source
index 9fe20d299..7a4a1a916 100644
--- a/src/test/regress/output/hw_subpartition_tablespace.source
+++ b/src/test/regress/output/hw_subpartition_tablespace.source
@@ -2078,6 +2078,131 @@ SELECT pg_get_tabledef('t_hash_list4');
(1 row)
DROP TABLE t_hash_list4;
+--
+----test create index with tablespace----
+--
+CREATE TABLE t_range_list(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1
+PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2)
+(
+ PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1
+ (
+ SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1,
+ SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2,
+ SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3,
+ SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20)
+ ),
+ PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2
+ (
+ SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1,
+ SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2,
+ SUBPARTITION P_RANGE2_3 VALUES (DEFAULT)
+ ),
+ PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3
+ (
+ SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1,
+ SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2,
+ SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3,
+ SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20)
+ ),
+ PARTITION P_RANGE4 VALUES LESS THAN (20)
+ (
+ SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1,
+ SUBPARTITION P_RANGE4_2 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts2
+ ),
+ PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3,
+ PARTITION P_RANGE6 VALUES LESS THAN (30)
+);
+CREATE INDEX t_range_list_idx ON t_range_list(c1,c2) LOCAL
+(
+ PARTITION idx_p1(
+ SUBPARTITION idx_p1_1 TABLESPACE hw_subpartition_tablespace_ts1,
+ SUBPARTITION idx_p1_2 TABLESPACE hw_subpartition_tablespace_ts2,
+ SUBPARTITION idx_p1_3 TABLESPACE hw_subpartition_tablespace_ts3,
+ SUBPARTITION idx_p1_4
+ ),
+ PARTITION idx_p2 TABLESPACE hw_subpartition_tablespace_ts2(
+ SUBPARTITION idx_p2_1,
+ SUBPARTITION idx_p2_2,
+ SUBPARTITION idx_p2_3
+ ),
+ PARTITION idx_p3 TABLESPACE hw_subpartition_tablespace_ts2(
+ SUBPARTITION idx_p3_1 TABLESPACE hw_subpartition_tablespace_ts1,
+ SUBPARTITION idx_p3_2 TABLESPACE hw_subpartition_tablespace_ts2,
+ SUBPARTITION idx_p3_3 TABLESPACE hw_subpartition_tablespace_ts3,
+ SUBPARTITION idx_p3_4
+ ),
+ PARTITION idx_p4(
+ SUBPARTITION idx_p4_1,
+ SUBPARTITION idx_p4_2 TABLESPACE hw_subpartition_tablespace_ts2
+ ),
+ PARTITION idx_p5 TABLESPACE hw_subpartition_tablespace_ts3(
+ SUBPARTITION idx_p5_1
+ ),
+ PARTITION idx_p6(
+ SUBPARTITION idx_p6_1 TABLESPACE hw_subpartition_tablespace_ts2
+ )
+) TABLESPACE hw_subpartition_tablespace_ts1;
+SELECT p.relname, t.spcname FROM pg_partition p, pg_class c, pg_namespace n, pg_tablespace t
+WHERE p.parentid = c.oid
+ AND c.relname='t_range_list_idx'
+ AND c.relnamespace=n.oid
+ AND n.nspname=CURRENT_SCHEMA
+ AND p.reltablespace = t.oid
+ORDER BY p.relname;
+ relname | spcname
+----------+--------------------------------
+ idx_p1_1 | hw_subpartition_tablespace_ts1
+ idx_p1_2 | hw_subpartition_tablespace_ts2
+ idx_p1_3 | hw_subpartition_tablespace_ts3
+ idx_p1_4 | hw_subpartition_tablespace_ts1
+ idx_p2_1 | hw_subpartition_tablespace_ts2
+ idx_p2_2 | hw_subpartition_tablespace_ts2
+ idx_p2_3 | hw_subpartition_tablespace_ts2
+ idx_p3_1 | hw_subpartition_tablespace_ts1
+ idx_p3_2 | hw_subpartition_tablespace_ts2
+ idx_p3_3 | hw_subpartition_tablespace_ts3
+ idx_p3_4 | hw_subpartition_tablespace_ts2
+ idx_p4_1 | hw_subpartition_tablespace_ts1
+ idx_p4_2 | hw_subpartition_tablespace_ts2
+ idx_p5_1 | hw_subpartition_tablespace_ts3
+ idx_p6_1 | hw_subpartition_tablespace_ts2
+(15 rows)
+
+SELECT pg_get_indexdef('hw_subpartition_tablespace.t_range_list_idx'::regclass);
+ pg_get_indexdef
+---------------------------------------------------------------------------
+ CREATE INDEX t_range_list_idx ON t_range_list USING btree (c1, c2) LOCAL(+
+ PARTITION partition_name( +
+ SUBPARTITION idx_p1_1 TABLESPACE hw_subpartition_tablespace_ts1, +
+ SUBPARTITION idx_p1_2 TABLESPACE hw_subpartition_tablespace_ts2, +
+ SUBPARTITION idx_p1_3 TABLESPACE hw_subpartition_tablespace_ts3, +
+ SUBPARTITION idx_p1_4 TABLESPACE hw_subpartition_tablespace_ts1 +
+ ), +
+ PARTITION partition_name( +
+ SUBPARTITION idx_p2_1 TABLESPACE hw_subpartition_tablespace_ts2, +
+ SUBPARTITION idx_p2_2 TABLESPACE hw_subpartition_tablespace_ts2, +
+ SUBPARTITION idx_p2_3 TABLESPACE hw_subpartition_tablespace_ts2 +
+ ), +
+ PARTITION partition_name( +
+ SUBPARTITION idx_p3_1 TABLESPACE hw_subpartition_tablespace_ts1, +
+ SUBPARTITION idx_p3_2 TABLESPACE hw_subpartition_tablespace_ts2, +
+ SUBPARTITION idx_p3_3 TABLESPACE hw_subpartition_tablespace_ts3, +
+ SUBPARTITION idx_p3_4 TABLESPACE hw_subpartition_tablespace_ts2 +
+ ), +
+ PARTITION partition_name( +
+ SUBPARTITION idx_p4_1 TABLESPACE hw_subpartition_tablespace_ts1, +
+ SUBPARTITION idx_p4_2 TABLESPACE hw_subpartition_tablespace_ts2 +
+ ), +
+ PARTITION partition_name( +
+ SUBPARTITION idx_p5_1 TABLESPACE hw_subpartition_tablespace_ts3 +
+ ), +
+ PARTITION partition_name( +
+ SUBPARTITION idx_p6_1 TABLESPACE hw_subpartition_tablespace_ts2 +
+ ) +
+ ) TABLESPACE hw_subpartition_tablespace_ts1
+(1 row)
+
+DROP TABLE t_range_list;
--finish
drop tablespace hw_subpartition_tablespace_ts1;
drop tablespace hw_subpartition_tablespace_ts2;
diff --git a/src/test/regress/output/ledger_table_case.source b/src/test/regress/output/ledger_table_case.source
index 55374220b..beeb151a2 100644
--- a/src/test/regress/output/ledger_table_case.source
+++ b/src/test/regress/output/ledger_table_case.source
@@ -352,8 +352,8 @@ SELECT dbname, username, relnsp, relname, relhash, globalhash, txcommand FROM gs
-- DROP blockchain SCHEMA
DROP SCHEMA ledgernsp CASCADE;
SELECT * FROM pg_namespace WHERE nspname = 'ledgernsp';
- nspname | nspowner | nsptimeline | nspacl | in_redistribution | nspblockchain
----------+----------+-------------+--------+-------------------+---------------
+ nspname | nspowner | nsptimeline | nspacl | in_redistribution | nspblockchain | nspcollation
+---------+----------+-------------+--------+-------------------+---------------+--------------
(0 rows)
----------------------------------------------------------------------
diff --git a/src/test/regress/output/mysql_function.source b/src/test/regress/output/mysql_function.source
old mode 100644
new mode 100755
index 0c1ffd0ed..4e7293933
--- a/src/test/regress/output/mysql_function.source
+++ b/src/test/regress/output/mysql_function.source
@@ -119,6 +119,7 @@ CREATE DATABASE mysqltestbak DBCOMPATIBILITY 'B';
--? .*
--? .*
--? .*
+--? .*
\c mysqltestbak
\sf proc_definer1
CREATE DEFINER = testusr1 PROCEDURE public.proc_definer1()
diff --git a/src/test/regress/output/postgres_fdw.source b/src/test/regress/output/postgres_fdw.source
deleted file mode 100644
index 8669b9dc6..000000000
--- a/src/test/regress/output/postgres_fdw.source
+++ /dev/null
@@ -1,3346 +0,0 @@
--- ===================================================================
--- create FDW objects
--- ===================================================================
-CREATE EXTENSION postgres_fdw;
-CREATE SERVER testserver1 FOREIGN DATA WRAPPER postgres_fdw;
-CREATE SERVER loopback FOREIGN DATA WRAPPER postgres_fdw
- OPTIONS (dbname 'regression', port '@portstring@');
-CREATE USER MAPPING FOR public SERVER testserver1
- OPTIONS (user 'value', password 'value');
-CREATE USER MAPPING FOR CURRENT_USER SERVER loopback;
--- ===================================================================
--- create objects used through FDW loopback server
--- ===================================================================
-CREATE TYPE user_enum AS ENUM ('foo', 'bar', 'buz');
-CREATE SCHEMA "S 1";
-CREATE TABLE "S 1"."T 1" (
- "C 1" int NOT NULL,
- c2 int NOT NULL,
- c3 text,
- c4 timestamptz,
- c5 timestamp,
- c6 varchar(10),
- c7 char(10),
- c8 user_enum,
- CONSTRAINT t1_pkey PRIMARY KEY ("C 1")
-);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "T 1"
-CREATE TABLE "S 1"."T 2" (
- c1 int NOT NULL,
- c2 text,
- CONSTRAINT t2_pkey PRIMARY KEY (c1)
-);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t2_pkey" for table "T 2"
-CREATE TABLE "S 1"."T 3" (
- c1 int NOT NULL,
- c2 int NOT NULL,
- c3 text,
- CONSTRAINT t3_pkey PRIMARY KEY (c1)
-);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t3_pkey" for table "T 3"
-CREATE TABLE "S 1"."T 4" (
- c1 int NOT NULL,
- c2 int NOT NULL,
- c3 text,
- CONSTRAINT t4_pkey PRIMARY KEY (c1)
-);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t4_pkey" for table "T 4"
-INSERT INTO "S 1"."T 1"
- SELECT id,
- id % 10,
- to_char(id, 'FM00000'),
- '1970-01-01'::timestamptz + ((id % 100) || ' days')::interval,
- '1970-01-01'::timestamp + ((id % 100) || ' days')::interval,
- id % 10,
- id % 10,
- 'foo'::user_enum
- FROM generate_series(1, 1000) id;
-INSERT INTO "S 1"."T 2"
- SELECT id,
- 'AAA' || to_char(id, 'FM000')
- FROM generate_series(1, 100) id;
-INSERT INTO "S 1"."T 3"
- SELECT id,
- id + 1,
- 'AAA' || to_char(id, 'FM000')
- FROM generate_series(1, 100) id;
-DELETE FROM "S 1"."T 3" WHERE c1 % 2 != 0; -- delete for outer join tests
-INSERT INTO "S 1"."T 4"
- SELECT id,
- id + 1,
- 'AAA' || to_char(id, 'FM000')
- FROM generate_series(1, 100) id;
-DELETE FROM "S 1"."T 4" WHERE c1 % 3 != 0; -- delete for outer join tests
-ANALYZE "S 1"."T 1";
-ANALYZE "S 1"."T 2";
-ANALYZE "S 1"."T 3";
-ANALYZE "S 1"."T 4";
--- ===================================================================
--- create local tables to check whether the grammer is support
--- ===================================================================
-CREATE TABLE local_ft1 (
- c1 int NOT NULL,
- "C 1" int NOT NULL,
- c2 int NOT NULL,
- c3 text,
- c4 timestamptz,
- c5 timestamp,
- c6 varchar(10),
- c7 char(10) default 'ft1',
- c8 user_enum
-);
-CREATE TABLE local_ft2 (
- c1 int NOT NULL,
- "C 1" int NOT NULL,
- c2 int NOT NULL,
- c3 text,
- c4 timestamptz,
- c5 timestamp,
- c6 varchar(10),
- c7 char(10) default 'ft2',
- c8 user_enum
-);
--- ===================================================================
--- create foreign tables
--- ===================================================================
-CREATE FOREIGN TABLE ft1 (
- c0 int,
- c1 int NOT NULL,
- c2 int NOT NULL,
- c3 text,
- c4 timestamptz,
- c5 timestamp,
- c6 varchar(10),
- c7 char(10) default 'ft1',
- c8 user_enum
-) SERVER loopback;
-ALTER FOREIGN TABLE ft1 DROP COLUMN c0;
-CREATE FOREIGN TABLE ft2 (
- c1 int NOT NULL,
- c2 int NOT NULL,
- cx int,
- c3 text,
- c4 timestamptz,
- c5 timestamp,
- c6 varchar(10),
- c7 char(10) default 'ft2',
- c8 user_enum
-) SERVER loopback;
-ALTER FOREIGN TABLE ft2 DROP COLUMN cx;
-CREATE FOREIGN TABLE ft4 (
- c1 int NOT NULL,
- c2 int NOT NULL,
- c3 text
-) SERVER loopback OPTIONS (schema_name 'S 1', table_name 'T 3');
-CREATE FOREIGN TABLE ft5 (
- c1 int NOT NULL,
- c2 int NOT NULL,
- c3 text
-) SERVER loopback OPTIONS (schema_name 'S 1', table_name 'T 4');
--- ===================================================================
--- tests for validator
--- ===================================================================
--- requiressl and some other parameters are omitted because
--- valid values for them depend on configure options
-ALTER SERVER testserver1 OPTIONS (
- use_remote_estimate 'false',
- updatable 'true',
- fdw_startup_cost '123.456',
- fdw_tuple_cost '0.123',
- service 'value',
- connect_timeout 'value',
- dbname 'value',
- host 'value',
- hostaddr 'value',
- port 'value',
- --client_encoding 'value',
- application_name 'value',
- --fallback_application_name 'value',
- keepalives 'value',
- keepalives_idle 'value',
- keepalives_interval 'value',
- -- requiressl 'value',
- sslcompression 'value',
- sslmode 'value',
- sslcert 'value',
- sslkey 'value',
- sslrootcert 'value',
- sslcrl 'value',
- --requirepeer 'value',
- krbsrvname 'value'
- --gsslib 'value'
- --replication 'value'
-);
-ALTER USER MAPPING FOR public SERVER testserver1
- OPTIONS (DROP user, DROP password);
-ALTER FOREIGN TABLE ft1 OPTIONS (schema_name 'S 1', table_name 'T 1');
-ALTER FOREIGN TABLE ft2 OPTIONS (schema_name 'S 1', table_name 'T 1');
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 OPTIONS (column_name 'C 1');
-ALTER FOREIGN TABLE ft2 ALTER COLUMN c1 OPTIONS (column_name 'C 1');
-\det+
- List of foreign tables
- Schema | Table | Server | FDW Options | Description
---------+-------+----------+---------------------------------------+-------------
- public | ft1 | loopback | (schema_name 'S 1', table_name 'T 1') |
- public | ft2 | loopback | (schema_name 'S 1', table_name 'T 1') |
- public | ft4 | loopback | (schema_name 'S 1', table_name 'T 3') |
- public | ft5 | loopback | (schema_name 'S 1', table_name 'T 4') |
-(4 rows)
-
--- Test that alteration of server options causes reconnection
--- Remote's errors might be non-English, so hide them to ensure stable results
-\set VERBOSITY terse
-SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work
- c3 | c4
--------+------------------------------
- 00001 | Fri Jan 02 00:00:00 1970 PST
-(1 row)
-
-ALTER SERVER loopback OPTIONS (SET dbname 'no such database');
-SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail
-ERROR: could not connect to server "loopback"
-DO $d$
- BEGIN
- EXECUTE $$ALTER SERVER loopback
- OPTIONS (SET dbname '$$||current_database()||$$')$$;
- END;
-$d$;
-SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work again
- c3 | c4
--------+------------------------------
- 00001 | Fri Jan 02 00:00:00 1970 PST
-(1 row)
-
--- Test that alteration of user mapping options causes reconnection
-ALTER USER MAPPING FOR CURRENT_USER SERVER loopback
- OPTIONS (ADD user 'no such user');
-SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail
-ERROR: could not connect to server "loopback"
-ALTER USER MAPPING FOR CURRENT_USER SERVER loopback
- OPTIONS (DROP user);
-SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work again
- c3 | c4
--------+------------------------------
- 00001 | Fri Jan 02 00:00:00 1970 PST
-(1 row)
-
-\set VERBOSITY default
--- Now we should be able to run ANALYZE.
--- To exercise multiple code paths, we use local stats on ft1
--- and remote-estimate mode on ft2.
-ANALYZE ft1;
-ANALYZE ft4;
-ANALYZE ft5;
-ALTER FOREIGN TABLE ft2 OPTIONS (use_remote_estimate 'true');
--- ===================================================================
--- simple queries
--- ===================================================================
--- single table, with/without alias
-EXPLAIN (COSTS false) SELECT * FROM ft1 ORDER BY c3, c1 OFFSET 100 LIMIT 10;
- QUERY PLAN
----------------------------------
- Limit
- -> Sort
- Sort Key: c3, c1
- -> Foreign Scan on ft1
-(4 rows)
-
-SELECT * FROM ft1 ORDER BY c3, c1 OFFSET 100 LIMIT 10;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
------+----+-------+------------------------------+--------------------------+----+------------+-----
- 101 | 1 | 00101 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
- 102 | 2 | 00102 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
- 103 | 3 | 00103 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
- 104 | 4 | 00104 | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
- 105 | 5 | 00105 | Tue Jan 06 00:00:00 1970 PST | Tue Jan 06 00:00:00 1970 | 5 | 5 | foo
- 106 | 6 | 00106 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
- 107 | 7 | 00107 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
- 108 | 8 | 00108 | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
- 109 | 9 | 00109 | Sat Jan 10 00:00:00 1970 PST | Sat Jan 10 00:00:00 1970 | 9 | 9 | foo
- 110 | 0 | 00110 | Sun Jan 11 00:00:00 1970 PST | Sun Jan 11 00:00:00 1970 | 0 | 0 | foo
-(10 rows)
-
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
- QUERY PLAN
--------------------------------------------------------------------------------------
- Limit
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- -> Sort
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Sort Key: t1.c3, t1.c1
- -> Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
-(8 rows)
-
-SELECT * FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
------+----+-------+------------------------------+--------------------------+----+------------+-----
- 101 | 1 | 00101 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
- 102 | 2 | 00102 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
- 103 | 3 | 00103 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
- 104 | 4 | 00104 | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
- 105 | 5 | 00105 | Tue Jan 06 00:00:00 1970 PST | Tue Jan 06 00:00:00 1970 | 5 | 5 | foo
- 106 | 6 | 00106 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
- 107 | 7 | 00107 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
- 108 | 8 | 00108 | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
- 109 | 9 | 00109 | Sat Jan 10 00:00:00 1970 PST | Sat Jan 10 00:00:00 1970 | 9 | 9 | foo
- 110 | 0 | 00110 | Sun Jan 11 00:00:00 1970 PST | Sun Jan 11 00:00:00 1970 | 0 | 0 | foo
-(10 rows)
-
--- whole-row reference
-EXPLAIN (VERBOSE, COSTS false) SELECT t1 FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
- QUERY PLAN
--------------------------------------------------------------------------------------
- Limit
- Output: t1.*, c3, c1
- -> Sort
- Output: t1.*, c3, c1
- Sort Key: t1.c3, t1.c1
- -> Foreign Scan on public.ft1 t1
- Output: t1.*, c3, c1
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
-(8 rows)
-
-SELECT t1 FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
- t1
---------------------------------------------------------------------------------------------
- (101,1,00101,"Fri Jan 02 00:00:00 1970 PST","Fri Jan 02 00:00:00 1970",1,"1 ",foo)
- (102,2,00102,"Sat Jan 03 00:00:00 1970 PST","Sat Jan 03 00:00:00 1970",2,"2 ",foo)
- (103,3,00103,"Sun Jan 04 00:00:00 1970 PST","Sun Jan 04 00:00:00 1970",3,"3 ",foo)
- (104,4,00104,"Mon Jan 05 00:00:00 1970 PST","Mon Jan 05 00:00:00 1970",4,"4 ",foo)
- (105,5,00105,"Tue Jan 06 00:00:00 1970 PST","Tue Jan 06 00:00:00 1970",5,"5 ",foo)
- (106,6,00106,"Wed Jan 07 00:00:00 1970 PST","Wed Jan 07 00:00:00 1970",6,"6 ",foo)
- (107,7,00107,"Thu Jan 08 00:00:00 1970 PST","Thu Jan 08 00:00:00 1970",7,"7 ",foo)
- (108,8,00108,"Fri Jan 09 00:00:00 1970 PST","Fri Jan 09 00:00:00 1970",8,"8 ",foo)
- (109,9,00109,"Sat Jan 10 00:00:00 1970 PST","Sat Jan 10 00:00:00 1970",9,"9 ",foo)
- (110,0,00110,"Sun Jan 11 00:00:00 1970 PST","Sun Jan 11 00:00:00 1970",0,"0 ",foo)
-(10 rows)
-
--- empty result
-SELECT * FROM ft1 WHERE false;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-----+----+----+----+----+----+----+----
-(0 rows)
-
--- with WHERE clause
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1';
- QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((c7 >= '1'::bpchar)) AND (("C 1" = 101)) AND ((c6 = '1'::text))
-(3 rows)
-
-SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1';
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
------+----+-------+------------------------------+--------------------------+----+------------+-----
- 101 | 1 | 00101 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
-(1 row)
-
--- with FOR UPDATE/SHARE
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------
- LockRows
- Output: c1, c2, c3, c4, c5, c6, c7, c8, t1.*
- -> Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8, t1.*
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 101)) FOR UPDATE
-(5 rows)
-
-SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
------+----+-------+------------------------------+--------------------------+----+------------+-----
- 101 | 1 | 00101 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
-(1 row)
-
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = 102 FOR SHARE;
- QUERY PLAN
----------------------------------------------------------------------------------------------------------------
- LockRows
- Output: c1, c2, c3, c4, c5, c6, c7, c8, t1.*
- -> Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8, t1.*
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 102)) FOR SHARE
-(5 rows)
-
-SELECT * FROM ft1 t1 WHERE c1 = 102 FOR SHARE;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
------+----+-------+------------------------------+--------------------------+----+------------+-----
- 102 | 2 | 00102 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
-(1 row)
-
--- aggregate
-SELECT COUNT(*) FROM ft1 t1;
- count
--------
- 1000
-(1 row)
-
--- join two tables
-SELECT t1.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
- c1
------
- 101
- 102
- 103
- 104
- 105
- 106
- 107
- 108
- 109
- 110
-(10 rows)
-
--- subquery
-SELECT * FROM ft1 t1 WHERE t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 <= 10) ORDER BY c1;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-----+----+-------+------------------------------+--------------------------+----+------------+-----
- 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
- 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
- 3 | 3 | 00003 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
- 4 | 4 | 00004 | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
- 5 | 5 | 00005 | Tue Jan 06 00:00:00 1970 PST | Tue Jan 06 00:00:00 1970 | 5 | 5 | foo
- 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
- 7 | 7 | 00007 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
- 8 | 8 | 00008 | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
- 9 | 9 | 00009 | Sat Jan 10 00:00:00 1970 PST | Sat Jan 10 00:00:00 1970 | 9 | 9 | foo
- 10 | 0 | 00010 | Sun Jan 11 00:00:00 1970 PST | Sun Jan 11 00:00:00 1970 | 0 | 0 | foo
-(10 rows)
-
--- subquery+MAX
-SELECT * FROM ft1 t1 WHERE t1.c3 = (SELECT MAX(c3) FROM ft2 t2) ORDER BY c1;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-------+----+-------+------------------------------+--------------------------+----+------------+-----
- 1000 | 0 | 01000 | Thu Jan 01 00:00:00 1970 PST | Thu Jan 01 00:00:00 1970 | 0 | 0 | foo
-(1 row)
-
--- used in CTE
-WITH t1 AS (SELECT * FROM ft1 WHERE c1 <= 10) SELECT t2.c1, t2.c2, t2.c3, t2.c4 FROM t1, ft2 t2 WHERE t1.c1 = t2.c1 ORDER BY t1.c1;
- c1 | c2 | c3 | c4
-----+----+-------+------------------------------
- 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST
- 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST
- 3 | 3 | 00003 | Sun Jan 04 00:00:00 1970 PST
- 4 | 4 | 00004 | Mon Jan 05 00:00:00 1970 PST
- 5 | 5 | 00005 | Tue Jan 06 00:00:00 1970 PST
- 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST
- 7 | 7 | 00007 | Thu Jan 08 00:00:00 1970 PST
- 8 | 8 | 00008 | Fri Jan 09 00:00:00 1970 PST
- 9 | 9 | 00009 | Sat Jan 10 00:00:00 1970 PST
- 10 | 0 | 00010 | Sun Jan 11 00:00:00 1970 PST
-(10 rows)
-
--- fixed values
-SELECT 'fixed', NULL FROM ft1 t1 WHERE c1 = 1;
- ?column? | ?column?
-----------+----------
- fixed |
-(1 row)
-
--- user-defined operator/function
-CREATE FUNCTION postgres_fdw_abs(int) RETURNS int AS $$
-BEGIN
-RETURN abs($1);
-END
-$$ LANGUAGE plpgsql IMMUTABLE;
-CREATE OPERATOR === (
- LEFTARG = int,
- RIGHTARG = int,
- PROCEDURE = int4eq,
- COMMUTATOR = ===,
- NEGATOR = !==
-);
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = postgres_fdw_abs(t1.c2);
- QUERY PLAN
--------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Filter: (t1.c1 = postgres_fdw_abs(t1.c2))
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
-(4 rows)
-
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2;
- QUERY PLAN
--------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Filter: (t1.c1 === t1.c2)
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
-(4 rows)
-
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = abs(t1.c2);
- QUERY PLAN
----------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = abs(c2)))
-(3 rows)
-
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = t1.c2;
- QUERY PLAN
-----------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = c2))
-(3 rows)
-
--- ===================================================================
--- WHERE with remotely-executable conditions
--- ===================================================================
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = 1; -- Var, OpExpr(b), Const
- QUERY PLAN
----------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
-(3 rows)
-
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = 100 AND t1.c2 = 0; -- BoolExpr
- QUERY PLAN
---------------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 100)) AND ((c2 = 0))
-(3 rows)
-
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 IS NULL; -- NullTest
- QUERY PLAN
--------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" IS NULL))
-(3 rows)
-
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 IS NOT NULL; -- NullTest
- QUERY PLAN
--------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
-(3 rows)
-
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE round(abs(c1), 0) = 1; -- FuncExpr
- QUERY PLAN
----------------------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((round(abs("C 1"), 0) = 1::numeric))
-(3 rows)
-
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = -c1; -- OpExpr(l)
- QUERY PLAN
------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = (- "C 1")))
-(3 rows)
-
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE 1 = c1!; -- OpExpr(r)
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((1::numeric = ("C 1" !)))
-(3 rows)
-
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE (c1 IS NOT NULL) IS DISTINCT FROM (c1 IS NOT NULL); -- DistinctExpr
- QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((("C 1" IS NOT NULL) IS DISTINCT FROM ("C 1" IS NOT NULL)))
-(3 rows)
-
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = ANY(ARRAY[c2, 1, c1 + 0]); -- ScalarArrayOpExpr
- QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = ANY (ARRAY[c2, 1, ("C 1" + 0)])))
-(3 rows)
-
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = (ARRAY[c1,c2,3])[1]; -- ArrayRef
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = ((ARRAY["C 1", c2, 3])[1])))
-(3 rows)
-
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c6 = E'foo''s\\bar'; -- check special chars
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((c6 = E'foo''s\\bar'::text))
-(3 rows)
-
-EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c8 = 'foo'; -- can't be sent to remote
- QUERY PLAN
--------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Filter: (t1.c8 = 'foo'::user_enum)
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
-(4 rows)
-
--- parameterized remote path
-EXPLAIN (VERBOSE, COSTS false)
- SELECT * FROM ft2 a, ft2 b WHERE a.c1 = 47 AND b.c1 = a.c2;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------
- Nested Loop
- Output: a.c1, a.c2, a.c3, a.c4, a.c5, a.c6, a.c7, a.c8, b.c1, b.c2, b.c3, b.c4, b.c5, b.c6, b.c7, b.c8
- Join Filter: (a.c2 = b.c1)
- -> Foreign Scan on public.ft2 a
- Output: a.c1, a.c2, a.c3, a.c4, a.c5, a.c6, a.c7, a.c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 47))
- -> Foreign Scan on public.ft2 b
- Output: b.c1, b.c2, b.c3, b.c4, b.c5, b.c6, b.c7, b.c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
-(9 rows)
-
-SELECT * FROM ft2 a, ft2 b WHERE a.c1 = 47 AND b.c1 = a.c2;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-----+----+-------+------------------------------+--------------------------+----+------------+-----+----+----+-------+------------------------------+--------------------------+----+------------+-----
- 47 | 7 | 00047 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo | 7 | 7 | 00007 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
-(1 row)
-
--- check both safe and unsafe join conditions
-EXPLAIN (VERBOSE, COSTS false)
- SELECT * FROM ft2 a, ft2 b
- WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = upper(a.c7);
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------
- Hash Join
- Output: a.c1, a.c2, a.c3, a.c4, a.c5, a.c6, a.c7, a.c8, b.c1, b.c2, b.c3, b.c4, b.c5, b.c6, b.c7, b.c8
- Hash Cond: ((b.c1 = a.c1) AND ((b.c7)::text = upper((a.c7)::text)))
- -> Foreign Scan on public.ft2 b
- Output: b.c1, b.c2, b.c3, b.c4, b.c5, b.c6, b.c7, b.c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
- -> Hash
- Output: a.c1, a.c2, a.c3, a.c4, a.c5, a.c6, a.c7, a.c8
- -> Foreign Scan on public.ft2 a
- Output: a.c1, a.c2, a.c3, a.c4, a.c5, a.c6, a.c7, a.c8
- Filter: (a.c8 = 'foo'::user_enum)
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((c2 = 6))
-(12 rows)
-
-SELECT * FROM ft2 a, ft2 b
-WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = upper(a.c7);
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
------+----+-------+------------------------------+--------------------------+----+------------+-----+-----+----+-------+------------------------------+--------------------------+----+------------+-----
- 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
- 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
- 26 | 6 | 00026 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 26 | 6 | 00026 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
- 36 | 6 | 00036 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 36 | 6 | 00036 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
- 46 | 6 | 00046 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 46 | 6 | 00046 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
- 56 | 6 | 00056 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 56 | 6 | 00056 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
- 66 | 6 | 00066 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 66 | 6 | 00066 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
- 76 | 6 | 00076 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 76 | 6 | 00076 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
- 86 | 6 | 00086 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 86 | 6 | 00086 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
- 96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
- 106 | 6 | 00106 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 106 | 6 | 00106 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
- 116 | 6 | 00116 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 116 | 6 | 00116 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
- 126 | 6 | 00126 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 126 | 6 | 00126 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
- 136 | 6 | 00136 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 136 | 6 | 00136 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
- 146 | 6 | 00146 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 146 | 6 | 00146 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
- 156 | 6 | 00156 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 156 | 6 | 00156 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
- 166 | 6 | 00166 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 166 | 6 | 00166 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
- 176 | 6 | 00176 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 176 | 6 | 00176 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
- 186 | 6 | 00186 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 186 | 6 | 00186 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
- 196 | 6 | 00196 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 196 | 6 | 00196 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
- 206 | 6 | 00206 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 206 | 6 | 00206 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
- 216 | 6 | 00216 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 216 | 6 | 00216 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
- 226 | 6 | 00226 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 226 | 6 | 00226 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
- 236 | 6 | 00236 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 236 | 6 | 00236 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
- 246 | 6 | 00246 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 246 | 6 | 00246 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
- 256 | 6 | 00256 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 256 | 6 | 00256 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
- 266 | 6 | 00266 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 266 | 6 | 00266 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
- 276 | 6 | 00276 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 276 | 6 | 00276 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
- 286 | 6 | 00286 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 286 | 6 | 00286 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
- 296 | 6 | 00296 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 296 | 6 | 00296 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
- 306 | 6 | 00306 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 306 | 6 | 00306 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
- 316 | 6 | 00316 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 316 | 6 | 00316 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
- 326 | 6 | 00326 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 326 | 6 | 00326 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
- 336 | 6 | 00336 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 336 | 6 | 00336 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
- 346 | 6 | 00346 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 346 | 6 | 00346 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
- 356 | 6 | 00356 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 356 | 6 | 00356 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
- 366 | 6 | 00366 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 366 | 6 | 00366 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
- 376 | 6 | 00376 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 376 | 6 | 00376 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
- 386 | 6 | 00386 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 386 | 6 | 00386 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
- 396 | 6 | 00396 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 396 | 6 | 00396 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
- 406 | 6 | 00406 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 406 | 6 | 00406 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
- 416 | 6 | 00416 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 416 | 6 | 00416 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
- 426 | 6 | 00426 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 426 | 6 | 00426 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
- 436 | 6 | 00436 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 436 | 6 | 00436 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
- 446 | 6 | 00446 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 446 | 6 | 00446 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
- 456 | 6 | 00456 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 456 | 6 | 00456 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
- 466 | 6 | 00466 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 466 | 6 | 00466 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
- 476 | 6 | 00476 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 476 | 6 | 00476 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
- 486 | 6 | 00486 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 486 | 6 | 00486 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
- 496 | 6 | 00496 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 496 | 6 | 00496 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
- 506 | 6 | 00506 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 506 | 6 | 00506 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
- 516 | 6 | 00516 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 516 | 6 | 00516 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
- 526 | 6 | 00526 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 526 | 6 | 00526 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
- 536 | 6 | 00536 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 536 | 6 | 00536 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
- 546 | 6 | 00546 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 546 | 6 | 00546 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
- 556 | 6 | 00556 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 556 | 6 | 00556 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
- 566 | 6 | 00566 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 566 | 6 | 00566 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
- 576 | 6 | 00576 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 576 | 6 | 00576 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
- 586 | 6 | 00586 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 586 | 6 | 00586 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
- 596 | 6 | 00596 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 596 | 6 | 00596 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
- 606 | 6 | 00606 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 606 | 6 | 00606 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
- 616 | 6 | 00616 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 616 | 6 | 00616 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
- 626 | 6 | 00626 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 626 | 6 | 00626 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
- 636 | 6 | 00636 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 636 | 6 | 00636 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
- 646 | 6 | 00646 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 646 | 6 | 00646 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
- 656 | 6 | 00656 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 656 | 6 | 00656 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
- 666 | 6 | 00666 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 666 | 6 | 00666 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
- 676 | 6 | 00676 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 676 | 6 | 00676 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
- 686 | 6 | 00686 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 686 | 6 | 00686 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
- 696 | 6 | 00696 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 696 | 6 | 00696 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
- 706 | 6 | 00706 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 706 | 6 | 00706 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
- 716 | 6 | 00716 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 716 | 6 | 00716 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
- 726 | 6 | 00726 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 726 | 6 | 00726 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
- 736 | 6 | 00736 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 736 | 6 | 00736 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
- 746 | 6 | 00746 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 746 | 6 | 00746 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
- 756 | 6 | 00756 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 756 | 6 | 00756 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
- 766 | 6 | 00766 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 766 | 6 | 00766 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
- 776 | 6 | 00776 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 776 | 6 | 00776 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
- 786 | 6 | 00786 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 786 | 6 | 00786 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
- 796 | 6 | 00796 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 796 | 6 | 00796 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
- 806 | 6 | 00806 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 806 | 6 | 00806 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
- 816 | 6 | 00816 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 816 | 6 | 00816 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
- 826 | 6 | 00826 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 826 | 6 | 00826 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
- 836 | 6 | 00836 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 836 | 6 | 00836 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
- 846 | 6 | 00846 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 846 | 6 | 00846 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
- 856 | 6 | 00856 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 856 | 6 | 00856 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
- 866 | 6 | 00866 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 866 | 6 | 00866 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
- 876 | 6 | 00876 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 876 | 6 | 00876 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
- 886 | 6 | 00886 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 886 | 6 | 00886 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
- 896 | 6 | 00896 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 896 | 6 | 00896 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
- 906 | 6 | 00906 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 906 | 6 | 00906 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
- 916 | 6 | 00916 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 916 | 6 | 00916 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
- 926 | 6 | 00926 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 926 | 6 | 00926 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
- 936 | 6 | 00936 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 936 | 6 | 00936 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
- 946 | 6 | 00946 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 946 | 6 | 00946 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
- 956 | 6 | 00956 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 956 | 6 | 00956 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
- 966 | 6 | 00966 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 966 | 6 | 00966 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
- 976 | 6 | 00976 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 976 | 6 | 00976 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
- 986 | 6 | 00986 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 986 | 6 | 00986 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
- 996 | 6 | 00996 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 996 | 6 | 00996 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
-(100 rows)
-
--- bug before 9.3.5 due to sloppy handling of remote-estimate parameters
-SELECT * FROM ft1 WHERE c1 = ANY (ARRAY(SELECT c1 FROM ft2 WHERE c1 < 5));
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-----+----+-------+------------------------------+--------------------------+----+------------+-----
- 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
- 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
- 3 | 3 | 00003 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
- 4 | 4 | 00004 | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
-(4 rows)
-
-SELECT * FROM ft2 WHERE c1 = ANY (ARRAY(SELECT c1 FROM ft1 WHERE c1 < 5));
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-----+----+-------+------------------------------+--------------------------+----+------------+-----
- 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
- 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
- 3 | 3 | 00003 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
- 4 | 4 | 00004 | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
-(4 rows)
-
--- bug #15613: bad plan for foreign table scan with lateral reference
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT ref_0.c2, subq_1.*
-FROM
- "S 1"."T 1" AS ref_0,
- LATERAL (
- SELECT ref_0."C 1" c1, subq_0.*
- FROM (SELECT ref_0.c2, ref_1.c3
- FROM ft1 AS ref_1) AS subq_0
- RIGHT JOIN ft2 AS ref_3 ON (subq_0.c3 = ref_3.c3)
- ) AS subq_1
-WHERE ref_0."C 1" < 10 AND subq_1.c3 = '00001'
-ORDER BY ref_0."C 1";
-ERROR: syntax error at or near "SELECT"
-LINE 6: SELECT ref_0."C 1" c1, subq_0.*
- ^
--- use local table to check whether this sql supported
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT ref_0.c2, subq_1.*
-FROM
- "S 1"."T 1" AS ref_0,
- LATERAL (
- SELECT ref_0."C 1" c1, subq_0.*
- FROM (SELECT ref_0.c2, ref_1.c3
- FROM local_ft1 AS ref_1) AS subq_0
- RIGHT JOIN local_ft2 AS ref_3 ON (subq_0.c3 = ref_3.c3)
- ) AS subq_1
-WHERE ref_0."C 1" < 10 AND subq_1.c3 = '00001'
-ORDER BY ref_0."C 1";
-ERROR: syntax error at or near "SELECT"
-LINE 6: SELECT ref_0."C 1" c1, subq_0.*
- ^
-SELECT ref_0.c2, subq_1.*
-FROM
- "S 1"."T 1" AS ref_0,
- LATERAL (
- SELECT ref_0."C 1" c1, subq_0.*
- FROM (SELECT ref_0.c2, ref_1.c3
- FROM ft1 AS ref_1) AS subq_0
- RIGHT JOIN ft2 AS ref_3 ON (subq_0.c3 = ref_3.c3)
- ) AS subq_1
-WHERE ref_0."C 1" < 10 AND subq_1.c3 = '00001'
-ORDER BY ref_0."C 1";
-ERROR: syntax error at or near "SELECT"
-LINE 5: SELECT ref_0."C 1" c1, subq_0.*
- ^
--- use local table to check whether this sql supported
-SELECT ref_0.c2, subq_1.*
-FROM
- "S 1"."T 1" AS ref_0,
- LATERAL (
- SELECT ref_0."C 1" c1, subq_0.*
- FROM (SELECT ref_0.c2, ref_1.c3
- FROM local_ft1 AS ref_1) AS subq_0
- RIGHT JOIN local_ft2 AS ref_3 ON (subq_0.c3 = ref_3.c3)
- ) AS subq_1
-WHERE ref_0."C 1" < 10 AND subq_1.c3 = '00001'
-ORDER BY ref_0."C 1";
-ERROR: syntax error at or near "SELECT"
-LINE 5: SELECT ref_0."C 1" c1, subq_0.*
- ^
--- ===================================================================
--- parameterized queries
--- ===================================================================
--- simple join
-PREPARE st1(int, int) AS SELECT t1.c3, t2.c3 FROM ft1 t1, ft2 t2 WHERE t1.c1 = $1 AND t2.c1 = $2;
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st1(1, 2);
- QUERY PLAN
-------------------------------------------------------------------------------
- Nested Loop
- Output: t1.c3, t2.c3
- -> Foreign Scan on public.ft1 t1
- Output: t1.c3
- Remote SQL: SELECT c3 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
- -> Foreign Scan on public.ft2 t2
- Output: t2.c3
- Remote SQL: SELECT c3 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
-(8 rows)
-
-EXECUTE st1(1, 1);
- c3 | c3
--------+-------
- 00001 | 00001
-(1 row)
-
-EXECUTE st1(101, 101);
- c3 | c3
--------+-------
- 00101 | 00101
-(1 row)
-
--- subquery using stable function (can't be sent to remote)
-PREPARE st2(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND (c4) = '1970-01-17'::date) ORDER BY c1;
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st2(10, 20);
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------
- Sort
- Output: t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
- Sort Key: t1.c1
- -> Hash Semi Join
- Output: t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
- Hash Cond: (t1.c3 = t2.c3)
- -> Foreign Scan on public.ft1 t1
- Output: t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" < $1::integer))
- -> Hash
- Output: t2.c3
- -> Foreign Scan on public.ft2 t2
- Output: t2.c3
- Filter: (t2.c4 = 'Sat Jan 17 00:00:00 1970'::timestamp(0) without time zone)
- Remote SQL: SELECT c3, c4 FROM "S 1"."T 1" WHERE (("C 1" > $1::integer))
-(15 rows)
-
-EXECUTE st2(10, 20);
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-----+----+-------+------------------------------+--------------------------+----+------------+-----
- 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
-(1 row)
-
-EXECUTE st2(101, 121);
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
------+----+-------+------------------------------+--------------------------+----+------------+-----
- 116 | 6 | 00116 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
-(1 row)
-
--- subquery using immutable function (can be sent to remote)
-PREPARE st3(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND (c5) = '1970-01-17'::date) ORDER BY c1;
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st3(10, 20);
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------
- Sort
- Output: t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
- Sort Key: t1.c1
- -> Hash Semi Join
- Output: t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
- Hash Cond: (t1.c3 = t2.c3)
- -> Foreign Scan on public.ft1 t1
- Output: t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" < $1::integer))
- -> Hash
- Output: t2.c3
- -> Foreign Scan on public.ft2 t2
- Output: t2.c3
- Remote SQL: SELECT c3 FROM "S 1"."T 1" WHERE (("C 1" > $1::integer)) AND ((c5 = '1970-01-17 00:00:00'::timestamp(0) without time zone))
-(14 rows)
-
-EXECUTE st3(10, 20);
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-----+----+-------+------------------------------+--------------------------+----+------------+-----
- 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
-(1 row)
-
-EXECUTE st3(20, 30);
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-----+----+----+----+----+----+----+----
-(0 rows)
-
--- custom plan should be chosen initially
-PREPARE st4(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 = $1;
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
- QUERY PLAN
--------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
-(3 rows)
-
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
- QUERY PLAN
--------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
-(3 rows)
-
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
- QUERY PLAN
--------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
-(3 rows)
-
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
- QUERY PLAN
--------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
-(3 rows)
-
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
- QUERY PLAN
--------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
-(3 rows)
-
--- once we try it enough times, should switch to generic plan
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
- QUERY PLAN
--------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
-(3 rows)
-
--- value of $1 should not be sent to remote
-PREPARE st5(user_enum,int) AS SELECT * FROM ft1 t1 WHERE c8 = $1 and c1 = $2;
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
- QUERY PLAN
--------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Filter: (t1.c8 = $1)
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
-(4 rows)
-
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
- QUERY PLAN
--------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Filter: (t1.c8 = $1)
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
-(4 rows)
-
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
- QUERY PLAN
--------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Filter: (t1.c8 = $1)
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
-(4 rows)
-
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
- QUERY PLAN
--------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Filter: (t1.c8 = $1)
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
-(4 rows)
-
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
- QUERY PLAN
--------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Filter: (t1.c8 = $1)
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
-(4 rows)
-
-EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
- QUERY PLAN
--------------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Filter: (t1.c8 = $1)
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
-(4 rows)
-
-EXECUTE st5('foo', 1);
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-----+----+-------+------------------------------+--------------------------+----+------------+-----
- 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
-(1 row)
-
--- altering FDW options requires replanning
-PREPARE st6 AS SELECT * FROM ft1 t1 WHERE t1.c1 = t1.c2;
-EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st6;
- QUERY PLAN
-----------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = c2))
-(3 rows)
-
-PREPARE st7 AS INSERT INTO ft1 (c1,c2,c3) VALUES (1001,101,'foo');
-EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st7;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Insert on public.ft1
- -> Result
- Output: NULL::integer, 1001, 101, 'foo'::text, NULL::timestamp with time zone, NULL::timestamp without time zone, NULL::character varying, 'ft1 '::character(10), NULL::user_enum
-(3 rows)
-
-ALTER TABLE "S 1"."T 1" RENAME TO "T 0";
-ALTER FOREIGN TABLE ft1 OPTIONS (SET table_name 'T 0');
-EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st6;
- QUERY PLAN
-----------------------------------------------------------------------------------------------
- Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 0" WHERE (("C 1" = c2))
-(3 rows)
-
-EXECUTE st6;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-----+----+-------+------------------------------+--------------------------+----+------------+-----
- 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
- 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
- 3 | 3 | 00003 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
- 4 | 4 | 00004 | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
- 5 | 5 | 00005 | Tue Jan 06 00:00:00 1970 PST | Tue Jan 06 00:00:00 1970 | 5 | 5 | foo
- 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
- 7 | 7 | 00007 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
- 8 | 8 | 00008 | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
- 9 | 9 | 00009 | Sat Jan 10 00:00:00 1970 PST | Sat Jan 10 00:00:00 1970 | 9 | 9 | foo
-(9 rows)
-
-EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st7;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Insert on public.ft1
- -> Result
- Output: NULL::integer, 1001, 101, 'foo'::text, NULL::timestamp with time zone, NULL::timestamp without time zone, NULL::character varying, 'ft1 '::character(10), NULL::user_enum
-(3 rows)
-
-ALTER TABLE "S 1"."T 0" RENAME TO "T 1";
-ALTER FOREIGN TABLE ft1 OPTIONS (SET table_name 'T 1');
--- cleanup
-DEALLOCATE st1;
-DEALLOCATE st2;
-DEALLOCATE st3;
-DEALLOCATE st4;
-DEALLOCATE st5;
-DEALLOCATE st6;
-DEALLOCATE st7;
--- System columns, except ctid, should not be sent to remote
-EXPLAIN (VERBOSE, COSTS false)
-SELECT * FROM ft1 t1 WHERE t1.tableoid = 'pg_class'::regclass LIMIT 1;
-ERROR: column t1.tableoid does not exist
-LINE 2: SELECT * FROM ft1 t1 WHERE t1.tableoid = 'pg_class'::regclas...
- ^
-SELECT * FROM ft1 t1 WHERE t1.tableoid = 'ft1'::regclass LIMIT 1;
-ERROR: column t1.tableoid does not exist
-LINE 1: SELECT * FROM ft1 t1 WHERE t1.tableoid = 'ft1'::regclass LIM...
- ^
-EXPLAIN (VERBOSE, COSTS false)
-SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1;
-ERROR: column "tableoid" does not exist
-LINE 2: SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1;
- ^
-CONTEXT: referenced column: tableoid
-SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1;
-ERROR: column "tableoid" does not exist
-LINE 1: SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1;
- ^
-CONTEXT: referenced column: tableoid
-EXPLAIN (VERBOSE, COSTS false)
-SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)';
-ERROR: column t1.ctid does not exist
-LINE 2: SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)';
- ^
-SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)';
-ERROR: column t1.ctid does not exist
-LINE 1: SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)';
- ^
-EXPLAIN (VERBOSE, COSTS false)
-SELECT ctid, * FROM ft1 t1 LIMIT 1;
-ERROR: column "ctid" does not exist
-LINE 2: SELECT ctid, * FROM ft1 t1 LIMIT 1;
- ^
-CONTEXT: referenced column: ctid
-SELECT ctid, * FROM ft1 t1 LIMIT 1;
-ERROR: column "ctid" does not exist
-LINE 1: SELECT ctid, * FROM ft1 t1 LIMIT 1;
- ^
-CONTEXT: referenced column: ctid
--- ===================================================================
--- used in pl/pgsql function
--- ===================================================================
-CREATE OR REPLACE FUNCTION f_test(p_c1 int) RETURNS int AS $$
-DECLARE
- v_c1 int;
-BEGIN
- SELECT c1 INTO v_c1 FROM ft1 WHERE c1 = p_c1 LIMIT 1;
- PERFORM c1 FROM ft1 WHERE c1 = p_c1 AND p_c1 = v_c1 LIMIT 1;
- RETURN v_c1;
-END;
-$$ LANGUAGE plpgsql;
-SELECT f_test(100);
- f_test
---------
- 100
-(1 row)
-
-DROP FUNCTION f_test(int);
--- ===================================================================
--- conversion error
--- ===================================================================
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE int;
-SELECT * FROM ft1 WHERE c1 = 1; -- ERROR
-ERROR: invalid input syntax for integer: "foo"
-CONTEXT: column "c8" of foreign table "ft1"
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE user_enum;
--- ===================================================================
--- subtransaction
--- + local/remote error doesn't break cursor
--- ===================================================================
-BEGIN;
-DECLARE c CURSOR FOR SELECT * FROM ft1 ORDER BY c1;
-FETCH c;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-----+----+-------+------------------------------+--------------------------+----+------------+-----
- 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
-(1 row)
-
-SAVEPOINT s;
-ERROR OUT; -- ERROR
-ERROR: syntax error at or near "ERROR"
-LINE 1: ERROR OUT;
- ^
-ROLLBACK TO s;
-FETCH c;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-----+----+-------+------------------------------+--------------------------+----+------------+-----
- 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
-(1 row)
-
-SAVEPOINT s;
-SELECT * FROM ft1 WHERE 1 / (c1 - 1) > 0; -- ERROR
-ERROR: division by zero
-CONTEXT: Remote SQL command: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (((1 / ("C 1" - 1)) > 0::double precision))
-ROLLBACK TO s;
-FETCH c;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-----+----+-------+------------------------------+--------------------------+----+------------+-----
- 3 | 3 | 00003 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
-(1 row)
-
-SELECT * FROM ft1 ORDER BY c1 LIMIT 1;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-----+----+-------+------------------------------+--------------------------+----+------------+-----
- 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
-(1 row)
-
-COMMIT;
--- ===================================================================
--- test handling of collations
--- ===================================================================
-create table loct3 (f1 text collate "C" unique, f2 text, f3 varchar(10) unique);
-NOTICE: CREATE TABLE / UNIQUE will create implicit index "loct3_f1_key" for table "loct3"
-NOTICE: CREATE TABLE / UNIQUE will create implicit index "loct3_f3_key" for table "loct3"
-create foreign table ft3 (f1 text collate "C", f2 text, f3 varchar(10))
- server loopback options (table_name 'loct3', use_remote_estimate 'true');
--- can be sent to remote
-explain (verbose, costs off) select * from ft3 where f1 = 'foo';
- QUERY PLAN
-------------------------------------------------------------------------------
- Foreign Scan on public.ft3
- Output: f1, f2, f3
- Remote SQL: SELECT f1, f2, f3 FROM public.loct3 WHERE ((f1 = 'foo'::text))
-(3 rows)
-
-explain (verbose, costs off) select * from ft3 where f1 COLLATE "C" = 'foo';
- QUERY PLAN
-------------------------------------------------------------------------------
- Foreign Scan on public.ft3
- Output: f1, f2, f3
- Remote SQL: SELECT f1, f2, f3 FROM public.loct3 WHERE ((f1 = 'foo'::text))
-(3 rows)
-
-explain (verbose, costs off) select * from ft3 where f2 = 'foo';
- QUERY PLAN
-------------------------------------------------------------------------------
- Foreign Scan on public.ft3
- Output: f1, f2, f3
- Remote SQL: SELECT f1, f2, f3 FROM public.loct3 WHERE ((f2 = 'foo'::text))
-(3 rows)
-
-explain (verbose, costs off) select * from ft3 where f3 = 'foo';
- QUERY PLAN
-------------------------------------------------------------------------------
- Foreign Scan on public.ft3
- Output: f1, f2, f3
- Remote SQL: SELECT f1, f2, f3 FROM public.loct3 WHERE ((f3 = 'foo'::text))
-(3 rows)
-
-explain (verbose, costs off) select * from ft3 f, loct3 l
- where f.f3 = l.f3 and l.f1 = 'foo';
- QUERY PLAN
----------------------------------------------------------
- Nested Loop
- Output: f.f1, f.f2, f.f3, l.f1, l.f2, l.f3
- Join Filter: ((f.f3)::text = (l.f3)::text)
- -> Index Scan using loct3_f1_key on public.loct3 l
- Output: l.f1, l.f2, l.f3
- Index Cond: (l.f1 = 'foo'::text)
- -> Foreign Scan on public.ft3 f
- Output: f.f1, f.f2, f.f3
- Remote SQL: SELECT f1, f2, f3 FROM public.loct3
-(9 rows)
-
--- can't be sent to remote
-explain (verbose, costs off) select * from ft3 where f1 COLLATE "POSIX" = 'foo';
- QUERY PLAN
----------------------------------------------------
- Foreign Scan on public.ft3
- Output: f1, f2, f3
- Filter: ((ft3.f1)::text = 'foo'::text)
- Remote SQL: SELECT f1, f2, f3 FROM public.loct3
-(4 rows)
-
-explain (verbose, costs off) select * from ft3 where f1 = 'foo' COLLATE "C";
- QUERY PLAN
----------------------------------------------------
- Foreign Scan on public.ft3
- Output: f1, f2, f3
- Filter: (ft3.f1 = 'foo'::text COLLATE "C")
- Remote SQL: SELECT f1, f2, f3 FROM public.loct3
-(4 rows)
-
-explain (verbose, costs off) select * from ft3 where f2 COLLATE "C" = 'foo';
- QUERY PLAN
----------------------------------------------------
- Foreign Scan on public.ft3
- Output: f1, f2, f3
- Filter: ((ft3.f2)::text = 'foo'::text)
- Remote SQL: SELECT f1, f2, f3 FROM public.loct3
-(4 rows)
-
-explain (verbose, costs off) select * from ft3 where f2 = 'foo' COLLATE "C";
- QUERY PLAN
----------------------------------------------------
- Foreign Scan on public.ft3
- Output: f1, f2, f3
- Filter: (ft3.f2 = 'foo'::text COLLATE "C")
- Remote SQL: SELECT f1, f2, f3 FROM public.loct3
-(4 rows)
-
-explain (verbose, costs off) select * from ft3 f, loct3 l
- where f.f3 = l.f3 COLLATE "POSIX" and l.f1 = 'foo';
- QUERY PLAN
----------------------------------------------------------
- Nested Loop
- Output: f.f1, f.f2, f.f3, l.f1, l.f2, l.f3
- Join Filter: ((f.f3)::text = (l.f3)::text)
- -> Index Scan using loct3_f1_key on public.loct3 l
- Output: l.f1, l.f2, l.f3
- Index Cond: (l.f1 = 'foo'::text)
- -> Foreign Scan on public.ft3 f
- Output: f.f1, f.f2, f.f3
- Remote SQL: SELECT f1, f2, f3 FROM public.loct3
-(9 rows)
-
--- ===================================================================
--- test writable foreign table stuff
--- ===================================================================
-EXPLAIN (verbose, costs off)
-INSERT INTO ft2 (c1,c2,c3) SELECT c1+1000,c2+100, c3 || c3 FROM ft2 LIMIT 20;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Insert on public.ft2
- -> Subquery Scan on "*SELECT*"
- Output: "*SELECT*"."?column?", "*SELECT*"."?column?", NULL::integer, "*SELECT*"."?column?", NULL::timestamp with time zone, NULL::timestamp without time zone, NULL::character varying, 'ft2 '::character(10), NULL::user_enum
- -> Limit
- Output: ((public.ft2.c1 + 1000)), ((public.ft2.c2 + 100)), ((public.ft2.c3 || public.ft2.c3))
- -> Foreign Scan on public.ft2
- Output: (public.ft2.c1 + 1000), (public.ft2.c2 + 100), (public.ft2.c3 || public.ft2.c3)
- Remote SQL: SELECT "C 1", c2, c3 FROM "S 1"."T 1"
-(8 rows)
-
-INSERT INTO ft2 (c1,c2,c3) SELECT c1+1000,c2+100, c3 || c3 FROM ft2 LIMIT 20;
-INSERT INTO ft2 (c1,c2,c3)
- VALUES (1101,201,'aaa'), (1102,202,'bbb'), (1103,203,'ccc') RETURNING *;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-------+-----+-----+----+----+----+------------+----
- 1101 | 201 | aaa | | | | ft2 |
- 1102 | 202 | bbb | | | | ft2 |
- 1103 | 203 | ccc | | | | ft2 |
-(3 rows)
-
-INSERT INTO ft2 (c1,c2,c3) VALUES (1104,204,'ddd'), (1105,205,'eee');
-UPDATE ft2 SET c2 = c2 + 300, c3 = c3 || '_update3' WHERE c1 % 10 = 3;
-UPDATE ft2 SET c2 = c2 + 400, c3 = c3 || '_update7' WHERE c1 % 10 = 7 RETURNING *;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-------+-----+--------------------+------------------------------+--------------------------+----+------------+-----
- 7 | 407 | 00007_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
- 17 | 407 | 00017_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
- 27 | 407 | 00027_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
- 37 | 407 | 00037_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
- 47 | 407 | 00047_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
- 57 | 407 | 00057_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
- 67 | 407 | 00067_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
- 77 | 407 | 00077_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
- 87 | 407 | 00087_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
- 97 | 407 | 00097_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
- 107 | 407 | 00107_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
- 117 | 407 | 00117_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
- 127 | 407 | 00127_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
- 137 | 407 | 00137_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
- 147 | 407 | 00147_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
- 157 | 407 | 00157_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
- 167 | 407 | 00167_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
- 177 | 407 | 00177_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
- 187 | 407 | 00187_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
- 197 | 407 | 00197_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
- 207 | 407 | 00207_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
- 217 | 407 | 00217_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
- 227 | 407 | 00227_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
- 237 | 407 | 00237_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
- 247 | 407 | 00247_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
- 257 | 407 | 00257_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
- 267 | 407 | 00267_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
- 277 | 407 | 00277_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
- 287 | 407 | 00287_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
- 297 | 407 | 00297_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
- 307 | 407 | 00307_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
- 317 | 407 | 00317_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
- 327 | 407 | 00327_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
- 337 | 407 | 00337_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
- 347 | 407 | 00347_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
- 357 | 407 | 00357_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
- 367 | 407 | 00367_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
- 377 | 407 | 00377_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
- 387 | 407 | 00387_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
- 397 | 407 | 00397_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
- 407 | 407 | 00407_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
- 417 | 407 | 00417_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
- 427 | 407 | 00427_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
- 437 | 407 | 00437_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
- 447 | 407 | 00447_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
- 457 | 407 | 00457_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
- 467 | 407 | 00467_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
- 477 | 407 | 00477_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
- 487 | 407 | 00487_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
- 497 | 407 | 00497_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
- 507 | 407 | 00507_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
- 517 | 407 | 00517_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
- 527 | 407 | 00527_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
- 537 | 407 | 00537_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
- 547 | 407 | 00547_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
- 557 | 407 | 00557_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
- 567 | 407 | 00567_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
- 577 | 407 | 00577_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
- 587 | 407 | 00587_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
- 597 | 407 | 00597_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
- 607 | 407 | 00607_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
- 617 | 407 | 00617_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
- 627 | 407 | 00627_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
- 637 | 407 | 00637_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
- 647 | 407 | 00647_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
- 657 | 407 | 00657_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
- 667 | 407 | 00667_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
- 677 | 407 | 00677_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
- 687 | 407 | 00687_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
- 697 | 407 | 00697_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
- 707 | 407 | 00707_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
- 717 | 407 | 00717_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
- 727 | 407 | 00727_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
- 737 | 407 | 00737_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
- 747 | 407 | 00747_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
- 757 | 407 | 00757_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
- 767 | 407 | 00767_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
- 777 | 407 | 00777_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
- 787 | 407 | 00787_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
- 797 | 407 | 00797_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
- 807 | 407 | 00807_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
- 817 | 407 | 00817_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
- 827 | 407 | 00827_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
- 837 | 407 | 00837_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
- 847 | 407 | 00847_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
- 857 | 407 | 00857_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
- 867 | 407 | 00867_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
- 877 | 407 | 00877_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
- 887 | 407 | 00887_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
- 897 | 407 | 00897_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
- 907 | 407 | 00907_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
- 917 | 407 | 00917_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
- 927 | 407 | 00927_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
- 937 | 407 | 00937_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
- 947 | 407 | 00947_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
- 957 | 407 | 00957_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
- 967 | 407 | 00967_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
- 977 | 407 | 00977_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
- 987 | 407 | 00987_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
- 997 | 407 | 00997_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
- 1007 | 507 | 0000700007_update7 | | | | ft2 |
- 1017 | 507 | 0001700017_update7 | | | | ft2 |
-(102 rows)
-
-EXPLAIN (verbose, costs off)
-UPDATE ft2 SET c2 = ft2.c2 + 500, c3 = ft2.c3 || '_update9', c7 = DEFAULT
- FROM ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 9;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Update on public.ft2
- -> Nested Loop
- Output: ft2.c1, (ft2.c2 + 500), NULL::integer, (ft2.c3 || '_update9'::text), ft2.c4, ft2.c5, ft2.c6, 'ft2 '::character(10), ft2.c8, ft2.ctid, ft1.*
- Join Filter: (ft2.c2 = ft1.c1)
- -> Foreign Scan on public.ft2
- Output: ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c8, ft2.ctid
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c8, ctid FROM "S 1"."T 1" WHERE (((c2 % 10) = 9)) FOR UPDATE
- -> Materialize
- Output: ft1.*, ft1.c1
- -> Foreign Scan on public.ft1
- Output: ft1.*, ft1.c1
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((("C 1" % 10) = 9))
-(12 rows)
-
-UPDATE ft2 SET c2 = ft2.c2 + 500, c3 = ft2.c3 || '_update9', c7 = DEFAULT
- FROM ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 9;
-EXPLAIN (verbose, costs off)
- DELETE FROM ft2 WHERE c1 % 10 = 5 RETURNING c1, c4;
- QUERY PLAN
-----------------------------------------------------------------------------------------
- Delete on public.ft2
- Output: c1, c4
- -> Foreign Scan on public.ft2
- Output: ctid
- Remote SQL: SELECT ctid FROM "S 1"."T 1" WHERE ((("C 1" % 10) = 5)) FOR UPDATE
-(5 rows)
-
-DELETE FROM ft2 WHERE c1 % 10 = 5 RETURNING c1, c4;
- c1 | c4
-------+------------------------------
- 5 | Tue Jan 06 00:00:00 1970 PST
- 15 | Fri Jan 16 00:00:00 1970 PST
- 25 | Mon Jan 26 00:00:00 1970 PST
- 35 | Thu Feb 05 00:00:00 1970 PST
- 45 | Sun Feb 15 00:00:00 1970 PST
- 55 | Wed Feb 25 00:00:00 1970 PST
- 65 | Sat Mar 07 00:00:00 1970 PST
- 75 | Tue Mar 17 00:00:00 1970 PST
- 85 | Fri Mar 27 00:00:00 1970 PST
- 95 | Mon Apr 06 00:00:00 1970 PST
- 105 | Tue Jan 06 00:00:00 1970 PST
- 115 | Fri Jan 16 00:00:00 1970 PST
- 125 | Mon Jan 26 00:00:00 1970 PST
- 135 | Thu Feb 05 00:00:00 1970 PST
- 145 | Sun Feb 15 00:00:00 1970 PST
- 155 | Wed Feb 25 00:00:00 1970 PST
- 165 | Sat Mar 07 00:00:00 1970 PST
- 175 | Tue Mar 17 00:00:00 1970 PST
- 185 | Fri Mar 27 00:00:00 1970 PST
- 195 | Mon Apr 06 00:00:00 1970 PST
- 205 | Tue Jan 06 00:00:00 1970 PST
- 215 | Fri Jan 16 00:00:00 1970 PST
- 225 | Mon Jan 26 00:00:00 1970 PST
- 235 | Thu Feb 05 00:00:00 1970 PST
- 245 | Sun Feb 15 00:00:00 1970 PST
- 255 | Wed Feb 25 00:00:00 1970 PST
- 265 | Sat Mar 07 00:00:00 1970 PST
- 275 | Tue Mar 17 00:00:00 1970 PST
- 285 | Fri Mar 27 00:00:00 1970 PST
- 295 | Mon Apr 06 00:00:00 1970 PST
- 305 | Tue Jan 06 00:00:00 1970 PST
- 315 | Fri Jan 16 00:00:00 1970 PST
- 325 | Mon Jan 26 00:00:00 1970 PST
- 335 | Thu Feb 05 00:00:00 1970 PST
- 345 | Sun Feb 15 00:00:00 1970 PST
- 355 | Wed Feb 25 00:00:00 1970 PST
- 365 | Sat Mar 07 00:00:00 1970 PST
- 375 | Tue Mar 17 00:00:00 1970 PST
- 385 | Fri Mar 27 00:00:00 1970 PST
- 395 | Mon Apr 06 00:00:00 1970 PST
- 405 | Tue Jan 06 00:00:00 1970 PST
- 415 | Fri Jan 16 00:00:00 1970 PST
- 425 | Mon Jan 26 00:00:00 1970 PST
- 435 | Thu Feb 05 00:00:00 1970 PST
- 445 | Sun Feb 15 00:00:00 1970 PST
- 455 | Wed Feb 25 00:00:00 1970 PST
- 465 | Sat Mar 07 00:00:00 1970 PST
- 475 | Tue Mar 17 00:00:00 1970 PST
- 485 | Fri Mar 27 00:00:00 1970 PST
- 495 | Mon Apr 06 00:00:00 1970 PST
- 505 | Tue Jan 06 00:00:00 1970 PST
- 515 | Fri Jan 16 00:00:00 1970 PST
- 525 | Mon Jan 26 00:00:00 1970 PST
- 535 | Thu Feb 05 00:00:00 1970 PST
- 545 | Sun Feb 15 00:00:00 1970 PST
- 555 | Wed Feb 25 00:00:00 1970 PST
- 565 | Sat Mar 07 00:00:00 1970 PST
- 575 | Tue Mar 17 00:00:00 1970 PST
- 585 | Fri Mar 27 00:00:00 1970 PST
- 595 | Mon Apr 06 00:00:00 1970 PST
- 605 | Tue Jan 06 00:00:00 1970 PST
- 615 | Fri Jan 16 00:00:00 1970 PST
- 625 | Mon Jan 26 00:00:00 1970 PST
- 635 | Thu Feb 05 00:00:00 1970 PST
- 645 | Sun Feb 15 00:00:00 1970 PST
- 655 | Wed Feb 25 00:00:00 1970 PST
- 665 | Sat Mar 07 00:00:00 1970 PST
- 675 | Tue Mar 17 00:00:00 1970 PST
- 685 | Fri Mar 27 00:00:00 1970 PST
- 695 | Mon Apr 06 00:00:00 1970 PST
- 705 | Tue Jan 06 00:00:00 1970 PST
- 715 | Fri Jan 16 00:00:00 1970 PST
- 725 | Mon Jan 26 00:00:00 1970 PST
- 735 | Thu Feb 05 00:00:00 1970 PST
- 745 | Sun Feb 15 00:00:00 1970 PST
- 755 | Wed Feb 25 00:00:00 1970 PST
- 765 | Sat Mar 07 00:00:00 1970 PST
- 775 | Tue Mar 17 00:00:00 1970 PST
- 785 | Fri Mar 27 00:00:00 1970 PST
- 795 | Mon Apr 06 00:00:00 1970 PST
- 805 | Tue Jan 06 00:00:00 1970 PST
- 815 | Fri Jan 16 00:00:00 1970 PST
- 825 | Mon Jan 26 00:00:00 1970 PST
- 835 | Thu Feb 05 00:00:00 1970 PST
- 845 | Sun Feb 15 00:00:00 1970 PST
- 855 | Wed Feb 25 00:00:00 1970 PST
- 865 | Sat Mar 07 00:00:00 1970 PST
- 875 | Tue Mar 17 00:00:00 1970 PST
- 885 | Fri Mar 27 00:00:00 1970 PST
- 895 | Mon Apr 06 00:00:00 1970 PST
- 905 | Tue Jan 06 00:00:00 1970 PST
- 915 | Fri Jan 16 00:00:00 1970 PST
- 925 | Mon Jan 26 00:00:00 1970 PST
- 935 | Thu Feb 05 00:00:00 1970 PST
- 945 | Sun Feb 15 00:00:00 1970 PST
- 955 | Wed Feb 25 00:00:00 1970 PST
- 965 | Sat Mar 07 00:00:00 1970 PST
- 975 | Tue Mar 17 00:00:00 1970 PST
- 985 | Fri Mar 27 00:00:00 1970 PST
- 995 | Mon Apr 06 00:00:00 1970 PST
- 1005 |
- 1015 |
- 1105 |
-(103 rows)
-
-EXPLAIN (verbose, costs off)
-DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------
- Delete on public.ft2
- -> Nested Loop
- Output: ft2.ctid, ft1.*
- Join Filter: (ft2.c2 = ft1.c1)
- -> Foreign Scan on public.ft2
- Output: ft2.ctid, ft2.c2
- Remote SQL: SELECT c2, ctid FROM "S 1"."T 1" WHERE (((c2 % 10) = 2)) FOR UPDATE
- -> Materialize
- Output: ft1.*, ft1.c1
- -> Foreign Scan on public.ft1
- Output: ft1.*, ft1.c1
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((("C 1" % 10) = 2))
-(12 rows)
-
-DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2;
-SELECT c1,c2,c3,c4 FROM ft2 ORDER BY c1;
- c1 | c2 | c3 | c4
-------+-----+--------------------+------------------------------
- 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST
- 3 | 303 | 00003_update3 | Sun Jan 04 00:00:00 1970 PST
- 4 | 4 | 00004 | Mon Jan 05 00:00:00 1970 PST
- 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST
- 7 | 407 | 00007_update7 | Thu Jan 08 00:00:00 1970 PST
- 8 | 8 | 00008 | Fri Jan 09 00:00:00 1970 PST
- 9 | 509 | 00009_update9 | Sat Jan 10 00:00:00 1970 PST
- 10 | 0 | 00010 | Sun Jan 11 00:00:00 1970 PST
- 11 | 1 | 00011 | Mon Jan 12 00:00:00 1970 PST
- 13 | 303 | 00013_update3 | Wed Jan 14 00:00:00 1970 PST
- 14 | 4 | 00014 | Thu Jan 15 00:00:00 1970 PST
- 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST
- 17 | 407 | 00017_update7 | Sun Jan 18 00:00:00 1970 PST
- 18 | 8 | 00018 | Mon Jan 19 00:00:00 1970 PST
- 19 | 509 | 00019_update9 | Tue Jan 20 00:00:00 1970 PST
- 20 | 0 | 00020 | Wed Jan 21 00:00:00 1970 PST
- 21 | 1 | 00021 | Thu Jan 22 00:00:00 1970 PST
- 23 | 303 | 00023_update3 | Sat Jan 24 00:00:00 1970 PST
- 24 | 4 | 00024 | Sun Jan 25 00:00:00 1970 PST
- 26 | 6 | 00026 | Tue Jan 27 00:00:00 1970 PST
- 27 | 407 | 00027_update7 | Wed Jan 28 00:00:00 1970 PST
- 28 | 8 | 00028 | Thu Jan 29 00:00:00 1970 PST
- 29 | 509 | 00029_update9 | Fri Jan 30 00:00:00 1970 PST
- 30 | 0 | 00030 | Sat Jan 31 00:00:00 1970 PST
- 31 | 1 | 00031 | Sun Feb 01 00:00:00 1970 PST
- 33 | 303 | 00033_update3 | Tue Feb 03 00:00:00 1970 PST
- 34 | 4 | 00034 | Wed Feb 04 00:00:00 1970 PST
- 36 | 6 | 00036 | Fri Feb 06 00:00:00 1970 PST
- 37 | 407 | 00037_update7 | Sat Feb 07 00:00:00 1970 PST
- 38 | 8 | 00038 | Sun Feb 08 00:00:00 1970 PST
- 39 | 509 | 00039_update9 | Mon Feb 09 00:00:00 1970 PST
- 40 | 0 | 00040 | Tue Feb 10 00:00:00 1970 PST
- 41 | 1 | 00041 | Wed Feb 11 00:00:00 1970 PST
- 43 | 303 | 00043_update3 | Fri Feb 13 00:00:00 1970 PST
- 44 | 4 | 00044 | Sat Feb 14 00:00:00 1970 PST
- 46 | 6 | 00046 | Mon Feb 16 00:00:00 1970 PST
- 47 | 407 | 00047_update7 | Tue Feb 17 00:00:00 1970 PST
- 48 | 8 | 00048 | Wed Feb 18 00:00:00 1970 PST
- 49 | 509 | 00049_update9 | Thu Feb 19 00:00:00 1970 PST
- 50 | 0 | 00050 | Fri Feb 20 00:00:00 1970 PST
- 51 | 1 | 00051 | Sat Feb 21 00:00:00 1970 PST
- 53 | 303 | 00053_update3 | Mon Feb 23 00:00:00 1970 PST
- 54 | 4 | 00054 | Tue Feb 24 00:00:00 1970 PST
- 56 | 6 | 00056 | Thu Feb 26 00:00:00 1970 PST
- 57 | 407 | 00057_update7 | Fri Feb 27 00:00:00 1970 PST
- 58 | 8 | 00058 | Sat Feb 28 00:00:00 1970 PST
- 59 | 509 | 00059_update9 | Sun Mar 01 00:00:00 1970 PST
- 60 | 0 | 00060 | Mon Mar 02 00:00:00 1970 PST
- 61 | 1 | 00061 | Tue Mar 03 00:00:00 1970 PST
- 63 | 303 | 00063_update3 | Thu Mar 05 00:00:00 1970 PST
- 64 | 4 | 00064 | Fri Mar 06 00:00:00 1970 PST
- 66 | 6 | 00066 | Sun Mar 08 00:00:00 1970 PST
- 67 | 407 | 00067_update7 | Mon Mar 09 00:00:00 1970 PST
- 68 | 8 | 00068 | Tue Mar 10 00:00:00 1970 PST
- 69 | 509 | 00069_update9 | Wed Mar 11 00:00:00 1970 PST
- 70 | 0 | 00070 | Thu Mar 12 00:00:00 1970 PST
- 71 | 1 | 00071 | Fri Mar 13 00:00:00 1970 PST
- 73 | 303 | 00073_update3 | Sun Mar 15 00:00:00 1970 PST
- 74 | 4 | 00074 | Mon Mar 16 00:00:00 1970 PST
- 76 | 6 | 00076 | Wed Mar 18 00:00:00 1970 PST
- 77 | 407 | 00077_update7 | Thu Mar 19 00:00:00 1970 PST
- 78 | 8 | 00078 | Fri Mar 20 00:00:00 1970 PST
- 79 | 509 | 00079_update9 | Sat Mar 21 00:00:00 1970 PST
- 80 | 0 | 00080 | Sun Mar 22 00:00:00 1970 PST
- 81 | 1 | 00081 | Mon Mar 23 00:00:00 1970 PST
- 83 | 303 | 00083_update3 | Wed Mar 25 00:00:00 1970 PST
- 84 | 4 | 00084 | Thu Mar 26 00:00:00 1970 PST
- 86 | 6 | 00086 | Sat Mar 28 00:00:00 1970 PST
- 87 | 407 | 00087_update7 | Sun Mar 29 00:00:00 1970 PST
- 88 | 8 | 00088 | Mon Mar 30 00:00:00 1970 PST
- 89 | 509 | 00089_update9 | Tue Mar 31 00:00:00 1970 PST
- 90 | 0 | 00090 | Wed Apr 01 00:00:00 1970 PST
- 91 | 1 | 00091 | Thu Apr 02 00:00:00 1970 PST
- 93 | 303 | 00093_update3 | Sat Apr 04 00:00:00 1970 PST
- 94 | 4 | 00094 | Sun Apr 05 00:00:00 1970 PST
- 96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST
- 97 | 407 | 00097_update7 | Wed Apr 08 00:00:00 1970 PST
- 98 | 8 | 00098 | Thu Apr 09 00:00:00 1970 PST
- 99 | 509 | 00099_update9 | Fri Apr 10 00:00:00 1970 PST
- 100 | 0 | 00100 | Thu Jan 01 00:00:00 1970 PST
- 101 | 1 | 00101 | Fri Jan 02 00:00:00 1970 PST
- 103 | 303 | 00103_update3 | Sun Jan 04 00:00:00 1970 PST
- 104 | 4 | 00104 | Mon Jan 05 00:00:00 1970 PST
- 106 | 6 | 00106 | Wed Jan 07 00:00:00 1970 PST
- 107 | 407 | 00107_update7 | Thu Jan 08 00:00:00 1970 PST
- 108 | 8 | 00108 | Fri Jan 09 00:00:00 1970 PST
- 109 | 509 | 00109_update9 | Sat Jan 10 00:00:00 1970 PST
- 110 | 0 | 00110 | Sun Jan 11 00:00:00 1970 PST
- 111 | 1 | 00111 | Mon Jan 12 00:00:00 1970 PST
- 113 | 303 | 00113_update3 | Wed Jan 14 00:00:00 1970 PST
- 114 | 4 | 00114 | Thu Jan 15 00:00:00 1970 PST
- 116 | 6 | 00116 | Sat Jan 17 00:00:00 1970 PST
- 117 | 407 | 00117_update7 | Sun Jan 18 00:00:00 1970 PST
- 118 | 8 | 00118 | Mon Jan 19 00:00:00 1970 PST
- 119 | 509 | 00119_update9 | Tue Jan 20 00:00:00 1970 PST
- 120 | 0 | 00120 | Wed Jan 21 00:00:00 1970 PST
- 121 | 1 | 00121 | Thu Jan 22 00:00:00 1970 PST
- 123 | 303 | 00123_update3 | Sat Jan 24 00:00:00 1970 PST
- 124 | 4 | 00124 | Sun Jan 25 00:00:00 1970 PST
- 126 | 6 | 00126 | Tue Jan 27 00:00:00 1970 PST
- 127 | 407 | 00127_update7 | Wed Jan 28 00:00:00 1970 PST
- 128 | 8 | 00128 | Thu Jan 29 00:00:00 1970 PST
- 129 | 509 | 00129_update9 | Fri Jan 30 00:00:00 1970 PST
- 130 | 0 | 00130 | Sat Jan 31 00:00:00 1970 PST
- 131 | 1 | 00131 | Sun Feb 01 00:00:00 1970 PST
- 133 | 303 | 00133_update3 | Tue Feb 03 00:00:00 1970 PST
- 134 | 4 | 00134 | Wed Feb 04 00:00:00 1970 PST
- 136 | 6 | 00136 | Fri Feb 06 00:00:00 1970 PST
- 137 | 407 | 00137_update7 | Sat Feb 07 00:00:00 1970 PST
- 138 | 8 | 00138 | Sun Feb 08 00:00:00 1970 PST
- 139 | 509 | 00139_update9 | Mon Feb 09 00:00:00 1970 PST
- 140 | 0 | 00140 | Tue Feb 10 00:00:00 1970 PST
- 141 | 1 | 00141 | Wed Feb 11 00:00:00 1970 PST
- 143 | 303 | 00143_update3 | Fri Feb 13 00:00:00 1970 PST
- 144 | 4 | 00144 | Sat Feb 14 00:00:00 1970 PST
- 146 | 6 | 00146 | Mon Feb 16 00:00:00 1970 PST
- 147 | 407 | 00147_update7 | Tue Feb 17 00:00:00 1970 PST
- 148 | 8 | 00148 | Wed Feb 18 00:00:00 1970 PST
- 149 | 509 | 00149_update9 | Thu Feb 19 00:00:00 1970 PST
- 150 | 0 | 00150 | Fri Feb 20 00:00:00 1970 PST
- 151 | 1 | 00151 | Sat Feb 21 00:00:00 1970 PST
- 153 | 303 | 00153_update3 | Mon Feb 23 00:00:00 1970 PST
- 154 | 4 | 00154 | Tue Feb 24 00:00:00 1970 PST
- 156 | 6 | 00156 | Thu Feb 26 00:00:00 1970 PST
- 157 | 407 | 00157_update7 | Fri Feb 27 00:00:00 1970 PST
- 158 | 8 | 00158 | Sat Feb 28 00:00:00 1970 PST
- 159 | 509 | 00159_update9 | Sun Mar 01 00:00:00 1970 PST
- 160 | 0 | 00160 | Mon Mar 02 00:00:00 1970 PST
- 161 | 1 | 00161 | Tue Mar 03 00:00:00 1970 PST
- 163 | 303 | 00163_update3 | Thu Mar 05 00:00:00 1970 PST
- 164 | 4 | 00164 | Fri Mar 06 00:00:00 1970 PST
- 166 | 6 | 00166 | Sun Mar 08 00:00:00 1970 PST
- 167 | 407 | 00167_update7 | Mon Mar 09 00:00:00 1970 PST
- 168 | 8 | 00168 | Tue Mar 10 00:00:00 1970 PST
- 169 | 509 | 00169_update9 | Wed Mar 11 00:00:00 1970 PST
- 170 | 0 | 00170 | Thu Mar 12 00:00:00 1970 PST
- 171 | 1 | 00171 | Fri Mar 13 00:00:00 1970 PST
- 173 | 303 | 00173_update3 | Sun Mar 15 00:00:00 1970 PST
- 174 | 4 | 00174 | Mon Mar 16 00:00:00 1970 PST
- 176 | 6 | 00176 | Wed Mar 18 00:00:00 1970 PST
- 177 | 407 | 00177_update7 | Thu Mar 19 00:00:00 1970 PST
- 178 | 8 | 00178 | Fri Mar 20 00:00:00 1970 PST
- 179 | 509 | 00179_update9 | Sat Mar 21 00:00:00 1970 PST
- 180 | 0 | 00180 | Sun Mar 22 00:00:00 1970 PST
- 181 | 1 | 00181 | Mon Mar 23 00:00:00 1970 PST
- 183 | 303 | 00183_update3 | Wed Mar 25 00:00:00 1970 PST
- 184 | 4 | 00184 | Thu Mar 26 00:00:00 1970 PST
- 186 | 6 | 00186 | Sat Mar 28 00:00:00 1970 PST
- 187 | 407 | 00187_update7 | Sun Mar 29 00:00:00 1970 PST
- 188 | 8 | 00188 | Mon Mar 30 00:00:00 1970 PST
- 189 | 509 | 00189_update9 | Tue Mar 31 00:00:00 1970 PST
- 190 | 0 | 00190 | Wed Apr 01 00:00:00 1970 PST
- 191 | 1 | 00191 | Thu Apr 02 00:00:00 1970 PST
- 193 | 303 | 00193_update3 | Sat Apr 04 00:00:00 1970 PST
- 194 | 4 | 00194 | Sun Apr 05 00:00:00 1970 PST
- 196 | 6 | 00196 | Tue Apr 07 00:00:00 1970 PST
- 197 | 407 | 00197_update7 | Wed Apr 08 00:00:00 1970 PST
- 198 | 8 | 00198 | Thu Apr 09 00:00:00 1970 PST
- 199 | 509 | 00199_update9 | Fri Apr 10 00:00:00 1970 PST
- 200 | 0 | 00200 | Thu Jan 01 00:00:00 1970 PST
- 201 | 1 | 00201 | Fri Jan 02 00:00:00 1970 PST
- 203 | 303 | 00203_update3 | Sun Jan 04 00:00:00 1970 PST
- 204 | 4 | 00204 | Mon Jan 05 00:00:00 1970 PST
- 206 | 6 | 00206 | Wed Jan 07 00:00:00 1970 PST
- 207 | 407 | 00207_update7 | Thu Jan 08 00:00:00 1970 PST
- 208 | 8 | 00208 | Fri Jan 09 00:00:00 1970 PST
- 209 | 509 | 00209_update9 | Sat Jan 10 00:00:00 1970 PST
- 210 | 0 | 00210 | Sun Jan 11 00:00:00 1970 PST
- 211 | 1 | 00211 | Mon Jan 12 00:00:00 1970 PST
- 213 | 303 | 00213_update3 | Wed Jan 14 00:00:00 1970 PST
- 214 | 4 | 00214 | Thu Jan 15 00:00:00 1970 PST
- 216 | 6 | 00216 | Sat Jan 17 00:00:00 1970 PST
- 217 | 407 | 00217_update7 | Sun Jan 18 00:00:00 1970 PST
- 218 | 8 | 00218 | Mon Jan 19 00:00:00 1970 PST
- 219 | 509 | 00219_update9 | Tue Jan 20 00:00:00 1970 PST
- 220 | 0 | 00220 | Wed Jan 21 00:00:00 1970 PST
- 221 | 1 | 00221 | Thu Jan 22 00:00:00 1970 PST
- 223 | 303 | 00223_update3 | Sat Jan 24 00:00:00 1970 PST
- 224 | 4 | 00224 | Sun Jan 25 00:00:00 1970 PST
- 226 | 6 | 00226 | Tue Jan 27 00:00:00 1970 PST
- 227 | 407 | 00227_update7 | Wed Jan 28 00:00:00 1970 PST
- 228 | 8 | 00228 | Thu Jan 29 00:00:00 1970 PST
- 229 | 509 | 00229_update9 | Fri Jan 30 00:00:00 1970 PST
- 230 | 0 | 00230 | Sat Jan 31 00:00:00 1970 PST
- 231 | 1 | 00231 | Sun Feb 01 00:00:00 1970 PST
- 233 | 303 | 00233_update3 | Tue Feb 03 00:00:00 1970 PST
- 234 | 4 | 00234 | Wed Feb 04 00:00:00 1970 PST
- 236 | 6 | 00236 | Fri Feb 06 00:00:00 1970 PST
- 237 | 407 | 00237_update7 | Sat Feb 07 00:00:00 1970 PST
- 238 | 8 | 00238 | Sun Feb 08 00:00:00 1970 PST
- 239 | 509 | 00239_update9 | Mon Feb 09 00:00:00 1970 PST
- 240 | 0 | 00240 | Tue Feb 10 00:00:00 1970 PST
- 241 | 1 | 00241 | Wed Feb 11 00:00:00 1970 PST
- 243 | 303 | 00243_update3 | Fri Feb 13 00:00:00 1970 PST
- 244 | 4 | 00244 | Sat Feb 14 00:00:00 1970 PST
- 246 | 6 | 00246 | Mon Feb 16 00:00:00 1970 PST
- 247 | 407 | 00247_update7 | Tue Feb 17 00:00:00 1970 PST
- 248 | 8 | 00248 | Wed Feb 18 00:00:00 1970 PST
- 249 | 509 | 00249_update9 | Thu Feb 19 00:00:00 1970 PST
- 250 | 0 | 00250 | Fri Feb 20 00:00:00 1970 PST
- 251 | 1 | 00251 | Sat Feb 21 00:00:00 1970 PST
- 253 | 303 | 00253_update3 | Mon Feb 23 00:00:00 1970 PST
- 254 | 4 | 00254 | Tue Feb 24 00:00:00 1970 PST
- 256 | 6 | 00256 | Thu Feb 26 00:00:00 1970 PST
- 257 | 407 | 00257_update7 | Fri Feb 27 00:00:00 1970 PST
- 258 | 8 | 00258 | Sat Feb 28 00:00:00 1970 PST
- 259 | 509 | 00259_update9 | Sun Mar 01 00:00:00 1970 PST
- 260 | 0 | 00260 | Mon Mar 02 00:00:00 1970 PST
- 261 | 1 | 00261 | Tue Mar 03 00:00:00 1970 PST
- 263 | 303 | 00263_update3 | Thu Mar 05 00:00:00 1970 PST
- 264 | 4 | 00264 | Fri Mar 06 00:00:00 1970 PST
- 266 | 6 | 00266 | Sun Mar 08 00:00:00 1970 PST
- 267 | 407 | 00267_update7 | Mon Mar 09 00:00:00 1970 PST
- 268 | 8 | 00268 | Tue Mar 10 00:00:00 1970 PST
- 269 | 509 | 00269_update9 | Wed Mar 11 00:00:00 1970 PST
- 270 | 0 | 00270 | Thu Mar 12 00:00:00 1970 PST
- 271 | 1 | 00271 | Fri Mar 13 00:00:00 1970 PST
- 273 | 303 | 00273_update3 | Sun Mar 15 00:00:00 1970 PST
- 274 | 4 | 00274 | Mon Mar 16 00:00:00 1970 PST
- 276 | 6 | 00276 | Wed Mar 18 00:00:00 1970 PST
- 277 | 407 | 00277_update7 | Thu Mar 19 00:00:00 1970 PST
- 278 | 8 | 00278 | Fri Mar 20 00:00:00 1970 PST
- 279 | 509 | 00279_update9 | Sat Mar 21 00:00:00 1970 PST
- 280 | 0 | 00280 | Sun Mar 22 00:00:00 1970 PST
- 281 | 1 | 00281 | Mon Mar 23 00:00:00 1970 PST
- 283 | 303 | 00283_update3 | Wed Mar 25 00:00:00 1970 PST
- 284 | 4 | 00284 | Thu Mar 26 00:00:00 1970 PST
- 286 | 6 | 00286 | Sat Mar 28 00:00:00 1970 PST
- 287 | 407 | 00287_update7 | Sun Mar 29 00:00:00 1970 PST
- 288 | 8 | 00288 | Mon Mar 30 00:00:00 1970 PST
- 289 | 509 | 00289_update9 | Tue Mar 31 00:00:00 1970 PST
- 290 | 0 | 00290 | Wed Apr 01 00:00:00 1970 PST
- 291 | 1 | 00291 | Thu Apr 02 00:00:00 1970 PST
- 293 | 303 | 00293_update3 | Sat Apr 04 00:00:00 1970 PST
- 294 | 4 | 00294 | Sun Apr 05 00:00:00 1970 PST
- 296 | 6 | 00296 | Tue Apr 07 00:00:00 1970 PST
- 297 | 407 | 00297_update7 | Wed Apr 08 00:00:00 1970 PST
- 298 | 8 | 00298 | Thu Apr 09 00:00:00 1970 PST
- 299 | 509 | 00299_update9 | Fri Apr 10 00:00:00 1970 PST
- 300 | 0 | 00300 | Thu Jan 01 00:00:00 1970 PST
- 301 | 1 | 00301 | Fri Jan 02 00:00:00 1970 PST
- 303 | 303 | 00303_update3 | Sun Jan 04 00:00:00 1970 PST
- 304 | 4 | 00304 | Mon Jan 05 00:00:00 1970 PST
- 306 | 6 | 00306 | Wed Jan 07 00:00:00 1970 PST
- 307 | 407 | 00307_update7 | Thu Jan 08 00:00:00 1970 PST
- 308 | 8 | 00308 | Fri Jan 09 00:00:00 1970 PST
- 309 | 509 | 00309_update9 | Sat Jan 10 00:00:00 1970 PST
- 310 | 0 | 00310 | Sun Jan 11 00:00:00 1970 PST
- 311 | 1 | 00311 | Mon Jan 12 00:00:00 1970 PST
- 313 | 303 | 00313_update3 | Wed Jan 14 00:00:00 1970 PST
- 314 | 4 | 00314 | Thu Jan 15 00:00:00 1970 PST
- 316 | 6 | 00316 | Sat Jan 17 00:00:00 1970 PST
- 317 | 407 | 00317_update7 | Sun Jan 18 00:00:00 1970 PST
- 318 | 8 | 00318 | Mon Jan 19 00:00:00 1970 PST
- 319 | 509 | 00319_update9 | Tue Jan 20 00:00:00 1970 PST
- 320 | 0 | 00320 | Wed Jan 21 00:00:00 1970 PST
- 321 | 1 | 00321 | Thu Jan 22 00:00:00 1970 PST
- 323 | 303 | 00323_update3 | Sat Jan 24 00:00:00 1970 PST
- 324 | 4 | 00324 | Sun Jan 25 00:00:00 1970 PST
- 326 | 6 | 00326 | Tue Jan 27 00:00:00 1970 PST
- 327 | 407 | 00327_update7 | Wed Jan 28 00:00:00 1970 PST
- 328 | 8 | 00328 | Thu Jan 29 00:00:00 1970 PST
- 329 | 509 | 00329_update9 | Fri Jan 30 00:00:00 1970 PST
- 330 | 0 | 00330 | Sat Jan 31 00:00:00 1970 PST
- 331 | 1 | 00331 | Sun Feb 01 00:00:00 1970 PST
- 333 | 303 | 00333_update3 | Tue Feb 03 00:00:00 1970 PST
- 334 | 4 | 00334 | Wed Feb 04 00:00:00 1970 PST
- 336 | 6 | 00336 | Fri Feb 06 00:00:00 1970 PST
- 337 | 407 | 00337_update7 | Sat Feb 07 00:00:00 1970 PST
- 338 | 8 | 00338 | Sun Feb 08 00:00:00 1970 PST
- 339 | 509 | 00339_update9 | Mon Feb 09 00:00:00 1970 PST
- 340 | 0 | 00340 | Tue Feb 10 00:00:00 1970 PST
- 341 | 1 | 00341 | Wed Feb 11 00:00:00 1970 PST
- 343 | 303 | 00343_update3 | Fri Feb 13 00:00:00 1970 PST
- 344 | 4 | 00344 | Sat Feb 14 00:00:00 1970 PST
- 346 | 6 | 00346 | Mon Feb 16 00:00:00 1970 PST
- 347 | 407 | 00347_update7 | Tue Feb 17 00:00:00 1970 PST
- 348 | 8 | 00348 | Wed Feb 18 00:00:00 1970 PST
- 349 | 509 | 00349_update9 | Thu Feb 19 00:00:00 1970 PST
- 350 | 0 | 00350 | Fri Feb 20 00:00:00 1970 PST
- 351 | 1 | 00351 | Sat Feb 21 00:00:00 1970 PST
- 353 | 303 | 00353_update3 | Mon Feb 23 00:00:00 1970 PST
- 354 | 4 | 00354 | Tue Feb 24 00:00:00 1970 PST
- 356 | 6 | 00356 | Thu Feb 26 00:00:00 1970 PST
- 357 | 407 | 00357_update7 | Fri Feb 27 00:00:00 1970 PST
- 358 | 8 | 00358 | Sat Feb 28 00:00:00 1970 PST
- 359 | 509 | 00359_update9 | Sun Mar 01 00:00:00 1970 PST
- 360 | 0 | 00360 | Mon Mar 02 00:00:00 1970 PST
- 361 | 1 | 00361 | Tue Mar 03 00:00:00 1970 PST
- 363 | 303 | 00363_update3 | Thu Mar 05 00:00:00 1970 PST
- 364 | 4 | 00364 | Fri Mar 06 00:00:00 1970 PST
- 366 | 6 | 00366 | Sun Mar 08 00:00:00 1970 PST
- 367 | 407 | 00367_update7 | Mon Mar 09 00:00:00 1970 PST
- 368 | 8 | 00368 | Tue Mar 10 00:00:00 1970 PST
- 369 | 509 | 00369_update9 | Wed Mar 11 00:00:00 1970 PST
- 370 | 0 | 00370 | Thu Mar 12 00:00:00 1970 PST
- 371 | 1 | 00371 | Fri Mar 13 00:00:00 1970 PST
- 373 | 303 | 00373_update3 | Sun Mar 15 00:00:00 1970 PST
- 374 | 4 | 00374 | Mon Mar 16 00:00:00 1970 PST
- 376 | 6 | 00376 | Wed Mar 18 00:00:00 1970 PST
- 377 | 407 | 00377_update7 | Thu Mar 19 00:00:00 1970 PST
- 378 | 8 | 00378 | Fri Mar 20 00:00:00 1970 PST
- 379 | 509 | 00379_update9 | Sat Mar 21 00:00:00 1970 PST
- 380 | 0 | 00380 | Sun Mar 22 00:00:00 1970 PST
- 381 | 1 | 00381 | Mon Mar 23 00:00:00 1970 PST
- 383 | 303 | 00383_update3 | Wed Mar 25 00:00:00 1970 PST
- 384 | 4 | 00384 | Thu Mar 26 00:00:00 1970 PST
- 386 | 6 | 00386 | Sat Mar 28 00:00:00 1970 PST
- 387 | 407 | 00387_update7 | Sun Mar 29 00:00:00 1970 PST
- 388 | 8 | 00388 | Mon Mar 30 00:00:00 1970 PST
- 389 | 509 | 00389_update9 | Tue Mar 31 00:00:00 1970 PST
- 390 | 0 | 00390 | Wed Apr 01 00:00:00 1970 PST
- 391 | 1 | 00391 | Thu Apr 02 00:00:00 1970 PST
- 393 | 303 | 00393_update3 | Sat Apr 04 00:00:00 1970 PST
- 394 | 4 | 00394 | Sun Apr 05 00:00:00 1970 PST
- 396 | 6 | 00396 | Tue Apr 07 00:00:00 1970 PST
- 397 | 407 | 00397_update7 | Wed Apr 08 00:00:00 1970 PST
- 398 | 8 | 00398 | Thu Apr 09 00:00:00 1970 PST
- 399 | 509 | 00399_update9 | Fri Apr 10 00:00:00 1970 PST
- 400 | 0 | 00400 | Thu Jan 01 00:00:00 1970 PST
- 401 | 1 | 00401 | Fri Jan 02 00:00:00 1970 PST
- 403 | 303 | 00403_update3 | Sun Jan 04 00:00:00 1970 PST
- 404 | 4 | 00404 | Mon Jan 05 00:00:00 1970 PST
- 406 | 6 | 00406 | Wed Jan 07 00:00:00 1970 PST
- 407 | 407 | 00407_update7 | Thu Jan 08 00:00:00 1970 PST
- 408 | 8 | 00408 | Fri Jan 09 00:00:00 1970 PST
- 409 | 509 | 00409_update9 | Sat Jan 10 00:00:00 1970 PST
- 410 | 0 | 00410 | Sun Jan 11 00:00:00 1970 PST
- 411 | 1 | 00411 | Mon Jan 12 00:00:00 1970 PST
- 413 | 303 | 00413_update3 | Wed Jan 14 00:00:00 1970 PST
- 414 | 4 | 00414 | Thu Jan 15 00:00:00 1970 PST
- 416 | 6 | 00416 | Sat Jan 17 00:00:00 1970 PST
- 417 | 407 | 00417_update7 | Sun Jan 18 00:00:00 1970 PST
- 418 | 8 | 00418 | Mon Jan 19 00:00:00 1970 PST
- 419 | 509 | 00419_update9 | Tue Jan 20 00:00:00 1970 PST
- 420 | 0 | 00420 | Wed Jan 21 00:00:00 1970 PST
- 421 | 1 | 00421 | Thu Jan 22 00:00:00 1970 PST
- 423 | 303 | 00423_update3 | Sat Jan 24 00:00:00 1970 PST
- 424 | 4 | 00424 | Sun Jan 25 00:00:00 1970 PST
- 426 | 6 | 00426 | Tue Jan 27 00:00:00 1970 PST
- 427 | 407 | 00427_update7 | Wed Jan 28 00:00:00 1970 PST
- 428 | 8 | 00428 | Thu Jan 29 00:00:00 1970 PST
- 429 | 509 | 00429_update9 | Fri Jan 30 00:00:00 1970 PST
- 430 | 0 | 00430 | Sat Jan 31 00:00:00 1970 PST
- 431 | 1 | 00431 | Sun Feb 01 00:00:00 1970 PST
- 433 | 303 | 00433_update3 | Tue Feb 03 00:00:00 1970 PST
- 434 | 4 | 00434 | Wed Feb 04 00:00:00 1970 PST
- 436 | 6 | 00436 | Fri Feb 06 00:00:00 1970 PST
- 437 | 407 | 00437_update7 | Sat Feb 07 00:00:00 1970 PST
- 438 | 8 | 00438 | Sun Feb 08 00:00:00 1970 PST
- 439 | 509 | 00439_update9 | Mon Feb 09 00:00:00 1970 PST
- 440 | 0 | 00440 | Tue Feb 10 00:00:00 1970 PST
- 441 | 1 | 00441 | Wed Feb 11 00:00:00 1970 PST
- 443 | 303 | 00443_update3 | Fri Feb 13 00:00:00 1970 PST
- 444 | 4 | 00444 | Sat Feb 14 00:00:00 1970 PST
- 446 | 6 | 00446 | Mon Feb 16 00:00:00 1970 PST
- 447 | 407 | 00447_update7 | Tue Feb 17 00:00:00 1970 PST
- 448 | 8 | 00448 | Wed Feb 18 00:00:00 1970 PST
- 449 | 509 | 00449_update9 | Thu Feb 19 00:00:00 1970 PST
- 450 | 0 | 00450 | Fri Feb 20 00:00:00 1970 PST
- 451 | 1 | 00451 | Sat Feb 21 00:00:00 1970 PST
- 453 | 303 | 00453_update3 | Mon Feb 23 00:00:00 1970 PST
- 454 | 4 | 00454 | Tue Feb 24 00:00:00 1970 PST
- 456 | 6 | 00456 | Thu Feb 26 00:00:00 1970 PST
- 457 | 407 | 00457_update7 | Fri Feb 27 00:00:00 1970 PST
- 458 | 8 | 00458 | Sat Feb 28 00:00:00 1970 PST
- 459 | 509 | 00459_update9 | Sun Mar 01 00:00:00 1970 PST
- 460 | 0 | 00460 | Mon Mar 02 00:00:00 1970 PST
- 461 | 1 | 00461 | Tue Mar 03 00:00:00 1970 PST
- 463 | 303 | 00463_update3 | Thu Mar 05 00:00:00 1970 PST
- 464 | 4 | 00464 | Fri Mar 06 00:00:00 1970 PST
- 466 | 6 | 00466 | Sun Mar 08 00:00:00 1970 PST
- 467 | 407 | 00467_update7 | Mon Mar 09 00:00:00 1970 PST
- 468 | 8 | 00468 | Tue Mar 10 00:00:00 1970 PST
- 469 | 509 | 00469_update9 | Wed Mar 11 00:00:00 1970 PST
- 470 | 0 | 00470 | Thu Mar 12 00:00:00 1970 PST
- 471 | 1 | 00471 | Fri Mar 13 00:00:00 1970 PST
- 473 | 303 | 00473_update3 | Sun Mar 15 00:00:00 1970 PST
- 474 | 4 | 00474 | Mon Mar 16 00:00:00 1970 PST
- 476 | 6 | 00476 | Wed Mar 18 00:00:00 1970 PST
- 477 | 407 | 00477_update7 | Thu Mar 19 00:00:00 1970 PST
- 478 | 8 | 00478 | Fri Mar 20 00:00:00 1970 PST
- 479 | 509 | 00479_update9 | Sat Mar 21 00:00:00 1970 PST
- 480 | 0 | 00480 | Sun Mar 22 00:00:00 1970 PST
- 481 | 1 | 00481 | Mon Mar 23 00:00:00 1970 PST
- 483 | 303 | 00483_update3 | Wed Mar 25 00:00:00 1970 PST
- 484 | 4 | 00484 | Thu Mar 26 00:00:00 1970 PST
- 486 | 6 | 00486 | Sat Mar 28 00:00:00 1970 PST
- 487 | 407 | 00487_update7 | Sun Mar 29 00:00:00 1970 PST
- 488 | 8 | 00488 | Mon Mar 30 00:00:00 1970 PST
- 489 | 509 | 00489_update9 | Tue Mar 31 00:00:00 1970 PST
- 490 | 0 | 00490 | Wed Apr 01 00:00:00 1970 PST
- 491 | 1 | 00491 | Thu Apr 02 00:00:00 1970 PST
- 493 | 303 | 00493_update3 | Sat Apr 04 00:00:00 1970 PST
- 494 | 4 | 00494 | Sun Apr 05 00:00:00 1970 PST
- 496 | 6 | 00496 | Tue Apr 07 00:00:00 1970 PST
- 497 | 407 | 00497_update7 | Wed Apr 08 00:00:00 1970 PST
- 498 | 8 | 00498 | Thu Apr 09 00:00:00 1970 PST
- 499 | 509 | 00499_update9 | Fri Apr 10 00:00:00 1970 PST
- 500 | 0 | 00500 | Thu Jan 01 00:00:00 1970 PST
- 501 | 1 | 00501 | Fri Jan 02 00:00:00 1970 PST
- 503 | 303 | 00503_update3 | Sun Jan 04 00:00:00 1970 PST
- 504 | 4 | 00504 | Mon Jan 05 00:00:00 1970 PST
- 506 | 6 | 00506 | Wed Jan 07 00:00:00 1970 PST
- 507 | 407 | 00507_update7 | Thu Jan 08 00:00:00 1970 PST
- 508 | 8 | 00508 | Fri Jan 09 00:00:00 1970 PST
- 509 | 509 | 00509_update9 | Sat Jan 10 00:00:00 1970 PST
- 510 | 0 | 00510 | Sun Jan 11 00:00:00 1970 PST
- 511 | 1 | 00511 | Mon Jan 12 00:00:00 1970 PST
- 513 | 303 | 00513_update3 | Wed Jan 14 00:00:00 1970 PST
- 514 | 4 | 00514 | Thu Jan 15 00:00:00 1970 PST
- 516 | 6 | 00516 | Sat Jan 17 00:00:00 1970 PST
- 517 | 407 | 00517_update7 | Sun Jan 18 00:00:00 1970 PST
- 518 | 8 | 00518 | Mon Jan 19 00:00:00 1970 PST
- 519 | 509 | 00519_update9 | Tue Jan 20 00:00:00 1970 PST
- 520 | 0 | 00520 | Wed Jan 21 00:00:00 1970 PST
- 521 | 1 | 00521 | Thu Jan 22 00:00:00 1970 PST
- 523 | 303 | 00523_update3 | Sat Jan 24 00:00:00 1970 PST
- 524 | 4 | 00524 | Sun Jan 25 00:00:00 1970 PST
- 526 | 6 | 00526 | Tue Jan 27 00:00:00 1970 PST
- 527 | 407 | 00527_update7 | Wed Jan 28 00:00:00 1970 PST
- 528 | 8 | 00528 | Thu Jan 29 00:00:00 1970 PST
- 529 | 509 | 00529_update9 | Fri Jan 30 00:00:00 1970 PST
- 530 | 0 | 00530 | Sat Jan 31 00:00:00 1970 PST
- 531 | 1 | 00531 | Sun Feb 01 00:00:00 1970 PST
- 533 | 303 | 00533_update3 | Tue Feb 03 00:00:00 1970 PST
- 534 | 4 | 00534 | Wed Feb 04 00:00:00 1970 PST
- 536 | 6 | 00536 | Fri Feb 06 00:00:00 1970 PST
- 537 | 407 | 00537_update7 | Sat Feb 07 00:00:00 1970 PST
- 538 | 8 | 00538 | Sun Feb 08 00:00:00 1970 PST
- 539 | 509 | 00539_update9 | Mon Feb 09 00:00:00 1970 PST
- 540 | 0 | 00540 | Tue Feb 10 00:00:00 1970 PST
- 541 | 1 | 00541 | Wed Feb 11 00:00:00 1970 PST
- 543 | 303 | 00543_update3 | Fri Feb 13 00:00:00 1970 PST
- 544 | 4 | 00544 | Sat Feb 14 00:00:00 1970 PST
- 546 | 6 | 00546 | Mon Feb 16 00:00:00 1970 PST
- 547 | 407 | 00547_update7 | Tue Feb 17 00:00:00 1970 PST
- 548 | 8 | 00548 | Wed Feb 18 00:00:00 1970 PST
- 549 | 509 | 00549_update9 | Thu Feb 19 00:00:00 1970 PST
- 550 | 0 | 00550 | Fri Feb 20 00:00:00 1970 PST
- 551 | 1 | 00551 | Sat Feb 21 00:00:00 1970 PST
- 553 | 303 | 00553_update3 | Mon Feb 23 00:00:00 1970 PST
- 554 | 4 | 00554 | Tue Feb 24 00:00:00 1970 PST
- 556 | 6 | 00556 | Thu Feb 26 00:00:00 1970 PST
- 557 | 407 | 00557_update7 | Fri Feb 27 00:00:00 1970 PST
- 558 | 8 | 00558 | Sat Feb 28 00:00:00 1970 PST
- 559 | 509 | 00559_update9 | Sun Mar 01 00:00:00 1970 PST
- 560 | 0 | 00560 | Mon Mar 02 00:00:00 1970 PST
- 561 | 1 | 00561 | Tue Mar 03 00:00:00 1970 PST
- 563 | 303 | 00563_update3 | Thu Mar 05 00:00:00 1970 PST
- 564 | 4 | 00564 | Fri Mar 06 00:00:00 1970 PST
- 566 | 6 | 00566 | Sun Mar 08 00:00:00 1970 PST
- 567 | 407 | 00567_update7 | Mon Mar 09 00:00:00 1970 PST
- 568 | 8 | 00568 | Tue Mar 10 00:00:00 1970 PST
- 569 | 509 | 00569_update9 | Wed Mar 11 00:00:00 1970 PST
- 570 | 0 | 00570 | Thu Mar 12 00:00:00 1970 PST
- 571 | 1 | 00571 | Fri Mar 13 00:00:00 1970 PST
- 573 | 303 | 00573_update3 | Sun Mar 15 00:00:00 1970 PST
- 574 | 4 | 00574 | Mon Mar 16 00:00:00 1970 PST
- 576 | 6 | 00576 | Wed Mar 18 00:00:00 1970 PST
- 577 | 407 | 00577_update7 | Thu Mar 19 00:00:00 1970 PST
- 578 | 8 | 00578 | Fri Mar 20 00:00:00 1970 PST
- 579 | 509 | 00579_update9 | Sat Mar 21 00:00:00 1970 PST
- 580 | 0 | 00580 | Sun Mar 22 00:00:00 1970 PST
- 581 | 1 | 00581 | Mon Mar 23 00:00:00 1970 PST
- 583 | 303 | 00583_update3 | Wed Mar 25 00:00:00 1970 PST
- 584 | 4 | 00584 | Thu Mar 26 00:00:00 1970 PST
- 586 | 6 | 00586 | Sat Mar 28 00:00:00 1970 PST
- 587 | 407 | 00587_update7 | Sun Mar 29 00:00:00 1970 PST
- 588 | 8 | 00588 | Mon Mar 30 00:00:00 1970 PST
- 589 | 509 | 00589_update9 | Tue Mar 31 00:00:00 1970 PST
- 590 | 0 | 00590 | Wed Apr 01 00:00:00 1970 PST
- 591 | 1 | 00591 | Thu Apr 02 00:00:00 1970 PST
- 593 | 303 | 00593_update3 | Sat Apr 04 00:00:00 1970 PST
- 594 | 4 | 00594 | Sun Apr 05 00:00:00 1970 PST
- 596 | 6 | 00596 | Tue Apr 07 00:00:00 1970 PST
- 597 | 407 | 00597_update7 | Wed Apr 08 00:00:00 1970 PST
- 598 | 8 | 00598 | Thu Apr 09 00:00:00 1970 PST
- 599 | 509 | 00599_update9 | Fri Apr 10 00:00:00 1970 PST
- 600 | 0 | 00600 | Thu Jan 01 00:00:00 1970 PST
- 601 | 1 | 00601 | Fri Jan 02 00:00:00 1970 PST
- 603 | 303 | 00603_update3 | Sun Jan 04 00:00:00 1970 PST
- 604 | 4 | 00604 | Mon Jan 05 00:00:00 1970 PST
- 606 | 6 | 00606 | Wed Jan 07 00:00:00 1970 PST
- 607 | 407 | 00607_update7 | Thu Jan 08 00:00:00 1970 PST
- 608 | 8 | 00608 | Fri Jan 09 00:00:00 1970 PST
- 609 | 509 | 00609_update9 | Sat Jan 10 00:00:00 1970 PST
- 610 | 0 | 00610 | Sun Jan 11 00:00:00 1970 PST
- 611 | 1 | 00611 | Mon Jan 12 00:00:00 1970 PST
- 613 | 303 | 00613_update3 | Wed Jan 14 00:00:00 1970 PST
- 614 | 4 | 00614 | Thu Jan 15 00:00:00 1970 PST
- 616 | 6 | 00616 | Sat Jan 17 00:00:00 1970 PST
- 617 | 407 | 00617_update7 | Sun Jan 18 00:00:00 1970 PST
- 618 | 8 | 00618 | Mon Jan 19 00:00:00 1970 PST
- 619 | 509 | 00619_update9 | Tue Jan 20 00:00:00 1970 PST
- 620 | 0 | 00620 | Wed Jan 21 00:00:00 1970 PST
- 621 | 1 | 00621 | Thu Jan 22 00:00:00 1970 PST
- 623 | 303 | 00623_update3 | Sat Jan 24 00:00:00 1970 PST
- 624 | 4 | 00624 | Sun Jan 25 00:00:00 1970 PST
- 626 | 6 | 00626 | Tue Jan 27 00:00:00 1970 PST
- 627 | 407 | 00627_update7 | Wed Jan 28 00:00:00 1970 PST
- 628 | 8 | 00628 | Thu Jan 29 00:00:00 1970 PST
- 629 | 509 | 00629_update9 | Fri Jan 30 00:00:00 1970 PST
- 630 | 0 | 00630 | Sat Jan 31 00:00:00 1970 PST
- 631 | 1 | 00631 | Sun Feb 01 00:00:00 1970 PST
- 633 | 303 | 00633_update3 | Tue Feb 03 00:00:00 1970 PST
- 634 | 4 | 00634 | Wed Feb 04 00:00:00 1970 PST
- 636 | 6 | 00636 | Fri Feb 06 00:00:00 1970 PST
- 637 | 407 | 00637_update7 | Sat Feb 07 00:00:00 1970 PST
- 638 | 8 | 00638 | Sun Feb 08 00:00:00 1970 PST
- 639 | 509 | 00639_update9 | Mon Feb 09 00:00:00 1970 PST
- 640 | 0 | 00640 | Tue Feb 10 00:00:00 1970 PST
- 641 | 1 | 00641 | Wed Feb 11 00:00:00 1970 PST
- 643 | 303 | 00643_update3 | Fri Feb 13 00:00:00 1970 PST
- 644 | 4 | 00644 | Sat Feb 14 00:00:00 1970 PST
- 646 | 6 | 00646 | Mon Feb 16 00:00:00 1970 PST
- 647 | 407 | 00647_update7 | Tue Feb 17 00:00:00 1970 PST
- 648 | 8 | 00648 | Wed Feb 18 00:00:00 1970 PST
- 649 | 509 | 00649_update9 | Thu Feb 19 00:00:00 1970 PST
- 650 | 0 | 00650 | Fri Feb 20 00:00:00 1970 PST
- 651 | 1 | 00651 | Sat Feb 21 00:00:00 1970 PST
- 653 | 303 | 00653_update3 | Mon Feb 23 00:00:00 1970 PST
- 654 | 4 | 00654 | Tue Feb 24 00:00:00 1970 PST
- 656 | 6 | 00656 | Thu Feb 26 00:00:00 1970 PST
- 657 | 407 | 00657_update7 | Fri Feb 27 00:00:00 1970 PST
- 658 | 8 | 00658 | Sat Feb 28 00:00:00 1970 PST
- 659 | 509 | 00659_update9 | Sun Mar 01 00:00:00 1970 PST
- 660 | 0 | 00660 | Mon Mar 02 00:00:00 1970 PST
- 661 | 1 | 00661 | Tue Mar 03 00:00:00 1970 PST
- 663 | 303 | 00663_update3 | Thu Mar 05 00:00:00 1970 PST
- 664 | 4 | 00664 | Fri Mar 06 00:00:00 1970 PST
- 666 | 6 | 00666 | Sun Mar 08 00:00:00 1970 PST
- 667 | 407 | 00667_update7 | Mon Mar 09 00:00:00 1970 PST
- 668 | 8 | 00668 | Tue Mar 10 00:00:00 1970 PST
- 669 | 509 | 00669_update9 | Wed Mar 11 00:00:00 1970 PST
- 670 | 0 | 00670 | Thu Mar 12 00:00:00 1970 PST
- 671 | 1 | 00671 | Fri Mar 13 00:00:00 1970 PST
- 673 | 303 | 00673_update3 | Sun Mar 15 00:00:00 1970 PST
- 674 | 4 | 00674 | Mon Mar 16 00:00:00 1970 PST
- 676 | 6 | 00676 | Wed Mar 18 00:00:00 1970 PST
- 677 | 407 | 00677_update7 | Thu Mar 19 00:00:00 1970 PST
- 678 | 8 | 00678 | Fri Mar 20 00:00:00 1970 PST
- 679 | 509 | 00679_update9 | Sat Mar 21 00:00:00 1970 PST
- 680 | 0 | 00680 | Sun Mar 22 00:00:00 1970 PST
- 681 | 1 | 00681 | Mon Mar 23 00:00:00 1970 PST
- 683 | 303 | 00683_update3 | Wed Mar 25 00:00:00 1970 PST
- 684 | 4 | 00684 | Thu Mar 26 00:00:00 1970 PST
- 686 | 6 | 00686 | Sat Mar 28 00:00:00 1970 PST
- 687 | 407 | 00687_update7 | Sun Mar 29 00:00:00 1970 PST
- 688 | 8 | 00688 | Mon Mar 30 00:00:00 1970 PST
- 689 | 509 | 00689_update9 | Tue Mar 31 00:00:00 1970 PST
- 690 | 0 | 00690 | Wed Apr 01 00:00:00 1970 PST
- 691 | 1 | 00691 | Thu Apr 02 00:00:00 1970 PST
- 693 | 303 | 00693_update3 | Sat Apr 04 00:00:00 1970 PST
- 694 | 4 | 00694 | Sun Apr 05 00:00:00 1970 PST
- 696 | 6 | 00696 | Tue Apr 07 00:00:00 1970 PST
- 697 | 407 | 00697_update7 | Wed Apr 08 00:00:00 1970 PST
- 698 | 8 | 00698 | Thu Apr 09 00:00:00 1970 PST
- 699 | 509 | 00699_update9 | Fri Apr 10 00:00:00 1970 PST
- 700 | 0 | 00700 | Thu Jan 01 00:00:00 1970 PST
- 701 | 1 | 00701 | Fri Jan 02 00:00:00 1970 PST
- 703 | 303 | 00703_update3 | Sun Jan 04 00:00:00 1970 PST
- 704 | 4 | 00704 | Mon Jan 05 00:00:00 1970 PST
- 706 | 6 | 00706 | Wed Jan 07 00:00:00 1970 PST
- 707 | 407 | 00707_update7 | Thu Jan 08 00:00:00 1970 PST
- 708 | 8 | 00708 | Fri Jan 09 00:00:00 1970 PST
- 709 | 509 | 00709_update9 | Sat Jan 10 00:00:00 1970 PST
- 710 | 0 | 00710 | Sun Jan 11 00:00:00 1970 PST
- 711 | 1 | 00711 | Mon Jan 12 00:00:00 1970 PST
- 713 | 303 | 00713_update3 | Wed Jan 14 00:00:00 1970 PST
- 714 | 4 | 00714 | Thu Jan 15 00:00:00 1970 PST
- 716 | 6 | 00716 | Sat Jan 17 00:00:00 1970 PST
- 717 | 407 | 00717_update7 | Sun Jan 18 00:00:00 1970 PST
- 718 | 8 | 00718 | Mon Jan 19 00:00:00 1970 PST
- 719 | 509 | 00719_update9 | Tue Jan 20 00:00:00 1970 PST
- 720 | 0 | 00720 | Wed Jan 21 00:00:00 1970 PST
- 721 | 1 | 00721 | Thu Jan 22 00:00:00 1970 PST
- 723 | 303 | 00723_update3 | Sat Jan 24 00:00:00 1970 PST
- 724 | 4 | 00724 | Sun Jan 25 00:00:00 1970 PST
- 726 | 6 | 00726 | Tue Jan 27 00:00:00 1970 PST
- 727 | 407 | 00727_update7 | Wed Jan 28 00:00:00 1970 PST
- 728 | 8 | 00728 | Thu Jan 29 00:00:00 1970 PST
- 729 | 509 | 00729_update9 | Fri Jan 30 00:00:00 1970 PST
- 730 | 0 | 00730 | Sat Jan 31 00:00:00 1970 PST
- 731 | 1 | 00731 | Sun Feb 01 00:00:00 1970 PST
- 733 | 303 | 00733_update3 | Tue Feb 03 00:00:00 1970 PST
- 734 | 4 | 00734 | Wed Feb 04 00:00:00 1970 PST
- 736 | 6 | 00736 | Fri Feb 06 00:00:00 1970 PST
- 737 | 407 | 00737_update7 | Sat Feb 07 00:00:00 1970 PST
- 738 | 8 | 00738 | Sun Feb 08 00:00:00 1970 PST
- 739 | 509 | 00739_update9 | Mon Feb 09 00:00:00 1970 PST
- 740 | 0 | 00740 | Tue Feb 10 00:00:00 1970 PST
- 741 | 1 | 00741 | Wed Feb 11 00:00:00 1970 PST
- 743 | 303 | 00743_update3 | Fri Feb 13 00:00:00 1970 PST
- 744 | 4 | 00744 | Sat Feb 14 00:00:00 1970 PST
- 746 | 6 | 00746 | Mon Feb 16 00:00:00 1970 PST
- 747 | 407 | 00747_update7 | Tue Feb 17 00:00:00 1970 PST
- 748 | 8 | 00748 | Wed Feb 18 00:00:00 1970 PST
- 749 | 509 | 00749_update9 | Thu Feb 19 00:00:00 1970 PST
- 750 | 0 | 00750 | Fri Feb 20 00:00:00 1970 PST
- 751 | 1 | 00751 | Sat Feb 21 00:00:00 1970 PST
- 753 | 303 | 00753_update3 | Mon Feb 23 00:00:00 1970 PST
- 754 | 4 | 00754 | Tue Feb 24 00:00:00 1970 PST
- 756 | 6 | 00756 | Thu Feb 26 00:00:00 1970 PST
- 757 | 407 | 00757_update7 | Fri Feb 27 00:00:00 1970 PST
- 758 | 8 | 00758 | Sat Feb 28 00:00:00 1970 PST
- 759 | 509 | 00759_update9 | Sun Mar 01 00:00:00 1970 PST
- 760 | 0 | 00760 | Mon Mar 02 00:00:00 1970 PST
- 761 | 1 | 00761 | Tue Mar 03 00:00:00 1970 PST
- 763 | 303 | 00763_update3 | Thu Mar 05 00:00:00 1970 PST
- 764 | 4 | 00764 | Fri Mar 06 00:00:00 1970 PST
- 766 | 6 | 00766 | Sun Mar 08 00:00:00 1970 PST
- 767 | 407 | 00767_update7 | Mon Mar 09 00:00:00 1970 PST
- 768 | 8 | 00768 | Tue Mar 10 00:00:00 1970 PST
- 769 | 509 | 00769_update9 | Wed Mar 11 00:00:00 1970 PST
- 770 | 0 | 00770 | Thu Mar 12 00:00:00 1970 PST
- 771 | 1 | 00771 | Fri Mar 13 00:00:00 1970 PST
- 773 | 303 | 00773_update3 | Sun Mar 15 00:00:00 1970 PST
- 774 | 4 | 00774 | Mon Mar 16 00:00:00 1970 PST
- 776 | 6 | 00776 | Wed Mar 18 00:00:00 1970 PST
- 777 | 407 | 00777_update7 | Thu Mar 19 00:00:00 1970 PST
- 778 | 8 | 00778 | Fri Mar 20 00:00:00 1970 PST
- 779 | 509 | 00779_update9 | Sat Mar 21 00:00:00 1970 PST
- 780 | 0 | 00780 | Sun Mar 22 00:00:00 1970 PST
- 781 | 1 | 00781 | Mon Mar 23 00:00:00 1970 PST
- 783 | 303 | 00783_update3 | Wed Mar 25 00:00:00 1970 PST
- 784 | 4 | 00784 | Thu Mar 26 00:00:00 1970 PST
- 786 | 6 | 00786 | Sat Mar 28 00:00:00 1970 PST
- 787 | 407 | 00787_update7 | Sun Mar 29 00:00:00 1970 PST
- 788 | 8 | 00788 | Mon Mar 30 00:00:00 1970 PST
- 789 | 509 | 00789_update9 | Tue Mar 31 00:00:00 1970 PST
- 790 | 0 | 00790 | Wed Apr 01 00:00:00 1970 PST
- 791 | 1 | 00791 | Thu Apr 02 00:00:00 1970 PST
- 793 | 303 | 00793_update3 | Sat Apr 04 00:00:00 1970 PST
- 794 | 4 | 00794 | Sun Apr 05 00:00:00 1970 PST
- 796 | 6 | 00796 | Tue Apr 07 00:00:00 1970 PST
- 797 | 407 | 00797_update7 | Wed Apr 08 00:00:00 1970 PST
- 798 | 8 | 00798 | Thu Apr 09 00:00:00 1970 PST
- 799 | 509 | 00799_update9 | Fri Apr 10 00:00:00 1970 PST
- 800 | 0 | 00800 | Thu Jan 01 00:00:00 1970 PST
- 801 | 1 | 00801 | Fri Jan 02 00:00:00 1970 PST
- 803 | 303 | 00803_update3 | Sun Jan 04 00:00:00 1970 PST
- 804 | 4 | 00804 | Mon Jan 05 00:00:00 1970 PST
- 806 | 6 | 00806 | Wed Jan 07 00:00:00 1970 PST
- 807 | 407 | 00807_update7 | Thu Jan 08 00:00:00 1970 PST
- 808 | 8 | 00808 | Fri Jan 09 00:00:00 1970 PST
- 809 | 509 | 00809_update9 | Sat Jan 10 00:00:00 1970 PST
- 810 | 0 | 00810 | Sun Jan 11 00:00:00 1970 PST
- 811 | 1 | 00811 | Mon Jan 12 00:00:00 1970 PST
- 813 | 303 | 00813_update3 | Wed Jan 14 00:00:00 1970 PST
- 814 | 4 | 00814 | Thu Jan 15 00:00:00 1970 PST
- 816 | 6 | 00816 | Sat Jan 17 00:00:00 1970 PST
- 817 | 407 | 00817_update7 | Sun Jan 18 00:00:00 1970 PST
- 818 | 8 | 00818 | Mon Jan 19 00:00:00 1970 PST
- 819 | 509 | 00819_update9 | Tue Jan 20 00:00:00 1970 PST
- 820 | 0 | 00820 | Wed Jan 21 00:00:00 1970 PST
- 821 | 1 | 00821 | Thu Jan 22 00:00:00 1970 PST
- 823 | 303 | 00823_update3 | Sat Jan 24 00:00:00 1970 PST
- 824 | 4 | 00824 | Sun Jan 25 00:00:00 1970 PST
- 826 | 6 | 00826 | Tue Jan 27 00:00:00 1970 PST
- 827 | 407 | 00827_update7 | Wed Jan 28 00:00:00 1970 PST
- 828 | 8 | 00828 | Thu Jan 29 00:00:00 1970 PST
- 829 | 509 | 00829_update9 | Fri Jan 30 00:00:00 1970 PST
- 830 | 0 | 00830 | Sat Jan 31 00:00:00 1970 PST
- 831 | 1 | 00831 | Sun Feb 01 00:00:00 1970 PST
- 833 | 303 | 00833_update3 | Tue Feb 03 00:00:00 1970 PST
- 834 | 4 | 00834 | Wed Feb 04 00:00:00 1970 PST
- 836 | 6 | 00836 | Fri Feb 06 00:00:00 1970 PST
- 837 | 407 | 00837_update7 | Sat Feb 07 00:00:00 1970 PST
- 838 | 8 | 00838 | Sun Feb 08 00:00:00 1970 PST
- 839 | 509 | 00839_update9 | Mon Feb 09 00:00:00 1970 PST
- 840 | 0 | 00840 | Tue Feb 10 00:00:00 1970 PST
- 841 | 1 | 00841 | Wed Feb 11 00:00:00 1970 PST
- 843 | 303 | 00843_update3 | Fri Feb 13 00:00:00 1970 PST
- 844 | 4 | 00844 | Sat Feb 14 00:00:00 1970 PST
- 846 | 6 | 00846 | Mon Feb 16 00:00:00 1970 PST
- 847 | 407 | 00847_update7 | Tue Feb 17 00:00:00 1970 PST
- 848 | 8 | 00848 | Wed Feb 18 00:00:00 1970 PST
- 849 | 509 | 00849_update9 | Thu Feb 19 00:00:00 1970 PST
- 850 | 0 | 00850 | Fri Feb 20 00:00:00 1970 PST
- 851 | 1 | 00851 | Sat Feb 21 00:00:00 1970 PST
- 853 | 303 | 00853_update3 | Mon Feb 23 00:00:00 1970 PST
- 854 | 4 | 00854 | Tue Feb 24 00:00:00 1970 PST
- 856 | 6 | 00856 | Thu Feb 26 00:00:00 1970 PST
- 857 | 407 | 00857_update7 | Fri Feb 27 00:00:00 1970 PST
- 858 | 8 | 00858 | Sat Feb 28 00:00:00 1970 PST
- 859 | 509 | 00859_update9 | Sun Mar 01 00:00:00 1970 PST
- 860 | 0 | 00860 | Mon Mar 02 00:00:00 1970 PST
- 861 | 1 | 00861 | Tue Mar 03 00:00:00 1970 PST
- 863 | 303 | 00863_update3 | Thu Mar 05 00:00:00 1970 PST
- 864 | 4 | 00864 | Fri Mar 06 00:00:00 1970 PST
- 866 | 6 | 00866 | Sun Mar 08 00:00:00 1970 PST
- 867 | 407 | 00867_update7 | Mon Mar 09 00:00:00 1970 PST
- 868 | 8 | 00868 | Tue Mar 10 00:00:00 1970 PST
- 869 | 509 | 00869_update9 | Wed Mar 11 00:00:00 1970 PST
- 870 | 0 | 00870 | Thu Mar 12 00:00:00 1970 PST
- 871 | 1 | 00871 | Fri Mar 13 00:00:00 1970 PST
- 873 | 303 | 00873_update3 | Sun Mar 15 00:00:00 1970 PST
- 874 | 4 | 00874 | Mon Mar 16 00:00:00 1970 PST
- 876 | 6 | 00876 | Wed Mar 18 00:00:00 1970 PST
- 877 | 407 | 00877_update7 | Thu Mar 19 00:00:00 1970 PST
- 878 | 8 | 00878 | Fri Mar 20 00:00:00 1970 PST
- 879 | 509 | 00879_update9 | Sat Mar 21 00:00:00 1970 PST
- 880 | 0 | 00880 | Sun Mar 22 00:00:00 1970 PST
- 881 | 1 | 00881 | Mon Mar 23 00:00:00 1970 PST
- 883 | 303 | 00883_update3 | Wed Mar 25 00:00:00 1970 PST
- 884 | 4 | 00884 | Thu Mar 26 00:00:00 1970 PST
- 886 | 6 | 00886 | Sat Mar 28 00:00:00 1970 PST
- 887 | 407 | 00887_update7 | Sun Mar 29 00:00:00 1970 PST
- 888 | 8 | 00888 | Mon Mar 30 00:00:00 1970 PST
- 889 | 509 | 00889_update9 | Tue Mar 31 00:00:00 1970 PST
- 890 | 0 | 00890 | Wed Apr 01 00:00:00 1970 PST
- 891 | 1 | 00891 | Thu Apr 02 00:00:00 1970 PST
- 893 | 303 | 00893_update3 | Sat Apr 04 00:00:00 1970 PST
- 894 | 4 | 00894 | Sun Apr 05 00:00:00 1970 PST
- 896 | 6 | 00896 | Tue Apr 07 00:00:00 1970 PST
- 897 | 407 | 00897_update7 | Wed Apr 08 00:00:00 1970 PST
- 898 | 8 | 00898 | Thu Apr 09 00:00:00 1970 PST
- 899 | 509 | 00899_update9 | Fri Apr 10 00:00:00 1970 PST
- 900 | 0 | 00900 | Thu Jan 01 00:00:00 1970 PST
- 901 | 1 | 00901 | Fri Jan 02 00:00:00 1970 PST
- 903 | 303 | 00903_update3 | Sun Jan 04 00:00:00 1970 PST
- 904 | 4 | 00904 | Mon Jan 05 00:00:00 1970 PST
- 906 | 6 | 00906 | Wed Jan 07 00:00:00 1970 PST
- 907 | 407 | 00907_update7 | Thu Jan 08 00:00:00 1970 PST
- 908 | 8 | 00908 | Fri Jan 09 00:00:00 1970 PST
- 909 | 509 | 00909_update9 | Sat Jan 10 00:00:00 1970 PST
- 910 | 0 | 00910 | Sun Jan 11 00:00:00 1970 PST
- 911 | 1 | 00911 | Mon Jan 12 00:00:00 1970 PST
- 913 | 303 | 00913_update3 | Wed Jan 14 00:00:00 1970 PST
- 914 | 4 | 00914 | Thu Jan 15 00:00:00 1970 PST
- 916 | 6 | 00916 | Sat Jan 17 00:00:00 1970 PST
- 917 | 407 | 00917_update7 | Sun Jan 18 00:00:00 1970 PST
- 918 | 8 | 00918 | Mon Jan 19 00:00:00 1970 PST
- 919 | 509 | 00919_update9 | Tue Jan 20 00:00:00 1970 PST
- 920 | 0 | 00920 | Wed Jan 21 00:00:00 1970 PST
- 921 | 1 | 00921 | Thu Jan 22 00:00:00 1970 PST
- 923 | 303 | 00923_update3 | Sat Jan 24 00:00:00 1970 PST
- 924 | 4 | 00924 | Sun Jan 25 00:00:00 1970 PST
- 926 | 6 | 00926 | Tue Jan 27 00:00:00 1970 PST
- 927 | 407 | 00927_update7 | Wed Jan 28 00:00:00 1970 PST
- 928 | 8 | 00928 | Thu Jan 29 00:00:00 1970 PST
- 929 | 509 | 00929_update9 | Fri Jan 30 00:00:00 1970 PST
- 930 | 0 | 00930 | Sat Jan 31 00:00:00 1970 PST
- 931 | 1 | 00931 | Sun Feb 01 00:00:00 1970 PST
- 933 | 303 | 00933_update3 | Tue Feb 03 00:00:00 1970 PST
- 934 | 4 | 00934 | Wed Feb 04 00:00:00 1970 PST
- 936 | 6 | 00936 | Fri Feb 06 00:00:00 1970 PST
- 937 | 407 | 00937_update7 | Sat Feb 07 00:00:00 1970 PST
- 938 | 8 | 00938 | Sun Feb 08 00:00:00 1970 PST
- 939 | 509 | 00939_update9 | Mon Feb 09 00:00:00 1970 PST
- 940 | 0 | 00940 | Tue Feb 10 00:00:00 1970 PST
- 941 | 1 | 00941 | Wed Feb 11 00:00:00 1970 PST
- 943 | 303 | 00943_update3 | Fri Feb 13 00:00:00 1970 PST
- 944 | 4 | 00944 | Sat Feb 14 00:00:00 1970 PST
- 946 | 6 | 00946 | Mon Feb 16 00:00:00 1970 PST
- 947 | 407 | 00947_update7 | Tue Feb 17 00:00:00 1970 PST
- 948 | 8 | 00948 | Wed Feb 18 00:00:00 1970 PST
- 949 | 509 | 00949_update9 | Thu Feb 19 00:00:00 1970 PST
- 950 | 0 | 00950 | Fri Feb 20 00:00:00 1970 PST
- 951 | 1 | 00951 | Sat Feb 21 00:00:00 1970 PST
- 953 | 303 | 00953_update3 | Mon Feb 23 00:00:00 1970 PST
- 954 | 4 | 00954 | Tue Feb 24 00:00:00 1970 PST
- 956 | 6 | 00956 | Thu Feb 26 00:00:00 1970 PST
- 957 | 407 | 00957_update7 | Fri Feb 27 00:00:00 1970 PST
- 958 | 8 | 00958 | Sat Feb 28 00:00:00 1970 PST
- 959 | 509 | 00959_update9 | Sun Mar 01 00:00:00 1970 PST
- 960 | 0 | 00960 | Mon Mar 02 00:00:00 1970 PST
- 961 | 1 | 00961 | Tue Mar 03 00:00:00 1970 PST
- 963 | 303 | 00963_update3 | Thu Mar 05 00:00:00 1970 PST
- 964 | 4 | 00964 | Fri Mar 06 00:00:00 1970 PST
- 966 | 6 | 00966 | Sun Mar 08 00:00:00 1970 PST
- 967 | 407 | 00967_update7 | Mon Mar 09 00:00:00 1970 PST
- 968 | 8 | 00968 | Tue Mar 10 00:00:00 1970 PST
- 969 | 509 | 00969_update9 | Wed Mar 11 00:00:00 1970 PST
- 970 | 0 | 00970 | Thu Mar 12 00:00:00 1970 PST
- 971 | 1 | 00971 | Fri Mar 13 00:00:00 1970 PST
- 973 | 303 | 00973_update3 | Sun Mar 15 00:00:00 1970 PST
- 974 | 4 | 00974 | Mon Mar 16 00:00:00 1970 PST
- 976 | 6 | 00976 | Wed Mar 18 00:00:00 1970 PST
- 977 | 407 | 00977_update7 | Thu Mar 19 00:00:00 1970 PST
- 978 | 8 | 00978 | Fri Mar 20 00:00:00 1970 PST
- 979 | 509 | 00979_update9 | Sat Mar 21 00:00:00 1970 PST
- 980 | 0 | 00980 | Sun Mar 22 00:00:00 1970 PST
- 981 | 1 | 00981 | Mon Mar 23 00:00:00 1970 PST
- 983 | 303 | 00983_update3 | Wed Mar 25 00:00:00 1970 PST
- 984 | 4 | 00984 | Thu Mar 26 00:00:00 1970 PST
- 986 | 6 | 00986 | Sat Mar 28 00:00:00 1970 PST
- 987 | 407 | 00987_update7 | Sun Mar 29 00:00:00 1970 PST
- 988 | 8 | 00988 | Mon Mar 30 00:00:00 1970 PST
- 989 | 509 | 00989_update9 | Tue Mar 31 00:00:00 1970 PST
- 990 | 0 | 00990 | Wed Apr 01 00:00:00 1970 PST
- 991 | 1 | 00991 | Thu Apr 02 00:00:00 1970 PST
- 993 | 303 | 00993_update3 | Sat Apr 04 00:00:00 1970 PST
- 994 | 4 | 00994 | Sun Apr 05 00:00:00 1970 PST
- 996 | 6 | 00996 | Tue Apr 07 00:00:00 1970 PST
- 997 | 407 | 00997_update7 | Wed Apr 08 00:00:00 1970 PST
- 998 | 8 | 00998 | Thu Apr 09 00:00:00 1970 PST
- 999 | 509 | 00999_update9 | Fri Apr 10 00:00:00 1970 PST
- 1000 | 0 | 01000 | Thu Jan 01 00:00:00 1970 PST
- 1001 | 101 | 0000100001 |
- 1003 | 403 | 0000300003_update3 |
- 1004 | 104 | 0000400004 |
- 1006 | 106 | 0000600006 |
- 1007 | 507 | 0000700007_update7 |
- 1008 | 108 | 0000800008 |
- 1009 | 609 | 0000900009_update9 |
- 1010 | 100 | 0001000010 |
- 1011 | 101 | 0001100011 |
- 1013 | 403 | 0001300013_update3 |
- 1014 | 104 | 0001400014 |
- 1016 | 106 | 0001600016 |
- 1017 | 507 | 0001700017_update7 |
- 1018 | 108 | 0001800018 |
- 1019 | 609 | 0001900019_update9 |
- 1020 | 100 | 0002000020 |
- 1101 | 201 | aaa |
- 1103 | 503 | ccc_update3 |
- 1104 | 204 | ddd |
-(819 rows)
-
-EXPLAIN (verbose, costs off)
-INSERT INTO ft2 (c1,c2,c3) VALUES (9999,999,'foo') RETURNING tableoid::regclass;
-ERROR: column "tableoid" does not exist
-LINE 2: ... ft2 (c1,c2,c3) VALUES (9999,999,'foo') RETURNING tableoid::...
- ^
-CONTEXT: referenced column: tableoid
-INSERT INTO ft2 (c1,c2,c3) VALUES (9999,999,'foo') RETURNING tableoid::regclass;
-ERROR: column "tableoid" does not exist
-LINE 1: ... ft2 (c1,c2,c3) VALUES (9999,999,'foo') RETURNING tableoid::...
- ^
-CONTEXT: referenced column: tableoid
-EXPLAIN (verbose, costs off)
-UPDATE ft2 SET c3 = 'bar' WHERE c1 = 9999 RETURNING tableoid::regclass;
-ERROR: column "tableoid" does not exist
-LINE 2: ...DATE ft2 SET c3 = 'bar' WHERE c1 = 9999 RETURNING tableoid::...
- ^
-CONTEXT: referenced column: tableoid
-UPDATE ft2 SET c3 = 'bar' WHERE c1 = 9999 RETURNING tableoid::regclass;
-ERROR: column "tableoid" does not exist
-LINE 1: ...DATE ft2 SET c3 = 'bar' WHERE c1 = 9999 RETURNING tableoid::...
- ^
-CONTEXT: referenced column: tableoid
-EXPLAIN (verbose, costs off)
-DELETE FROM ft2 WHERE c1 = 9999 RETURNING tableoid::regclass;
-ERROR: column "tableoid" does not exist
-LINE 2: DELETE FROM ft2 WHERE c1 = 9999 RETURNING tableoid::regclass...
- ^
-CONTEXT: referenced column: tableoid
-DELETE FROM ft2 WHERE c1 = 9999 RETURNING tableoid::regclass;
-ERROR: column "tableoid" does not exist
-LINE 1: DELETE FROM ft2 WHERE c1 = 9999 RETURNING tableoid::regclass...
- ^
-CONTEXT: referenced column: tableoid
--- Test UPDATE/DELETE with RETURNING on a three-table join
-INSERT INTO ft2 (c1,c2,c3)
- SELECT id, id - 1200, to_char(id, 'FM00000') FROM generate_series(1201, 1300) id;
-EXPLAIN (verbose, costs off)
-UPDATE ft2 SET c3 = 'foo'
- FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1)
- WHERE ft2.c1 > 1200 AND ft2.c2 = ft4.c1
- RETURNING ft2, ft2.*, ft4, ft4.*; -- can be pushed down
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------
- Update on public.ft2
- Output: ft2.*, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft4.*, ft4.c1, ft4.c2, ft4.c3
- -> Hash Join
- Output: ft2.c1, ft2.c2, NULL::integer, 'foo'::text, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.ctid, ft4.*, ft5.*, ft4.c1, ft4.c2, ft4.c3
- Hash Cond: (ft2.c2 = ft4.c1)
- -> Foreign Scan on public.ft2
- Output: ft2.c1, ft2.c2, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.ctid
- Remote SQL: SELECT "C 1", c2, c4, c5, c6, c7, c8, ctid FROM "S 1"."T 1" WHERE (("C 1" > 1200)) FOR UPDATE
- -> Hash
- Output: ft4.*, ft4.c1, ft4.c2, ft4.c3, ft5.*, ft5.c1
- -> Hash Join
- Output: ft4.*, ft4.c1, ft4.c2, ft4.c3, ft5.*, ft5.c1
- Hash Cond: (ft4.c1 = ft5.c1)
- -> Foreign Scan on public.ft4
- Output: ft4.*, ft4.c1, ft4.c2, ft4.c3
- Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 3"
- -> Hash
- Output: ft5.*, ft5.c1
- -> Foreign Scan on public.ft5
- Output: ft5.*, ft5.c1
- Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 4"
-(21 rows)
-
-UPDATE ft2 SET c3 = 'foo'
- FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1)
- WHERE ft2.c1 > 1200 AND ft2.c2 = ft4.c1
- RETURNING ft2, ft2.*, ft4, ft4.*;
- ft2 | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | ft4 | c1 | c2 | c3
---------------------------------+------+----+-----+----+----+----+------------+----+----------------+----+----+--------
- (1206,6,foo,,,,"ft2 ",) | 1206 | 6 | foo | | | | ft2 | | (6,7,AAA006) | 6 | 7 | AAA006
- (1212,12,foo,,,,"ft2 ",) | 1212 | 12 | foo | | | | ft2 | | (12,13,AAA012) | 12 | 13 | AAA012
- (1218,18,foo,,,,"ft2 ",) | 1218 | 18 | foo | | | | ft2 | | (18,19,AAA018) | 18 | 19 | AAA018
- (1224,24,foo,,,,"ft2 ",) | 1224 | 24 | foo | | | | ft2 | | (24,25,AAA024) | 24 | 25 | AAA024
- (1230,30,foo,,,,"ft2 ",) | 1230 | 30 | foo | | | | ft2 | | (30,31,AAA030) | 30 | 31 | AAA030
- (1236,36,foo,,,,"ft2 ",) | 1236 | 36 | foo | | | | ft2 | | (36,37,AAA036) | 36 | 37 | AAA036
- (1242,42,foo,,,,"ft2 ",) | 1242 | 42 | foo | | | | ft2 | | (42,43,AAA042) | 42 | 43 | AAA042
- (1248,48,foo,,,,"ft2 ",) | 1248 | 48 | foo | | | | ft2 | | (48,49,AAA048) | 48 | 49 | AAA048
- (1254,54,foo,,,,"ft2 ",) | 1254 | 54 | foo | | | | ft2 | | (54,55,AAA054) | 54 | 55 | AAA054
- (1260,60,foo,,,,"ft2 ",) | 1260 | 60 | foo | | | | ft2 | | (60,61,AAA060) | 60 | 61 | AAA060
- (1266,66,foo,,,,"ft2 ",) | 1266 | 66 | foo | | | | ft2 | | (66,67,AAA066) | 66 | 67 | AAA066
- (1272,72,foo,,,,"ft2 ",) | 1272 | 72 | foo | | | | ft2 | | (72,73,AAA072) | 72 | 73 | AAA072
- (1278,78,foo,,,,"ft2 ",) | 1278 | 78 | foo | | | | ft2 | | (78,79,AAA078) | 78 | 79 | AAA078
- (1284,84,foo,,,,"ft2 ",) | 1284 | 84 | foo | | | | ft2 | | (84,85,AAA084) | 84 | 85 | AAA084
- (1290,90,foo,,,,"ft2 ",) | 1290 | 90 | foo | | | | ft2 | | (90,91,AAA090) | 90 | 91 | AAA090
- (1296,96,foo,,,,"ft2 ",) | 1296 | 96 | foo | | | | ft2 | | (96,97,AAA096) | 96 | 97 | AAA096
-(16 rows)
-
-EXPLAIN (verbose, costs off)
-DELETE FROM ft2
- USING ft4 LEFT JOIN ft5 ON (ft4.c1 = ft5.c1)
- WHERE ft2.c1 > 1200 AND ft2.c1 % 10 = 0 AND ft2.c2 = ft4.c1
- RETURNING 100; -- can be pushed down
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------
- Delete on public.ft2
- Output: 100
- -> Nested Loop Left Join
- Output: ft2.ctid, ft4.*, ft5.*
- Join Filter: (ft4.c1 = ft5.c1)
- -> Nested Loop
- Output: ft2.ctid, ft4.*, ft4.c1
- Join Filter: (ft2.c2 = ft4.c1)
- -> Foreign Scan on public.ft2
- Output: ft2.ctid, ft2.c2
- Remote SQL: SELECT c2, ctid FROM "S 1"."T 1" WHERE (("C 1" > 1200)) AND ((("C 1" % 10) = 0)) FOR UPDATE
- -> Foreign Scan on public.ft4
- Output: ft4.*, ft4.c1
- Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 3"
- -> Foreign Scan on public.ft5
- Output: ft5.*, ft5.c1
- Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 4"
-(17 rows)
-
-DELETE FROM ft2
- USING ft4 LEFT JOIN ft5 ON (ft4.c1 = ft5.c1)
- WHERE ft2.c1 > 1200 AND ft2.c1 % 10 = 0 AND ft2.c2 = ft4.c1
- RETURNING 100;
- ?column?
-----------
- 100
- 100
- 100
- 100
- 100
- 100
- 100
- 100
- 100
- 100
-(10 rows)
-
-DELETE FROM ft2 WHERE ft2.c1 > 1200;
--- Test that trigger on remote table works as expected
-CREATE OR REPLACE FUNCTION "S 1".F_BRTRIG() RETURNS trigger AS $$
-BEGIN
- NEW.c3 = NEW.c3 || '_trig_update';
- RETURN NEW;
-END;
-$$ LANGUAGE plpgsql;
-CREATE TRIGGER t1_br_insert BEFORE INSERT OR UPDATE
- ON "S 1"."T 1" FOR EACH ROW EXECUTE PROCEDURE "S 1".F_BRTRIG();
-INSERT INTO ft2 (c1,c2,c3) VALUES (1208, 818, 'fff') RETURNING *;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-------+-----+-----------------+----+----+----+------------+----
- 1208 | 818 | fff_trig_update | | | | ft2 |
-(1 row)
-
-INSERT INTO ft2 (c1,c2,c3,c6) VALUES (1218, 818, 'ggg', '(--;') RETURNING *;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-------+-----+-----------------+----+----+------+------------+----
- 1218 | 818 | ggg_trig_update | | | (--; | ft2 |
-(1 row)
-
-UPDATE ft2 SET c2 = c2 + 600 WHERE c1 % 10 = 8 AND c1 < 1200 RETURNING *;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-------+-----+------------------------+------------------------------+--------------------------+----+------------+-----
- 8 | 608 | 00008_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
- 18 | 608 | 00018_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
- 28 | 608 | 00028_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
- 38 | 608 | 00038_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
- 48 | 608 | 00048_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
- 58 | 608 | 00058_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
- 68 | 608 | 00068_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
- 78 | 608 | 00078_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
- 88 | 608 | 00088_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
- 98 | 608 | 00098_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
- 108 | 608 | 00108_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
- 118 | 608 | 00118_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
- 128 | 608 | 00128_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
- 138 | 608 | 00138_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
- 148 | 608 | 00148_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
- 158 | 608 | 00158_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
- 168 | 608 | 00168_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
- 178 | 608 | 00178_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
- 188 | 608 | 00188_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
- 198 | 608 | 00198_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
- 208 | 608 | 00208_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
- 218 | 608 | 00218_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
- 228 | 608 | 00228_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
- 238 | 608 | 00238_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
- 248 | 608 | 00248_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
- 258 | 608 | 00258_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
- 268 | 608 | 00268_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
- 278 | 608 | 00278_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
- 288 | 608 | 00288_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
- 298 | 608 | 00298_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
- 308 | 608 | 00308_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
- 318 | 608 | 00318_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
- 328 | 608 | 00328_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
- 338 | 608 | 00338_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
- 348 | 608 | 00348_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
- 358 | 608 | 00358_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
- 368 | 608 | 00368_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
- 378 | 608 | 00378_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
- 388 | 608 | 00388_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
- 398 | 608 | 00398_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
- 408 | 608 | 00408_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
- 418 | 608 | 00418_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
- 428 | 608 | 00428_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
- 438 | 608 | 00438_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
- 448 | 608 | 00448_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
- 458 | 608 | 00458_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
- 468 | 608 | 00468_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
- 478 | 608 | 00478_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
- 488 | 608 | 00488_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
- 498 | 608 | 00498_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
- 508 | 608 | 00508_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
- 518 | 608 | 00518_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
- 528 | 608 | 00528_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
- 538 | 608 | 00538_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
- 548 | 608 | 00548_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
- 558 | 608 | 00558_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
- 568 | 608 | 00568_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
- 578 | 608 | 00578_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
- 588 | 608 | 00588_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
- 598 | 608 | 00598_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
- 608 | 608 | 00608_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
- 618 | 608 | 00618_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
- 628 | 608 | 00628_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
- 638 | 608 | 00638_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
- 648 | 608 | 00648_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
- 658 | 608 | 00658_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
- 668 | 608 | 00668_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
- 678 | 608 | 00678_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
- 688 | 608 | 00688_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
- 698 | 608 | 00698_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
- 708 | 608 | 00708_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
- 718 | 608 | 00718_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
- 728 | 608 | 00728_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
- 738 | 608 | 00738_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
- 748 | 608 | 00748_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
- 758 | 608 | 00758_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
- 768 | 608 | 00768_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
- 778 | 608 | 00778_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
- 788 | 608 | 00788_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
- 798 | 608 | 00798_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
- 808 | 608 | 00808_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
- 818 | 608 | 00818_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
- 828 | 608 | 00828_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
- 838 | 608 | 00838_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
- 848 | 608 | 00848_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
- 858 | 608 | 00858_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
- 868 | 608 | 00868_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
- 878 | 608 | 00878_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
- 888 | 608 | 00888_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
- 898 | 608 | 00898_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
- 908 | 608 | 00908_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
- 918 | 608 | 00918_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
- 928 | 608 | 00928_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
- 938 | 608 | 00938_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
- 948 | 608 | 00948_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
- 958 | 608 | 00958_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
- 968 | 608 | 00968_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
- 978 | 608 | 00978_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
- 988 | 608 | 00988_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
- 998 | 608 | 00998_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
- 1008 | 708 | 0000800008_trig_update | | | | ft2 |
- 1018 | 708 | 0001800018_trig_update | | | | ft2 |
-(102 rows)
-
--- Test errors thrown on remote side during update
-ALTER TABLE "S 1"."T 1" ADD CONSTRAINT c2positive CHECK (c2 >= 0);
-INSERT INTO ft1(c1, c2) VALUES(11, 12); -- duplicate key
-ERROR: duplicate key value violates unique constraint "t1_pkey"
-DETAIL: Key ("C 1")=(11) already exists.
-CONTEXT: Remote SQL command: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
-INSERT INTO ft1(c1, c2) VALUES(1111, -2); -- c2positive
-ERROR: new row for relation "T 1" violates check constraint "c2positive"
-DETAIL: Failing row contains (1111, -2, _trig_update, null, null, null, ft1 , null).
-CONTEXT: Remote SQL command: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
-UPDATE ft1 SET c2 = -c2 WHERE c1 = 1; -- c2positive
-ERROR: new row for relation "T 1" violates check constraint "c2positive"
-DETAIL: Failing row contains (1, -1, 00001_trig_update, 1970-01-02 08:00:00+00, 1970-01-02 00:00:00, 1, 1 , foo).
-CONTEXT: Remote SQL command: UPDATE "S 1"."T 1" SET c2 = $2 WHERE ctid = $1
--- Test savepoint/rollback behavior
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
- c2 | count
------+-------
- 0 | 100
- 1 | 100
- 4 | 100
- 6 | 100
- 100 | 2
- 101 | 2
- 104 | 2
- 106 | 2
- 201 | 1
- 204 | 1
- 303 | 100
- 403 | 2
- 407 | 100
-(13 rows)
-
-select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
- c2 | count
------+-------
- 0 | 100
- 1 | 100
- 4 | 100
- 6 | 100
- 100 | 2
- 101 | 2
- 104 | 2
- 106 | 2
- 201 | 1
- 204 | 1
- 303 | 100
- 403 | 2
- 407 | 100
-(13 rows)
-
-begin;
-update ft2 set c2 = 42 where c2 = 0;
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
- c2 | count
------+-------
- 1 | 100
- 4 | 100
- 6 | 100
- 42 | 100
- 100 | 2
- 101 | 2
- 104 | 2
- 106 | 2
- 201 | 1
- 204 | 1
- 303 | 100
- 403 | 2
- 407 | 100
-(13 rows)
-
-savepoint s1;
-update ft2 set c2 = 44 where c2 = 4;
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
- c2 | count
------+-------
- 1 | 100
- 6 | 100
- 42 | 100
- 44 | 100
- 100 | 2
- 101 | 2
- 104 | 2
- 106 | 2
- 201 | 1
- 204 | 1
- 303 | 100
- 403 | 2
- 407 | 100
-(13 rows)
-
-release savepoint s1;
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
- c2 | count
------+-------
- 1 | 100
- 6 | 100
- 42 | 100
- 44 | 100
- 100 | 2
- 101 | 2
- 104 | 2
- 106 | 2
- 201 | 1
- 204 | 1
- 303 | 100
- 403 | 2
- 407 | 100
-(13 rows)
-
-savepoint s2;
-update ft2 set c2 = 46 where c2 = 6;
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
- c2 | count
------+-------
- 1 | 100
- 42 | 100
- 44 | 100
- 46 | 100
- 100 | 2
- 101 | 2
- 104 | 2
- 106 | 2
- 201 | 1
- 204 | 1
- 303 | 100
- 403 | 2
- 407 | 100
-(13 rows)
-
-rollback to savepoint s2;
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
- c2 | count
------+-------
- 1 | 100
- 6 | 100
- 42 | 100
- 44 | 100
- 100 | 2
- 101 | 2
- 104 | 2
- 106 | 2
- 201 | 1
- 204 | 1
- 303 | 100
- 403 | 2
- 407 | 100
-(13 rows)
-
-release savepoint s2;
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
- c2 | count
------+-------
- 1 | 100
- 6 | 100
- 42 | 100
- 44 | 100
- 100 | 2
- 101 | 2
- 104 | 2
- 106 | 2
- 201 | 1
- 204 | 1
- 303 | 100
- 403 | 2
- 407 | 100
-(13 rows)
-
-savepoint s3;
-update ft2 set c2 = -2 where c2 = 42 and c1 = 10; -- fail on remote side
-ERROR: new row for relation "T 1" violates check constraint "c2positive"
-DETAIL: Failing row contains (10, -2, 00010_trig_update_trig_update, 1970-01-11 08:00:00+00, 1970-01-11 00:00:00, 0, 0 , foo).
-CONTEXT: Remote SQL command: UPDATE "S 1"."T 1" SET c2 = $2 WHERE ctid = $1
-rollback to savepoint s3;
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
- c2 | count
------+-------
- 1 | 100
- 6 | 100
- 42 | 100
- 44 | 100
- 100 | 2
- 101 | 2
- 104 | 2
- 106 | 2
- 201 | 1
- 204 | 1
- 303 | 100
- 403 | 2
- 407 | 100
-(13 rows)
-
-release savepoint s3;
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
- c2 | count
------+-------
- 1 | 100
- 6 | 100
- 42 | 100
- 44 | 100
- 100 | 2
- 101 | 2
- 104 | 2
- 106 | 2
- 201 | 1
- 204 | 1
- 303 | 100
- 403 | 2
- 407 | 100
-(13 rows)
-
--- none of the above is committed yet remotely
-select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
- c2 | count
------+-------
- 0 | 100
- 1 | 100
- 4 | 100
- 6 | 100
- 100 | 2
- 101 | 2
- 104 | 2
- 106 | 2
- 201 | 1
- 204 | 1
- 303 | 100
- 403 | 2
- 407 | 100
-(13 rows)
-
-commit;
-select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
- c2 | count
------+-------
- 1 | 100
- 6 | 100
- 42 | 100
- 44 | 100
- 100 | 2
- 101 | 2
- 104 | 2
- 106 | 2
- 201 | 1
- 204 | 1
- 303 | 100
- 403 | 2
- 407 | 100
-(13 rows)
-
-select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
- c2 | count
------+-------
- 1 | 100
- 6 | 100
- 42 | 100
- 44 | 100
- 100 | 2
- 101 | 2
- 104 | 2
- 106 | 2
- 201 | 1
- 204 | 1
- 303 | 100
- 403 | 2
- 407 | 100
-(13 rows)
-
--- ===================================================================
--- test copy
--- ===================================================================
-select count(*) from ft2;
- count
--------
- 821
-(1 row)
-
-select * from ft2 order by c1 limit 10;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-----+-----+-------------------+------------------------------+--------------------------+----+------------+-----
- 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
- 3 | 303 | 00003_update3 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
- 4 | 44 | 00004_trig_update | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
- 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
- 7 | 407 | 00007_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
- 8 | 608 | 00008_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
- 9 | 509 | 00009_update9 | Sat Jan 10 00:00:00 1970 PST | Sat Jan 10 00:00:00 1970 | 9 | ft2 | foo
- 10 | 42 | 00010_trig_update | Sun Jan 11 00:00:00 1970 PST | Sun Jan 11 00:00:00 1970 | 0 | 0 | foo
- 11 | 1 | 00011 | Mon Jan 12 00:00:00 1970 PST | Mon Jan 12 00:00:00 1970 | 1 | 1 | foo
- 13 | 303 | 00013_update3 | Wed Jan 14 00:00:00 1970 PST | Wed Jan 14 00:00:00 1970 | 3 | 3 | foo
-(10 rows)
-
-\! rm -f ./foreigntable_ft2.data
-\COPY (select * from ft2) to './foreigntable_ft2.data';
-delete from ft2;
-select * from ft2 order by c1 limit 10;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-----+----+----+----+----+----+----+----
-(0 rows)
-
-\COPY ft2 from './foreigntable_ft2.data';
-select * from ft2 order by c1 limit 10;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-----+-----+-------------------------------+------------------------------+--------------------------+----+------------+-----
- 1 | 1 | 00001_trig_update | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
- 3 | 303 | 00003_update3_trig_update | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
- 4 | 44 | 00004_trig_update_trig_update | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
- 6 | 6 | 00006_trig_update | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
- 7 | 407 | 00007_update7_trig_update | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
- 8 | 608 | 00008_trig_update_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
- 9 | 509 | 00009_update9_trig_update | Sat Jan 10 00:00:00 1970 PST | Sat Jan 10 00:00:00 1970 | 9 | ft2 | foo
- 10 | 42 | 00010_trig_update_trig_update | Sun Jan 11 00:00:00 1970 PST | Sun Jan 11 00:00:00 1970 | 0 | 0 | foo
- 11 | 1 | 00011_trig_update | Mon Jan 12 00:00:00 1970 PST | Mon Jan 12 00:00:00 1970 | 1 | 1 | foo
- 13 | 303 | 00013_update3_trig_update | Wed Jan 14 00:00:00 1970 PST | Wed Jan 14 00:00:00 1970 | 3 | 3 | foo
-(10 rows)
-
-select count(*) from ft2;
- count
--------
- 821
-(1 row)
-
-\! rm -f ./foreigntable_ft2.data
--- ===================================================================
--- test serial columns (ie, sequence-based defaults)
--- ===================================================================
-create table loc1 (f1 serial, f2 text);
-NOTICE: CREATE TABLE will create implicit sequence "loc1_f1_seq" for serial column "loc1.f1"
-create foreign table rem1 (f1 serial, f2 text)
- server loopback options(table_name 'loc1');
-NOTICE: CREATE FOREIGN TABLE will create implicit sequence "rem1_f1_seq" for serial column "rem1.f1"
-select pg_catalog.setval('rem1_f1_seq', 10, false);
- setval
---------
- 10
-(1 row)
-
-insert into loc1(f2) values('hi');
-insert into rem1(f2) values('hi remote');
-insert into loc1(f2) values('bye');
-insert into rem1(f2) values('bye remote');
-select * from loc1;
- f1 | f2
-----+------------
- 1 | hi
- 10 | hi remote
- 2 | bye
- 11 | bye remote
-(4 rows)
-
-select * from rem1;
- f1 | f2
-----+------------
- 1 | hi
- 10 | hi remote
- 2 | bye
- 11 | bye remote
-(4 rows)
-
--- ===================================================================
--- test local triggers
--- ===================================================================
--- Trigger functions "borrowed" from triggers regress test.
-CREATE FUNCTION trigger_func() RETURNS trigger LANGUAGE plpgsql AS $$
-BEGIN
- RAISE NOTICE 'trigger_func(%) called: action = %, when = %, level = %',
- TG_ARGV[0], TG_OP, TG_WHEN, TG_LEVEL;
- RETURN NULL;
-END;$$;
-CREATE TRIGGER trig_stmt_before BEFORE DELETE OR INSERT OR UPDATE ON rem1
- FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
-ERROR: "rem1" is not a table or view
-CREATE TRIGGER trig_stmt_after AFTER DELETE OR INSERT OR UPDATE ON rem1
- FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
-ERROR: "rem1" is not a table or view
-CREATE OR REPLACE FUNCTION trigger_data() RETURNS trigger
-LANGUAGE plpgsql AS $$
-
-declare
- oldnew text[];
- relid text;
- argstr text;
-begin
-
- relid := TG_relid::regclass;
- argstr := '';
- for i in 0 .. TG_nargs - 1 loop
- if i > 0 then
- argstr := argstr || ', ';
- end if;
- argstr := argstr || TG_argv[i];
- end loop;
-
- RAISE NOTICE '%(%) % % % ON %',
- tg_name, argstr, TG_when, TG_level, TG_OP, relid;
- oldnew := '{}'::text[];
- if TG_OP != 'INSERT' then
- oldnew := array_append(oldnew, format('OLD: %s', OLD));
- end if;
-
- if TG_OP != 'DELETE' then
- oldnew := array_append(oldnew, format('NEW: %s', NEW));
- end if;
-
- RAISE NOTICE '%', array_to_string(oldnew, ',');
-
- if TG_OP = 'DELETE' then
- return OLD;
- else
- return NEW;
- end if;
-end;
-$$;
--- Test basic functionality
-CREATE TRIGGER trig_row_before
-BEFORE INSERT OR UPDATE OR DELETE ON rem1
-FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
-ERROR: "rem1" is not a table or view
-CREATE TRIGGER trig_row_after
-AFTER INSERT OR UPDATE OR DELETE ON rem1
-FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
-ERROR: "rem1" is not a table or view
-delete from rem1;
-insert into rem1 values(1,'insert');
-update rem1 set f2 = 'update' where f1 = 1;
-update rem1 set f2 = f2 || f2;
--- cleanup
-DROP TRIGGER trig_row_before ON rem1;
-ERROR: trigger "trig_row_before" for table "rem1" does not exist
-DROP TRIGGER trig_row_after ON rem1;
-ERROR: trigger "trig_row_after" for table "rem1" does not exist
-DROP TRIGGER trig_stmt_before ON rem1;
-ERROR: trigger "trig_stmt_before" for table "rem1" does not exist
-DROP TRIGGER trig_stmt_after ON rem1;
-ERROR: trigger "trig_stmt_after" for table "rem1" does not exist
-DELETE from rem1;
--- Test WHEN conditions
-CREATE TRIGGER trig_row_before_insupd
-BEFORE INSERT OR UPDATE ON rem1
-FOR EACH ROW
-WHEN (NEW.f2 like '%update%')
-EXECUTE PROCEDURE trigger_data(23,'skidoo');
-ERROR: "rem1" is not a table or view
-CREATE TRIGGER trig_row_after_insupd
-AFTER INSERT OR UPDATE ON rem1
-FOR EACH ROW
-WHEN (NEW.f2 like '%update%')
-EXECUTE PROCEDURE trigger_data(23,'skidoo');
-ERROR: "rem1" is not a table or view
--- Insert or update not matching: nothing happens
-INSERT INTO rem1 values(1, 'insert');
-UPDATE rem1 set f2 = 'test';
--- Insert or update matching: triggers are fired
-INSERT INTO rem1 values(2, 'update');
-UPDATE rem1 set f2 = 'update update' where f1 = '2';
-CREATE TRIGGER trig_row_before_delete
-BEFORE DELETE ON rem1
-FOR EACH ROW
-WHEN (OLD.f2 like '%update%')
-EXECUTE PROCEDURE trigger_data(23,'skidoo');
-ERROR: "rem1" is not a table or view
-CREATE TRIGGER trig_row_after_delete
-AFTER DELETE ON rem1
-FOR EACH ROW
-WHEN (OLD.f2 like '%update%')
-EXECUTE PROCEDURE trigger_data(23,'skidoo');
-ERROR: "rem1" is not a table or view
--- Trigger is fired for f1=2, not for f1=1
-DELETE FROM rem1;
--- cleanup
-DROP TRIGGER trig_row_before_insupd ON rem1;
-ERROR: trigger "trig_row_before_insupd" for table "rem1" does not exist
-DROP TRIGGER trig_row_after_insupd ON rem1;
-ERROR: trigger "trig_row_after_insupd" for table "rem1" does not exist
-DROP TRIGGER trig_row_before_delete ON rem1;
-ERROR: trigger "trig_row_before_delete" for table "rem1" does not exist
-DROP TRIGGER trig_row_after_delete ON rem1;
-ERROR: trigger "trig_row_after_delete" for table "rem1" does not exist
--- Test various RETURN statements in BEFORE triggers.
-CREATE FUNCTION trig_row_before_insupdate() RETURNS TRIGGER AS $$
- BEGIN
- NEW.f2 := NEW.f2 || ' triggered !';
- RETURN NEW;
- END
-$$ language plpgsql;
-CREATE TRIGGER trig_row_before_insupd
-BEFORE INSERT OR UPDATE ON rem1
-FOR EACH ROW EXECUTE PROCEDURE trig_row_before_insupdate();
-ERROR: "rem1" is not a table or view
--- The new values should have 'triggered' appended
-INSERT INTO rem1 values(1, 'insert');
-SELECT * from loc1;
- f1 | f2
-----+--------
- 1 | insert
-(1 row)
-
-INSERT INTO rem1 values(2, 'insert') RETURNING f2;
- f2
---------
- insert
-(1 row)
-
-SELECT * from loc1;
- f1 | f2
-----+--------
- 1 | insert
- 2 | insert
-(2 rows)
-
-UPDATE rem1 set f2 = '';
-SELECT * from loc1;
- f1 | f2
-----+----
- 1 |
- 2 |
-(2 rows)
-
-UPDATE rem1 set f2 = 'skidoo' RETURNING f2;
- f2
---------
- skidoo
- skidoo
-(2 rows)
-
-SELECT * from loc1;
- f1 | f2
-----+--------
- 1 | skidoo
- 2 | skidoo
-(2 rows)
-
-EXPLAIN (verbose, costs off)
-UPDATE rem1 set f1 = 10; -- all columns should be transmitted
- QUERY PLAN
------------------------------------------------------------------
- Update on public.rem1
- -> Foreign Scan on public.rem1
- Output: 10, f2, ctid
- Remote SQL: SELECT f2, ctid FROM public.loc1 FOR UPDATE
-(4 rows)
-
-UPDATE rem1 set f1 = 10;
-SELECT * from loc1;
- f1 | f2
-----+--------
- 10 | skidoo
- 10 | skidoo
-(2 rows)
-
-DELETE FROM rem1;
--- Add a second trigger, to check that the changes are propagated correctly
--- from trigger to trigger
-CREATE TRIGGER trig_row_before_insupd2
-BEFORE INSERT OR UPDATE ON rem1
-FOR EACH ROW EXECUTE PROCEDURE trig_row_before_insupdate();
-ERROR: "rem1" is not a table or view
-INSERT INTO rem1 values(1, 'insert');
-SELECT * from loc1;
- f1 | f2
-----+--------
- 1 | insert
-(1 row)
-
-INSERT INTO rem1 values(2, 'insert') RETURNING f2;
- f2
---------
- insert
-(1 row)
-
-SELECT * from loc1;
- f1 | f2
-----+--------
- 1 | insert
- 2 | insert
-(2 rows)
-
-UPDATE rem1 set f2 = '';
-SELECT * from loc1;
- f1 | f2
-----+----
- 1 |
- 2 |
-(2 rows)
-
-UPDATE rem1 set f2 = 'skidoo' RETURNING f2;
- f2
---------
- skidoo
- skidoo
-(2 rows)
-
-SELECT * from loc1;
- f1 | f2
-----+--------
- 1 | skidoo
- 2 | skidoo
-(2 rows)
-
-DROP TRIGGER trig_row_before_insupd ON rem1;
-ERROR: trigger "trig_row_before_insupd" for table "rem1" does not exist
-DROP TRIGGER trig_row_before_insupd2 ON rem1;
-ERROR: trigger "trig_row_before_insupd2" for table "rem1" does not exist
-DELETE from rem1;
-INSERT INTO rem1 VALUES (1, 'test');
--- Test with a trigger returning NULL
-CREATE FUNCTION trig_null() RETURNS TRIGGER AS $$
- BEGIN
- RETURN NULL;
- END
-$$ language plpgsql;
-CREATE TRIGGER trig_null
-BEFORE INSERT OR UPDATE OR DELETE ON rem1
-FOR EACH ROW EXECUTE PROCEDURE trig_null();
-ERROR: "rem1" is not a table or view
--- Nothing should have changed.
-INSERT INTO rem1 VALUES (2, 'test2');
-SELECT * from loc1;
- f1 | f2
-----+-------
- 1 | test
- 2 | test2
-(2 rows)
-
-UPDATE rem1 SET f2 = 'test2';
-SELECT * from loc1;
- f1 | f2
-----+-------
- 1 | test2
- 2 | test2
-(2 rows)
-
-DELETE from rem1;
-SELECT * from loc1;
- f1 | f2
-----+----
-(0 rows)
-
-DROP TRIGGER trig_null ON rem1;
-ERROR: trigger "trig_null" for table "rem1" does not exist
-DELETE from rem1;
--- Test a combination of local and remote triggers
-CREATE TRIGGER trig_row_before
-BEFORE INSERT OR UPDATE OR DELETE ON rem1
-FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
-ERROR: "rem1" is not a table or view
-CREATE TRIGGER trig_row_after
-AFTER INSERT OR UPDATE OR DELETE ON rem1
-FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
-ERROR: "rem1" is not a table or view
-CREATE TRIGGER trig_local_before BEFORE INSERT OR UPDATE ON loc1
-FOR EACH ROW EXECUTE PROCEDURE trig_row_before_insupdate();
-INSERT INTO rem1(f2) VALUES ('test');
-UPDATE rem1 SET f2 = 'testo';
--- Test returning a system attribute
-INSERT INTO rem1(f2) VALUES ('test') RETURNING ctid;
-ERROR: column "ctid" does not exist
-LINE 1: INSERT INTO rem1(f2) VALUES ('test') RETURNING ctid;
- ^
-CONTEXT: referenced column: ctid
\ No newline at end of file
diff --git a/src/test/regress/output/recovery_2pc_tools.source b/src/test/regress/output/recovery_2pc_tools.source
index 4b95d5281..e6c64000d 100644
--- a/src/test/regress/output/recovery_2pc_tools.source
+++ b/src/test/regress/output/recovery_2pc_tools.source
@@ -49,7 +49,8 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c
audit_rotation_size | integer | kB | 1024 | 1048576
audit_set_parameter | integer | | 0 | 1
audit_space_limit | integer | kB | 1024 | 1073741824
- audit_system_object | integer | | 0 | 134217727
+ audit_system_function_exec | integer | | 0 | 1
+ audit_system_object | integer | | 0 | 268435455
audit_thread_num | integer | | 1 | 48
audit_user_locked | integer | | 0 | 1
audit_user_violation | integer | | 0 | 1
@@ -81,6 +82,7 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c
bbox_dump_count | integer | | 1 | 20
bbox_dump_path | string | | |
behavior_compat_options | string | | |
+ b_format_behavior_compat_options | string | | |
bgwriter_delay | integer | ms | 10 | 10000
bgwriter_flush_after | integer | 8kB | 0 | 256
bgwriter_lru_maxpages | integer | | 0 | 1000
@@ -219,6 +221,7 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c
enable_asp | bool | | |
enable_auto_clean_unique_sql | bool | | |
enable_auto_explain | bool | | |
+ enable_availablezone | bool | | |
enable_bbox_dump | bool | | |
enable_beta_features | bool | | |
enable_beta_opfusion | bool | | |
@@ -339,6 +342,7 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c
force_promote | integer | | 0 | 1
from_collapse_limit | integer | | 1 | 2147483647
fsync | bool | | |
+ full_audit_users | string | | |
full_page_writes | bool | | |
geqo | bool | | |
geqo_effort | integer | | 1 | 10
@@ -481,6 +485,7 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c
ngram_grapsymbol_ignore | bool | | |
ngram_punctuation_ignore | bool | | |
nls_timestamp_format | string | | |
+ no_audit_client | string | | |
numa_distribute_mode | string | | |
num_internal_lock_partitions | string | | |
nvm_buffers | integer | 8kB | 0 | 1073741823
diff --git a/src/test/regress/output/select_into_file.source b/src/test/regress/output/select_into_file.source
new file mode 100644
index 000000000..0a1d8fb9e
--- /dev/null
+++ b/src/test/regress/output/select_into_file.source
@@ -0,0 +1,53 @@
+create database test_select_into_file dbcompatibility 'b';
+\c test_select_into_file;
+CREATE TYPE my_enum AS enum('ENUM1','ENUM2');
+create table t(id int, a char(2), b text, c my_enum, d blob, e raw, f bytea);
+insert into t values(1, 'c1', 'text1', 'ENUM1', '01', HEXTORAW('DEADBEEF'), E'\\xDEADBEEF');
+insert into t values(2, 'c2', 'text2', 'ENUM2', '11', HEXTORAW('DEADBEEE'), E'\\xDEADBEEE');
+select * from t;
+ id | a | b | c | d | e | f
+----+----+-------+-------+----+----------+------------
+ 1 | c1 | text1 | ENUM1 | 01 | DEADBEEF | \xdeadbeef
+ 2 | c2 | text2 | ENUM2 | 11 | DEADBEEE | \xdeadbeee
+(2 rows)
+
+select * from t into outfile '@abs_srcdir@/data/select_into_file.data';
+\! cat @abs_srcdir@/data/select_into_file.data
+1 c1 text1 ENUM1 01 DEADBEEF \\xdeadbeef
+2 c2 text2 ENUM2 11 DEADBEEE \\xdeadbeee
+select * from t into outfile '@abs_srcdir@/data/select_into_file.data' FIELDS TERMINATED BY '~~';
+\! cat @abs_srcdir@/data/select_into_file.data
+1~~c1~~text1~~ENUM1~~01~~DEADBEEF~~\\xdeadbeef
+2~~c2~~text2~~ENUM2~~11~~DEADBEEE~~\\xdeadbeee
+select * from t into outfile '@abs_srcdir@/data/select_into_file.data' FIELDS ENCLOSED BY '^';
+\! cat @abs_srcdir@/data/select_into_file.data
+^1^ ^c1^ ^text1^ ^ENUM1^ ^01^ ^DEADBEEF^ ^\xdeadbeef^
+^2^ ^c2^ ^text2^ ^ENUM2^ ^11^ ^DEADBEEE^ ^\xdeadbeee^
+select * from t into outfile '@abs_srcdir@/data/select_into_file.data' FIELDS OPTIONALLY ENCLOSED BY '^';
+\! cat @abs_srcdir@/data/select_into_file.data
+1 ^c1^ ^text1^ ^ENUM1^ ^01^ ^DEADBEEF^ \xdeadbeef
+2 ^c2^ ^text2^ ^ENUM2^ ^11^ ^DEADBEEE^ \xdeadbeee
+select * from t into outfile '@abs_srcdir@/data/select_into_file.data' FIELDS ENCLOSED BY 't' ESCAPED BY '^';
+\! cat @abs_srcdir@/data/select_into_file.data
+t1t tc1t t^tex^t1t tENUM1t t01t tDEADBEEFt t\xdeadbeeft
+t2t tc2t t^tex^t2t tENUM2t t11t tDEADBEEEt t\xdeadbeeet
+select * from t into outfile '@abs_srcdir@/data/select_into_file.data' LINES STARTING BY '$';
+\! cat @abs_srcdir@/data/select_into_file.data
+$1 c1 text1 ENUM1 01 DEADBEEF \\xdeadbeef
+$2 c2 text2 ENUM2 11 DEADBEEE \\xdeadbeee
+select * from t into outfile '@abs_srcdir@/data/select_into_file.data' LINES TERMINATED BY '&\n';
+\! cat @abs_srcdir@/data/select_into_file.data
+1 c1 text1 ENUM1 01 DEADBEEF \\xdeadbeef&
+2 c2 text2 ENUM2 11 DEADBEEE \\xdeadbeee&
+select * from t into outfile '@abs_srcdir@/data/select_into_file.data' FIELDS TERMINATED BY '~' ENCLOSED BY 't' ESCAPED BY '^' LINES STARTING BY '$' TERMINATED BY '&\n';
+\! cat @abs_srcdir@/data/select_into_file.data
+$t1t~tc1t~t^tex^t1t~tENUM1t~t01t~tDEADBEEFt~t\xdeadbeeft&
+$t2t~tc2t~t^tex^t2t~tENUM2t~t11t~tDEADBEEEt~t\xdeadbeeet&
+--error dumpfile more than one row
+select * from t into dumpfile '@abs_srcdir@/data/select_into_file.data';
+ERROR: Result consisted of more than one row
+select * from t limit 1 into dumpfile '@abs_srcdir@/data/select_into_file.data';
+\! cat @abs_srcdir@/data/select_into_file.data
+1c1text1ENUM101DEADBEEF\\xdeadbeef\c regression;
+drop database test_select_into_file;
+\! rm @abs_srcdir@/data/select_into_file.data
diff --git a/src/test/regress/output/select_into_user_defined_variables.source b/src/test/regress/output/select_into_user_defined_variables.source
index a8e21ed8a..14fb96701 100644
--- a/src/test/regress/output/select_into_user_defined_variables.source
+++ b/src/test/regress/output/select_into_user_defined_variables.source
@@ -3,21 +3,15 @@ select 10 into @aa;
ERROR: syntax error at or near "@"
LINE 1: select 10 into @aa;
^
-\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_set_variable_b_format=on" >/dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "b_format_behavior_compat_options='enable_set_variables'" >/dev/null 2>&1
\! sleep 1
-- error
select 10 into @aa;
ERROR: syntax error at or near "@"
LINE 1: select 10 into @aa;
^
-create database test dbcompatibility 'b';
-\c test
-show enable_set_variable_b_format;
- enable_set_variable_b_format
-------------------------------
- on
-(1 row)
-
+create database test_select_into_var dbcompatibility 'b';
+\c test_select_into_var
drop table if exists t;
NOTICE: table "t" does not exist, skipping
create table t(i int, t text, b bool, f float, bi bit(3), vbi bit varying(5));
@@ -138,6 +132,19 @@ select @aa,@bb,@cc,@dd,@ee,@ff;
| | | | |
(1 row)
+create or replace procedure my_pro()
+as
+declare outfile int default 0;
+begin
+select 10 into outfile;
+end;
+/
+call my_pro();
+ my_pro
+--------
+
+(1 row)
+
--procedure stmt 1
create or replace procedure my_pro()
as
@@ -388,13 +395,79 @@ select @num;
(1 row)
drop trigger tri_delete_after on t1;
-\c regression
-drop database if exists test;
-\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_set_variable_b_format=off" >/dev/null 2>&1
-\! sleep 1
-show enable_set_variable_b_format;
- enable_set_variable_b_format
-------------------------------
- off
+select 10,@v1/2 into @v1,@v2;
+select @v1,@v2;
+ @v1 | @v2
+-----+-----
+ 10 |
(1 row)
+select 10,@v1/2 into @v1,@v2;
+select @v1,@v2;
+ @v1 | @v2
+-----+-----
+ 10 | 5
+(1 row)
+
+select 10 into @value;
+select @value;
+ @value
+--------
+ 10
+(1 row)
+
+select sha(@value) into @sha_value;
+select @sha_value;
+ @sha_value
+------------------------------------------
+ b1d5781111d84f7b3fe45a0852e59758cd7a87e5
+(1 row)
+
+select -1,'hello' into @v1, @v2;
+select @v1, @v2;
+ @v1 | @v2
+-----+-------
+ -1 | hello
+(1 row)
+
+select @v1 + 1, abs(@v1), concat(@v2, ' world!') into @v3, @abs, @concat;
+select @v3, @abs, @concat;
+ @v3 | @abs | @concat
+-----+------+--------------
+ 0 | 1 | hello world!
+(1 row)
+
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "b_format_behavior_compat_options=''" >/dev/null 2>&1
+\! sleep 1
+create table t2(a int, b int);
+set b_format_behavior_compat_options="enable_set_variables";
+create or replace procedure test(a int, b int) as
+declare
+ num3 int := a;
+ num4 int := b;
+ pragma autonomous_transaction;
+
+begin
+ set b_format_behavior_compat_options="enable_set_variables";
+ select num3,num4 into @v1,@v2;
+ commit;
+ insert into t2 values(@v1, @v2);
+ rollback;
+ set b_format_behavior_compat_options="enable_set_variables";
+ insert into t2 values(@v1-1, @v2+1);
+end;
+/
+call test(1,1);
+ test
+------
+
+(1 row)
+
+select * from t2;
+ a | b
+---+---
+ 0 | 2
+(1 row)
+
+\c regression
+drop database if exists test_select_into_var;
diff --git a/src/test/regress/output/set_system_variables_test.source b/src/test/regress/output/set_system_variables_test.source
index 96b7845fd..c08cff1fd 100644
--- a/src/test/regress/output/set_system_variables_test.source
+++ b/src/test/regress/output/set_system_variables_test.source
@@ -592,6 +592,28 @@ raise info 'v1:%', v1;
raise info 'v2:%', v2;
end;
/
+-- test b_format_behavior_compat_options param
+\c test_set
+show b_format_behavior_compat_options;
+ b_format_behavior_compat_options
+----------------------------------
+
+(1 row)
+
+set b_format_behavior_compat_options = 'enable_set_variables, enable_set_variables';
+show b_format_behavior_compat_options;
+ b_format_behavior_compat_options
+--------------------------------------------
+ enable_set_variables, enable_set_variables
+(1 row)
+
+set @v1 = 1;
+select @v1;
+ @v1
+-----
+ 1
+(1 row)
+
\c regression
drop database if exists test_set;
\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_set_variable_b_format=off" >/dev/null 2>&1
diff --git a/src/test/regress/output/set_user_defined_variables_test.source b/src/test/regress/output/set_user_defined_variables_test.source
index b4b9e8dc4..0a7ee3117 100644
--- a/src/test/regress/output/set_user_defined_variables_test.source
+++ b/src/test/regress/output/set_user_defined_variables_test.source
@@ -1227,6 +1227,79 @@ select @v1, @v2, @v3, @v4;
1 | 2 | 3 | 3
(1 row)
+\c test_set
+set @v2 := 'aaa';
+set @V4 :=(SELECT @v2 + 1);
+select @v2, @v4;
+ @v2 | @v4
+-----+-----
+ aaa | 1
+(1 row)
+
+set @aa = 10;
+set @bb = (select sha(@aa));
+select @aa, @bb;
+ @aa | @bb
+-----+------------------------------------------
+ 10 | b1d5781111d84f7b3fe45a0852e59758cd7a87e5
+(1 row)
+
+\c test_set
+set b_format_behavior_compat_options="enable_set_variables";
+drop table if exists t2;
+create table t2(a int, b int);
+insert into t2 values(1,2);
+create or replace procedure test(a int, b int) as
+declare
+ num3 int := a;
+ num4 int := b;
+ pragma autonomous_transaction;
+
+begin
+ set b_format_behavior_compat_options="enable_set_variables";
+ set @v1 := num3, @v2 := num4;
+ insert into t2 values(@v1, @v2);
+ rollback;
+ set b_format_behavior_compat_options="enable_set_variables";
+ insert into t2 values(@v1-1, @v2+1);
+end;
+/
+call test(1,1);
+ test
+------
+
+(1 row)
+
+\c test_set
+set @a := 1, @b := @a;
+select @a, @b;
+ @a | @b
+----+----
+ 1 |
+(1 row)
+
+set @a := @c := 2, @b := @d := @a;
+select @a, @b, @c, @d;
+ @a | @b | @c | @d
+----+----+----+----
+ 2 | 1 | 2 | 1
+(1 row)
+
+\c test_set
+set @a := 1, @b := @a;
+select @a, @b;
+ @a | @b
+----+----
+ 1 |
+(1 row)
+
+set @a := @c := 2, @b := @d := @a, @@session_timeout = 700, @e := @f := @a;
+select @a, @b, @c, @d, @e, @f, @@session_timeout;
+ @a | @b | @c | @d | @e | @f | ?column?
+----+----+----+----+----+----+----------
+ 2 | 1 | 2 | 1 | 1 | 1 | 700
+(1 row)
+
\c regression
drop database if exists test_set;
\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_set_variable_b_format=off" >/dev/null 2>&1
diff --git a/src/test/regress/output/single_node_user_mapping.source b/src/test/regress/output/single_node_user_mapping.source
index 0c22ab74f..55760d55f 100644
--- a/src/test/regress/output/single_node_user_mapping.source
+++ b/src/test/regress/output/single_node_user_mapping.source
@@ -6,6 +6,10 @@ SELECT pg_delete_audit('1012-11-10', '3012-11-11');
(1 row)
--- prepare
+\! echo $GAUSSHOME | xargs -I{} rm -f {}/bin/usermapping.key.cipher
+\! echo $GAUSSHOME | xargs -I{} rm -f {}/bin/usermapping.key.rand
+\! echo $GAUSSHOME | xargs -I{} @abs_bindir@/gs_guc generate -S 123456@pwd -D {}/bin -o usermapping > /dev/null 2>&1 ; echo $?
+0
\! echo $OLDGAUSSHOME | xargs -I{} rm -f {}/bin/usermapping.key.cipher
\! echo $OLDGAUSSHOME | xargs -I{} rm -f {}/bin/usermapping.key.rand
\! echo $OLDGAUSSHOME | xargs -I{} @abs_bindir@/gs_guc generate -S 123456@pwd -D {}/bin -o usermapping > /dev/null 2>&1 ; echo $?
@@ -17,11 +21,19 @@ CREATE SERVER dummy_srv FOREIGN DATA WRAPPER dummy;
CREATE SERVER dummy_srv2 FOREIGN DATA WRAPPER dummy;
CREATE USER MAPPING FOR current_user SERVER dummy_srv OPTIONS(username 'test', password 'shouldBeEncrypt');
CREATE USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(username 'test');
-ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(ADD password 'shouldBeEncrypt');
-ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(SET password 'shouldBeEncrypt2');
+ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(ADD password 'shouldBe''''''''''''''''''''''''''''''Encrypt');
+ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(SET password 'shouldBe''''''''''''''''''''''''''''''Encrypt2');
ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(DROP password);
DROP USER MAPPING FOR current_user SERVER dummy_srv2;
DROP USER MAPPING FOR current_user SERVER dummy_srv;
+-- test with password keyword double-quote
+CREATE USER MAPPING FOR current_user SERVER dummy_srv OPTIONS(username 'test', "password" 'shouldBe''''''''''''''''''''''''''''''Encrypt_tmp');
+CREATE USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(username 'test');
+ALTER USER MAPPING FOR current_user SERVER dummy_srv OPTIONS(SET "password" 'shouldBe''''''''''''''''''''''''''''''Encrypt_tmp2');
+ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(ADD "password" 'shouldBe''''''''''''''''''''''''''''''Encrypt_tmp');
+ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(SET "password" 'shouldBe''''''''''''''''''''''''''''''Encrypt_tmp2');
+DROP USER MAPPING FOR current_user SERVER dummy_srv2;
+DROP USER MAPPING FOR current_user SERVER dummy_srv;
DROP SERVER dummy_srv;
DROP SERVER dummy_srv2;
--DROP FOREIGN DATA WRAPPER is not supported now
@@ -30,8 +42,8 @@ RESET SESSION AUTHORIZATION;
--can't drop role regress_usermapping_user, since FOREIGN DATA WRAPPER dummy depend on it
--DROP ROLE regress_usermapping_user;
SELECT object_name,detail_info FROM pg_query_audit('2022-01-13 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_user' and object_name = 'current_user';
- object_name | detail_info
---------------+------------------------------------------------------------------------------------------------------
+ object_name | detail_info
+--------------+--------------------------------------------------------------------------------------------------------
current_user | CREATE USER MAPPING FOR current_user SERVER dummy_srv OPTIONS(username 'test', password '********');
current_user | CREATE USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(username 'test');
current_user | ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(ADD password '********');
@@ -39,7 +51,14 @@ SELECT object_name,detail_info FROM pg_query_audit('2022-01-13 9:30:00', '2031-1
current_user | ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(DROP password);
current_user | DROP USER MAPPING FOR current_user SERVER dummy_srv2;
current_user | DROP USER MAPPING FOR current_user SERVER dummy_srv;
-(7 rows)
+ current_user | CREATE USER MAPPING FOR current_user SERVER dummy_srv OPTIONS(username 'test', "password" '********');
+ current_user | CREATE USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(username 'test');
+ current_user | ALTER USER MAPPING FOR current_user SERVER dummy_srv OPTIONS(SET "password" '********');
+ current_user | ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(ADD "password" '********');
+ current_user | ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(SET "password" '********');
+ current_user | DROP USER MAPPING FOR current_user SERVER dummy_srv2;
+ current_user | DROP USER MAPPING FOR current_user SERVER dummy_srv;
+(14 rows)
--clear audit log
SELECT pg_delete_audit('1012-11-10', '3012-11-11');
diff --git a/src/test/regress/output/slow_sql.source b/src/test/regress/output/slow_sql.source
new file mode 100644
index 000000000..e07c8cf75
--- /dev/null
+++ b/src/test/regress/output/slow_sql.source
@@ -0,0 +1,183 @@
+--?.*
+--?.*
+--?.*
+--?.*
+server signaled
+
+Total instances: 1. Failed instances: 0.
+Success to perform gs_guc!
+
+\c postgres
+create schema slow_sql;
+SET search_path = slow_sql, public;
+create table slow_sql.test(col1 int, col2 numeric, col3 text);
+create index index1 on slow_sql.test(col1);
+create index index2 on slow_sql.test(col2);
+create index index3 on slow_sql.test(col3);
+insert into slow_sql.test values (generate_series(1,100), generate_series(101,200), generate_series(201,300));
+delete from statement_history;
+set track_stmt_stat_level='L0,L0';
+-- test col1(integer)
+select col1 from slow_sql.test where col1 = 12;
+ col1
+------
+ 12
+(1 row)
+
+select col2 from slow_sql.test where col1 = 123456781234567812345678;
+ col2
+------
+(0 rows)
+
+select col3 from slow_sql.test where col1 = '12';
+ col3
+------
+ 212
+(1 row)
+
+select * from slow_sql.test where col1::numeric = 12;
+ col1 | col2 | col3
+------+------+------
+ 12 | 112 | 212
+(1 row)
+
+select * from slow_sql.test where col1::text = '12';
+ col1 | col2 | col3
+------+------+------
+ 12 | 112 | 212
+(1 row)
+
+-- test col2(numeric)
+select col1 from slow_sql.test where col2 = 123456781234567812345678;
+ col1
+------
+(0 rows)
+
+select col2 from slow_sql.test where col2 = 12;
+ col2
+------
+(0 rows)
+
+select col3 from slow_sql.test where col2 = '123456781234567812345678';
+ col3
+------
+(0 rows)
+
+select * from slow_sql.test where col2::integer = 123456781234567812345678;
+ col1 | col2 | col3
+------+------+------
+(0 rows)
+
+select * from slow_sql.test where col2::text = '123456781234567812345678';
+ col1 | col2 | col3
+------+------+------
+(0 rows)
+
+-- test col3(text)
+select col1 from slow_sql.test where col3 = '12';
+ col1
+------
+(0 rows)
+
+select col2 from slow_sql.test where col3 = 12;
+ col2
+------
+(0 rows)
+
+select col3 from slow_sql.test where col3 = 123456781234567812345678;
+ col3
+------
+(0 rows)
+
+select * from slow_sql.test where col3::integer = 12;
+ col1 | col2 | col3
+------+------+------
+(0 rows)
+
+select * from slow_sql.test where col3::numeric = 12;
+ col1 | col2 | col3
+------+------+------
+(0 rows)
+
+-- test limit
+select col1 from slow_sql.test where col1 = 12 limit 4999;
+ col1
+------
+ 12
+(1 row)
+
+select col2 from slow_sql.test where col1 = 12 limit 5000;
+ col2
+------
+ 112
+(1 row)
+
+select col3 from slow_sql.test where col1 = 12 limit 5001;
+ col3
+------
+ 212
+(1 row)
+
+-- test all
+select col1,col2 from slow_sql.test where col1 = 12 limit 4999;
+ col1 | col2
+------+------
+ 12 | 112
+(1 row)
+
+select col1,col3 from slow_sql.test where col1 = 123456781234567812345678 limit 4999;
+ col1 | col3
+------+------
+(0 rows)
+
+select col2,col3 from slow_sql.test where col1 = 12 limit 5000;
+ col2 | col3
+------+------
+ 112 | 212
+(1 row)
+
+select col1,col2,col3 from slow_sql.test where col1 = 123456781234567812345678 limit 5000;
+ col1 | col2 | col3
+------+------+------
+(0 rows)
+
+set track_stmt_stat_level='OFF,L0';
+--wait insert history
+insert into slow_sql.test values (generate_series(1,10000), generate_series(10001,20000), generate_series(20001,30000));
+select schema_name, query, advise from statement_history order by start_time;
+ schema_name | query | advise
+------------------+------------------------------------------------------------------+------------------------------------------------------
+ slow_sql, public | set track_stmt_stat_level='L0,L0'; |
+ slow_sql, public | select col1 from slow_sql.test where col1 = ?; |
+ slow_sql, public | select col2 from slow_sql.test where col1 = ?; | Cast Function Cause Index Miss.
+ slow_sql, public | select col3 from slow_sql.test where col1 = ?; |
+ slow_sql, public | select * from slow_sql.test where col1::numeric = ?; | Cast Function Cause Index Miss.
+ slow_sql, public | select * from slow_sql.test where col1::text = ?; | Cast Function Cause Index Miss.
+ slow_sql, public | select col1 from slow_sql.test where col2 = ?; |
+ slow_sql, public | select col2 from slow_sql.test where col2 = ?; |
+ slow_sql, public | select col3 from slow_sql.test where col2 = ?; |
+ slow_sql, public | select * from slow_sql.test where col2::integer = ?; | Cast Function Cause Index Miss.
+ slow_sql, public | select * from slow_sql.test where col2::text = ?; | Cast Function Cause Index Miss.
+ slow_sql, public | select col1 from slow_sql.test where col3 = ?; |
+ slow_sql, public | select col2 from slow_sql.test where col3 = ?; | Cast Function Cause Index Miss.
+ slow_sql, public | select col3 from slow_sql.test where col3 = ?; | Cast Function Cause Index Miss.
+ slow_sql, public | select * from slow_sql.test where col3::integer = ?; | Cast Function Cause Index Miss.
+ slow_sql, public | select * from slow_sql.test where col3::numeric = ?; | Cast Function Cause Index Miss.
+ slow_sql, public | select col1 from slow_sql.test where col1 = ? limit ?; |
+ slow_sql, public | select col2 from slow_sql.test where col1 = ? limit ?; | Limit too much rows.
+ slow_sql, public | select col3 from slow_sql.test where col1 = ? limit ?; | Limit too much rows.
+ slow_sql, public | select col1,col2 from slow_sql.test where col1 = ? limit ?; |
+ slow_sql, public | select col1,col3 from slow_sql.test where col1 = ? limit ?; | Cast Function Cause Index Miss.
+ slow_sql, public | select col2,col3 from slow_sql.test where col1 = ? limit ?; | Limit too much rows.
+ slow_sql, public | select col1,col2,col3 from slow_sql.test where col1 = ? limit ?; | Cast Function Cause Index Miss. Limit too much rows.
+(23 rows)
+
+--?.*
+--?.*
+--?.*
+--?.*
+server signaled
+
+Total instances: 1. Failed instances: 0.
+Success to perform gs_guc!
+
diff --git a/src/test/regress/output/subscription.source b/src/test/regress/output/subscription.source
index 52a5e49c3..2da18de9a 100644
--- a/src/test/regress/output/subscription.source
+++ b/src/test/regress/output/subscription.source
@@ -45,6 +45,10 @@ select pg_sleep(1);
(1 row)
--- prepare
+\! echo $GAUSSHOME | xargs -I{} rm -f {}/bin/subscription.key.cipher
+\! echo $GAUSSHOME | xargs -I{} rm -f {}/bin/subscription.key.rand
+\! echo $GAUSSHOME | xargs -I{} @abs_bindir@/gs_guc generate -S 123456@pwd -D {}/bin -o subscription > /dev/null 2>&1 ; echo $?
+0
\! echo $OLDGAUSSHOME | xargs -I{} rm -f {}/bin/subscription.key.cipher
\! echo $OLDGAUSSHOME | xargs -I{} rm -f {}/bin/subscription.key.rand
\! echo $OLDGAUSSHOME | xargs -I{} @abs_bindir@/gs_guc generate -S 123456@pwd -D {}/bin -o subscription > /dev/null 2>&1 ; echo $?
@@ -173,9 +177,22 @@ BEGIN;
ALTER SUBSCRIPTION testsub_rename REFRESH PUBLICATION;
ERROR: ALTER SUBSCRIPTION ... REFRESH cannot run inside a transaction block
COMMIT;
+-- success, password len with 999
+CREATE SUBSCRIPTION sub_len_999 CONNECTION 'host=192.16''''8.1.50 port=5432 user=foo dbname=foodb password=xxin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*lei' PUBLICATION insert_only WITH (connect = false);
+WARNING: tables were not subscribed, you will have to run ALTER SUBSCRIPTION ... REFRESH PUBLICATION to subscribe the tables
+-- fail, password len with 1000
+CREATE SUBSCRIPTION sub_len_1000 CONNECTION 'host=192.16''''8.1.50 port=5432 user=foo dbname=foodb password=xxin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leid' PUBLICATION insert_only WITH (enabled = false);
+ERROR: Password can't contain more than 999 characters.
+-- fail, set password len with 1000
+ALTER SUBSCRIPTION sub_len_999 SET (conninfo='host=192.16''''8.1.50 port=5432 user=foo dbname=foodb password=xxin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leid');
+ERROR: Password can't contain more than 999 characters.
+-- fail, set password len with 1000
+ALTER SUBSCRIPTION sub_len_999 CONNECTION 'host=192.16''''8.1.50 port=5432 user=foo dbname=foodb password=xxin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leideyipi@123xin!@#$%^&*(!@#@$%^&*!@#$%^&*leid';
+ERROR: Password can't contain more than 999 characters.
--- drop subscription
DROP SUBSCRIPTION IF EXISTS testsub_rename;
DROP SUBSCRIPTION IF EXISTS testsub_maskconninfo;
+DROP SUBSCRIPTION IF EXISTS sub_len_999;
--- cleanup
RESET SESSION AUTHORIZATION;
DROP ROLE regress_subscription_user;
@@ -298,8 +315,8 @@ ERROR: could not find tuple for replication origin 'origin_test'
CONTEXT: referenced column: pg_replication_origin_session_setup
drop table t_origin_test;
SELECT object_name,detail_info FROM pg_query_audit('2022-01-13 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_publication_subscription' order by time;
- object_name | detail_info
-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ object_name | detail_info
+----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
testsub | DROP SUBSCRIPTION IF EXISTS testsub;
testsub | CREATE SUBSCRIPTION testsub CONNECTION **********************PUBLICATION testpub WITH (CONNECT=false, slot_name='testsub', synchronous_commit=off);
testsub_maskconninfo | CREATE SUBSCRIPTION testsub_maskconninfo CONNECTION ***************************************************************************************************PUBLICATION testpub WITH (CONNECT=false, slot_name='testsub', synchronous_commit=off);
@@ -313,9 +330,11 @@ SELECT object_name,detail_info FROM pg_query_audit('2022-01-13 9:30:00', '2031-1
testsub | ALTER SUBSCRIPTION testsub owner to regress_subscription_user2;
testsub | ALTER SUBSCRIPTION testsub SET (binary=true);
testsub | ALTER SUBSCRIPTION testsub rename to testsub_rename;
+ sub_len_999 | CREATE SUBSCRIPTION sub_len_999 CONNECTION *********************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************PUBLICATION insert_only WITH (connect = false);
testsub_rename | DROP SUBSCRIPTION IF EXISTS testsub_rename;
testsub_maskconninfo | DROP SUBSCRIPTION IF EXISTS testsub_maskconninfo;
-(15 rows)
+ sub_len_999 | DROP SUBSCRIPTION IF EXISTS sub_len_999;
+(17 rows)
--clear audit log
SELECT pg_delete_audit('1012-11-10', '3012-11-11');
diff --git a/src/test/regress/output/ustore_ddl.source b/src/test/regress/output/ustore_ddl.source
index 2c1ae41cd..69bd44ad9 100644
--- a/src/test/regress/output/ustore_ddl.source
+++ b/src/test/regress/output/ustore_ddl.source
@@ -227,10 +227,10 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
{"table_name":"public.bmsql_order_line","op_type":"INSERT","columns_name":["ol_w_id","ol_d_id","ol_o_id","ol_number","ol_i_id","ol_delivery_d","ol_amount","ol_supply_w_id","ol_quantity","ol_dist_info"],"columns_type":["integer","integer","integer","integer","integer","timestamp without time zone","numeric","integer","integer","character"],"columns_val":["1","1","1","1","1","null","null","null","null","'123 '"],"old_keys_name":[],"old_keys_type":[],"old_keys_val":[]}
--?.*
BEGIN
- {"table_name":"public.bmsql_order_line","op_type":"UPDATE","columns_name":["ol_w_id","ol_d_id","ol_o_id","ol_number","ol_i_id","ol_delivery_d","ol_amount","ol_supply_w_id","ol_quantity","ol_dist_info"],"columns_type":["integer","integer","integer","integer","integer","timestamp without time zone","numeric","integer","integer","character"],"columns_val":["1","1","1","1","1","null","null","null","null","'ss '"],"old_keys_name":["ol_w_id","ol_d_id","ol_o_id","ol_number","ol_i_id","ol_dist_info"],"old_keys_type":["integer","integer","integer","integer","integer","character"],"old_keys_val":["1","1","1","1","1","'123 '"]}
+ {"table_name":"public.bmsql_order_line","op_type":"UPDATE","columns_name":["ol_w_id","ol_d_id","ol_o_id","ol_number","ol_i_id","ol_delivery_d","ol_amount","ol_supply_w_id","ol_quantity","ol_dist_info"],"columns_type":["integer","integer","integer","integer","integer","timestamp without time zone","numeric","integer","integer","character"],"columns_val":["1","1","1","1","1","null","null","null","null","'ss '"],"old_keys_name":["ol_w_id","ol_d_id","ol_o_id","ol_number"],"old_keys_type":["integer","integer","integer","integer"],"old_keys_val":["1","1","1","1"]}
--?.*
BEGIN
- {"table_name":"public.bmsql_order_line","op_type":"DELETE","columns_name":[],"columns_type":[],"columns_val":[],"old_keys_name":["ol_w_id","ol_d_id","ol_o_id","ol_number","ol_i_id","ol_dist_info"],"old_keys_type":["integer","integer","integer","integer","integer","character"],"old_keys_val":["1","1","1","1","1","'ss '"]}
+ {"table_name":"public.bmsql_order_line","op_type":"DELETE","columns_name":[],"columns_type":[],"columns_val":[],"old_keys_name":["ol_w_id","ol_d_id","ol_o_id","ol_number"],"old_keys_type":["integer","integer","integer","integer"],"old_keys_val":["1","1","1","1"]}
--?.*
(25 rows)
diff --git a/src/test/regress/output/ustore_decode.source b/src/test/regress/output/ustore_decode.source
index 2803afd4b..e0aecdb8b 100644
--- a/src/test/regress/output/ustore_decode.source
+++ b/src/test/regress/output/ustore_decode.source
@@ -69,7 +69,7 @@ SELECT data FROM pg_logical_slot_get_changes('ustore_slot', NULL, NULL);
--?.*data
--?.*
--?.*BEGIN ?.*
- {"table_name":"public.t1","op_type":"DELETE","columns_name":[],"columns_type":[],"columns_val":[],"old_keys_name":["a","b"],"old_keys_type":["integer","text"],"old_keys_val":["3","'abc'"]}
+ {"table_name":"public.t1","op_type":"DELETE","columns_name":[],"columns_type":[],"columns_val":[],"old_keys_name":["a","b","c"],"old_keys_type":["integer","text","integer"],"old_keys_val":["3","'abc'","null"]}
--?.*COMMIT ?.* CSN ?.*
(3 rows)
diff --git a/src/test/regress/output/view_definer_test.source b/src/test/regress/output/view_definer_test.source
old mode 100644
new mode 100755
index d8c5b2cf2..bf3350d7a
--- a/src/test/regress/output/view_definer_test.source
+++ b/src/test/regress/output/view_definer_test.source
@@ -168,6 +168,7 @@ SET
SET
SET
SET
+SET
CREATE TABLE
ALTER TABLE
CREATE VIEW
diff --git a/src/test/regress/parallel_schedule0 b/src/test/regress/parallel_schedule0
index 2c1ee7179..e38dd02ed 100644
--- a/src/test/regress/parallel_schedule0
+++ b/src/test/regress/parallel_schedule0
@@ -11,6 +11,7 @@
#test: recovery_2pc_tools recovery_2pc_tools02
test: recovery_2pc_tools
+test: sqlbypass_partition
test: sqlpatch_base
test: sqlpatch_func
@@ -41,10 +42,15 @@ test: postgres_fdw postgres_fdw_cstore postgres_fdw_partition
#test user_defined_variable
test: set_user_defined_variables_test
-test: select_into_user_defined_variables
-
test: set_system_variables_test
+# test for set [session | global] transaction
+test: set_transaction_test
+
+# test select into statement
+test: select_into_user_defined_variables
+test: select_into_file
+
test: gs_dump_package trigger_dump
test: out_param_func
#test: sqlcode_cursor
@@ -72,14 +78,21 @@ test: hw_pwd_encryption_sm3
test: sync_standy_names
+#test event
+test: event
+test: event_dump_audit
+
#test sha func
test: single_node_sha
+#test b format collation
+test: test_b_format_collate charset_b_format
+
# test subpartition
test: hw_subpartition_createtable hw_subpartition_scan hw_subpartition_select hw_subpartition_split hw_subpartition_truncate hw_subpartition_update hw_subpartition_gpi hw_subpartition_analyze_vacuum hw_subpartition_alter_table hw_subpartition_index hw_subpartition_add_drop_partition hw_subpartition_tablespace hw_subpartition_ddl_index hw_subpartition_size
test: hw_subpartition_vacuum_partition hw_subpartition_tablespace_global
test: gs_dump_subpartition
-test: partition_dml_operations partition_minmax
+test: partition_dml_operations partition_minmax partition_pruning hw_partitionno hw_partition_parallel
test: partition_param_path
#test: partition_cost_model
test: row_partition_iterator_elimination col_partition_iterator_elimination
@@ -633,6 +646,7 @@ test: holdable_cursor
test: alter_table_000 alter_table_002 alter_table_003
#test: alter_table_001
+test: alter_table_modify alter_table_modify_ustore alter_table_modify_ltt alter_table_modify_gtt
#test: with
@@ -700,6 +714,10 @@ test: hw_pwd_reuse
test: hw_audit_toughness
test: hw_audit_detailinfo
+test: hw_audit_client
+test: hw_audit_full
+test: hw_audit_system_func
+
test: performance_enhance
test: explain_fqs
test: explain_pbe
@@ -911,6 +929,8 @@ test: hw_partition_hash_dml
test: hw_partition_hash_dql
test: hw_partition_list_dml
test: hw_partition_list_dql
+test: hw_partition_b_db
+test: dump_partition_b_db
test: hw_cipher_sm4
test: hw_cipher_aes128
test: hw_pwd_encryption_sm3
@@ -1048,3 +1068,6 @@ test: show_warnings prevent_table_in_sys_schema
# partition expression key
test: partition_expr_key
test: alter_foreign_schema
+
+# test for slow_sql
+test: slow_sql
diff --git a/src/test/regress/script/gs_basebackup/gs_basebackup.sh b/src/test/regress/script/gs_basebackup/gs_basebackup.sh
old mode 100644
new mode 100755
diff --git a/src/test/regress/sql/aggregate_B_database.sql b/src/test/regress/sql/aggregate_B_database.sql
index 39a525394..a226b97f6 100644
--- a/src/test/regress/sql/aggregate_B_database.sql
+++ b/src/test/regress/sql/aggregate_B_database.sql
@@ -1,12 +1,24 @@
-- test normal db
-create database test;
-\c test
+create database group_concat_test1 dbcompatibility 'A';;
+\c group_concat_test1
+CREATE TABLE t(id int, v text);
+INSERT INTO t(id, v) VALUES(1, 'A'),(2, 'B'),(1, 'C'),(2, 'DDDDDDDDDDDDDDDDDDDDDDDDDDDDD');
+select id, group_concat(v separator ';') from t group by id order by id asc;
+create database group_concat_test2 dbcompatibility 'C';;
+\c group_concat_test2
+CREATE TABLE t(id int, v text);
+INSERT INTO t(id, v) VALUES(1, 'A'),(2, 'B'),(1, 'C'),(2, 'DDDDDDDDDDDDDDDDDDDDDDDDDDDDD');
+select id, group_concat(v separator ';') from t group by id order by id asc;
+create database group_concat_test3 dbcompatibility 'PG';;
+\c group_concat_test3
CREATE TABLE t(id int, v text);
INSERT INTO t(id, v) VALUES(1, 'A'),(2, 'B'),(1, 'C'),(2, 'DDDDDDDDDDDDDDDDDDDDDDDDDDDDD');
select id, group_concat(v separator ';') from t group by id order by id asc;
\c regression
-drop database test;
--- test group_concat (compatible with B db)
+drop database group_concat_test1;
+drop database group_concat_test2;
+drop database group_concat_test3;
+-- test group_concat (in B db)
create database test_group_concat_B_db dbcompatibility 'B';
\c test_group_concat_B_db
set group_concat_max_len to 20480;
@@ -118,9 +130,9 @@ SELECT mgrno, ename, job, group_concat(ename,job) OVER(PARTITION BY mgrno) AS em
-- test for plan changes, dfx
SET explain_perf_mode=pretty;
-EXPLAIN verbose SELECT deptno, group_concat(ename ORDER BY ename SEPARATOR ',') AS employees_order_by_ename_varchar FROM emp GROUP BY deptno;
-EXPLAIN verbose SELECT deptno, group_concat(sign ORDER BY email SEPARATOR '##') AS email_order_by_email_text_en FROM emp GROUP BY deptno;
-EXPLAIN verbose SELECT deptno, group_concat(VARIADIC ARRAY[ename,':',job] ORDER BY ename) AS bonus_order_by_bonus_numeric FROM emp GROUP BY deptno;
+EXPLAIN (costs off) SELECT deptno, group_concat(ename ORDER BY ename SEPARATOR ',') AS employees_order_by_ename_varchar FROM emp GROUP BY deptno;
+EXPLAIN (costs off) SELECT deptno, group_concat(sign ORDER BY email SEPARATOR '##') AS email_order_by_email_text_en FROM emp GROUP BY deptno;
+EXPLAIN (costs off) SELECT deptno, group_concat(VARIADIC ARRAY[ename,':',job] ORDER BY ename) AS bonus_order_by_bonus_numeric FROM emp GROUP BY deptno;
-- test for date print format
SET datestyle = 'SQL,DMY';
@@ -185,10 +197,11 @@ select * from test_group_concat_bin order by bt_col1;
select group_concat(BT_COL2,BT_COL3,BT_COL4 order by BT_COL1 separator '') from test_group_concat_bin;
\c regression
+clean connection to all force for database test_group_concat_B_db;
drop database test_group_concat_B_db;
-create database t dbcompatibility 'B';
-\c t;
+create database test_group_concat_max_len dbcompatibility 'B';
+\c test_group_concat_max_len;
CREATE TABLE t(id int, v text);
INSERT INTO t(id, v) VALUES(1, 'A'),(2, 'B'),(1, 'C'),(2, 'DDDDDDDDDDDDDDDDDDDDDDDDDDDDD');
@@ -202,12 +215,12 @@ show group_concat_max_len;
select id, group_concat(v separator ';') from t group by id order by id asc;
--alter database XXX set XXX to XXX (current session)
-alter database t set group_concat_max_len to 10;
+alter database test_group_concat_max_len set group_concat_max_len to 10;
show group_concat_max_len;
select id, group_concat(v separator ';') from t group by id order by id asc;
--new session
\c regression
-\c t
+\c test_group_concat_max_len
show group_concat_max_len;
select id, group_concat(v separator ';') from t group by id order by id asc;
@@ -219,7 +232,7 @@ select id, group_concat(v separator ';') from t group by id order by id asc;
--show database value above
\c regression
-\c t
+\c test_group_concat_max_len
show group_concat_max_len;
select id, group_concat(v separator ';') from t group by id order by id asc;
@@ -228,4 +241,5 @@ set group_concat_max_len to -1;
set group_concat_max_len to 9223372036854775808;
\c regression
-drop database t;
\ No newline at end of file
+clean connection to all force for database test_group_concat_max_len;
+drop database test_group_concat_max_len;
\ No newline at end of file
diff --git a/src/test/regress/sql/alter_table_002.sql b/src/test/regress/sql/alter_table_002.sql
index fba70c229..7f102f8e7 100644
--- a/src/test/regress/sql/alter_table_002.sql
+++ b/src/test/regress/sql/alter_table_002.sql
@@ -331,6 +331,17 @@ alter table def_test alter column c2 set default 20;
-- set defaults on a non-existent column: this should fail
alter table def_test alter column c3 set default 30;
+-- create rule based on table
+create table t_base (id int);
+
+create table t_actual (id int);
+insert into t_actual values(2);
+
+select relname,reloptions,relkind from pg_class where relname='t_base' or relname='t_actual' order by 1;
+CREATE RULE "_RETURN" AS ON SELECT TO t_base DO INSTEAD SELECT * FROM t_actual;
+select relname,reloptions,relkind from pg_class where relname='t_base' or relname='t_actual' order by 1;
+drop table t_actual cascade;
+
-- set defaults on views: we need to create a view, add a rule
-- to allow insertions into it, and then alter the view to add
-- a default
diff --git a/src/test/regress/sql/alter_table_003.sql b/src/test/regress/sql/alter_table_003.sql
index ffa8962e8..fa03bd15a 100644
--- a/src/test/regress/sql/alter_table_003.sql
+++ b/src/test/regress/sql/alter_table_003.sql
@@ -617,3 +617,2635 @@ ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2;
DROP TABLE alter2.tt8;
DROP SCHEMA alter2;
+
+create database test_first_after_A dbcompatibility 'A';
+\c test_first_after_A
+
+-- test add column ... first | after columnname
+-- common scenatios
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 clob first, add f7 blob after f2;
+alter table t1 add f8 int, add f9 text first, add f10 float after f3;
+\d+ t1
+select * from t1;
+
+-- 1 primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 blob first, add f7 clob after f2;
+alter table t1 add f8 int, add f9 text first, add f10 float after f3;
+select * from t1;
+
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 drop f1, add f6 text, add f7 int primary key first, add f8 float after f3;
+\d+ t1;
+
+-- 2 unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int unique first, add f7 float unique after f3;
+select * from t1;
+
+-- 3 default and generated column
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int default 1 first, add f7 float default 7 after f3;
+select * from t1;
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored);
+insert into t1 values(1, 2, 3), (11, 22, 33);
+alter table t1 add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f1 + f3) stored after f5;
+select * from t1;
+
+-- 5 NULL and NOT NULL
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 drop f1, drop f2, add f6 int null first, add f7 float not null after f3;
+\d+ t1
+
+-- 6 check constraint
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (1, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f2, add f6 text check (f1 > 0) first, add f7 int check(f7 - f1 > 0) after f3;
+select * from t1;
+
+-- 7 foreign key
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+drop table if exists t_pri2 cascade;
+create table t_pri1(f1 text, f2 int primary key);
+insert into t_pri1 values('a', 1), ('b', 2);
+create table t_pri2(f1 text, f2 bool, f4 int primary key);
+insert into t_pri2 values('a', true, 1), ('b', false, 2);
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool);
+insert into t1 values(1, 2, true), (2, 2, false);
+alter table t1 drop f2, add f4 int references t_pri2(f4) first;
+select * from t1;
+alter table t1 drop f4, add f4 int references t_pri2(f4) after f1;
+select * from t1;
+
+-- partition table
+drop table if exists t1 cascade;
+create table t1
+(f1 int, f2 int, f3 int)
+partition by range(f1, f2)
+(
+ partition t1_p0 values less than (10, 0),
+ partition t1_p1 values less than (20, 0),
+ partition t1_p2 values less than (30, 0)
+);
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+
+alter table t1 add f4 int first, add f5 int after f1;
+\d+ t1
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+
+-- subpartition table
+drop table if exists range_range cascade;
+create table range_range(id int, gender varchar not null, birthday date not null)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24');
+insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08');
+insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21');
+
+-- test pg_partition
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+alter table range_range add f1 int default 1 first, add f2 text after id;
+\d+ range_range
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+select * from range_range;
+
+-- pg_constraint test
+set enable_default_ustore_table = on;
+drop table if exists t_pri cascade;
+drop table if exists t1 cascade;
+create table t_pri(f1 int, f2 int, f3 int, primary key(f2, f3));
+create table t1
+(
+ f1 int, f2 int, f3 varchar(20), f4 int, f5 int, f6 int, f7 int,
+ foreign key(f1, f2) references t_pri(f2, f3),
+ unique(f3, f4),
+ check(f5 = 10),
+ unique(f4, f5) include(f6, f7)
+);
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't1') order by conname;
+
+alter table t_pri add f4 int first, add f5 int after f2;
+alter table t1 add f8 int primary key first, add f9 int unique after f3;
+\d+ t_pri
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't_pri') order by conname;
+\d+ t1
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't1') order by conname;
+
+set enable_default_ustore_table = off;
+
+-- pg_index test
+drop table if exists t1 cascade;
+create table t1
+(
+ f1 int, f2 int, f3 varchar(20), f4 int, f5 int, f6 int, f7 int,
+ primary key(f1, f2),
+ unique(f3, f4),
+ check(f5 = 10)
+);
+create unique index partial_t1_idx on t1(f5, abs(f6)) where f5 + f6 - abs(f7) > 0;
+
+select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1') order by 1, 2, 3;
+
+alter table t1 add f8 int first, add f9 int unique after f1;
+\d+ t1
+select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1') order by 1, 2, 3;
+
+-- pg_attribute test
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int);
+select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum;
+
+alter table t1 add f4 int default 4 first;
+\d+ t1
+select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum;
+
+alter table t1 drop f2, add f5 int default 5 after f1;
+\d+ t1
+select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum;
+
+-- pg_attrdef test
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 int, f3 int default 3, f4 int generated always as (f2 + f3) stored);
+select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum;
+
+alter table t1 add f5 text default 'aaa' first;
+\d+ t1
+select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum;
+
+alter table t1 drop f2, add f6 int generated always as (f1 + abs(f3)) stored after f1;
+\d+ t1
+select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum;
+
+-- pg_depend test
+drop table if exists t1 cascade;
+create table t1(f1 int default 10, f2 int primary key, f3 int generated always as (f1 + f2) stored);
+select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend
+ where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5;
+
+alter table t1 add t1 add f4 int first;
+\d+ t1
+select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend
+ where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5;
+alter table t1 drop f2, add f6 int, add f7 int generated always as (f1 + f6) stored after f1;
+\d+ t1
+select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend
+ where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5;
+
+-- pg_rewrite test
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int);
+insert into t1 values(1, 2, 3), (11, 22, 33);
+create view t1_view1 as select * from t1;
+create view t1_view2 as select f1, f2 from t1;
+\d+ t1_view1
+\d+ t1_view2
+\d+ t1
+select pg_get_viewdef('t1_view1');
+select pg_get_viewdef('t1_view2');
+select * from t1_view1;
+select * from t1_view2;
+select * from t1;
+alter table t1 add f4 int first, add f5 int after f1;
+\d+ t1_view1
+\d+ t1_view2
+\d+ t1
+select pg_get_viewdef('t1_view1');
+select pg_get_viewdef('t1_view2');
+select * from t1_view1;
+select * from t1_view2;
+select * from t1;
+
+-- pg_trigger test
+drop table if exists t1 cascade;
+create table t1(f1 boolean not null, f2 text, f3 int, f4 date);
+alter table t1 add primary key(f1);
+create or replace function dummy_update_func() returns trigger as $$
+begin
+ raise notice 'dummy_update_func(%) called: action = %, oid = %, new = %', TG_ARGV[0], TG_OP, OLD, NEW;
+ return new;
+end;
+$$ language plpgsql;
+
+drop trigger if exists f1_trig_update on t1;
+drop trigger if exists f1_trig_insert on t1;
+
+create trigger f1_trig_update after update of f1 on t1 for each row
+ when (not old.f1 and new.f1) execute procedure dummy_update_func('update');
+create trigger f1_trig_insert after insert on t1 for each row
+ when (not new.f1) execute procedure dummy_update_func('insert');
+
+select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname;
+
+alter table t1 add f5 int after f1, add f6 boolean first;
+\d+ t1
+select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname;
+
+-- pg_rlspolicy test
+drop table if exists t1 cascade;
+drop role if exists test_rlspolicy;
+create role test_rlspolicy nologin password 'Gauss_234';
+create table t1 (f1 int, f2 int, f3 text) partition by range (f1)
+(
+ partition t1_p0 values less than(10),
+ partition t1_p1 values less than(50),
+ partition t1_p2 values less than(100),
+ partition t1_p3 values less than(MAXVALUE)
+);
+
+INSERT INTO t1 VALUES (generate_series(1, 150) % 24, generate_series(1, 150), 'huawei');
+grant select on t1 to public;
+
+create row level security policy t1_rls1 on t1 as permissive to public using (f2 <= 20);
+create row level security policy t1_rls2 on t1 as restrictive to test_rlspolicy using (f1 < 30);
+
+\d+ t1
+select * from t1 limit 10;
+select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1');
+
+alter table t1 add f4 int generated always as (f1 + 100) stored after f1, add f5 int generated always as (f2 + 100) stored first;
+\d+ t1
+select * from t1 limit 10;
+select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1');
+drop table if exists t1 cascade;
+
+\c postgres
+drop database test_first_after_A;
+
+-- test add column ... first | after columnname in B compatibility
+create database test_first_after_B dbcompatibility 'b';
+\c test_first_after_B
+
+-- test add column ... first | after columnname in astore table
+-- ASTORE table
+-- common scenarios
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 clob first, add f7 blob after f2;
+alter table t1 add f8 int, add f9 text first, add f10 float after f3;
+\d+ t1
+select * from t1;
+
+-- 1 primary key
+-- 1.1.1 primary key in original table without data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 add f6 clob first, add f7 blob after f2;
+alter table t1 add f8 int, add f9 text first, add f10 float after f3;
+\d+ t1
+
+-- 1.1.2 primary key in original table with data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 blob first, add f7 clob after f2;
+alter table t1 add f8 int, add f9 text first, add f10 float after f3;
+select * from t1;
+
+-- 1.2.1 primary key in a table without data, add column with primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+-- error
+alter table t1 add f6 text, add f7 int primary key first, add f8 float after f3;
+select * from t1;
+
+-- 1.2.2 primary key in a table with data, add column with primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+-- error
+alter table t1 add f6 text, add f7 int primary key first, add f8 float after f3;
+select * from t1;
+
+-- 1.3.1 primary key in a table without data, drop primary key, then add column with primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 drop f1, add f6 text, add f7 int primary key first, add f8 float after f3;
+\d+ t1;
+
+-- 1.3.2 primary key in a table with data, drop primary key, then add column with primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1;
+-- error
+alter table t1 add f6 text, add f7 int primary key first, add f8 float after f3;
+select * from t1;
+
+-- 1.4.1 primary key in a table without data, drop primary key, then add column with primary key and default
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 drop f1, add f6 text, add f7 int primary key default 7 first, add f8 float after f3;
+\d+ t1
+
+-- 1.4.2 primary key in a table with data, drop primary key, then add column with primary key and default
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1;
+-- error
+alter table t1 add f6 text, add f7 int primary key default 7 first, add f8 float after f3;
+select * from t1;
+
+-- 1.5.1 primary key in a table without data, drop primary key, then add column with primary key and auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 drop f1, add f6 text, add f7 int primary key auto_increment first, add f8 float after f3;
+\d+ t1
+
+-- 1.5.2 primary key in a table with data, drop primary key, then add column with primary key and auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 text, add f7 int primary key auto_increment first, add f8 float after f3;
+select * from t1;
+
+-- 2 unique index
+-- 2.1.1 unique index in a table without data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 add f6 int first, add f7 float after f3;
+\d+ t1
+
+-- 2.1.2 unique index in a table with data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int first, add f7 float after f3;
+select * from t1;
+
+-- 2.2.1 unique index in a table without data, add column with unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 add f6 int unique first, add f7 float unique after f3;
+\d+ t1
+
+-- 2.2.2 unique index in a table with data, add column with unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int unique first, add f7 float unique after f3;
+select * from t1;
+
+-- 2.3.1 unique index in a table without data, drop unique index, add column with unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 drop f1, add f6 int unique first, add f7 float unique after f3;
+\d+ t1
+
+-- 2.3.2 unique index in a table with data, drop unique index, add column with unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int unique first, add f7 float unique after f3;
+select * from t1;
+
+-- 2.4.1 unique index in a table without data, drop unique index, add column with unique index and default
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 add f6 int unique default 6 first;
+alter table t1 drop f1, add f7 float unique default 7 after f3;
+\d+ t1
+
+-- 2.4.2 unique index in a table with data, drop unique index, add column with unique index and default
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+-- error
+alter table t1 add f6 int unique default 6 first;
+alter table t1 drop f1;
+-- error
+alter table t1 add f7 float unique default 7 after f3;
+select * from t1;
+
+-- 3 default and generated column
+-- 3.1.1 default in a table without data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 add f6 int first, add f7 float after f3;
+\d+ t1
+
+-- 3.1.2 default in a table with data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int first, add f7 float after f3;
+select * from t1;
+
+-- 3.2.1 default in a table without data, add column with default
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 add f6 int default 6 first, add f7 float default 7 after f3;
+\d+ t1
+
+-- 3.2.2 default in a table with data, add column with default
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int default 6 first, add f7 float default 7 after f3;
+select * from t1;
+
+-- 3.3.1 default in a table without data, drop default, add column with default
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 drop f1, add f6 int default 6 first, add f7 float default 7 after f3;
+\d+ t1
+
+-- 3.3.2 default in a table with data, drop default, add column with default
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int default 1 first, add f7 float default 7 after f3;
+select * from t1;
+
+-- 3.4.1 generated column in a table without data, drop generated column
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored);
+alter table t1 drop f1, add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f3*10) stored after f5;
+\d+ t1
+
+-- 3.4.2 generated column in a table with data, drop generated column
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored);
+insert into t1 values(1, 2, 3), (11, 22, 33);
+alter table t1 drop f1, add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f3*10) stored after f5;
+select * from t1;
+
+-- 3.5.1 generated column in a table without data, add generated column
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored);
+alter table t1 add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f2 + f3) stored after f5;
+\d+ t1;
+
+-- 3.5.2 generated column in table with data, add generated column
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored);
+insert into t1 values(1, 2, 3), (11, 22, 33);
+alter table t1 add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f1 + f3) stored after f5;
+select * from t1;
+
+-- 4 auto_increment
+-- 4.1.1 auto_increment in a table without data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 add f6 text first, add f7 float after f3;
+\d+ t1
+
+-- 4.1.2 auto_increment in a table with data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 text first, add f7 float after f3;
+select * from t1;
+
+-- 4.2.1 auto_increment in a table without data, add column with auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+-- error
+alter table t1 add f6 int primary key auto_increment first;
+-- error
+alter table t1 add f7 int primary key auto_increment after f3;
+\d+ t1
+
+-- 4.2.2 auto_increment in a table with data, add column with auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+-- error
+alter table t1 add f6 int primary key auto_increment first;
+-- error
+alter table t1 add f7 int primary key auto_increment after f3;
+select * from t1;
+
+-- 4.3.1 auto_increment in a table without data, drop auto_increment, add column with auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 drop f1, add f6 int primary key auto_increment first;
+\d+ t1
+
+-- 4.3.2 auto_increment in a table with data, drop auto_increment, add column with auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int primary key auto_increment first;
+
+-- 4.4.1 auto_increment in a table without data, drop auto_increment, add column with auto_increment and default
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 drop f1;
+-- error
+alter table t1 add f6 int primary key auto_increment default 6 first;
+\d+ t1
+
+-- 4.4.2 auto_increment in a table with data, drop auto_increment, add column with auto_increment and default
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1;
+-- error
+alter table t1 add f6 int primary key auto_increment default 6 first;
+select * from t1;
+
+-- 5 NULL and NOT NULL
+-- 5.1.1 null and not null in a table without data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 add f6 text first, add f7 float after f3;
+\d+ t1
+
+-- 5.1.2 null and not null in a table with data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 text first, add f7 float after f3;
+select * from t1;
+
+-- 5.2.1 null and not null in table without data, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 add f6 int null first;
+alter table t1 add f7 float not null after f3;
+\d+ t1
+
+-- 5.2.2 null and not null in a table with data, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int null first;
+-- error
+alter table t1 add f7 float not null after f3;
+select * from t1;
+
+-- 5.3.1 null and not null in a table without data, drop null, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 drop f1, add f6 int null first, add f7 float not null after f3;
+\d+ t1
+
+-- 5.3.2 null and not null in a table with data, drop null, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int null first;
+-- error
+alter table t1 add f7 float not null after f3;
+select * from t1;
+
+-- 5.4.1 null and not null in a table without data, drop null and not null, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 drop f1, drop f2, add f6 int null first, add f7 float not null after f3;
+\d+ t1
+
+-- 5.4.2 null and not null in a table with data, drop null and not null, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, drop f2, add f6 int null first;
+-- error
+alter table t1 add f7 float not null after f3;
+select * from t1;
+
+-- 6 check constraint
+-- 6.1.1 check constraint in a table without data, add column without constraint
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 add f6 text first, add f7 float after f3;
+\d+ t1
+
+-- 6.1.2 check constraint in a table with data, add column without constraint
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 text first, add f7 float after f3;
+select * from t1;
+
+-- 6.2.1 check constraint in a table without data, add column with check
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 add f6 int default 6, add f7 text check(f6 = 6) first, add f8 float check(f1 + f2 == 7);
+\d+ t1
+
+-- 6.2.2 check constraint in a table with data, add column with check
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (1, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int default 6, add f7 text check(f6 = 6) first, add f8 float check(f1 + f2 == 7) after f3;
+select * from t1;
+
+-- 6.3.1 check constraint in a table without data, drop check, add column with check
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 drop f2, add f6 text check (f1 > 0) first, add f7 int check (f7 - f1 > 0) after f3;
+\d+ t1
+
+-- 6.3.2 check constraint in a table with data, drop check, add column with with check
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (1, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f2, add f6 text check (f1 > 0) first, add f7 int check(f7 - f1 > 0) after f3;
+select * from t1;
+
+-- 7 foreign key
+-- 7.1.1 foreign key constraint in a table without data, add column without constraint
+drop table if exists t_pri1 cascade;
+create table t_pri1(f1 int, f2 int primary key);
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool);
+alter table t1 add f4 int, add f5 text first, add f6 float after f2;
+\d+ t1
+
+-- 7.1.2 foreign key constraint in a table with data, add column without constraint
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+create table t_pri1(f1 text, f2 int primary key);
+insert into t_pri1 values('a', 1), ('b', 2);
+create table t1(f1 text, f2 int references t_pri1(f2), f3 bool);
+insert into t1 values('a', 1, true), ('b', 2, false);
+alter table t1 add f4 int, add f5 text first, add f6 float after f2;
+select * from t1;
+
+-- 7.2.1 foreign key constraint in a table without data, add column with foreign key
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+drop table if exists t_pri2 cascade;
+create table t_pri1(f1 text, f2 int primary key);
+create table t_pri2(f1 int, f2 int, f4 int primary key);
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool);
+alter table t1 add f4 int references t_pri2(f4) first;
+\d+ t1
+alter table t1 drop f4, add f4 int references t_pri2(f4) after f2;
+\d+ t1
+
+-- 7.2.2 foreign key constraint in a table with data, add column with foreign key
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+drop table if exists t_pri2 cascade;
+create table t_pri1(f1 text, f2 int primary key);
+insert into t_pri1 values('a', 1), ('b', 2);
+create table t_pri2(f1 int, f2 bool, f4 int primary key);
+insert into t_pri2 values(11, true, 1), (22, false, 2);
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool);
+insert into t1 values(1, 1, true), (2, 2, false);
+alter table t1 add f4 int references t_pri2(f4) first;
+select * from t1;
+alter table t1 drop f4, add f4 int references t_pri2(f4) after f2;
+select * from t1;
+
+-- 7.3.1 foreign key constraint in a table without data, drop foreign key, add column with foreign key
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+drop table if exists t_pri2 cascade;
+create table t_pri1(f1 int, f2 int primary key);
+create table t_pri2(f1 int, f2 int, f4 int primary key);
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool);
+alter table t1 drop f2, add f4 int references t_pri2(f4) first;
+\d+ t1
+alter table t1 drop f4, add f4 int references t_pri2(f4) after f1;
+\d+ t1
+
+-- 7.3.2 foreign key constraint in a table with data, drop foreign key, add column with foreign key
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+drop table if exists t_pri2 cascade;
+create table t_pri1(f1 text, f2 int primary key);
+insert into t_pri1 values('a', 1), ('b', 2);
+create table t_pri2(f1 text, f2 bool, f4 int primary key);
+insert into t_pri2 values('a', true, 1), ('b', false, 2);
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool);
+insert into t1 values(1, 2, true), (2, 2, false);
+alter table t1 drop f2, add f4 int references t_pri2(f4) first;
+select * from t1;
+alter table t1 drop f4, add f4 int references t_pri2(f4) after f1;
+select * from t1;
+
+-- partition table
+drop table if exists t1 cascade;
+create table t1
+(f1 int, f2 int, f3 int)
+partition by range(f1, f2)
+(
+ partition t1_p0 values less than (10, 0),
+ partition t1_p1 values less than (20, 0),
+ partition t1_p2 values less than (30, 0)
+);
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+
+alter table t1 add f4 int first, add f5 int after f1;
+\d+ t1
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+
+-- subpartition table
+drop table if exists range_range cascade;
+create table range_range(id int, gender varchar not null, birthday date not null)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24');
+insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08');
+insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21');
+
+-- test pg_partition
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+alter table range_range add f1 int default 1 first, add f2 text after id;
+\d+ range_range
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+select * from range_range;
+
+-- USTORE table
+-- common scenarios
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 clob first, add f7 blob after f2;
+alter table t1 add f8 int, add f9 text first, add f10 float after f3;
+\d+ t1
+select * from t1;
+
+-- 1 primary key
+-- 1.1.1 primary key in original table without data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 add f6 clob first, add f7 blob after f2;
+alter table t1 add f8 int, add f9 text first, add f10 float after f3;
+\d+ t1
+
+-- 1.1.2 primary key in original table with data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 blob first, add f7 clob after f2;
+alter table t1 add f8 int, add f9 text first, add f10 float after f3;
+select * from t1;
+
+-- 1.2.1 primary key in a table without data, add column with primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+-- error
+alter table t1 add f6 text, add f7 int primary key first, add f8 float after f3;
+select * from t1;
+
+-- 1.2.2 primary key in a table with data, add column with primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+-- error
+alter table t1 add f6 text, add f7 int primary key first, add f8 float after f3;
+select * from t1;
+
+-- 1.3.1 primary key in a table without data, drop primary key, then add column with primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 drop f1, add f6 text, add f7 int primary key first, add f8 float after f3;
+\d+ t1;
+
+-- 1.3.2 primary key in a table with data, drop primary key, then add column with primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1;
+-- error
+alter table t1 add f6 text, add f7 int primary key first, add f8 float after f3;
+select * from t1;
+
+-- 1.4.1 primary key in a table without data, drop primary key, then add column with primary key and default
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 drop f1, add f6 text, add f7 int primary key default 7 first, add f8 float after f3;
+\d+ t1
+
+-- 1.4.2 primary key in a table with data, drop primary key, then add column with primary key and default
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1;
+-- error
+alter table t1 add f6 text, add f7 int primary key default 7 first, add f8 float after f3;
+select * from t1;
+
+-- 1.5.1 primary key in a table without data, drop primary key, the add column with primary key and auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 drop f1, add f6 text, add f7 int primary key auto_increment first, add f8 float after f3;
+\d+ t1
+
+-- 1.5.2 primary key in a table with data, drop primary key, the add column with primary key and auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 text, add f7 int primary key auto_increment first, add f8 float after f3;
+select * from t1;
+
+-- 2 unique index
+-- 2.1.1 unique index in a table without data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 add f6 int first, add f7 float after f3;
+\d+ t1
+
+-- 2.1.2 unique index in a table with data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int first, add f7 float after f3;
+select * from t1;
+
+-- 2.2.1 unique index in a table without data, add column with unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 add f6 int unique first, add f7 float unique after f3;
+\d+ t1
+
+-- 2.2.2 unique index in a table with data, add column with unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int unique first, add f7 float unique after f3;
+select * from t1;
+
+-- 2.3.1 unique index in a table without data, drop unique index, add column with unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 drop f1, add f6 int unique first, add f7 float unique after f3;
+\d+ t1
+
+-- 2.3.2 unique index in a table with data, drop unique index, add column with unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int unique first, add f7 float unique after f3;
+select * from t1;
+
+-- 2.4.1 unique index in a table without data, drop unique index, add column with unique index and default
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 add f6 int unique default 6 first;
+alter table t1 drop f1, add f7 float unique default 7 after f3;
+\d+ t1
+
+-- 2.4.2 unique index in a table with data, drop unique index, add column with unique index and default
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+-- error
+alter table t1 add f6 int unique default 6 first;
+alter table t1 drop f1;
+-- error
+alter table t1 add f7 float unique default 7 after f3;
+select * from t1;
+
+-- 3 default and generated column
+-- 3.1.1 default in a table without data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 add f6 int first, add f7 float after f3;
+\d+ t1
+
+-- 3.1.2 default in a table with data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int first, add f7 float after f3;
+select * from t1;
+
+-- 3.2.1 default in a table without data, add column with default
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 add f6 int default 6 first, add f7 float default 7 after f3;
+\d+ t1
+
+-- 3.2.2 default in a table with data, add column with default
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int default 6 first, add f7 float default 7 after f3;
+select * from t1;
+
+-- 3.3.1 default in a table without data, drop default, add column with default
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 drop f1, add f6 int default 6 first, add f7 float default 7 after f3;
+\d+ t1
+
+-- 3.3.2 default in a table with data, drop default, add column with default
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int default 1 first, add f7 float default 7 after f3;
+select * from t1;
+
+-- 3.4.1 generated column in a table without data, drop generated column
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored) with (storage_type = ustore);
+alter table t1 drop f1, add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f3*10) stored after f5;
+\d+ t1
+
+-- 3.4.2 generated column in a table with data, drop generated column
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored) with (storage_type = ustore);
+insert into t1 values(1, 2, 3), (11, 22, 33);
+alter table t1 drop f1, add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f3*10) stored after f5;
+select * from t1;
+
+-- 3.5.1 generated column in a table without data, add generated column
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored) with (storage_type = ustore);
+alter table t1 add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f1 + f3) stored after f5;
+\d+ t1;
+
+-- 3.5.2 generated column in table with data, add generated column
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored) with (storage_type = ustore);
+insert into t1 values(1, 2, 3), (11, 22, 33);
+alter table t1 add f5 int generated always as (f2 + f3) stored first, add f6 int generated always as (f1 + f3) stored after f5;
+select * from t1;
+
+-- 4 auto_increment
+-- 4.1.1 auto_increment in a table without data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 add f6 text first, add f7 float after f3;
+\d+ t1
+
+-- 4.1.2 auto_increment in a table with data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 text first, add f7 float after f3;
+select * from t1;
+
+-- 4.2.1 auto_increment in a table without data, add column with auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+-- error
+alter table t1 add f6 int primary key auto_increment first;
+-- error
+alter table t1 add f7 int primary key auto_increment after f3;
+\d+ t1
+
+-- 4.2.2 auto_increment in a table with data, add column with auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+-- error
+alter table t1 add f6 int primary key auto_increment first;
+-- error
+alter table t1 add f7 int primary key auto_increment after f3;
+select * from t1;
+
+-- 4.3.1 auto_increment in a table without data, drop auto_increment, add column with auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 drop f1, add f6 int primary key auto_increment first;
+\d+ t1
+
+-- 4.3.2 auto_increment in a table with data, drop auto_increment, add column with auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int primary key auto_increment first;
+select * from t1;
+
+-- 4.4.1 auto_increment in a table without data, drop auto_increment, add column with auto_increment and default
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 drop f1;
+-- error
+alter table t1 add f6 int primary key auto_increment default 6 first;
+\d+ t1
+
+-- 4.4.2 auto_increment in a table with data, drop auto_increment, add column with auto_increment and default
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1;
+-- error
+alter table t1 add f6 int primary key auto_increment default 6 first;
+select * from t1;
+
+-- 5 NULL and NOT NULL
+-- 5.1.1 null and not null in a table without data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 add f6 text first, add f7 float after f3;
+\d+ t1
+
+-- 5.1.2 null and not null in a table with data, add column without constraints
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 text first, add f7 float after f3;
+select * from t1;
+
+-- 5.2.1 null and not null in table without data, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 add f6 int null first, add f7 float not null after f3;
+\d+ t1
+
+-- 5.2.2 null and not null in a table with data, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int null first;
+-- error
+alter table t1 add f7 float not null after f3;
+select * from t1;
+
+-- 5.3.1 null and not null in a table without data, drop null, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 drop f1, add f6 int null first, add f7 float not null after f3;
+\d+ t1
+
+-- 5.3.2 null and not null in a table with data, drop null, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, add f6 int null first;
+-- error
+alter table t1 add f7 float not null after f3;
+select * from t1;
+
+-- 5.4.1 null and not null in a table without data, drop null and not null, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 drop f1, drop f2, add f6 int null first, add f7 float not null after f3;
+\d+ t1
+
+-- 5.4.2 null and not null in a table with data, drop null and not null, add column with null or not null
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f1, drop f2, add f6 int null first;
+-- error
+alter table t1 add f7 float not null after f3;
+select * from t1;
+
+-- 6 check constraint
+-- 6.1.1 check constraint in a table without data, add column without constraint
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 add f6 text first, add f7 float after f3;
+\d+ t1
+
+-- 6.1.2 check constraint in a table with data, add column without constraint
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 text first, add f7 float after f3;
+select * from t1;
+
+-- 6.2.1 check constraint in a table without data, add column with check
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 add f6 int default 6, add f7 text check(f6 = 6) first, add f8 float check(f1 + f2 == 7);
+\d+ t1
+
+-- 6.2.2 check constraint in a table with data, add column with check
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (1, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 add f6 int default 6, add f7 text check(f6 = 6) first, add f8 float check(f1 + f2 == 7) after f3;
+select * from t1;
+
+-- 6.3.1 check constraint in a table without data, drop check, add column with check
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+alter table t1 drop f2, add f6 text check (f1 > 0) first, add f7 int check (f7 - f1 > 0) after f3;
+\d+ t1
+
+-- 6.3.2 check constraint in a table with data, drop check, add column with check
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (1, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 drop f2, add f6 text check (f1 > 0) first, add f7 int check(f7 - f1 > 0) after f3;
+select * from t1;
+
+-- 7 foreign key
+-- 7.1.1 foreign key constraint in a table without data, add column without constraint
+drop table if exists t_pri1 cascade;
+create table t_pri1(f1 int, f2 int primary key) with (storage_type = ustore);
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool) with (storage_type = ustore);
+alter table t1 add f4 int, add f5 text first, add f6 float after f2;
+\d+ t1
+
+-- 7.1.2 foreign key constraint in a table with data, add column without constraint
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+create table t_pri1(f1 text, f2 int primary key) with (storage_type = ustore);
+insert into t_pri1 values('a', 1), ('b', 2);
+create table t1(f1 text, f2 int references t_pri1(f2), f3 bool) with (storage_type = ustore);
+insert into t1 values('a', 1, true), ('b', 2, false);
+alter table t1 add f4 int, add f5 text first, add f6 float after f2;
+select * from t1;
+
+-- 7.2.1 foreign key constraint in a table without data, add column with foreign key
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+drop table if exists t_pri2 cascade;
+create table t_pri1(f1 text, f2 int primary key) with (storage_type = ustore);
+create table t_pri2(f1 int, f2 int, f4 int primary key) with (storage_type = ustore);
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool) with (storage_type = ustore);
+alter table t1 add f4 int references t_pri2(f4) first;
+\d+ t1
+alter table t1 drop f4, add f4 int references t_pri2(f4) after f2;
+\d+ t1
+
+-- 7.2.2 foreign key constraint in a table with data, add column with foreign key
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+drop table if exists t_pri2 cascade;
+create table t_pri1(f1 text, f2 int primary key) with (storage_type = ustore);
+insert into t_pri1 values('a', 1), ('b', 2);
+create table t_pri2(f1 int, f2 bool, f4 int primary key) with (storage_type = ustore);
+insert into t_pri2 values(11, true, 1), (22, false, 2);
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool) with (storage_type = ustore);
+insert into t1 values(1, 1, true), (2, 2, false);
+alter table t1 add f4 int references t_pri2(f4) first;
+select * from t1;
+alter table t1 drop f4, add f4 int references t_pri2(f4) after f2;
+select * from t1;
+
+-- 7.3.1 foreign key constraint in a table without data, drop foreign key, add column with foreign key
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+drop table if exists t_pri2 cascade;
+create table t_pri1(f1 int, f2 int primary key) with (storage_type = ustore);
+create table t_pri2(f1 int, f2 int, f4 int primary key) with (storage_type = ustore);
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool) with (storage_type = ustore);
+alter table t1 drop f2, add f4 int references t_pri2(f4) first;
+\d+ t1
+alter table t1 drop f4, add f4 int references t_pri2(f4) after f1;
+\d+ t1
+
+-- 7.3.2 foreign key constraint in a table with data, drop foreign key, add column with foreign key
+drop table if exists t1 cascade;
+drop table if exists t_pri1 cascade;
+drop table if exists t_pri2 cascade;
+create table t_pri1(f1 text, f2 int primary key) with (storage_type = ustore);
+insert into t_pri1 values('a', 1), ('b', 2);
+create table t_pri2(f1 text, f2 bool, f4 int primary key) with (storage_type = ustore);
+insert into t_pri2 values('a', true, 1), ('b', false, 2);
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool) with (storage_type = ustore);
+insert into t1 values(1, 2, true), (2, 2, false);
+alter table t1 drop f2, add f4 int references t_pri2(f4) first;
+select * from t1;
+alter table t1 drop f4, add f4 int references t_pri2(f4) after f1;
+select * from t1;
+
+-- partition table
+drop table if exists t1 cascade;
+create table t1
+(f1 int, f2 int, f3 int) with (storage_type = ustore)
+partition by range(f1, f2)
+(
+ partition t1_p0 values less than (10, 0),
+ partition t1_p1 values less than (20, 0),
+ partition t1_p2 values less than (30, 0)
+);
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+
+alter table t1 add f4 int first, add f5 int after f1;
+\d+ t1
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+
+-- subpartition table
+drop table if exists range_range cascade;
+create table range_range(id int, gender varchar not null, birthday date not null) with (storage_type = ustore)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24');
+insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08');
+insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21');
+
+
+-- test pg_partition
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+alter table range_range add f1 int default 1 first, add f2 text after id;
+\d+ range_range
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+select * from range_range;
+
+-- orientation = column is not supported
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (orientation = column);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+-- error
+alter table t1 add f6 text first;
+-- error
+alter table t1 add f6 text after f1;
+
+-- pg_constraint test
+set enable_default_ustore_table = on;
+drop table if exists t_pri cascade;
+drop table if exists t1 cascade;
+create table t_pri(f1 int, f2 int, f3 int, primary key(f2, f3));
+create table t1
+(
+ f1 int, f2 int, f3 varchar(20), f4 int, f5 int, f6 int, f7 int,
+ foreign key(f1, f2) references t_pri(f2, f3),
+ unique((lower(f3)), (abs(f4))),
+ check(f5 = 10),
+ unique(f4, f5) include(f6, f7)
+);
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't1') order by conname;
+
+alter table t_pri add f4 int first, add f5 int after f2;
+alter table t1 add f8 int primary key first, add f9 int unique after f3;
+\d+ t_pri
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't_pri') order by conname;
+\d+ t1
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't1') order by conname;
+
+set enable_default_ustore_table = off;
+
+-- pg_index test
+drop table if exists t1 cascade;
+create table t1
+(
+ f1 int, f2 int, f3 varchar(20), f4 int, f5 int, f6 int, f7 int,
+ primary key(f1, f2),
+ unique((lower(f3)), (abs(f4))),
+ check(f5 = 10)
+);
+create unique index partial_t1_idx on t1(f5, abs(f6)) where f5 + f6 - abs(f7) > 0;
+
+select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1') order by 1, 2, 3;
+
+alter table t1 add f8 int first, add f9 int unique after f1;
+\d+ t1
+select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1') order by 1, 2, 3;
+
+-- pg_attribute test
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int);
+select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum;
+
+alter table t1 add f4 int default 4 first;
+\d+ t1
+select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum;
+
+alter table t1 drop f2, add f5 int default 5 after f1;
+\d+ t1
+select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum;
+
+-- pg_attrdef test
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 int, f3 int default 3, f4 int generated always as (f2 + f3) stored);
+select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum;
+
+alter table t1 add f5 text default 'aaa' first;
+\d+ t1
+select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum;
+
+alter table t1 drop f2, add f6 int generated always as (f1 + abs(f3)) stored after f1; -- ERROR
+
+-- pg_rewrite test
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int);
+insert into t1 values(1, 2, 3), (11, 22, 33);
+create view t1_view1 as select * from t1;
+create view t1_view2 as select f1, f2 from t1;
+\d+ t1_view1
+\d+ t1_view2
+\d+ t1
+select pg_get_viewdef('t1_view1');
+select pg_get_viewdef('t1_view2');
+select * from t1_view1;
+select * from t1_view2;
+select * from t1;
+alter table t1 add f4 int first, add f5 int after f1;
+\d+ t1_view1
+\d+ t1_view2
+\d+ t1
+select pg_get_viewdef('t1_view1');
+select pg_get_viewdef('t1_view2');
+select * from t1_view1;
+select * from t1_view2;
+select * from t1;
+
+-- pg_trigger test
+drop table if exists t1 cascade;
+create table t1(f1 boolean not null, f2 text, f3 int, f4 date);
+alter table t1 add primary key(f1);
+create or replace function dummy_update_func() returns trigger as $$
+begin
+ raise notice 'dummy_update_func(%) called: action = %, oid = %, new = %', TG_ARGV[0], TG_OP, OLD, NEW;
+ return new;
+end;
+$$ language plpgsql;
+
+drop trigger if exists f1_trig_update on t1;
+drop trigger if exists f1_trig_insert on t1;
+
+create trigger f1_trig_update after update of f1 on t1 for each row
+ when (not old.f1 and new.f1) execute procedure dummy_update_func('update');
+create trigger f1_trig_insert after insert on t1 for each row
+ when (not new.f1) execute procedure dummy_update_func('insert');
+
+select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname;
+
+alter table t1 add f5 int after f1, add f6 boolean first;
+\d+ t1
+select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname;
+
+-- pg_rlspolicy test
+drop table if exists t1 cascade;
+drop role if exists test_rlspolicy2;
+create role test_rlspolicy2 nologin password 'Gauss_234';
+create table t1 (f1 int, f2 int, f3 text) partition by range (f1)
+(
+ partition t1_p0 values less than(10),
+ partition t1_p1 values less than(50),
+ partition t1_p2 values less than(100),
+ partition t1_p3 values less than(MAXVALUE)
+);
+
+INSERT INTO t1 VALUES (generate_series(1, 150) % 24, generate_series(1, 150), 'huawei');
+grant select on t1 to public;
+
+create row level security policy t1_rls1 on t1 as permissive to public using (f2 <= 20);
+create row level security policy t1_rls2 on t1 as restrictive to test_rlspolicy2 using (f1 < 30);
+
+\d+ t1
+select * from t1 limit 10;
+select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1');
+
+alter table t1 add f4 int generated always as (f1 + 100) stored after f1, add f5 int generated always as (f2 + 100) stored first;
+\d+ t1
+select * from t1 limit 10;
+select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1');
+
+-- expression test
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int, f4 bool, f5 text, f6 text);
+insert into t1 values(1, 2, 3, true, 'nanjin', 'huawei');
+-- T_FuncExpr
+create index t1_idx1 on t1(abs(f1), f2);
+-- T_OpExpr
+create index t1_idx2 on t1((f1 + f2), (f1 - f3));
+-- T_BooleanTest
+create index t1_idx3 on t1((f4 is true));
+-- T_CaseExpr and T_CaseWhen
+create index t1_idx4 on t1((case f1 when f2 then 'yes' when f3 then 'no' else 'unknow' end));
+-- T_ArrayExpr
+create index t1_idx5 on t1((array[f1, f2, f3]));
+-- T_TypeCast
+create index t1_idx6 on t1(((f1 + f2 + 1) :: text));
+-- T_BoolExpr
+create index t1_idx7 on t1((f1 and f2), (f2 or f3));
+-- T_ArrayRef
+create index t1_idx8 on t1((f1 = (array[f1, f2, 3])[1]));
+-- T_ScalarArrayOpExpr
+create index t1_idx9 on t1((f1 = ANY(ARRAY[f2, 1, f1 + 10])));
+-- T_RowCompareExpr
+create index t1_idx10 on t1((row(f1, f5) < row(f2, f6)));
+-- T_MinMaxExpr
+create index t1_idx11 on t1(greatest(f1, f2, f3), least(f1, f2, f3));
+-- T_RowExpr
+drop table if exists mytable cascade;
+create table mytable(f1 int, f2 int, f3 text);
+create function getf1(mytable) returns int as 'select $1.f1' language sql;
+create index t1_idx12 on t1(getf1(row(f1, 2, 'a')));
+-- T_CoalesceExpr
+create index t1_idx13 on t1(nvl(f1, f2));
+-- T_NullTest
+create index t1_idx14 on t1((f1 is null));
+-- T_ScalarArrayOpExpr
+create index t1_idx16 on t1((f1 in (1,2,3)));
+-- T_NullIfExpr
+create index t1_idx17 on t1(nullif(f5,f6));
+-- T_RelabelType
+alter table t1 add f7 oid;
+create index t1_idx18 on t1((f7::int4));
+-- T_CoerceViaIO
+alter table t1 add f8 json;
+create index t1_idx19 on t1((f8::jsonb));
+-- T_ArrayCoerceExpr
+alter table t1 add f9 float[];
+create index t1_idx20 on t1((f9::int[]));
+-- T_PrefixKey
+create index t1_idx21 on t1(f6(5));
+
+\d+ t1
+select * from t1;
+
+alter table t1 add f10 int primary key auto_increment after f4,
+ add f11 int generated always as (f1 + f2) stored after f1,
+ add f12 date default '2023-01-05' first,
+ add f13 int not null default 13 first;
+
+\d+ t1
+select * from t1;
+
+-- test modify column ... first | after column in astore table
+-- ASTORE table
+-- common scenatios
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4, modify f5 bool after f2;
+\d+ t1
+select * from t1;
+alter table t1 modify;
+
+-- 1 primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+alter table t1 modify f1 int after f3;
+\d+ t1
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+select * from t1;
+alter table t1 modify f1 int after f3;
+\d+ t1
+select * from t1;
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+select * from t1;
+
+-- 2 unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+alter table t1 modify f1 int after f3;
+\d+ t1
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+select * from t1;
+alter table t1 modify f1 int after f3;
+\d+ t1
+select * from t1;
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+select * from t1;
+
+-- 3 default and generated column
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+alter table t1 modify f1 int after f3;
+\d+ t1
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+select * from t1;
+alter table t1 modify f1 int after f3;
+\d+ t1
+select * from t1;
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+select * from t1;
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored);
+alter table t1 modify f4 int after f2, modify f1 int after f3, modify f3 int first;
+\d+ t1
+alter table t1 drop f1;
+\d+ t1
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored);
+insert into t1 values(1,2,3),(11,22,33);
+alter table t1 modify f4 int after f2, modify f1 int after f3, modify f3 int first;
+\d+ t1
+select * from t1;
+alter table t1 drop f1;
+\d+ t1
+select * from t1;
+
+-- 4 auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+\d+ t1
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1(f2, f3, f4, f5) values('a', '2022-11-08 19:56:10.158564', x'41', true), ('b', '2022-11-09 19:56:10.158564', x'42', false);
+\d+ t1
+select * from t1;
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+select * from t1;
+insert into t1(f3, f2, f4, f5, f1) values('2022-11-10 19:56:10.158564', 'c', x'43', false, 3);
+select f1 from t1;
+
+-- 5 NULL and NOT NULL
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+alter table t1 modify f1 int after f3;
+\d+ t1
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+alter table t1 modify f2 varchar(20) after f3;
+\d+ t1
+
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+select * from t1;
+alter table t1 modify f1 int after f3;
+\d+ t1
+select * from t1;
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+select * from t1;
+alter table t1 modify f2 varchar(20) after f3;
+\d+ t1
+select * from t1;
+
+-- 6 check constraint
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+alter table t1 modify f1 int after f3;
+\d+ t1
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+select * from t1;
+alter table t1 modify f1 int after f3;
+\d+ t1
+select * from t1;
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+select * from t1;
+
+-- 7 foreign key
+drop table if exists t_pri1 cascade;
+create table t_pri1(f1 int, f2 int primary key);
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool);
+alter table t1 modify f2 int first;
+\d+ t1
+alter table t1 modify f2 int after f3;
+\d+ t1
+
+drop table if exists t_pri1 cascade;
+create table t_pri1(f1 int, f2 int primary key);
+insert into t_pri1 values(1,1),(2,2);
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool);
+insert into t1 values(1, 1, true), (2, 2, false);
+alter table t1 modify f2 int first;
+\d+ t1
+select * from t1;
+alter table t1 modify f2 int after f3;
+\d+ t1
+select * from t1;
+
+-- partition table
+drop table if exists t1 cascade;
+create table t1
+(f1 int, f2 int, f3 int, primary key (f1, f2))
+partition by range(f1, f2)
+(
+ partition t1_p0 values less than (10, 0),
+ partition t1_p1 values less than (20, 0),
+ partition t1_p2 values less than (30, 0)
+);
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+
+alter table t1 modify f1 int after f2, modify f3 int first, modify f2 int first;
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+
+alter table t1 modify f1 int after f2;
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+
+-- modify operation before add
+alter table t1 add f4 int after f2, modify f1 int after f2;
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+
+drop table if exists t1 cascade;
+create table t1
+(f1 int, f2 int, f3 int, primary key (f1, f2))
+partition by range(f1, f2)
+(
+ partition t1_p0 values less than (10, 0),
+ partition t1_p1 values less than (20, 0),
+ partition t1_p2 values less than (30, 0)
+);
+insert into t1 values(9, -1, 1), (19, -1, 2), (29, -1, 3);
+\d+ t1
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+select * from t1 partition (t1_p0);
+select * from t1 partition (t1_p1);
+select * from t1 partition (t1_p2);
+
+alter table t1 modify f1 int after f2, modify f3 int first, modify f2 int first;
+\d+ t1
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+select * from t1 partition (t1_p0);
+select * from t1 partition (t1_p1);
+select * from t1 partition (t1_p2);
+
+alter table t1 modify f1 int after f2;
+\d+ t1
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+select * from t1 partition (t1_p0);
+select * from t1 partition (t1_p1);
+select * from t1 partition (t1_p2);
+
+alter table t1 add f4 int after f2, modify f1 int after f2;
+\d+ t1
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+select * from t1 partition (t1_p0);
+select * from t1 partition (t1_p1);
+select * from t1 partition (t1_p2);
+
+-- subpartition table
+drop table if exists range_range cascade;
+create table range_range(id int, gender varchar not null, birthday date not null, primary key(id, birthday))
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+
+alter table range_range modify birthday date first, modify id int after gender;
+\d+ range_range
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+
+
+drop table if exists range_range cascade;
+create table range_range(id int, gender varchar not null, birthday date not null, primary key(id, birthday))
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+
+insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24');
+insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08');
+insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21');
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+
+alter table range_range modify birthday date first, modify id int after gender;
+\d+ range_range
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+
+select * from range_range;
+
+-- USTORE table
+-- common scenarios
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4, modify f5 bool after f2;
+\d+ t1
+select * from t1;
+alter table t1 modify;
+
+-- 1 primary key
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+alter table t1 modify f1 int after f3;
+\d+ t1
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+
+drop table if exists t1 cascade;
+create table t1(f1 int primary key, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+select * from t1;
+alter table t1 modify f1 int after f3;
+\d+ t1
+select * from t1;
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+select * from t1;
+
+-- 2 unique index
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+alter table t1 modify f1 int after f3;
+\d+ t1
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+
+drop table if exists t1 cascade;
+create table t1(f1 int unique, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+select * from t1;
+alter table t1 modify f1 int after f3;
+\d+ t1
+select * from t1;
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+select * from t1;
+
+-- 3 default and generated column
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+alter table t1 modify f1 int after f3;
+\d+ t1
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+
+drop table if exists t1 cascade;
+create table t1(f1 int default 1, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+select * from t1;
+alter table t1 modify f1 int after f3;
+\d+ t1
+select * from t1;
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+select * from t1;
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored) with(storage_type = ustore);
+alter table t1 modify f4 int after f2, modify f1 int after f3, modify f3 int first;
+\d+ t1
+alter table t1 drop f1;
+\d+ t1
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int default 2, f3 int default 3, f4 int generated always as (f1 + f2) stored) with(storage_type = ustore);
+insert into t1 values(1,2,3),(11,22,33);
+alter table t1 modify f4 int after f2, modify f1 int after f3, modify f3 int first;
+\d+ t1
+select * from t1;
+alter table t1 drop f1;
+\d+ t1
+select * from t1;
+
+-- 4 auto_increment
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+\d+ t1
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+insert into t1(f2, f3, f4, f5) values('a', '2022-11-08 19:56:10.158564', x'41', true), ('b', '2022-11-09 19:56:10.158564', x'42', false);
+\d+ t1
+select * from t1;
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+select * from t1;
+insert into t1(f3, f2, f4, f5, f1) values('2022-11-10 19:56:10.158564', 'c', x'43', false, 3);
+select f1 from t1;
+
+-- 5 NULL and NOT NULL
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+alter table t1 modify f1 int after f3;
+\d+ t1
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+alter table t1 modify f2 varchar(20) after f3;
+\d+ t1
+
+drop table if exists t1 cascade;
+create table t1(f1 int null, f2 varchar(20) not null, f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+select * from t1;
+alter table t1 modify f1 int after f3;
+\d+ t1
+select * from t1;
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+select * from t1;
+alter table t1 modify f2 varchar(20) after f3;
+\d+ t1
+select * from t1;
+
+-- 6 check constraint
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+alter table t1 modify f1 int after f3;
+\d+ t1
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+
+drop table if exists t1 cascade;
+create table t1(f1 int check(f1 = 1), f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with(storage_type = ustore);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+alter table t1 modify f3 timestamp first, modify f1 int after f4;
+\d+ t1
+select * from t1;
+alter table t1 modify f1 int after f3;
+\d+ t1
+select * from t1;
+alter table t1 drop f1, modify f5 bool first;
+\d+ t1
+select * from t1;
+
+-- 7 foreign key
+drop table if exists t_pri1 cascade;
+create table t_pri1(f1 int, f2 int primary key) with(storage_type = ustore);
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool) with(storage_type = ustore);
+alter table t1 modify f2 int first;
+\d+ t1
+alter table t1 modify f2 int after f3;
+\d+ t1
+
+drop table if exists t_pri1 cascade;
+create table t_pri1(f1 int, f2 int primary key) with(storage_type = ustore);
+insert into t_pri1 values(1,1),(2,2);
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int references t_pri1(f2), f3 bool) with(storage_type = ustore);
+insert into t1 values(1, 1, true), (2, 2, false);
+alter table t1 modify f2 int first;
+\d+ t1
+select * from t1;
+alter table t1 modify f2 int after f3;
+\d+ t1
+select * from t1;
+
+-- partition table
+drop table if exists t1 cascade;
+create table t1
+(f1 int, f2 int, f3 int) with(storage_type = ustore)
+partition by range(f1, f2)
+(
+ partition t1_p0 values less than (10, 0),
+ partition t1_p1 values less than (20, 0),
+ partition t1_p2 values less than (30, 0)
+);
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+
+alter table t1 modify f1 int after f2, modify f3 int first, modify f2 int first;
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+
+alter table t1 modify f1 int after f2;
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+
+-- modify operation before add
+alter table t1 add f4 int after f2, modify f1 int after f2;
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+
+drop table if exists t1 cascade;
+create table t1
+(f1 int, f2 int, f3 int) with(storage_type = ustore)
+partition by range(f1, f2)
+(
+ partition t1_p0 values less than (10, 0),
+ partition t1_p1 values less than (20, 0),
+ partition t1_p2 values less than (30, 0)
+);
+insert into t1 values(9, -1, 1), (19, -1, 2), (29, -1, 3);
+\d+ t1
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+select * from t1 partition (t1_p0);
+select * from t1 partition (t1_p1);
+select * from t1 partition (t1_p2);
+
+alter table t1 modify f1 int after f2, modify f3 int first, modify f2 int first;
+\d+ t1
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+select * from t1 partition (t1_p0);
+select * from t1 partition (t1_p1);
+select * from t1 partition (t1_p2);
+
+alter table t1 modify f1 int after f2;
+\d+ t1
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+select * from t1 partition (t1_p0);
+select * from t1 partition (t1_p1);
+select * from t1 partition (t1_p2);
+
+alter table t1 add f4 int after f2, modify f1 int after f2;
+\d+ t1
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='t1') order by relname;
+select * from t1 partition (t1_p0);
+select * from t1 partition (t1_p1);
+select * from t1 partition (t1_p2);
+
+-- subpartition table
+drop table if exists range_range cascade;
+create table range_range(id int, gender varchar not null, birthday date not null) with(storage_type = ustore)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+
+alter table range_range modify birthday date first, modify id int after gender;
+\d+ range_range
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+
+
+drop table if exists range_range cascade;
+create table range_range(id int, gender varchar not null, birthday date not null) with(storage_type = ustore)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+
+insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24');
+insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08');
+insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21');
+
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+
+alter table range_range modify birthday date first, modify id int after gender;
+\d+ range_range
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+
+select * from range_range;
+
+-- orientation = column not support
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool) with (orientation = column);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+-- error
+alter table t1 modify f1 int after f3;
+-- error
+alter table t1 modify f3 timestamp first;
+
+-- pg_constraint test
+set enable_default_ustore_table = on;
+drop table if exists t_pri cascade;
+drop table if exists t1 cascade;
+create table t_pri(f1 int, f2 int, f3 int, primary key(f2, f3));
+create table t1
+(
+ f1 int, f2 int, f3 varchar(20), f4 int, f5 int, f6 int, f7 int,
+ foreign key(f1, f2) references t_pri(f2, f3),
+ unique((lower(f3)), (abs(f4))),
+ check(f5 = 10),
+ unique(f4, f5) include(f6, f7)
+);
+\d+ t_pri
+\d+ t1
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't_pri') order by conname;
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't1') order by conname;
+
+alter table t_pri modify f3 int first;
+alter table t1 modify f2 int first, modify f4 int after f1, modify f5 int after f7;
+\d+ t_pri
+\d+ t1
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't_pri') order by conname;
+select conname, contype, conkey, confkey, conbin, consrc, conincluding from pg_constraint
+ where conrelid = (select oid from pg_class where relname = 't1') order by conname;
+
+set enable_default_ustore_table = off;
+
+-- pg_index test
+drop table if exists t1 cascade;
+create table t1
+(
+ f1 int, f2 int, f3 varchar(20), f4 int, f5 int, f6 int, f7 int,
+ primary key(f1, f2),
+ unique((lower(f3)), (abs(f4))),
+ check(f5 = 10)
+);
+create unique index partial_t1_idx on t1(f5, abs(f6)) where f5 + f6 - abs(f7) > 0;
+
+\d+ t1
+select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1');
+
+alter table t1 modify f1 int after f2, modify f4 int after f6, modify f5 int first;
+\d+ t1
+select indkey, indexprs, indpred from pg_index where indrelid = (select oid from pg_class where relname = 't1');
+
+-- pg_attribute test
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int);
+\d+ t1
+select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum;
+
+alter table t1 modify f3 int first, modify f1 int after f2;
+\d+ t1
+select attname, attnum, atthasdef, attisdropped from pg_attribute where attrelid = (select oid from pg_class where relname = 't1') and attnum > 0 order by attnum;
+
+-- pg_attrdef test
+drop table if exists t1 cascade;
+create table t1(f1 int primary key auto_increment, f2 int, f3 int default 3, f4 int generated always as (f2 + f3) stored);
+\d+ t1
+select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum;
+
+alter table t1 modify f3 int first, modify f1 int after f4, modify f4 int first;
+\d+ t1
+select adnum, adsrc, adgencol from pg_attrdef where adrelid = (select oid from pg_class where relname = 't1') order by adnum;
+
+-- pg_depend test
+drop table if exists t1 cascade;
+create table t1(f1 int default 10, f2 int primary key, f3 int generated always as (f1 + f2) stored, f4 int, unique ((abs(f4))));
+\d+ t1
+select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend
+ where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5;
+
+alter table t1 modify f4 int first, modify f3 int after f1, modify f1 int after f2;
+\d+ t1
+select classid, objsubid, refclassid, refobjsubid, deptype from pg_depend
+ where refobjid = (select oid from pg_class where relname='t1') or objid = (select oid from pg_class where relname='t1') order by 1, 2, 3, 4, 5;
+
+-- pg_partition test
+drop table if exists range_range cascade;
+create table range_range(id int, gender varchar not null, birthday date not null)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+insert into range_range values(198,'boy','2010-02-15'),(33,'boy','2003-08-11'),(78,'girl','2014-06-24');
+insert into range_range values(233,'girl','2010-01-01'),(360,'boy','2007-05-14'),(146,'girl','2005-03-08');
+insert into range_range values(111,'girl','2013-11-19'),(15,'girl','2009-01-12'),(156,'boy','2011-05-21');
+
+\d+ range_range
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+
+alter table range_range modify gender varchar after birthday;
+\d+ range_range
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+
+alter table range_range modify birthday date first, modify id int after gender;
+\d+ range_range
+select relname, parttype, partkey from pg_partition where parentid=(select oid from pg_class where relname='range_range') order by relname;
+
+
+-- pg_rewrite test
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int, f4 int);
+insert into t1 values(1, 2, 3, 4), (11, 22, 33, 44);
+create view t1_view1 as select * from t1;
+create view t1_view2 as select f1, f4 from t1;
+\d+ t1_view1
+\d+ t1_view2
+\d+ t1
+select pg_get_viewdef('t1_view1');
+select pg_get_viewdef('t1_view2');
+select * from t1_view1;
+select * from t1_view2;
+select * from t1;
+alter table t1 modify f2 int first, modify f1 int after f4, add f5 int after f4;
+\d+ t1_view1
+\d+ t1_view2
+\d+ t1
+select pg_get_viewdef('t1_view1');
+select pg_get_viewdef('t1_view2');
+select * from t1_view1;
+select * from t1_view2;
+select * from t1;
+
+-- pg_trigger test
+drop table if exists t1 cascade;
+create table t1(f1 boolean not null, f2 text, f3 int, f4 date);
+alter table t1 add primary key(f1);
+create or replace function dummy_update_func() returns trigger as $$
+begin
+ raise notice 'dummy_update_func(%) called: action = %, oid = %, new = %', TG_ARGV[0], TG_OP, OLD, NEW;
+ return new;
+end;
+$$ language plpgsql;
+
+drop trigger if exists f1_trig_update on t1;
+drop trigger if exists f1_trig_insert on t1;
+
+create trigger f1_trig_update after update of f1 on t1 for each row
+ when (not old.f1 and new.f1) execute procedure dummy_update_func('update');
+create trigger f1_trig_insert after insert on t1 for each row
+ when (not new.f1) execute procedure dummy_update_func('insert');
+
+\d+ t1
+select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname;
+
+alter table t1 modify f3 int first, modify f1 boolean after f4;
+\d+ t1
+select tgname, tgattr, tgqual from pg_trigger where tgrelid = (select oid from pg_class where relname='t1') order by tgname;
+
+-- pg_rlspolicy test
+drop table if exists t1 cascade;
+drop role if exists test_rlspolicy3;
+create role test_rlspolicy3 nologin password 'Gauss_234';
+create table t1 (f1 int, f2 int, f3 text) partition by range (f1)
+(
+ partition t1_p0 values less than(10),
+ partition t1_p1 values less than(50),
+ partition t1_p2 values less than(100),
+ partition t1_p3 values less than(MAXVALUE)
+);
+
+INSERT INTO t1 VALUES (generate_series(1, 150) % 24, generate_series(1, 150), 'huawei');
+grant select on t1 to public;
+
+create row level security policy t1_rls1 on t1 as permissive to public using (f2 <= 20);
+create row level security policy t1_rls2 on t1 as restrictive to test_rlspolicy3 using (f1 < 30);
+
+\d+ t1
+select * from t1 limit 10;
+select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1');
+
+alter table t1 modify f2 int first, modify f1 int after f3;
+
+\d+ t1
+select * from t1 limit 10;
+select polname, polqual from pg_rlspolicy where polrelid = (select oid from pg_class where relname='t1');
+
+
+-- expression test
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int, f4 bool, f5 text, f6 text);
+insert into t1 values(1, 2, 3, true, 'nanjin', 'huawei');
+-- T_FuncExpr
+create index t1_idx1 on t1(abs(f1), f2);
+-- T_OpExpr
+create index t1_idx2 on t1((f1 + f2), (f1 - f3));
+-- T_BooleanTest
+create index t1_idx3 on t1((f4 is true));
+-- T_CaseExpr and T_CaseWhen
+create index t1_idx4 on t1((case f1 when f2 then 'yes' when f3 then 'no' else 'unknow' end));
+-- T_ArrayExpr
+create index t1_idx5 on t1((array[f1, f2, f3]));
+-- T_TypeCast
+create index t1_idx6 on t1(((f1 + f2 + 1) :: text));
+-- T_BoolExpr
+create index t1_idx7 on t1((f1 and f2), (f2 or f3));
+-- T_ArrayRef
+create index t1_idx8 on t1((f1 = (array[f1, f2, 3])[1]));
+-- T_ScalarArrayOpExpr
+create index t1_idx9 on t1((f1 = ANY(ARRAY[f2, 1, f1 + 10])));
+-- T_RowCompareExpr
+create index t1_idx10 on t1((row(f1, f5) < row(f2, f6)));
+-- T_MinMaxExpr
+create index t1_idx11 on t1(greatest(f1, f2, f3), least(f1, f2, f3));
+-- T_RowExpr
+drop table if exists mytable cascade;
+create table mytable(f1 int, f2 int, f3 text);
+create function getf1(mytable) returns int as 'select $1.f1' language sql;
+create index t1_idx12 on t1(getf1(row(f1, 2, 'a')));
+-- T_CoalesceExpr
+create index t1_idx13 on t1(nvl(f1, f2));
+-- T_NullTest
+create index t1_idx14 on t1((f1 is null));
+-- T_ScalarArrayOpExpr
+create index t1_idx16 on t1((f1 in (1,2,3)));
+-- T_NullIfExpr
+create index t1_idx17 on t1(nullif(f5,f6));
+-- T_RelabelType
+alter table t1 add f7 oid;
+create index t1_idx18 on t1((f7::int4));
+-- T_CoerceViaIO
+alter table t1 add f8 json;
+create index t1_idx19 on t1((f8::jsonb));
+-- T_ArrayCoerceExpr
+alter table t1 add f9 float[];
+create index t1_idx20 on t1((f9::int[]));
+
+\d+ t1
+select * from t1;
+
+alter table t1 modify f8 json first, modify f2 int after f6, modify f7 oid after f3;
+
+\d+ t1
+select * from t1;
+
+drop table if exists t1;
+create table t1(f1 int, f2 int);
+insert into t1 values(1,2);
+alter table t1 add f3 int default 3, add f4 int default 4 after f3, add f5 int default 5, add f6 int default 6 after f3;
+select * from t1;
+
+drop table if exists t1;
+create table t1(f1 int, f2 int);
+insert into t1 values(1,2);
+alter table t1 add f3 int default 3, add f4 int default 4 after f1, add f5 int default 5, add f6 int default 6 after f5;
+select * from t1;
+
+drop table if exists t1;
+create table t1(f1 int, f2 int);
+insert into t1 values(1,2);
+alter table t1 add f3 int, add f4 int after f3, add f5 int, add f6 int first;
+select * from t1;
+
+drop table if exists t1;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+
+alter table t1 drop f5,
+ add f6 int default 6 , add f7 int first, add f8 int default 8 after f3,
+ modify f3 timestamp first, modify f6 int after f2, modify f1 text, modify f2 text after f4;
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int, primary key(f1, f3));
+insert into t1 values(1, 2, 3), (11, 22, 33);
+\d+ t1
+select * from t1;
+alter table t1 modify f3 int first, modify f1 int after f2;
+\d+ t1
+select * from t1;
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 int, f3 int);
+insert into t1 values(1, 2, 3), (11, 12, 13), (21, 22, 23);
+select * from t1;
+alter table t1 add f4 int generated always as (f1 + 100) stored after f1, add f5 int generated always as (f2 * 10) stored first;
+select * from t1;
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool, f6 int generated always as (f1 * 10) stored, primary key(f1, f2));
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+select * from t1;
+
+alter table t1 drop f4,
+ add f7 int default 7 , add f8 int first, add f9 int default 9 after f3,
+ modify f3 timestamp first, modify f6 int after f2, modify f5 int, modify f2 text after f5,
+ add f10 timestamp generated always as (f3) stored after f3,
+ add f11 int generated always as (f1 * 100) stored first;
+
+\d+ t1
+select * from t1;
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 int, primary key(f1, f3));
+insert into t1 values(1, 'a', 1), (2, 'b', 2);
+\d+ t1
+select * from t1;
+
+alter table t1 modify f1 text after f3, add f10 int default 10 after f2;
+\d+ t1
+select * from t1;
+
+-- unlogged table
+drop table if exists t1 cascade;
+create unlogged table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool, f6 int generated always as (f1 * 10) stored, primary key(f1, f2));
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+\d+ t1
+select * from t1;
+
+alter table t1 drop f4,
+ add f7 int default 7 , add f8 int first, add f9 int default 9 after f3,
+ modify f3 timestamp first, modify f6 int after f2, modify f5 int, modify f2 text after f5,
+ add f10 timestamp generated always as (f3) stored after f3,
+ add f11 int generated always as (f1 * 100) stored first;
+
+\d+ t1
+select * from t1;
+
+-- temp table
+drop table if exists t1 cascade;
+create temp table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool, f6 int generated always as (f1 * 10) stored, primary key(f1, f2));
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+select * from t1;
+
+alter table t1 drop f4,
+ add f7 int default 7 , add f8 int first, add f9 int default 9 after f3,
+ modify f3 timestamp first, modify f6 int after f2, modify f5 int, modify f2 text after f5,
+ add f10 timestamp generated always as (f3) stored after f3,
+ add f11 int generated always as (f1 * 100) stored first;
+select * from t1;
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 SET('beijing','shanghai','nanjing','wuhan'));
+insert into t1 values(1, 'shanghai,beijing'), (2, 'wuhan');
+\d+ t1
+select * from t1;
+alter table t1 add f3 int default 3 first, add f4 int default 4 after f3,
+ add f5 SET('beijing','shanghai','nanjing','wuhan') default 'nanjing' first;
+\d+ t1
+select * from t1;
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 SET('beijing','shanghai','nanjing','wuhan'));
+-- error
+alter table t1 modify f2 SET('beijing','shanghai','nanjing','wuhan') first;
+alter table t1 modify f2 SET('beijing','shanghai','nanjing','wuhan') after f1;
+
+drop table if exists t1 cascade;
+
+--DTS
+drop table if exists unit cascade;
+CREATE TABLE unit
+(
+ f11 INTEGER CHECK (f11 >=2),
+ f12 bool,
+ f13 text,
+ f14 varchar(20),
+ primary key (f11,f12)
+);
+
+insert into unit values(2,3,4,5);
+insert into unit values(3,4,5,6);
+ALTER TABLE unit ADD f1 int CHECK (f1 >=10) FIRST;
+insert into unit values (10,6,1,1,1);
+insert into unit values (11,7,1,1,1);
+ALTER TABLE unit ADD f2 int CHECK (f2 >=10) after f11;
+select * from unit;
+ALTER TABLE unit MODIFY f12 int FIRST;
+select * from unit;
+drop table if exists unit cascade;
+
+-- dts for set
+drop table if exists test_s1 cascade;
+create table test_s1 (c1 int,c2 SET('aaa','bbb','ccc'), c3 bool, primary key(c1));
+insert into test_s1 values(1,2,1), (2,'aaa',3), (3,4,4), (4,5,5), (5,1,6), (6,3,7);
+alter table test_s1 add f1 text after c1;
+alter table test_s1 modify c2 int first;
+select * from test_s1;
+drop table if exists test_s1 cascade;
+
+drop table if exists test_s2 cascade;
+create table test_s2 (c1 int,c2 SET('aaa','bbb','ccc'), c3 bool, primary key(c1));
+insert into test_s2 values(1,2,1), (2,'aaa',3), (3,4,4), (4,5,5), (5,1,6), (6,3,7);
+alter table test_s2 add f1 text check(f1 >= 2) after c1;
+alter table test_s2 add f2 SET('w','ww','www','wwww') first;
+alter table test_s2 modify f2 text after c1;
+alter table test_s2 modify c2 int first;
+select * from test_s2;
+drop table if exists test_s2 cascade;
+
+drop table if exists t1 cascade;
+create table t1(f1 set('aaa','bbb','ccc'), f2 set('1','2','3'), f3 set('beijing','shannghai','nanjing'),
+ f4 set('aaa','bbb','ccc') generated always as(f1+f2+f3) stored,
+ f5 set('1','2','3') generated always as(f1+f2+f3) stored,
+ f6 set('beijing','shannghai','nanjing') generated always as(f1+f2+f3) stored);
+\d+ t1
+alter table t1 modify f1 int after f6;
+\d+ t1
+alter table t1 drop f1;
+\d+ t1
+drop table if exists t1 cascade;
+
+drop table t1 cascade;
+create table t1(f1 int, f2 text, f3 int, f4 bool, f5 int generated always as (f1 + f3) stored);
+insert into t1 values(1, 'aaa', 3, true);
+insert into t1 values(11, 'bbb', 33, false);
+insert into t1 values(111, 'ccc', 333, true);
+insert into t1 values(1111, 'ddd', 3333, true);
+
+create view t1_view1 as select * from t1;
+select * from t1_view1;
+alter table t1 modify f1 int after f2, modify f3 int first;
+drop view t1_view1;
+create view t1_view1 as select * from t1;
+alter table t1 modify f1 int after f2, modify f3 int first;
+drop table t1 cascade;
+
+create table t1(f1 int, f2 text, f3 int, f4 bigint, f5 int generated always as (f1 + f3) stored);
+insert into t1 values(1, 'aaa', 3, 1);
+insert into t1 values(11, 'bbb', 33, 2);
+insert into t1 values(111, 'ccc', 333, 3);
+insert into t1 values(1111, 'ddd', 3333, 4);
+
+create view t1_view1 as select * from t1;
+select * from t1_view1;
+alter table t1 add f6 int first, add f7 int after f4, modify f1 int after f2, modify f3 int first;
+select * from t1_view1;
+drop view t1_view1;
+
+create view t1_view2 as select f1, f3, f5 from t1 where f2='aaa';
+select * from t1_view2;
+alter table t1 add f8 int first, add f9 int after f4, modify f1 int after f2, modify f3 int first, modify f2 varchar(20) first;
+select * from t1_view2;
+drop view t1_view2;
+
+create view t1_view3 as select * from (select f1+f3, f5 from t1);
+select * from t1_view3;
+alter table t1 add f10 int first, add f11 int after f4, modify f1 int after f2, modify f3 int first, modify f2 varchar(20) first;
+select * from t1_view3;
+drop view t1_view3;
+
+create view t1_view4 as select * from (select abs(f1+f3) as col1, abs(f5) as col2 from t1);
+select * from t1_view4;
+alter table t1 add f12 int first, add f13 int after f4, modify f1 int after f2, modify f3 int first, modify f2 varchar(20) first;
+select * from t1_view4;
+drop view t1_view4;
+
+create view t1_view5 as select * from (select * from t1);
+select * from t1_view5;
+alter table t1 add f14 int first, add f15 int after f4, modify f1 int after f2, modify f3 int first;
+select * from t1_view5;
+drop view t1_view5;
+
+create view t1_view6 as select f1, f3, f5 from t1 where f2='aaa';
+select * from t1_view6;
+alter table t1 modify f1 int after f2, modify f3 int first, modify f2 varchar(20) first;
+select * from t1_view6;
+drop view t1_view6;
+drop table t1 cascade;
+
+-- dts for add
+drop table if exists test_d;
+create table test_d (f2 int primary key, f3 bool, f5 text);
+insert into test_d values(1,2,3), (2,3,4), (3,4,5);
+select * from test_d;
+alter table test_d add f1 int default 1,add f11 text check (f11 >=2) first;
+select * from test_d;
+
+drop table if exists test_d;
+create table test_d (f2 int primary key, f3 bool, f5 text);
+insert into test_d values(1,2,3), (2,3,4), (3,4,5);
+select * from test_d;
+alter table test_d add f1 int default 1;
+alter table test_d add f11 text check (f11 >=2) first;
+select * from test_d;
+drop table if exists test_d;
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+select * from t1;
+alter table t1 add f6 int generated always as (f1 * 10) stored, add f7 text default '777' first,
+ add f8 int default 8, add f9 int primary key auto_increment after f6;
+select * from t1;
+
+drop table if exists t1 cascade;
+create table t1(f1 int, f2 varchar(20), f3 timestamp, f4 bit(8), f5 bool);
+insert into t1 values(1, 'a', '2022-11-08 19:56:10.158564', x'41', true), (2, 'b', '2022-11-09 19:56:10.158564', x'42', false);
+select * from t1;
+alter table t1 add f6 int generated always as (f1 * 10) stored, add f7 text default '7' first,
+ add f8 int default 8, add f9 int primary key auto_increment after f1,
+ add f10 bool default true, add f11 timestamp after f2,
+ add f12 text after f3, add f14 int default '14', add f15 int default 15 check(f15 = 15) after f9;
+select * from t1;
+drop table if exists t1 cascade;
+
+\c postgres
+drop database test_first_after_B;
\ No newline at end of file
diff --git a/src/test/regress/sql/alter_table_modify.sql b/src/test/regress/sql/alter_table_modify.sql
new file mode 100644
index 000000000..b225a3540
--- /dev/null
+++ b/src/test/regress/sql/alter_table_modify.sql
@@ -0,0 +1,1306 @@
+create database atbdb WITH ENCODING 'UTF-8' dbcompatibility 'B';
+\c atbdb
+CREATE SCHEMA atbdb_schema;
+SET CURRENT_SCHEMA TO atbdb_schema;
+
+ALTER TABLE pg_catalog.pg_class MODIFY COLUMN relname int; -- ERROR
+-- cstore not supported
+CREATE TABLE test_at_modify_cstore(
+ a int,
+ b int NOT NULL
+) WITH (ORIENTATION=column, COMPRESSION=high, COMPRESSLEVEL=2);
+ALTER TABLE test_at_modify_cstore MODIFY COLUMN b int PRIMARY KEY;
+DROP TABLE test_at_modify_cstore;
+
+-- test modify column syntax
+CREATE TABLE test_at_modify_syntax(
+ a int,
+ b int NOT NULL
+);
+ALTER TABLE test_at_modify_syntax MODIFY b int INVISIBLE; -- ERROR
+ALTER TABLE test_at_modify_syntax MODIFY b int COMMENT 'string'; -- ERROR
+ALTER TABLE test_at_modify_syntax MODIFY b int CHECK (b < 100) NOT ENFORCED; -- ERROR
+ALTER TABLE test_at_modify_syntax MODIFY b int GENERATED ALWAYS AS (a+1) VIRTUAL; -- ERROR
+ALTER TABLE test_at_modify_syntax MODIFY b int KEY; -- ERROR
+ALTER TABLE test_at_modify_syntax MODIFY COLUMN xmax int; -- ERROR
+ALTER TABLE test_at_modify_syntax MODIFY b int encrypted with (column_encryption_key = ImgCEK, encryption_type = DETERMINISTIC); -- ERROR
+ALTER TABLE test_at_modify_syntax MODIFY b varchar(8); -- alter column type only
+\d+ test_at_modify_syntax;
+set b_format_behavior_compat_options = 'enable_modify_column';
+show b_format_behavior_compat_options;
+ALTER TABLE test_at_modify_syntax MODIFY b varchar(8); -- modify column
+\d+ test_at_modify_syntax;
+set b_format_behavior_compat_options = '';
+show b_format_behavior_compat_options;
+ALTER TABLE test_at_modify_syntax MODIFY b int UNIQUE KEY;
+\d+ test_at_modify_syntax;
+ALTER TABLE test_at_modify_syntax MODIFY COLUMN b int PRIMARY KEY;
+\d+ test_at_modify_syntax;
+DROP TABLE test_at_modify_syntax;
+
+-- test modify column without data
+CREATE TABLE test_at_modify(
+ a int,
+ b int NOT NULL
+);
+ALTER TABLE test_at_modify MODIFY b varchar(8) NULL;
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b varchar(8) DEFAULT '0';
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b int AUTO_INCREMENT PRIMARY KEY INITIALLY DEFERRED;
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b varchar(8) UNIQUE DEFERRABLE;
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b varchar(8) CHECK (b < 'a');
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b varchar(8) COLLATE "POSIX";
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b int NOT NULL;
+\d+ test_at_modify;
+select pg_get_tabledef('test_at_modify'::regclass);
+INSERT INTO test_at_modify VALUES(1,1);
+DROP TABLE test_at_modify;
+
+-- test modify column datatype
+CREATE TABLE test_at_modify_type(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_modify_type VALUES(1,1);
+INSERT INTO test_at_modify_type VALUES(2,2);
+INSERT INTO test_at_modify_type VALUES(3,3);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b varchar(8);
+SELECT * FROM test_at_modify_type where b = '3';
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DATE; -- ERROR
+ALTER TABLE test_at_modify_type MODIFY COLUMN b RAW;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+DROP TABLE test_at_modify_type;
+CREATE TABLE test_at_modify_type(
+ a int,
+ b serial NOT NULL
+);
+INSERT INTO test_at_modify_type VALUES(1,1);
+INSERT INTO test_at_modify_type VALUES(2,2);
+INSERT INTO test_at_modify_type VALUES(3,3);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b int;
+ALTER TABLE test_at_modify_type MODIFY COLUMN b int[]; -- ERROR
+ALTER TABLE test_at_modify_type MODIFY COLUMN b int16;
+ALTER TABLE test_at_modify_type MODIFY COLUMN b serial; -- ERROR
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DECIMAL(4,2);
+SELECT * FROM test_at_modify_type where b = 3;
+ALTER TABLE test_at_modify_type MODIFY COLUMN b BOOLEAN;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+DROP TABLE test_at_modify_type;
+CREATE TABLE test_at_modify_type(
+ a int,
+ b text
+);
+INSERT INTO test_at_modify_type VALUES(1,'beijing');
+INSERT INTO test_at_modify_type VALUES(2,'shanghai');
+INSERT INTO test_at_modify_type VALUES(3,'guangzhou');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','nanjing','wuhan'); -- ERROR
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','nanjing','guangzhou');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','guangzhou','wuhan'); -- ERROR
+select pg_get_tabledef('test_at_modify_type'::regclass);
+DROP TABLE test_at_modify_type;
+CREATE TABLE test_at_modify_type(
+ a int,
+ b varchar(32)
+);
+INSERT INTO test_at_modify_type VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_modify_type VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_modify_type VALUES(3,'2022-11-24 12:00:00');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b varchar(10); -- ERROR
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DATE;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+DROP TABLE test_at_modify_type;
+CREATE TABLE test_at_modify_type(
+ a int,
+ b int[] NOT NULL
+);
+INSERT INTO test_at_modify_type VALUES(1,ARRAY[1,1]);
+INSERT INTO test_at_modify_type VALUES(2,ARRAY[2,2]);
+INSERT INTO test_at_modify_type VALUES(3,ARRAY[3,3]);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b float4[];
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+DROP TABLE test_at_modify_type;
+
+-- test modify column constraint
+CREATE TABLE test_at_modify_constr(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_modify_constr VALUES(1,1);
+INSERT INTO test_at_modify_constr VALUES(2,2);
+INSERT INTO test_at_modify_constr VALUES(3,3);
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) NOT NULL NULL; -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) UNIQUE KEY NULL;
+INSERT INTO test_at_modify_constr VALUES(3,3); -- ERROR
+INSERT INTO test_at_modify_constr VALUES(4,NULL);
+ALTER TABLE test_at_modify_constr MODIFY b int NULL PRIMARY KEY; -- ERROR
+DELETE FROM test_at_modify_constr WHERE b IS NULL;
+ALTER TABLE test_at_modify_constr MODIFY b int NULL PRIMARY KEY;
+INSERT INTO test_at_modify_constr VALUES(4,NULL); -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b < 3); -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b < 5);
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b = a); -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check_1 CHECK (b = a);
+INSERT INTO test_at_modify_constr VALUES(4,4);
+INSERT INTO test_at_modify_constr VALUES(5,5); -- ERROR
+INSERT INTO test_at_modify_constr VALUES(6,'a'); -- ERROR
+INSERT INTO test_at_modify_constr VALUES(0,'a');
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL PRIMARY KEY; -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL;
+INSERT INTO test_at_modify_constr VALUES(5,5); -- ERROR
+SELECT b FROM test_at_modify_constr ORDER BY 1;
+select pg_get_tabledef('test_at_modify_constr'::regclass);
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL REFERENCES test_at_ref (a); -- ERROR
+DROP TABLE test_at_modify_constr;
+
+-- test modify column default
+CREATE TABLE test_at_modify_default(
+ a int,
+ b int DEFAULT NULL
+);
+INSERT INTO test_at_modify_default VALUES(1,1);
+INSERT INTO test_at_modify_default VALUES(2,2);
+INSERT INTO test_at_modify_default VALUES(3,3);
+ALTER TABLE test_at_modify_default MODIFY b bigint DEFAULT (a+1); -- ERROR
+ALTER TABLE test_at_modify_default MODIFY b bigint DEFAULT NULL;
+\d+ test_at_modify_default;
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) DEFAULT 'a' GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) DEFAULT 'a';
+\d+ test_at_modify_default;
+INSERT INTO test_at_modify_default VALUES(0,DEFAULT);
+SELECT b FROM test_at_modify_default ORDER BY 1;
+ALTER TABLE test_at_modify_default MODIFY b int DEFAULT 4 AUTO_INCREMENT; -- ERROR
+ALTER TABLE test_at_modify_default MODIFY b int DEFAULT 4;
+INSERT INTO test_at_modify_default VALUES(4,DEFAULT);
+SELECT b FROM test_at_modify_default ORDER BY 1;
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+SELECT a,b FROM test_at_modify_default ORDER BY 1,2;
+ALTER TABLE test_at_modify_default MODIFY a varchar(8) DEFAULT 'a';
+INSERT INTO test_at_modify_default VALUES(DEFAULT,DEFAULT);
+SELECT a,b FROM test_at_modify_default ORDER BY 1,2;
+\d+ test_at_modify_default;
+DROP TABLE test_at_modify_default;
+
+-- test modify column depended by generated column
+CREATE TABLE test_at_modify_generated(
+ a int,
+ b varchar(32),
+ c varchar(32) GENERATED ALWAYS AS (b) STORED
+);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_modify_generated(a,b) VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_modify_generated(a,b) VALUES(3,'2022-11-24 12:00:00');
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b DATE;
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+\d+ test_at_modify_generated
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b varchar(32) AFTER c;
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+DROP TABLE test_at_modify_generated;
+CREATE TABLE test_at_modify_generated(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+);
+INSERT INTO test_at_modify_generated(a,b) VALUES(-1,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(0,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(2,DEFAULT);
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b varchar(8) GENERATED ALWAYS AS (a) STORED FIRST, MODIFY COLUMN a varchar(8) AFTER b;
+INSERT INTO test_at_modify_generated(a,b) VALUES(3,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY b::int,a::int;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a int AFTER b, MODIFY COLUMN b int GENERATED ALWAYS AS (a+1) STORED FIRST;
+INSERT INTO test_at_modify_generated(a,b) VALUES(4,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b varchar(8) AFTER a, MODIFY COLUMN a varchar(8) AFTER b;
+INSERT INTO test_at_modify_generated(a,b) VALUES(5,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b int GENERATED ALWAYS AS (a) STORED;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a int FIRST, MODIFY COLUMN b int FIRST;
+INSERT INTO test_at_modify_generated(a,b) VALUES(6,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+DROP TABLE test_at_modify_generated;
+CREATE TABLE test_at_modify_generated(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+);
+INSERT INTO test_at_modify_generated(a,b) VALUES(-1,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(0,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,DEFAULT);
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a bool;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a int;
+INSERT INTO test_at_modify_generated(a,b) VALUES(100,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a bool, MODIFY COLUMN b varchar(32);
+\d test_at_modify_generated
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b int GENERATED ALWAYS AS (a+1) STORED, MODIFY COLUMN a int;
+\d test_at_modify_generated
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+INSERT INTO test_at_modify_generated(a,b) VALUES(100,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b bool GENERATED ALWAYS AS (a) STORED;
+\d test_at_modify_generated
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a bool;
+\d test_at_modify_generated
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a int;
+INSERT INTO test_at_modify_generated(a,b) VALUES(100,DEFAULT);
+\d test_at_modify_generated
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+DROP TABLE test_at_modify_generated;
+
+-- error generated column reference generated column
+CREATE TABLE test_at_modify_generated(
+ a int,
+ b int,
+ c int GENERATED ALWAYS AS (b+1) STORED
+);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,1);
+ALTER TABLE test_at_modify_generated MODIFY b float4 GENERATED ALWAYS AS (a+1000) STORED; -- ERROR
+ALTER TABLE test_at_modify_generated MODIFY b float4 GENERATED ALWAYS AS (c+1000) STORED; -- ERROR
+ALTER TABLE test_at_modify_generated MODIFY a float4 GENERATED ALWAYS AS (b+1000) STORED, MODIFY c float4 GENERATED ALWAYS AS (a+1000) STORED; -- ERROR
+ALTER TABLE test_at_modify_generated MODIFY COLUMN c float4, MODIFY b float4 GENERATED ALWAYS AS (c+1000) STORED;
+DROP TABLE test_at_modify_generated;
+
+-- test modify column AUTO_INCREMENT
+CREATE TABLE test_at_modify_autoinc(
+ a int,
+ b int
+);
+INSERT INTO test_at_modify_autoinc VALUES(1,NULL);
+INSERT INTO test_at_modify_autoinc VALUES(2,0);
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT; -- ERROR
+ALTER TABLE test_at_modify_autoinc MODIFY b DECIMAL(4,2) AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_modify_autoinc MODIFY b serial AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT NULL UNIQUE KEY;
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+INSERT INTO test_at_modify_autoinc VALUES(3,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc MODIFY COLUMN b int;
+INSERT INTO test_at_modify_autoinc VALUES(4,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc MODIFY b int AUTO_INCREMENT PRIMARY KEY, AUTO_INCREMENT=100;
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+INSERT INTO test_at_modify_autoinc VALUES(5,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc AUTO_INCREMENT=1000;
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT;
+INSERT INTO test_at_modify_autoinc VALUES(6,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE, MODIFY b int2 AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE; -- ERROR
+ALTER TABLE test_at_modify_autoinc MODIFY COLUMN b int;
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE;
+INSERT INTO test_at_modify_autoinc VALUES(7,0,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc DROP COLUMN c , MODIFY b int2 AUTO_INCREMENT UNIQUE KEY FIRST;
+INSERT INTO test_at_modify_autoinc(a,b) VALUES(8,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 2,1;
+ALTER TABLE test_at_modify_autoinc MODIFY b float4; -- ALTER TYPE ONLY, KEEP AUTO_INCREMENT
+INSERT INTO test_at_modify_autoinc(a,b) VALUES(9,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 2,1;
+DROP TABLE test_at_modify_autoinc;
+
+-- test generated column reference auto_increment column
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,0);
+INSERT INTO test_at_modify_fa VALUES(21,22,0);
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c int AUTO_INCREMENT PRIMARY KEY, MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED; -- ERROR
+ALTER TABLE test_at_modify_fa MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED, MODIFY COLUMN c int AUTO_INCREMENT PRIMARY KEY; -- ERROR
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c bigint AUTO_INCREMENT PRIMARY KEY, MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED; -- ERROR
+ALTER TABLE test_at_modify_fa MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED, MODIFY COLUMN c bigint AUTO_INCREMENT PRIMARY KEY; -- ERROR
+DROP TABLE test_at_modify_fa;
+
+-- test modify column depended by other objects
+CREATE TABLE test_at_modify_depend(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_modify_depend VALUES(1,1);
+INSERT INTO test_at_modify_depend VALUES(2,2);
+INSERT INTO test_at_modify_depend VALUES(3,3);
+-- --PROCEDURE contains column
+CREATE OR REPLACE PROCEDURE test_at_modify_proc(IN p_in int)
+ AS
+ BEGIN
+ INSERT INTO test_at_modify_depend(a,b) VALUES(p_in, p_in);
+ END;
+/
+ALTER TABLE test_at_modify_depend MODIFY b varchar(8) NOT NULL;
+CALL test_at_modify_proc(2);
+DROP PROCEDURE test_at_modify_proc;
+
+-- --TRIGGER contains and depends column
+CREATE OR REPLACE FUNCTION tg_bf_test_at_modify_func() RETURNS TRIGGER AS
+$$
+ DECLARE
+ BEGIN
+ UPDATE test_at_modify_depend SET b = NULL WHERE a < NEW.a;
+ RETURN NEW;
+ END
+$$ LANGUAGE PLPGSQL;
+CREATE TRIGGER tg_bf_test_at_modify
+ AFTER UPDATE ON test_at_modify_depend
+ FOR EACH ROW WHEN ( NEW.b IS NULL AND OLD.b = OLD.a)
+ EXECUTE PROCEDURE tg_bf_test_at_modify_func();
+ALTER TABLE test_at_modify_depend MODIFY b int NULL DEFAULT 0;
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+UPDATE test_at_modify_depend SET b = NULL WHERE a = 2;
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+DROP TRIGGER tg_bf_test_at_modify ON test_at_modify_depend;
+
+-- --TRIGGER contains but does not depend column
+CREATE TRIGGER tg_bf_test_at_modify
+ BEFORE INSERT ON test_at_modify_depend
+ FOR EACH ROW
+ EXECUTE PROCEDURE tg_bf_test_at_modify_func();
+ALTER TABLE test_at_modify_depend MODIFY b varchar(8) NULL;
+INSERT INTO test_at_modify_depend VALUES (4, 4);
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+DROP TRIGGER tg_bf_test_at_modify ON test_at_modify_depend;
+DROP PROCEDURE tg_bf_test_at_modify_func;
+
+-- --VIEW depends column
+CREATE VIEW test_at_modify_view AS SELECT b FROM test_at_modify_depend;
+ALTER TABLE test_at_modify_depend MODIFY b bigint NULL; -- ERROR
+ALTER TABLE test_at_modify_depend MODIFY b int NULL; -- ERROR
+ALTER TABLE test_at_modify_depend MODIFY b varchar(8) NULL;
+SELECT * FROM test_at_modify_view ORDER BY 1;
+DROP VIEW test_at_modify_view;
+CREATE VIEW test_at_modify_view AS SELECT a FROM test_at_modify_depend where b > 0;
+CREATE VIEW test_at_modify_view1 AS SELECT * FROM test_at_modify_view;
+ALTER TABLE test_at_modify_depend MODIFY b bigint NULL GENERATED ALWAYS AS (a+1);
+ALTER TABLE test_at_modify_depend MODIFY b varchar(8) NULL;
+ALTER TABLE test_at_modify_depend MODIFY b int NULL;
+DROP VIEW test_at_modify_view1;
+DROP VIEW test_at_modify_view;
+CREATE materialized VIEW test_at_modify_view AS SELECT b FROM test_at_modify_depend;
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b bigint not null; -- ERROR
+DROP MATERIALIZED VIEW test_at_modify_view;
+
+-- --TABLE reference column.
+DELETE FROM test_at_modify_depend;
+ALTER TABLE test_at_modify_depend MODIFY b INT PRIMARY KEY;
+CREATE TABLE test_at_modify_ref(
+ a int,
+ b int,
+ FOREIGN KEY (b) REFERENCES test_at_modify_depend(b) ON DELETE SET NULL
+);
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b varchar(8);
+INSERT INTO test_at_modify_ref VALUES(0,0); -- ERROR
+INSERT INTO test_at_modify_depend VALUES(0,0);
+INSERT INTO test_at_modify_ref VALUES(0,0);
+ALTER TABLE test_at_modify_ref MODIFY COLUMN b varchar(8) GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ALTER TABLE test_at_modify_ref MODIFY COLUMN b varchar(8);
+\d+ test_at_modify_ref
+DROP TABLE test_at_modify_ref;
+
+-- --TABLE reference self column.
+CREATE TABLE test_at_modify_ref(
+ a int PRIMARY KEY,
+ b int,
+ FOREIGN KEY (b) REFERENCES test_at_modify_ref(a) ON DELETE SET NULL
+);
+INSERT INTO test_at_modify_ref VALUES(0,0);
+ALTER TABLE test_at_modify_ref MODIFY COLUMN b varchar(8) GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ALTER TABLE test_at_modify_ref MODIFY COLUMN b varchar(8);
+ALTER TABLE test_at_modify_ref MODIFY COLUMN a varchar(8);
+INSERT INTO test_at_modify_ref VALUES('a','a');
+DROP TABLE test_at_modify_ref;
+
+-- --RULE reference column.
+CREATE RULE test_at_modify_rule AS ON INSERT TO test_at_modify_depend WHERE (b is null) DO INSTEAD UPDATE test_at_modify_depend SET b=0;
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b int not null; -- ERROR
+DROP RULE test_at_modify_rule ON test_at_modify_depend;
+
+-- --RLSPOLICY reference column.
+DROP TABLE test_at_modify_depend;
+CREATE ROLE at_modify_role PASSWORD 'Gauss@123';
+CREATE TABLE test_at_modify_depend(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_modify_depend VALUES(0,0);
+GRANT USAGE ON SCHEMA atbdb_schema TO at_modify_role;
+GRANT SELECT ON test_at_modify_depend TO at_modify_role;
+ALTER TABLE test_at_modify_depend ENABLE ROW LEVEL SECURITY;
+CREATE ROW LEVEL SECURITY POLICY test_at_modify_rls ON test_at_modify_depend AS RESTRICTIVE FOR SELECT TO at_modify_role USING(b >= 20);
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b int not null;
+INSERT INTO test_at_modify_depend VALUES(21,21);
+SET ROLE at_modify_role PASSWORD 'Gauss@123';
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+RESET ROLE;
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b bool not null;
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b int not null;
+INSERT INTO test_at_modify_depend VALUES(22,22);
+SET ROLE at_modify_role PASSWORD 'Gauss@123';
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+RESET ROLE;
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+DROP TABLE test_at_modify_depend;
+REVOKE ALL PRIVILEGES ON SCHEMA atbdb_schema FROM at_modify_role;
+DROP ROLE at_modify_role;
+
+-- ------------------------------------------------------ test ALTER TABLE CHANGE
+-- test change column syntax
+CREATE TABLE test_at_change_syntax(
+ a int,
+ b int NOT NULL
+);
+ALTER TABLE test_at_change_syntax CHANGE b b1 int INVISIBLE; -- ERROR
+ALTER TABLE test_at_change_syntax CHANGE b b1 int COMMENT 'string'; -- ERROR
+ALTER TABLE test_at_change_syntax CHANGE b b1 int CHECK (b < 100) NOT ENFORCED; -- ERROR
+ALTER TABLE test_at_change_syntax CHANGE b b1 int GENERATED ALWAYS AS (a+1) VIRTUAL; -- ERROR
+ALTER TABLE test_at_change_syntax CHANGE b b1 int KEY; -- ERROR
+ALTER TABLE test_at_change_syntax CHANGE b b1 int encrypted with (column_encryption_key = ImgCEK, encryption_type = DETERMINISTIC); -- ERROR
+ALTER TABLE test_at_change_syntax CHANGE b a int UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_change_syntax CHANGE COLUMN b a int NOT NULL,CHANGE COLUMN a b int; -- ERROR
+ALTER TABLE test_at_change_syntax CHANGE b b1 int UNIQUE KEY;
+\d+ test_at_change_syntax;
+ALTER TABLE test_at_change_syntax CHANGE COLUMN b1 b int PRIMARY KEY;
+\d+ test_at_change_syntax;
+ALTER TABLE test_at_change_syntax CHANGE COLUMN b b123456789012345678901234567890123456789012345678901234567890123 int UNIQUE KEY;
+\d+ test_at_change_syntax;
+DROP TABLE test_at_change_syntax;
+
+-- test change column without data
+CREATE TABLE test_at_change(
+ a int,
+ b int NOT NULL
+);
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) NULL;
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) DEFAULT '0';
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b b1 int AUTO_INCREMENT PRIMARY KEY;
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) UNIQUE;
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) CHECK (b1 < 'a');
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) COLLATE "POSIX";
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b1 b int NOT NULL;
+\d+ test_at_change;
+select pg_get_tabledef('test_at_change'::regclass);
+INSERT INTO test_at_change VALUES(1,1);
+DROP TABLE test_at_change;
+
+-- test change column datatype
+CREATE TABLE test_at_change_type(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_change_type VALUES(1,1);
+INSERT INTO test_at_change_type VALUES(2,2);
+INSERT INTO test_at_change_type VALUES(3,3);
+ALTER TABLE test_at_change_type CHANGE b b1 varchar(8);
+SELECT * FROM test_at_change_type where b1 = '3';
+ALTER TABLE test_at_change_type CHANGE b1 b DATE; -- ERROR
+ALTER TABLE test_at_change_type CHANGE b1 b RAW;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+DROP TABLE test_at_change_type;
+CREATE TABLE test_at_change_type(
+ a int,
+ b serial NOT NULL
+);
+INSERT INTO test_at_change_type VALUES(1,1);
+INSERT INTO test_at_change_type VALUES(2,2);
+INSERT INTO test_at_change_type VALUES(3,3);
+ALTER TABLE test_at_change_type CHANGE b b1 int;
+ALTER TABLE test_at_change_type CHANGE b1 b serial; -- ERROR
+ALTER TABLE test_at_change_type CHANGE b1 b DECIMAL(4,2);
+SELECT * FROM test_at_change_type where b = 3;
+ALTER TABLE test_at_change_type CHANGE b b1 BOOLEAN;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+DROP TABLE test_at_change_type;
+CREATE TABLE test_at_change_type(
+ a int,
+ b text
+);
+INSERT INTO test_at_change_type VALUES(1,'beijing');
+INSERT INTO test_at_change_type VALUES(2,'shanghai');
+INSERT INTO test_at_change_type VALUES(3,'guangzhou');
+ALTER TABLE test_at_change_type CHANGE b b1 SET('beijing','shanghai','nanjing','wuhan'); -- ERROR
+ALTER TABLE test_at_change_type CHANGE b b1 SET('beijing','shanghai','nanjing','guangzhou');
+ALTER TABLE test_at_change_type CHANGE b1 b SET('beijing','shanghai','guangzhou','wuhan'); -- ERROR
+select pg_get_tabledef('test_at_change_type'::regclass);
+DROP TABLE test_at_change_type;
+CREATE TABLE test_at_change_type(
+ a int,
+ b varchar(32)
+);
+INSERT INTO test_at_change_type VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_change_type VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_change_type VALUES(3,'2022-11-24 12:00:00');
+ALTER TABLE test_at_change_type CHANGE b b1 varchar(10); -- ERROR
+ALTER TABLE test_at_change_type CHANGE b b1 DATE;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+DROP TABLE test_at_change_type;
+
+-- test change column constraint
+CREATE TABLE test_at_change_constr(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_change_constr VALUES(1,1);
+INSERT INTO test_at_change_constr VALUES(2,2);
+INSERT INTO test_at_change_constr VALUES(3,3);
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) NOT NULL NULL; -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) UNIQUE KEY NULL;
+INSERT INTO test_at_change_constr VALUES(3,3); -- ERROR
+INSERT INTO test_at_change_constr VALUES(4,NULL);
+ALTER TABLE test_at_change_constr CHANGE b1 b int NULL PRIMARY KEY; -- ERROR
+DELETE FROM test_at_change_constr WHERE b1 IS NULL;
+ALTER TABLE test_at_change_constr CHANGE b1 b int NULL PRIMARY KEY;
+INSERT INTO test_at_change_constr VALUES(4,NULL); -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) CONSTRAINT t_at_m_check CHECK (b1 < 3); -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) CONSTRAINT t_at_m_check CHECK (b1 < 5);
+ALTER TABLE test_at_change_constr CHANGE b1 b varchar(8) CONSTRAINT t_at_m_check CHECK (b = a); -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b1 b varchar(8) CONSTRAINT t_at_m_check_1 CHECK (b = a);
+INSERT INTO test_at_change_constr VALUES(4,4);
+INSERT INTO test_at_change_constr VALUES(5,5); -- ERROR
+INSERT INTO test_at_change_constr VALUES(6,'a'); -- ERROR
+INSERT INTO test_at_change_constr VALUES(0,'a');
+ALTER TABLE test_at_change_constr CHANGE b b1 int NOT NULL PRIMARY KEY; -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b b1 int NOT NULL;
+INSERT INTO test_at_change_constr VALUES(5,5); -- ERROR
+SELECT b1 FROM test_at_change_constr ORDER BY 1;
+select pg_get_tabledef('test_at_change_constr'::regclass);
+ALTER TABLE test_at_change_constr CHANGE b1 b int NOT NULL REFERENCES test_at_ref (a); -- ERROR
+DROP TABLE test_at_change_constr;
+
+-- test change column default
+CREATE TABLE test_at_change_default(
+ a int,
+ b int DEFAULT NULL
+);
+INSERT INTO test_at_change_default VALUES(1,1);
+INSERT INTO test_at_change_default VALUES(2,2);
+INSERT INTO test_at_change_default VALUES(3,3);
+ALTER TABLE test_at_change_default CHANGE b b1 bigint DEFAULT (a+1); -- ERROR
+ALTER TABLE test_at_change_default CHANGE b b1 bigint DEFAULT NULL;
+\d+ test_at_change_default;
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) DEFAULT 'a' GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) DEFAULT 'a';
+\d+ test_at_change_default;
+INSERT INTO test_at_change_default VALUES(0,DEFAULT);
+SELECT b FROM test_at_change_default ORDER BY 1;
+ALTER TABLE test_at_change_default CHANGE b b1 int DEFAULT 4 AUTO_INCREMENT; -- ERROR
+ALTER TABLE test_at_change_default CHANGE b b1 int DEFAULT 4;
+INSERT INTO test_at_change_default VALUES(4,DEFAULT);
+SELECT b1 FROM test_at_change_default ORDER BY 1;
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+SELECT a,b FROM test_at_change_default ORDER BY 1,2;
+ALTER TABLE test_at_change_default CHANGE a a1 varchar(8) DEFAULT 'a';
+INSERT INTO test_at_change_default VALUES(DEFAULT,DEFAULT);
+SELECT * FROM test_at_change_default ORDER BY 1,2;
+\d+ test_at_change_default;
+DROP TABLE test_at_change_default;
+
+-- test change column depended on by generated column
+CREATE TABLE test_at_change_generated(
+ a int,
+ b varchar(32),
+ c varchar(32) GENERATED ALWAYS AS (b) STORED
+);
+INSERT INTO test_at_change_generated(a,b) VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_change_generated(a,b) VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_change_generated(a,b) VALUES(3,'2022-11-24 12:00:00');
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ALTER TABLE test_at_change_generated CHANGE COLUMN b b1 DATE;
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+\d+ test_at_change_generated
+ALTER TABLE test_at_change_generated CHANGE COLUMN b1 b varchar(32) AFTER c;
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+DROP TABLE test_at_change_generated;
+CREATE TABLE test_at_change_generated(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+);
+INSERT INTO test_at_change_generated(a,b) VALUES(-1,DEFAULT);
+INSERT INTO test_at_change_generated(a,b) VALUES(0,DEFAULT);
+INSERT INTO test_at_change_generated(a,b) VALUES(1,DEFAULT);
+ALTER TABLE test_at_change_generated CHANGE COLUMN a a1 bool;
+ALTER TABLE test_at_change_generated CHANGE COLUMN a1 a int;
+INSERT INTO test_at_change_generated(a,b) VALUES(100,DEFAULT);
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ALTER TABLE test_at_change_generated CHANGE COLUMN a a1 bool, MODIFY COLUMN b varchar(32);
+\d test_at_change_generated
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ALTER TABLE test_at_change_generated MODIFY COLUMN b int GENERATED ALWAYS AS (a+1) STORED, CHANGE COLUMN a1 a int;
+\d test_at_change_generated
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+INSERT INTO test_at_change_generated(a,b) VALUES(100,DEFAULT);
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ALTER TABLE test_at_change_generated MODIFY COLUMN b bool GENERATED ALWAYS AS (a) STORED;
+\d test_at_change_generated
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ALTER TABLE test_at_change_generated CHANGE COLUMN a a1 bool;
+\d test_at_change_generated
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ALTER TABLE test_at_change_generated CHANGE COLUMN a1 a int;
+INSERT INTO test_at_change_generated(a,b) VALUES(100,DEFAULT);
+\d test_at_change_generated
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+DROP TABLE test_at_change_generated;
+
+-- test change column AUTO_INCREMENT
+CREATE TABLE test_at_change_autoinc(
+ a int,
+ b int
+);
+INSERT INTO test_at_change_autoinc VALUES(1,NULL);
+INSERT INTO test_at_change_autoinc VALUES(2,0);
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int2 AUTO_INCREMENT; -- ERROR
+ALTER TABLE test_at_change_autoinc CHANGE b b1 DECIMAL(4,2) AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_change_autoinc CHANGE b b1 serial AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int2 AUTO_INCREMENT NULL UNIQUE KEY;
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+INSERT INTO test_at_change_autoinc VALUES(3,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc CHANGE b1 b int;
+INSERT INTO test_at_change_autoinc VALUES(4,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int AUTO_INCREMENT PRIMARY KEY, AUTO_INCREMENT=100;
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+INSERT INTO test_at_change_autoinc VALUES(5,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc AUTO_INCREMENT=1000;
+ALTER TABLE test_at_change_autoinc CHANGE b1 b int2 AUTO_INCREMENT;
+INSERT INTO test_at_change_autoinc VALUES(6,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE, CHANGE b b1 int2 AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE; -- ERROR
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int;
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE;
+INSERT INTO test_at_change_autoinc VALUES(7,0,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc DROP COLUMN c , CHANGE b1 b int2 AUTO_INCREMENT UNIQUE KEY FIRST;
+INSERT INTO test_at_change_autoinc(a,b) VALUES(8,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 2,1;
+DROP TABLE test_at_change_autoinc;
+
+-- test change column depended on by other objects
+CREATE TABLE test_at_change_depend(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_change_depend VALUES(1,1);
+INSERT INTO test_at_change_depend VALUES(2,2);
+INSERT INTO test_at_change_depend VALUES(3,3);
+-- --PROCEDURE contains column
+CREATE OR REPLACE PROCEDURE test_at_change_proc(IN p_in int)
+ AS
+ BEGIN
+ INSERT INTO test_at_change_depend(a,b) VALUES(p_in, p_in);
+ END;
+/
+ALTER TABLE test_at_change_depend CHANGE b b1 varchar(8) NOT NULL;
+CALL test_at_change_proc(2); -- ERROR
+DROP PROCEDURE test_at_change_proc;
+
+-- --TRIGGER contains and depends on column
+CREATE OR REPLACE FUNCTION tg_bf_test_at_change_func() RETURNS TRIGGER AS
+$$
+ DECLARE
+ BEGIN
+ UPDATE test_at_change_depend SET b1 = NULL WHERE a < NEW.a;
+ RETURN NEW;
+ END
+$$ LANGUAGE PLPGSQL;
+CREATE TRIGGER tg_bf_test_at_change
+ AFTER UPDATE ON test_at_change_depend
+ FOR EACH ROW WHEN ( NEW.b1 IS NULL AND OLD.b1 = OLD.a)
+ EXECUTE PROCEDURE tg_bf_test_at_change_func();
+ALTER TABLE test_at_change_depend CHANGE b1 b varchar(8) NULL DEFAULT '0';
+UPDATE test_at_change_depend SET b = NULL WHERE a = 2; -- ERROR
+DROP TRIGGER tg_bf_test_at_change ON test_at_change_depend;
+DROP FUNCTION tg_bf_test_at_change_func;
+
+-- --VIEW depends on column
+CREATE VIEW test_at_change_view AS SELECT b FROM test_at_change_depend;
+ALTER TABLE test_at_change_depend CHANGE b b1 bigint NULL; -- ERROR
+ALTER TABLE test_at_change_depend CHANGE b b1 int NULL; -- ERROR
+ALTER TABLE test_at_change_depend CHANGE b b1 varchar(8) NULL;
+SELECT b FROM test_at_change_view ORDER BY 1;
+DROP VIEW test_at_change_view;
+CREATE VIEW test_at_change_view AS SELECT a FROM test_at_change_depend where b1 > 0;
+CREATE VIEW test_at_change_view1 AS SELECT * FROM test_at_change_view;
+ALTER TABLE test_at_change_depend CHANGE b1 b bigint NULL GENERATED ALWAYS AS (a+1);
+ALTER TABLE test_at_change_depend CHANGE b b1 varchar(8) NULL;
+ALTER TABLE test_at_change_depend CHANGE b1 b int NULL;
+SELECT * FROM test_at_change_view1 ORDER BY 1;
+DROP VIEW test_at_change_view1;
+DROP VIEW test_at_change_view;
+CREATE materialized VIEW test_at_change_view AS SELECT b FROM test_at_change_depend;
+ALTER TABLE test_at_change_depend CHANGE COLUMN b b1 bigint not null; -- ERROR
+DROP MATERIALIZED VIEW test_at_change_view;
+
+-- --TABLE reference column.
+DELETE FROM test_at_change_depend;
+ALTER TABLE test_at_change_depend CHANGE b b INT PRIMARY KEY;
+CREATE TABLE test_at_change_ref(
+ a int,
+ b int,
+ FOREIGN KEY (b) REFERENCES test_at_change_depend(b) ON DELETE SET NULL
+);
+ALTER TABLE test_at_change_depend CHANGE COLUMN b b1 varchar(8);
+INSERT INTO test_at_change_ref VALUES(0,0); -- ERROR
+INSERT INTO test_at_change_depend VALUES(0,0);
+INSERT INTO test_at_change_ref VALUES(0,0);
+ALTER TABLE test_at_change_ref CHANGE COLUMN b b1 varchar(8) GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ALTER TABLE test_at_change_ref CHANGE COLUMN b b1 varchar(8);
+\d+ test_at_change_ref
+DROP TABLE test_at_change_ref;
+
+-- --TABLE reference self column.
+CREATE TABLE test_at_change_ref(
+ a int PRIMARY KEY,
+ b int,
+ FOREIGN KEY (b) REFERENCES test_at_change_ref(a) ON DELETE SET NULL
+);
+INSERT INTO test_at_change_ref VALUES(0,0);
+ALTER TABLE test_at_change_ref CHANGE COLUMN b b1 varchar(8) GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ALTER TABLE test_at_change_ref CHANGE COLUMN b b1 varchar(8);
+ALTER TABLE test_at_change_ref CHANGE COLUMN a a1 varchar(8);
+INSERT INTO test_at_change_ref VALUES('a','a');
+DROP TABLE test_at_change_ref;
+
+-- --RULE reference column.
+CREATE RULE test_at_change_rule AS ON INSERT TO test_at_change_depend WHERE (b1 is null) DO INSTEAD UPDATE test_at_change_depend SET b1=0;
+ALTER TABLE test_at_change_depend CHANGE COLUMN b1 b bigint not null; -- ERROR
+DROP RULE test_at_change_rule ON test_at_change_depend;
+
+-- --RLSPOLICY reference column.
+DROP TABLE test_at_change_depend;
+CREATE ROLE at_change_role PASSWORD 'Gauss@123';
+CREATE TABLE test_at_change_depend(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_change_depend VALUES(0,0);
+GRANT USAGE ON SCHEMA atbdb_schema TO at_change_role;
+GRANT SELECT ON test_at_change_depend TO at_change_role;
+ALTER TABLE test_at_change_depend ENABLE ROW LEVEL SECURITY;
+CREATE ROW LEVEL SECURITY POLICY test_at_change_rls ON test_at_change_depend AS RESTRICTIVE FOR SELECT TO at_change_role USING(b >= 20);
+ALTER TABLE test_at_change_depend CHANGE COLUMN b b1 int not null;
+INSERT INTO test_at_change_depend VALUES(21,21);
+SET ROLE at_change_role PASSWORD 'Gauss@123';
+SELECT * FROM test_at_change_depend ORDER BY 1,2;
+RESET ROLE;
+SELECT * FROM test_at_change_depend ORDER BY 1,2;
+ALTER TABLE test_at_change_depend CHANGE COLUMN b1 b2 bool not null;
+ALTER TABLE test_at_change_depend CHANGE COLUMN b2 b3 int not null;
+INSERT INTO test_at_change_depend VALUES(22,22);
+SET ROLE at_change_role PASSWORD 'Gauss@123';
+SELECT * FROM test_at_change_depend ORDER BY 1,2;
+RESET ROLE;
+SELECT * FROM test_at_change_depend ORDER BY 1,2;
+DROP TABLE test_at_change_depend;
+REVOKE ALL PRIVILEGES ON SCHEMA atbdb_schema FROM at_change_role;
+DROP ROLE at_change_role;
+
+-- test alter command order
+CREATE TABLE test_at_pass(
+ a int,
+ b int
+);
+INSERT INTO test_at_pass VALUES(1,0);
+ALTER TABLE test_at_pass ADD COLUMN c int, DROP COLUMN c; -- ERROR
+ALTER TABLE test_at_pass ADD COLUMN c int DEFAULT 0, MODIFY COLUMN c bigint; -- ERROR
+ALTER TABLE test_at_pass ADD COLUMN c int DEFAULT 0, CHANGE COLUMN c c1 bigint; -- ERROR
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint, MODIFY COLUMN b float4; -- ERROR
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint, CHANGE COLUMN b b1 float4; -- ERROR
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint, CHANGE COLUMN b1 b2 bigint; -- ERROR
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint, DROP COLUMN b; -- ERROR
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint, DROP COLUMN b; -- ERROR
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint, DROP COLUMN b1; -- ERROR
+ALTER TABLE test_at_pass MODIFY a bigint, MODIFY COLUMN a VARCHAR(8); -- ERROR
+ALTER TABLE test_at_pass CHANGE COLUMN b a bigint, CHANGE COLUMN a b VARCHAR(8); -- ERROR
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint, ALTER COLUMN b SET DEFAULT 100;
+\d test_at_pass
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint DEFAULT 100, ALTER COLUMN b DROP DEFAULT;
+\d test_at_pass
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint, ALTER COLUMN b1 SET DEFAULT 100;
+\d test_at_pass
+ALTER TABLE test_at_pass CHANGE COLUMN b1 b bigint DEFAULT 100, ALTER COLUMN b DROP DEFAULT;
+\d test_at_pass
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint DEFAULT 100, ALTER COLUMN b DROP DEFAULT; -- ERROR
+ALTER TABLE test_at_pass MODIFY COLUMN a bigint CONSTRAINT atpass_pk PRIMARY KEY, DROP CONSTRAINT atpass_pk; -- ERROR
+ALTER TABLE test_at_pass MODIFY COLUMN a bigint CONSTRAINT atpass_pk PRIMARY KEY, ADD CONSTRAINT atpass_pk PRIMARY KEY(a); -- ERROR
+DROP TABLE test_at_pass;
+
+-- test complex commands combined
+CREATE TABLE test_at_complex(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+);
+INSERT INTO test_at_complex VALUES(0,DEFAULT);
+INSERT INTO test_at_complex VALUES(1,DEFAULT);
+INSERT INTO test_at_complex VALUES(2,DEFAULT);
+INSERT INTO test_at_complex VALUES(-1,DEFAULT);
+ALTER TABLE test_at_complex MODIFY COLUMN a varchar(8), MODIFY COLUMN b int AUTO_INCREMENT UNIQUE;
+INSERT INTO test_at_complex VALUES(3,DEFAULT);
+SELECT * FROM test_at_complex ORDER BY a::int,b::int;
+DROP TABLE test_at_complex;
+
+CREATE TABLE test_at_complex(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+);
+INSERT INTO test_at_complex VALUES(0,DEFAULT);
+INSERT INTO test_at_complex VALUES(1,DEFAULT);
+INSERT INTO test_at_complex VALUES(2,DEFAULT);
+INSERT INTO test_at_complex VALUES(-1,DEFAULT);
+ALTER TABLE test_at_complex MODIFY COLUMN b int AUTO_INCREMENT UNIQUE, MODIFY COLUMN a varchar(8);
+INSERT INTO test_at_complex VALUES(3,DEFAULT);
+SELECT * FROM test_at_complex ORDER BY a::int,b::int;
+DROP TABLE test_at_complex;
+
+-- test modify partitioned table column without data
+CREATE TABLE pt_at_modify (a int, b int NOT NULL, PRIMARY KEY(b,a))
+PARTITION BY RANGE (a)
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (1000),
+ PARTITION p3 VALUES LESS THAN (MAXVALUE)
+);
+ALTER TABLE pt_at_modify MODIFY a int8 DEFAULT 0; -- ERROR
+ALTER TABLE pt_at_modify MODIFY a int DEFAULT 0;
+ALTER TABLE pt_at_modify MODIFY a int GENERATED ALWAYS AS (b+1) STORED; -- ERROR
+\d+ pt_at_modify;
+ALTER TABLE pt_at_modify MODIFY b int8 NULL;
+\d+ pt_at_modify;
+ALTER TABLE pt_at_modify MODIFY b int8 DEFAULT 0;
+\d+ pt_at_modify;
+ALTER TABLE pt_at_modify MODIFY b int AUTO_INCREMENT;
+\d+ pt_at_modify;
+ALTER TABLE pt_at_modify MODIFY b int2 UNIQUE;
+\d+ pt_at_modify;
+ALTER TABLE pt_at_modify MODIFY b int CHECK (b < 10000);
+\d+ pt_at_modify;
+ALTER TABLE pt_at_modify MODIFY b varchar(8) COLLATE "POSIX";
+\d+ pt_at_modify;
+ALTER TABLE pt_at_modify MODIFY b int8 GENERATED ALWAYS AS (a+1) STORED;
+\d+ pt_at_modify;
+ALTER TABLE pt_at_modify MODIFY b varchar(8) NOT NULL;
+\d+ pt_at_modify;
+select pg_get_tabledef('pt_at_modify'::regclass);
+INSERT INTO pt_at_modify VALUES(1,1);
+DROP TABLE pt_at_modify;
+
+-- test alter modify first after
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED AFTER a, MODIFY c float4 FIRST;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+DROP TABLE test_at_modify_fa;
+
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED FIRST, MODIFY c float4 GENERATED ALWAYS AS (b+100) STORED AFTER a;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+DROP TABLE test_at_modify_fa;
+
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,0);
+INSERT INTO test_at_modify_fa VALUES(21,22,0);
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED FIRST, MODIFY c bigint AUTO_INCREMENT PRIMARY KEY AFTER a;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+INSERT INTO test_at_modify_fa(a,b,c) VALUES(31,32,NULL);
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+DROP TABLE test_at_modify_fa;
+
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int GENERATED ALWAYS AS (b+1) STORED
+);
+INSERT INTO test_at_modify_fa VALUES(1,2,DEFAULT);
+INSERT INTO test_at_modify_fa VALUES(11,12,DEFAULT);
+INSERT INTO test_at_modify_fa VALUES(21,22,DEFAULT);
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED AFTER a, MODIFY b float4 GENERATED ALWAYS AS (a+1000) STORED FIRST; -- ERROR
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (b+100) STORED AFTER a, MODIFY a float4 GENERATED ALWAYS AS (b+1000) STORED FIRST;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+DROP TABLE test_at_modify_fa;
+
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+SELECT a,b,c FROM test_at_modify_fa ORDER BY 1,2,3;
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED AFTER a, ADD COLUMN e int GENERATED ALWAYS AS (b+100) STORED FIRST;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+DROP TABLE test_at_modify_fa;
+
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+SELECT a,b,c FROM test_at_modify_fa ORDER BY 1,2,3;
+ALTER TABLE test_at_modify_fa ADD COLUMN d bigint AUTO_INCREMENT PRIMARY KEY AFTER a, ADD COLUMN e int GENERATED ALWAYS AS (b+100) STORED FIRST;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+INSERT INTO test_at_modify_fa(a,b,c) VALUES(31,32,33);
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+DROP TABLE test_at_modify_fa;
+
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c float4 GENERATED ALWAYS AS (a+100) STORED FIRST, MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED AFTER c; -- ERROR
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c float4 GENERATED ALWAYS AS (a+100) STORED FIRST, MODIFY COLUMN b int GENERATED ALWAYS AS (a+100) STORED AFTER c;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3;
+DROP TABLE test_at_modify_fa;
+
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,0);
+INSERT INTO test_at_modify_fa VALUES(21,22,0);
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c bigint AUTO_INCREMENT PRIMARY KEY FIRST, MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED AFTER c; -- ERROR
+ALTER TABLE test_at_modify_fa MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED AFTER c, MODIFY COLUMN c bigint AUTO_INCREMENT PRIMARY KEY FIRST; -- ERROR
+DROP TABLE test_at_modify_fa;
+
+-- primary key should be not null after modify
+create table test11(f11 int, f12 varchar(20), f13 bool, CONSTRAINT pk_test11_f11 primary key (f11));
+\d test11
+ALTER TABLE test11 MODIFY COLUMN f11 int;
+\d test11
+ALTER TABLE test11 MODIFY COLUMN f11 int AFTER f13;
+\d test11
+ALTER TABLE test11 DROP CONSTRAINT pk_test11_f11, MODIFY COLUMN f11 int NULL;
+\d test11
+ALTER TABLE test11 ADD CONSTRAINT pk_test11_f11 primary key (f11), MODIFY COLUMN f11 int NULL;
+\d test11
+insert into test11(f11,f12,f13) values(NULL,'1',true); --ERROR
+drop table test11;
+-- primary keys should be not null after modify
+create table test11(f11 int, f12 varchar(20), f13 bool, CONSTRAINT pk_test11_f11 primary key (f11,f12));
+\d test11
+ALTER TABLE test11 MODIFY COLUMN f11 int;
+\d test11
+ALTER TABLE test11 MODIFY f11 int AFTER f13;
+\d test11
+ALTER TABLE test11 DROP CONSTRAINT pk_test11_f11, MODIFY COLUMN f11 int NULL;
+\d test11
+ALTER TABLE test11 ADD CONSTRAINT pk_test11_f11 primary key (f11), MODIFY COLUMN f11 int NULL;
+\d test11
+insert into test11(f11,f12,f13) values(NULL,'1',true); --ERROR
+drop table test11;
+-- primary keys in partition table should be not null after modify
+create table range_range(id int, gender varchar not null, birthday date not null)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+ALTER TABLE range_range ADD CONSTRAINT range_range_pkey primary KEY USING btree (id, birthday);
+\d+ range_range
+ALTER TABLE range_range MODIFY COLUMN id int AFTER birthday;
+\d+ range_range
+drop table if exists range_range cascade;
+-- primary key in partition table should be not null after modify
+create table range_range(id int, gender varchar not null, birthday date not null)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+ALTER TABLE range_range ADD CONSTRAINT range_range_pkey primary KEY USING btree (id);
+\d+ range_range
+ALTER TABLE range_range MODIFY COLUMN id int AFTER birthday;
+\d+ range_range
+drop table if exists range_range cascade;
+-- primary key in partition table should be not null after modify
+create table range_range(id int, gender varchar not null, birthday date not null)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+ALTER TABLE range_range ADD CONSTRAINT range_range_pkey primary KEY USING btree (gender);
+\d+ range_range
+ALTER TABLE range_range MODIFY COLUMN gender varchar AFTER birthday;
+\d+ range_range
+drop table if exists range_range cascade;
+-- primary keys in multi range keys partition table should be not null after modify
+create table multi_keys_range(f1 int, f2 int, f3 int)
+partition by range(f1, f2)
+(
+ partition multi_keys_range_p0 values less than (10, 0),
+ partition multi_keys_range_p1 values less than (20, 0),
+ partition multi_keys_range_p2 values less than (30, 0)
+);
+-- primary key should be LOCAL INDEX
+alter table multi_keys_range modify f1 int after f3, ADD CONSTRAINT multi_keys_range_pkey PRIMARY KEY USING btree (f1,f2);
+\d+ multi_keys_range
+alter table multi_keys_range modify f2 int after f3;
+\d+ multi_keys_range
+drop table if exists multi_keys_range cascade;
+-- primary keys in multi list keys partition table should be not null after modify
+create table multi_keys_list(f1 int, f2 int, f3 int)
+partition by list(f1, f2)
+(
+ partition multi_keys_list_p0 values ((10, 0)),
+ partition multi_keys_list_p1 values ((20, 0)),
+ partition multi_keys_list_p2 values (DEFAULT)
+);
+-- primary key should be LOCAL INDEX
+alter table multi_keys_list modify f1 int after f3, ADD CONSTRAINT multi_keys_list_pkey PRIMARY KEY USING btree (f1,f2);
+\d+ multi_keys_list
+alter table multi_keys_list modify f2 int after f3;
+\d+ multi_keys_list
+drop table if exists multi_keys_list cascade;
+
+-- test modify/change of a column that a VIEW depends on
+-- --modify
+-- -- --test select *
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_star AS select * from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test select * with add column
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_star AS select * from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20), ADD COLUMN f0 int;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int, ADD COLUMN f0 int;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+SELECT * FROM test_modify_view_star;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int FIRST, ADD COLUMN f5 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+SELECT * FROM test_modify_view_star;
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test select * special
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+create view test_modify_view_star(col1,col2) as
+SELECT * FROM
+(
+ SELECT
+ CAST(f1/10000 AS DECIMAL(18,2)),
+ CAST(CAST(f4 AS DECIMAL(18,4))/f1*100 AS DECIMAL(18,2))
+ FROM test_at_modify_view_column
+);
+SELECT * FROM test_modify_view_star;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20);
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+SELECT * FROM test_modify_view_star;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int FIRST; -- ERROR
+ALTER TABLE test_at_modify_view_column ADD COLUMN f5 int FIRST; -- ERROR
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test modify view column
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_f1f2 WITH(security_barrier=TRUE) AS select F1,F2 from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int;
+SELECT pg_get_viewdef('test_modify_view_f1f2'::regclass);
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_f1f2'::regclass);
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test view and column name
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW "test_modify_view_f1f2F3" AS select F1,F2,F3 AS "F3" from test_at_modify_view_column where f4 > 0;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ALTER TABLE test_at_modify_view_column MODIFY column f4 varchar(20);
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ALTER TABLE test_at_modify_view_column MODIFY column f4 int AFTER f1;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test drop column
+CREATE TABLE test_at_modify_view_column (f5 int, f4 int primary key, f3 text, f2 text, f1 int unique);
+INSERT INTO test_at_modify_view_column VALUES(5, 4, '3', '2', 1);
+CREATE VIEW "test_modify_view_f1f2F3" AS select F1,F2,F3 AS "F3" from test_at_modify_view_column where f4 > 0;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column MODIFY column f4 varchar(20), DROP COLUMN f5;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+SELECT * FROM "test_modify_view_f1f2F3";
+ALTER TABLE test_at_modify_view_column MODIFY column f4 int AFTER f1, DROP COLUMN f5;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+SELECT * FROM "test_modify_view_f1f2F3";
+DROP VIEW "test_modify_view_f1f2F3";
+DROP TABLE test_at_modify_view_column CASCADE;
+-- --change
+-- -- --test select *
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_star AS select * from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 int;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ALTER TABLE test_at_modify_view_column CHANGE column c1 f1 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test select * with add column
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_star AS select * from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20), ADD COLUMN f0 int; -- ERROR
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 int, ADD COLUMN f0 int;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+SELECT * FROM test_modify_view_star;
+ALTER TABLE test_at_modify_view_column CHANGE column c1 f1 int FIRST, ADD COLUMN f5 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+SELECT * FROM test_modify_view_star;
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test select * special
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+create view test_modify_view_star(col1,col2) as
+SELECT * FROM
+(
+ SELECT
+ CAST(f1/10000 AS DECIMAL(18,2)),
+ CAST(CAST(f4 AS DECIMAL(18,4))/f1*100 AS DECIMAL(18,2))
+ FROM test_at_modify_view_column
+);
+SELECT * FROM test_modify_view_star;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20);
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+SELECT * FROM test_modify_view_star;
+ALTER TABLE test_at_modify_view_column CHANGE column c1 f1 int FIRST; -- ERROR
+ALTER TABLE test_at_modify_view_column ADD COLUMN f5 int FIRST; -- ERROR
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test CHANGE view column
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_f1f2 WITH(security_barrier=TRUE) AS select F1,F2 from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 int;
+SELECT pg_get_viewdef('test_modify_view_f1f2'::regclass);
+ALTER TABLE test_at_modify_view_column CHANGE column c1 f1 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_f1f2'::regclass);
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test view and column name
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW "test_modify_view_f1f2F3" AS select F1,F2,F3 AS "F3" from test_at_modify_view_column where f4 > 0;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 int;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ALTER TABLE test_at_modify_view_column CHANGE column f4 c4 varchar(20);
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ALTER TABLE test_at_modify_view_column CHANGE column c4 f4 int AFTER c1;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test drop column
+CREATE TABLE test_at_modify_view_column (f5 int, f4 int primary key, f3 text, f2 text, f1 int unique);
+INSERT INTO test_at_modify_view_column VALUES(5, 4, '3', '2', 1);
+CREATE VIEW "test_modify_view_f1f2F3" AS select F1,F2,F3 AS "F3" from test_at_modify_view_column where f4 > 0;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column CHANGE column f4 c4 varchar(20), DROP COLUMN f5;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+SELECT * FROM "test_modify_view_f1f2F3";
+ALTER TABLE test_at_modify_view_column CHANGE column c4 f4 int AFTER f1, DROP COLUMN f5;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+SELECT * FROM "test_modify_view_f1f2F3";
+DROP VIEW "test_modify_view_f1f2F3";
+DROP TABLE test_at_modify_view_column CASCADE;
+
+-- END
+RESET CURRENT_SCHEMA;
+DROP SCHEMA atbdb_schema CASCADE;
+\c regression
+clean connection to all force for database atbdb;
+drop database if exists atbdb;
\ No newline at end of file
diff --git a/src/test/regress/sql/alter_table_modify_gtt.sql b/src/test/regress/sql/alter_table_modify_gtt.sql
new file mode 100644
index 000000000..fb9e0b098
--- /dev/null
+++ b/src/test/regress/sql/alter_table_modify_gtt.sql
@@ -0,0 +1,408 @@
+create database atbdb_gtt WITH ENCODING 'UTF-8' dbcompatibility 'B';
+\c atbdb_gtt
+CREATE SCHEMA atbdb_gtt_schema;
+SET CURRENT_SCHEMA TO atbdb_gtt_schema;
+
+-- test modify column without data
+CREATE GLOBAL TEMPORARY TABLE test_at_modify(
+ a int,
+ b int NOT NULL
+);
+ALTER TABLE test_at_modify MODIFY b varchar(8) NULL;
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b varchar(8) DEFAULT '0';
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b int AUTO_INCREMENT PRIMARY KEY INITIALLY DEFERRED;
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b varchar(8) UNIQUE DEFERRABLE;
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b varchar(8) CHECK (b < 'a');
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b varchar(8) COLLATE "POSIX";
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b int NOT NULL;
+\d+ test_at_modify;
+select pg_get_tabledef('test_at_modify'::regclass);
+INSERT INTO test_at_modify VALUES(1,1);
+DROP TABLE test_at_modify;
+
+-- test modify column datatype
+CREATE GLOBAL TEMPORARY TABLE test_at_modify_type(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_modify_type VALUES(1,1);
+INSERT INTO test_at_modify_type VALUES(2,2);
+INSERT INTO test_at_modify_type VALUES(3,3);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b varchar(8);
+SELECT * FROM test_at_modify_type where b = '3';
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DATE; -- ERROR
+ALTER TABLE test_at_modify_type MODIFY COLUMN b RAW;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+DROP TABLE test_at_modify_type;
+CREATE GLOBAL TEMPORARY TABLE test_at_modify_type(
+ a int,
+ b serial NOT NULL
+);
+INSERT INTO test_at_modify_type VALUES(1,1);
+INSERT INTO test_at_modify_type VALUES(2,2);
+INSERT INTO test_at_modify_type VALUES(3,3);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b int;
+ALTER TABLE test_at_modify_type MODIFY COLUMN b int[]; -- ERROR
+ALTER TABLE test_at_modify_type MODIFY COLUMN b int16;
+ALTER TABLE test_at_modify_type MODIFY COLUMN b serial; -- ERROR
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DECIMAL(4,2);
+SELECT * FROM test_at_modify_type where b = 3;
+ALTER TABLE test_at_modify_type MODIFY COLUMN b BOOLEAN;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+DROP TABLE test_at_modify_type;
+CREATE GLOBAL TEMPORARY TABLE test_at_modify_type(
+ a int,
+ b text
+);
+INSERT INTO test_at_modify_type VALUES(1,'beijing');
+INSERT INTO test_at_modify_type VALUES(2,'shanghai');
+INSERT INTO test_at_modify_type VALUES(3,'guangzhou');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','nanjing','wuhan'); -- ERROR
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','nanjing','guangzhou');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','guangzhou','wuhan'); -- ERROR
+select pg_get_tabledef('test_at_modify_type'::regclass);
+DROP TABLE test_at_modify_type;
+CREATE GLOBAL TEMPORARY TABLE test_at_modify_type(
+ a int,
+ b varchar(32)
+);
+INSERT INTO test_at_modify_type VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_modify_type VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_modify_type VALUES(3,'2022-11-24 12:00:00');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b varchar(10); -- ERROR
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DATE;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+DROP TABLE test_at_modify_type;
+CREATE GLOBAL TEMPORARY TABLE test_at_modify_type(
+ a int,
+ b int[] NOT NULL
+);
+INSERT INTO test_at_modify_type VALUES(1,ARRAY[1,1]);
+INSERT INTO test_at_modify_type VALUES(2,ARRAY[2,2]);
+INSERT INTO test_at_modify_type VALUES(3,ARRAY[3,3]);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b float4[];
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+DROP TABLE test_at_modify_type;
+
+-- test modify column constraint
+CREATE GLOBAL TEMPORARY TABLE test_at_modify_constr(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_modify_constr VALUES(1,1);
+INSERT INTO test_at_modify_constr VALUES(2,2);
+INSERT INTO test_at_modify_constr VALUES(3,3);
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) NOT NULL NULL; -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) UNIQUE KEY NULL;
+INSERT INTO test_at_modify_constr VALUES(3,3); -- ERROR
+INSERT INTO test_at_modify_constr VALUES(4,NULL);
+ALTER TABLE test_at_modify_constr MODIFY b int NULL PRIMARY KEY; -- ERROR
+DELETE FROM test_at_modify_constr WHERE b IS NULL;
+ALTER TABLE test_at_modify_constr MODIFY b int NULL PRIMARY KEY;
+INSERT INTO test_at_modify_constr VALUES(4,NULL); -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b < 3); -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b < 5);
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b = a); -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check_1 CHECK (b = a);
+INSERT INTO test_at_modify_constr VALUES(4,4);
+INSERT INTO test_at_modify_constr VALUES(5,5); -- ERROR
+INSERT INTO test_at_modify_constr VALUES(6,'a'); -- ERROR
+INSERT INTO test_at_modify_constr VALUES(0,'a');
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL PRIMARY KEY; -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL;
+INSERT INTO test_at_modify_constr VALUES(5,5); -- ERROR
+SELECT b FROM test_at_modify_constr ORDER BY 1;
+select pg_get_tabledef('test_at_modify_constr'::regclass);
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL REFERENCES test_at_ref (a); -- ERROR
+DROP TABLE test_at_modify_constr;
+
+-- test modify column default
+CREATE GLOBAL TEMPORARY TABLE test_at_modify_default(
+ a int,
+ b int DEFAULT NULL
+);
+INSERT INTO test_at_modify_default VALUES(1,1);
+INSERT INTO test_at_modify_default VALUES(2,2);
+INSERT INTO test_at_modify_default VALUES(3,3);
+ALTER TABLE test_at_modify_default MODIFY b bigint DEFAULT (a+1); -- ERROR
+ALTER TABLE test_at_modify_default MODIFY b bigint DEFAULT NULL;
+\d+ test_at_modify_default;
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) DEFAULT 'a' GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) DEFAULT 'a';
+\d+ test_at_modify_default;
+INSERT INTO test_at_modify_default VALUES(0,DEFAULT);
+SELECT b FROM test_at_modify_default ORDER BY 1;
+ALTER TABLE test_at_modify_default MODIFY b int DEFAULT 4 AUTO_INCREMENT; -- ERROR
+ALTER TABLE test_at_modify_default MODIFY b int DEFAULT 4;
+INSERT INTO test_at_modify_default VALUES(4,DEFAULT);
+SELECT b FROM test_at_modify_default ORDER BY 1;
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+SELECT a,b FROM test_at_modify_default ORDER BY 1,2;
+ALTER TABLE test_at_modify_default MODIFY a varchar(8) DEFAULT 'a';
+INSERT INTO test_at_modify_default VALUES(DEFAULT,DEFAULT);
+SELECT a,b FROM test_at_modify_default ORDER BY 1,2;
+\d+ test_at_modify_default;
+DROP TABLE test_at_modify_default;
+
+-- test modify of a column referenced by a generated column
+CREATE GLOBAL TEMPORARY TABLE test_at_modify_generated(
+ a int,
+ b varchar(32),
+ c varchar(32) GENERATED ALWAYS AS (b) STORED
+);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_modify_generated(a,b) VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_modify_generated(a,b) VALUES(3,'2022-11-24 12:00:00');
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b DATE;
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+\d+ test_at_modify_generated
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b varchar(32);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+DROP TABLE test_at_modify_generated;
+
+
+-- test modify column AUTO_INCREMENT
+CREATE GLOBAL TEMPORARY TABLE test_at_modify_autoinc(
+ a int,
+ b int
+);
+INSERT INTO test_at_modify_autoinc VALUES(1,NULL);
+INSERT INTO test_at_modify_autoinc VALUES(2,0);
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT; -- ERROR
+ALTER TABLE test_at_modify_autoinc MODIFY b DECIMAL(4,2) AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_modify_autoinc MODIFY b serial AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT NULL UNIQUE KEY;
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+INSERT INTO test_at_modify_autoinc VALUES(3,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc MODIFY COLUMN b int;
+INSERT INTO test_at_modify_autoinc VALUES(4,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc MODIFY b int AUTO_INCREMENT PRIMARY KEY, AUTO_INCREMENT=100;
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+INSERT INTO test_at_modify_autoinc VALUES(5,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc AUTO_INCREMENT=1000;
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT;
+INSERT INTO test_at_modify_autoinc VALUES(6,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE, MODIFY b int2 AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE; -- ERROR
+ALTER TABLE test_at_modify_autoinc MODIFY COLUMN b int;
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE;
+INSERT INTO test_at_modify_autoinc VALUES(7,0,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc DROP COLUMN c , MODIFY b int2 AUTO_INCREMENT UNIQUE KEY;
+INSERT INTO test_at_modify_autoinc VALUES(8,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc MODIFY b float4; -- ALTER TYPE ONLY, KEEP AUTO_INCREMENT
+INSERT INTO test_at_modify_autoinc VALUES(9,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+DROP TABLE test_at_modify_autoinc;
+
+-- ------------------------------------------------------ test ALTER TABLE CHANGE
+-- test change column without data
+CREATE GLOBAL TEMPORARY TABLE test_at_change(
+ a int,
+ b int NOT NULL
+);
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) NULL;
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) DEFAULT '0';
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b b1 int AUTO_INCREMENT PRIMARY KEY;
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) UNIQUE;
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) CHECK (b1 < 'a');
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) COLLATE "POSIX";
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b1 b int NOT NULL;
+\d+ test_at_change;
+select pg_get_tabledef('test_at_change'::regclass);
+INSERT INTO test_at_change VALUES(1,1);
+DROP TABLE test_at_change;
+
+-- test change column datatype
+CREATE GLOBAL TEMPORARY TABLE test_at_change_type(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_change_type VALUES(1,1);
+INSERT INTO test_at_change_type VALUES(2,2);
+INSERT INTO test_at_change_type VALUES(3,3);
+ALTER TABLE test_at_change_type CHANGE b b1 varchar(8);
+SELECT * FROM test_at_change_type where b1 = '3';
+ALTER TABLE test_at_change_type CHANGE b1 b DATE; -- ERROR
+ALTER TABLE test_at_change_type CHANGE b1 b RAW;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+DROP TABLE test_at_change_type;
+CREATE GLOBAL TEMPORARY TABLE test_at_change_type(
+ a int,
+ b serial NOT NULL
+);
+INSERT INTO test_at_change_type VALUES(1,1);
+INSERT INTO test_at_change_type VALUES(2,2);
+INSERT INTO test_at_change_type VALUES(3,3);
+ALTER TABLE test_at_change_type CHANGE b b1 int;
+ALTER TABLE test_at_change_type CHANGE b1 b serial; -- ERROR
+ALTER TABLE test_at_change_type CHANGE b1 b DECIMAL(4,2);
+SELECT * FROM test_at_change_type where b = 3;
+ALTER TABLE test_at_change_type CHANGE b b1 BOOLEAN;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+DROP TABLE test_at_change_type;
+CREATE GLOBAL TEMPORARY TABLE test_at_change_type(
+ a int,
+ b text
+);
+INSERT INTO test_at_change_type VALUES(1,'beijing');
+INSERT INTO test_at_change_type VALUES(2,'shanghai');
+INSERT INTO test_at_change_type VALUES(3,'guangzhou');
+ALTER TABLE test_at_change_type CHANGE b b1 SET('beijing','shanghai','nanjing','wuhan'); -- ERROR
+ALTER TABLE test_at_change_type CHANGE b b1 SET('beijing','shanghai','nanjing','guangzhou');
+ALTER TABLE test_at_change_type CHANGE b1 b SET('beijing','shanghai','guangzhou','wuhan'); -- ERROR
+select pg_get_tabledef('test_at_change_type'::regclass);
+DROP TABLE test_at_change_type;
+CREATE GLOBAL TEMPORARY TABLE test_at_change_type(
+ a int,
+ b varchar(32)
+);
+INSERT INTO test_at_change_type VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_change_type VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_change_type VALUES(3,'2022-11-24 12:00:00');
+ALTER TABLE test_at_change_type CHANGE b b1 varchar(10); -- ERROR
+ALTER TABLE test_at_change_type CHANGE b b1 DATE;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+DROP TABLE test_at_change_type;
+
+-- test change column constraint
+CREATE GLOBAL TEMPORARY TABLE test_at_change_constr(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_change_constr VALUES(1,1);
+INSERT INTO test_at_change_constr VALUES(2,2);
+INSERT INTO test_at_change_constr VALUES(3,3);
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) NOT NULL NULL; -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) UNIQUE KEY NULL;
+INSERT INTO test_at_change_constr VALUES(3,3); -- ERROR
+INSERT INTO test_at_change_constr VALUES(4,NULL);
+ALTER TABLE test_at_change_constr CHANGE b1 b int NULL PRIMARY KEY; -- ERROR
+DELETE FROM test_at_change_constr WHERE b1 IS NULL;
+ALTER TABLE test_at_change_constr CHANGE b1 b int NULL PRIMARY KEY;
+INSERT INTO test_at_change_constr VALUES(4,NULL); -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) CONSTRAINT t_at_m_check CHECK (b1 < 3); -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) CONSTRAINT t_at_m_check CHECK (b1 < 5);
+ALTER TABLE test_at_change_constr CHANGE b1 b varchar(8) CONSTRAINT t_at_m_check CHECK (b = a); -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b1 b varchar(8) CONSTRAINT t_at_m_check_1 CHECK (b = a);
+INSERT INTO test_at_change_constr VALUES(4,4);
+INSERT INTO test_at_change_constr VALUES(5,5); -- ERROR
+INSERT INTO test_at_change_constr VALUES(6,'a'); -- ERROR
+INSERT INTO test_at_change_constr VALUES(0,'a');
+ALTER TABLE test_at_change_constr CHANGE b b1 int NOT NULL PRIMARY KEY; -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b b1 int NOT NULL;
+INSERT INTO test_at_change_constr VALUES(5,5); -- ERROR
+SELECT b1 FROM test_at_change_constr ORDER BY 1;
+select pg_get_tabledef('test_at_change_constr'::regclass);
+ALTER TABLE test_at_change_constr CHANGE b1 b int NOT NULL REFERENCES test_at_ref (a); -- ERROR
+DROP TABLE test_at_change_constr;
+
+-- test change column default
+CREATE GLOBAL TEMPORARY TABLE test_at_change_default(
+ a int,
+ b int DEFAULT NULL
+);
+INSERT INTO test_at_change_default VALUES(1,1);
+INSERT INTO test_at_change_default VALUES(2,2);
+INSERT INTO test_at_change_default VALUES(3,3);
+ALTER TABLE test_at_change_default CHANGE b b1 bigint DEFAULT (a+1); -- ERROR
+ALTER TABLE test_at_change_default CHANGE b b1 bigint DEFAULT NULL;
+\d+ test_at_change_default;
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) DEFAULT 'a' GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) DEFAULT 'a';
+\d+ test_at_change_default;
+INSERT INTO test_at_change_default VALUES(0,DEFAULT);
+SELECT b FROM test_at_change_default ORDER BY 1;
+ALTER TABLE test_at_change_default CHANGE b b1 int DEFAULT 4 AUTO_INCREMENT; -- ERROR
+ALTER TABLE test_at_change_default CHANGE b b1 int DEFAULT 4;
+INSERT INTO test_at_change_default VALUES(4,DEFAULT);
+SELECT b1 FROM test_at_change_default ORDER BY 1;
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+SELECT a,b FROM test_at_change_default ORDER BY 1,2;
+ALTER TABLE test_at_change_default CHANGE a a1 varchar(8) DEFAULT 'a';
+INSERT INTO test_at_change_default VALUES(DEFAULT,DEFAULT);
+SELECT * FROM test_at_change_default ORDER BY 1,2;
+\d+ test_at_change_default;
+DROP TABLE test_at_change_default;
+
+-- test change of a column referenced by a generated column
+CREATE GLOBAL TEMPORARY TABLE test_at_change_generated(
+ a int,
+ b varchar(32),
+ c varchar(32) GENERATED ALWAYS AS (b) STORED
+);
+INSERT INTO test_at_change_generated(a,b) VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_change_generated(a,b) VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_change_generated(a,b) VALUES(3,'2022-11-24 12:00:00');
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ALTER TABLE test_at_change_generated CHANGE COLUMN b b1 DATE;
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+\d+ test_at_change_generated
+ALTER TABLE test_at_change_generated CHANGE COLUMN b1 b varchar(32);
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+DROP TABLE test_at_change_generated;
+
+-- test change column AUTO_INCREMENT
+CREATE GLOBAL TEMPORARY TABLE test_at_change_autoinc(
+ a int,
+ b int
+);
+INSERT INTO test_at_change_autoinc VALUES(1,NULL);
+INSERT INTO test_at_change_autoinc VALUES(2,0);
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int2 AUTO_INCREMENT; -- ERROR
+ALTER TABLE test_at_change_autoinc CHANGE b b1 DECIMAL(4,2) AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_change_autoinc CHANGE b b1 serial AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int2 AUTO_INCREMENT NULL UNIQUE KEY;
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+INSERT INTO test_at_change_autoinc VALUES(3,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc CHANGE b1 b int;
+INSERT INTO test_at_change_autoinc VALUES(4,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int AUTO_INCREMENT PRIMARY KEY, AUTO_INCREMENT=100;
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+INSERT INTO test_at_change_autoinc VALUES(5,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc AUTO_INCREMENT=1000;
+ALTER TABLE test_at_change_autoinc CHANGE b1 b int2 AUTO_INCREMENT;
+INSERT INTO test_at_change_autoinc VALUES(6,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE, CHANGE b b1 int2 AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE; -- ERROR
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int;
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE;
+INSERT INTO test_at_change_autoinc VALUES(7,0,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc DROP COLUMN c , CHANGE b1 b int2 AUTO_INCREMENT UNIQUE KEY;
+INSERT INTO test_at_change_autoinc VALUES(8,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+DROP TABLE test_at_change_autoinc;
+
+-- END
+RESET CURRENT_SCHEMA;
+DROP SCHEMA atbdb_gtt_schema CASCADE;
+\c regression
+clean connection to all force for database atbdb_gtt;
+drop database if exists atbdb_gtt;
\ No newline at end of file
diff --git a/src/test/regress/sql/alter_table_modify_ltt.sql b/src/test/regress/sql/alter_table_modify_ltt.sql
new file mode 100644
index 000000000..43076c436
--- /dev/null
+++ b/src/test/regress/sql/alter_table_modify_ltt.sql
@@ -0,0 +1,359 @@
+create database atbdb_ltt WITH ENCODING 'UTF-8' dbcompatibility 'B';
+\c atbdb_ltt
+CREATE SCHEMA atbdb_ltt_schema;
+SET CURRENT_SCHEMA TO atbdb_ltt_schema;
+
+-- test modify column without data
+CREATE LOCAL TEMPORARY TABLE test_at_modify(
+ a int,
+ b int NOT NULL
+);
+ALTER TABLE test_at_modify MODIFY b varchar(8) NULL;
+ALTER TABLE test_at_modify MODIFY b varchar(8) DEFAULT '0';
+ALTER TABLE test_at_modify MODIFY b int AUTO_INCREMENT PRIMARY KEY INITIALLY DEFERRED;
+ALTER TABLE test_at_modify MODIFY b varchar(8) UNIQUE DEFERRABLE;
+ALTER TABLE test_at_modify MODIFY b varchar(8) CHECK (b < 'a');
+ALTER TABLE test_at_modify MODIFY b varchar(8) COLLATE "POSIX";
+ALTER TABLE test_at_modify MODIFY b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+ALTER TABLE test_at_modify MODIFY b int NOT NULL;
+INSERT INTO test_at_modify VALUES(1,1);
+DROP TABLE test_at_modify;
+
+-- test modify column datatype
+CREATE LOCAL TEMPORARY TABLE test_at_modify_type(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_modify_type VALUES(1,1);
+INSERT INTO test_at_modify_type VALUES(2,2);
+INSERT INTO test_at_modify_type VALUES(3,3);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b varchar(8);
+SELECT * FROM test_at_modify_type where b = '3';
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DATE; -- ERROR
+ALTER TABLE test_at_modify_type MODIFY COLUMN b RAW;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+DROP TABLE test_at_modify_type;
+CREATE LOCAL TEMPORARY TABLE test_at_modify_type(
+ a int,
+ b text
+);
+INSERT INTO test_at_modify_type VALUES(1,'beijing');
+INSERT INTO test_at_modify_type VALUES(2,'shanghai');
+INSERT INTO test_at_modify_type VALUES(3,'guangzhou');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','nanjing','wuhan'); -- ERROR
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','nanjing','guangzhou');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','guangzhou','wuhan'); -- ERROR
+DROP TABLE test_at_modify_type;
+CREATE LOCAL TEMPORARY TABLE test_at_modify_type(
+ a int,
+ b varchar(32)
+);
+INSERT INTO test_at_modify_type VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_modify_type VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_modify_type VALUES(3,'2022-11-24 12:00:00');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b varchar(10); -- ERROR
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DATE;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+DROP TABLE test_at_modify_type;
+CREATE LOCAL TEMPORARY TABLE test_at_modify_type(
+ a int,
+ b int[] NOT NULL
+);
+INSERT INTO test_at_modify_type VALUES(1,ARRAY[1,1]);
+INSERT INTO test_at_modify_type VALUES(2,ARRAY[2,2]);
+INSERT INTO test_at_modify_type VALUES(3,ARRAY[3,3]);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b float4[];
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+DROP TABLE test_at_modify_type;
+
+-- test modify column constraint
+CREATE LOCAL TEMPORARY TABLE test_at_modify_constr(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_modify_constr VALUES(1,1);
+INSERT INTO test_at_modify_constr VALUES(2,2);
+INSERT INTO test_at_modify_constr VALUES(3,3);
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) NOT NULL NULL; -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) UNIQUE KEY NULL;
+INSERT INTO test_at_modify_constr VALUES(3,3); -- ERROR
+INSERT INTO test_at_modify_constr VALUES(4,NULL);
+ALTER TABLE test_at_modify_constr MODIFY b int NULL PRIMARY KEY; -- ERROR
+DELETE FROM test_at_modify_constr WHERE b IS NULL;
+ALTER TABLE test_at_modify_constr MODIFY b int NULL PRIMARY KEY;
+INSERT INTO test_at_modify_constr VALUES(4,NULL); -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b < 3); -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b < 5);
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b = a); -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check_1 CHECK (b = a);
+INSERT INTO test_at_modify_constr VALUES(4,4);
+INSERT INTO test_at_modify_constr VALUES(5,5); -- ERROR
+INSERT INTO test_at_modify_constr VALUES(6,'a'); -- ERROR
+INSERT INTO test_at_modify_constr VALUES(0,'a');
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL PRIMARY KEY; -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL;
+INSERT INTO test_at_modify_constr VALUES(5,5); -- ERROR
+SELECT b FROM test_at_modify_constr ORDER BY 1;
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL REFERENCES test_at_ref (a); -- ERROR
+DROP TABLE test_at_modify_constr;
+
+-- test modify column default
+CREATE LOCAL TEMPORARY TABLE test_at_modify_default(
+ a int,
+ b int DEFAULT NULL
+);
+INSERT INTO test_at_modify_default VALUES(1,1);
+INSERT INTO test_at_modify_default VALUES(2,2);
+INSERT INTO test_at_modify_default VALUES(3,3);
+ALTER TABLE test_at_modify_default MODIFY b bigint DEFAULT (a+1); -- ERROR
+ALTER TABLE test_at_modify_default MODIFY b bigint DEFAULT NULL;
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) DEFAULT 'a' GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) DEFAULT 'a';
+INSERT INTO test_at_modify_default VALUES(0,DEFAULT);
+SELECT b FROM test_at_modify_default ORDER BY 1;
+ALTER TABLE test_at_modify_default MODIFY b int DEFAULT 4 AUTO_INCREMENT; -- ERROR
+ALTER TABLE test_at_modify_default MODIFY b int DEFAULT 4;
+INSERT INTO test_at_modify_default VALUES(4,DEFAULT);
+SELECT b FROM test_at_modify_default ORDER BY 1;
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+SELECT a,b FROM test_at_modify_default ORDER BY 1,2;
+ALTER TABLE test_at_modify_default MODIFY a varchar(8) DEFAULT 'a';
+INSERT INTO test_at_modify_default VALUES(DEFAULT,DEFAULT);
+SELECT a,b FROM test_at_modify_default ORDER BY 1,2;
+DROP TABLE test_at_modify_default;
+
+-- test modify column depended by generated column
+CREATE LOCAL TEMPORARY TABLE test_at_modify_generated(
+ a int,
+ b varchar(32),
+ c varchar(32) GENERATED ALWAYS AS (b) STORED
+);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_modify_generated(a,b) VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_modify_generated(a,b) VALUES(3,'2022-11-24 12:00:00');
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b DATE;
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b varchar(32);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+DROP TABLE test_at_modify_generated;
+
+
+-- test modify column AUTO_INCREMENT
+CREATE LOCAL TEMPORARY TABLE test_at_modify_autoinc(
+ a int,
+ b int
+);
+INSERT INTO test_at_modify_autoinc VALUES(1,NULL);
+INSERT INTO test_at_modify_autoinc VALUES(2,0);
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT; -- ERROR
+ALTER TABLE test_at_modify_autoinc MODIFY b DECIMAL(4,2) AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_modify_autoinc MODIFY b serial AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT NULL UNIQUE KEY;
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+INSERT INTO test_at_modify_autoinc VALUES(3,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc MODIFY COLUMN b int;
+INSERT INTO test_at_modify_autoinc VALUES(4,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc MODIFY b int AUTO_INCREMENT PRIMARY KEY, AUTO_INCREMENT=100;
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+INSERT INTO test_at_modify_autoinc VALUES(5,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc AUTO_INCREMENT=1000;
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT;
+INSERT INTO test_at_modify_autoinc VALUES(6,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE, MODIFY b int2 AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE; -- ERROR
+ALTER TABLE test_at_modify_autoinc MODIFY COLUMN b int;
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE;
+INSERT INTO test_at_modify_autoinc VALUES(7,0,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc DROP COLUMN c , MODIFY b int2 AUTO_INCREMENT UNIQUE KEY;
+INSERT INTO test_at_modify_autoinc VALUES(8,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc MODIFY b float4; -- ALTER TYPE ONLY, KEEP AUTO_INCREMENT
+INSERT INTO test_at_modify_autoinc VALUES(9,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+DROP TABLE test_at_modify_autoinc;
+
+-- ------------------------------------------------------ test ALTER TABLE CHANGE
+-- test change column without data
+CREATE LOCAL TEMPORARY TABLE test_at_change(
+ a int,
+ b int NOT NULL
+);
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) NULL;
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) DEFAULT '0';
+ALTER TABLE test_at_change CHANGE b b1 int AUTO_INCREMENT PRIMARY KEY;
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) UNIQUE;
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) CHECK (b1 < 'a');
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) COLLATE "POSIX";
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+ALTER TABLE test_at_change CHANGE b1 b int NOT NULL;
+INSERT INTO test_at_change VALUES(1,1);
+DROP TABLE test_at_change;
+
+-- test change column datatype
+CREATE LOCAL TEMPORARY TABLE test_at_change_type(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_change_type VALUES(1,1);
+INSERT INTO test_at_change_type VALUES(2,2);
+INSERT INTO test_at_change_type VALUES(3,3);
+ALTER TABLE test_at_change_type CHANGE b b1 varchar(8);
+SELECT * FROM test_at_change_type where b1 = '3';
+ALTER TABLE test_at_change_type CHANGE b1 b DATE; -- ERROR
+ALTER TABLE test_at_change_type CHANGE b1 b RAW;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+DROP TABLE test_at_change_type;
+CREATE LOCAL TEMPORARY TABLE test_at_change_type(
+ a int,
+ b text
+);
+INSERT INTO test_at_change_type VALUES(1,'beijing');
+INSERT INTO test_at_change_type VALUES(2,'shanghai');
+INSERT INTO test_at_change_type VALUES(3,'guangzhou');
+ALTER TABLE test_at_change_type CHANGE b b1 SET('beijing','shanghai','nanjing','wuhan'); -- ERROR
+ALTER TABLE test_at_change_type CHANGE b b1 SET('beijing','shanghai','nanjing','guangzhou');
+ALTER TABLE test_at_change_type CHANGE b1 b SET('beijing','shanghai','guangzhou','wuhan'); -- ERROR
+DROP TABLE test_at_change_type;
+CREATE LOCAL TEMPORARY TABLE test_at_change_type(
+ a int,
+ b varchar(32)
+);
+INSERT INTO test_at_change_type VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_change_type VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_change_type VALUES(3,'2022-11-24 12:00:00');
+ALTER TABLE test_at_change_type CHANGE b b1 varchar(10); -- ERROR
+ALTER TABLE test_at_change_type CHANGE b b1 DATE;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+DROP TABLE test_at_change_type;
+
+-- test change column constraint
+CREATE LOCAL TEMPORARY TABLE test_at_change_constr(
+ a int,
+ b int NOT NULL
+);
+INSERT INTO test_at_change_constr VALUES(1,1);
+INSERT INTO test_at_change_constr VALUES(2,2);
+INSERT INTO test_at_change_constr VALUES(3,3);
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) NOT NULL NULL; -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) UNIQUE KEY NULL;
+INSERT INTO test_at_change_constr VALUES(3,3); -- ERROR
+INSERT INTO test_at_change_constr VALUES(4,NULL);
+ALTER TABLE test_at_change_constr CHANGE b1 b int NULL PRIMARY KEY; -- ERROR
+DELETE FROM test_at_change_constr WHERE b1 IS NULL;
+ALTER TABLE test_at_change_constr CHANGE b1 b int NULL PRIMARY KEY;
+INSERT INTO test_at_change_constr VALUES(4,NULL); -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) CONSTRAINT t_at_m_check CHECK (b1 < 3); -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) CONSTRAINT t_at_m_check CHECK (b1 < 5);
+ALTER TABLE test_at_change_constr CHANGE b1 b varchar(8) CONSTRAINT t_at_m_check CHECK (b = a); -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b1 b varchar(8) CONSTRAINT t_at_m_check_1 CHECK (b = a);
+INSERT INTO test_at_change_constr VALUES(4,4);
+INSERT INTO test_at_change_constr VALUES(5,5); -- ERROR
+INSERT INTO test_at_change_constr VALUES(6,'a'); -- ERROR
+INSERT INTO test_at_change_constr VALUES(0,'a');
+ALTER TABLE test_at_change_constr CHANGE b b1 int NOT NULL PRIMARY KEY; -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b b1 int NOT NULL;
+INSERT INTO test_at_change_constr VALUES(5,5); -- ERROR
+SELECT b1 FROM test_at_change_constr ORDER BY 1;
+ALTER TABLE test_at_change_constr CHANGE b1 b int NOT NULL REFERENCES test_at_ref (a); -- ERROR
+DROP TABLE test_at_change_constr;
+
+-- test change column default
+CREATE LOCAL TEMPORARY TABLE test_at_change_default(
+ a int,
+ b int DEFAULT NULL
+);
+INSERT INTO test_at_change_default VALUES(1,1);
+INSERT INTO test_at_change_default VALUES(2,2);
+INSERT INTO test_at_change_default VALUES(3,3);
+ALTER TABLE test_at_change_default CHANGE b b1 bigint DEFAULT (a+1); -- ERROR
+ALTER TABLE test_at_change_default CHANGE b b1 bigint DEFAULT NULL;
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) DEFAULT 'a' GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) DEFAULT 'a';
+INSERT INTO test_at_change_default VALUES(0,DEFAULT);
+SELECT b FROM test_at_change_default ORDER BY 1;
+ALTER TABLE test_at_change_default CHANGE b b1 int DEFAULT 4 AUTO_INCREMENT; -- ERROR
+ALTER TABLE test_at_change_default CHANGE b b1 int DEFAULT 4;
+INSERT INTO test_at_change_default VALUES(4,DEFAULT);
+SELECT b1 FROM test_at_change_default ORDER BY 1;
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+SELECT a,b FROM test_at_change_default ORDER BY 1,2;
+ALTER TABLE test_at_change_default CHANGE a a1 varchar(8) DEFAULT 'a';
+INSERT INTO test_at_change_default VALUES(DEFAULT,DEFAULT);
+SELECT * FROM test_at_change_default ORDER BY 1,2;
+DROP TABLE test_at_change_default;
+
+-- test change column depended by generated column
+CREATE LOCAL TEMPORARY TABLE test_at_change_generated(
+ a int,
+ b varchar(32),
+ c varchar(32) GENERATED ALWAYS AS (b) STORED
+);
+INSERT INTO test_at_change_generated(a,b) VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_change_generated(a,b) VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_change_generated(a,b) VALUES(3,'2022-11-24 12:00:00');
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ALTER TABLE test_at_change_generated CHANGE COLUMN b b1 DATE;
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ALTER TABLE test_at_change_generated CHANGE COLUMN b1 b varchar(32);
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+DROP TABLE test_at_change_generated;
+
+-- test change column AUTO_INCREMENT
+CREATE LOCAL TEMPORARY TABLE test_at_change_autoinc(
+ a int,
+ b int
+);
+INSERT INTO test_at_change_autoinc VALUES(1,NULL);
+INSERT INTO test_at_change_autoinc VALUES(2,0);
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int2 AUTO_INCREMENT; -- ERROR
+ALTER TABLE test_at_change_autoinc CHANGE b b1 DECIMAL(4,2) AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_change_autoinc CHANGE b b1 serial AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int2 AUTO_INCREMENT NULL UNIQUE KEY;
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+INSERT INTO test_at_change_autoinc VALUES(3,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc CHANGE b1 b int;
+INSERT INTO test_at_change_autoinc VALUES(4,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int AUTO_INCREMENT PRIMARY KEY, AUTO_INCREMENT=100;
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+INSERT INTO test_at_change_autoinc VALUES(5,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc AUTO_INCREMENT=1000;
+ALTER TABLE test_at_change_autoinc CHANGE b1 b int2 AUTO_INCREMENT;
+INSERT INTO test_at_change_autoinc VALUES(6,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE, CHANGE b b1 int2 AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE; -- ERROR
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int;
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE;
+INSERT INTO test_at_change_autoinc VALUES(7,0,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc DROP COLUMN c , CHANGE b1 b int2 AUTO_INCREMENT UNIQUE KEY;
+INSERT INTO test_at_change_autoinc VALUES(8,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+DROP TABLE test_at_change_autoinc;
+
+-- TEMPORARY view
+CREATE LOCAL TEMPORARY TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique);
+INSERT INTO test_at_modify_view_column VALUES(1, '1', '1', 1);
+CREATE TEMPORARY VIEW test_modify_view_star AS select * from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+DROP TABLE test_at_modify_view_column CASCADE;
+
+-- END
+RESET CURRENT_SCHEMA;
+DROP SCHEMA atbdb_ltt_schema CASCADE;
+\c regression
+clean connection to all force for database atbdb_ltt;
+drop database if exists atbdb_ltt;
\ No newline at end of file
diff --git a/src/test/regress/sql/alter_table_modify_ustore.sql b/src/test/regress/sql/alter_table_modify_ustore.sql
new file mode 100644
index 000000000..e7f85790e
--- /dev/null
+++ b/src/test/regress/sql/alter_table_modify_ustore.sql
@@ -0,0 +1,1246 @@
+create database atbdb_ustore WITH ENCODING 'UTF-8' dbcompatibility 'B';
+\c atbdb_ustore
+CREATE SCHEMA atbdb_ustore_schema;
+SET CURRENT_SCHEMA TO atbdb_ustore_schema;
+
+-- test modify column without data
+CREATE TABLE test_at_modify(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+ALTER TABLE test_at_modify MODIFY b varchar(8) NULL;
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b varchar(8) DEFAULT '0';
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b int AUTO_INCREMENT PRIMARY KEY;
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b varchar(8) UNIQUE;
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b varchar(8) CHECK (b < 'a');
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b varchar(8) COLLATE "POSIX";
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+\d+ test_at_modify;
+ALTER TABLE test_at_modify MODIFY b int NOT NULL;
+\d+ test_at_modify;
+select pg_get_tabledef('test_at_modify'::regclass);
+INSERT INTO test_at_modify VALUES(1,1);
+DROP TABLE test_at_modify;
+
+-- test modify column datatype
+CREATE TABLE test_at_modify_type(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_type VALUES(1,1);
+INSERT INTO test_at_modify_type VALUES(2,2);
+INSERT INTO test_at_modify_type VALUES(3,3);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b varchar(8);
+SELECT * FROM test_at_modify_type where b = '3';
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DATE; -- ERROR
+ALTER TABLE test_at_modify_type MODIFY COLUMN b RAW;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+DROP TABLE test_at_modify_type;
+CREATE TABLE test_at_modify_type(
+ a int,
+ b serial NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_type VALUES(1,1);
+INSERT INTO test_at_modify_type VALUES(2,2);
+INSERT INTO test_at_modify_type VALUES(3,3);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b int;
+ALTER TABLE test_at_modify_type MODIFY COLUMN b int[]; -- ERROR
+ALTER TABLE test_at_modify_type MODIFY COLUMN b int16;
+ALTER TABLE test_at_modify_type MODIFY COLUMN b serial; -- ERROR
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DECIMAL(4,2);
+SELECT * FROM test_at_modify_type where b = 3;
+ALTER TABLE test_at_modify_type MODIFY COLUMN b BOOLEAN;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+DROP TABLE test_at_modify_type;
+CREATE TABLE test_at_modify_type(
+ a int,
+ b text
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_type VALUES(1,'beijing');
+INSERT INTO test_at_modify_type VALUES(2,'shanghai');
+INSERT INTO test_at_modify_type VALUES(3,'guangzhou');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','nanjing','wuhan'); -- ERROR
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','nanjing','guangzhou');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b SET('beijing','shanghai','guangzhou','wuhan'); -- ERROR
+select pg_get_tabledef('test_at_modify_type'::regclass);
+DROP TABLE test_at_modify_type;
+CREATE TABLE test_at_modify_type(
+ a int,
+ b varchar(32)
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_type VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_modify_type VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_modify_type VALUES(3,'2022-11-24 12:00:00');
+ALTER TABLE test_at_modify_type MODIFY COLUMN b varchar(10); -- ERROR
+ALTER TABLE test_at_modify_type MODIFY COLUMN b DATE;
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+DROP TABLE test_at_modify_type;
+CREATE TABLE test_at_modify_type(
+ a int,
+ b int[] NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_type VALUES(1,ARRAY[1,1]);
+INSERT INTO test_at_modify_type VALUES(2,ARRAY[2,2]);
+INSERT INTO test_at_modify_type VALUES(3,ARRAY[3,3]);
+ALTER TABLE test_at_modify_type MODIFY COLUMN b float4[];
+SELECT * FROM test_at_modify_type ORDER BY 1,2;
+DROP TABLE test_at_modify_type;
+
+-- test modify column constraint
+CREATE TABLE test_at_modify_constr(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_constr VALUES(1,1);
+INSERT INTO test_at_modify_constr VALUES(2,2);
+INSERT INTO test_at_modify_constr VALUES(3,3);
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) NOT NULL NULL; -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) UNIQUE KEY NULL;
+INSERT INTO test_at_modify_constr VALUES(3,3); -- ERROR
+INSERT INTO test_at_modify_constr VALUES(4,NULL);
+ALTER TABLE test_at_modify_constr MODIFY b int NULL PRIMARY KEY; -- ERROR
+DELETE FROM test_at_modify_constr WHERE b IS NULL;
+ALTER TABLE test_at_modify_constr MODIFY b int NULL PRIMARY KEY;
+INSERT INTO test_at_modify_constr VALUES(4,NULL); -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b < 3); -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b < 5);
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check CHECK (b = a); -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b varchar(8) CONSTRAINT t_at_m_check_1 CHECK (b = a);
+INSERT INTO test_at_modify_constr VALUES(4,4);
+INSERT INTO test_at_modify_constr VALUES(5,5); -- ERROR
+INSERT INTO test_at_modify_constr VALUES(6,'a'); -- ERROR
+INSERT INTO test_at_modify_constr VALUES(0,'a');
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL PRIMARY KEY; -- ERROR
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL;
+INSERT INTO test_at_modify_constr VALUES(5,5); -- ERROR
+SELECT b FROM test_at_modify_constr ORDER BY 1;
+select pg_get_tabledef('test_at_modify_constr'::regclass);
+ALTER TABLE test_at_modify_constr MODIFY b int NOT NULL REFERENCES test_at_ref (a); -- ERROR
+DROP TABLE test_at_modify_constr;
+
+-- test modify column default
+CREATE TABLE test_at_modify_default(
+ a int,
+ b int DEFAULT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_default VALUES(1,1);
+INSERT INTO test_at_modify_default VALUES(2,2);
+INSERT INTO test_at_modify_default VALUES(3,3);
+ALTER TABLE test_at_modify_default MODIFY b bigint DEFAULT (a+1); -- ERROR
+ALTER TABLE test_at_modify_default MODIFY b bigint DEFAULT NULL;
+\d+ test_at_modify_default;
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) DEFAULT 'a' GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) DEFAULT 'a';
+\d+ test_at_modify_default;
+INSERT INTO test_at_modify_default VALUES(0,DEFAULT);
+SELECT b FROM test_at_modify_default ORDER BY 1;
+ALTER TABLE test_at_modify_default MODIFY b int DEFAULT 4 AUTO_INCREMENT; -- ERROR
+ALTER TABLE test_at_modify_default MODIFY b int DEFAULT 4;
+INSERT INTO test_at_modify_default VALUES(4,DEFAULT);
+SELECT b FROM test_at_modify_default ORDER BY 1;
+ALTER TABLE test_at_modify_default MODIFY b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+SELECT a,b FROM test_at_modify_default ORDER BY 1,2;
+ALTER TABLE test_at_modify_default MODIFY a varchar(8) DEFAULT 'a';
+INSERT INTO test_at_modify_default VALUES(DEFAULT,DEFAULT);
+SELECT a,b FROM test_at_modify_default ORDER BY 1,2;
+\d+ test_at_modify_default;
+DROP TABLE test_at_modify_default;
+
+-- test modify column depended by generated column
+CREATE TABLE test_at_modify_generated(
+ a int,
+ b varchar(32),
+ c varchar(32) GENERATED ALWAYS AS (b) STORED
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_modify_generated(a,b) VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_modify_generated(a,b) VALUES(3,'2022-11-24 12:00:00');
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b DATE;
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+\d+ test_at_modify_generated
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b varchar(32);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+DROP TABLE test_at_modify_generated;
+CREATE TABLE test_at_modify_generated(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_generated(a,b) VALUES(-1,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(0,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(2,DEFAULT);
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b varchar(8) GENERATED ALWAYS AS (a) STORED FIRST, MODIFY COLUMN a varchar(8) AFTER b;
+INSERT INTO test_at_modify_generated(a,b) VALUES(3,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY b::int,a::int;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a int AFTER b, MODIFY COLUMN b int GENERATED ALWAYS AS (a+1) STORED FIRST;
+INSERT INTO test_at_modify_generated(a,b) VALUES(4,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b varchar(8) AFTER a, MODIFY COLUMN a varchar(8) AFTER b;
+INSERT INTO test_at_modify_generated(a,b) VALUES(5,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b int GENERATED ALWAYS AS (a) STORED;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a int FIRST, MODIFY COLUMN b int FIRST;
+INSERT INTO test_at_modify_generated(a,b) VALUES(6,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+DROP TABLE test_at_modify_generated;
+CREATE TABLE test_at_modify_generated(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_generated(a,b) VALUES(-1,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(0,DEFAULT);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,DEFAULT);
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a bool;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a int;
+INSERT INTO test_at_modify_generated(a,b) VALUES(100,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a bool, MODIFY COLUMN b varchar(32);
+\d test_at_modify_generated
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b int GENERATED ALWAYS AS (a+1) STORED, MODIFY COLUMN a int;
+\d test_at_modify_generated
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+INSERT INTO test_at_modify_generated(a,b) VALUES(100,DEFAULT);
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN b bool GENERATED ALWAYS AS (a) STORED;
+\d test_at_modify_generated
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a bool;
+\d test_at_modify_generated
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+ALTER TABLE test_at_modify_generated MODIFY COLUMN a int;
+INSERT INTO test_at_modify_generated(a,b) VALUES(100,DEFAULT);
+\d test_at_modify_generated
+SELECT * FROM test_at_modify_generated ORDER BY 1,2;
+DROP TABLE test_at_modify_generated;
+
+-- error generated column reference generated column
+CREATE TABLE test_at_modify_generated(
+ a int,
+ b int,
+ c int GENERATED ALWAYS AS (b+1) STORED
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_generated(a,b) VALUES(1,1);
+ALTER TABLE test_at_modify_generated MODIFY b float4 GENERATED ALWAYS AS (a+1000) STORED; -- ERROR
+ALTER TABLE test_at_modify_generated MODIFY b float4 GENERATED ALWAYS AS (c+1000) STORED; -- ERROR
+ALTER TABLE test_at_modify_generated MODIFY a float4 GENERATED ALWAYS AS (b+1000) STORED, MODIFY c float4 GENERATED ALWAYS AS (a+1000) STORED; -- ERROR
+ALTER TABLE test_at_modify_generated MODIFY COLUMN c float4, MODIFY b float4 GENERATED ALWAYS AS (c+1000) STORED;
+DROP TABLE test_at_modify_generated;
+
+-- test modify column AUTO_INCREMENT
+CREATE TABLE test_at_modify_autoinc(
+ a int,
+ b int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_autoinc VALUES(1,NULL);
+INSERT INTO test_at_modify_autoinc VALUES(2,0);
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT; -- ERROR
+ALTER TABLE test_at_modify_autoinc MODIFY b DECIMAL(4,2) AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_modify_autoinc MODIFY b serial AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT NULL UNIQUE KEY;
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+INSERT INTO test_at_modify_autoinc VALUES(3,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc MODIFY COLUMN b int;
+INSERT INTO test_at_modify_autoinc VALUES(4,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc MODIFY b int AUTO_INCREMENT PRIMARY KEY, AUTO_INCREMENT=100;
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+INSERT INTO test_at_modify_autoinc VALUES(5,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc AUTO_INCREMENT=1000;
+ALTER TABLE test_at_modify_autoinc MODIFY b int2 AUTO_INCREMENT;
+INSERT INTO test_at_modify_autoinc VALUES(6,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE, MODIFY b int2 AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE; -- ERROR
+ALTER TABLE test_at_modify_autoinc MODIFY COLUMN b int;
+ALTER TABLE test_at_modify_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE;
+INSERT INTO test_at_modify_autoinc VALUES(7,0,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_modify_autoinc DROP COLUMN c , MODIFY b int2 AUTO_INCREMENT UNIQUE KEY FIRST;
+INSERT INTO test_at_modify_autoinc(a,b) VALUES(8,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 2,1;
+ALTER TABLE test_at_modify_autoinc MODIFY b float4; -- ALTER TYPE ONLY, KEEP AUTO_INCREMENT
+INSERT INTO test_at_modify_autoinc(a,b) VALUES(9,0);
+SELECT * FROM test_at_modify_autoinc ORDER BY 2,1;
+DROP TABLE test_at_modify_autoinc;
+
+-- test generated column reference auto_increment column
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,0);
+INSERT INTO test_at_modify_fa VALUES(21,22,0);
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c int AUTO_INCREMENT PRIMARY KEY, MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED; -- ERROR
+ALTER TABLE test_at_modify_fa MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED, MODIFY COLUMN c int AUTO_INCREMENT PRIMARY KEY; -- ERROR
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c bigint AUTO_INCREMENT PRIMARY KEY, MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED; -- ERROR
+ALTER TABLE test_at_modify_fa MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED, MODIFY COLUMN c bigint AUTO_INCREMENT PRIMARY KEY; -- ERROR
+DROP TABLE test_at_modify_fa;
+
+-- test modify column depended by other objects
+CREATE TABLE test_at_modify_depend(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_depend VALUES(1,1);
+INSERT INTO test_at_modify_depend VALUES(2,2);
+INSERT INTO test_at_modify_depend VALUES(3,3);
+-- --PROCEDURE contains column
+CREATE OR REPLACE PROCEDURE test_at_modify_proc(IN p_in int)
+ AS
+ BEGIN
+ INSERT INTO test_at_modify_depend(a,b) VALUES(p_in, p_in);
+ END;
+/
+ALTER TABLE test_at_modify_depend MODIFY b varchar(8) NOT NULL;
+CALL test_at_modify_proc(2);
+DROP PROCEDURE test_at_modify_proc;
+
+-- --TRIGGER contains and depends column
+CREATE OR REPLACE FUNCTION tg_bf_test_at_modify_func() RETURNS TRIGGER AS
+$$
+ DECLARE
+ BEGIN
+ UPDATE test_at_modify_depend SET b = NULL WHERE a < NEW.a;
+ RETURN NEW;
+ END
+$$ LANGUAGE PLPGSQL;
+CREATE TRIGGER tg_bf_test_at_modify
+ AFTER UPDATE ON test_at_modify_depend
+ FOR EACH ROW WHEN ( NEW.b IS NULL AND OLD.b = OLD.a)
+ EXECUTE PROCEDURE tg_bf_test_at_modify_func();
+ALTER TABLE test_at_modify_depend MODIFY b int NULL DEFAULT 0;
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+UPDATE test_at_modify_depend SET b = NULL WHERE a = 2;
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+DROP TRIGGER tg_bf_test_at_modify ON test_at_modify_depend;
+
+-- --TRIGGER contains but does not depend column
+CREATE TRIGGER tg_bf_test_at_modify
+ BEFORE INSERT ON test_at_modify_depend
+ FOR EACH ROW
+ EXECUTE PROCEDURE tg_bf_test_at_modify_func();
+ALTER TABLE test_at_modify_depend MODIFY b varchar(8) NULL;
+INSERT INTO test_at_modify_depend VALUES (4, 4);
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+DROP TRIGGER tg_bf_test_at_modify ON test_at_modify_depend;
+DROP PROCEDURE tg_bf_test_at_modify_func;
+
+-- --VIEW depends column
+CREATE VIEW test_at_modify_view AS SELECT b FROM test_at_modify_depend;
+ALTER TABLE test_at_modify_depend MODIFY b bigint NULL; -- ERROR
+ALTER TABLE test_at_modify_depend MODIFY b int NULL; -- ERROR
+ALTER TABLE test_at_modify_depend MODIFY b varchar(8) NULL;
+SELECT * FROM test_at_modify_view ORDER BY 1;
+DROP VIEW test_at_modify_view;
+CREATE VIEW test_at_modify_view AS SELECT a FROM test_at_modify_depend where b > 0;
+CREATE VIEW test_at_modify_view1 AS SELECT * FROM test_at_modify_view;
+ALTER TABLE test_at_modify_depend MODIFY b bigint NULL GENERATED ALWAYS AS (a+1);
+ALTER TABLE test_at_modify_depend MODIFY b varchar(8) NULL;
+ALTER TABLE test_at_modify_depend MODIFY b int NULL;
+DROP VIEW test_at_modify_view1;
+DROP VIEW test_at_modify_view;
+CREATE materialized VIEW test_at_modify_view AS SELECT b FROM test_at_modify_depend; --ERROR
+
+-- --TABLE reference column.
+DELETE FROM test_at_modify_depend;
+ALTER TABLE test_at_modify_depend MODIFY b INT PRIMARY KEY;
+CREATE TABLE test_at_modify_ref(
+ a int,
+ b int,
+ FOREIGN KEY (b) REFERENCES test_at_modify_depend(b) ON DELETE SET NULL
+);
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b varchar(8);
+INSERT INTO test_at_modify_ref VALUES(0,0); -- ERROR
+INSERT INTO test_at_modify_depend VALUES(0,0);
+INSERT INTO test_at_modify_ref VALUES(0,0);
+ALTER TABLE test_at_modify_ref MODIFY COLUMN b varchar(8) GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ALTER TABLE test_at_modify_ref MODIFY COLUMN b varchar(8);
+\d+ test_at_modify_ref
+DROP TABLE test_at_modify_ref;
+
+-- --TABLE reference self column.
+CREATE TABLE test_at_modify_ref(
+ a int PRIMARY KEY,
+ b int,
+ FOREIGN KEY (b) REFERENCES test_at_modify_ref(a) ON DELETE SET NULL
+);
+INSERT INTO test_at_modify_ref VALUES(0,0);
+ALTER TABLE test_at_modify_ref MODIFY COLUMN b varchar(8) GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ALTER TABLE test_at_modify_ref MODIFY COLUMN b varchar(8);
+ALTER TABLE test_at_modify_ref MODIFY COLUMN a varchar(8);
+INSERT INTO test_at_modify_ref VALUES('a','a');
+DROP TABLE test_at_modify_ref;
+
+-- --RULE reference column.
+CREATE RULE test_at_modify_rule AS ON INSERT TO test_at_modify_depend WHERE (b is null) DO INSTEAD UPDATE test_at_modify_depend SET b=0;
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b int not null; -- ERROR
+DROP RULE test_at_modify_rule ON test_at_modify_depend;
+
+-- --RLSPOLICY reference column.
+DROP TABLE test_at_modify_depend;
+CREATE ROLE at_modify_role PASSWORD 'Gauss@123';
+CREATE TABLE test_at_modify_depend(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_depend VALUES(0,0);
+GRANT USAGE ON SCHEMA atbdb_ustore_schema TO at_modify_role;
+GRANT SELECT ON test_at_modify_depend TO at_modify_role;
+ALTER TABLE test_at_modify_depend ENABLE ROW LEVEL SECURITY;
+CREATE ROW LEVEL SECURITY POLICY test_at_modify_rls ON test_at_modify_depend AS RESTRICTIVE FOR SELECT TO at_modify_role USING(b >= 20);
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b int not null;
+INSERT INTO test_at_modify_depend VALUES(21,21);
+SET ROLE at_modify_role PASSWORD 'Gauss@123';
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+RESET ROLE;
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b bool not null;
+ALTER TABLE test_at_modify_depend MODIFY COLUMN b int not null;
+INSERT INTO test_at_modify_depend VALUES(22,22);
+SET ROLE at_modify_role PASSWORD 'Gauss@123';
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+RESET ROLE;
+SELECT * FROM test_at_modify_depend ORDER BY 1,2;
+DROP TABLE test_at_modify_depend;
+REVOKE ALL PRIVILEGES ON SCHEMA atbdb_ustore_schema FROM at_modify_role;
+DROP ROLE at_modify_role;
+
+-- ------------------------------------------------------ test ALTER TABLE CHANGE
+-- test change column without data
+CREATE TABLE test_at_change(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) NULL;
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) DEFAULT '0';
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b b1 int AUTO_INCREMENT PRIMARY KEY;
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) UNIQUE;
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) CHECK (b1 < 'a');
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b1 b varchar(8) COLLATE "POSIX";
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b b1 varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+\d+ test_at_change;
+ALTER TABLE test_at_change CHANGE b1 b int NOT NULL;
+\d+ test_at_change;
+select pg_get_tabledef('test_at_change'::regclass);
+INSERT INTO test_at_change VALUES(1,1);
+DROP TABLE test_at_change;
+
+-- test change column datatype
+CREATE TABLE test_at_change_type(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_type VALUES(1,1);
+INSERT INTO test_at_change_type VALUES(2,2);
+INSERT INTO test_at_change_type VALUES(3,3);
+ALTER TABLE test_at_change_type CHANGE b b1 varchar(8);
+SELECT * FROM test_at_change_type where b1 = '3';
+ALTER TABLE test_at_change_type CHANGE b1 b DATE; -- ERROR
+ALTER TABLE test_at_change_type CHANGE b1 b RAW;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+DROP TABLE test_at_change_type;
+CREATE TABLE test_at_change_type(
+ a int,
+ b serial NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_type VALUES(1,1);
+INSERT INTO test_at_change_type VALUES(2,2);
+INSERT INTO test_at_change_type VALUES(3,3);
+ALTER TABLE test_at_change_type CHANGE b b1 int;
+ALTER TABLE test_at_change_type CHANGE b1 b serial; -- ERROR
+ALTER TABLE test_at_change_type CHANGE b1 b DECIMAL(4,2);
+SELECT * FROM test_at_change_type where b = 3;
+ALTER TABLE test_at_change_type CHANGE b b1 BOOLEAN;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+DROP TABLE test_at_change_type;
+CREATE TABLE test_at_change_type(
+ a int,
+ b text
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_type VALUES(1,'beijing');
+INSERT INTO test_at_change_type VALUES(2,'shanghai');
+INSERT INTO test_at_change_type VALUES(3,'guangzhou');
+ALTER TABLE test_at_change_type CHANGE b b1 SET('beijing','shanghai','nanjing','wuhan'); -- ERROR
+ALTER TABLE test_at_change_type CHANGE b b1 SET('beijing','shanghai','nanjing','guangzhou');
+ALTER TABLE test_at_change_type CHANGE b1 b SET('beijing','shanghai','guangzhou','wuhan'); -- ERROR
+select pg_get_tabledef('test_at_change_type'::regclass);
+DROP TABLE test_at_change_type;
+CREATE TABLE test_at_change_type(
+ a int,
+ b varchar(32)
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_type VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_change_type VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_change_type VALUES(3,'2022-11-24 12:00:00');
+ALTER TABLE test_at_change_type CHANGE b b1 varchar(10); -- ERROR
+ALTER TABLE test_at_change_type CHANGE b b1 DATE;
+SELECT * FROM test_at_change_type ORDER BY 1,2;
+DROP TABLE test_at_change_type;
+
+-- test change column constraint
+CREATE TABLE test_at_change_constr(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_constr VALUES(1,1);
+INSERT INTO test_at_change_constr VALUES(2,2);
+INSERT INTO test_at_change_constr VALUES(3,3);
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) NOT NULL NULL; -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) UNIQUE KEY NULL;
+INSERT INTO test_at_change_constr VALUES(3,3); -- ERROR
+INSERT INTO test_at_change_constr VALUES(4,NULL);
+ALTER TABLE test_at_change_constr CHANGE b1 b int NULL PRIMARY KEY; -- ERROR
+DELETE FROM test_at_change_constr WHERE b1 IS NULL;
+ALTER TABLE test_at_change_constr CHANGE b1 b int NULL PRIMARY KEY;
+INSERT INTO test_at_change_constr VALUES(4,NULL); -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) CONSTRAINT t_at_m_check CHECK (b1 < 3); -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b b1 varchar(8) CONSTRAINT t_at_m_check CHECK (b1 < 5);
+ALTER TABLE test_at_change_constr CHANGE b1 b varchar(8) CONSTRAINT t_at_m_check CHECK (b = a); -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b1 b varchar(8) CONSTRAINT t_at_m_check_1 CHECK (b = a);
+INSERT INTO test_at_change_constr VALUES(4,4);
+INSERT INTO test_at_change_constr VALUES(5,5); -- ERROR
+INSERT INTO test_at_change_constr VALUES(6,'a'); -- ERROR
+INSERT INTO test_at_change_constr VALUES(0,'a');
+ALTER TABLE test_at_change_constr CHANGE b b1 int NOT NULL PRIMARY KEY; -- ERROR
+ALTER TABLE test_at_change_constr CHANGE b b1 int NOT NULL;
+INSERT INTO test_at_change_constr VALUES(5,5); -- ERROR
+SELECT b1 FROM test_at_change_constr ORDER BY 1;
+select pg_get_tabledef('test_at_change_constr'::regclass);
+ALTER TABLE test_at_change_constr CHANGE b1 b int NOT NULL REFERENCES test_at_ref (a); -- ERROR
+DROP TABLE test_at_change_constr;
+
+-- test change column default
+CREATE TABLE test_at_change_default(
+ a int,
+ b int DEFAULT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_default VALUES(1,1);
+INSERT INTO test_at_change_default VALUES(2,2);
+INSERT INTO test_at_change_default VALUES(3,3);
+ALTER TABLE test_at_change_default CHANGE b b1 bigint DEFAULT (a+1); -- ERROR
+ALTER TABLE test_at_change_default CHANGE b b1 bigint DEFAULT NULL;
+\d+ test_at_change_default;
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) DEFAULT 'a' GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) DEFAULT 'a';
+\d+ test_at_change_default;
+INSERT INTO test_at_change_default VALUES(0,DEFAULT);
+SELECT b FROM test_at_change_default ORDER BY 1;
+ALTER TABLE test_at_change_default CHANGE b b1 int DEFAULT 4 AUTO_INCREMENT; -- ERROR
+ALTER TABLE test_at_change_default CHANGE b b1 int DEFAULT 4;
+INSERT INTO test_at_change_default VALUES(4,DEFAULT);
+SELECT b1 FROM test_at_change_default ORDER BY 1;
+ALTER TABLE test_at_change_default CHANGE b1 b varchar(8) GENERATED ALWAYS AS (a+1) STORED;
+SELECT a,b FROM test_at_change_default ORDER BY 1,2;
+ALTER TABLE test_at_change_default CHANGE a a1 varchar(8) DEFAULT 'a';
+INSERT INTO test_at_change_default VALUES(DEFAULT,DEFAULT);
+SELECT * FROM test_at_change_default ORDER BY 1,2;
+\d+ test_at_change_default;
+DROP TABLE test_at_change_default;
+
+-- test change column depended by generated column
+CREATE TABLE test_at_change_generated(
+ a int,
+ b varchar(32),
+ c varchar(32) GENERATED ALWAYS AS (b) STORED
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_generated(a,b) VALUES(1,'2022-11-22 12:00:00');
+INSERT INTO test_at_change_generated(a,b) VALUES(2,'2022-11-23 12:00:00');
+INSERT INTO test_at_change_generated(a,b) VALUES(3,'2022-11-24 12:00:00');
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ALTER TABLE test_at_change_generated CHANGE COLUMN b b1 DATE;
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+\d+ test_at_change_generated
+ALTER TABLE test_at_change_generated CHANGE COLUMN b1 b varchar(32) AFTER c;
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+DROP TABLE test_at_change_generated;
+CREATE TABLE test_at_change_generated(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_generated(a,b) VALUES(-1,DEFAULT);
+INSERT INTO test_at_change_generated(a,b) VALUES(0,DEFAULT);
+INSERT INTO test_at_change_generated(a,b) VALUES(1,DEFAULT);
+ALTER TABLE test_at_change_generated CHANGE COLUMN a a1 bool;
+ALTER TABLE test_at_change_generated CHANGE COLUMN a1 a int;
+INSERT INTO test_at_change_generated(a,b) VALUES(100,DEFAULT);
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ALTER TABLE test_at_change_generated CHANGE COLUMN a a1 bool, MODIFY COLUMN b varchar(32);
+\d test_at_change_generated
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ALTER TABLE test_at_change_generated MODIFY COLUMN b int GENERATED ALWAYS AS (a+1) STORED, CHANGE COLUMN a1 a int;
+\d test_at_change_generated
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+INSERT INTO test_at_change_generated(a,b) VALUES(100,DEFAULT);
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ALTER TABLE test_at_change_generated MODIFY COLUMN b bool GENERATED ALWAYS AS (a) STORED;
+\d test_at_change_generated
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ALTER TABLE test_at_change_generated CHANGE COLUMN a a1 bool;
+\d test_at_change_generated
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+ALTER TABLE test_at_change_generated CHANGE COLUMN a1 a int;
+INSERT INTO test_at_change_generated(a,b) VALUES(100,DEFAULT);
+\d test_at_change_generated
+SELECT * FROM test_at_change_generated ORDER BY 1,2;
+DROP TABLE test_at_change_generated;
+
+-- test change column AUTO_INCREMENT
+CREATE TABLE test_at_change_autoinc(
+ a int,
+ b int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_autoinc VALUES(1,NULL);
+INSERT INTO test_at_change_autoinc VALUES(2,0);
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int2 AUTO_INCREMENT; -- ERROR
+ALTER TABLE test_at_change_autoinc CHANGE b b1 DECIMAL(4,2) AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_change_autoinc CHANGE b b1 serial AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int2 AUTO_INCREMENT NULL UNIQUE KEY;
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+INSERT INTO test_at_change_autoinc VALUES(3,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc CHANGE b1 b int;
+INSERT INTO test_at_change_autoinc VALUES(4,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int AUTO_INCREMENT PRIMARY KEY, AUTO_INCREMENT=100;
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+INSERT INTO test_at_change_autoinc VALUES(5,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc AUTO_INCREMENT=1000;
+ALTER TABLE test_at_change_autoinc CHANGE b1 b int2 AUTO_INCREMENT;
+INSERT INTO test_at_change_autoinc VALUES(6,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE, CHANGE b b1 int2 AUTO_INCREMENT UNIQUE KEY; -- ERROR
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE; -- ERROR
+ALTER TABLE test_at_change_autoinc CHANGE b b1 int;
+ALTER TABLE test_at_change_autoinc ADD COLUMN c int AUTO_INCREMENT UNIQUE;
+INSERT INTO test_at_change_autoinc VALUES(7,0,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 1,2;
+ALTER TABLE test_at_change_autoinc DROP COLUMN c , CHANGE b1 b int2 AUTO_INCREMENT UNIQUE KEY FIRST;
+INSERT INTO test_at_change_autoinc(a,b) VALUES(8,0);
+SELECT * FROM test_at_change_autoinc ORDER BY 2,1;
+DROP TABLE test_at_change_autoinc;
+
+-- test change column depended by other objects
+CREATE TABLE test_at_change_depend(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_depend VALUES(1,1);
+INSERT INTO test_at_change_depend VALUES(2,2);
+INSERT INTO test_at_change_depend VALUES(3,3);
+-- --PROCEDURE contains column
+CREATE OR REPLACE PROCEDURE test_at_change_proc(IN p_in int)
+ AS
+ BEGIN
+ INSERT INTO test_at_change_depend(a,b) VALUES(p_in, p_in);
+ END;
+/
+ALTER TABLE test_at_change_depend CHANGE b b1 varchar(8) NOT NULL;
+CALL test_at_change_proc(2); -- ERROR
+DROP PROCEDURE test_at_change_proc;
+
+-- --TRIGGER contains and depends column
+CREATE OR REPLACE FUNCTION tg_bf_test_at_change_func() RETURNS TRIGGER AS
+$$
+ DECLARE
+ BEGIN
+ UPDATE test_at_change_depend SET b1 = NULL WHERE a < NEW.a;
+ RETURN NEW;
+ END
+$$ LANGUAGE PLPGSQL;
+CREATE TRIGGER tg_bf_test_at_change
+ AFTER UPDATE ON test_at_change_depend
+ FOR EACH ROW WHEN ( NEW.b1 IS NULL AND OLD.b1 = OLD.a)
+ EXECUTE PROCEDURE tg_bf_test_at_change_func();
+ALTER TABLE test_at_change_depend CHANGE b1 b varchar(8) NULL DEFAULT '0';
+UPDATE test_at_change_depend SET b = NULL WHERE a = 2; -- ERROR
+DROP TRIGGER tg_bf_test_at_change ON test_at_change_depend;
+DROP FUNCTION tg_bf_test_at_change_func;
+
+-- --VIEW depends column
+CREATE VIEW test_at_change_view AS SELECT b FROM test_at_change_depend;
+ALTER TABLE test_at_change_depend CHANGE b b1 bigint NULL; -- ERROR
+ALTER TABLE test_at_change_depend CHANGE b b1 int NULL; -- ERROR
+ALTER TABLE test_at_change_depend CHANGE b b1 varchar(8) NULL;
+SELECT b FROM test_at_change_view ORDER BY 1;
+DROP VIEW test_at_change_view;
+CREATE VIEW test_at_change_view AS SELECT a FROM test_at_change_depend where b1 > 0;
+CREATE VIEW test_at_change_view1 AS SELECT * FROM test_at_change_view;
+ALTER TABLE test_at_change_depend CHANGE b1 b bigint NULL GENERATED ALWAYS AS (a+1);
+ALTER TABLE test_at_change_depend CHANGE b b1 varchar(8) NULL;
+ALTER TABLE test_at_change_depend CHANGE b1 b int NULL;
+SELECT * FROM test_at_change_view1 ORDER BY 1;
+DROP VIEW test_at_change_view1;
+DROP VIEW test_at_change_view;
+CREATE materialized VIEW test_at_change_view AS SELECT b FROM test_at_change_depend; -- ERROR
+
+-- --TABLE reference column.
+DELETE FROM test_at_change_depend;
+ALTER TABLE test_at_change_depend CHANGE b b INT PRIMARY KEY;
+CREATE TABLE test_at_change_ref(
+ a int,
+ b int,
+ FOREIGN KEY (b) REFERENCES test_at_change_depend(b) ON DELETE SET NULL
+) WITH(STORAGE_TYPE=USTORE);
+ALTER TABLE test_at_change_depend CHANGE COLUMN b b1 varchar(8);
+INSERT INTO test_at_change_ref VALUES(0,0); -- ERROR
+INSERT INTO test_at_change_depend VALUES(0,0);
+INSERT INTO test_at_change_ref VALUES(0,0);
+ALTER TABLE test_at_change_ref CHANGE COLUMN b b1 varchar(8) GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ALTER TABLE test_at_change_ref CHANGE COLUMN b b1 varchar(8);
+\d+ test_at_change_ref
+DROP TABLE test_at_change_ref;
+
+-- --TABLE reference self column.
+CREATE TABLE test_at_change_ref(
+ a int PRIMARY KEY,
+ b int,
+ FOREIGN KEY (b) REFERENCES test_at_change_ref(a) ON DELETE SET NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_ref VALUES(0,0);
+ALTER TABLE test_at_change_ref CHANGE COLUMN b b1 varchar(8) GENERATED ALWAYS AS (a+1) STORED; -- ERROR
+ALTER TABLE test_at_change_ref CHANGE COLUMN b b1 varchar(8);
+ALTER TABLE test_at_change_ref CHANGE COLUMN a a1 varchar(8);
+INSERT INTO test_at_change_ref VALUES('a','a');
+DROP TABLE test_at_change_ref;
+
+-- --RULE reference column.
+CREATE RULE test_at_change_rule AS ON INSERT TO test_at_change_depend WHERE (b1 is null) DO INSTEAD UPDATE test_at_change_depend SET b1=0;
+ALTER TABLE test_at_change_depend CHANGE COLUMN b1 b bigint not null; -- ERROR
+DROP RULE test_at_change_rule ON test_at_change_depend;
+
+-- --RLSPOLICY reference column.
+DROP TABLE test_at_change_depend;
+CREATE ROLE at_change_role PASSWORD 'Gauss@123';
+CREATE TABLE test_at_change_depend(
+ a int,
+ b int NOT NULL
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_change_depend VALUES(0,0);
+GRANT USAGE ON SCHEMA atbdb_ustore_schema TO at_change_role;
+GRANT SELECT ON test_at_change_depend TO at_change_role;
+ALTER TABLE test_at_change_depend ENABLE ROW LEVEL SECURITY;
+CREATE ROW LEVEL SECURITY POLICY test_at_change_rls ON test_at_change_depend AS RESTRICTIVE FOR SELECT TO at_change_role USING(b >= 20);
+ALTER TABLE test_at_change_depend CHANGE COLUMN b b1 int not null;
+INSERT INTO test_at_change_depend VALUES(21,21);
+SET ROLE at_change_role PASSWORD 'Gauss@123';
+SELECT * FROM test_at_change_depend ORDER BY 1,2;
+RESET ROLE;
+SELECT * FROM test_at_change_depend ORDER BY 1,2;
+ALTER TABLE test_at_change_depend CHANGE COLUMN b1 b2 bool not null;
+ALTER TABLE test_at_change_depend CHANGE COLUMN b2 b3 int not null;
+INSERT INTO test_at_change_depend VALUES(22,22);
+SET ROLE at_change_role PASSWORD 'Gauss@123';
+SELECT * FROM test_at_change_depend ORDER BY 1,2;
+RESET ROLE;
+SELECT * FROM test_at_change_depend ORDER BY 1,2;
+DROP TABLE test_at_change_depend;
+REVOKE ALL PRIVILEGES ON SCHEMA atbdb_ustore_schema FROM at_change_role;
+DROP ROLE at_change_role;
+
+-- test alter command order
+CREATE TABLE test_at_pass(
+ a int,
+ b int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_pass VALUES(1,0);
+ALTER TABLE test_at_pass ADD COLUMN c int, DROP COLUMN c; -- ERROR
+ALTER TABLE test_at_pass ADD COLUMN c int DEFAULT 0, MODIFY COLUMN c bigint; -- ERROR
+ALTER TABLE test_at_pass ADD COLUMN c int DEFAULT 0, CHANGE COLUMN c c1 bigint; -- ERROR
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint, MODIFY COLUMN b float4; -- ERROR
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint, CHANGE COLUMN b b1 float4; -- ERROR
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint, CHANGE COLUMN b1 b2 bigint; -- ERROR
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint, DROP COLUMN b; -- ERROR
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint, DROP COLUMN b; -- ERROR
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint, DROP COLUMN b1; -- ERROR
+ALTER TABLE test_at_pass MODIFY a bigint, MODIFY COLUMN a VARCHAR(8); -- ERROR
+ALTER TABLE test_at_pass CHANGE COLUMN b a bigint, CHANGE COLUMN a b VARCHAR(8); -- ERROR
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint, ALTER COLUMN b SET DEFAULT 100;
+\d test_at_pass
+ALTER TABLE test_at_pass MODIFY COLUMN b bigint DEFAULT 100, ALTER COLUMN b DROP DEFAULT;
+\d test_at_pass
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint, ALTER COLUMN b1 SET DEFAULT 100;
+\d test_at_pass
+ALTER TABLE test_at_pass CHANGE COLUMN b1 b bigint DEFAULT 100, ALTER COLUMN b DROP DEFAULT;
+\d test_at_pass
+ALTER TABLE test_at_pass CHANGE COLUMN b b1 bigint DEFAULT 100, ALTER COLUMN b DROP DEFAULT; -- ERROR
+ALTER TABLE test_at_pass MODIFY COLUMN a bigint CONSTRAINT atpass_pk PRIMARY KEY, DROP CONSTRAINT atpass_pk; -- ERROR
+ALTER TABLE test_at_pass MODIFY COLUMN a bigint CONSTRAINT atpass_pk PRIMARY KEY, ADD CONSTRAINT atpass_pk PRIMARY KEY(a); -- ERROR
+DROP TABLE test_at_pass;
+
+-- test complex commands combined
+CREATE TABLE test_at_complex(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_complex VALUES(0,DEFAULT);
+INSERT INTO test_at_complex VALUES(1,DEFAULT);
+INSERT INTO test_at_complex VALUES(2,DEFAULT);
+INSERT INTO test_at_complex VALUES(-1,DEFAULT);
+ALTER TABLE test_at_complex MODIFY COLUMN a varchar(8), MODIFY COLUMN b int AUTO_INCREMENT UNIQUE;
+INSERT INTO test_at_complex VALUES(3,DEFAULT);
+SELECT * FROM test_at_complex ORDER BY a::int,b::int;
+DROP TABLE test_at_complex;
+
+CREATE TABLE test_at_complex(
+ a int,
+ b int GENERATED ALWAYS AS (a+1) STORED
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_complex VALUES(0,DEFAULT);
+INSERT INTO test_at_complex VALUES(1,DEFAULT);
+INSERT INTO test_at_complex VALUES(2,DEFAULT);
+INSERT INTO test_at_complex VALUES(-1,DEFAULT);
+ALTER TABLE test_at_complex MODIFY COLUMN b int AUTO_INCREMENT UNIQUE, MODIFY COLUMN a varchar(8);
+INSERT INTO test_at_complex VALUES(3,DEFAULT);
+SELECT * FROM test_at_complex ORDER BY a::int,b::int;
+DROP TABLE test_at_complex;
+
+-- test modify partitioned table column without data
+CREATE TABLE pt_at_modify (a int, b int NOT NULL, PRIMARY KEY(b,a)) WITH(STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (a)
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (1000),
+ PARTITION p3 VALUES LESS THAN (MAXVALUE)
+);
+ALTER TABLE pt_at_modify MODIFY a int8 DEFAULT 0; -- ERROR
+ALTER TABLE pt_at_modify MODIFY a int DEFAULT 0;
+ALTER TABLE pt_at_modify MODIFY a int GENERATED ALWAYS AS (b+1) STORED; -- ERROR
+\d+ pt_at_modify;
+ALTER TABLE pt_at_modify MODIFY b int8 NULL;
+\d+ pt_at_modify;
+ALTER TABLE pt_at_modify MODIFY b int8 DEFAULT 0;
+\d+ pt_at_modify;
+ALTER TABLE pt_at_modify MODIFY b int AUTO_INCREMENT;
+\d+ pt_at_modify;
+ALTER TABLE pt_at_modify MODIFY b int2 UNIQUE;
+\d+ pt_at_modify;
+ALTER TABLE pt_at_modify MODIFY b int CHECK (b < 10000);
+\d+ pt_at_modify;
+ALTER TABLE pt_at_modify MODIFY b varchar(8) COLLATE "POSIX";
+\d+ pt_at_modify;
+ALTER TABLE pt_at_modify MODIFY b int8 GENERATED ALWAYS AS (a+1) STORED;
+\d+ pt_at_modify;
+ALTER TABLE pt_at_modify MODIFY b varchar(8) NOT NULL;
+\d+ pt_at_modify;
+select pg_get_tabledef('pt_at_modify'::regclass);
+INSERT INTO pt_at_modify VALUES(1,1);
+DROP TABLE pt_at_modify;
+
+-- test alter modify first after
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED AFTER a, MODIFY c float4 FIRST;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+DROP TABLE test_at_modify_fa;
+
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED FIRST, MODIFY c float4 GENERATED ALWAYS AS (b+100) STORED AFTER a;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+DROP TABLE test_at_modify_fa;
+
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,0);
+INSERT INTO test_at_modify_fa VALUES(21,22,0);
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED FIRST, MODIFY c bigint AUTO_INCREMENT PRIMARY KEY AFTER a;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+INSERT INTO test_at_modify_fa(a,b,c) VALUES(31,32,NULL);
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+DROP TABLE test_at_modify_fa;
+
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int GENERATED ALWAYS AS (b+1) STORED
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_fa VALUES(1,2,DEFAULT);
+INSERT INTO test_at_modify_fa VALUES(11,12,DEFAULT);
+INSERT INTO test_at_modify_fa VALUES(21,22,DEFAULT);
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED AFTER a, MODIFY b float4 GENERATED ALWAYS AS (a+1000) STORED FIRST; -- ERROR
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (b+100) STORED AFTER a, MODIFY a float4 GENERATED ALWAYS AS (b+1000) STORED FIRST;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+DROP TABLE test_at_modify_fa;
+
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+SELECT a,b,c FROM test_at_modify_fa ORDER BY 1,2,3;
+ALTER TABLE test_at_modify_fa ADD COLUMN d int GENERATED ALWAYS AS (a+100) STORED AFTER a, ADD COLUMN e int GENERATED ALWAYS AS (b+100) STORED FIRST;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+DROP TABLE test_at_modify_fa;
+
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+SELECT a,b,c FROM test_at_modify_fa ORDER BY 1,2,3;
+ALTER TABLE test_at_modify_fa ADD COLUMN d bigint AUTO_INCREMENT PRIMARY KEY AFTER a, ADD COLUMN e int GENERATED ALWAYS AS (b+100) STORED FIRST;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+INSERT INTO test_at_modify_fa(a,b,c) VALUES(31,32,33);
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3,4;
+DROP TABLE test_at_modify_fa;
+
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,13);
+INSERT INTO test_at_modify_fa VALUES(21,22,23);
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c float4 GENERATED ALWAYS AS (a+100) STORED FIRST, MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED AFTER c; -- ERROR
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c float4 GENERATED ALWAYS AS (a+100) STORED FIRST, MODIFY COLUMN b int GENERATED ALWAYS AS (a+100) STORED AFTER c;
+SELECT * FROM test_at_modify_fa ORDER BY 1,2,3;
+DROP TABLE test_at_modify_fa;
+
+CREATE TABLE test_at_modify_fa(
+ a int,
+ b int,
+ c int
+) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_fa VALUES(1,2,3);
+INSERT INTO test_at_modify_fa VALUES(11,12,0);
+INSERT INTO test_at_modify_fa VALUES(21,22,0);
+ALTER TABLE test_at_modify_fa MODIFY COLUMN c bigint AUTO_INCREMENT PRIMARY KEY FIRST, MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED AFTER c; -- ERROR
+ALTER TABLE test_at_modify_fa MODIFY COLUMN b int GENERATED ALWAYS AS (c+100) STORED AFTER c, MODIFY COLUMN c bigint AUTO_INCREMENT PRIMARY KEY FIRST; -- ERROR
+DROP TABLE test_at_modify_fa;
+
+-- primary key should be not null after modify
+create table test11(f11 int, f12 varchar(20), f13 bool, CONSTRAINT pk_test11_f11 primary key (f11)) WITH(STORAGE_TYPE=USTORE);
+\d test11
+ALTER TABLE test11 MODIFY COLUMN f11 int;
+\d test11
+ALTER TABLE test11 MODIFY COLUMN f11 int AFTER f13;
+\d test11
+ALTER TABLE test11 DROP CONSTRAINT pk_test11_f11, MODIFY COLUMN f11 int NULL;
+\d test11
+ALTER TABLE test11 ADD CONSTRAINT pk_test11_f11 primary key (f11), MODIFY COLUMN f11 int NULL;
+\d test11
+insert into test11(f11,f12,f13) values(NULL,'1',true); --ERROR
+drop table test11;
+-- primary keys should be not null after modify
+create table test11(f11 int, f12 varchar(20), f13 bool, CONSTRAINT pk_test11_f11 primary key (f11,f12)) WITH(STORAGE_TYPE=USTORE);
+\d test11
+ALTER TABLE test11 MODIFY COLUMN f11 int;
+\d test11
+ALTER TABLE test11 MODIFY f11 int AFTER f13;
+\d test11
+ALTER TABLE test11 DROP CONSTRAINT pk_test11_f11, MODIFY COLUMN f11 int NULL;
+\d test11
+ALTER TABLE test11 ADD CONSTRAINT pk_test11_f11 primary key (f11), MODIFY COLUMN f11 int NULL;
+\d test11
+insert into test11(f11,f12,f13) values(NULL,'1',true); --ERROR
+drop table test11;
+-- primary keys in partition table should be not null after modify
+create table range_range(id int, gender varchar not null, birthday date not null) WITH(STORAGE_TYPE=USTORE)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+ALTER TABLE range_range ADD CONSTRAINT range_range_pkey primary KEY USING ubtree (id, birthday);
+\d+ range_range
+ALTER TABLE range_range MODIFY COLUMN id int AFTER birthday;
+\d+ range_range
+drop table if exists range_range cascade;
+-- primary key in partition table should be not null after modify
+create table range_range(id int, gender varchar not null, birthday date not null) WITH(STORAGE_TYPE=USTORE)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+ALTER TABLE range_range ADD CONSTRAINT range_range_pkey primary KEY USING ubtree (id);
+\d+ range_range
+ALTER TABLE range_range MODIFY COLUMN id int AFTER birthday;
+\d+ range_range
+drop table if exists range_range cascade;
+-- primary key in partition table should be not null after modify
+create table range_range(id int, gender varchar not null, birthday date not null) WITH(STORAGE_TYPE=USTORE)
+partition by range (id) subpartition by range (birthday)
+(
+ partition p_1 values less than(100)
+ (
+ subpartition p_1_a values less than('2022-01-01'),
+ subpartition p_1_b values less than(MAXVALUE)
+ ),
+ partition p_2 values less than(200)
+ (
+ subpartition p_2_a values less than('2022-01-01'),
+ subpartition p_2_b values less than(MAXVALUE)
+ ),
+ partition p_3 values less than(MAXVALUE)
+ (
+ subpartition p_3_a values less than('2022-01-01'),
+ subpartition p_3_b values less than(MAXVALUE)
+ )
+);
+ALTER TABLE range_range ADD CONSTRAINT range_range_pkey primary KEY USING ubtree (gender);
+\d+ range_range
+ALTER TABLE range_range MODIFY COLUMN gender varchar AFTER birthday;
+\d+ range_range
+drop table if exists range_range cascade;
+-- primary keys in multi range keys partition table should be not null after modify
+create table multi_keys_range(f1 int, f2 int, f3 int) WITH(STORAGE_TYPE=USTORE)
+partition by range(f1, f2)
+(
+ partition multi_keys_range_p0 values less than (10, 0),
+ partition multi_keys_range_p1 values less than (20, 0),
+ partition multi_keys_range_p2 values less than (30, 0)
+);
+-- primary key should be LOCAL INDEX
+alter table multi_keys_range modify f1 int after f3, ADD CONSTRAINT multi_keys_range_pkey PRIMARY KEY USING ubtree (f1,f2);
+\d+ multi_keys_range
+alter table multi_keys_range modify f2 int after f3;
+\d+ multi_keys_range
+drop table if exists multi_keys_range cascade;
+-- primary keys in multi list keys partition table should be not null after modify
+create table multi_keys_list(f1 int, f2 int, f3 int) WITH(STORAGE_TYPE=USTORE)
+partition by list(f1, f2)
+(
+ partition multi_keys_list_p0 values ((10, 0)),
+ partition multi_keys_list_p1 values ((20, 0)),
+ partition multi_keys_list_p2 values (DEFAULT)
+);
+-- primary key should be LOCAL INDEX
+alter table multi_keys_list modify f1 int after f3, ADD CONSTRAINT multi_keys_list_pkey PRIMARY KEY USING ubtree (f1,f2);
+\d+ multi_keys_list
+alter table multi_keys_list modify f2 int after f3;
+\d+ multi_keys_list
+drop table if exists multi_keys_list cascade;
+
+-- test moidfy/change VIEW depends column
+-- --modify
+-- -- --test select *
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_star AS select * from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test select * with add column
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_star AS select * from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20), ADD COLUMN f0 int;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int, ADD COLUMN f0 int;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+SELECT * FROM test_modify_view_star;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int FIRST, ADD COLUMN f5 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+SELECT * FROM test_modify_view_star;
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test select * special
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+create view test_modify_view_star(col1,col2) as
+SELECT * FROM
+(
+ SELECT
+ CAST(f1/10000 AS DECIMAL(18,2)),
+ CAST(CAST(f4 AS DECIMAL(18,4))/f1*100 AS DECIMAL(18,2))
+ FROM test_at_modify_view_column
+);
+SELECT * FROM test_modify_view_star;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20);
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+SELECT * FROM test_modify_view_star;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int FIRST; -- ERROR
+ALTER TABLE test_at_modify_view_column ADD COLUMN f5 int FIRST; -- ERROR
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test modify view column
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_f1f2 WITH(security_barrier=TRUE) AS select F1,F2 from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int;
+SELECT pg_get_viewdef('test_modify_view_f1f2'::regclass);
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_f1f2'::regclass);
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test view and column name
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW "test_modify_view_f1f2F3" AS select F1,F2,F3 AS "F3" from test_at_modify_view_column where f4 > 0;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column MODIFY column f1 int;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ALTER TABLE test_at_modify_view_column MODIFY column f4 varchar(20);
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ALTER TABLE test_at_modify_view_column MODIFY column f4 int AFTER f1;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test drop column
+CREATE TABLE test_at_modify_view_column (f5 int, f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_view_column VALUES(5, 4, '3', '2', 1);
+CREATE VIEW "test_modify_view_f1f2F3" AS select F1,F2,F3 AS "F3" from test_at_modify_view_column where f4 > 0;
+ALTER TABLE test_at_modify_view_column MODIFY column f1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column MODIFY column f4 varchar(20), DROP COLUMN f5;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+SELECT * FROM "test_modify_view_f1f2F3";
+ALTER TABLE test_at_modify_view_column MODIFY column f4 int AFTER f1, DROP COLUMN f5;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+SELECT * FROM "test_modify_view_f1f2F3";
+DROP VIEW "test_modify_view_f1f2F3";
+DROP TABLE test_at_modify_view_column CASCADE;
+-- --change
+-- -- --test select *
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_star AS select * from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 int;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+ALTER TABLE test_at_modify_view_column CHANGE column c1 f1 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test select * with add column
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_star AS select * from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20), ADD COLUMN f0 int; -- ERROR
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 int, ADD COLUMN f0 int;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+SELECT * FROM test_modify_view_star;
+ALTER TABLE test_at_modify_view_column CHANGE column c1 f1 int FIRST, ADD COLUMN f5 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+SELECT * FROM test_modify_view_star;
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test select * special
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+create view test_modify_view_star(col1,col2) as
+SELECT * FROM
+(
+ SELECT
+ CAST(f1/10000 AS DECIMAL(18,2)),
+ CAST(CAST(f4 AS DECIMAL(18,4))/f1*100 AS DECIMAL(18,2))
+ FROM test_at_modify_view_column
+);
+SELECT * FROM test_modify_view_star;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20);
+SELECT pg_get_viewdef('test_modify_view_star'::regclass);
+SELECT * FROM test_modify_view_star;
+ALTER TABLE test_at_modify_view_column CHANGE column c1 f1 int FIRST; -- ERROR
+ALTER TABLE test_at_modify_view_column ADD COLUMN f5 int FIRST; -- ERROR
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test CHANGE view column
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW test_modify_view_f1f2 WITH(security_barrier=TRUE) AS select F1,F2 from test_at_modify_view_column;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 int;
+SELECT pg_get_viewdef('test_modify_view_f1f2'::regclass);
+ALTER TABLE test_at_modify_view_column CHANGE column c1 f1 int FIRST;
+SELECT pg_get_viewdef('test_modify_view_f1f2'::regclass);
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test view and column name
+CREATE TABLE test_at_modify_view_column (f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_view_column VALUES(4, '3', '2', 1);
+CREATE VIEW "test_modify_view_f1f2F3" AS select F1,F2,F3 AS "F3" from test_at_modify_view_column where f4 > 0;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 int;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ALTER TABLE test_at_modify_view_column CHANGE column f4 c4 varchar(20);
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+ALTER TABLE test_at_modify_view_column CHANGE column c4 f4 int AFTER c1;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+DROP TABLE test_at_modify_view_column CASCADE;
+-- -- --test drop column
+CREATE TABLE test_at_modify_view_column (f5 int, f4 int primary key, f3 text, f2 text, f1 int unique) WITH(STORAGE_TYPE=USTORE);
+INSERT INTO test_at_modify_view_column VALUES(5, 4, '3', '2', 1);
+CREATE VIEW "test_modify_view_f1f2F3" AS select F1,F2,F3 AS "F3" from test_at_modify_view_column where f4 > 0;
+ALTER TABLE test_at_modify_view_column CHANGE column f1 c1 varchar(20); -- ERROR
+ALTER TABLE test_at_modify_view_column CHANGE column f4 c4 varchar(20), DROP COLUMN f5;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+SELECT * FROM "test_modify_view_f1f2F3";
+ALTER TABLE test_at_modify_view_column CHANGE column c4 f4 int AFTER f1, DROP COLUMN f5;
+SELECT pg_get_viewdef('"test_modify_view_f1f2F3"'::regclass);
+SELECT * FROM "test_modify_view_f1f2F3";
+DROP VIEW "test_modify_view_f1f2F3";
+DROP TABLE test_at_modify_view_column CASCADE;
+
+-- END
+RESET CURRENT_SCHEMA;
+DROP SCHEMA atbdb_ustore_schema CASCADE;
+\c regression
+clean connection to all force for database atbdb_ustore;
+drop database if exists atbdb_ustore;
\ No newline at end of file
diff --git a/src/test/regress/sql/create_schema2.sql b/src/test/regress/sql/create_schema2.sql
index 1f1076c88..58c5157be 100644
--- a/src/test/regress/sql/create_schema2.sql
+++ b/src/test/regress/sql/create_schema2.sql
@@ -4,10 +4,10 @@ declare
begin
select rolname into var_name from pg_authid where oid=10;
- query_str := 'create schema ' || var_name;
+ query_str := 'create schema "' || var_name || '"';
EXECUTE IMMEDIATE query_str;
- query_str := 'drop schema ' || var_name ||' CASCADE';
+ query_str := 'drop schema "' || var_name ||'" CASCADE';
EXECUTE IMMEDIATE query_str;
end;
/
@@ -18,10 +18,10 @@ declare
begin
select rolname into var_name from pg_authid where oid=10;
- query_str := 'create schema authorization ' || var_name;
+ query_str := 'create schema authorization "' || var_name || '"';
EXECUTE IMMEDIATE query_str;
- query_str := 'drop schema ' || var_name ||' CASCADE';
+ query_str := 'drop schema "' || var_name ||'" CASCADE';
EXECUTE IMMEDIATE query_str;
end;
/
@@ -32,10 +32,10 @@ declare
begin
select rolname into var_name from pg_authid where oid=10;
- query_str := 'create schema ' || var_name ||'_123';
+ query_str := 'create schema "' || var_name ||'_123"';
EXECUTE IMMEDIATE query_str;
- query_str := 'drop schema ' || var_name || '_123 CASCADE';
+ query_str := 'drop schema "' || var_name || '_123" CASCADE';
EXECUTE IMMEDIATE query_str;
end;
/
diff --git a/src/test/regress/sql/decode_compatible_with_o.sql b/src/test/regress/sql/decode_compatible_with_o.sql
index bedef3b53..d6cbaf510 100644
--- a/src/test/regress/sql/decode_compatible_with_o.sql
+++ b/src/test/regress/sql/decode_compatible_with_o.sql
@@ -1065,7 +1065,39 @@ select decode(c_nvarchar2, c_reltime, 'Conversion successfully!', 'Conversion fa
select decode(c_text, c_reltime, 'Conversion successfully!', 'Conversion failed!') from tb_test;
select decode(c_interval, c_reltime, 'Conversion successfully!', 'Conversion failed!') from tb_test;
+----
+-- testcase - fix o compatibility of a_style_coerce
+----
+
+-- 1. return type
+set sql_beta_feature = 'a_style_coerce';
+select pg_typeof(decode(1, 1, 1, '1'));
+select pg_typeof(decode(1, 1, '1', 1));
+select pg_typeof(case 1 when 1 then 1 else '1' end);
+select pg_typeof(case 1 when 1 then '1' else 1 end);
+
set sql_beta_feature = 'none';
+select pg_typeof(decode(1, 1, 1, '1'));
+select pg_typeof(decode(1, 1, '1', 1));
+select pg_typeof(case 1 when 1 then 1 else '1' end);
+select pg_typeof(case 1 when 1 then '1' else 1 end);
+
+-- 2. operator match
+set sql_beta_feature = 'a_style_coerce';
+select decode(1, '1.0', 'same', 'different');
+select decode('1.0', 1, 'same', 'different');
+select decode(1, '1.0'::text, 'same', 'different');
+select decode('1.0'::text, 1, 'same', 'different');
+select case 1 when '1.0' then 'same' else 'different' end;
+select case '1.0' when 1 then 'same' else 'different' end;
+
+set sql_beta_feature = 'none';
+select decode(1, '1.0', 'same', 'different');
+select decode('1.0', 1, 'same', 'different');
+select decode(1, '1.0'::text, 'same', 'different');
+select decode('1.0'::text, 1, 'same', 'different');
+select case 1 when '1.0' then 'same' else 'different' end;
+select case '1.0' when 1 then 'same' else 'different' end;
\c regression
clean connection to all force for database decode_compatibility;
diff --git a/src/test/regress/sql/event.sql b/src/test/regress/sql/event.sql
new file mode 100644
index 000000000..da2469bdc
--- /dev/null
+++ b/src/test/regress/sql/event.sql
@@ -0,0 +1,333 @@
+drop database if exists event_b;
+create database event_b with dbcompatibility 'b';
+\c event_b
+create user event_a sysadmin password 'event_123';
+create user event_b sysadmin password 'event_123';
+--CREATE EVENT
+--Schedule Parameter Test
+--CHECK Schedule AT .. situation
+create event IF NOT EXISTS ee11 on schedule at '2022-12-09 17:24:11' disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at sysdate disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at CURRENT_DATE disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at CURRENT_TIME disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at CURRENT_TIME (1) disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at CURRENT_TIMESTAMP disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at CURRENT_TIMESTAMP (1) disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at LOCALTIME disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at LOCALTIME (1) disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at LOCALTIMESTAMP disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at LOCALTIMESTAMP (1) disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at LOCALTIMESTAMP (1) disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at now() disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at CURRENT_TIMESTAMP + interval 1 minute disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at SYSDATE + interval 10 second disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at SYSDATE + interval 0.5 second disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at now() + interval 1 hour disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at LOCALTIMESTAMP + interval '00:00' minute to second disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at now() + interval 1 year + interval '00:00' minute to second disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at now() + interval 666666666666666666666666666667 year + interval '00:00' minute to second disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at sysdate + interval 1234567890 second + interval 1234567890 minute disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+create event IF NOT EXISTS ee11 on schedule at sysdate + interval 1.5 second + interval 1.33 minute disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists ee11;
+
+--CHECK Schedule EVERY ..situation
+create event IF NOT EXISTS evtest on schedule every 1 minute disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists evtest;
+create event IF NOT EXISTS evtest on schedule every '00:30' minute to second disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists evtest;
+create event IF NOT EXISTS evtest on schedule every 1 minute starts sysdate disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists evtest;
+create event IF NOT EXISTS evtest on schedule every 1 minute ends sysdate + interval 1 hour disable do insert into t values(0);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists evtest;
+create event IF NOT EXISTS evtest on schedule every 1 minute starts sysdate + interval 1 day ends now() + interval 1 year disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists evtest;
+create event IF NOT EXISTS evtest on schedule every 1 minute starts sysdate + interval 1 day + interval '00:99' minute to second disable do insert into t values(0);
+select pg_sleep(0.2);
+select job_name, nspname from pg_job where dbname='event_b';
+drop event if exists evtest;
+
+
+--if not exists
+create event e on schedule every 1 month disable do select 1;
+select pg_sleep(0.2);
+create event e on schedule at sysdate disable do select 1;
+select pg_sleep(0.2);
+create event if not exists e on schedule at sysdate disable do select 1;
+select pg_sleep(0.2);
+drop event e;
+
+--auto_drop
+drop event e;
+create event e on schedule at sysdate disable do select 1;
+select pg_sleep(0.2);
+drop event e;
+
+create event e on schedule at sysdate on completion not preserve disable do select 1;
+select pg_sleep(0.2);
+drop event e;
+
+create event e on schedule at sysdate on completion preserve disable do select 1;
+drop event e;
+
+--job_status
+create event e on schedule every 1 hour do select 1;
+drop event e;
+create event e on schedule every 1 hour enable do select 1;
+drop event e;
+create event e on schedule every 1 hour disable do select 1;
+drop event e;
+create event e on schedule every 1 hour disable on slave do select 1;
+drop event e;
+
+--comment
+create event e on schedule at sysdate disable do select 1;
+select pg_sleep(0.2);
+drop event e;
+create event e on schedule at sysdate disable comment '======' do select 1;
+select pg_sleep(0.2);
+drop event e;
+create event e on schedule at sysdate disable comment 'fsdfjksadfhkjsfafkjsdfhjkahfdsknvxhguiyeurfbsdbccguyaHUFAWEFKSJBFCNJNDAgudagsHJBHDSBHJFBSAHBkjbhjbhjBHJBUbhbhBYGUIOInkb' do select 1;
+select pg_sleep(0.2);
+drop event e;
+create event e on schedule at now() disable
+comment '=================================================================================================
+==========================================================================================================
+==========================================================================================================
+==========================================================================================================
+==========================================================================================================
+'
+do select 1;
+select pg_sleep(0.2);
+drop event e;
+
+--sql body
+--abort
+CREATE TABLE customer_demographics_t1
+(
+ CD_DEMO_SK INTEGER NOT NULL,
+ CD_GENDER CHAR(1) ,
+ CD_MARITAL_STATUS CHAR(1) ,
+ CD_EDUCATION_STATUS CHAR(20) ,
+ CD_PURCHASE_ESTIMATE INTEGER ,
+ CD_CREDIT_RATING CHAR(10) ,
+ CD_DEP_COUNT INTEGER ,
+ CD_DEP_EMPLOYED_COUNT INTEGER ,
+ CD_DEP_COLLEGE_COUNT INTEGER
+)
+WITH (ORIENTATION = COLUMN,COMPRESSION=MIDDLE)
+;
+INSERT INTO customer_demographics_t1 VALUES(1920801,'M', 'U', 'DOCTOR DEGREE', 200, 'GOOD', 1, 0,0);
+SELECT * FROM customer_demographics_t1 WHERE cd_demo_sk = 1920801;
+START TRANSACTION;
+UPDATE customer_demographics_t1 SET cd_education_status= 'Unknown';
+SELECT * FROM customer_demographics_t1 WHERE cd_demo_sk = 1920801;
+create event e on schedule at sysdate do ABORT;
+SELECT * FROM customer_demographics_t1 WHERE cd_demo_sk = 1920801;
+DROP TABLE customer_demographics_t1;
+ABORT;
+
+--CALL
+CREATE FUNCTION func_add_sql(num1 integer, num2 integer) RETURN integer
+AS
+BEGIN
+RETURN num1 + num2;
+END;
+/
+create event e on schedule at sysdate disable do CALL func_add_sql(1, 3);
+DROP FUNCTION func_add_sql;
+drop event e;
+
+--ALTER EVENT
+--alter schedule
+\c event_b
+drop event e;
+create definer=event_a event e on schedule at '2023-01-16 21:05:40' disable do select 1;
+show events where job_name='e';
+alter definer=event_a event e on schedule at '2023-01-16 21:05:40' + interval 1 year;
+show events where job_name='e';
+alter definer=event_a event e on schedule every 1 year;
+show events where job_name='e';
+alter definer=event_a event e on schedule every 0.5 minute starts '2023-01-16 21:05:40' + interval '00:50' minute to second;
+show events where job_name='e';
+alter definer=event_a event e on schedule at '2023-01-16 21:05:40' + interval 500 second;
+show events where job_name='e';
+drop event e;
+
+--alter auto_drop
+drop event e;
+create definer=event_a event e on schedule at '2023-01-16 21:05:40' disable do select 1;
+show events where job_name='e';
+select * from gs_job_attribute where job_name='e' and attribute_name='auto_drop';
+drop event e;
+create definer=event_a event e on schedule at '2023-01-16 21:05:40' on completion preserve disable do select 1;
+show events where job_name='e';
+select * from gs_job_attribute where job_name='e' and attribute_name='auto_drop';
+alter definer=event_a event e on completion not preserve;
+show events where job_name='e';
+select * from gs_job_attribute where job_name='e' and attribute_name='auto_drop';
+alter definer=event_a event e on completion preserve;
+show events where job_name='e';
+select * from gs_job_attribute where job_name='e' and attribute_name='auto_drop';
+drop event e;
+
+--alter event_name
+drop event e;
+create event e on schedule at '2023-01-16 21:05:40' disable do select 1;
+select job_name, nspname from pg_job where dbname='event_b';
+alter event e rename to e_new;
+select job_name, nspname from pg_job where dbname='event_b';
+select what,job_name from pg_job_proc where job_name='e_new';
+alter event e_new rename to e;
+select job_name, nspname from pg_job where dbname='event_b';
+select what,job_name from pg_job_proc where job_name='e';
+drop event e;
+
+--alter status
+drop table if exists a;
+create table a(a int);
+create event e on schedule at '2023-01-16 21:05:40' disable do insert into a values(0);
+select * from a;
+alter event e on schedule every 1 year enable do insert into a values(0);
+select * from a;
+truncate table a;
+alter event e disable;
+select * from a;
+drop event e;
+create event e on schedule every 1 minute starts '3000-01-16 21:05:40' do select 1;
+select enable from pg_job where job_name='e';
+alter event e disable;
+select enable from pg_job where job_name='e';
+alter event e enable;
+select enable from pg_job where job_name='e';
+drop event e;
+
+--Alter event combination test.
+drop event e;
+create event e on schedule at '2023-01-16 21:05:40' disable do select 1;
+alter definer=event_b event e on schedule every 1 year ends '2023-01-16 21:05:40' + interval 1 year;
+alter event e disable;
+alter event e do select 2;
+alter event e rename to ee comment 'test ee' do select sysdate;
+alter event ee comment '========test=========';
+alter event ee on schedule at '2023-01-16 21:05:40' + interval 1 year on completion preserve rename to test_e;
+drop event if exists test_e;
+
+--Test owner
+create user evtest_owner password 'event_123';
+create event e on schedule at sysdate disable do select 1;
+alter definer=evtest_owner event e;
+select log_user, priv_user from pg_job where job_name='e';
+alter event e rename to ee;
+alter definer=evtest_owner event ee rename to e;
+select log_user, priv_user from pg_job where job_name='e';
+create definer=evtest_owner event e_a on schedule at sysdate disable do select 1;
+select log_user, priv_user from pg_job where job_name='e_a';
+alter event e_a rename to ea;
+alter definer=evtest_owner event ea rename to e_a;
+select log_user, priv_user from pg_job where job_name='e_a';
+select log_user, priv_user from pg_job where dbname='event_b';
+drop user evtest_owner;
+select log_user, priv_user from pg_job where dbname='event_b';
+select * from gs_job_attribute where job_name='e' or job_name='e_a';
+
+--SHOW EVENTS
+drop event if exists e1;
+create definer=event_a event e1 on schedule at '2023-01-16 21:05:40' disable do select 1;
+
+select job_name, nspname from pg_job where dbname='event_b';
+show events in a;
+show events from a;
+show events like 'e';
+show events like 'e%';
+show events like 'e_';
+show events where job_name='e1';
+drop event if exists e1;
+
+--test sql help
+\h CREATE EVENT
+\h ALTER EVENT
+\h DROP EVENT
+\h SHOW EVENTS
+
+drop table if exists event_a.a;
+drop table if exists event_b.t;
+drop schema if exists event_a;
+drop schema if exists event_b;
+drop user if exists event_a;
+drop user if exists event_b;
+\c regression
+drop database if exists event_b;
diff --git a/src/test/regress/sql/hw_partition_b_db.sql b/src/test/regress/sql/hw_partition_b_db.sql
new file mode 100644
index 000000000..625572d15
--- /dev/null
+++ b/src/test/regress/sql/hw_partition_b_db.sql
@@ -0,0 +1,1493 @@
+CREATE SCHEMA partition_a_db_schema;
+SET CURRENT_SCHEMA TO partition_a_db_schema;
+-- -----------------------------------test partitions clause with A compatibility
+-- range with partitions clause
+CREATE TABLE t_range_partitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a,b) PARTITIONS 3;
+CREATE TABLE t_range_partitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a,b) PARTITIONS 3
+(
+ PARTITION p1 VALUES LESS THAN (100,100),
+ PARTITION p2 VALUES LESS THAN (200,200),
+ PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE)
+);
+-- list with partitions clause
+CREATE TABLE t_list_partitions_clause (a int, b int, c int)
+PARTITION BY LIST (a,b) PARTITIONS 3;
+CREATE TABLE t_list_partitions_clause (a int, b int, c int)
+PARTITION BY LIST (a,b) PARTITIONS 3
+(
+ PARTITION p1 VALUES ((0,0)),
+ PARTITION p2 VALUES ((1,1), (1,2)),
+ PARTITION p3 VALUES ((2,1), (2,2), (2,3))
+);
+-- hash with partitions clause
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY HASH(a) PARTITIONS 3;
+DROP TABLE t_hash_partitions_clause;
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY HASH(a) PARTITIONS 0;
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY HASH(a) PARTITIONS -1;
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY HASH(a) PARTITIONS 2.5;
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY HASH(a) PARTITIONS '5';
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY HASH(a) PARTITIONS 1048576;
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY HASH(a) PARTITIONS 3
+(
+ PARTITION p1,
+ PARTITION p2,
+ PARTITION p3
+);
+DROP TABLE t_hash_partitions_clause;
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY HASH(a) PARTITIONS 4
+(
+ PARTITION p1,
+ PARTITION p2,
+ PARTITION p3
+);
+
+-- range-range with subpartitions clause
+CREATE TABLE t_range_range_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) PARTITIONS 2 SUBPARTITION BY RANGE(c)
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1 VALUES LESS THAN (100),
+ SUBPARTITION p1sub2 VALUES LESS THAN (MAXVALUE)
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1 VALUES LESS THAN (100),
+ SUBPARTITION p2sub2 VALUES LESS THAN (MAXVALUE)
+ )
+);
+CREATE TABLE t_range_range_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) SUBPARTITION BY RANGE(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1 VALUES LESS THAN (100),
+ SUBPARTITION p1sub2 VALUES LESS THAN (MAXVALUE)
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1 VALUES LESS THAN (100),
+ SUBPARTITION p2sub2 VALUES LESS THAN (MAXVALUE)
+ )
+);
+-- range-list with subpartitions clause
+CREATE TABLE t_range_list_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) PARTITIONS 2 SUBPARTITION BY LIST(c)
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1 VALUES (0),
+ SUBPARTITION p1sub2 VALUES (1)
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1 VALUES (0),
+ SUBPARTITION p2sub2 VALUES (1)
+ )
+);
+CREATE TABLE t_range_list_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) SUBPARTITION BY LIST(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1 VALUES (0),
+ SUBPARTITION p1sub2 VALUES (1)
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1 VALUES (0),
+ SUBPARTITION p2sub2 VALUES (1)
+ )
+);
+-- range-hash with subpartitions clause
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_range_hash_subpartitions_clause;
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+DROP TABLE t_range_hash_subpartitions_clause;
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 0
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 0.2E+1
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2.5
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) SUBPARTITION BY HASH(c) SUBPARTITIONS '5'
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 1048576
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+
+-- list-range with subpartitions clause
+CREATE TABLE t_hash_range_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST (a) SUBPARTITION BY RANGE(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES (100) (
+ SUBPARTITION p1sub1 VALUES LESS THAN (100),
+ SUBPARTITION p1sub2 VALUES LESS THAN (MAXVALUE,MAXVALUE)
+ ),
+ PARTITION p2 VALUES (200) (
+ SUBPARTITION p2sub1 VALUES LESS THAN (100),
+ SUBPARTITION p2sub2 VALUES LESS THAN (MAXVALUE,MAXVALUE)
+ )
+);
+-- list-list with subpartitions clause
+CREATE TABLE t_hash_list_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST (a) SUBPARTITION BY LIST(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES (100) (
+ SUBPARTITION p1sub1 VALUES (0),
+ SUBPARTITION p1sub2 VALUES (1)
+ ),
+ PARTITION p2 VALUES (200) (
+ SUBPARTITION p2sub1 VALUES (0),
+ SUBPARTITION p2sub2 VALUES (1)
+ )
+);
+-- list-hash with subpartitions clause
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 VALUES (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES (100),
+ PARTITION p2 VALUES (200)
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST (a) PARTITIONS 2 SUBPARTITION BY HASH(c) SUBPARTITIONS 2;
+
+-- hash-range with subpartitions clause
+CREATE TABLE t_hash_range_subpartitions_clause (a int, b int, c int)
+PARTITION BY HASH (a) PARTITIONS 2 SUBPARTITION BY RANGE(c)
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1 VALUES LESS THAN (100),
+ SUBPARTITION p1sub2 VALUES LESS THAN (MAXVALUE)
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1 VALUES LESS THAN (100),
+ SUBPARTITION p2sub2 VALUES LESS THAN (MAXVALUE)
+ )
+);
+DROP TABLE t_hash_range_subpartitions_clause;
+-- hash-list with subpartitions clause
+CREATE TABLE t_hash_list_subpartitions_clause (a int, b int, c int)
+PARTITION BY HASH (a) PARTITIONS 2 SUBPARTITION BY LIST(c)
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1 VALUES (0),
+ SUBPARTITION p1sub2 VALUES (1)
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1 VALUES (0),
+ SUBPARTITION p2sub2 VALUES (1)
+ )
+);
+DROP TABLE t_hash_list_subpartitions_clause;
+-- hash-hash with subpartitions clause
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY HASH (a) PARTITIONS 2 SUBPARTITION BY HASH(c)
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY HASH (a) PARTITIONS 2 SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY HASH (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY HASH (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1,
+ PARTITION p2
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+CREATE TABLE T_HASH_HASH_SUBPARTITIONS_CLAUSE (a int, b int, c int)
+PARTITION BY HASH(a) PARTITIONS 2 SUBPARTITION BY HASH(c) SUBPARTITIONS 2;
+SELECT pg_get_tabledef('T_HASH_HASH_SUBPARTITIONS_CLAUSE');
+DROP TABLE T_HASH_HASH_SUBPARTITIONS_CLAUSE;
+CREATE TABLE T_HASH_HASH_SUBPARTITIONS_CLAUSE (a int, b int, c int)
+PARTITION BY HASH (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2;
+DROP TABLE T_HASH_HASH_SUBPARTITIONS_CLAUSE;
+CREATE TABLE T_HASH_HASH_SUBPARTITIONS_CLAUSE (a int, b int, c int)
+PARTITION BY HASH (a) PARTITIONS 2 SUBPARTITION BY HASH(c);
+DROP TABLE T_HASH_HASH_SUBPARTITIONS_CLAUSE;
+CREATE TABLE T_HASH_HASH_SUBPARTITIONS_CLAUSE (a int, b int, c int)
+PARTITION BY HASH (a) SUBPARTITION BY HASH(c);
+SELECT pg_get_tabledef('T_HASH_HASH_SUBPARTITIONS_CLAUSE');
+DROP TABLE T_HASH_HASH_SUBPARTITIONS_CLAUSE;
+CREATE TABLE T_HASH_PARTITIONS_CLAUSE (a int, b int, c int)
+PARTITION BY HASH (a) PARTITIONS 2;
+SELECT pg_get_tabledef('T_HASH_PARTITIONS_CLAUSE');
+DROP TABLE T_HASH_PARTITIONS_CLAUSE;
+CREATE TABLE T_HASH_PARTITIONS_CLAUSE (a int, b int, c int)
+PARTITION BY HASH (a);
+SELECT pg_get_tabledef('T_HASH_PARTITIONS_CLAUSE');
+DROP TABLE T_HASH_PARTITIONS_CLAUSE;
+
+-- -----------------------------------test A compatibility syntax error
+CREATE TABLE t_multi_keys_range (a int, b int, c int)
+PARTITION BY RANGE(a)
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200),
+ PARTITION p3 VALUES LESS THAN (300),
+ PARTITION p4 VALUES LESS THAN MAXVALUE
+);
+
+CREATE TABLE t_multi_keys_range (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a,b)
+(
+ PARTITION p1 VALUES LESS THAN (100,100),
+ PARTITION p2 VALUES LESS THAN (200,200),
+ PARTITION p3 VALUES LESS THAN (300,300),
+ PARTITION p4 VALUES LESS THAN (400,MAXVALUE),
+ PARTITION p5 VALUES LESS THAN (MAXVALUE,MAXVALUE)
+);
+
+CREATE TABLE t_multi_keys_range (a int, b int, c int)
+PARTITION BY RANGE (a,b) PARTITIONS 5
+(
+ PARTITION p1 VALUES LESS THAN (100,100),
+ PARTITION p2 VALUES LESS THAN (200,200),
+ PARTITION p3 VALUES LESS THAN (300,300),
+ PARTITION p4 VALUES LESS THAN (400,MAXVALUE),
+ PARTITION p5 VALUES LESS THAN (MAXVALUE,MAXVALUE)
+);
+
+CREATE TABLE t_multi_keys_list (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b)
+(
+ PARTITION p1 VALUES IN ( (0,0), (NULL,NULL) ),
+ PARTITION p2 VALUES IN ( (0,1), (0,2), (0,3), (1,1), (1,2) ),
+ PARTITION p3 VALUES IN ( (1,0), (2,0), (2,1), (3,0), (3,1) ),
+ PARTITION p4 VALUES IN ( (1,3), (2,2), (2,3), (3,2), (3,3) )
+);
+
+CREATE TABLE t_multi_keys_list (a int, b int, c int)
+PARTITION BY LIST (a,b)
+(
+ PARTITION p1 VALUES IN ( (0,0), (NULL,NULL) ),
+ PARTITION p2 VALUES IN ( (0,1), (0,2), (0,3), (1,1), (1,2) ),
+ PARTITION p3 VALUES IN ( (1,0), (2,0), (2,1), (3,0), (3,1) ),
+ PARTITION p4 VALUES IN ( (1,3), (2,2), (2,3), (3,2), (3,3) )
+);
+
+CREATE TABLE t_multi_keys_list (a int, b int, c int)
+PARTITION BY LIST (a,b) PARTITIONS 5
+(
+ PARTITION p1 VALUES ( (0,0), (NULL,NULL) ),
+ PARTITION p2 VALUES ( (0,1), (0,2), (0,3), (1,1), (1,2) ),
+ PARTITION p3 VALUES ( (1,0), (2,0), (2,1), (3,0), (3,1) ),
+ PARTITION p4 VALUES ( (1,3), (2,2), (2,3), (3,2), (3,3) )
+);
+
+CREATE TABLE t_part_by_key (a int, b int, c int)
+PARTITION BY KEY(a)
+(
+ PARTITION p1,
+ PARTITION p2,
+ PARTITION p3,
+ PARTITION p4,
+ PARTITION p5
+);
+
+CREATE TABLE t_multi_keys_list_tbspc (a int, b varchar(4), c int)
+PARTITION BY LIST (a,b)
+(
+ PARTITION p1 VALUES ( (0,NULL) ) TABLESPACE = pg_default,
+ PARTITION p2 VALUES ( (0,'1'), (0,'2'), (0,'3'), (1,'1'), (1,'2') )
+);
+DROP TABLE t_multi_keys_list_tbspc;
+-- -----------------------------------test multi list keys with A compatibility
+CREATE TABLE t_multi_keys_list_default (a int, b int, c int)
+PARTITION BY LIST (a,b)
+(
+ PARTITION p1 VALUES ( DEFAULT ),
+ PARTITION p2 VALUES ( (0,1), (0,2), (0,3), (1,1), (1,2) ),
+ PARTITION p3 VALUES ( (NULL,0), (2,1) ),
+ PARTITION p4 VALUES ( (3,2), (NULL,NULL) ),
+ PARTITION pd VALUES ( DEFAULT )
+);
+CREATE TABLE t_multi_keys_list_default (a int, b varchar(4), c int)
+PARTITION BY LIST (a,b)
+(
+ PARTITION p1 VALUES ( (0,NULL) ),
+ PARTITION p2 VALUES ( (0,'1'), (0,'2'), (0,'3'), (1,'1'), (1,'2') ),
+ PARTITION p3 VALUES ( (NULL,'0'), (2,'1') ),
+ PARTITION p4 VALUES ( (3,'2'), (NULL,NULL) ),
+ PARTITION pd VALUES ( DEFAULT )
+);
+CREATE INDEX t_multi_keys_list_default_idx_l ON t_multi_keys_list_default(a,b,c) LOCAL;
+SELECT pg_get_tabledef('t_multi_keys_list_default'::regclass);
+
+INSERT INTO t_multi_keys_list_default VALUES(0,NULL,0);
+SELECT * FROM t_multi_keys_list_default PARTITION(p1) ORDER BY a,b;
+INSERT INTO t_multi_keys_list_default VALUES(0,'1',0);
+INSERT INTO t_multi_keys_list_default VALUES(0,'2',0);
+INSERT INTO t_multi_keys_list_default VALUES(0,'3',0);
+INSERT INTO t_multi_keys_list_default VALUES(1,'1',0);
+INSERT INTO t_multi_keys_list_default VALUES(1,'2',0);
+SELECT * FROM t_multi_keys_list_default PARTITION(p2) ORDER BY a,b;
+INSERT INTO t_multi_keys_list_default VALUES(NULL,0,0);
+INSERT INTO t_multi_keys_list_default VALUES(2,'1',0);
+SELECT * FROM t_multi_keys_list_default PARTITION(p3) ORDER BY a,b;
+INSERT INTO t_multi_keys_list_default VALUES(3,'2',0);
+INSERT INTO t_multi_keys_list_default VALUES(NULL,NULL,0);
+SELECT * FROM t_multi_keys_list_default PARTITION(p4) ORDER BY a,b;
+INSERT INTO t_multi_keys_list_default VALUES(4,'4',4);
+SELECT * FROM t_multi_keys_list_default PARTITION(pd) ORDER BY a,b;
+
+EXPLAIN (costs false)
+SELECT a FROM t_multi_keys_list_default WHERE a IS NULL;
+SELECT a FROM t_multi_keys_list_default WHERE a IS NULL;
+EXPLAIN (costs false)
+SELECT a FROM t_multi_keys_list_default WHERE a = 0;
+SELECT a FROM t_multi_keys_list_default WHERE a = 0;
+EXPLAIN (costs false)
+SELECT b FROM t_multi_keys_list_default WHERE b IS NULL;
+SELECT b FROM t_multi_keys_list_default WHERE b IS NULL;
+EXPLAIN (costs false)
+SELECT b FROM t_multi_keys_list_default WHERE b = '1';
+SELECT b FROM t_multi_keys_list_default WHERE b = '1';
+EXPLAIN (costs false)
+SELECT a FROM t_multi_keys_list_default WHERE a = 4;
+SELECT a FROM t_multi_keys_list_default WHERE a = 4;
+EXPLAIN (costs false)
+SELECT a,b FROM t_multi_keys_list_default WHERE a < 1 ORDER BY 1,2;
+SELECT a,b FROM t_multi_keys_list_default WHERE a < 1 ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE (a,b) = (0,'1');
+SELECT * FROM t_multi_keys_list_default WHERE (a,b) = (0,'1');
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE (a,b) = (NULL,'0');
+SELECT * FROM t_multi_keys_list_default WHERE (a,b) = (NULL,'0');
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND b = '0';
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND b = '0';
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE (a IS NULL AND a = 0) AND b = '0' ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE (a IS NULL AND a = 0) AND b = '0' ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE (a IS NULL OR a = 3) AND b = '2' ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE (a IS NULL OR a = 3) AND b = '2' ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE (a IS NULL OR a IS NOT NULL) AND b = '0' ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE (a IS NULL OR a IS NOT NULL) AND b = '0' ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE b IS NOT NULL AND a = 0 ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE b IS NOT NULL AND a = 0 ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE (b IS NOT NULL AND b = '1') AND a = 2 ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE (b IS NOT NULL AND b = '1') AND a = 2 ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE (b IS NOT NULL OR b = '0') AND a = 0 ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE (b IS NOT NULL OR b = '0') AND a = 0 ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND b IS NULL ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND b IS NULL ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND b IS NOT NULL ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND b IS NOT NULL ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL AND b IS NULL ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL AND b IS NULL ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL AND b IS NOT NULL ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL AND b IS NOT NULL ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL OR b IS NULL ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL OR b IS NULL ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL OR b IS NOT NULL ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL OR b IS NOT NULL ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL OR b IS NULL ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL OR b IS NULL ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL OR b IS NOT NULL ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL OR b IS NOT NULL ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL OR a IS NULL ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL OR a IS NULL ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND a IS NULL ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND a IS NULL ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL OR a IS NOT NULL ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL OR a IS NOT NULL ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL AND a IS NOT NULL ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE a IS NOT NULL AND a IS NOT NULL ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL OR a IS NOT NULL ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL OR a IS NOT NULL ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND a IS NOT NULL ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE a IS NULL AND a IS NOT NULL ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a = 0 OR b = '0' ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE a = 0 OR b = '0' ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE (a,b) IN ((NULL,'0'), (3,'2')) ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE (a,b) IN ((NULL,'0'), (3,'2')) ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE (a,b) =ANY(ARRAY[(2,'1'::varchar), (3,'2'::varchar)]) ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE (a,b) =ANY(ARRAY[(2,'1'::varchar), (3,'2'::varchar)]) ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_default WHERE a =ANY(ARRAY[2, 3]) ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_default WHERE a =ANY(ARRAY[2, 3]) ORDER BY 1,2;
+PREPARE part_bdb_stmt(varchar) as SELECT a,b FROM t_multi_keys_list_default WHERE b = $1 ORDER BY 1,2;
+EXPLAIN (costs false)
+EXECUTE part_bdb_stmt('3');
+EXECUTE part_bdb_stmt('1');
+EXECUTE part_bdb_stmt('2');
+PREPARE part_bdb_stmt1(int) as SELECT a,b FROM t_multi_keys_list_default WHERE a != $1 ORDER BY 1,2;
+EXPLAIN (costs false)
+EXECUTE part_bdb_stmt1(3);
+EXECUTE part_bdb_stmt1(1);
+EXECUTE part_bdb_stmt1(2);
+PREPARE part_bdb_stmt2(int) as SELECT a,b FROM t_multi_keys_list_default WHERE a >= $1 ORDER BY 1,2;
+EXPLAIN (costs false)
+EXECUTE part_bdb_stmt2(3);
+EXECUTE part_bdb_stmt2(1);
+EXECUTE part_bdb_stmt2(2);
+PREPARE part_bdb_stmt3(int, varchar) as SELECT a,b FROM t_multi_keys_list_default WHERE (a,b) = ($1,$2);
+EXPLAIN (costs false)
+EXECUTE part_bdb_stmt3(0,'1');
+EXECUTE part_bdb_stmt3(0,'1');
+EXECUTE part_bdb_stmt3(3,'2');
+
+UPDATE t_multi_keys_list_default SET a=2, b='1' where a=1 and b='1';
+SELECT * FROM t_multi_keys_list_default PARTITION(p3) ORDER BY a,b;
+UPDATE t_multi_keys_list_default SET a=NULL, b='0' where a=1 and b='2';
+SELECT * FROM t_multi_keys_list_default PARTITION(p3) ORDER BY a,b;
+
+EXPLAIN (costs false)
+DELETE t_multi_keys_list_default PARTITION(p3) where b = '0';
+DELETE t_multi_keys_list_default PARTITION(p3) where b = '0';
+SELECT * FROM t_multi_keys_list_default PARTITION(p3) ORDER BY a,b;
+EXPLAIN (costs false)
+DELETE t_multi_keys_list_default PARTITION FOR(0,NULL);
+DELETE t_multi_keys_list_default PARTITION FOR(0,NULL);
+SELECT * FROM t_multi_keys_list_default PARTITION(p1) ORDER BY a,b;
+EXPLAIN (costs false)
+DELETE t_multi_keys_list_default PARTITION FOR(0,'3');
+DELETE t_multi_keys_list_default PARTITION FOR(0,'3');
+SELECT * FROM t_multi_keys_list_default PARTITION(p2) ORDER BY a,b;
+-- alter table partition
+CREATE INDEX test_multi_list_key_gi on t_multi_keys_list_default(c);
+CREATE TABLESPACE part_adb_temp_tbspc RELATIVE LOCATION 'tablespace/part_adb_temp_tbspc';
+ALTER TABLE t_multi_keys_list_default MOVE PARTITION FOR(0,NULL) TABLESPACE part_adb_temp_tbspc;
+SELECT pg_get_tabledef('t_multi_keys_list_default'::regclass);
+
+CREATE TABLE t_alter_partition_temp (a int, b varchar(4), c int);
+INSERT INTO t_alter_partition_temp VALUES(NULL,'0',1);
+INSERT INTO t_alter_partition_temp VALUES(2,'1',2);
+CREATE INDEX t_alter_partition_temp_idx_l ON t_alter_partition_temp(a,b,c);
+SELECT * FROM t_alter_partition_temp ORDER BY a,b,c;
+SELECT * FROM t_multi_keys_list_default PARTITION(p3) ORDER BY a,b,c;
+ALTER TABLE t_multi_keys_list_default EXCHANGE PARTITION (p3) WITH TABLE t_alter_partition_temp UPDATE GLOBAL INDEX;
+SELECT * FROM t_alter_partition_temp ORDER BY a,b,c;
+SELECT * FROM t_multi_keys_list_default PARTITION(p3) ORDER BY a,b,c;
+DROP TABLE IF EXISTS t_alter_partition_temp;
+
+ALTER TABLE t_multi_keys_list_default ADD PARTITION p5 VALUES ((2,1));
+
+ALTER TABLE t_multi_keys_list_default DROP PARTITION FOR (1,'5');
+ALTER TABLE t_multi_keys_list_default DROP PARTITION FOR (2,'1') UPDATE GLOBAL INDEX;
+SELECT * FROM t_multi_keys_list_default PARTITION FOR (1,'5') ORDER BY a,b;
+SELECT * FROM t_multi_keys_list_default PARTITION FOR (2,'1') ORDER BY a,b;
+ALTER TABLE t_multi_keys_list_default TRUNCATE PARTITION FOR (NULL,NULL) UPDATE GLOBAL INDEX;
+SELECT * FROM t_multi_keys_list_default PARTITION FOR (NULL,NULL) ORDER BY a,b;
+ALTER TABLE t_multi_keys_list_default RENAME PARTITION FOR (0,NULL) TO p0;
+ALTER TABLE t_multi_keys_list_default ADD PARTITION pd VALUES (DEFAULT);
+SELECT pg_get_tabledef('t_multi_keys_list_default'::regclass);
+
+-- test views
+SELECT table_name,partitioning_type,partition_count,partitioning_key_count,subpartitioning_type FROM MY_PART_TABLES WHERE table_name = 't_multi_keys_list_default' ORDER BY 1;
+SELECT table_name,partition_name,high_value,subpartition_count FROM MY_TAB_PARTITIONS WHERE table_name = 't_multi_keys_list_default' ORDER BY 1,2;
+SELECT table_name,partition_name,subpartition_name,high_value,high_value_length FROM MY_TAB_SUBPARTITIONS WHERE table_name = 't_multi_keys_list_default' ORDER BY 1,2,3;
+SELECT table_name,index_name,partition_count,partitioning_key_count,partitioning_type,subpartitioning_type FROM MY_PART_INDEXES WHERE table_name = 't_multi_keys_list_default' ORDER BY 1,2;
+SELECT index_name,partition_name,high_value,high_value_length FROM MY_IND_PARTITIONS WHERE index_name = 't_multi_keys_list_default_idx_l' ORDER BY 1,2;
+SELECT index_name,partition_name,subpartition_name,high_value,high_value_length FROM MY_IND_SUBPARTITIONS WHERE index_name = 't_multi_keys_list_default_idx_l' ORDER BY 1,2,3;
+
+-- test partition key value and datatype not matched
+CREATE TABLE t_single_key_list_value (a int, b int, c int)
+PARTITION BY LIST (a)
+(
+ PARTITION p1 VALUES ( 0 ),
+ PARTITION p2 VALUES ( 1 ),
+ PARTITION p3 VALUES ( 2, date '12-10-2010' )
+); -- ERROR
+CREATE TABLE t_multi_keys_list_value (a int, b int, c int)
+PARTITION BY LIST (a,b)
+(
+ PARTITION p1 VALUES ( (0,0) ),
+ PARTITION p2 VALUES ( (0,1) ),
+ PARTITION p3 VALUES ( (2,1), (NULL,date '12-10-2010') )
+); -- ERROR
+
+DROP TABLE IF EXISTS t_multi_keys_list_default;
+DROP TABLESPACE part_adb_temp_tbspc;
+DROP SCHEMA partition_a_db_schema CASCADE;
+
+-- -----------------------------------test with B compatibility
+create database part_bdb WITH ENCODING 'UTF-8' dbcompatibility 'B';
+\c part_bdb
+CREATE SCHEMA partition_b_db_schema;
+SET CURRENT_SCHEMA TO partition_b_db_schema;
+
+-- -----------------------------------test partitions clause with B compatibility
+-- range with partitions clause
+CREATE TABLE t_range_partitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a,b) PARTITIONS 3;
+CREATE TABLE t_range_partitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a,b) PARTITIONS 3
+(
+ PARTITION p1 VALUES LESS THAN (100,100),
+ PARTITION p2 VALUES LESS THAN (200,200),
+ PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE)
+);
+DROP TABLE t_range_partitions_clause;
+CREATE TABLE t_range_partitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a,b) PARTITIONS 2
+(
+ PARTITION p1 VALUES LESS THAN (100,100),
+ PARTITION p2 VALUES LESS THAN (200,200),
+ PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE)
+);
+-- list with partitions clause
+CREATE TABLE t_list_partitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b) PARTITIONS 3;
+CREATE TABLE t_list_partitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b) PARTITIONS 3
+(
+ PARTITION p1 VALUES IN ((0,0)),
+ PARTITION p2 VALUES IN ((1,1), (1,2)),
+ PARTITION p3 VALUES IN ((2,1), (2,2), (2,3))
+);
+DROP TABLE t_list_partitions_clause;
+CREATE TABLE t_list_partitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b) PARTITIONS 1
+(
+ PARTITION p1 VALUES IN ((0,0)),
+ PARTITION p2 VALUES IN ((1,1), (1,2)),
+ PARTITION p3 VALUES IN ((2,1), (2,2), (2,3))
+);
+-- key with partitions clause
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY KEY(a) PARTITIONS 3;
+DROP TABLE t_hash_partitions_clause;
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY KEY(a) PARTITIONS 0;
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY KEY(a) PARTITIONS -1;
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY KEY(a) PARTITIONS 2.5;
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY KEY(a) PARTITIONS '5';
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY KEY(a) PARTITIONS 1048576;
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY KEY(a) PARTITIONS 3
+(
+ PARTITION p1,
+ PARTITION p2,
+ PARTITION p3
+);
+DROP TABLE t_hash_partitions_clause;
+CREATE TABLE t_hash_partitions_clause (a int, b int, c int)
+PARTITION BY KEY(a) PARTITIONS 4
+(
+ PARTITION p1,
+ PARTITION p2,
+ PARTITION p3
+);
+
+-- range-range with subpartitions clause
+CREATE TABLE t_range_range_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY RANGE(c)
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1 VALUES LESS THAN (100),
+ SUBPARTITION p1sub2 VALUES LESS THAN (MAXVALUE)
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1 VALUES LESS THAN (100),
+ SUBPARTITION p2sub2 VALUES LESS THAN (MAXVALUE)
+ )
+);
+DROP TABLE t_range_range_subpartitions_clause;
+CREATE TABLE t_range_range_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 3 SUBPARTITION BY RANGE(c)
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1 VALUES LESS THAN (100),
+ SUBPARTITION p1sub2 VALUES LESS THAN (MAXVALUE)
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1 VALUES LESS THAN (100),
+ SUBPARTITION p2sub2 VALUES LESS THAN (MAXVALUE)
+ )
+);
+-- range-list with subpartitions clause
+CREATE TABLE t_range_list_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY LIST(c)
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1 VALUES (0),
+ SUBPARTITION p1sub2 VALUES (1)
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1 VALUES (0),
+ SUBPARTITION p2sub2 VALUES (1)
+ )
+);
+DROP TABLE t_range_list_subpartitions_clause;
+CREATE TABLE t_range_list_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 1 SUBPARTITION BY LIST(c)
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1 VALUES (0),
+ SUBPARTITION p1sub2 VALUES (1)
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1 VALUES (0),
+ SUBPARTITION p2sub2 VALUES (1)
+ )
+);
+-- range-key with subpartitions clause
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY key(c)
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_range_hash_subpartitions_clause;
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY key(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_range_hash_subpartitions_clause;
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) SUBPARTITION BY key(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_range_hash_subpartitions_clause;
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) SUBPARTITION BY key(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+DROP TABLE t_range_hash_subpartitions_clause;
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) SUBPARTITION BY key(c) SUBPARTITIONS 0
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) SUBPARTITION BY key(c) SUBPARTITIONS 0.2E+1
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) SUBPARTITION BY key(c) SUBPARTITIONS 2.5
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) SUBPARTITION BY key(c) SUBPARTITIONS '5'
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) SUBPARTITION BY key(c) SUBPARTITIONS 1048576
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY KEY(c) SUBPARTITIONS 3
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2,
+ SUBPARTITION p1sub3
+ )
+);
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY KEY(c) SUBPARTITIONS 3
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2,
+ SUBPARTITION p1sub3
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2,
+ SUBPARTITION p2sub3,
+ SUBPARTITION p2sub4
+ )
+);
+
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY KEY(c) SUBPARTITIONS 3
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2,
+ SUBPARTITION p2sub3
+ )
+);
+DROP TABLE t_range_hash_subpartitions_clause;
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY KEY(c) SUBPARTITIONS 3
+(
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2,
+ SUBPARTITION p1sp0
+ )
+);
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY HASH(c) SUBPARTITIONS 11
+(
+ PARTITION p11111111111111111111111111111111111111111111111111111111111 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+CREATE TABLE t_range_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) PARTITIONS 2 SUBPARTITION BY HASH(c) SUBPARTITIONS 11
+(
+ PARTITION p1111111111111111111111111111111111111111111111111111111111 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN (200)
+);
+SELECT pg_get_tabledef('t_range_hash_subpartitions_clause'::regclass);
+DROP TABLE t_range_hash_subpartitions_clause;
+
+-- list-range with subpartitions clause
+CREATE TABLE t_hash_range_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS (a) PARTITIONS 2 SUBPARTITION BY RANGE(c)
+(
+ PARTITION p1 VALUES IN (100) (
+ SUBPARTITION p1sub1 VALUES LESS THAN (100),
+ SUBPARTITION p1sub2 VALUES LESS THAN (MAXVALUE)
+ ),
+ PARTITION p2 VALUES IN (200) (
+ SUBPARTITION p2sub1 VALUES LESS THAN (100),
+ SUBPARTITION p2sub2 VALUES LESS THAN (MAXVALUE)
+ )
+);
+DROP TABLE t_hash_range_subpartitions_clause;
+-- list-list with subpartitions clause
+CREATE TABLE t_hash_list_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS (a) PARTITIONS 2 SUBPARTITION BY LIST(c)
+(
+ PARTITION p1 VALUES IN (100) (
+ SUBPARTITION p1sub1 VALUES (0),
+ SUBPARTITION p1sub2 VALUES (1)
+ ),
+ PARTITION p2 VALUES IN (200) (
+ SUBPARTITION p2sub1 VALUES (0),
+ SUBPARTITION p2sub2 VALUES (1)
+ )
+);
+DROP TABLE t_hash_list_subpartitions_clause;
+-- list-key with subpartitions clause
+CREATE TABLE t_list_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS (a) PARTITIONS 2 SUBPARTITION BY KEY(c)
+(
+ PARTITION p1 VALUES IN (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 VALUES IN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_list_hash_subpartitions_clause;
+CREATE TABLE t_list_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS (a) PARTITIONS 2 SUBPARTITION BY KEY(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES IN (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 VALUES IN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_list_hash_subpartitions_clause;
+CREATE TABLE t_list_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS (a) SUBPARTITION BY KEY(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES IN (100) (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 VALUES IN (200) (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_list_hash_subpartitions_clause;
+CREATE TABLE t_list_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS (a) SUBPARTITION BY KEY(c) SUBPARTITIONS 2
+(
+ PARTITION p1 VALUES IN (100),
+ PARTITION p2 VALUES IN (200)
+);
+DROP TABLE t_list_hash_subpartitions_clause;
+CREATE TABLE t_list_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY LIST COLUMNS (a) SUBPARTITION BY KEY(c) SUBPARTITIONS 2;
+
+-- key-range with subpartitions clause
+CREATE TABLE t_hash_range_subpartitions_clause (a int, b int, c int)
+PARTITION BY KEY (a) PARTITIONS 2 SUBPARTITION BY RANGE(c)
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1 VALUES LESS THAN (100),
+ SUBPARTITION p1sub2 VALUES LESS THAN (MAXVALUE)
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1 VALUES LESS THAN (100),
+ SUBPARTITION p2sub2 VALUES LESS THAN (MAXVALUE)
+ )
+);
+DROP TABLE t_hash_range_subpartitions_clause;
+-- key-list with subpartitions clause
+CREATE TABLE t_hash_list_subpartitions_clause (a int, b int, c int)
+PARTITION BY KEY (a) PARTITIONS 2 SUBPARTITION BY LIST(c)
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1 VALUES (0),
+ SUBPARTITION p1sub2 VALUES (1)
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1 VALUES (0),
+ SUBPARTITION p2sub2 VALUES (1)
+ )
+);
+DROP TABLE t_hash_list_subpartitions_clause;
+-- key-hash with subpartitions clause
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY KEY (a) PARTITIONS 2 SUBPARTITION BY HASH(c)
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY KEY (a) PARTITIONS 2 SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY KEY (a) PARTITIONS 1 SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY KEY (a) PARTITIONS 2 SUBPARTITION BY HASH(c) SUBPARTITIONS 3
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY KEY (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1 (
+ SUBPARTITION p1sub1,
+ SUBPARTITION p1sub2
+ ),
+ PARTITION p2 (
+ SUBPARTITION p2sub1,
+ SUBPARTITION p2sub2
+ )
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+CREATE TABLE t_hash_hash_subpartitions_clause (a int, b int, c int)
+PARTITION BY KEY (a) SUBPARTITION BY HASH(c) SUBPARTITIONS 2
+(
+ PARTITION p1,
+ PARTITION p2
+);
+DROP TABLE t_hash_hash_subpartitions_clause;
+
+-- test the key of partition and subpartition is same column
+CREATE TABLE t_single_key_range_subpart(id int, birthdate int)
+ PARTITION BY RANGE (birthdate)
+ SUBPARTITION BY HASH (birthdate)
+ SUBPARTITIONS 2 (
+ PARTITION p0 VALUES LESS THAN (1990),
+ PARTITION p1 VALUES LESS THAN (2000),
+ PARTITION p2 VALUES LESS THAN MAXVALUE
+);
+DROP TABLE IF EXISTS t_single_key_range_subpart;
+
+-- --------------------------------------------------------test range columns syntax with B compatibility
+CREATE TABLE t_single_key_range (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a)
+(
+ PARTITION p1 VALUES LESS THAN (100) TABLESPACE = pg_default,
+ PARTITION p2 VALUES LESS THAN (200),
+ PARTITION p3 VALUES LESS THAN (300),
+ PARTITION p4 VALUES LESS THAN MAXVALUE
+);
+SELECT pg_get_tabledef('t_single_key_range'::regclass);
+DROP TABLE IF EXISTS t_single_key_range;
+
+-- error
+CREATE TABLE t_multi_keys_range (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a,b)
+(
+ PARTITION p1 VALUES LESS THAN (100,100),
+ PARTITION p2 VALUES LESS THAN (200,200),
+ PARTITION p3 VALUES LESS THAN (300,300),
+ PARTITION p4 VALUES LESS THAN MAXVALUE
+);
+
+CREATE TABLE t_multi_keys_range (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a,b)
+(
+ PARTITION p1 VALUES LESS THAN (100,100),
+ PARTITION p2 VALUES LESS THAN (200,200),
+ PARTITION p3 VALUES LESS THAN (300,300),
+ PARTITION p4 VALUES LESS THAN (400,MAXVALUE),
+ PARTITION p5 VALUES LESS THAN (MAXVALUE,MAXVALUE)
+);
+\d+ t_multi_keys_range
+SELECT pg_get_tabledef('t_multi_keys_range'::regclass);
+DROP TABLE IF EXISTS t_multi_keys_range;
+
+-- --------------------------------------------------------test number of columns
+CREATE TABLE t_multi_keys_range_num (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY RANGE COLUMNS(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q)
+(
+ PARTITION p1 VALUES LESS THAN (100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p2 VALUES LESS THAN (200,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p3 VALUES LESS THAN (300,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p4 VALUES LESS THAN (400,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p5 VALUES LESS THAN (MAXVALUE,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)
+);
+CREATE TABLE t_multi_keys_range_num (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY RANGE COLUMNS(a,b,c,d)
+(
+ PARTITION p1 VALUES LESS THAN (100,100,100,100),
+ PARTITION p2 VALUES LESS THAN (200,100,100,100),
+ PARTITION p3 VALUES LESS THAN (300,100,100),
+ PARTITION p4 VALUES LESS THAN (400,100,100,100),
+ PARTITION p5 VALUES LESS THAN (MAXVALUE,100,100,100)
+);
+CREATE TABLE t_multi_keys_range_num (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY RANGE COLUMNS(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p)
+(
+ PARTITION p1 VALUES LESS THAN (100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p2 VALUES LESS THAN (200,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p3 VALUES LESS THAN (300,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p4 VALUES LESS THAN (400,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p5 VALUES LESS THAN (MAXVALUE,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)
+);
+DROP TABLE IF EXISTS t_multi_keys_range_num;
+
+-- --------------------------------------------------------test key partition with B compatibility
+CREATE TABLE t_part_by_key (a int, b int, c int)
+PARTITION BY KEY(a)
+(
+ PARTITION p1 TABLESPACE = pg_default,
+ PARTITION p2,
+ PARTITION p3,
+ PARTITION p4,
+ PARTITION p5
+);
+\d+ t_part_by_key
+SELECT pg_get_tabledef('t_part_by_key'::regclass);
+DROP TABLE IF EXISTS t_part_by_key;
+
+CREATE TABLE t_part_by_key_num (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY KEY(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p)
+(
+ PARTITION p1,
+ PARTITION p2,
+ PARTITION p3,
+ PARTITION p4,
+ PARTITION p5
+);
+
+-- --------------------------------------------------------test list partition with B compatibility
+-- errors
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY LIST COLUMNS(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p)
+(
+ PARTITION p1 VALUES IN (100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p2 VALUES IN (200,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p3 VALUES IN (300,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100),
+ PARTITION p4 VALUES IN (400,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)
+);
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY LIST COLUMNS(a,b,c,d)
+(
+ PARTITION p1 VALUES IN (100,100,100,100),
+ PARTITION p2 VALUES IN (200,100,100,100),
+ PARTITION p3 VALUES IN (300,100,100,100),
+ PARTITION p4 VALUES IN (400,100,100,100)
+);
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY LIST COLUMNS(a,b,c,d)
+(
+ PARTITION p1 VALUES IN ((100,100,100,100)),
+ PARTITION p2 VALUES IN ((200,100,100,100)),
+ PARTITION p3 VALUES IN ((300,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)),
+ PARTITION p4 VALUES IN ((400,100,100,100))
+);
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY LIST COLUMNS(a,b,c,d)
+(
+ PARTITION p1 VALUES IN ((100,100,NULL,100),(100,100,NULL,100)),
+ PARTITION p2 VALUES IN ((200,200,100,100),(100,100,100,200)),
+ PARTITION p3 VALUES IN ((300,300,100,100),(100,100,100,300)),
+ PARTITION p4 VALUES IN ((400,400,100,100),(100,100,100,400))
+);
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY LIST COLUMNS(a,b,c,d)
+(
+ PARTITION p1 VALUES IN ((100,100,NULL,100),(100,100,100,400)),
+ PARTITION p2 VALUES IN ((200,200,100,100),(100,100,100,300)),
+ PARTITION p3 VALUES IN ((300,300,100,100),(100,100,100,200)),
+ PARTITION p4 VALUES IN ((400,400,100,100),(100,100,NULL,100))
+);
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b)
+(
+ PARTITION p1 VALUES IN ( (0,MAXVALUE) ),
+ PARTITION p2 VALUES IN ( (0,1), (0,2), (0,3), (1,1), (1,2) )
+);
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b)
+(
+ PARTITION p1 VALUES IN ( (0,DEFAULT) ),
+ PARTITION p2 VALUES IN ( (0,1), (0,2), (0,3), (1,1), (1,2) )
+);
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b)
+(
+ PARTITION p1 VALUES IN (MAXVALUE),
+ PARTITION p2 VALUES IN ( (0,1), (0,2), (0,3), (1,1), (1,2) )
+);
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b)
+(
+ PARTITION p1 VALUES IN ( (NULL, NULL)),
+ PARTITION p2 VALUES IN ( (0,1), (0,2), (0,A), (1,1), (1,2) )
+);
+CREATE TABLE t_multi_keys_list_err (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY LIST COLUMNS(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q)
+(
+ PARTITION p1 VALUES IN ((100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)),
+ PARTITION p2 VALUES IN ((200,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)),
+ PARTITION p3 VALUES IN ((300,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)),
+ PARTITION p4 VALUES IN ((400,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100))
+);
+
+-- normal
+CREATE TABLE t_multi_keys_list (a int, b int, c int, d int, e int, f int, g int, h int, i int, j int, k int, l int, m int, n int, o int, p int, q int)
+PARTITION BY LIST COLUMNS(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p)
+(
+ PARTITION p1 VALUES IN ((100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)),
+ PARTITION p2 VALUES IN ((200,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)),
+ PARTITION p3 VALUES IN ((300,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100)),
+ PARTITION p4 VALUES IN ((400,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100))
+);
+DROP TABLE IF EXISTS t_multi_keys_list;
+CREATE TABLE t_multi_keys_list (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b)
+(
+ PARTITION p1 VALUES IN ( (0,0) ) TABLESPACE = pg_default,
+ PARTITION p2 VALUES IN ( (0,1), (0,2), (0,3), (1,1), (1,2) ),
+ PARTITION p3 VALUES IN ( (2,0), (2,1) ),
+ PARTITION p4 VALUES IN ( (3,2), (3,3) )
+);
+SELECT pg_get_tabledef('t_multi_keys_list'::regclass);
+
+INSERT INTO t_multi_keys_list VALUES(0,0,0);
+SELECT * FROM t_multi_keys_list PARTITION(p1) ORDER BY 1,2;
+INSERT INTO t_multi_keys_list VALUES(0,1,0);
+INSERT INTO t_multi_keys_list VALUES(0,2,0);
+INSERT INTO t_multi_keys_list VALUES(0,3,0);
+INSERT INTO t_multi_keys_list VALUES(1,1,0);
+INSERT INTO t_multi_keys_list VALUES(1,2,0);
+SELECT * FROM t_multi_keys_list PARTITION(p2) ORDER BY 1,2;
+INSERT INTO t_multi_keys_list VALUES(2,0,0);
+INSERT INTO t_multi_keys_list VALUES(2,1,0);
+SELECT * FROM t_multi_keys_list PARTITION(p3) ORDER BY 1,2;
+INSERT INTO t_multi_keys_list VALUES(3,2,0);
+INSERT INTO t_multi_keys_list VALUES(3,3,0);
+SELECT * FROM t_multi_keys_list PARTITION(p4) ORDER BY 1,2;
+INSERT INTO t_multi_keys_list VALUES(4,4,4);
+DROP TABLE IF EXISTS t_multi_keys_list;
+
+-- test with null keys
+CREATE TABLE t_multi_keys_list_null (a int, b int, c int)
+PARTITION BY LIST COLUMNS(a,b)
+(
+ PARTITION p1 VALUES IN ( (0,NULL) ),
+ PARTITION p2 VALUES IN ( (0,1), (0,2), (0,3), (1,1), (1,2) ),
+ PARTITION p3 VALUES IN ( (NULL,0), (2,1) ),
+ PARTITION p4 VALUES IN ( (3,2), (NULL,NULL) )
+);
+CREATE INDEX t_multi_keys_list_null_idx_l ON t_multi_keys_list_null(a,b,c) LOCAL;
+SELECT pg_get_tabledef('t_multi_keys_list_null'::regclass);
+
+INSERT INTO t_multi_keys_list_null VALUES(0,NULL,0);
+SELECT * FROM t_multi_keys_list_null PARTITION(p1) ORDER BY a,b;
+INSERT INTO t_multi_keys_list_null VALUES(0,1,0);
+INSERT INTO t_multi_keys_list_null VALUES(0,2,0);
+INSERT INTO t_multi_keys_list_null VALUES(0,3,0);
+INSERT INTO t_multi_keys_list_null VALUES(1,1,0);
+INSERT INTO t_multi_keys_list_null VALUES(1,2,0);
+SELECT * FROM t_multi_keys_list_null PARTITION(p2) ORDER BY a,b;
+INSERT INTO t_multi_keys_list_null VALUES(NULL,0,0);
+INSERT INTO t_multi_keys_list_null VALUES(2,1,0);
+SELECT * FROM t_multi_keys_list_null PARTITION(p3) ORDER BY a,b;
+INSERT INTO t_multi_keys_list_null VALUES(3,2,0);
+INSERT INTO t_multi_keys_list_null VALUES(NULL,NULL,0);
+SELECT * FROM t_multi_keys_list_null PARTITION(p4) ORDER BY a,b;
+INSERT INTO t_multi_keys_list_null VALUES(4,4,4);
+
+EXPLAIN (costs false)
+SELECT a FROM t_multi_keys_list_null WHERE a IS NULL;
+SELECT a FROM t_multi_keys_list_null WHERE a IS NULL;
+EXPLAIN (costs false)
+SELECT a FROM t_multi_keys_list_null WHERE a = 0;
+SELECT a FROM t_multi_keys_list_null WHERE a = 0;
+EXPLAIN (costs false)
+SELECT b FROM t_multi_keys_list_null WHERE b IS NULL;
+SELECT b FROM t_multi_keys_list_null WHERE b IS NULL;
+EXPLAIN (costs false)
+SELECT b FROM t_multi_keys_list_null WHERE b = 1;
+SELECT b FROM t_multi_keys_list_null WHERE b = 1;
+EXPLAIN (costs false)
+SELECT a FROM t_multi_keys_list_null WHERE a = 4;
+SELECT a FROM t_multi_keys_list_null WHERE a = 4;
+EXPLAIN (costs false)
+SELECT a,b FROM t_multi_keys_list_null WHERE a < 1 ORDER BY 1,2;
+SELECT a,b FROM t_multi_keys_list_null WHERE a < 1 ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_null WHERE (a,b) = (0,1);
+SELECT * FROM t_multi_keys_list_null WHERE (a,b) = (0,1);
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_null WHERE (a,b) = (NULL,0);
+SELECT * FROM t_multi_keys_list_null WHERE (a,b) = (NULL,0);
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_null WHERE a IS NULL AND b = 0;
+SELECT * FROM t_multi_keys_list_null WHERE a IS NULL AND b = 0;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_null WHERE a = 0 OR b = 0 ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_null WHERE a = 0 OR b = 0 ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_null WHERE a IN (2,0) ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_null WHERE a IN (2,0) ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_null WHERE (a,b) IN ((1,1), (3,2)) ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_null WHERE (a,b) IN ((1,1), (3,2)) ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_null WHERE (a,b) =ANY(ARRAY[(2,1), (3,2)]) ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_null WHERE (a,b) =ANY(ARRAY[(2,1), (3,2)]) ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM t_multi_keys_list_null WHERE a =ANY(ARRAY[2, 3]) ORDER BY 1,2;
+SELECT * FROM t_multi_keys_list_null WHERE a =ANY(ARRAY[2, 3]) ORDER BY 1,2;
+PREPARE part_bdb_stmt(int) as SELECT a,b FROM t_multi_keys_list_null WHERE b = $1 ORDER BY 1,2;
+EXPLAIN (costs false)
+EXECUTE part_bdb_stmt(3);
+EXECUTE part_bdb_stmt(1);
+EXECUTE part_bdb_stmt(2);
+PREPARE part_bdb_stmt1(int) as SELECT a,b FROM t_multi_keys_list_null WHERE a != $1 ORDER BY 1,2;
+EXPLAIN (costs false)
+EXECUTE part_bdb_stmt1(3);
+EXECUTE part_bdb_stmt1(1);
+EXECUTE part_bdb_stmt1(2);
+PREPARE part_bdb_stmt2(int) as SELECT a,b FROM t_multi_keys_list_null WHERE a >= $1 ORDER BY 1,2;
+EXPLAIN (costs false)
+EXECUTE part_bdb_stmt2(3);
+EXECUTE part_bdb_stmt2(1);
+EXECUTE part_bdb_stmt2(2);
+PREPARE part_bdb_stmt3(int, int) as SELECT a,b FROM t_multi_keys_list_null WHERE (a,b) = ($1,$2);
+EXPLAIN (costs false)
+EXECUTE part_bdb_stmt3(0,1);
+EXECUTE part_bdb_stmt3(0,1);
+EXECUTE part_bdb_stmt3(3,2);
+
+UPDATE t_multi_keys_list_null SET a=2, b=1 where a=1 and b=1;
+SELECT * FROM t_multi_keys_list_null PARTITION(p3) ORDER BY a,b;
+UPDATE t_multi_keys_list_null SET a=NULL, b=0 where a=1 and b=2;
+SELECT * FROM t_multi_keys_list_null PARTITION(p3) ORDER BY a,b;
+
+EXPLAIN (costs false)
+DELETE t_multi_keys_list_null PARTITION(p3, p4) where b = 0;
+DELETE t_multi_keys_list_null PARTITION(p3, p4) where b = 0;
+SELECT * FROM t_multi_keys_list_null PARTITION(p3) ORDER BY a,b;
+EXPLAIN (costs false)
+DELETE t_multi_keys_list_null PARTITION FOR(0,NULL);
+DELETE t_multi_keys_list_null PARTITION FOR(0,NULL);
+SELECT * FROM t_multi_keys_list_null PARTITION(p1) ORDER BY a,b;
+-- alter table partition
+CREATE INDEX test_multi_list_key_gi on t_multi_keys_list_null(c);
+CREATE TABLESPACE part_bdb_temp_tbspc RELATIVE LOCATION 'tablespace/part_bdb_temp_tbspc';
+ALTER TABLE t_multi_keys_list_null MOVE PARTITION FOR(0,NULL) TABLESPACE part_bdb_temp_tbspc;
+SELECT pg_get_tabledef('t_multi_keys_list_null'::regclass);
+
+CREATE TABLE t_alter_partition_temp (a int, b int, c int);
+INSERT INTO t_alter_partition_temp VALUES(NULL,0,1);
+INSERT INTO t_alter_partition_temp VALUES(2,1,2);
+CREATE INDEX t_alter_partition_temp_idx_l ON t_alter_partition_temp(a,b,c);
+SELECT * FROM t_alter_partition_temp ORDER BY a,b,c;
+SELECT * FROM t_multi_keys_list_null PARTITION(p3) ORDER BY a,b,c;
+ALTER TABLE t_multi_keys_list_null EXCHANGE PARTITION (p3) WITH TABLE t_alter_partition_temp UPDATE GLOBAL INDEX;
+SELECT * FROM t_alter_partition_temp ORDER BY a,b,c;
+SELECT * FROM t_multi_keys_list_null PARTITION(p3) ORDER BY a,b,c;
+DROP TABLE IF EXISTS t_alter_partition_temp;
+
+ALTER TABLE t_multi_keys_list_null ADD PARTITION p5 VALUES (1);
+ALTER TABLE t_multi_keys_list_null ADD PARTITION p5 VALUES ((2,1));
+ALTER TABLE t_multi_keys_list_null ADD PARTITION p5 VALUES ((2,1,1));
+ALTER TABLE t_multi_keys_list_null ADD PARTITION p5 VALUES ((4,NULL),(2,2),(4,NULL));
+ALTER TABLE t_multi_keys_list_null ADD PARTITION p5 VALUES ((4,4));
+INSERT INTO t_multi_keys_list_null VALUES(4,4,4);
+SELECT * FROM t_multi_keys_list_null PARTITION(p5) ORDER BY a,b;
+
+ALTER TABLE t_multi_keys_list_null DROP PARTITION FOR (1);
+ALTER TABLE t_multi_keys_list_null DROP PARTITION FOR (1,5,8);
+ALTER TABLE t_multi_keys_list_null DROP PARTITION FOR (1,5);
+ALTER TABLE t_multi_keys_list_null DROP PARTITION FOR (2,1) UPDATE GLOBAL INDEX;
+SELECT * FROM t_multi_keys_list_null PARTITION(p3) ORDER BY a,b;
+ALTER TABLE t_multi_keys_list_null TRUNCATE PARTITION FOR (4,4) UPDATE GLOBAL INDEX;
+SELECT * FROM t_multi_keys_list_null PARTITION(p5) ORDER BY a,b;
+ALTER TABLE t_multi_keys_list_null RENAME PARTITION FOR (0,NULL) TO p0;
+SELECT pg_get_tabledef('t_multi_keys_list_null'::regclass);
+
+-- test views
+SELECT table_name,partitioning_type,partition_count,partitioning_key_count,subpartitioning_type FROM MY_PART_TABLES ORDER BY 1;
+SELECT table_name,partition_name,high_value,subpartition_count FROM MY_TAB_PARTITIONS ORDER BY 1,2;
+SELECT table_name,partition_name,subpartition_name,high_value,high_value_length FROM MY_TAB_SUBPARTITIONS ORDER BY 1,2,3;
+SELECT table_name,index_name,partition_count,partitioning_key_count,partitioning_type,subpartitioning_type FROM MY_PART_INDEXES ORDER BY 1,2;
+SELECT index_name,partition_name,high_value,high_value_length FROM MY_IND_PARTITIONS ORDER BY 1,2;
+SELECT index_name,partition_name,subpartition_name,high_value,high_value_length FROM MY_IND_SUBPARTITIONS ORDER BY 1,2,3;
+
+DROP TABLE IF EXISTS t_multi_keys_list_null;
+DROP TABLESPACE part_bdb_temp_tbspc;
+
+-- test subpart
+CREATE TABLE t_keys_range_list (a int, b int, c int)
+PARTITION BY RANGE COLUMNS(a) SUBPARTITION BY LIST (b,c)
+(
+ PARTITION p1 VALUES LESS THAN (100) (
+ SUBPARTITION sbp11 VALUES ((1,1)),
+ SUBPARTITION sbp12 VALUES ((1,2)),
+ ),
+ PARTITION p2 VALUES LESS THAN (200) (
+ SUBPARTITION sbp21 VALUES ((2,1)),
+ SUBPARTITION sbp22 VALUES ((2,2)),
+ ),
+ PARTITION p5 VALUES LESS THAN (MAXVALUE) (
+ SUBPARTITION sbp31 VALUES ((3,1)),
+ SUBPARTITION sbp32 VALUES ((3,2)),
+ )
+);
+\d+ t_keys_range_list
+SELECT pg_get_tabledef('t_keys_range_list'::regclass);
+DROP TABLE IF EXISTS t_keys_range_list;
+
+-- MAXVALUE in subpartiton
+CREATE TABLE range_011
+(
+co1 SMALLINT
+,co2 INTEGER
+,co3 BIGINT
+)
+PARTITION BY range COLUMNS(co1) PARTITIONS 3 SUBPARTITION BY range (co2)
+(
+PARTITION p_range_1 values less than (10)
+(
+SUBPARTITION p_range_1_1 values less than ( 20 ),
+SUBPARTITION p_range_1_2 values less than ( 100 ),
+SUBPARTITION p_range_1_5 values less than (MAXVALUE)
+),
+PARTITION p_range_2 values less than (20)
+(
+SUBPARTITION p_range_2_1 values less than ( 20 ),
+SUBPARTITION p_range_2_2 values less than ( 100 ),
+SUBPARTITION p_range_2_5 values less than (MAXVALUE)
+),
+PARTITION p_range_3 values less than MAXVALUE
+(
+SUBPARTITION p_range_3_1 values less than ( 20 ),
+SUBPARTITION p_range_3_2 values less than ( 100 ),
+SUBPARTITION p_range_3_5 values less than (MAXVALUE)
+)) ENABLE ROW MOVEMENT;
+DROP TABLE range_011;
+CREATE TABLE range_011
+(
+co1 SMALLINT
+,co2 INTEGER
+,co3 BIGINT
+)
+PARTITION BY range COLUMNS(co1) PARTITIONS 3 SUBPARTITION BY range (co2)
+(
+PARTITION p_range_1 values less than (10)
+(
+SUBPARTITION p_range_1_1 values less than ( 20 ),
+SUBPARTITION p_range_1_2 values less than ( 100 ),
+SUBPARTITION p_range_1_5 values less than (MAXVALUE)
+),
+PARTITION p_range_2 values less than (20)
+(
+SUBPARTITION p_range_2_1 values less than ( 20 ),
+SUBPARTITION p_range_2_2 values less than ( 100 ),
+SUBPARTITION p_range_2_5 values less than (MAXVALUE)
+),
+PARTITION p_range_3 values less than MAXVALUE
+(
+SUBPARTITION p_range_3_1 values less than ( 20 ),
+SUBPARTITION p_range_3_2 values less than ( 100 ),
+SUBPARTITION p_range_3_5 values less than MAXVALUE
+)) ENABLE ROW MOVEMENT; -- ERROR
+
+-- END
+DROP SCHEMA partition_b_db_schema CASCADE;
+\c regression
+drop database part_bdb;
\ No newline at end of file
diff --git a/src/test/regress/sql/hw_partition_list_ddl.sql b/src/test/regress/sql/hw_partition_list_ddl.sql
index d7f014e6c..ed9648942 100644
--- a/src/test/regress/sql/hw_partition_list_ddl.sql
+++ b/src/test/regress/sql/hw_partition_list_ddl.sql
@@ -250,4 +250,28 @@ partition p81 values ( 81 ),
partition p82 values ( 82 )
);
drop table test_list;
+
+create table test_listkey_datatype
+(
+col_2 INT2,
+col_3 INT4,
+col_4 INT4,
+col_5 INT4,
+col_6 INT4,
+col_32 NUMERIC,
+col_33 VARCHAR(10),
+col_34 CHAR,
+col_35 BPCHAR,
+col_36 TIMESTAMP WITHOUT TIME ZONE,
+col_37 DATE
+) partition by list(col_5,col_4,col_6,col_37)
+(
+ partition p1 values ((2,1,2,'2022-02-03'),(6,3,6,'2022-02-07')),
+ partition p2 values ((5,4,5,'2022-02-08')),
+ partition p3 values ((7,6,7,'2022-02-09')),
+ partition p7 values (default)
+);
+insert into test_listkey_datatype(col_5,col_4,col_6,col_37) values(6,3,6,'2022-02-07');
+select col_5 from test_listkey_datatype partition (p1);
+drop table test_listkey_datatype;
drop schema FVT_COMPRESS_QWER cascade;
diff --git a/src/test/regress/sql/hw_partition_parallel.sql b/src/test/regress/sql/hw_partition_parallel.sql
new file mode 100644
index 000000000..9421bc74c
--- /dev/null
+++ b/src/test/regress/sql/hw_partition_parallel.sql
@@ -0,0 +1,392 @@
+-- prepare
+DROP SCHEMA partition_parallel CASCADE;
+CREATE SCHEMA partition_parallel;
+SET CURRENT_SCHEMA TO partition_parallel;
+
+--
+----range table----
+--
+--prepare
+CREATE TABLE range_sales
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 NOT NULL,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (time_id)
+(
+ PARTITION time_2008 VALUES LESS THAN ('2009-01-01'),
+ PARTITION time_2009 VALUES LESS THAN ('2010-01-01'),
+ PARTITION time_2010 VALUES LESS THAN ('2011-01-01'),
+ PARTITION time_2011 VALUES LESS THAN ('2012-01-01')
+);
+INSERT INTO range_sales SELECT generate_series(1,1000),
+ generate_series(1,1000),
+ date_pli('2008-01-01', generate_series(1,1000)),
+ generate_series(1,1000)%10,
+ generate_series(1,1000)%10,
+ generate_series(1,1000)%1000,
+ generate_series(1,1000);
+CREATE INDEX range_sales_idx1 ON range_sales(channel_id) LOCAL;
+CREATE INDEX range_sales_idx2 ON range_sales(customer_id) GLOBAL;
+
+--create a temp table to exchange
+CREATE TABLE range_temp
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 NOT NULL,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE);
+CREATE INDEX ON range_temp(channel_id);
+INSERT INTO range_temp SELECT * FROM range_sales WHERE time_id < '2009-01-01';
+
+--drop
+\parallel on
+ALTER TABLE range_sales DROP PARTITION time_2008 UPDATE GLOBAL INDEX;
+UPDATE range_sales SET channel_id = 'X' WHERE time_id = '2010-06-01';
+INSERT INTO range_sales VALUES(1,1,'2011-06-01', 'X',1,1,1);
+SELECT /*+ tablescan(range_sales)*/ COUNT(*) FROM range_sales PARTITION (time_2010);
+\parallel off
+SELECT COUNT(*) FROM range_sales WHERE channel_id = 'X';
+
+--split
+ALTER TABLE range_sales RENAME PARTITION time_2009 TO time_2009_temp;
+\parallel on
+ALTER TABLE range_sales SPLIT PARTITION time_2009_temp AT ('2009-01-01')
+ INTO (PARTITION time_2008, PARTITION time_2009) UPDATE GLOBAL INDEX;
+UPDATE range_sales PARTITION (time_2010) SET channel_id = '0' WHERE channel_id = 'X';
+DELETE FROM range_sales WHERE channel_id = 'X' AND time_id >= '2011-01-01';
+\parallel off
+SELECT COUNT(*) FROM range_sales WHERE channel_id = 'X';
+
+--truncate
+\parallel on
+ALTER TABLE range_sales TRUNCATE PARTITION time_2008 UPDATE GLOBAL INDEX;
+SELECT /*+ tablescan(range_sales)*/ COUNT(*) FROM range_sales PARTITION (time_2008);
+SELECT /*+ indexonlyscan(range_sales range_sales_idx1)*/ COUNT(channel_id) FROM range_sales PARTITION (time_2008);
+SELECT /*+ indexonlyscan(range_sales range_sales_idx2)*/ COUNT(customer_id) FROM range_sales;
+UPDATE range_sales SET channel_id = 'X' WHERE time_id = '2010-06-01';
+INSERT INTO range_sales VALUES(1,1,'2011-06-01', 'X',1,1,1);
+\parallel off
+SELECT COUNT(*) FROM range_sales WHERE channel_id = 'X';
+
+--exchange
+\parallel on
+ALTER TABLE range_sales EXCHANGE PARTITION (time_2008) WITH TABLE range_temp UPDATE GLOBAL INDEX;
+SELECT /*+ tablescan(range_sales)*/ COUNT(*) FROM range_sales PARTITION (time_2008);
+SELECT /*+ indexonlyscan(range_sales range_sales_idx1)*/ COUNT(channel_id) FROM range_sales PARTITION (time_2008);
+SELECT /*+ indexonlyscan(range_sales range_sales_idx2)*/ COUNT(customer_id) FROM range_sales;
+UPDATE range_sales PARTITION (time_2010) SET channel_id = '0' WHERE channel_id = 'X';
+DELETE FROM range_sales WHERE channel_id = 'X' AND time_id >= '2011-01-01';
+\parallel off
+SELECT COUNT(*) FROM range_sales WHERE channel_id = 'X';
+
+--merge
+\parallel on
+ALTER TABLE range_sales MERGE PARTITIONS time_2008, time_2009 INTO PARTITION time_2009 UPDATE GLOBAL INDEX;
+UPDATE range_sales SET channel_id = 'X' WHERE time_id = '2010-06-01';
+INSERT INTO range_sales VALUES(1,1,'2011-06-01', 'X',1,1,1);
+\parallel off
+SELECT COUNT(*) FROM range_sales WHERE channel_id = 'X';
+
+--finish
+DROP TABLE range_sales;
+DROP TABLE range_temp;
+
+--
+----list table----
+--
+--prepare
+CREATE TABLE list_sales
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 NOT NULL,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (channel_id)
+(
+ PARTITION channel1 VALUES ('0', '1', '2'),
+ PARTITION channel2 VALUES ('3', '4', '5'),
+ PARTITION channel3 VALUES ('6', '7'),
+ PARTITION channel4 VALUES ('8', '9')
+);
+INSERT INTO list_sales SELECT generate_series(1,1000),
+ generate_series(1,1000),
+ date_pli('2008-01-01', generate_series(1,1000)),
+ generate_series(1,1000)%10,
+ generate_series(1,1000)%10,
+ generate_series(1,1000)%1000,
+ generate_series(1,1000);
+CREATE INDEX list_sales_idx1 ON list_sales(channel_id) LOCAL;
+CREATE INDEX list_sales_idx2 ON list_sales(type_id) GLOBAL;
+
+--create a temp table to exchange
+CREATE TABLE list_temp
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 NOT NULL,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE);
+CREATE INDEX ON list_temp(channel_id);
+INSERT INTO list_temp SELECT * FROM list_sales WHERE channel_id in ('0', '1', '2');
+
+--drop
+\parallel on
+ALTER TABLE list_sales DROP PARTITION channel1 UPDATE GLOBAL INDEX;
+UPDATE list_sales SET type_id = -1 WHERE channel_id = '6';
+INSERT INTO list_sales VALUES(1,1,'2011-06-01', '8',-1,1,1);
+SELECT /*+ tablescan(list_sales)*/ COUNT(*) FROM list_sales PARTITION (channel3);
+\parallel off
+SELECT COUNT(*) FROM list_sales WHERE type_id = -1;
+
+--add
+\parallel on
+ALTER TABLE list_sales ADD PARTITION channel1 VALUES ('0', '1', '2');
+UPDATE list_sales PARTITION (channel3) SET type_id = 1 WHERE type_id = -1;
+DELETE FROM list_sales WHERE type_id = -1 AND channel_id in ('8', '9');
+\parallel off
+SELECT COUNT(*) FROM list_sales WHERE type_id = -1;
+
+--truncate
+\parallel on
+ALTER TABLE list_sales TRUNCATE PARTITION channel1 UPDATE GLOBAL INDEX;
+SELECT /*+ tablescan(list_sales)*/ COUNT(*) FROM list_sales PARTITION (channel1);
+SELECT /*+ indexonlyscan(list_sales list_sales_idx1)*/ COUNT(channel_id) FROM list_sales PARTITION (channel1);
+SELECT /*+ indexonlyscan(list_sales list_sales_idx2)*/ COUNT(type_id) FROM list_sales;
+UPDATE list_sales SET type_id = -1 WHERE channel_id = '6';
+INSERT INTO list_sales VALUES(1,1,'2011-06-01', '8',-1,1,1);
+\parallel off
+SELECT COUNT(*) FROM list_sales WHERE type_id = -1;
+
+--exchange
+\parallel on
+ALTER TABLE list_sales EXCHANGE PARTITION (channel1) WITH TABLE list_temp UPDATE GLOBAL INDEX;
+SELECT /*+ tablescan(list_sales)*/ COUNT(*) FROM list_sales PARTITION (channel1);
+SELECT /*+ indexonlyscan(list_sales list_sales_idx1)*/ COUNT(channel_id) FROM list_sales PARTITION (channel1);
+SELECT /*+ indexonlyscan(list_sales list_sales_idx2)*/ COUNT(type_id) FROM list_sales;
+UPDATE list_sales PARTITION (channel3) SET type_id = 1 WHERE type_id = -1;
+DELETE FROM list_sales WHERE type_id = -1 AND channel_id in ('8', '9');
+\parallel off
+SELECT COUNT(*) FROM list_sales WHERE type_id = -1;
+
+--finish
+DROP TABLE list_sales;
+DROP TABLE list_temp;
+
+--
+----interval table----
+--
+--prepare
+CREATE TABLE interval_sales
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 NOT NULL,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (time_id) INTERVAL ('1 year')
+(
+ PARTITION time_2008 VALUES LESS THAN ('2009-01-01'),
+ PARTITION time_2009 VALUES LESS THAN ('2010-01-01'),
+ PARTITION time_2010 VALUES LESS THAN ('2011-01-01'),
+ PARTITION time_2011 VALUES LESS THAN ('2012-01-01')
+);
+INSERT INTO interval_sales SELECT generate_series(1,1000),
+ generate_series(1,1000),
+ date_pli('2008-01-01', generate_series(1,1000)),
+ generate_series(1,1000)%10,
+ generate_series(1,1000)%10,
+ generate_series(1,1000)%1000,
+ generate_series(1,1000);
+CREATE INDEX interval_sales_idx1 ON interval_sales(channel_id) LOCAL;
+CREATE INDEX interval_sales_idx2 ON interval_sales(customer_id) GLOBAL;
+
+--create a temp table to exchange
+CREATE TABLE interval_temp
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 NOT NULL,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE);
+CREATE INDEX ON interval_temp(channel_id);
+INSERT INTO interval_temp SELECT * FROM interval_sales WHERE time_id < '2009-01-01';
+
+--drop
+\parallel on
+ALTER TABLE interval_sales DROP PARTITION time_2008 UPDATE GLOBAL INDEX;
+UPDATE interval_sales SET channel_id = 'X' WHERE time_id = '2010-06-01';
+INSERT INTO interval_sales VALUES(1,1,'2011-06-01', 'X',1,1,1);
+SELECT /*+ tablescan(interval_sales)*/ COUNT(*) FROM interval_sales PARTITION (time_2010);
+\parallel off
+SELECT COUNT(*) FROM interval_sales WHERE channel_id = 'X';
+
+--split
+ALTER TABLE interval_sales RENAME PARTITION time_2009 TO time_2009_temp;
+\parallel on
+ALTER TABLE interval_sales SPLIT PARTITION time_2009_temp AT ('2009-01-01')
+ INTO (PARTITION time_2008, PARTITION time_2009) UPDATE GLOBAL INDEX;
+UPDATE interval_sales PARTITION (time_2010) SET channel_id = '0' WHERE channel_id = 'X';
+DELETE FROM interval_sales WHERE channel_id = 'X' AND time_id >= '2011-01-01';
+\parallel off
+SELECT COUNT(*) FROM interval_sales WHERE channel_id = 'X';
+
+--truncate
+\parallel on
+ALTER TABLE interval_sales TRUNCATE PARTITION time_2008 UPDATE GLOBAL INDEX;
+SELECT /*+ tablescan(interval_sales)*/ COUNT(*) FROM interval_sales PARTITION (time_2008);
+SELECT /*+ indexonlyscan(interval_sales interval_sales_idx1)*/ COUNT(channel_id) FROM interval_sales PARTITION (time_2008);
+SELECT /*+ indexonlyscan(interval_sales interval_sales_idx2)*/ COUNT(customer_id) FROM interval_sales;
+UPDATE interval_sales SET channel_id = 'X' WHERE time_id = '2010-06-01';
+INSERT INTO interval_sales VALUES(1,1,'2011-06-01', 'X',1,1,1);
+\parallel off
+SELECT COUNT(*) FROM interval_sales WHERE channel_id = 'X';
+
+--exchange
+\parallel on
+ALTER TABLE interval_sales EXCHANGE PARTITION (time_2008) WITH TABLE interval_temp UPDATE GLOBAL INDEX;
+SELECT /*+ tablescan(interval_sales)*/ COUNT(*) FROM interval_sales PARTITION (time_2008);
+SELECT /*+ indexonlyscan(interval_sales interval_sales_idx1)*/ COUNT(channel_id) FROM interval_sales PARTITION (time_2008);
+SELECT /*+ indexonlyscan(interval_sales interval_sales_idx2)*/ COUNT(customer_id) FROM interval_sales;
+UPDATE interval_sales PARTITION (time_2010) SET channel_id = '0' WHERE channel_id = 'X';
+DELETE FROM interval_sales WHERE channel_id = 'X' AND time_id >= '2011-01-01';
+\parallel off
+SELECT COUNT(*) FROM interval_sales WHERE channel_id = 'X';
+
+--merge
+\parallel on
+ALTER TABLE interval_sales MERGE PARTITIONS time_2008, time_2009 INTO PARTITION time_2009 UPDATE GLOBAL INDEX;
+UPDATE interval_sales SET channel_id = 'X' WHERE time_id = '2010-06-01';
+INSERT INTO interval_sales VALUES(1,1,'2011-06-01', 'X',1,1,1);
+\parallel off
+SELECT COUNT(*) FROM interval_sales WHERE channel_id = 'X';
+
+--insert
+\parallel on
+INSERT INTO interval_sales VALUES (1,1,'2017-06-01','1',1,1);
+INSERT INTO interval_sales VALUES (1,1,'2017-07-01','1',1,1);
+INSERT INTO interval_sales VALUES (1,1,'2018-06-01','1',1,1);
+UPDATE interval_sales PARTITION (time_2010) SET channel_id = '0' WHERE channel_id = 'X';
+DELETE FROM interval_sales WHERE channel_id = 'X' AND time_id >= '2011-01-01';
+\parallel off
+SELECT COUNT(*) FROM interval_sales WHERE channel_id = 'X';
+
+--finish
+DROP TABLE interval_sales;
+DROP TABLE interval_temp;
+
+--
+----range-list table----
+--
+--prepare
+CREATE TABLE range_list_sales
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 NOT NULL,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id)
+(
+ PARTITION customer1 VALUES LESS THAN (200)
+ (
+ SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'),
+ SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'),
+ SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'),
+ SUBPARTITION customer1_channel4 VALUES (DEFAULT)
+ ),
+ PARTITION customer2 VALUES LESS THAN (500)
+ (
+ SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'),
+ SUBPARTITION customer2_channel2 VALUES (DEFAULT)
+ ),
+ PARTITION customer3 VALUES LESS THAN (800),
+ PARTITION customer4 VALUES LESS THAN (1200)
+ (
+ SUBPARTITION customer4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
+ )
+);
+INSERT INTO range_list_sales SELECT generate_series(1,1000),
+ generate_series(1,1000),
+ date_pli('2008-01-01', generate_series(1,1000)),
+ generate_series(1,1000)%10,
+ generate_series(1,1000)%10,
+ generate_series(1,1000)%1000,
+ generate_series(1,1000);
+CREATE INDEX range_list_sales_idx1 ON range_list_sales(customer_id) LOCAL;
+CREATE INDEX range_list_sales_idx2 ON range_list_sales(channel_id) GLOBAL;
+
+--create a temp table to exchange
+CREATE TABLE range_list_temp
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 NOT NULL,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE);
+CREATE INDEX ON range_list_temp(customer_id);
+INSERT INTO range_list_temp SELECT * FROM range_list_sales WHERE customer_id < 200 AND channel_id in ('0', '1', '2');
+
+--drop
+\parallel on
+ALTER TABLE range_list_sales DROP SUBPARTITION customer1_channel1 UPDATE GLOBAL INDEX;
+UPDATE range_list_sales SET type_id = -1 WHERE customer_id = 700;
+INSERT INTO range_list_sales VALUES(1,1000,'2011-06-01', '1',-1,1,1);
+SELECT /*+ tablescan(range_list_sales)*/ COUNT(*) FROM range_list_sales SUBPARTITION (customer1_channel4);
+\parallel off
+SELECT COUNT(*) FROM range_list_sales WHERE type_id = -1;
+
+--split
+\parallel on
+ALTER TABLE range_list_sales SPLIT SUBPARTITION customer1_channel4 VALUES ('0', '1', '2')
+ INTO (SUBPARTITION customer1_channel1, SUBPARTITION customer1_channel4_temp) UPDATE GLOBAL INDEX;
+UPDATE range_list_sales PARTITION (customer3) SET type_id = 1 WHERE customer_id = 700;
+DELETE FROM range_list_sales WHERE type_id = -1 AND customer_id >= 800;
+\parallel off
+SELECT COUNT(*) FROM range_list_sales WHERE type_id = -1;
+
+--truncate
+\parallel on
+ALTER TABLE range_list_sales TRUNCATE PARTITION customer1 UPDATE GLOBAL INDEX;
+SELECT /*+ tablescan(range_list_sales)*/ COUNT(*) FROM range_list_sales SUBPARTITION (customer1_channel2);
+SELECT /*+ indexonlyscan(range_list_sales range_list_sales_idx1)*/ COUNT(customer_id) FROM range_list_sales SUBPARTITION (customer1_channel3);
+SELECT /*+ indexonlyscan(range_list_sales range_list_sales_idx2)*/ COUNT(channel_id) FROM range_list_sales;
+UPDATE range_list_sales SET type_id = -1 WHERE customer_id = 700;
+INSERT INTO range_list_sales VALUES(1,1000,'2011-06-01', '1',-1,1,1);
+\parallel off
+SELECT COUNT(*) FROM range_list_sales WHERE type_id = -1;
+
+--finish
+DROP TABLE range_list_sales;
+DROP TABLE range_list_temp;
+
+-- clean
+DROP SCHEMA partition_parallel CASCADE;
diff --git a/src/test/regress/sql/hw_partitionno.sql b/src/test/regress/sql/hw_partitionno.sql
new file mode 100644
index 000000000..f7ec666b9
--- /dev/null
+++ b/src/test/regress/sql/hw_partitionno.sql
@@ -0,0 +1,251 @@
+-- prepare
+DROP SCHEMA partitionno CASCADE;
+CREATE SCHEMA partitionno;
+SET CURRENT_SCHEMA TO partitionno;
+
+PREPARE partition_get_partitionno AS
+SELECT relname, partitionno, subpartitionno, boundaries
+FROM pg_partition
+WHERE parentid = (
+ SELECT c.oid
+ FROM pg_class c
+ JOIN pg_namespace n ON c.relnamespace = n.oid
+ WHERE c.relname = $1
+ AND n.nspname = CURRENT_SCHEMA
+)
+ORDER BY relname;
+
+PREPARE subpartition_get_partitionno AS
+WITH partition_oid AS (
+ SELECT oid
+ FROM pg_partition
+ WHERE parentid = (
+ SELECT c.oid
+ FROM pg_class c
+ JOIN pg_namespace n ON c.relnamespace = n.oid
+ WHERE c.relname = $1
+ AND n.nspname = CURRENT_SCHEMA
+ )
+ )
+SELECT relname, partitionno, subpartitionno, boundaries
+FROM pg_partition p
+ JOIN partition_oid part
+ ON p.oid = part.oid OR p.parentid = part.oid
+ORDER BY relname;
+
+--
+-- 1. test for range partition
+--
+CREATE TABLE range_sales
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 PRIMARY KEY,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+)
+PARTITION BY RANGE (time_id)
+(
+ PARTITION time_2008 VALUES LESS THAN ('2009-01-01'),
+ PARTITION time_2009 VALUES LESS THAN ('2010-01-01'),
+ PARTITION time_2010 VALUES LESS THAN ('2011-01-01'),
+ PARTITION time_2011 VALUES LESS THAN ('2012-01-01')
+);
+CREATE INDEX range_sales_idx1 ON range_sales(product_id) LOCAL;
+CREATE INDEX range_sales_idx2 ON range_sales(time_id) GLOBAL;
+EXECUTE partition_get_partitionno('range_sales');
+
+-- add/drop partition
+ALTER TABLE range_sales ADD PARTITION time_default VALUES LESS THAN (MAXVALUE);
+ALTER TABLE range_sales DROP PARTITION time_2008;
+EXECUTE partition_get_partitionno('range_sales');
+
+-- merge/split partition
+ALTER TABLE range_sales SPLIT PARTITION time_default AT ('2013-01-01') INTO (PARTITION time_2012, PARTITION time_default_temp);
+ALTER TABLE range_sales RENAME PARTITION time_default_temp TO time_default;
+ALTER TABLE range_sales MERGE PARTITIONS time_2009, time_2010 INTO PARTITION time_2010_old UPDATE GLOBAL INDEX;
+EXECUTE partition_get_partitionno('range_sales');
+
+-- truncate partition with gpi
+ALTER TABLE range_sales TRUNCATE PARTITION time_2011 UPDATE GLOBAL INDEX;
+EXECUTE partition_get_partitionno('range_sales');
+
+-- vacuum full
+VACUUM FULL range_sales;
+EXECUTE partition_get_partitionno('range_sales');
+
+--reset
+ALTER TABLE range_sales RESET PARTITION;
+EXECUTE partition_get_partitionno('range_sales');
+
+DROP TABLE range_sales;
+
+--
+-- 2. test for interval partition
+--
+CREATE TABLE interval_sales
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 PRIMARY KEY,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+)
+PARTITION BY RANGE (time_id) INTERVAL ('1 year')
+(
+ PARTITION time_2008 VALUES LESS THAN ('2009-01-01'),
+ PARTITION time_2009 VALUES LESS THAN ('2010-01-01'),
+ PARTITION time_2010 VALUES LESS THAN ('2011-01-01')
+);
+CREATE INDEX interval_sales_idx1 ON interval_sales(product_id) LOCAL;
+CREATE INDEX interval_sales_idx2 ON interval_sales(time_id) GLOBAL;
+
+EXECUTE partition_get_partitionno('interval_sales');
+
+-- add/drop partition
+INSERT INTO interval_sales VALUES (1,1,'2013-01-01','A',1,1,1);
+INSERT INTO interval_sales VALUES (2,2,'2012-01-01','B',2,2,2);
+ALTER TABLE interval_sales DROP PARTITION time_2008;
+EXECUTE partition_get_partitionno('interval_sales');
+
+-- merge/split partition
+ALTER TABLE interval_sales SPLIT PARTITION time_2009 AT ('2009-01-01') INTO (PARTITION time_2008, PARTITION time_2009_temp);
+ALTER TABLE interval_sales RENAME PARTITION time_2009_temp TO time_2009;
+ALTER TABLE interval_sales MERGE PARTITIONS time_2009, time_2010 INTO PARTITION time_2010_old UPDATE GLOBAL INDEX;
+EXECUTE partition_get_partitionno('interval_sales');
+
+-- truncate partition with gpi
+ALTER TABLE interval_sales TRUNCATE PARTITION time_2008 UPDATE GLOBAL INDEX;
+EXECUTE partition_get_partitionno('interval_sales');
+
+-- vacuum full
+VACUUM FULL interval_sales;
+EXECUTE partition_get_partitionno('interval_sales');
+
+--reset
+ALTER TABLE interval_sales RESET PARTITION;
+EXECUTE partition_get_partitionno('interval_sales');
+
+DROP TABLE interval_sales;
+
+--
+-- 3. test for list partition
+--
+CREATE TABLE list_sales
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 PRIMARY KEY,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+)
+PARTITION BY LIST (channel_id)
+(
+ PARTITION channel1 VALUES ('0', '1', '2'),
+ PARTITION channel2 VALUES ('3', '4', '5'),
+ PARTITION channel3 VALUES ('6', '7'),
+ PARTITION channel4 VALUES ('8', '9')
+);
+CREATE INDEX list_sales_idx1 ON list_sales(product_id) LOCAL;
+CREATE INDEX list_sales_idx2 ON list_sales(time_id) GLOBAL;
+
+EXECUTE partition_get_partitionno('list_sales');
+
+-- add/drop partition
+ALTER TABLE list_sales ADD PARTITION channel_default VALUES (DEFAULT);
+ALTER TABLE list_sales DROP PARTITION channel4;
+EXECUTE partition_get_partitionno('list_sales');
+
+-- truncate partition with gpi
+ALTER TABLE list_sales TRUNCATE PARTITION channel2 UPDATE GLOBAL INDEX;
+EXECUTE partition_get_partitionno('list_sales');
+
+-- vacuum full
+VACUUM FULL list_sales;
+EXECUTE partition_get_partitionno('list_sales');
+
+--reset
+ALTER TABLE list_sales RESET PARTITION;
+EXECUTE partition_get_partitionno('list_sales');
+
+DROP TABLE list_sales;
+
+--
+-- 4. test for list-range partition
+--
+CREATE TABLE list_range_sales
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 PRIMARY KEY,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+)
+PARTITION BY LIST (channel_id) SUBPARTITION BY RANGE (customer_id)
+(
+ PARTITION channel1 VALUES ('0', '1', '2')
+ (
+ SUBPARTITION channel1_customer1 VALUES LESS THAN (200),
+ SUBPARTITION channel1_customer2 VALUES LESS THAN (500),
+ SUBPARTITION channel1_customer3 VALUES LESS THAN (800),
+ SUBPARTITION channel1_customer4 VALUES LESS THAN (1200)
+ ),
+ PARTITION channel2 VALUES ('3', '4', '5')
+ (
+ SUBPARTITION channel2_customer1 VALUES LESS THAN (500),
+ SUBPARTITION channel2_customer2 VALUES LESS THAN (MAXVALUE)
+ ),
+ PARTITION channel3 VALUES ('6', '7'),
+ PARTITION channel4 VALUES ('8', '9')
+ (
+ SUBPARTITION channel4_customer1 VALUES LESS THAN (1200)
+ )
+);
+CREATE INDEX list_range_sales_idx ON list_range_sales(product_id) GLOBAL;
+
+EXECUTE subpartition_get_partitionno('list_range_sales');
+
+-- add/drop partition
+ALTER TABLE list_range_sales DROP PARTITION channel3;
+ALTER TABLE list_range_sales ADD PARTITION channel3 VALUES ('6', '7')
+(
+ SUBPARTITION channel3_customer1 VALUES LESS THAN (200),
+ SUBPARTITION channel3_customer2 VALUES LESS THAN (500),
+ SUBPARTITION channel3_customer3 VALUES LESS THAN (800)
+);
+ALTER TABLE list_range_sales ADD PARTITION channel5 VALUES (DEFAULT);
+ALTER TABLE list_range_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_customer2 VALUES LESS THAN (2000);
+EXECUTE subpartition_get_partitionno('list_range_sales');
+
+-- merge/split partition
+ALTER TABLE list_range_sales SPLIT SUBPARTITION channel2_customer2 AT (800) INTO (SUBPARTITION channel2_customer3, SUBPARTITION channel2_customer4);
+EXECUTE subpartition_get_partitionno('list_range_sales');
+
+-- truncate partition with gpi
+ALTER TABLE list_range_sales TRUNCATE PARTITION channel1 UPDATE GLOBAL INDEX;
+ALTER TABLE list_range_sales TRUNCATE SUBPARTITION channel4_customer1 UPDATE GLOBAL INDEX;
+EXECUTE subpartition_get_partitionno('list_range_sales');
+
+-- vacuum full
+VACUUM FULL list_range_sales;
+EXECUTE subpartition_get_partitionno('list_range_sales');
+
+--reset
+ALTER TABLE list_range_sales RESET PARTITION;
+EXECUTE subpartition_get_partitionno('list_range_sales');
+
+DROP TABLE list_range_sales;
+
+-- clean
+DEALLOCATE partition_get_partitionno;
+DEALLOCATE subpartition_get_partitionno;
+
+DROP SCHEMA partitionno CASCADE;
diff --git a/src/test/regress/sql/hw_subpartition_createtable.sql b/src/test/regress/sql/hw_subpartition_createtable.sql
index cd72ff03a..a3193db87 100644
--- a/src/test/regress/sql/hw_subpartition_createtable.sql
+++ b/src/test/regress/sql/hw_subpartition_createtable.sql
@@ -815,29 +815,6 @@ drop table list_list;
--1.4 subpartition key check
--- 一级分区和二级分区分区键是同一列
-
-CREATE TABLE list_list
-(
- month_code VARCHAR2 ( 30 ) NOT NULL ,
- dept_code VARCHAR2 ( 30 ) NOT NULL ,
- user_no VARCHAR2 ( 30 ) NOT NULL ,
- sales_amt int
-)
-PARTITION BY LIST (month_code) SUBPARTITION BY LIST (month_code)
-(
- PARTITION p_201901 VALUES ( '201902' )
- (
- SUBPARTITION p_201901_a VALUES ( '1' ),
- SUBPARTITION p_201901_b VALUES ( '2' )
- ),
- PARTITION p_201902 VALUES ( '201903' )
- (
- SUBPARTITION p_201902_a VALUES ( '1' ),
- SUBPARTITION p_201902_b VALUES ( '2' )
- )
-);
-
--二级分区的键值一样
CREATE TABLE list_list
@@ -1567,6 +1544,290 @@ PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
create table t1(like range_list including partition);
drop table range_list;
+-- test the key of partition and subpartition is same column
+CREATE TABLE list_list
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (month_code)
+(
+ PARTITION p_201901 VALUES ( '201902' )
+ (
+ SUBPARTITION p_201901_a VALUES ( '201902' ),
+ SUBPARTITION p_201901_b VALUES ( '2' )
+ ),
+ PARTITION p_201902 VALUES ( '201903' )
+ (
+ SUBPARTITION p_201902_a VALUES ( '1' ),
+ SUBPARTITION p_201902_b VALUES ( '2' )
+ )
+);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+insert into list_list values('2', '2', '1', 1);
+EXPLAIN (costs false)
+SELECT * FROM list_list SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM list_list WHERE month_code = '201902' ORDER BY 1,2;
+drop table list_list;
+
+CREATE TABLE list_hash
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY HASH (month_code)
+(
+ PARTITION p_201901 VALUES ( '201902' )
+ (
+ SUBPARTITION p_201901_a,
+ SUBPARTITION p_201901_b
+ ),
+ PARTITION p_201902 VALUES ( '201903' )
+ (
+ SUBPARTITION p_201902_a,
+ SUBPARTITION p_201902_b
+ )
+);
+insert into list_hash values('201902', '1', '1', 1);
+insert into list_hash values('201902', '2', '1', 1);
+insert into list_hash values('201903', '5', '1', 1);
+insert into list_hash values('201903', '6', '1', 1);
+SELECT * FROM list_hash SUBPARTITION (p_201901_a) ORDER BY 1,2;
+SELECT * FROM list_hash SUBPARTITION (p_201901_b) ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM list_hash SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM list_hash WHERE month_code = '201903' ORDER BY 1,2;
+drop table list_hash;
+
+CREATE TABLE list_range
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (month_code)
+(
+ PARTITION p_201901 VALUES ( '201902' )
+ (
+ SUBPARTITION p_201901_a values less than ('1'),
+ SUBPARTITION p_201901_b values less than ('2')
+ ),
+ PARTITION p_201902 VALUES ( '201903' )
+ (
+ SUBPARTITION p_201902_a values less than ('3'),
+ SUBPARTITION p_201902_b values less than ('4')
+ )
+);
+insert into list_range values('201902', '1', '1', 1);
+insert into list_range values('201903', '2', '1', 1);
+SELECT * FROM list_range SUBPARTITION (p_201902_a) ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM list_range SUBPARTITION FOR ('201903','201903') ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM list_range WHERE month_code = '201903' ORDER BY 1,2;
+drop table list_range;
+
+CREATE TABLE range_list
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (month_code)
+(
+ PARTITION p_201901 VALUES LESS THAN( '201903' )
+ (
+ SUBPARTITION p_201901_a values ('201901'),
+ SUBPARTITION p_201901_b values ('201902')
+ ),
+ PARTITION p_201902 VALUES LESS THAN( '201904' )
+ (
+ SUBPARTITION p_201902_a values ('201903'),
+ SUBPARTITION p_201902_b values ('201904')
+ )
+);
+insert into range_list values('201901', '1', '1', 1);
+insert into range_list values('201902', '2', '1', 1);
+insert into range_list values('201903', '1', '1', 1);
+insert into range_list values('201904', '2', '1', 1);
+EXPLAIN (costs false)
+SELECT * FROM range_list SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM range_list WHERE month_code = '201903' ORDER BY 1,2;
+drop table range_list;
+
+CREATE TABLE range_hash
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (month_code)
+(
+ PARTITION p_201901 VALUES LESS THAN( '201903' )
+ (
+ SUBPARTITION p_201901_a,
+ SUBPARTITION p_201901_b
+ ),
+ PARTITION p_201902 VALUES LESS THAN( '201904' )
+ (
+ SUBPARTITION p_201902_a,
+ SUBPARTITION p_201902_b
+ )
+);
+insert into range_hash values('201901', '1', '1', 1);
+insert into range_hash values('201902', '2', '1', 1);
+insert into range_hash values('201903', '2', '1', 1);
+insert into range_hash values('20190322', '1', '1', 1);
+SELECT * FROM range_hash SUBPARTITION (p_201901_a) ORDER BY 1,2;
+SELECT * FROM range_hash SUBPARTITION (p_201901_b) ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM range_hash SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM range_hash WHERE month_code = '201903' ORDER BY 1,2;
+drop table range_hash;
+
+CREATE TABLE range_range
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (month_code)
+(
+ PARTITION p_201901 VALUES LESS THAN( '201903' )
+ (
+ SUBPARTITION p_201901_a VALUES LESS THAN( '20190220' ),
+ SUBPARTITION p_201901_b VALUES LESS THAN( '20190230' )
+ ),
+ PARTITION p_201902 VALUES LESS THAN( '201904' )
+ (
+ SUBPARTITION p_201902_a VALUES LESS THAN( '20190320' ),
+ SUBPARTITION p_201902_b VALUES LESS THAN( '20190330' )
+ )
+);
+insert into range_range values('201902', '1', '1', 1);
+insert into range_range values('20190222', '2', '1', 1);
+insert into range_range values('201903', '2', '1', 1);
+insert into range_range values('20190322', '1', '1', 1);
+insert into range_range values('20190333', '2', '1', 1);
+EXPLAIN (costs false)
+SELECT * FROM range_range SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM range_range WHERE month_code = '201903' ORDER BY 1,2;
+drop table range_range;
+
+CREATE TABLE hash_list
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+)
+PARTITION BY hash (month_code) SUBPARTITION BY LIST (month_code)
+(
+ PARTITION p_201901
+ (
+ SUBPARTITION p_201901_a VALUES ( '201901' ),
+ SUBPARTITION p_201901_b VALUES ( '201902' )
+ ),
+ PARTITION p_201902
+ (
+ SUBPARTITION p_201902_a VALUES ( '201901' ),
+ SUBPARTITION p_201902_b VALUES ( '201902' )
+ )
+);
+insert into hash_list values('201901', '1', '1', 1);
+insert into hash_list values('201901', '2', '1', 1);
+insert into hash_list values('201902', '1', '1', 1);
+insert into hash_list values('201902', '2', '1', 1);
+insert into hash_list values('201903', '2', '1', 1);
+SELECT * FROM hash_list SUBPARTITION (p_201901_a) ORDER BY 1,2;
+SELECT * FROM hash_list SUBPARTITION (p_201901_b) ORDER BY 1,2;
+SELECT * FROM hash_list SUBPARTITION (p_201902_a) ORDER BY 1,2;
+SELECT * FROM hash_list SUBPARTITION (p_201902_b) ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM hash_list SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM hash_list WHERE month_code = '201901' ORDER BY 1,2;
+drop table hash_list;
+
+CREATE TABLE hash_hash
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+)
+PARTITION BY hash (month_code) SUBPARTITION BY hash (month_code)
+(
+ PARTITION p_201901
+ (
+ SUBPARTITION p_201901_a,
+ SUBPARTITION p_201901_b
+ ),
+ PARTITION p_201902
+ (
+ SUBPARTITION p_201902_a,
+ SUBPARTITION p_201902_b
+ )
+);
+insert into hash_hash values('201901', '1', '1', 1);
+insert into hash_hash values('201901', '2', '1', 1);
+insert into hash_hash values('201903', '1', '1', 1);
+insert into hash_hash values('201903', '2', '1', 1);
+EXPLAIN (costs false)
+SELECT * FROM hash_hash SUBPARTITION FOR ('201901','201901') ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM hash_hash WHERE month_code = '201903' ORDER BY 1,2;
+drop table hash_hash;
+
+CREATE TABLE hash_range
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+)
+PARTITION BY hash (month_code) SUBPARTITION BY range (month_code)
+(
+ PARTITION p_201901
+ (
+ SUBPARTITION p_201901_a VALUES LESS THAN ( '201902' ),
+ SUBPARTITION p_201901_b VALUES LESS THAN ( '201903' )
+ ),
+ PARTITION p_201902
+ (
+ SUBPARTITION p_201902_a VALUES LESS THAN ( '201902' ),
+ SUBPARTITION p_201902_b VALUES LESS THAN ( '201903' )
+ )
+);
+insert into hash_range values('201901', '1', '1', 1);
+insert into hash_range values('201901', '2', '1', 1);
+insert into hash_range values('201902', '1', '1', 1);
+insert into hash_range values('201902', '2', '1', 1);
+insert into hash_range values('201903', '2', '1', 1);
+SELECT * FROM hash_range SUBPARTITION (p_201901_a) ORDER BY 1,2;
+SELECT * FROM hash_range SUBPARTITION (p_201901_b) ORDER BY 1,2;
+SELECT * FROM hash_range SUBPARTITION (p_201902_a) ORDER BY 1,2;
+SELECT * FROM hash_range SUBPARTITION (p_201902_b) ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM hash_range SUBPARTITION FOR ('201901','201901') ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM hash_range WHERE month_code = '201902' ORDER BY 1,2;
+drop table hash_range;
+
--clean
DROP SCHEMA subpartition_createtable CASCADE;
diff --git a/src/test/regress/sql/hw_subpartition_split.sql b/src/test/regress/sql/hw_subpartition_split.sql
index dd950d1b7..9cfb64467 100644
--- a/src/test/regress/sql/hw_subpartition_split.sql
+++ b/src/test/regress/sql/hw_subpartition_split.sql
@@ -75,6 +75,11 @@ alter table list_list split subpartition p_201902_a values (3) into
subpartition p_201902_ab,
subpartition p_201902_ac
);
+alter table list_list split subpartition p_201902_c values ((2,3),(3,4)) into
+(
+ subpartition p_201902_c,
+ subpartition p_201902_d
+);
drop table list_list;
diff --git a/src/test/regress/sql/merge_where_col.sql b/src/test/regress/sql/merge_where_col.sql
index e573757c9..0b58437f1 100644
--- a/src/test/regress/sql/merge_where_col.sql
+++ b/src/test/regress/sql/merge_where_col.sql
@@ -83,5 +83,25 @@ USING tb_a at
SELECT * FROM tb_b ORDER BY 1;
ROLLBACK;
+create table col_com_base_1(
+col_int integer,
+col_double double precision,
+col_date date
+);
+
+create table col_com_base_2(
+col_int integer,
+col_double double precision,
+col_date date
+);
+
+MERGE INTO col_com_base_1 Table_004 USING col_com_base_2 Table_003
+ ON ( Table_003.col_double = Table_004.col_double )
+WHEN MATCHED THEN UPDATE SET col_date = col_date
+WHERE Table_004.col_int = ( select SUM(Table_004.col_int) from col_com_base_1);
+
+UPDATE col_com_base_1 Table_004 SET col_int = 2 where Table_004.col_int = ( select SUM(Table_004.col_int) from col_com_base_1);
+UPDATE col_com_base_1 Table_004 SET col_int = 2 where Table_004.col_int = ( select SUM(col_int) from col_com_base_1);
+
-- clean up
DROP SCHEMA merge_where_col CASCADE;
diff --git a/src/test/regress/sql/mysql_delimiter.sql b/src/test/regress/sql/mysql_delimiter.sql
index 0ab1459d3..7cf165287 100644
--- a/src/test/regress/sql/mysql_delimiter.sql
+++ b/src/test/regress/sql/mysql_delimiter.sql
@@ -19,7 +19,12 @@ select 1//
delimiter ;//
--Test delimiter length
-delimiter aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
+delimiter ""
+delimiter ''
+delimiter aaaaaaaaaaaaaaaa
+delimiter "aaaaaaaaaaaaaaaa"
+delimiter aaaaaaaaaaaaaaa
+delimiter ;
--Test delimiter %
delimiter %;
diff --git a/src/test/regress/sql/partition_pruning.sql b/src/test/regress/sql/partition_pruning.sql
new file mode 100644
index 000000000..4772021af
--- /dev/null
+++ b/src/test/regress/sql/partition_pruning.sql
@@ -0,0 +1,51 @@
+DROP SCHEMA partition_pruning;
+CREATE SCHEMA partition_pruning;
+SET CURRENT_SCHEMA TO partition_pruning;
+
+drop table test_range;
+create table test_range (a int, b int, c int) WITH (STORAGE_TYPE=USTORE)
+partition by range(a)
+(
+ partition p1 values less than (2000),
+ partition p2 values less than (3000),
+ partition p3 values less than (4000),
+ partition p4 values less than (5000),
+ partition p5 values less than (maxvalue)
+)ENABLE ROW MOVEMENT;
+
+insert into test_range values(1,1,1);
+insert into test_range values(3001,1,1);
+
+prepare p1 as select * from test_range where ctid = '(0,1)' and a = $1;
+explain (costs off)execute p1(1);
+execute p1(1);
+execute p1(3001);
+drop table test_range;
+
+drop table test_range_pt;
+create table test_range_pt (a int, b int, c int)
+partition by range(a)
+(
+ partition p1 values less than (2000),
+ partition p2 values less than (3000),
+ partition p3 values less than (4000),
+ partition p4 values less than (5000),
+ partition p5 values less than (maxvalue)
+)ENABLE ROW MOVEMENT;
+insert into test_range_pt values(1,1),(2001,2),(3001,3),(4001,4),(5001,5);
+
+deallocate p1;
+prepare p1 as select * from test_range_pt where a = $1 or a is null;
+explain (costs off)execute p1(2001);
+execute p1(2001);
+deallocate p1;
+prepare p1 as select * from test_range_pt where a = $1 or a = $2;
+explain (costs off)execute p1(2001,3001);
+execute p1(2001,3001);
+deallocate p1;
+prepare p1 as select * from test_range_pt where a = $1 and a = $2;
+explain (costs off)execute p1(2001,3001);
+execute p1(2001,3001);
+drop table test_range_pt;
+
+DROP SCHEMA partition_pruning;
diff --git a/src/test/regress/sql/plan_hint.sql b/src/test/regress/sql/plan_hint.sql
index 4e5813282..6fff38948 100644
--- a/src/test/regress/sql/plan_hint.sql
+++ b/src/test/regress/sql/plan_hint.sql
@@ -772,6 +772,98 @@ explain (costs off) select /*+ no broadcast(hint_vec) */ a from hint_vec where a
explain (costs off) select /*+ no redistribute(hint_vec) */ a from hint_vec where a = 10;
explain (costs off) select /*+ skew(hint_vec(a)) */ a from hint_vec where a = 10;
+explain (costs off) select /*+indexscan(''')*/ 1;
+explain (costs off) select /*+indexscan(""")*/ 1;
+explain (costs off) select /*+indexscan($$$)*/ 1;
+create table subpartition_hash_hash (
+ c1 int,
+ c2 int,
+ c3 text,
+ c4 varchar(20),
+ c5 int generated always as(2 * c1) stored
+) partition by hash(c1) subpartition by hash(c2) (
+ partition p1 (
+ subpartition p1_1,
+ subpartition p1_2,
+ subpartition p1_3,
+ subpartition p1_4,
+ subpartition p1_5
+ ),
+ partition p2 (
+ subpartition p2_1,
+ subpartition p2_2,
+ subpartition p2_3,
+ subpartition p2_4,
+ subpartition p2_5
+ ),
+ partition p3 (
+ subpartition p3_1,
+ subpartition p3_2,
+ subpartition p3_3,
+ subpartition p3_4,
+ subpartition p3_5
+ ),
+ partition p4 (
+ subpartition p4_1,
+ subpartition p4_2,
+ subpartition p4_3,
+ subpartition p4_4,
+ subpartition p4_5
+ ),
+ partition p5 (
+ subpartition p5_1,
+ subpartition p5_2,
+ subpartition p5_3,
+ subpartition p5_4,
+ subpartition p5_5
+ )
+);
+create index subpartition_hash_hash_i1 on subpartition_hash_hash(c1) local;
+create index subpartition_hash_hash_i2 on subpartition_hash_hash(c2) local;
+create index subpartition_hash_hash_i3 on subpartition_hash_hash(c3) local;
+create index subpartition_hash_hash_i4 on subpartition_hash_hash(c4) local;
+create index subpartition_hash_hash_i5 on subpartition_hash_hash(c5) local;
+create table partition_range (c1 int, c2 int, c3 text, c4 varchar(20)) with(orientation = column) partition by range(c1, c2) (
+ partition p1
+ values less than(10000, 10000),
+ partition p2
+ values less than(20000, 20000),
+ partition p3
+ values less than(30000, 30000),
+ partition p4
+ values less than(40000, 40000),
+ partition p5
+ values less than(50000, 50000),
+ partition p6
+ values less than(60000, 60000),
+ partition p7
+ values less than(70000, 70000),
+ partition p8
+ values less than(80000, 80000),
+ partition p9
+ values less than(90000, 90000),
+ partition p10
+ values less than(MAXVALUE, MAXVALUE)
+);
+create index partition_range_i1 on partition_range using btree(c1) local;
+create index partition_range_i2 on partition_range using psort(c2) local;
+create index partition_range_i3 on partition_range using btree(c3) local;
+create index partition_range_i4 on partition_range using btree(c4) local;
+
+explain (analyse,timing off,costs off) create table tb_create_merge_append6 as (
+ select
+ /*+ indexscan(subpartition_hash_hash subpartition_hash_hash_i1)*/
+ subpartition_hash_hash.c1 c1,
+ subpartition_hash_hash.c3 c2,
+ partition_range.c1 c3
+ from subpartition_hash_hash
+ join partition_range on subpartition_hash_hash.c2 = partition_range.c2
+ and subpartition_hash_hash.c1 > 8888
+ and subpartition_hash_hash.c1 < 88888
+ order by subpartition_hash_hash.c1
+ limit 100 offset 10
+);
+
drop view hint_view_1;
drop view hint_view_2;
drop view hint_view_3;
diff --git a/src/test/regress/sql/query_rewrite.sql b/src/test/regress/sql/query_rewrite.sql
index e8b7a2235..03589f6d7 100644
--- a/src/test/regress/sql/query_rewrite.sql
+++ b/src/test/regress/sql/query_rewrite.sql
@@ -170,5 +170,22 @@ create table customer(c_birth_month int);
-- Scenario:3
select 1 from customer where c_birth_month not in (with tmp1 as (select 1 from now()) select * from tmp1);
+--fix bug: Error hint: TableScan(seq_t0), relation name "seq_t0" is not found.
+drop table if exists seq_t0;
+drop table if exists seq_t1;
+create table seq_t0(a int, b int8 );
+create table seq_t1(a int, b int8 );
+explain (costs off) select /*+ tablescan(seq_t0) */ b from seq_t0 union all select /*+ tablescan(seq_t1) */ b from seq_t1;
+--test pulling up sublinks: in orclause.
+drop table if exists t1;
+drop table if exists t2;
+create table t1(c1 int, c2 int, c3 int);
+create table t2(c1 int, c2 int, c3 int);
+insert into t1 values(1,0),(2,0),(1,0),(2,1),(1,1),(1,0),(2,0),(1,0),(2,1),(1,1),(2,3),(2,1),(1,2);
+insert into t2 values(1,0,1),(2,0,2),(1,0,1),(2,1,1),(1,1,0),(1,0,1),(2,0,2),(1,0,1),(2,1,1),(1,1,0),(0,0,1);
+explain (verbose, costs off) select * from t2 where t2.c1 in (select t1.c1 from t1 group by t1.c1, t1.c2) or t2.c2 = 1;
+drop table if exists t1;
+drop table if exists t2;
+
drop schema query_rewrite cascade;
reset current_schema;
diff --git a/src/test/regress/sql/segment_subpartition_createtable.sql b/src/test/regress/sql/segment_subpartition_createtable.sql
index 794c9e904..6983afec6 100644
--- a/src/test/regress/sql/segment_subpartition_createtable.sql
+++ b/src/test/regress/sql/segment_subpartition_createtable.sql
@@ -815,29 +815,6 @@ drop table list_list;
--1.4 subpartition key check
--- 一级分区和二级分区分区键是同一列
-
-CREATE TABLE list_list
-(
- month_code VARCHAR2 ( 30 ) NOT NULL ,
- dept_code VARCHAR2 ( 30 ) NOT NULL ,
- user_no VARCHAR2 ( 30 ) NOT NULL ,
- sales_amt int
-) WITH (SEGMENT=ON)
-PARTITION BY LIST (month_code) SUBPARTITION BY LIST (month_code)
-(
- PARTITION p_201901 VALUES ( '201902' )
- (
- SUBPARTITION p_201901_a VALUES ( '1' ),
- SUBPARTITION p_201901_b VALUES ( '2' )
- ),
- PARTITION p_201902 VALUES ( '201903' )
- (
- SUBPARTITION p_201902_a VALUES ( '1' ),
- SUBPARTITION p_201902_b VALUES ( '2' )
- )
-);
-
--二级分区的键值一样
CREATE TABLE list_list
@@ -1399,6 +1376,372 @@ PARTITION BY HASH (col_19) SUBPARTITION BY RANGE (col_2)
PARTITION p_hash_7
) ENABLE ROW MOVEMENT;
drop table hash_range;
+
+-- test create table like only support range_range in subpartition
+CREATE TABLE range_range
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+ PARTITION p_201901 VALUES LESS THAN( '201903' )
+ (
+ SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+ SUBPARTITION p_201901_b VALUES LESS THAN( '3' )
+ ),
+ PARTITION p_201902 VALUES LESS THAN( '201904' )
+ (
+ SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+ SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+ )
+);
+
+create table t1(like range_range including partition);
+insert into t1 values('201902', '1', '1', 1);
+insert into t1 values('201902', '2', '1', 1);
+insert into t1 values('201902', '1', '1', 1);
+insert into t1 values('201903', '2', '1', 1);
+insert into t1 values('201903', '1', '1', 1);
+insert into t1 values('201903', '2', '1', 1);
+
+explain (costs off) select * from t1;
+select * from t1;
+drop table t1;
+drop table range_range;
+
+CREATE TABLE list_list
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+ PARTITION p_201901 VALUES ( '201902' )
+ (
+ SUBPARTITION p_201901_a VALUES ( '1' ),
+ SUBPARTITION p_201901_b VALUES ( '2' )
+ ),
+ PARTITION p_201902 VALUES ( '201903' )
+ (
+ SUBPARTITION p_201902_a VALUES ( '1' ),
+ SUBPARTITION p_201902_b VALUES ( '2' )
+ )
+);
+create table t1(like list_list including partition);
+drop table list_list;
+
+CREATE TABLE range_list
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
+(
+ PARTITION p_201901 VALUES LESS THAN( '201903' )
+ (
+ SUBPARTITION p_201901_a values ('1'),
+ SUBPARTITION p_201901_b values ('2')
+ ),
+ PARTITION p_201902 VALUES LESS THAN( '201904' )
+ (
+ SUBPARTITION p_201902_a values ('1'),
+ SUBPARTITION p_201902_b values ('2')
+ )
+);
+create table t1(like range_list including partition);
+drop table range_list;
+
+-- test the key of partition and subpartition is same column
+CREATE TABLE list_list
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (month_code)
+(
+ PARTITION p_201901 VALUES ( '201902' )
+ (
+ SUBPARTITION p_201901_a VALUES ( '201902' ),
+ SUBPARTITION p_201901_b VALUES ( '2' )
+ ),
+ PARTITION p_201902 VALUES ( '201903' )
+ (
+ SUBPARTITION p_201902_a VALUES ( '1' ),
+ SUBPARTITION p_201902_b VALUES ( '2' )
+ )
+);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+insert into list_list values('2', '2', '1', 1);
+EXPLAIN (costs false)
+SELECT * FROM list_list SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM list_list WHERE month_code = '201902' ORDER BY 1,2;
+drop table list_list;
+
+CREATE TABLE list_hash
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY LIST (month_code) SUBPARTITION BY HASH (month_code)
+(
+ PARTITION p_201901 VALUES ( '201902' )
+ (
+ SUBPARTITION p_201901_a,
+ SUBPARTITION p_201901_b
+ ),
+ PARTITION p_201902 VALUES ( '201903' )
+ (
+ SUBPARTITION p_201902_a,
+ SUBPARTITION p_201902_b
+ )
+);
+insert into list_hash values('201902', '1', '1', 1);
+insert into list_hash values('201902', '2', '1', 1);
+insert into list_hash values('201903', '5', '1', 1);
+insert into list_hash values('201903', '6', '1', 1);
+SELECT * FROM list_hash SUBPARTITION (p_201901_a) ORDER BY 1,2;
+SELECT * FROM list_hash SUBPARTITION (p_201901_b) ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM list_hash SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM list_hash WHERE month_code = '201903' ORDER BY 1,2;
+drop table list_hash;
+
+CREATE TABLE list_range
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (month_code)
+(
+ PARTITION p_201901 VALUES ( '201902' )
+ (
+ SUBPARTITION p_201901_a values less than ('1'),
+ SUBPARTITION p_201901_b values less than ('2')
+ ),
+ PARTITION p_201902 VALUES ( '201903' )
+ (
+ SUBPARTITION p_201902_a values less than ('3'),
+ SUBPARTITION p_201902_b values less than ('4')
+ )
+);
+insert into list_range values('201902', '1', '1', 1);
+insert into list_range values('201903', '2', '1', 1);
+SELECT * FROM list_range SUBPARTITION (p_201902_a) ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM list_range SUBPARTITION FOR ('201903','201903') ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM list_range WHERE month_code = '201903' ORDER BY 1,2;
+drop table list_range;
+
+CREATE TABLE range_list
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (month_code)
+(
+ PARTITION p_201901 VALUES LESS THAN( '201903' )
+ (
+ SUBPARTITION p_201901_a values ('201901'),
+ SUBPARTITION p_201901_b values ('201902')
+ ),
+ PARTITION p_201902 VALUES LESS THAN( '201904' )
+ (
+ SUBPARTITION p_201902_a values ('201903'),
+ SUBPARTITION p_201902_b values ('201904')
+ )
+);
+insert into range_list values('201901', '1', '1', 1);
+insert into range_list values('201902', '2', '1', 1);
+insert into range_list values('201903', '1', '1', 1);
+insert into range_list values('201904', '2', '1', 1);
+EXPLAIN (costs false)
+SELECT * FROM range_list SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM range_list WHERE month_code = '201903' ORDER BY 1,2;
+drop table range_list;
+
+CREATE TABLE range_hash
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (month_code)
+(
+ PARTITION p_201901 VALUES LESS THAN( '201903' )
+ (
+ SUBPARTITION p_201901_a,
+ SUBPARTITION p_201901_b
+ ),
+ PARTITION p_201902 VALUES LESS THAN( '201904' )
+ (
+ SUBPARTITION p_201902_a,
+ SUBPARTITION p_201902_b
+ )
+);
+insert into range_hash values('201901', '1', '1', 1);
+insert into range_hash values('201902', '2', '1', 1);
+insert into range_hash values('201903', '2', '1', 1);
+insert into range_hash values('20190322', '1', '1', 1);
+SELECT * FROM range_hash SUBPARTITION (p_201901_a) ORDER BY 1,2;
+SELECT * FROM range_hash SUBPARTITION (p_201901_b) ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM range_hash SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM range_hash WHERE month_code = '201903' ORDER BY 1,2;
+drop table range_hash;
+
+CREATE TABLE range_range
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (month_code)
+(
+ PARTITION p_201901 VALUES LESS THAN( '201903' )
+ (
+ SUBPARTITION p_201901_a VALUES LESS THAN( '20190220' ),
+ SUBPARTITION p_201901_b VALUES LESS THAN( '20190230' )
+ ),
+ PARTITION p_201902 VALUES LESS THAN( '201904' )
+ (
+ SUBPARTITION p_201902_a VALUES LESS THAN( '20190320' ),
+ SUBPARTITION p_201902_b VALUES LESS THAN( '20190330' )
+ )
+);
+insert into range_range values('201902', '1', '1', 1);
+insert into range_range values('20190222', '2', '1', 1);
+insert into range_range values('201903', '2', '1', 1);
+insert into range_range values('20190322', '1', '1', 1);
+insert into range_range values('20190333', '2', '1', 1);
+EXPLAIN (costs false)
+SELECT * FROM range_range SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM range_range WHERE month_code = '201903' ORDER BY 1,2;
+drop table range_range;
+
+CREATE TABLE hash_list
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY hash (month_code) SUBPARTITION BY LIST (month_code)
+(
+ PARTITION p_201901
+ (
+ SUBPARTITION p_201901_a VALUES ( '201901' ),
+ SUBPARTITION p_201901_b VALUES ( '201902' )
+ ),
+ PARTITION p_201902
+ (
+ SUBPARTITION p_201902_a VALUES ( '201901' ),
+ SUBPARTITION p_201902_b VALUES ( '201902' )
+ )
+);
+insert into hash_list values('201901', '1', '1', 1);
+insert into hash_list values('201901', '2', '1', 1);
+insert into hash_list values('201902', '1', '1', 1);
+insert into hash_list values('201902', '2', '1', 1);
+insert into hash_list values('201903', '2', '1', 1);
+SELECT * FROM hash_list SUBPARTITION (p_201901_a) ORDER BY 1,2;
+SELECT * FROM hash_list SUBPARTITION (p_201901_b) ORDER BY 1,2;
+SELECT * FROM hash_list SUBPARTITION (p_201902_a) ORDER BY 1,2;
+SELECT * FROM hash_list SUBPARTITION (p_201902_b) ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM hash_list SUBPARTITION FOR ('201902','201902') ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM hash_list WHERE month_code = '201901' ORDER BY 1,2;
+drop table hash_list;
+
+CREATE TABLE hash_hash
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY hash (month_code) SUBPARTITION BY hash (month_code)
+(
+ PARTITION p_201901
+ (
+ SUBPARTITION p_201901_a,
+ SUBPARTITION p_201901_b
+ ),
+ PARTITION p_201902
+ (
+ SUBPARTITION p_201902_a,
+ SUBPARTITION p_201902_b
+ )
+);
+insert into hash_hash values('201901', '1', '1', 1);
+insert into hash_hash values('201901', '2', '1', 1);
+insert into hash_hash values('201903', '1', '1', 1);
+insert into hash_hash values('201903', '2', '1', 1);
+EXPLAIN (costs false)
+SELECT * FROM hash_hash SUBPARTITION FOR ('201901','201901') ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM hash_hash WHERE month_code = '201903' ORDER BY 1,2;
+drop table hash_hash;
+
+CREATE TABLE hash_range
+(
+ month_code VARCHAR2 ( 30 ) NOT NULL ,
+ dept_code VARCHAR2 ( 30 ) NOT NULL ,
+ user_no VARCHAR2 ( 30 ) NOT NULL ,
+ sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY hash (month_code) SUBPARTITION BY range (month_code)
+(
+ PARTITION p_201901
+ (
+ SUBPARTITION p_201901_a VALUES LESS THAN ( '201902' ),
+ SUBPARTITION p_201901_b VALUES LESS THAN ( '201903' )
+ ),
+ PARTITION p_201902
+ (
+ SUBPARTITION p_201902_a VALUES LESS THAN ( '201902' ),
+ SUBPARTITION p_201902_b VALUES LESS THAN ( '201903' )
+ )
+);
+insert into hash_range values('201901', '1', '1', 1);
+insert into hash_range values('201901', '2', '1', 1);
+insert into hash_range values('201902', '1', '1', 1);
+insert into hash_range values('201902', '2', '1', 1);
+insert into hash_range values('201903', '2', '1', 1);
+SELECT * FROM hash_range SUBPARTITION (p_201901_a) ORDER BY 1,2;
+SELECT * FROM hash_range SUBPARTITION (p_201901_b) ORDER BY 1,2;
+SELECT * FROM hash_range SUBPARTITION (p_201902_a) ORDER BY 1,2;
+SELECT * FROM hash_range SUBPARTITION (p_201902_b) ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM hash_range SUBPARTITION FOR ('201901','201901') ORDER BY 1,2;
+EXPLAIN (costs false)
+SELECT * FROM hash_range WHERE month_code = '201902' ORDER BY 1,2;
+drop table hash_range;
+
--clean
DROP SCHEMA segment_subpartition_createtable CASCADE;
RESET CURRENT_SCHEMA;
diff --git a/src/test/regress/sql/set_transaction_test.sql b/src/test/regress/sql/set_transaction_test.sql
new file mode 100644
index 000000000..5762fbbf4
--- /dev/null
+++ b/src/test/regress/sql/set_transaction_test.sql
@@ -0,0 +1,57 @@
+\h SET TRANSACTION
+SET GLOBAL TRANSACTION READ ONLY;
+SET SESSION TRANSACTION READ ONLY;
+create database test_set_tran dbcompatibility 'b';
+\c test_set_tran
+SET SESSION TRANSACTION READ ONLY;
+CREATE DATABASE test_set_tran1;
+set b_format_behavior_compat_options = 'set_session_transaction';
+SET SESSION TRANSACTION READ ONLY;
+CREATE DATABASE test_set_tran2;
+\c test_set_tran;
+CREATE DATABASE test_set_tran2;
+\c test_set_tran;
+SET GLOBAL TRANSACTION READ ONLY;
+CREATE DATABASE test_set_tran3;
+\c test_set_tran;
+CREATE DATABASE test_set_tran4;
+SET GLOBAL TRANSACTION READ WRITE;
+CREATE DATABASE test_set_tran4;
+\c test_set_tran;
+CREATE DATABASE test_set_tran4;
+SET GLOBAL TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+SET GLOBAL TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY;
+SET GLOBAL TRANSACTION ISOLATION LEVEL SERIALIZABLE READ WRITE;
+SET GLOBAL TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+SET GLOBAL TRANSACTION ISOLATION LEVEL REPEATABLE READ READ ONLY;
+SET GLOBAL TRANSACTION ISOLATION LEVEL REPEATABLE READ READ WRITE;
+SET GLOBAL TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SET GLOBAL TRANSACTION ISOLATION LEVEL READ UNCOMMITTED READ ONLY;
+SET GLOBAL TRANSACTION ISOLATION LEVEL READ UNCOMMITTED READ WRITE;
+SET GLOBAL TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SET GLOBAL TRANSACTION ISOLATION LEVEL READ COMMITTED READ ONLY;
+SET GLOBAL TRANSACTION ISOLATION LEVEL READ COMMITTED READ WRITE;
+\c test_set_tran;
+SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY;
+SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE READ WRITE;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ READ ONLY;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ READ WRITE;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED READ ONLY;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED READ WRITE;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED READ ONLY;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED READ WRITE;
+\c test_set_tran;
+CREATE USER newuser PASSWORD 'gauss@123';
+SET SESSION AUTHORIZATION newuser PASSWORD 'gauss@123';
+SET GLOBAL TRANSACTION ISOLATION LEVEL READ COMMITTED READ ONLY;
+RESET SESSION AUTHORIZATION;
+\c regression;
+DROP DATABASE test_set_tran;
+DROP DATABASE test_set_tran1;
+DROP DATABASE test_set_tran2;
+DROP DATABASE test_set_tran3;
+DROP DATABASE test_set_tran4;
\ No newline at end of file
diff --git a/src/test/regress/sql/single_node_foreign_data.sql b/src/test/regress/sql/single_node_foreign_data.sql
index 9bcc1c7a7..4bce9c784 100644
--- a/src/test/regress/sql/single_node_foreign_data.sql
+++ b/src/test/regress/sql/single_node_foreign_data.sql
@@ -310,6 +310,8 @@ ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 OPTIONS (SET p2 'V2', DROP p1);
ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 SET STATISTICS 10000;
ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 SET (n_distinct = 100);
ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET STATISTICS -1;
+ALTER FOREIGN TABLE ft1 MODIFY COLUMN c8 integer NOT NULL; -- ERROR
+ALTER FOREIGN TABLE ft1 CHANGE COLUMN c8 c88 integer NOT NULL; -- ERROR
\d+ ft1
-- can't change the column type if it's used elsewhere
CREATE TABLE use_ft1_column_type (x ft1);
diff --git a/src/test/regress/sql/single_node_update.sql b/src/test/regress/sql/single_node_update.sql
index 53dfd9d59..8f54ca728 100644
--- a/src/test/regress/sql/single_node_update.sql
+++ b/src/test/regress/sql/single_node_update.sql
@@ -215,6 +215,13 @@ alter table t4 modify b timestamp on update current_timestamp;
alter table t4 modify b timestamp on update localtimestamp;
\d t4
+alter table t4 alter b set default now();
+\d t4;
+alter table t4 change b b1 timestamp on update current_timestamp;
+\d t4
+alter table t4 change b1 b2 timestamp not null default now() on update localtimestamp;
+\d t4
+
CREATE TABLE t5(id int, a timestamp default now() on update current_timestamp, b timestamp on update current_timestamp, c timestamp default now());
\d t5
create table t6 (like t5 including defaults);
diff --git a/src/test/regress/sql/smp.sql b/src/test/regress/sql/smp.sql
index ec4b1a7d4..132d28c92 100644
--- a/src/test/regress/sql/smp.sql
+++ b/src/test/regress/sql/smp.sql
@@ -78,6 +78,32 @@ select (select max(id) from t4);
explain (costs off) select * from (select a, rownum as row from (select a from t3) where rownum <= 10) where row >=5;
select * from (select a, rownum as row from (select a from t3) where rownum <= 10) where row >=5;
+create table col_table_001 (id int, name char[] ) with (orientation=column);
+create table col_table_002 (id int, aid int,name char[] ,apple char[]) with (orientation=column);
+insert into col_table_001 values(1, '{a,b,c}' );
+insert into col_table_001 values(2, '{b,b,b}' );
+insert into col_table_001 values(3, '{c,c,c}' );
+insert into col_table_001 values(4, '{a}' );
+insert into col_table_001 values(5, '{b}' );
+insert into col_table_001 values(6, '{c}' );
+insert into col_table_001 values(7, '{a,b,c}' );
+insert into col_table_001 values(8, '{b,c,a}' );
+insert into col_table_001 values(9, '{c,a,b}' );
+insert into col_table_001 values(10, '{c,a,b}' );
+insert into col_table_002 values(11, 1,'{a,s,d}' );
+insert into col_table_002 values(12, 1,'{b,n,m}' );
+insert into col_table_002 values(13, 2,'{c,v,b}' );
+insert into col_table_002 values(14, 1,'{a}' );
+insert into col_table_002 values(15, 1,'{b}' );
+insert into col_table_002 values(15, 2,'{c}' );
+insert into col_table_002 values(17, 1,'{a,s,d}','{a,b,c}' );
+insert into col_table_002 values(18, 1,'{b,n,m}','{a,b,c}' );
+insert into col_table_002 values(19, 2,'{c,v,b}','{a,b,c}');
+insert into col_table_002 values(20, 2,'{c,v,b}','{b,c,a}');
+insert into col_table_002 values(21, 21,'{c,c,b}','{b,c,a}');
+select * from col_table_001 where EXISTS (select * from col_table_002 where col_table_001.name[1] =col_table_002.apple[1]) order by id;
+select * from col_table_001 where EXISTS (select * from col_table_002 where col_table_001.name[1:3] =col_table_002.apple[1:3]) order by id;
+
CREATE TABLE bmsql_item (
i_id int NoT NULL,
i_name varchar(24),
diff --git a/src/test/regress/sql/sw_bugfix-2.sql b/src/test/regress/sql/sw_bugfix-2.sql
index e19e30bc0..ffdde15c6 100644
--- a/src/test/regress/sql/sw_bugfix-2.sql
+++ b/src/test/regress/sql/sw_bugfix-2.sql
@@ -666,3 +666,77 @@ GROUP BY t.manager_id
ORDER BY t.manager_id;
drop table swcb_employees;
+
+-- test start with has sub clause
+DROP TABLE IF EXISTS DAT_DEPARTMENT;
+CREATE TABLE DAT_DEPARTMENT(
+ stru_id nvarchar2(10) NOT NULL,
+ sup_stru nvarchar2(10),
+ stru_state nvarchar2(8)
+)
+WITH (orientation=row, compression=no);
+CREATE INDEX sup_stru_dat_department ON DAT_DEPARTMENT USING btree(sup_stru) TABLESPACE pg_default;
+CREATE INDEX idx_br_dept_stru_id ON DAT_DEPARTMENT USING btree(stru_id) TABLESPACE pg_default;
+insert into DAT_DEPARTMENT(stru_id,sup_stru,stru_state) values('01','02','2');
+insert into DAT_DEPARTMENT(stru_id,sup_stru,stru_state) values('02','01','2');
+SELECT A.STRU_ID DEPTID,LEVEL,CONNECT_BY_ISCYCLE
+FROM DAT_DEPARTMENT A
+START WITH A.STRU_ID IN
+(SELECT B.STRU_ID DEPTID
+FROM DAT_DEPARTMENT B
+WHERE B.SUP_STRU = '01' OR B.SUP_STRU='02'
+)
+CONNECT BY NOCYCLE PRIOR A.STRU_ID =A.SUP_STRU;
+DROP TABLE DAT_DEPARTMENT;
+
+-- test RTE_JOIN in start with
+DROP TABLE IF EXISTS zb_layer;
+DROP TABLE IF EXISTS rtms_dict;
+DROP TABLE IF EXISTS zb_model;
+CREATE TABLE zb_layer(
+ id character varying(20) NOT NULL,
+ zb_code character varying(20),
+ zb_name character varying(20),
+ zb_organ character varying(50),
+ zb_apply character varying(20),
+ zb_layer_standard character varying(20),
+ zb_threshold_value character varying(30),
+ zb_warning_value character varying(20)
+)
+WITH (orientation=row, compression=no);
+CREATE TABLE rtms_dict(
+ id character varying(10),
+ area character varying(20),
+ cn_area character varying(30),
+ code character varying(50),
+ cname character varying(50),
+ locale character varying(10)
+)
+WITH (orientation=row, compression=no);
+CREATE TABLE zb_model(
+ id character varying(10) NOT NULL,
+ zb_code character varying(20),
+ zb_name character varying(300),
+ zb_risk_area character varying(3),
+ zb_parent_id character varying(20),
+ zb_weight character varying(10),
+ zb_layer_flag character varying(3),
+ zb_status character varying(3)
+)
+WITH (orientation=row, compression=no);
+SELECT DISTINCT I.ZB_CODE,D.CNAME,DECODE(I.ZB_LAYER_FLAG,NULL,D.CNAME,I.ZB_NAME) ZBNAME
+FROM ZB_MODEL I
+LEFT JOIN ZB_LAYER N ON I.ZB_CODE = N.ZB_CODE
+LEFT JOIN RTMS_DICT D ON D.CODE = I.ZB_RISK_AREA AND D.AREA = 'RICK_AREA'
+WHERE NVL(I.ZB_STATUS,1) = 1
+AND I.ZB_CODE NOT IN
+(
+ SELECT T.ZB_CODE FROM ZB_MODEL T WHERE T.ZB_RISK_AREA = 2
+)
+CONNECT BY PRIOR I.ZB_CODE = I.ZB_PARENT_ID
+START WITH I.ZB_CODE IN
+(SELECT ZB_CODE FROM ZB_MODEL)
+ORDER BY I.ZB_CODE;
+DROP TABLE zb_layer;
+DROP TABLE rtms_dict;
+DROP TABLE zb_model;
diff --git a/src/test/regress/sql/sw_icbc.sql b/src/test/regress/sql/sw_icbc.sql
index 77f27d859..bf2b59a3b 100644
--- a/src/test/regress/sql/sw_icbc.sql
+++ b/src/test/regress/sql/sw_icbc.sql
@@ -76,13 +76,16 @@ CONNECT BY PRIOR tt.id = tt.pid
START WITH tt.id = 1;
--test correlated sublink in targetlist
-explain select b.id, (select count(a.id) from t1 a where a.pid = b.id) c from t1 b
+explain (costs off) select b.id, (select count(a.id) from t1 a where a.pid = b.id) c from t1 b
start with b.id=1 connect by prior b.id = b.pid;
-explain select * from t1 as test
+explain (costs off) select * from t1 as test
where not exists (select 1 from t1 where test.id = t1.id)
start with test.id = 1 connect by prior test.id = test.pid;
+--test start with in correlated sublink
+explain (costs off) select * from t1 where t1.name = 'test' and exists(select * from t2 where t1.id = id start with name = 'test' connect by prior id = pid);
+
--multiple tables case
explain (costs off) select * from t1, t2 where t1.id = t2.id start with t1.id = t2.id and t1.id = 1 connect by prior t1.id = t1.pid;
explain (costs off) select * from t1 join t2 on t1.id = t2.id start with t1.id = t2.id and t1.id = 1 connect by prior t1.id = t1.pid;
diff --git a/src/test/regress/sql/test_auto_increment.sql b/src/test/regress/sql/test_auto_increment.sql
index dc010441a..63e89bbea 100644
--- a/src/test/regress/sql/test_auto_increment.sql
+++ b/src/test/regress/sql/test_auto_increment.sql
@@ -27,7 +27,8 @@ DROP TABLE test_create_autoinc;
CREATE TABLE test_create_autoinc(
a int AUTO_INCREMENT UNIQUE KEY,
b varchar(32)
-); -- ERROR
+);
+DROP TABLE test_create_autoinc;
CREATE TABLE test_create_autoinc(
a int AUTO_INCREMENT UNIQUE,
@@ -75,6 +76,8 @@ CREATE TABLE test_create_autoinc_err(id int auto_increment primary key CHECK (id
CREATE TABLE test_create_autoinc_err(id int auto_increment primary key, name varchar(200),a int CHECK ((id + a) < 500));
CREATE TABLE test_create_autoinc_err(id int auto_increment primary key DEFAULT 100, name varchar(200),a int);
CREATE TABLE test_create_autoinc_err(id int auto_increment primary key GENERATED ALWAYS AS (a+1) STORED, name varchar(200),a int);
+CREATE TABLE test_create_autoinc_err(id int auto_increment primary key, name varchar(200),a int GENERATED ALWAYS AS (id+1) STORED);
+CREATE TABLE test_create_autoinc_err(id int GENERATED ALWAYS AS (a+1) STORED, name varchar(200),a int auto_increment primary key);
--auto_increment value error
CREATE TABLE test_create_autoinc_err(id int auto_increment primary key, name varchar(200),a int) auto_increment=-1;
@@ -86,13 +89,13 @@ CREATE TEMPORARY TABLE test_create_autoinc_err(id int auto_increment primary key
CREATE TEMPORARY TABLE test_create_autoinc_err(id int auto_increment primary key, name varchar(200),a int) auto_increment=170141183460469231731687303715884105728;
CREATE TEMPORARY TABLE test_create_autoinc_err(id int auto_increment primary key, name varchar(200),a int) auto_increment=1.1;
-- datatype error
-CREATE TABLE test_create_autoinc_err(id SERIAL auto_increment primary key, name varchar(200),a int);
-CREATE TABLE test_create_autoinc_err(id DECIMAL(10,4) auto_increment primary key, name varchar(200),a int);
-CREATE TABLE test_create_autoinc_err(id NUMERIC(10,4) auto_increment primary key, name varchar(200),a int);
-CREATE TABLE test_create_autoinc_err(id text auto_increment primary key, name varchar(200),a int);
-CREATE TABLE test_create_autoinc_err(id oid auto_increment primary key, name varchar(200),a int);
-CREATE TABLE test_create_autoinc_err(id int[] auto_increment primary key, name varchar(200),a int);
-CREATE TABLE test_create_autoinc_err(id int16 auto_increment, name varchar(200),a int, unique(id)) auto_increment=170141183460469231731687303715884105727;
+CREATE TABLE test_create_autoinc_err1(id SERIAL auto_increment primary key, name varchar(200),a int);
+CREATE TABLE test_create_autoinc_err1(id DECIMAL(10,4) auto_increment primary key, name varchar(200),a int);
+CREATE TABLE test_create_autoinc_err1(id NUMERIC(10,4) auto_increment primary key, name varchar(200),a int);
+CREATE TABLE test_create_autoinc_err1(id text auto_increment primary key, name varchar(200),a int);
+CREATE TABLE test_create_autoinc_err1(id oid auto_increment primary key, name varchar(200),a int);
+CREATE TABLE test_create_autoinc_err1(id int[] auto_increment primary key, name varchar(200),a int);
+CREATE TABLE test_create_autoinc_err1(id int16 auto_increment, name varchar(200),a int, unique(id)) auto_increment=170141183460469231731687303715884105727;
-- table type error
CREATE TABLE test_create_autoinc_err(id INTEGER auto_increment, name varchar(200),a int, primary key(id)) with (ORIENTATION=column);
CREATE TABLE test_create_autoinc_err(id INTEGER auto_increment, name varchar(200),a int, primary key(id)) with (ORIENTATION=orc);
@@ -164,6 +167,7 @@ CREATE TABLE test_create_autoinc_like_err(LIKE test_create_autoinc_source INCLUD
CREATE TABLE test_create_autoinc_like_err(a int auto_increment, LIKE test_create_autoinc_source INCLUDING INDEXES);
CREATE TABLE test_create_autoinc_like_err(a int auto_increment primary key, LIKE test_create_autoinc_source);
CREATE TABLE test_create_autoinc_like_err(LIKE test_create_autoinc_source INCLUDING INDEXES) with (ORIENTATION=column);
+CREATE TABLE test_create_autoinc_like_err(LIKE test_create_autoinc_source INCLUDING INDEXES, a int GENERATED ALWAYS AS (id+1) STORED);
--row table
CREATE TABLE test_create_autoinc_like(LIKE test_create_autoinc_source INCLUDING INDEXES);
INSERT INTO test_create_autoinc_like VALUES(DEFAULT);
@@ -215,11 +219,16 @@ CREATE TABLE test_alter_autoinc_col(col int) with (ORIENTATION=column);
INSERT INTO test_alter_autoinc_col VALUES(1);
ALTER TABLE test_alter_autoinc_col ADD COLUMN id int AUTO_INCREMENT primary key;
DROP TABLE test_alter_autoinc_col;
+-- auto_increment and generated column
+CREATE TABLE test_alter_autoinc(col int);
+ALTER TABLE test_alter_autoinc ADD COLUMN a int GENERATED ALWAYS AS (b+1), ADD COLUMN b int auto_increment primary key;
+ALTER TABLE test_alter_autoinc ADD COLUMN a int auto_increment primary key, ADD COLUMN b int GENERATED ALWAYS AS (a+1);
+DROP TABLE test_alter_autoinc;
--astore with data
CREATE TABLE test_alter_autoinc(col int);
INSERT INTO test_alter_autoinc VALUES(1);
INSERT INTO test_alter_autoinc VALUES(2);
-
+ALTER TABLE test_alter_autoinc ADD COLUMN id int AUTO_INCREMENT; -- ERROR
ALTER TABLE test_alter_autoinc ADD COLUMN id int AUTO_INCREMENT primary key;
SELECT id, col FROM test_alter_autoinc ORDER BY 1, 2;
insert into test_alter_autoinc(col) values (3),(4),(5);
@@ -252,7 +261,6 @@ ALTER TABLE test_alter_autoinc DROP CONSTRAINT test_alter_autoinc_pkey;
ALTER TABLE test_alter_autoinc auto_increment=-1;
ALTER TABLE test_alter_autoinc auto_increment=1701411834604692317316873037158841057278;
ALTER TABLE test_alter_autoinc auto_increment=1.1;
-ALTER TABLE test_alter_autoinc MODIFY id BIGINT;
ALTER LARGE SEQUENCE test_alter_autoinc_id_seq1 RESTART;
ALTER LARGE SEQUENCE test_alter_autoinc_id_seq1 maxvalue 90;
ALTER LARGE SEQUENCE test_alter_autoinc_id_seq1 OWNED BY test_alter_autoinc.col;
@@ -271,14 +279,31 @@ ALTER TABLE test_alter_autoinc DROP COLUMN id, ADD id int AUTO_INCREMENT UNIQUE;
INSERT INTO test_alter_autoinc VALUES(8,DEFAULT);
SELECT id, col FROM test_alter_autoinc ORDER BY 1, 2;
DROP TABLE test_alter_autoinc;
+
--test alter table add AUTO_INCREMENT NULL UNIQUE
CREATE TABLE test_alter_autoinc(col int);
INSERT INTO test_alter_autoinc VALUES(1);
INSERT INTO test_alter_autoinc VALUES(2);
ALTER TABLE test_alter_autoinc ADD COLUMN id int AUTO_INCREMENT NULL UNIQUE;
-INSERT INTO test_alter_autoinc VALUES(3,0);
+SELECT id, col FROM test_alter_autoinc ORDER BY 1, 2;
+INSERT INTO test_alter_autoinc VALUES(3,NULL);
+SELECT id, col FROM test_alter_autoinc ORDER BY 1, 2;
+INSERT INTO test_alter_autoinc VALUES(4,0);
SELECT id, col FROM test_alter_autoinc ORDER BY 1, 2;
DROP TABLE test_alter_autoinc;
+
+--test alter table add NULL AUTO_INCREMENT UNIQUE
+CREATE TABLE test_alter_autoinc(col int);
+INSERT INTO test_alter_autoinc VALUES(1);
+INSERT INTO test_alter_autoinc VALUES(2);
+ALTER TABLE test_alter_autoinc ADD COLUMN id int NULL AUTO_INCREMENT UNIQUE;
+SELECT id, col FROM test_alter_autoinc ORDER BY 1, 2;
+INSERT INTO test_alter_autoinc VALUES(3,NULL);
+SELECT id, col FROM test_alter_autoinc ORDER BY 1, 2;
+INSERT INTO test_alter_autoinc VALUES(4,0);
+SELECT id, col FROM test_alter_autoinc ORDER BY 1, 2;
+DROP TABLE test_alter_autoinc;
+
--local temp table with data
CREATE TEMPORARY TABLE test_alter_autoinc_ltemp(col int);
INSERT INTO test_alter_autoinc_ltemp VALUES(1);
@@ -333,7 +358,6 @@ ALTER TABLE test_alter_autoinc_ltemp DROP CONSTRAINT test_alter_autoinc_ltemp_u1
ALTER TABLE test_alter_autoinc_ltemp auto_increment=-1;
ALTER TABLE test_alter_autoinc_ltemp auto_increment=1701411834604692317316873037158841057278;
ALTER TABLE test_alter_autoinc_ltemp auto_increment=1.1;
-ALTER TABLE test_alter_autoinc_ltemp MODIFY id BIGINT;
DROP TABLE test_alter_autoinc_ltemp;
--global temp table with data
CREATE GLOBAL TEMPORARY TABLE test_alter_autoinc_gtemp(col int);
@@ -386,7 +410,7 @@ CREATE TABLE test_alter_autoinc(
a int,
b varchar(32)
);
-ALTER TABLE test_alter_autoinc ADD COLUMN seq int AUTO_INCREMENT, ADD CONSTRAINT test_alter_autoinc_uk UNIQUE ((seq + 1), seq); -- ERROR
+ALTER TABLE test_alter_autoinc ADD COLUMN seq int AUTO_INCREMENT, ADD CONSTRAINT test_alter_autoinc_uk UNIQUE (a, seq); -- ERROR
ALTER TABLE test_alter_autoinc ADD COLUMN seq int AUTO_INCREMENT, ADD CONSTRAINT test_alter_autoinc_uk UNIQUE (seq);
CREATE INDEX test_alter_autoinc_idx1 ON test_alter_autoinc (seq,a);
SELECT pg_get_tabledef('test_alter_autoinc'::regclass);
@@ -458,6 +482,38 @@ SELECT col FROM single_autoinc_uk ORDER BY 1;
SELECT pg_catalog.pg_get_tabledef('single_autoinc_uk');
DROP TABLE single_autoinc_uk;
+-- auto_increment in table with single column NULL auto_increment UNIQUE
+CREATE TABLE single_autoinc_uk(col int NULL auto_increment UNIQUE KEY) AUTO_INCREMENT = 10;
+INSERT INTO single_autoinc_uk VALUES(NULL);
+SELECT LAST_INSERT_ID();
+SELECT col FROM single_autoinc_uk ORDER BY 1;
+INSERT INTO single_autoinc_uk VALUES(1 - 1);
+SELECT LAST_INSERT_ID();
+SELECT col FROM single_autoinc_uk ORDER BY 1;
+INSERT INTO single_autoinc_uk VALUES(100);
+SELECT col FROM single_autoinc_uk ORDER BY 1;
+INSERT INTO single_autoinc_uk VALUES(DEFAULT);
+SELECT LAST_INSERT_ID();
+SELECT col FROM single_autoinc_uk ORDER BY 1;
+SELECT pg_catalog.pg_get_tabledef('single_autoinc_uk');
+DROP TABLE single_autoinc_uk;
+
+-- auto_increment in table with single column auto_increment UNIQUE
+CREATE TABLE single_autoinc_uk(col int auto_increment UNIQUE KEY) AUTO_INCREMENT = 10;
+INSERT INTO single_autoinc_uk VALUES(NULL);
+SELECT LAST_INSERT_ID();
+SELECT col FROM single_autoinc_uk ORDER BY 1;
+INSERT INTO single_autoinc_uk VALUES(1 - 1);
+SELECT LAST_INSERT_ID();
+SELECT col FROM single_autoinc_uk ORDER BY 1;
+INSERT INTO single_autoinc_uk VALUES(100);
+SELECT col FROM single_autoinc_uk ORDER BY 1;
+INSERT INTO single_autoinc_uk VALUES(DEFAULT);
+SELECT LAST_INSERT_ID();
+SELECT col FROM single_autoinc_uk ORDER BY 1;
+SELECT pg_catalog.pg_get_tabledef('single_autoinc_uk');
+DROP TABLE single_autoinc_uk;
+
-- test auto_increment with rollback
CREATE TABLE single_autoinc_rollback(col int auto_increment PRIMARY KEY) AUTO_INCREMENT = 10;
@@ -1513,4 +1569,5 @@ SELECT col1,col2 FROM test_autoinc_batch_copy ORDER BY 1;
drop table test_autoinc_batch_copy;
\c regression
+clean connection to all force for database autoinc_b_db;
drop database if exists autoinc_b_db;
\ No newline at end of file
diff --git a/src/test/regress/sql/test_b_format_collate.sql b/src/test/regress/sql/test_b_format_collate.sql
new file mode 100644
index 000000000..3a467556d
--- /dev/null
+++ b/src/test/regress/sql/test_b_format_collate.sql
@@ -0,0 +1,465 @@
+create database test_collate_A dbcompatibility = 'A';
+create database test_collate_B dbcompatibility = 'B';
+\c test_collate_A
+-- test A format
+select 'abCdEf' = 'abcdef' collate "utf8mb4_general_ci";
+select 'AAaabb'::char = 'AAaABb'::char collate "utf8mb4_general_ci";
+select 'abCdEf' = 'abcdef' collate "utf8mb4_unicode_ci";
+select 'AAaabb'::char = 'AAaABb'::char collate "utf8mb4_unicode_ci";
+select 'abCdEf' = 'abcdef' collate "utf8mb4_bin";
+select 'AAaabb'::char = 'AAaABb'::char collate "utf8mb4_bin";
+drop table if exists t1;
+create table t1(a varchar(10) collate "utf8mb4_general_ci");
+drop table if exists t1;
+create table t1(a text);
+create index idx_1 on t1(a collate "utf8mb4_unicode_ci");
+create unique index idx_2 on t1(a collate "utf8mb4_unicode_ci");
+
+-- test binary
+drop table if exists t1;
+create table t1(a blob collate binary);
+create table t1(a blob collate utf8mb4_bin);
+create table t1(a blob);
+
+-- test B format
+\c test_collate_B
+
+-- test create table/alter table
+drop table if exists t_collate;
+create table t_collate(id int, f1 text collate "utf8mb4_general_ci");
+alter table t_collate add column f2 text collate "utf8mb4_unicode_ci",add column f3 varchar collate "utf8mb4_general_ci";
+alter table t_collate alter f1 type text collate "utf8mb4_bin";
+\d+ t_collate
+
+-- test create index
+insert into t_collate select generate_series(1,1000), repeat(chr(int4(random()*26)+65),4),repeat(chr(int4(random()*26)+97),4),repeat(chr(int4(random()*26)+97),4);
+create index idx_f1_default on t_collate(f3);
+explain (verbose, costs off) select * from t_collate where f3 in ('aaaa','bbbb');
+drop index if exists idx_f1_default;
+
+create index idx_f1_utf8mb4 on t_collate(f3 collate "utf8mb4_general_ci");
+explain (verbose, costs off) select * from t_collate where f3 in ('aaaa','bbbb');
+drop index if exists idx_f1_utf8mb4;
+
+create index idx_f1_C on t_collate(f3 collate "C");
+explain (verbose, costs off) select * from t_collate where f3 in ('aaaa','bbbb');
+drop index if exists idx_f1_C;
+
+drop table if exists t_collate;
+
+--test unique/primary key
+drop table if exists t_uft8_general_text;
+create table t_uft8_general_text(f1 text unique collate "utf8mb4_general_ci");
+insert into t_uft8_general_text values('S');
+insert into t_uft8_general_text values('s'); -- fail
+insert into t_uft8_general_text values('ś'); -- fail
+insert into t_uft8_general_text values('Š'); -- fail
+
+drop table if exists t_uft8_general_char;
+create table t_uft8_general_char(f2 char(10) primary key collate "utf8mb4_general_ci");
+insert into t_uft8_general_char values('S');
+insert into t_uft8_general_char values('s'); -- fail
+insert into t_uft8_general_char values('ś'); -- fail
+insert into t_uft8_general_char values('Š'); -- fail
+
+drop table if exists t_uft8_unicode_text;
+create table t_uft8_unicode_text(f1 text unique collate "utf8mb4_unicode_ci");
+insert into t_uft8_unicode_text values('S');
+insert into t_uft8_unicode_text values('s'); -- fail
+insert into t_uft8_unicode_text values('ś'); -- fail
+insert into t_uft8_unicode_text values('Š'); -- fail
+
+drop table if exists t_uft8_unicode_char;
+create table t_uft8_unicode_char(f2 char(10) primary key collate "utf8mb4_unicode_ci");
+insert into t_uft8_unicode_char values('S');
+insert into t_uft8_unicode_char values('s'); -- fail
+insert into t_uft8_unicode_char values('ś'); -- fail
+insert into t_uft8_unicode_char values('Š'); -- fail
+
+--
+-- test collate utf8mb4_general_ci
+--
+
+-- test collation used in expr
+select 'abCdEf' = 'abcdef' collate "utf8mb4_general_ci";
+select 'abCdEf' != 'abcdef' collate "utf8mb4_general_ci";
+select 'abCdEf' > 'abcdef' collate "utf8mb4_general_ci";
+select 'abCdEf' < 'abcdef' collate "utf8mb4_general_ci";
+
+select 'abCdEf'::character varying = 'abcdef'::character varying collate "utf8mb4_general_ci";
+select 'abCdEf'::clob = 'abcdef'::clob collate "utf8mb4_general_ci";
+select 'abCdEf'::bpchar = 'abcdef'::bpchar collate "utf8mb4_general_ci";
+
+select 'abCdEf'::char(10) = 'abcdef'::char(10);
+select 'abCdEf'::char(10) = 'abcdef'::char(10) collate "utf8mb4_general_ci";
+select 'abcdefg'::char(10) = 'abcdef'::char(10) collate "utf8mb4_general_ci";
+select 'abCdEf'::char(10) != 'abcdef'::char(10) collate "utf8mb4_general_ci";
+select 'abCdEf'::char(10) > 'abcdef'::char(10) collate "utf8mb4_general_ci";
+select 'abCdEf'::char(10) < 'abcdef'::char(10) collate "utf8mb4_general_ci";
+
+select 'abCdEf'::nchar(10) = 'abcdef'::nchar(10) collate "utf8mb4_general_ci";
+select 'abcdefg'::nchar(10) = 'abcdef'::nchar(10) collate "utf8mb4_general_ci";
+select 'abCdEf'::character(10) = 'abcdef'::character(10) collate "utf8mb4_general_ci";
+select 'abcdefg'::character(10) = 'abcdef'::character(10) collate "utf8mb4_general_ci";
+
+select 'ś' = 'Š' collate "utf8mb4_general_ci" , 'Š' = 's' collate "utf8mb4_general_ci";
+select 'ś' != 'Š' collate "utf8mb4_general_ci", 'Š' != 's' collate "utf8mb4_general_ci";
+select 'ŠSśs' = 'ssss' collate "utf8mb4_general_ci";
+select 'ŠSśs'::character varying = 'ssss'::character varying collate "utf8mb4_general_ci";
+select 'ŠSśs'::clob = 'ssss'::clob collate "utf8mb4_general_ci";
+select 'ŠSśs'::bpchar = 'ssss'::bpchar collate "utf8mb4_general_ci";
+
+select 's'::char(3) = 'Š'::char(3) collate "utf8mb4_general_ci";
+select 'ŠSśs'::char = 'ssss'::char collate "utf8mb4_general_ci";
+select 'ŠSśs'::char(10) = 'ssss'::char(10) collate "utf8mb4_general_ci";
+select 'ŠSśs'::nchar(10) = 'ssss'::nchar(10) collate "utf8mb4_general_ci";
+select 'ŠSśs'::character(10) = 'ssss'::character(10) collate "utf8mb4_general_ci";
+
+-- compare between different types, expected success
+select 'ŠSśs'::character(10) = 'ssss'::varchar collate "utf8mb4_general_ci";
+select 'ŠSśs'::clob = 'ssss'::char(10) collate "utf8mb4_general_ci";
+
+-- compare str with different collation, expected fail
+select 'abCdEf' collate "utf8mb4_general_ci" = 'abcdef' collate "utf8mb4_general_ci";
+select 'abCdEf' collate "utf8mb4_bin" = 'abcdef' collate "utf8mb4_general_ci";
+select 'abCdEf' collate "utf8mb4_bin" = 'abcdef' collate "C";
+
+-- types not support collation, expected fail
+select 100 > 50 collate "utf8mb4_general_ci";
+select '0'::bool = '1'::bool collate "utf8mb4_general_ci";
+select '100'::money > '50'::money collate "utf8mb4_general_ci";
+select '00:00:02'::time > '00:00:01'::time collate "utf8mb4_general_ci";
+
+-- test column collation
+drop table if exists column_collate;
+create table column_collate(f1 text collate "utf8mb4_general_ci", f2 char(15) collate "utf8mb4_general_ci");
+insert into column_collate values('S','S'),('s','s'),('ś','ś'),('Š','Š'),('z','z'),('Z','Z'),('c','c'),('A','A'),('C','C');
+insert into column_collate values('AaA','AaA'),('bb','bb'),('aAA','aAA'),('Bb','Bb'),('dD','dd'),('Cc','Cc'),('AAA','AAA');
+insert into column_collate values('A1中文','A1中文'), ('b1中文','b1中文'), ('a2中文','a2中文'),
+('B2中文','B2中文'), ('中文d1','中文d1'), ('中文C1','中文C1'), ('中文A3','中文A3');
+
+-- test where clause
+select f1 from column_collate where f1 = 's';
+select f1 from column_collate where f1 = 'aaa';
+select f2 from column_collate where f2 = 's';
+select f2 from column_collate where f2 = 'aaa';
+
+-- test order by clause
+select f1 from column_collate order by f1;
+select f2 from column_collate order by f2;
+
+-- test distinct clause
+insert into column_collate values ('AbcdEf','AbcdEf'), ('abcdEF','abcdEF'), ('中文AbCdEFG','中文AbCdEFG'),
+('中文abcdEFG','中文abcdEFG'), ('中文Ab','中文Ab'), ('中文ab','中文ab');
+select distinct f1 from column_collate;
+select distinct f2 from column_collate;
+explain (verbose, costs off) select distinct (f1) from column_collate order by f1;
+select distinct f1 from column_collate order by f1;
+select distinct f2 from column_collate order by f2;
+
+--test unique node
+analyze column_collate;
+explain (verbose, costs off) select distinct (f1) from column_collate order by f1;
+select distinct f1 from column_collate order by f1;
+select distinct f2 from column_collate order by f2;
+
+-- test group by
+select count(f1),f1 from column_collate group by f1;
+select count(f2),f2 from column_collate group by f2;
+
+-- test like
+select f1 from column_collate where f1 like 'A_%';
+select f1 from column_collate where f1 like '%s%';
+select f1 from column_collate where f1 like 'A%f';
+select f1 from column_collate where f1 like 'A__';
+select f1 from column_collate where f1 like '\A__';
+select f1 from column_collate where f1 like 'A%\'; -- error
+select f1 from column_collate where f1 like 'A_\';-- error
+
+select f2 from column_collate where f2 like 'A_%';
+select f2 from column_collate where f2 like 'A%\'; -- error
+select f2 from column_collate where f2 like 'A_\';-- error
+
+-- test notlike
+select f1 from column_collate where f1 not like 'A_%';
+select f1 from column_collate where f1 not like '%s%';
+
+-- test hashjoin
+drop table if exists test_join1;
+drop table if exists test_join2;
+create table test_join1(f1 text collate "utf8mb4_general_ci", f2 char(15) collate "utf8mb4_general_ci");
+insert into test_join1 values('S','S'),('s','s'),('ś','ś'),('Š','Š');
+
+create table test_join2(f1 text collate "utf8mb4_general_ci", f2 char(15) collate "utf8mb4_general_ci");
+insert into test_join2 values('S','S');
+
+create table test_join3(f1 text collate "utf8mb4_unicode_ci", f2 char(15) collate "utf8mb4_unicode_ci");
+insert into test_join3 values('S','S');
+
+explain (verbose, costs off) select tab1.f1, tab2.f1 from test_join1 as tab1, test_join2 as tab2 where tab1.f1 = tab2.f1;
+select tab1.f1, tab2.f1 from test_join1 as tab1, test_join2 as tab2 where tab1.f1 = tab2.f1;
+select tab1.f1, tab2.f1 from test_join1 as tab1, test_join2 as tab2 where tab1.f1 = tab2.f1 collate "C";
+select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1 collate "utf8mb4_bin";
+select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1 collate "utf8mb4_general_ci";
+select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1; --fail
+
+-- test nestloop
+set enable_hashjoin=off;
+set enable_nestloop=on;
+set enable_mergejoin=off;
+
+explain (verbose, costs off) select tab1.f1, tab2.f1 from test_join1 as tab1, test_join2 as tab2 where tab1.f1 = tab2.f1;
+select tab1.f1, tab2.f1 from test_join1 as tab1, test_join2 as tab2 where tab1.f1 = tab2.f1;
+select tab1.f1, tab2.f1 from test_join1 as tab1, test_join2 as tab2 where tab1.f1 = tab2.f1 collate "C";
+select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1 collate "utf8mb4_bin";
+select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1 collate "utf8mb4_general_ci";
+select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1; --fail
+
+-- test mergejoin
+set enable_hashjoin=off;
+set enable_nestloop=off;
+set enable_mergejoin=on;
+
+explain (verbose, costs off) select tab1.f1, tab2.f1 from test_join1 as tab1, test_join2 as tab2 where tab1.f1 = tab2.f1;
+select tab1.f1, tab2.f1 from test_join1 as tab1, test_join2 as tab2 where tab1.f1 = tab2.f1;
+select tab1.f1, tab2.f1 from test_join1 as tab1, test_join2 as tab2 where tab1.f1 = tab2.f1 collate "C";
+select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1 collate "utf8mb4_bin";
+select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1 collate "utf8mb4_general_ci";
+select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1; --fail
+
+-- test union
+drop table if exists test_sep_option1;
+drop table if exists test_sep_option2;
+drop table if exists test_sep_option3;
+drop table if exists test_sep_option4;
+create table test_sep_option1(f1 text collate "utf8mb4_general_ci", f2 text collate "utf8mb4_general_ci");
+create table test_sep_option2(f1 text collate "utf8mb4_general_ci", f2 text collate "utf8mb4_general_ci");
+create table test_sep_option3(f1 text collate "utf8mb4_bin", f2 text collate "utf8mb4_bin");
+create table test_sep_option4(f1 text collate "utf8mb4_bin", f2 text collate "utf8mb4_bin");
+
+insert into test_sep_option1 values ('s','s'),('ś','ś'),('Š','Š');
+insert into test_sep_option2 values ('S','S');
+insert into test_sep_option3 values ('s','s'),('ś','ś'),('Š','Š');
+insert into test_sep_option4 values ('S','S');
+
+select * from test_sep_option1 union select * from test_sep_option2;
+select * from test_sep_option3 union select * from test_sep_option4;
+select * from test_sep_option1 union select * from test_sep_option3; -- fail
+
+-- test setop
+drop table if exists test_sep_option1;
+drop table if exists test_sep_option2;
+drop table if exists test_sep_option3;
+drop table if exists test_sep_option4;
+create table test_sep_option1(f1 text collate "utf8mb4_general_ci", f2 text collate "utf8mb4_general_ci");
+create table test_sep_option2(f1 text collate "utf8mb4_general_ci", f2 text collate "utf8mb4_general_ci");
+create table test_sep_option3(f1 text collate "utf8mb4_bin", f2 text collate "utf8mb4_bin");
+create table test_sep_option4(f1 text collate "utf8mb4_bin", f2 text collate "utf8mb4_bin");
+
+insert into test_sep_option1 values ('s','s'),('ś','ś'),('Š','Š');
+insert into test_sep_option2 values ('S','S');
+insert into test_sep_option3 values ('s','s'),('ś','ś'),('Š','Š');
+insert into test_sep_option4 values ('S','S');
+
+-- test constraint
+drop table if exists test_primary_key;
+create table test_primary_key(f1 text primary key collate "utf8mb4_general_ci");
+insert into test_primary_key values ('a');
+insert into test_primary_key values ('A'); -- fail
+drop table if exists test_unique;
+create table test_unique(f1 text unique collate "utf8mb4_general_ci");
+insert into test_unique values ('a');
+insert into test_unique values ('A'); -- fail
+drop table if exists test_unique;
+create table test_unique(f1 text collate "utf8mb4_general_ci");
+insert into test_unique values('aaa'), ('AaA');
+create unique index u_idx_1 on test_unique(f1); -- fail
+drop table if exists test_constraunt;
+create table test_constraunt (f1 text);
+alter table test_constraunt add column f text collate "utf8mb4_general_ci"; --success
+
+--
+-- test ustore with collation utf8mb4_general_ci
+--
+
+drop table if exists ustore_column_collate;
+create table ustore_column_collate(f1 text collate "utf8mb4_general_ci", f2 char(15) collate "utf8mb4_general_ci") with (storage_type=ustore);
+-- create table column_collate(f1 text collate "utf8mb4_unicode_ci", f2 char(10) collate "utf8mb4_unicode_ci");
+insert into ustore_column_collate values('S','S'),('s','s'),('ś','ś'),('Š','Š'),('z','z'),('Z','Z'),('c','c'),('A','A'),('C','C');
+insert into ustore_column_collate values('AaA','AaA'),('bb','bb'),('aAA','aAA'),('Bb','Bb'),('dD','dd'),('Cc','Cc'),('AAA','AAA');
+insert into ustore_column_collate values('A1中文','A1中文'), ('b1中文','b1中文'), ('a2中文','a2中文'),
+('B2中文','B2中文'), ('中文d1','中文d1'), ('中文C1','中文C1'), ('中文A3','中文A3');
+
+-- test where clause
+select f1 from ustore_column_collate where f1 = 'aaa';
+select f2 from ustore_column_collate where f2 = 'aaa';
+
+-- test order by clause
+select f1 from ustore_column_collate order by f1;
+select f2 from ustore_column_collate order by f2;
+
+-- test distinct clause
+insert into ustore_column_collate values ('AbcdEf','AbcdEf'), ('abcdEF','abcdEF'), ('中文AbCdEFG','中文AbCdEFG'),
+('中文abcdEFG','中文abcdEFG'), ('中文Ab','中文Ab'), ('中文ab','中文ab');
+select distinct f1 from ustore_column_collate;
+select distinct f2 from ustore_column_collate;
+select distinct f1 from ustore_column_collate order by f1;
+select distinct f2 from ustore_column_collate order by f2;
+
+-- test group by
+select count(f1),f1 from ustore_column_collate group by f1;
+select count(f2),f2 from ustore_column_collate group by f2;
+
+-- test like
+select f1 from ustore_column_collate where f1 like 'A_%';
+select f1 from ustore_column_collate where f1 like 'A%f';
+select f1 from ustore_column_collate where f1 like 'A__';
+select f1 from ustore_column_collate where f1 like '\A__';
+select f1 from ustore_column_collate where f1 like 'A%\'; -- error
+select f1 from ustore_column_collate where f1 like 'A_\'; -- error
+
+select f2 from ustore_column_collate where f2 like 'A_%';
+select f2 from ustore_column_collate where f2 like 'A%f';
+select f2 from ustore_column_collate where f2 like 'A__';
+select f2 from ustore_column_collate where f2 like '\A__';
+select f2 from ustore_column_collate where f2 like 'A%\'; -- error
+select f2 from ustore_column_collate where f2 like 'A_\'; -- error
+
+-- test grouping sets
+create table date_dim(d_year int, d_moy int, d_date_sk int);
+create table store_sales(ss_sold_date_sk int, ss_item_sk int, ss_ext_sales_price int );
+create table item(i_category text, i_item_sk int ,i_manager_id int );
+insert into date_dim values(2000, 11, 1);
+insert into store_sales values(1, 1, 1000);
+insert into item values('Music', 1, 1);
+select dt.d_year, ss_ext_sales_price, item.i_category, grouping(dt.d_year), grouping(ss_ext_sales_price), grouping(item.i_category)
+from date_dim dt, store_sales, item
+where dt.d_date_sk = store_sales.ss_sold_date_sk and store_sales.ss_item_sk = item.i_item_sk and item.i_manager_id = 1 and dt.d_moy = 11 and dt.d_year = 2000 and i_category = 'Music'
+group by grouping sets(dt.d_year,ss_ext_sales_price),item.i_category having grouping(i_category) = 0 order by 1,2,3,4,5,6;
+
+-- test collate utf8mb4_unicode_ci
+-- test collation used in expr
+select 'abCdEf' = 'abcdef' collate "utf8mb4_unicode_ci";
+select 'abCdEf' != 'abcdef' collate "utf8mb4_unicode_ci";
+select 'abCdEf' > 'abcdef' collate "utf8mb4_unicode_ci";
+select 'abCdEf' < 'abcdef' collate "utf8mb4_unicode_ci";
+select 'AAaabb'::char = 'AAaABb'::char collate "utf8mb4_unicode_ci";
+select 'AAaabb'::char != 'AAaABb'::char collate "utf8mb4_unicode_ci";
+select 'AAaabb'::char > 'AAaABb'::char collate "utf8mb4_unicode_ci";
+select 'AAaabb'::char < 'AAaABb'::char collate "utf8mb4_unicode_ci";
+
+select 'ś' = 'Š' collate "utf8mb4_unicode_ci" , 'Š' = 's' collate "utf8mb4_unicode_ci";
+select 'ŠSśs' = 'ssss' collate "utf8mb4_unicode_ci";
+select 's'::char(3) = 'Š'::char(3) collate "utf8mb4_unicode_ci";
+select 'ŠSśs'::char(10) = 'ssss'::char(10) collate "utf8mb4_unicode_ci";
+
+-- test collate utf8mb4_bin
+select 'abCdEf' = 'abcdef' collate "utf8mb4_bin";
+select 'abCdEf' > 'abcdef' collate "utf8mb4_bin";
+select 'abCdEf' < 'abcdef' collate "utf8mb4_bin";
+select 'abCdEf' = 'ab' collate "utf8mb4_bin";
+select 'abCdEf' > 'ab' collate "utf8mb4_bin";
+select 'abCdEf' < 'ab' collate "utf8mb4_bin";
+select 'a' > 'A' collate "utf8mb4_bin", 'B' > 'A' collate "utf8mb4_bin", 'a' > 'B' collate "utf8mb4_bin",'b' > 'a' collate "utf8mb4_bin";
+
+-- test binary
+create table t1(a blob collate utf8mb4_bin);
+create table t1(a blob collate "C");
+drop table if exists t1;
+create table t1(a blob collate binary);
+
+-- test partition table
+drop table if exists test_part_collate;
+
+create table test_part_collate (
+f1 int,
+f2 text collate utf8mb4_general_ci,
+f3 text collate utf8mb4_bin
+) partition by range(f1) (
+partition p1 values less than (5),
+partition p2 values less than (10),
+partition p3 values less than MAXVALUE
+);
+insert into test_part_collate values(1, 'bbb', 'a');
+insert into test_part_collate values(2, 'aba', 'A');
+insert into test_part_collate values(6, 'Bbb', 'b');
+insert into test_part_collate values(15, 'BBB', 'B');
+insert into test_part_collate values(3, 'ccc', 'C');
+
+select * from test_part_collate order by f2;
+select * from test_part_collate order by f3;
+select distinct f2 from test_part_collate order by f2;
+select distinct f3 from test_part_collate order by f3;
+select * from test_part_collate where f2 = 'bbb';
+select * from test_part_collate where f3 = 'b';
+select f2,count(*) from test_part_collate group by f2;
+select f3,count(*) from test_part_collate group by f3;
+
+-- test table collate
+drop table if exists test_table_collate;
+create table test_table_collate (a text, b char(10),c character(10) collate "utf8mb4_bin") collate = utf8mb4_general_ci;
+insert into test_table_collate values('bb','bb','bb');
+insert into test_table_collate values('bB','bB','bB');
+insert into test_table_collate values('BB','BB','BB');
+insert into test_table_collate values('ba','ba','ba');
+select * from test_table_collate where b = 'bb';
+select * from test_table_collate where b = 'bb' collate "utf8mb4_bin";
+select * from test_table_collate where c = 'bb';
+select * from test_table_collate where c = 'bb' collate "utf8mb4_general_ci";
+
+select 'a' > 'A' collate utf8mb4_bin;
+select 'a' > 'A' collate 'utf8mb4_bin';
+select 'a' > 'A' collate "utf8mb4_bin";
+create table test1(a text charset utf8mb4 collate utf8mb4_bin);
+create table test2(a text charset 'utf8mb4' collate 'utf8mb4_bin');
+create table test3(a text charset "utf8mb4" collate 'utf8mb4_bin');
+
+-- test table charset binary
+create table test4(a text) charset "binary";
+alter table test4 charset utf8mb4;
+alter table test4 add a2 varchar(20);
+alter table test4 add a3 varchar(20) collate 'utf8mb4_bin';
+select pg_get_tabledef('test4');
+
+create table test5(a blob charset "binary");
+create table test6(a int charset "binary");
+create table test6(a float charset "binary");
+
+select 'a' > 'A' collate UTF8MB4_BIN;
+select 'a' > 'A' collate 'UTF8MB4_BIN';
+select 'a' > 'A' collate "UTF8MB4_BIN";
+select 'a' > 'A' collate "UTF8MB4_bin";
+create table test7(a text charset 'UTF8MB4' collate 'UTF8MB4_BIN');
+create table test8(a text) charset 'UTF8MB4' collate 'UTF8MB4_bin';
+create table test9(a text collate 'UTF8MB4_BIN');
+create table test10(a text charset 'UTF8MB4');
+create table test11(a text charset 'aaa' collate 'UTF8MB4_BIN');
+
+create table test12(a text collate 'utf8mb4_bin.utf8');
+create table test13(a text collate utf8mb4_bin.utf8);
+create table test14(a text collate 'pg_catalog.utf8mb4_bin');
+create table test15(a text collate pg_catalog.utf8mb4_bin); -- ok
+create table test16(a text collate 'aa_DJ.utf8'); -- ok
+create table test17(a text collate aa_DJ.utf8);
+create table test18(a text collate 'pg_catalog.aa_DJ.utf8');
+create table test19(a text collate pg_catalog.aa_DJ.utf8);
+create table test20(a text collate pg_catalog.utf8);
+
+-- test create table as
+create table test21(a text collate utf8mb4_bin, b text collate utf8mb4_general_ci, c text);
+create table test22 as select * from test21;
+select * from pg_get_tabledef('test22');
+create table test23 as select a, c from test21;
+select * from pg_get_tabledef('test23');
+set b_format_behavior_compat_options = enable_set_variables;
+set @v1 = 'aa', @v2 = 'bb';
+create table test24 as select @v1 collate 'utf8mb4_bin';
+select * from pg_get_tabledef('test24');
+create table test25 as select @v1 collate 'utf8mb4_bin', @v2;
+select * from pg_get_tabledef('test25');
+
+\c regression
+clean connection to all force for database test_collate_A;
+clean connection to all force for database test_collate_B;
+DROP DATABASE IF EXISTS test_collate_A;
+DROP DATABASE IF EXISTS test_collate_B;
\ No newline at end of file
diff --git a/src/test/regress/sql/ustore_subpartition_createtable.sql b/src/test/regress/sql/ustore_subpartition_createtable.sql
index 1ff8d3bba..b5a573d22 100644
--- a/src/test/regress/sql/ustore_subpartition_createtable.sql
+++ b/src/test/regress/sql/ustore_subpartition_createtable.sql
@@ -718,29 +718,6 @@ drop table list_list;
--1.4 subpartition key check
--- 一级分区和二级分区分区键是同一列
-
-CREATE TABLE list_list
-(
- month_code VARCHAR2 ( 30 ) NOT NULL ,
- dept_code VARCHAR2 ( 30 ) NOT NULL ,
- user_no VARCHAR2 ( 30 ) NOT NULL ,
- sales_amt int
-) WITH (STORAGE_TYPE=USTORE)
-PARTITION BY LIST (month_code) SUBPARTITION BY LIST (month_code)
-(
- PARTITION p_201901 VALUES ( '201902' )
- (
- SUBPARTITION p_201901_a VALUES ( '1' ),
- SUBPARTITION p_201901_b VALUES ( '2' )
- ),
- PARTITION p_201902 VALUES ( '201903' )
- (
- SUBPARTITION p_201902_a VALUES ( '1' ),
- SUBPARTITION p_201902_b VALUES ( '2' )
- )
-);
-
--二级分区的键值一样
CREATE TABLE list_list
diff --git a/src/test/regress/sql/xc_rownum.sql b/src/test/regress/sql/xc_rownum.sql
index e1f498499..9f2750eb8 100644
--- a/src/test/regress/sql/xc_rownum.sql
+++ b/src/test/regress/sql/xc_rownum.sql
@@ -646,4 +646,59 @@ select * from partition_hash where rownum < 5;
drop table partition_hash;
+create table test_rownum_subquery
+(
+ pk integer,
+ no varchar2
+);
+insert into test_rownum_subquery values (1,'1');
+insert into test_rownum_subquery values (2,'2');
+insert into test_rownum_subquery values (3,'3');
+insert into test_rownum_subquery values (4,'4');
+insert into test_rownum_subquery values (5,'5');
+select * from test_rownum_subquery;
+
+update test_rownum_subquery t set t.no = to_char(100 - 1 + (
+ select vou_no from (
+ select rownum as vou_no, no from (
+ select distinct no from test_rownum_subquery b order by 1
+ )
+ ) where nvl(no, 0) = nvl(t.no, 0)
+));
+select * from test_rownum_subquery;
+drop table test_rownum_subquery;
+
+create table test_rownum_push_qual(id int);
+
+insert into test_rownum_push_qual values(generate_series(1, 20));
+
+-- having qual should not be pushed if accompanied by rownum reference
+explain (verbose on, costs off) select rownum, * from test_rownum_push_qual group by id,rownum having ROWNUM < 10 and id between 10 and 20 order by 1;
+
+select rownum, * from test_rownum_push_qual group by id,rownum having ROWNUM < 10 and id between 10 and 20 order by 1; -- expect 0 rows
+
+explain (verbose on, costs off) select rownum, * from test_rownum_push_qual group by id,rownum having ROWNUM < 10 or id between 10 and 20 order by 1;
+
+select rownum, * from test_rownum_push_qual group by id,rownum having ROWNUM < 10 or id between 10 and 20 order by 1; -- expect 20 rows
+
+explain (verbose on, costs off) select rownum, * from test_rownum_push_qual group by id,rownum having case when ROWNUM < 10 then 'true'::boolean else 'false'::boolean end and id between 10 and 20 order by 1;
+
+select rownum, * from test_rownum_push_qual group by id,rownum having case when ROWNUM < 10 then 'true'::boolean else 'false'::boolean end and id between 10 and 20 order by 1; -- expect 0 rows
+
+-- do not transform rownum op const to limit const -1, if limit clause is stated
+explain (verbose on, costs off) select rownum, * from test_rownum_push_qual where rownum < 10 limit 10 offset 10;
+
+select rownum, * from test_rownum_push_qual where rownum < 10 limit 10 offset 10; -- expected 0 rows
+
+explain (verbose on, costs off) select rownum, * from test_rownum_push_qual where rownum > 10 limit 10 offset 10;
+
+select rownum, * from test_rownum_push_qual where rownum > 10 limit 10 offset 10; -- expected 0 rows
+
+explain (verbose on, costs off) select rownum, * from test_rownum_push_qual where rownum < 15 limit 10 offset 10;
+
+select rownum, * from test_rownum_push_qual where rownum < 15 limit 10 offset 10; -- expected 4 rows
+
+explain (verbose on, costs off) select rownum, * from (select * from test_rownum_push_qual order by 1) where rownum < 10 limit 10 offset 10;
+
+select rownum, * from (select * from test_rownum_push_qual order by 1) where rownum < 10 limit 10 offset 10; -- expected 0 rows
\ No newline at end of file