diff --git a/src/bin/pg_dump/pg_dump.cpp b/src/bin/pg_dump/pg_dump.cpp index 263b203a5..ce428ec85 100644 --- a/src/bin/pg_dump/pg_dump.cpp +++ b/src/bin/pg_dump/pg_dump.cpp @@ -18809,18 +18809,24 @@ static PQExpBuffer createTablePartition(Archive* fout, TableInfo* tbinfo) PART_OBJ_TYPE_TABLE_PARTITION, newStrategy); for (i = 1; i <= partkeynum; i++) { - if (partkeyexprIsNull) { - if (i == partkeynum) + + if (!partkeyexprIsNull) { + if (i == partkeynum) + appendPQExpBuffer(partitionq, "p.boundaries[%d] ASC NULLS LAST", i); + else + appendPQExpBuffer(partitionq, "p.boundaries[%d] NULLS LAST,", i); + } else if (partStrategy == PART_STRATEGY_HASH) { + if (i == partkeynum) + appendPQExpBuffer(partitionq, "p.boundaries[%d]::int ASC NULLS LAST", i); + else + appendPQExpBuffer(partitionq, "p.boundaries[%d]::int NULLS LAST, ", i); + } else { + if (i == partkeynum) appendPQExpBuffer( partitionq, "p.boundaries[%d]::%s ASC NULLS LAST", i, tbinfo->atttypnames[partkeycols[i - 1] - 1]); else appendPQExpBuffer( - partitionq, "p.boundaries[%d]::%s NULLS LAST, ", i, tbinfo->atttypnames[partkeycols[i - 1] - 1]); - } else { - if (i == partkeynum) - appendPQExpBuffer(partitionq, "p.boundaries[%d] ASC NULLS LAST", i); - else - appendPQExpBuffer(partitionq, "p.boundaries[%d] NULLS LAST, ", i); + partitionq, "p.boundaries[%d]::%s NULLS LAST, ", i, tbinfo->atttypnames[partkeycols[i - 1] - 1]); } } } else { diff --git a/src/test/regress/input/gs_dump_2hash.source b/src/test/regress/input/gs_dump_2hash.source new file mode 100644 index 000000000..3aa26533b --- /dev/null +++ b/src/test/regress/input/gs_dump_2hash.source @@ -0,0 +1,36 @@ +create user u_hash password '1234@abcd'; +grant all privileges to u_hash; + +create database db_hash; +create database db_restore; +\c db_hash +create table t_hash_2nd_partition +( + c1_date date not null, + c2_int int not null, + c3_varchar2 varchar2(30) +) +partition by hash(c1_date) subpartition by range(c2_int) +( + partition p1 + ( + subpartition p1_a values 
less than(100), + subpartition p1_b values less than(200) + ), + partition p2 + ( + subpartition p2_a values less than(100), + subpartition p2_b values less than(200) + ) +); + +\! @abs_bindir@/gs_dump -p @portstring@ db_hash -f @abs_bindir@/dump -F c -w > @abs_bindir@/gs_dump_2hash.log 2>&1 ; echo $? + +\! @abs_bindir@/gs_restore -h 127.0.0.1 -p @portstring@ -U u_hash -W 1234@abcd -d db_restore -F c @abs_bindir@/dump > @abs_bindir@/gs_dump_2hash.log 2>&1 ; echo $? + +\c db_restore +select *from t_hash_2nd_partition; + +\c postgres +drop database db_hash; +drop database db_restore; diff --git a/src/test/regress/output/gs_dump_2hash.source b/src/test/regress/output/gs_dump_2hash.source new file mode 100644 index 000000000..f25b598ea --- /dev/null +++ b/src/test/regress/output/gs_dump_2hash.source @@ -0,0 +1,37 @@ +create user u_hash password '1234@abcd'; +grant all privileges to u_hash; +create database db_hash; +create database db_restore; +\c db_hash +create table t_hash_2nd_partition +( + c1_date date not null, + c2_int int not null, + c3_varchar2 varchar2(30) +) +partition by hash(c1_date) subpartition by range(c2_int) +( + partition p1 + ( + subpartition p1_a values less than(100), + subpartition p1_b values less than(200) + ), + partition p2 + ( + subpartition p2_a values less than(100), + subpartition p2_b values less than(200) + ) +); +\! @abs_bindir@/gs_dump -p @portstring@ db_hash -f @abs_bindir@/dump -F c -w > @abs_bindir@/gs_dump_2hash.log 2>&1 ; echo $? +0 +\! @abs_bindir@/gs_restore -h 127.0.0.1 -p @portstring@ -U u_hash -W 1234@abcd -d db_restore -F c @abs_bindir@/dump > @abs_bindir@/gs_dump_2hash.log 2>&1 ; echo $? 
+0 +\c db_restore +select *from t_hash_2nd_partition; + c1_date | c2_int | c3_varchar2 +---------+--------+------------- +(0 rows) + +\c postgres +drop database db_hash; +drop database db_restore; diff --git a/src/test/regress/parallel_schedule0 b/src/test/regress/parallel_schedule0 index df76ab182..df0d8fb07 100644 --- a/src/test/regress/parallel_schedule0 +++ b/src/test/regress/parallel_schedule0 @@ -77,6 +77,7 @@ test: gs_dump_package trigger_dump gs_dumpall gs_dump_synonym dump_trigger_defin test: out_param_func #test: sqlcode_cursor test: gs_dump_tableconstraint +test: gs_dump_2hash # test AI4DB test: plpgsql_override_out diff --git a/src/test/regress/parallel_schedule0A b/src/test/regress/parallel_schedule0A index b30fb3f5c..a9edaf3a6 100644 --- a/src/test/regress/parallel_schedule0A +++ b/src/test/regress/parallel_schedule0A @@ -495,3 +495,6 @@ test: gb_ora_rotate_unrotate # test backup tool audit log test: backup_tool_audit + + +test: gs_dump_2hash