diff --git a/contrib/pg_xlogdump/CMakeLists.txt b/contrib/pg_xlogdump/CMakeLists.txt index 535df26f3..bfc02ff09 100644 --- a/contrib/pg_xlogdump/CMakeLists.txt +++ b/contrib/pg_xlogdump/CMakeLists.txt @@ -8,6 +8,7 @@ execute_process( COMMAND ln -fs ${PROJECT_SRC_DIR}/gausskernel/storage/access/rmgrdesc/gistdesc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/gistdesc.cpp COMMAND ln -fs ${PROJECT_SRC_DIR}/gausskernel/storage/access/rmgrdesc/hashdesc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/hashdesc.cpp COMMAND ln -fs ${PROJECT_SRC_DIR}/gausskernel/storage/access/rmgrdesc/heapdesc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/heapdesc.cpp + COMMAND ln -fs ${PROJECT_SRC_DIR}/gausskernel/storage/access/rmgrdesc/logicalddlmsgdesc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/logicalddlmsgdesc.cpp COMMAND ln -fs ${PROJECT_SRC_DIR}/gausskernel/storage/access/rmgrdesc/motdesc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/motdesc.cpp COMMAND ln -fs ${PROJECT_SRC_DIR}/gausskernel/storage/access/rmgrdesc/mxactdesc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/mxactdesc.cpp COMMAND ln -fs ${PROJECT_SRC_DIR}/gausskernel/storage/access/rmgrdesc/nbtdesc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/nbtdesc.cpp diff --git a/contrib/pg_xlogdump/rmgrdesc.cpp b/contrib/pg_xlogdump/rmgrdesc.cpp index db4103e01..11708296a 100644 --- a/contrib/pg_xlogdump/rmgrdesc.cpp +++ b/contrib/pg_xlogdump/rmgrdesc.cpp @@ -30,6 +30,7 @@ #include "commands/tablespace.h" #include "replication/slot.h" #include "replication/origin.h" +#include "replication/ddlmessage.h" #ifdef PGXC #include "pgxc/barrier.h" #endif diff --git a/contrib/security_plugin/gs_policy_plugin.cpp b/contrib/security_plugin/gs_policy_plugin.cpp index d65a9a585..b660eac5e 100644 --- a/contrib/security_plugin/gs_policy_plugin.cpp +++ b/contrib/security_plugin/gs_policy_plugin.cpp @@ -814,10 +814,10 @@ static void gsaudit_ProcessUtility_hook(processutility_context* processutility_c !is_audit_policy_exist_load_policy_info()) { if (next_ProcessUtility_hook) { next_ProcessUtility_hook(processutility_cxt, dest, sentToRemote, completionTag, - context, false); + context, isCTAS); } else { standard_ProcessUtility(processutility_cxt, dest, sentToRemote, completionTag, - context, false); + context, isCTAS); } return; } diff --git a/contrib/test_decoding/test_decoding.cpp b/contrib/test_decoding/test_decoding.cpp index 76175dd9b..7f5dacfb9 100644 --- a/contrib/test_decoding/test_decoding.cpp +++ b/contrib/test_decoding/test_decoding.cpp @@ -29,6 +29,7 @@ #include "utils/typcache.h" #include "replication/output_plugin.h" #include "replication/logical.h" +#include "tcop/ddldeparse.h" PG_MODULE_MAGIC; @@ -48,6 +49,8 @@ static void pg_decode_prepare_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* static void pg_decode_change( LogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation rel, ReorderBufferChange* change); static bool pg_decode_filter(LogicalDecodingContext* ctx, RepOriginId origin_id); +static void pg_decode_ddl(LogicalDecodingContext* ctx, ReorderBufferTXN* txn, XLogRecPtr message_lsn, + const char *prefix, Oid relid, DeparsedCommandType cmdtype, Size sz, const char *message); void _PG_init(void) { @@ -67,6 +70,7 @@ void _PG_output_plugin_init(OutputPluginCallbacks* cb) cb->prepare_cb = pg_decode_prepare_txn; cb->filter_by_origin_cb = pg_decode_filter; cb->shutdown_cb = pg_decode_shutdown; + cb->ddl_cb = pg_decode_ddl; } /* initialize this plugin */ @@ -307,3 +311,59 @@ static void pg_decode_change( OutputPluginWrite(ctx, true); } + +static char *deparse_command_type(DeparsedCommandType cmdtype) +{ + switch (cmdtype) { + case DCT_SimpleCmd: + 
return "Simple"; + case DCT_TableDropStart: + return "Drop table"; + case DCT_TableDropEnd: + return "Drop Table End"; + default: + Assert(false); + } + return NULL; +} + +static void pg_decode_ddl(LogicalDecodingContext* ctx, + ReorderBufferTXN* txn, XLogRecPtr message_lsn, + const char *prefix, Oid relid, + DeparsedCommandType cmdtype, + Size sz, const char *message) +{ + PluginTestDecodingData* data = NULL; + MemoryContext old; + + data = (PluginTestDecodingData*)ctx->output_plugin_private; + + /* output BEGIN if we haven't yet */ + if (data->skip_empty_xacts && !data->xact_wrote_changes) { + pg_output_begin(ctx, data, txn, false); + } + data->xact_wrote_changes = true; + + /* Avoid leaking memory by using and resetting our own context */ + old = MemoryContextSwitchTo(data->context); + OutputPluginPrepareWrite(ctx, true); + + appendStringInfo(ctx->out, "message: prefix %s, relid %u, cmdtype: %s, sz: %lu content: %s", + prefix, + relid, + deparse_command_type(cmdtype), + sz, + message); + if (cmdtype != DCT_TableDropStart) { + char *tmp = pstrdup(message); + char *owner = NULL; + char *decodestring = deparse_ddl_json_to_string(tmp, &owner); + appendStringInfo(ctx->out, "\ndecode to : %s, [owner %s]", decodestring, owner ? owner : "none"); + pfree(tmp); + } + + MemoryContextSwitchTo(old); + MemoryContextReset(data->context); + + OutputPluginWrite(ctx, true); +} \ No newline at end of file diff --git a/src/bin/pg_dump/pg_dump.cpp b/src/bin/pg_dump/pg_dump.cpp index 04016a800..162b99280 100644 --- a/src/bin/pg_dump/pg_dump.cpp +++ b/src/bin/pg_dump/pg_dump.cpp @@ -62,6 +62,7 @@ #include "catalog/pg_proc.h" #include "catalog/pg_rlspolicy.h" #include "catalog/pg_trigger.h" +#include "catalog/pg_publication.h" #include "catalog/pg_type.h" #include "catalog/pg_attrdef.h" #include "catalog/pg_partition.h" @@ -298,6 +299,7 @@ const uint32 B_DUMP_TRIGGER_VERSION_NUM = 92843; const uint32 EVENT_VERSION = 92844; const uint32 EVENT_TRIGGER_VERSION_NUM = 92845; const uint32 RB_OBJECT_VERSION_NUM = 92831; +const uint32 PUBLICATION_DDL_VERSION_NUM = 92921; #ifdef DUMPSYSLOG char* syslogpath = NULL; @@ -4386,6 +4388,7 @@ void getPublications(Archive *fout) int i_pubinsert; int i_pubupdate; int i_pubdelete; + int i_pubddl = 0; int i, ntups; if (no_publications || GetVersionNum(fout) < SUBSCRIPTION_VERSION) { @@ -4397,12 +4400,21 @@ void getPublications(Archive *fout) resetPQExpBuffer(query); /* Get the publications. 
*/ - appendPQExpBuffer(query, - "SELECT p.tableoid, p.oid, p.pubname, " - "(%s p.pubowner) AS rolname, " - "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete " - "FROM pg_catalog.pg_publication p", - username_subquery); + if (GetVersionNum(fout) >= PUBLICATION_DDL_VERSION_NUM) { + appendPQExpBuffer(query, + "SELECT p.tableoid, p.oid, p.pubname, " + "(%s p.pubowner) AS rolname, " + "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubddl " + "FROM pg_catalog.pg_publication p", + username_subquery); + } else { + appendPQExpBuffer(query, + "SELECT p.tableoid, p.oid, p.pubname, " + "(%s p.pubowner) AS rolname, " + "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete " + "FROM pg_catalog.pg_publication p", + username_subquery); + } res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK); @@ -4421,6 +4433,9 @@ void getPublications(Archive *fout) i_pubinsert = PQfnumber(res, "pubinsert"); i_pubupdate = PQfnumber(res, "pubupdate"); i_pubdelete = PQfnumber(res, "pubdelete"); + if (GetVersionNum(fout) >= PUBLICATION_DDL_VERSION_NUM) { + i_pubddl = PQfnumber(res, "pubddl"); + } pubinfo = (PublicationInfo *)pg_malloc(ntups * sizeof(PublicationInfo)); @@ -4435,6 +4450,9 @@ void getPublications(Archive *fout) pubinfo[i].pubinsert = (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0); pubinfo[i].pubupdate = (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0); pubinfo[i].pubdelete = (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0); + if (GetVersionNum(fout) >= PUBLICATION_DDL_VERSION_NUM) { + pubinfo[i].pubddl = atol(PQgetvalue(res, i, i_pubddl)); + } if (strlen(pubinfo[i].rolname) == 0) { write_msg(NULL, "WARNING: owner of publication \"%s\" appears to be invalid\n", pubinfo[i].dobj.name); @@ -4498,6 +4516,23 @@ static void dumpPublication(Archive *fout, const PublicationInfo *pubinfo) first = false; } + if (GetVersionNum(fout) >= PUBLICATION_DDL_VERSION_NUM && pubinfo->pubddl != 0) { + if (!first) { + appendPQExpBufferStr(query, "',"); + } + + if (pubinfo->pubddl == PUBDDL_ALL) { + appendPQExpBufferStr(query, "ddl = 'all"); + } else { + appendPQExpBufferStr(query, "ddl = "); + + if (ENABLE_PUBDDL_TYPE(pubinfo->pubddl, PUBDDL_TABLE)) { + appendPQExpBuffer(query, "'table"); + } + } + first = false; + } + appendPQExpBufferStr(query, "');\n"); ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId, pubinfo->dobj.name, NULL, NULL, pubinfo->rolname, @@ -4638,6 +4673,7 @@ void getSubscriptions(Archive *fout) int i_subsynccommit; int i_subpublications; int i_subbinary; + int i_submatchddlowner; int i; int ntups; @@ -4657,12 +4693,18 @@ void getSubscriptions(Archive *fout) /* Get the subscriptions in current database. 
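Version-gated columns (subbinary, submatchddlowner) are selected conditionally; older servers get constant defaults so the result layout stays the same.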
*/ appendPQExpBuffer(query, "SELECT s.tableoid, s.oid, s.subname," "(%s s.subowner) AS rolname, s.subconninfo, s.subslotname, " - "s.subsynccommit, s.subpublications, \n", username_subquery); + "s.subsynccommit, s.subpublications \n", username_subquery); if (GetVersionNum(fout) >= SUBSCRIPTION_BINARY_VERSION_NUM) { - appendPQExpBuffer(query, " s.subbinary\n"); + appendPQExpBuffer(query, ", s.subbinary\n"); } else { - appendPQExpBuffer(query, " false AS subbinary\n"); + appendPQExpBuffer(query, ", false AS subbinary\n"); + } + + if (GetVersionNum(fout) >= PUBLICATION_DDL_VERSION_NUM) { + appendPQExpBuffer(query, ", s.submatchddlowner\n"); + } else { + appendPQExpBuffer(query, ", true AS submatchddlowner\n"); } appendPQExpBuffer(query, "FROM pg_catalog.pg_subscription s " @@ -4687,6 +4729,7 @@ void getSubscriptions(Archive *fout) i_subsynccommit = PQfnumber(res, "subsynccommit"); i_subpublications = PQfnumber(res, "subpublications"); i_subbinary = PQfnumber(res, "subbinary"); + i_submatchddlowner = PQfnumber(res, "submatchddlowner"); subinfo = (SubscriptionInfo *)pg_malloc(ntups * sizeof(SubscriptionInfo)); @@ -4706,6 +4749,7 @@ void getSubscriptions(Archive *fout) subinfo[i].subsynccommit = gs_strdup(PQgetvalue(res, i, i_subsynccommit)); subinfo[i].subpublications = gs_strdup(PQgetvalue(res, i, i_subpublications)); subinfo[i].subbinary = gs_strdup(PQgetvalue(res, i, i_subbinary)); + subinfo[i].submatchddlowner = gs_strdup(PQgetvalue(res, i, i_submatchddlowner)); if (strlen(subinfo[i].rolname) == 0) { write_msg(NULL, "WARNING: owner of subscription \"%s\" appears to be invalid\n", subinfo[i].dobj.name); @@ -4776,6 +4820,10 @@ static void dumpSubscription(Archive *fout, const SubscriptionInfo *subinfo) appendPQExpBuffer(query, ", binary = true"); } + if (strcmp(subinfo->submatchddlowner, "f") == 0) { + appendPQExpBuffer(query, ", match_ddl_owner = false"); + } + if (strcmp(subinfo->subsynccommit, "off") != 0) { appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit)); } @@ -23464,6 +23512,24 @@ getEventTriggers(Archive *fout, int *numEventTriggers) return evtinfo; } +static bool eventtrigger_filter(EventTriggerInfo *evtinfo) +{ + static const uint32 reserved_eventtrigger_prefix_len = 2; + static char *reserved_eventtrigger_prefix[] = { + PUB_EVENT_TRIG_PREFIX PUB_TRIG_DDL_CMD_END, + PUB_EVENT_TRIG_PREFIX PUB_TRIG_DDL_CMD_START + }; + + Assert(sizeof(reserved_eventtrigger_prefix)/sizeof(reserved_eventtrigger_prefix[0]) == reserved_eventtrigger_prefix_len); + + for (int i = 0; i < reserved_eventtrigger_prefix_len; ++i) { + if (!strncmp(evtinfo->dobj.name, reserved_eventtrigger_prefix[i], strlen(reserved_eventtrigger_prefix[i]))) { + return false; + } + } + return true; +} + static void dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo) { PQExpBuffer query; @@ -23472,7 +23538,10 @@ static void dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo) /* Skip if not to be dumped */ if (!evtinfo->dobj.dump || dataOnly) return; - + + if (!eventtrigger_filter(evtinfo)) + return; + query = createPQExpBuffer(); labelq = createPQExpBuffer(); diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h index 525fd23c8..072553d2c 100644 --- a/src/bin/pg_dump/pg_dump.h +++ b/src/bin/pg_dump/pg_dump.h @@ -511,6 +511,7 @@ typedef struct _PublicationInfo { bool pubinsert; bool pubupdate; bool pubdelete; + int64 pubddl; } PublicationInfo; /* @@ -534,6 +535,7 @@ typedef struct _SubscriptionInfo { char *subsynccommit; char *subpublications; char *subbinary; + char 
*submatchddlowner; } SubscriptionInfo; /* global decls */ diff --git a/src/common/backend/catalog/builtin_funcs.ini b/src/common/backend/catalog/builtin_funcs.ini index 42bad2dee..2f734f76a 100644 --- a/src/common/backend/catalog/builtin_funcs.ini +++ b/src/common/backend/catalog/builtin_funcs.ini @@ -9788,6 +9788,18 @@ AddFuncGroup( "pt_contained_poly", 1, AddBuiltinFunc(_0(1429), _1("pt_contained_poly"), _2(2), _3(true), _4(false), _5(pt_contained_poly), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 600, 604), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("pt_contained_poly"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "pubddl_decode", 1, + AddBuiltinFunc(_0(4648), _1("pubddl_decode"), _2(1), _3(true), _4(false), _5(pubddl_decode), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 20), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("pubddl_decode"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "publication_deparse_ddl_command_end", 1, + AddBuiltinFunc(_0(4642), _1("publication_deparse_ddl_command_end"), _2(2), _3(true), _4(false), _5(publication_deparse_ddl_command_end), _6(3838), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("publication_deparse_ddl_command_end"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "publication_deparse_ddl_command_start", 1, + AddBuiltinFunc(_0(4643), _1("publication_deparse_ddl_command_start"), _2(2), _3(true), _4(false), _5(publication_deparse_ddl_command_start), _6(3838), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("publication_deparse_ddl_command_start"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "pv_builtin_functions", 1, AddBuiltinFunc(_0(5345), _1("pv_builtin_functions"), _2(0), _3(false), _4(true), _5(pv_builtin_functions), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(3100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(32, 19, 26, 26, 26, 700, 700, 26, 24, 16, 16, 16, 16, 16, 16, 18, 21, 21, 26, 30, 1007, 1002, 1009, 194, 25, 25, 1009, 1034, 22, 16, 16, 16, 26), _22(32, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(32, "proname", "pronamespace", "proowner", "prolang", "procost", "prorows", "provariadic", "protransform", "proisagg", "proiswindow", 
"prosecdef", "proleakproof", "proisstrict", "proretset", "provolatile", "pronargs", "pronargdefaults", "prorettype", "proargtypes", "proallargtypes", "proargmodes", "proargnames", "proargdefaults", "prosrc", "probin", "proconfig", "proacl", "prodefaultargpos", "fencedmode", "proshippable", "propackage", "oid"), _24(NULL), _25("pv_builtin_functions"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) diff --git a/src/common/backend/catalog/dependency.cpp b/src/common/backend/catalog/dependency.cpp index 8a375de9f..d0e593c8a 100644 --- a/src/common/backend/catalog/dependency.cpp +++ b/src/common/backend/catalog/dependency.cpp @@ -205,7 +205,7 @@ static void deleteObjectsInList(ObjectAddresses *targetObjects, Relation *depRel /* * Keep track of objects for event triggers, if necessary. */ - if (trackDroppedObjectsNeeded()) { + if (trackDroppedObjectsNeeded() && !(flags & PERFORM_DELETION_INTERNAL)) { for (i = 0; i < targetObjects->numrefs; i++) { const ObjectAddress *thisobj = &targetObjects->refs[i]; const ObjectAddressExtra *extra = &targetObjects->extras[i]; diff --git a/src/common/backend/catalog/objectaddress.cpp b/src/common/backend/catalog/objectaddress.cpp index d23aeba89..e75e5d78f 100644 --- a/src/common/backend/catalog/objectaddress.cpp +++ b/src/common/backend/catalog/objectaddress.cpp @@ -97,24 +97,6 @@ * This array provides a common part of system object structure; to help * consolidate routines to handle various kind of object classes. */ -typedef struct { - Oid class_oid; /* oid of catalog */ - Oid oid_index_oid; /* oid of index on system oid column */ - int oid_catcache_id; /* id of catcache on system oid column */ - int name_catcache_id; /* id of catcache on (name,namespace), or - * (name) if the object does not live in a - * namespace */ - AttrNumber attnum_name; /* attnum of name field */ - AttrNumber attnum_namespace; /* attnum of namespace field */ - AttrNumber attnum_owner; /* attnum of owner field */ - AttrNumber attnum_acl; /* attnum of acl field */ - ObjectType objtype; /* OBJECT_* of this object type */ - bool is_nsp_name_unique; /* can the nsp/name combination (or name - * alone, if there's no namespace) be - * considered a unique identifier for an - * object of this class? 
*/ - -} ObjectPropertyType; static THR_LOCAL const ObjectPropertyType ObjectProperty[] = { { @@ -2229,6 +2211,9 @@ static void getRelationTypeDescription(StringInfo buffer, Oid relid, int32 objec case RELKIND_FOREIGN_TABLE: appendStringInfoString(buffer, "foreign table"); break; + case RELKIND_GLOBAL_INDEX: + appendStringInfoString(buffer, "index"); + break; default: /* should not here */ appendStringInfoString(buffer, "relation"); diff --git a/src/common/backend/catalog/pg_publication.cpp b/src/common/backend/catalog/pg_publication.cpp index 4d45297c7..1fdc4d328 100644 --- a/src/common/backend/catalog/pg_publication.cpp +++ b/src/common/backend/catalog/pg_publication.cpp @@ -141,7 +141,7 @@ static Publication *GetPublication(Oid pubid) pub->pubactions.pubinsert = pubform->pubinsert; pub->pubactions.pubupdate = pubform->pubupdate; pub->pubactions.pubdelete = pubform->pubdelete; - + pub->pubactions.pubddl = pubform->pubddl; ReleaseSysCache(tup); return pub; diff --git a/src/common/backend/catalog/pg_subscription.cpp b/src/common/backend/catalog/pg_subscription.cpp index 5f075ff2d..529d02f83 100644 --- a/src/common/backend/catalog/pg_subscription.cpp +++ b/src/common/backend/catalog/pg_subscription.cpp @@ -103,6 +103,14 @@ Subscription *GetSubscription(Oid subid, bool missing_ok) sub->binary = DatumGetBool(datum); } + datum = SysCacheGetAttr(SUBSCRIPTIONOID, tup, Anum_pg_subscription_submatchddlowner, &isnull); + if (unlikely(isnull)) { + /* default matchddlowner is true */ + sub->matchddlowner = true; + } else { + sub->matchddlowner = DatumGetBool(datum); + } + /* Get skiplsn */ datum = SysCacheGetAttr(SUBSCRIPTIONOID, tup, Anum_pg_subscription_subskiplsn, &isnull); if (unlikely(isnull)) { diff --git a/src/common/backend/nodes/copyfuncs.cpp b/src/common/backend/nodes/copyfuncs.cpp index 2b01d9040..6477bb6bc 100644 --- a/src/common/backend/nodes/copyfuncs.cpp +++ b/src/common/backend/nodes/copyfuncs.cpp @@ -5947,6 +5947,7 @@ static CreateSeqStmt* _copyCreateSeqStmt(const CreateSeqStmt* from) COPY_SCALAR_FIELD(canCreateTempSeq); COPY_SCALAR_FIELD(is_large); COPY_SCALAR_FIELD(missing_ok); + COPY_SCALAR_FIELD(is_autoinc); return newnode; } @@ -5959,6 +5960,7 @@ static AlterSeqStmt* _copyAlterSeqStmt(const AlterSeqStmt* from) COPY_NODE_FIELD(options); COPY_SCALAR_FIELD(missing_ok); COPY_SCALAR_FIELD(is_large); + COPY_SCALAR_FIELD(is_autoinc); return newnode; } diff --git a/src/common/backend/nodes/equalfuncs.cpp b/src/common/backend/nodes/equalfuncs.cpp index e52af561c..48bb35a6a 100644 --- a/src/common/backend/nodes/equalfuncs.cpp +++ b/src/common/backend/nodes/equalfuncs.cpp @@ -1898,6 +1898,7 @@ static bool _equalCreateSeqStmt(const CreateSeqStmt* a, const CreateSeqStmt* b) COMPARE_SCALAR_FIELD(canCreateTempSeq); COMPARE_SCALAR_FIELD(is_large); COMPARE_SCALAR_FIELD(missing_ok); + COMPARE_SCALAR_FIELD(is_autoinc); return true; } @@ -1908,6 +1909,7 @@ static bool _equalAlterSeqStmt(const AlterSeqStmt* a, const AlterSeqStmt* b) COMPARE_NODE_FIELD(options); COMPARE_SCALAR_FIELD(missing_ok); COMPARE_SCALAR_FIELD(is_large); + COMPARE_SCALAR_FIELD(is_autoinc); return true; } diff --git a/src/common/backend/parser/parse_utilcmd.cpp b/src/common/backend/parser/parse_utilcmd.cpp index 1df93babe..725bcb7da 100644 --- a/src/common/backend/parser/parse_utilcmd.cpp +++ b/src/common/backend/parser/parse_utilcmd.cpp @@ -1040,7 +1040,7 @@ static void createSeqOwnedByTable(CreateStmtContext* cxt, ColumnDef* column, boo seqstmt = makeNode(CreateSeqStmt); seqstmt->sequence = makeRangeVar(snamespace, sname, -1); 
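/* record whether the sequence backs an AUTO_INCREMENT column */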
seqstmt->options = is_autoinc ? GetAutoIncSeqOptions(cxt) : NULL; - + seqstmt->is_autoinc = is_autoinc; #ifdef PGXC seqstmt->is_serial = true; #endif @@ -1089,6 +1089,7 @@ static void createSeqOwnedByTable(CreateStmtContext* cxt, ColumnDef* column, boo attnamelist = list_make3(makeString(snamespace), makeString(cxt->relation->relname), makeString(column->colname)); altseqstmt->options = list_make1(makeDefElem("owned_by", (Node*)attnamelist)); altseqstmt->is_large = large; + altseqstmt->is_autoinc = is_autoinc; cxt->alist = lappend(cxt->alist, altseqstmt); @@ -3794,6 +3795,10 @@ static IndexStmt* transformIndexConstraint(Constraint* constraint, CreateStmtCon ReleaseSysCache(tup_idx); index->indexOid = index_oid; + + /* save the original index name; it will be replaced by the constraint name */ + DefElem *def = makeDefElem("origin_indexname", (Node*)makeString(constraint->indexname)); + index->options = lappend(index->options, def); } /* diff --git a/src/common/backend/utils/adt/format_type.cpp b/src/common/backend/utils/adt/format_type.cpp index e6a40bd67..29c98a226 100644 --- a/src/common/backend/utils/adt/format_type.cpp +++ b/src/common/backend/utils/adt/format_type.cpp @@ -30,7 +30,7 @@ static char* format_type_internal( Oid type_oid, int32 typemod, bool typemod_given, bool allow_invalid, bool include_nspname = false); -static char* printTypmod(const char* typname, int32 typmod, Oid typmodout); +char* printTypmod(const char* typname, int32 typmod, Oid typmodout); static char* psnprintf(size_t len, const char* fmt, ...) /* This lets gcc check the format string for consistency. */ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3))); @@ -360,7 +360,7 @@ static char* format_type_internal( /* * Add typmod decoration to the basic type name */ -static char* printTypmod(const char* typname, int32 typmod, Oid typmodout) +char* printTypmod(const char* typname, int32 typmod, Oid typmodout) { char* res = NULL; diff --git a/src/common/backend/utils/adt/pgstatfuncs.cpp b/src/common/backend/utils/adt/pgstatfuncs.cpp index e842e20a6..a09885542 100644 --- a/src/common/backend/utils/adt/pgstatfuncs.cpp +++ b/src/common/backend/utils/adt/pgstatfuncs.cpp @@ -32,6 +32,7 @@ #include "catalog/pg_type.h" #include "catalog/pg_partition_fn.h" #include "catalog/pg_namespace.h" +#include "catalog/pg_publication.h" #include "commands/dbcommands.h" #include "commands/user.h" #include "commands/vacuum.h" @@ -3411,6 +3412,28 @@ Datum locktag_decode(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(result); } +Datum pubddl_decode(PG_FUNCTION_ARGS) +{ + int64 pubddl = DatumGetInt64(PG_GETARG_DATUM(0)); + StringInfoData tmpbuf; + initStringInfo(&tmpbuf); + if (pubddl == PUBDDL_NONE) { + appendStringInfo(&tmpbuf, "%s", "none"); + } else if (pubddl == PUBDDL_ALL) { + appendStringInfo(&tmpbuf, "%s", "all"); + } else { + bool first = true; + if (ENABLE_PUBDDL_TYPE(pubddl, PUBDDL_TABLE)) { + appendStringInfo(&tmpbuf, "%s", "table"); + first = false; + } + } + text *result = cstring_to_text(tmpbuf.data); + + FreeStringInfo(&tmpbuf); + PG_RETURN_TEXT_P(result); +} + Datum working_version_num(PG_FUNCTION_ARGS) { PG_RETURN_UINT32(t_thrd.proc->workingVersionNum); diff --git a/src/common/backend/utils/adt/ruleutils.cpp b/src/common/backend/utils/adt/ruleutils.cpp index 15c9933bc..d8a48ab2a 100644 --- a/src/common/backend/utils/adt/ruleutils.cpp +++ b/src/common/backend/utils/adt/ruleutils.cpp @@ -43,6 +43,7 @@ #include "catalog/pg_partition.h" #include "catalog/pg_partition_fn.h" #include "catalog/pg_proc.h" +#include "catalog/pg_rewrite.h" #include 
"catalog/pg_synonym.h" #include "catalog/pg_trigger.h" #include "catalog/pg_type.h" @@ -246,9 +247,9 @@ static void decompile_column_index_array(Datum column_index_array, Oid relId, St static char* pg_get_ruledef_worker(Oid ruleoid, int prettyFlags); static char *pg_get_indexdef_worker(Oid indexrelid, int colno, const Oid *excludeOps, bool attrsOnly, bool showTblSpc, int prettyFlags, bool dumpSchemaOnly = false, bool showPartitionLocal = true, bool showSubpartitionLocal = true); -static void pg_get_indexdef_partitions(Oid indexrelid, Form_pg_index idxrec, bool showTblSpc, StringInfoData *buf, +void pg_get_indexdef_partitions(Oid indexrelid, Form_pg_index idxrec, bool showTblSpc, StringInfoData *buf, bool dumpSchemaOnly, bool showPartitionLocal, bool showSubpartitionLocal); -static char* pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, int prettyFlags); +static char* pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, int prettyFlags, bool with_option=false); static text* pg_get_expr_worker(text* expr, Oid relid, const char* relname, int prettyFlags); static int print_function_arguments(StringInfo buf, HeapTuple proctup, bool print_table_args, bool print_defaults); static void print_function_ora_arguments(StringInfo buf, HeapTuple proctup); @@ -317,7 +318,7 @@ static void get_from_clause_coldeflist( List* names, List* types, List* typmods, List* collations, deparse_context* context); static void get_tablesample_def(TableSampleClause* tablesample, deparse_context* context); static void GetTimecapsuleDef(const TimeCapsuleClause* timeCapsule, deparse_context* context); -static void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf); +void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf); static Node* processIndirection(Node* node, deparse_context* context, bool printit); static void printSubscripts(ArrayRef* aref, deparse_context* context); static char* get_relation_name(Oid relid); @@ -326,7 +327,6 @@ static char* generate_function_name( Oid funcid, int nargs, List* argnames, Oid* argtypes, bool was_variadic, bool* use_variadic_p); static char* generate_operator_name(Oid operid, Oid arg1, Oid arg2); static text* string_to_text(char* str); -static char* flatten_reloptions(Oid relid); static Oid SearchSysTable(const char* query); static void replace_cl_types_in_argtypes(Oid func_id, int numargs, Oid* argtypes, bool *is_client_logic); @@ -339,6 +339,8 @@ static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl static void AppendHashPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tableinfo, int partkeynum, Oid *iPartboundary, SubpartitionInfo *subpartinfo); static void AppendTablespaceInfo(const char *spcname, StringInfo buf, tableInfo tableinfo); +static inline bool IsTableVisible(Oid tableoid); +static void get_table_partitiondef(StringInfo query, StringInfo buf, Oid tableoid, tableInfo tableinfo); /* from pgxcship */ Var* get_var_from_node(Node* node, bool (*func)(Oid) = func_oid_check_reject); @@ -988,6 +990,75 @@ void GetPartitionExprKeySrc(StringInfo buf, Datum* datum, char* relname, Oid tab pfree_ext(partkeystr); } +char *pg_get_partkeydef_string(Relation relation) +{ + OverrideSearchPath *tmp_search_path = NULL; + StringInfoData buf; + StringInfoData query; + tableInfo tableinfo; + + Form_pg_class classForm = NULL; + Oid tableoid = RelationGetRelid(relation); + if (IsTempTable(tableoid)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Can not get temporary tables partition 
defination."))); + } + + initStringInfo(&buf); + initStringInfo(&query); + classForm = relation->rd_rel; + tableinfo.relkind = classForm->relkind; + + if (tableinfo.relkind != RELKIND_RELATION) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Not a ordinary table or foreign table."))); + } + + tableinfo.relpersistence = classForm->relpersistence; + tableinfo.tablespace = classForm->reltablespace; + tableinfo.hasPartialClusterKey = classForm->relhasclusterkey; + tableinfo.hasindex = classForm->relhasindex; + tableinfo.relcmpr = classForm->relcmprs; + tableinfo.relrowmovement = classForm->relrowmovement; + tableinfo.parttype = classForm->parttype; + tableinfo.spcid = classForm->relnamespace; + tableinfo.relname = pstrdup(NameStr(classForm->relname)); + tableinfo.autoinc_attnum = 0; + tableinfo.autoinc_consoid = 0; + tableinfo.autoinc_seqoid = 0; + + tmp_search_path = GetOverrideSearchPath(CurrentMemoryContext); + tmp_search_path->schemas = NIL; + tmp_search_path->addCatalog = true; + tmp_search_path->addTemp = true; + PushOverrideSearchPath(tmp_search_path); + + /* + * Connect to SPI manager + */ + SPI_STACK_LOG("connect", NULL, NULL); + if (SPI_connect() != SPI_OK_CONNECT) + ereport(ERROR, (errcode(ERRCODE_SPI_CONNECTION_FAILURE), + errmsg("SPI_connect failed"))); + + PushActiveSnapshot(GetTransactionSnapshot()); + get_table_partitiondef(&query, &buf, tableoid, tableinfo); + PopActiveSnapshot(); + + /* + * Disconnect from SPI manager + */ + SPI_STACK_LOG("finish", NULL, NULL); + if (SPI_finish() != SPI_OK_FINISH) + ereport(ERROR, (errcode(ERRCODE_SPI_FINISH_FAILURE), + errmsg("SPI_finish failed"))); + + PopOverrideSearchPath(); + pfree_ext(query.data); + pfree_ext(tableinfo.relname); + return buf.data; +} + /* * @Description: get partition table defination * @in query - append query for SPI_execute. 
@@ -1237,7 +1308,7 @@ static void AppendSubPartitionDetail(StringInfo buf, tableInfo tableinfo, Subpar "array_to_string(p.boundaries, ',') as partbound, " "array_to_string(p.boundaries, ''',''') as partboundstr, " "t.spcname AS reltblspc " - "FROM pg_partition p LEFT JOIN pg_tablespace t " + "FROM pg_catalog.pg_partition p LEFT JOIN pg_catalog.pg_tablespace t " "ON p.reltablespace = t.oid " "WHERE p.parentid = %u AND p.parttype = '%c' AND p.partstrategy = '%c' " "ORDER BY p.boundaries[1]::%s ASC", @@ -1313,7 +1384,7 @@ static void AppendRangeIntervalPartitionInfo(StringInfo buf, Oid tableoid, table appendStringInfo(query, "p.oid AS partoid, " "t.spcname AS reltblspc " - "FROM pg_partition p LEFT JOIN pg_tablespace t " + "FROM pg_catalog.pg_partition p LEFT JOIN pg_catalog.pg_tablespace t " "ON p.reltablespace = t.oid " "WHERE p.parentid = %u AND p.parttype = '%c' " "AND p.partstrategy = '%c' ORDER BY ", @@ -1396,7 +1467,7 @@ static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl "array_to_string(p.boundaries, ''',''') as partboundstr, " "p.oid AS partoid, " "t.spcname AS reltblspc " - "FROM pg_partition p LEFT JOIN pg_tablespace t " + "FROM pg_catalog.pg_partition p LEFT JOIN pg_catalog.pg_tablespace t " "ON p.reltablespace = t.oid " "WHERE p.parentid = %u AND p.parttype = '%c' " "AND p.partstrategy = '%c' ORDER BY p.boundaries[1]::%s ASC", @@ -1433,13 +1504,13 @@ static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl "pg_catalog.unnest(keys_array)::text AS key_value FROM ( " "SELECT oid, relname, reltablespace, bound_id,key_bounds::cstring[] AS keys_array FROM ( " "SELECT oid, relname, reltablespace, pg_catalog.unnest(boundaries) AS key_bounds, " - "pg_catalog.generate_subscripts(boundaries, 1) AS bound_id FROM pg_partition " + "pg_catalog.generate_subscripts(boundaries, 1) AS bound_id FROM pg_catalog.pg_partition " "WHERE parentid = %u AND parttype = '%c' AND partstrategy = '%c')))) " "GROUP BY oid, relname, reltablespace, bound_id) " "GROUP BY oid, relname, reltablespace " - "UNION ALL SELECT oid, relname, reltablespace, 'DEFAULT' AS bound_def FROM pg_partition " + "UNION ALL SELECT oid, relname, reltablespace, 'DEFAULT' AS bound_def FROM pg_catalog.pg_partition " "WHERE parentid = %u AND parttype = '%c' AND partstrategy = '%c' AND boundaries[1] IS NULL) p " - "LEFT JOIN pg_tablespace t ON p.reltablespace = t.oid " + "LEFT JOIN pg_catalog.pg_tablespace t ON p.reltablespace = t.oid " "ORDER BY p.bound_def ASC", tableoid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_LIST, tableoid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_LIST); @@ -1503,7 +1574,7 @@ static void AppendHashPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl "p.boundaries[1] AS partboundary, " "p.oid AS partoid, " "t.spcname AS reltblspc " - "FROM pg_partition p LEFT JOIN pg_tablespace t " + "FROM pg_catalog.pg_partition p LEFT JOIN pg_catalog.pg_tablespace t " "ON p.reltablespace = t.oid " "WHERE p.parentid = %u AND p.parttype = '%c' " "AND p.partstrategy = '%c' ORDER BY ", @@ -3369,7 +3440,7 @@ static void GetIndexdefForIntervalPartTabDumpSchemaOnly(Oid indexrelid, RangePar appendStringInfo(buf, ") "); } -static void pg_get_indexdef_partitions(Oid indexrelid, Form_pg_index idxrec, bool showTblSpc, StringInfoData *buf, +void pg_get_indexdef_partitions(Oid indexrelid, Form_pg_index idxrec, bool showTblSpc, StringInfoData *buf, bool dumpSchemaOnly, bool showPartitionLocal, bool showSubpartitionLocal) { Oid relid = idxrec->indrelid; @@ -3752,7 +3823,12 @@ char* 
pg_get_constraintdef_string(Oid constraintId) return pg_get_constraintdef_worker(constraintId, true, 0); } -static char* pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, int prettyFlags) +char* pg_get_constraintdef_part_string(Oid constraintId) +{ + return pg_get_constraintdef_worker(constraintId, false, 0, true); +} + +static char* pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, int prettyFlags, bool with_option) { HeapTuple tup; Form_pg_constraint conForm; @@ -3998,7 +4074,7 @@ static char* pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, int indexId = get_constraint_index(constraintId); /* XXX why do we only print these bits if fullCommand? */ - if (fullCommand && OidIsValid(indexId)) { + if ((fullCommand || with_option) && OidIsValid(indexId)) { char* options = flatten_reloptions(indexId); Oid tblspc; @@ -12060,7 +12136,7 @@ static void get_from_clause_coldeflist( * actual_datatype. (If you don't want this behavior, just pass * InvalidOid for actual_datatype.) */ -static void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf) +void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf) { HeapTuple ht_opc; Form_pg_opclass opcrec; @@ -12564,7 +12640,7 @@ static text* string_to_text(char* str) /* * Generate a C string representing a relation's reloptions, or NULL if none. */ -static char* flatten_reloptions(Oid relid) +char* flatten_reloptions(Oid relid) { char* result = NULL; HeapTuple tuple; diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index 19fc11979..534f8c5f6 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -75,12 +75,13 @@ bool will_shutdown = false; * NEXT | 92899 | ? | ? * ********************************************/ -const uint32 GRAND_VERSION_NUM = 92920; +const uint32 GRAND_VERSION_NUM = 92921; /******************************************** * 2.VERSION NUM FOR EACH FEATURE * Please write indescending order. 
********************************************/ +const uint32 PUBLICATION_DDL_VERSION_NUM = 92921; const uint32 UPSERT_ALIAS_VERSION_NUM = 92920; const uint32 SUPPORT_GS_DEPENDENCY_VERSION_NUM = 92916; const uint32 SPQ_VERSION_NUM = 92915; diff --git a/src/gausskernel/optimizer/commands/Makefile b/src/gausskernel/optimizer/commands/Makefile index c0cd0c246..04ebb2839 100644 --- a/src/gausskernel/optimizer/commands/Makefile +++ b/src/gausskernel/optimizer/commands/Makefile @@ -29,7 +29,7 @@ OBJS = aggregatecmds.o alter.o analyze.o async.o cluster.o comment.o \ tsearchcmds.o typecmds.o user.o vacuum.o vacuumlazy.o \ variable.o verifyrepair.o verify.o view.o gds_stream.o formatter.o datasourcecmds.o \ directory.o auto_explain.o shutdown.o \ - eventcmds.o + eventcmds.o ddldeparse.o ddljson.o ifeq ($(enable_lite_mode), no) OBJS += obs_stream.o diff --git a/src/gausskernel/optimizer/commands/ddldeparse.cpp b/src/gausskernel/optimizer/commands/ddldeparse.cpp new file mode 100644 index 000000000..ccf4174c0 --- /dev/null +++ b/src/gausskernel/optimizer/commands/ddldeparse.cpp @@ -0,0 +1,2460 @@ +/*------------------------------------------------------------------------- + * + * ddldeparse.cpp + * Functions to convert utility commands to machine-parseable + * representation + * + * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * NOTES + * + * This is intended to provide JSON blobs representing DDL commands, which can + * later be re-processed into plain strings by well-defined sprintf-like + * expansion. These JSON objects are intended to allow for machine-editing of + * the commands, by replacing certain nodes within the objects. + * + * Much of the information in the output blob actually comes from system + * catalogs, not from the command parse node, as it is impossible to reliably + * construct a fully-specified command (i.e. one not dependent on search_path + * etc) looking only at the parse node. + * + * A deparse object tree is created by using: + * a) new_objtree("known contents") where the complete tree content is known or + * the initial tree content is known. + * b) new_objtree("") for the syntax where the object tree will be derived + * based on some conditional checks. + * c) new_objtree_VA where the complete tree can be derived using some fixed + * content and/or some variable arguments. 
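+ * + * For example (an illustrative sketch only; the names and values here are + * invented), a sequence CACHE clause could be built as: + * new_objtree_VA("CACHE %{value}s", 2, + * "clause", ObjTypeString, "cache", + * "value", ObjTypeString, "32");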
+ * + * IDENTIFICATION + * src/gausskernel/optimizer/commands/ddldeparse.cpp + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/amapi.h" +#include "catalog/namespace.h" +#include "catalog/pg_am.h" +#include "catalog/pg_aggregate.h" +#include "catalog/pg_authid.h" +#include "catalog/pg_cast.h" +#include "catalog/pg_collation.h" +#include "catalog/pg_constraint.h" +#include "catalog/pg_conversion.h" +#include "catalog/pg_depend.h" +#include "catalog/pg_extension.h" +#include "catalog/pg_foreign_data_wrapper.h" +#include "catalog/pg_foreign_server.h" +#include "catalog/pg_inherits.h" +#include "catalog/pg_language.h" +#include "catalog/pg_largeobject.h" +#include "catalog/pg_namespace.h" +#include "catalog/pg_opclass.h" +#include "catalog/pg_operator.h" +#include "catalog/pg_opfamily.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_range.h" +#include "catalog/pg_rewrite.h" +#include "catalog/pg_statistic_ext.h" +#include "catalog/pg_ts_config.h" +#include "catalog/pg_ts_dict.h" +#include "catalog/pg_ts_parser.h" +#include "catalog/pg_ts_template.h" +#include "catalog/pg_type.h" +#include "catalog/pg_user_mapping.h" +#include "catalog/heap.h" +#include "commands/defrem.h" +#include "commands/sequence.h" +#include "commands/tablespace.h" +#include "foreign/foreign.h" +#include "funcapi.h" +#include "mb/pg_wchar.h" +#include "nodes/nodeFuncs.h" +#include "nodes/parsenodes.h" +#include "parser/parse_type.h" +#include "rewrite/rewriteHandler.h" +#include "tcop/ddldeparse.h" +#include "tcop/utility.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/guc.h" +#include "utils/jsonb.h" +#include "utils/lsyscache.h" +#include "utils/rel.h" +#include "utils/syscache.h" + +/* Estimated length of the generated jsonb string */ +static const int JSONB_ESTIMATED_LEN = 128; + +/* + * Before they are turned into JSONB representation, each command is + * represented as an object tree, using the structs below. + */ +typedef enum +{ + ObjTypeNull, + ObjTypeBool, + ObjTypeString, + ObjTypeArray, + ObjTypeInteger, + ObjTypeFloat, + ObjTypeObject +} ObjType; + +/* + * Represent the command as an object tree. + */ +typedef struct ObjTree +{ + slist_head params; /* Object tree parameters */ + int numParams; /* Number of parameters in the object tree */ + StringInfo fmtinfo; /* Format string of the ObjTree */ + bool present; /* Indicates if boolean value should be stored */ +} ObjTree; + +/* + * An element of an object tree (ObjTree). + */ +typedef struct ObjElem +{ + char *name; /* Name of object element */ + ObjType objtype; /* Object type */ + + union { + bool boolean; + char *string; + int64 integer; + float8 flt; + ObjTree *object; + List *array; + } value; /* Store the object value based on the object + * type */ + slist_node node; /* Used in converting back to ObjElem + * structure */ +} ObjElem; + +/* + * Reduce some unnecessary strings from the output json when verbose and the + * "present" member is false. Such strings will not be merged into the last + * DDL command. 
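+ * For instance, a node such as {"fmt": "NULL", "present": false} (see + * append_literal_or_null() below) contributes nothing to the expanded command.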
+ */ + +static void append_format_string(ObjTree *tree, char *sub_fmt); +static void append_array_object(ObjTree *tree, char *sub_fmt, List *array); +static void append_object_object(ObjTree *tree, char *sub_fmt, ObjTree *value); +static char *append_object_to_format_string(ObjTree *tree, const char *sub_fmt); +static void append_premade_object(ObjTree *tree, ObjElem *elem); +static void append_string_object(ObjTree *tree, char *sub_fmt, char *name, + const char *value); +static void append_int_object(ObjTree *tree, char *sub_fmt, int32 value); +static void format_type_detailed(Oid type_oid, int32 typemod, + Oid *nspid, char **typname, char **typemodstr, + bool *typarray); +static ObjElem *new_object(ObjType type, char *name); +static ObjTree *new_objtree_for_qualname_id(Oid classId, Oid objectId); +static ObjTree *new_objtree(const char *fmt); +static ObjElem *new_object_object(ObjTree *value); + +static ObjTree *new_objtree_VA(const char *fmt, int numobjs,...); + +static JsonbValue *objtree_to_jsonb_rec(ObjTree *tree, JsonbParseState *state, char *owner); +static void pg_get_indexdef_detailed(Oid indexrelid, bool global, + char **index_am, + char **definition, + char **reloptions, + char **tablespace, + char **whereClause, + bool *invisible); +static char *RelationGetColumnDefault(Relation rel, AttrNumber attno, + List *dpcontext, List **exprs); + +static ObjTree *deparse_ColumnDef(Relation relation, List *dpcontext, + ColumnDef *coldef, List **exprs); + +static ObjTree *deparse_DefElem(DefElem *elem, bool is_reset); + +static inline ObjElem *deparse_Seq_Cache(sequence_values *seqdata, bool alter_table); +static inline ObjElem *deparse_Seq_Cycle(sequence_values * seqdata, bool alter_table); +static inline ObjElem *deparse_Seq_IncrementBy(sequence_values * seqdata, bool alter_table); +static inline ObjElem *deparse_Seq_Minvalue(sequence_values * seqdata, bool alter_table); +static inline ObjElem *deparse_Seq_Maxvalue(sequence_values * seqdata, bool alter_table); +static inline ObjElem *deparse_Seq_Restart(char *last_value); +static inline ObjElem *deparse_Seq_Startwith(sequence_values * seqdata, bool alter_table); +static ObjElem *deparse_Seq_OwnedBy(Oid sequenceId); +static inline ObjElem *deparse_Seq_Order(DefElem *elem); +static inline ObjElem *deparse_Seq_As(DefElem *elem); + +static List *deparse_TableElements(Relation relation, List *tableElements, List *dpcontext); + +/* + * Append an int32 parameter to a tree. + */ +static void +append_int_object(ObjTree *tree, char *sub_fmt, int32 value) +{ + ObjElem *param; + char *object_name; + + Assert(sub_fmt); + + object_name = append_object_to_format_string(tree, sub_fmt); + + param = new_object(ObjTypeInteger, object_name); + param->value.integer = value; + append_premade_object(tree, param); +} + +/* + * Append a NULL-or-quoted-literal clause. Useful for COMMENT and SECURITY + * LABEL. + * + * Verbose syntax + * %{null}s %{literal}s + */ +static void +append_literal_or_null(ObjTree *parent, char *elemname, char *value) +{ + ObjTree *top; + ObjTree *part; + + top = new_objtree(""); + part = new_objtree_VA("NULL", 1, + "present", ObjTypeBool, !value); + append_object_object(top, "%{null}s", part); + + part = new_objtree_VA("", 1, + "present", ObjTypeBool, value != NULL); + + if (value) + append_string_object(part, "%{value}L", "value", value); + append_object_object(top, "%{literal}s", part); + + append_object_object(parent, elemname, top); +} + +/* + * Append an array parameter to a tree. 
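+ * Empty arrays are skipped; every element must be an object or a string.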
+ */ +static void +append_array_object(ObjTree *tree, char *sub_fmt, List *array) +{ + ObjElem *param; + char *object_name; + + Assert(sub_fmt); + + if (!array || list_length(array) == 0) + return; + + ListCell *lc; + + /* Every element must be an object or a string */ + foreach(lc, array) { + ObjElem *elem = (ObjElem *) lfirst(lc); + + if (elem->objtype != ObjTypeObject && elem->objtype != ObjTypeString) { + elog(ERROR, "ObjectTypeArray %s elem needs to be an object or string", sub_fmt); + } + } + + object_name = append_object_to_format_string(tree, sub_fmt); + + param = new_object(ObjTypeArray, object_name); + param->value.array = array; + append_premade_object(tree, param); +} + + +/* + * Append the input format string to the ObjTree. + */ +static void +append_format_string(ObjTree *tree, char *sub_fmt) +{ + int len; + char *fmt; + + if (tree->fmtinfo == NULL) + return; + + fmt = tree->fmtinfo->data; + len = tree->fmtinfo->len; + + /* Add a separator if necessary */ + if (len > 0 && fmt[len - 1] != ' ') + appendStringInfoSpaces(tree->fmtinfo, 1); + + appendStringInfoString(tree->fmtinfo, sub_fmt); +} + +/* + * Append an object parameter to a tree. + */ +static void +append_object_object(ObjTree *tree, char *sub_fmt, ObjTree *value) +{ + ObjElem *param; + char *object_name; + + Assert(sub_fmt); + + if (!value) + return; + + object_name = append_object_to_format_string(tree, sub_fmt); + + param = new_object(ObjTypeObject, object_name); + param->value.object = value; + append_premade_object(tree, param); +} + +/* + * Return the object name which is extracted from the input "*%{name[:.]}*" + * style string, and append the input format string to the ObjTree. + */ +static char * +append_object_to_format_string(ObjTree *tree, const char *sub_fmt) +{ + StringInfoData object_name; + const char *end_ptr, *start_ptr; + int length; + char *tmp_str; + + if (tree->fmtinfo == NULL) + elog(ERROR, "object fmtinfo not found"); + + initStringInfo(&object_name); + + start_ptr = strchr(sub_fmt, '{'); + end_ptr = strchr(sub_fmt, ':'); + if (end_ptr == NULL) + end_ptr = strchr(sub_fmt, '}'); + + if (start_ptr != NULL && end_ptr != NULL) { + error_t rc = 0; + length = end_ptr - start_ptr - 1; + tmp_str = (char *) palloc(length + 1); + rc = strncpy_s(tmp_str, length + 1, start_ptr + 1, length); + securec_check_c(rc, "\0", "\0"); + tmp_str[length] = '\0'; + appendStringInfoString(&object_name, tmp_str); + pfree(tmp_str); + } + + if (object_name.len == 0) + elog(ERROR, "object name not found"); + + append_format_string(tree, pstrdup(sub_fmt)); + + return object_name.data; +} + +/* + * Append a preallocated parameter to a tree. + */ +static inline void +append_premade_object(ObjTree *tree, ObjElem *elem) +{ + slist_push_head(&tree->params, &elem->node); + tree->numParams++; +} + +/* + * Append a string parameter to a tree. + */ +static void +append_string_object(ObjTree *tree, char *sub_fmt, char *name, + const char *value) +{ + ObjElem *param; + + Assert(sub_fmt); + + if (value == NULL || value[0] == '\0') + return; + + append_format_string(tree, sub_fmt); + param = new_object(ObjTypeString, name); + param->value.string = pstrdup(value); + append_premade_object(tree, param); +} + +/* + * Similar to format_type_extended, except we return each bit of information + * separately: + * + * - nspid is the schema OID. For certain SQL-standard types which have weird + * typmod rules, we return InvalidOid; the caller is expected to not schema- + * qualify the name nor add quotes to the type name in this case. 
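+ * (INTERVAL and the TIME/TIMESTAMP family below are the cases in point.)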
+ * + * - typname is set to the type name, without quotes + * + * - typemodstr is set to the typemod, if any, as a string with parentheses + * + * - typearray indicates whether []s must be added + * + * We don't try to decode type names to their standard-mandated names, except + * in the cases of types with unusual typmod rules. + */ +static void +format_type_detailed(Oid type_oid, int32 typemod, + Oid *nspid, char **typname, char **typemodstr, + bool *typearray) +{ + HeapTuple tuple; + Form_pg_type typeform; + Oid array_base_type; + + tuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_oid)); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for type with OID %u", type_oid); + + typeform = (Form_pg_type) GETSTRUCT(tuple); + + /* + * Check if it's a regular (variable length) array type. As above, + * fixed-length array types such as "name" shouldn't get deconstructed. + */ + array_base_type = typeform->typelem; + + *typearray = (typeform->typstorage != 'p') && (OidIsValid(array_base_type)); + + if (*typearray) { + /* Switch our attention to the array element type */ + ReleaseSysCache(tuple); + tuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(array_base_type)); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for type with OID %u", array_base_type); + + typeform = (Form_pg_type) GETSTRUCT(tuple); + type_oid = array_base_type; + } + + /* + * Special-case crock for types with strange typmod rules where we put + * typemod at the middle of name (e.g. TIME(6) with time zone). We cannot + * schema-qualify nor add quotes to the type name in these cases. + */ + *nspid = InvalidOid; + + switch (type_oid) { + case INTERVALOID: + *typname = pstrdup("INTERVAL"); + break; + case TIMESTAMPTZOID: + if (typemod < 0) + *typname = pstrdup("TIMESTAMP WITH TIME ZONE"); + else + /* otherwise, WITH TZ is added by typmod. */ + *typname = pstrdup("TIMESTAMP"); + break; + case TIMESTAMPOID: + *typname = pstrdup("TIMESTAMP"); + break; + case TIMETZOID: + if (typemod < 0) + *typname = pstrdup("TIME WITH TIME ZONE"); + else + /* otherwise, WITH TZ is added by typmod. */ + *typname = pstrdup("TIME"); + break; + case TIMEOID: + *typname = pstrdup("TIME"); + break; + default: + + /* + * No additional processing is required for other types, so get + * the type name and schema directly from the catalog. + */ + *nspid = typeform->typnamespace; + *typname = pstrdup(NameStr(typeform->typname)); + } + + if (typemod >= 0) + *typemodstr = printTypmod("", typemod, typeform->typmodout); + else + *typemodstr = pstrdup(""); + + ReleaseSysCache(tuple); +} + +/* + * Return the string representation of the given RELPERSISTENCE value. + */ +static inline char * +get_persistence_str(char persistence) +{ + switch (persistence) { + case RELPERSISTENCE_TEMP: + return "TEMPORARY"; + case RELPERSISTENCE_UNLOGGED: + return "UNLOGGED"; + case RELPERSISTENCE_GLOBAL_TEMP: + return "GLOBAL TEMPORARY"; + case RELPERSISTENCE_PERMANENT: + return ""; + default: + elog(ERROR, "unexpected persistence marking %c", persistence); + return ""; /* make compiler happy */ + } +} + +/* + * Return the string representation of the given storagetype value. + */ +static inline char * +get_type_storage(char storagetype) +{ + switch (storagetype) { + case 'p': + return "plain"; + case 'e': + return "external"; + case 'x': + return "extended"; + case 'm': + return "main"; + default: + elog(ERROR, "invalid storage specifier %c", storagetype); + return ""; /* make compiler happy */ + } +} + +/* + * Allocate a new parameter. 
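+ * The value union is left zeroed; the caller fills it in for non-null types.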
+ */ +static ObjElem * +new_object(ObjType type, char *name) +{ + ObjElem *param; + + param = (ObjElem*)palloc0(sizeof(ObjElem)); + param->name = name; + param->objtype = type; + + return param; +} + +/* + * Allocate a new object parameter. + */ +static ObjElem * +new_object_object(ObjTree *value) +{ + ObjElem *param; + + param = new_object(ObjTypeObject, NULL); + param->value.object = value; + + return param; +} + +/* + * Allocate a new object tree to store parameter values. + */ +static ObjTree * +new_objtree(const char *fmt) +{ + ObjTree *params; + + params = (ObjTree*)palloc0(sizeof(ObjTree)); + slist_init(¶ms->params); + + if (fmt) + { + params->fmtinfo = makeStringInfo(); + appendStringInfoString(params->fmtinfo, fmt); + } + + return params; +} + +/* + * A helper routine to set up %{}D and %{}O elements. + * + * Elements "schema_name" and "obj_name" are set. If the namespace OID + * corresponds to a temp schema, that's set to "pg_temp". + * + * The difference between those two element types is whether the obj_name will + * be quoted as an identifier or not, which is not something that this routine + * concerns itself with; that will be up to the expand function. + */ +static ObjTree * +new_objtree_for_qualname(Oid nspid, char *name) +{ + ObjTree *qualified; + char *namespc; + + if (isAnyTempNamespace(nspid)) + namespc = pstrdup("pg_temp"); + else + namespc = get_namespace_name(nspid); + + qualified = new_objtree_VA(NULL, 2, + "schemaname", ObjTypeString, namespc, + "objname", ObjTypeString, pstrdup(name)); + + return qualified; +} + +/* + * A helper routine to set up %{}D and %{}O elements, with the object specified + * by classId/objId. + */ +static ObjTree * +new_objtree_for_qualname_id(Oid classId, Oid objectId) +{ + ObjTree *qualified; + Relation catalog; + HeapTuple catobj; + Datum obj_nsp; + Datum obj_name; + AttrNumber Anum_name; + AttrNumber Anum_namespace; + bool isnull; + + catalog = relation_open(classId, AccessShareLock); + + catobj = get_catalog_object_by_oid(catalog, objectId); + if (!catobj) + elog(ERROR, "cache lookup failed for object with OID %u of catalog \"%s\"", + objectId, RelationGetRelationName(catalog)); + Anum_name = get_object_attnum_name(classId); + Anum_namespace = get_object_attnum_namespace(classId); + + obj_nsp = heap_getattr(catobj, Anum_namespace, RelationGetDescr(catalog), + &isnull); + if (isnull) + elog(ERROR, "null namespace for object %u", objectId); + + obj_name = heap_getattr(catobj, Anum_name, RelationGetDescr(catalog), + &isnull); + if (isnull) + elog(ERROR, "null attribute name for object %u", objectId); + + qualified = new_objtree_for_qualname(DatumGetObjectId(obj_nsp), + NameStr(*DatumGetName(obj_name))); + relation_close(catalog, AccessShareLock); + + return qualified; +} + +/* + * A helper routine to setup %{}T elements. + */ +static ObjTree * +new_objtree_for_type(Oid typeId, int32 typmod) +{ + Oid typnspid; + char *type_nsp; + char *type_name = NULL; + char *typmodstr; + bool type_array; + + format_type_detailed(typeId, typmod, + &typnspid, &type_name, &typmodstr, &type_array); + + if (OidIsValid(typnspid)) + type_nsp = get_namespace_name_or_temp(typnspid); + else + type_nsp = pstrdup(""); + + return new_objtree_VA(NULL, 4, + "schemaname", ObjTypeString, type_nsp, + "typename", ObjTypeString, type_name, + "typmod", ObjTypeString, typmodstr, + "typarray", ObjTypeBool, type_array); +} + +/* + * Allocate a new object tree to store parameter values -- varargs version. 
+ * + * The "fmt" argument is appended as the "fmt" element in the output blob. + * numobjs indicates the number of extra elements to append; for each one, a + * name (string), type (from the ObjType enum) and value must be supplied. The + * value must match the type given; for instance, ObjTypeInteger requires an + * int64, ObjTypeString requires a char *, ObjTypeArray requires a list (of + * ObjElem), ObjTypeObject requires an ObjTree, and so on. Each element type + * must match the conversion specifier given in the format string, as described + * in ddl_deparse_expand_command, q.v. + * + * Note we don't have the luxury of sprintf-like compiler warnings for + * malformed argument lists. + */ +static ObjTree * +new_objtree_VA(const char *fmt, int numobjs,...) +{ + ObjTree *tree; + va_list args; + int i; + + /* Set up the toplevel object and its "fmt" */ + tree = new_objtree(fmt); + + /* And process the given varargs */ + va_start(args, numobjs); + for (i = 0; i < numobjs; i++) { + char *name; + ObjType type; + ObjElem *elem; + + name = va_arg(args, char *); + type = (ObjType)va_arg(args, int); + elem = new_object(type, NULL); + + /* + * For all param types other than ObjTypeNull, there must be a value in + * the varargs. Fetch it and add the fully formed subobject into the + * main object. + */ + switch (type) { + case ObjTypeNull: + /* Null params don't have a value (obviously) */ + break; + case ObjTypeBool: + elem->value.boolean = va_arg(args, int); + break; + case ObjTypeString: + elem->value.string = va_arg(args, char *); + break; + case ObjTypeArray: + elem->value.array = va_arg(args, List *); + break; + case ObjTypeInteger: + elem->value.integer = va_arg(args, int); + break; + case ObjTypeFloat: + elem->value.flt = va_arg(args, double); + break; + case ObjTypeObject: + elem->value.object = va_arg(args, ObjTree *); + break; + default: + elog(ERROR, "invalid ObjTree element type %d", type); + } + + elem->name = name; + append_premade_object(tree, elem); + } + + va_end(args); + return tree; +} + +/* + * Return the given object type as a string. + * + * If isgrant is true, then this function is called while deparsing GRANT + * statement and some object names are replaced. + */ +static const char * +string_objtype(ObjectType objtype, bool isgrant) +{ + switch (objtype) { + case OBJECT_COLUMN: + return isgrant ? "TABLE" : "COLUMN"; + case OBJECT_DOMAIN: + return "DOMAIN"; + case OBJECT_INDEX: + return "INDEX"; + case OBJECT_SEQUENCE: + return "SEQUENCE"; + case OBJECT_LARGE_SEQUENCE: + return "LARGE SEQUENCE"; + case OBJECT_TABLE: + return "TABLE"; + case OBJECT_TYPE: + return "TYPE"; + default: + elog(WARNING, "unsupported object type %d for string", objtype); + } + + return "???"; /* keep compiler quiet */ +} + +/* + * Process the pre-built format string from the ObjTree into the output parse + * state. + */ +static void +objtree_fmt_to_jsonb_element(JsonbParseState *state, ObjTree *tree) +{ + JsonbValue key; + JsonbValue val; + + if (tree->fmtinfo == NULL) + return; + + /* Push the key first */ + key.type = jbvString; + key.string.val = "fmt"; + key.string.len = strlen(key.string.val); + key.estSize = sizeof(JEntry) + key.string.len; + pushJsonbValue(&state, WJB_KEY, &key); + + /* Then process the pre-built format string */ + val.type = jbvString; + val.string.len = tree->fmtinfo->len; + val.string.val = tree->fmtinfo->data; + val.estSize = sizeof(JEntry) + val.string.len; + pushJsonbValue(&state, WJB_VALUE, &val); +} + +/* + * Process the role string into the output parse state. 
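+ * The owner, when given, is emitted under the reserved "myowner" key.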
+ */
+static void
+role_to_jsonb_element(JsonbParseState *state, char *owner)
+{
+    JsonbValue key;
+    JsonbValue val;
+
+    if (owner == NULL)
+        return;
+
+    /* Push the key first */
+    key.type = jbvString;
+    key.string.val = "myowner";
+    key.string.len = strlen(key.string.val);
+    key.estSize = sizeof(JEntry) + key.string.len;
+    pushJsonbValue(&state, WJB_KEY, &key);
+
+    /* Then process the role string */
+    val.type = jbvString;
+    val.string.len = strlen(owner);
+    val.string.val = owner;
+    val.estSize = sizeof(JEntry) + val.string.len;
+    pushJsonbValue(&state, WJB_VALUE, &val);
+}
+
+/*
+ * Create a JSONB representation from an ObjTree and its owner (if given).
+ */
+static Jsonb *
+objtree_to_jsonb(ObjTree *tree, char *owner)
+{
+    JsonbValue *value;
+
+    value = objtree_to_jsonb_rec(tree, NULL, owner);
+    return JsonbValueToJsonb(value);
+}
+
+/*
+ * Helper for objtree_to_jsonb: process an individual element from an object or
+ * an array into the output parse state.
+ */
+static void
+objtree_to_jsonb_element(JsonbParseState *state, ObjElem *object,
+                         int elem_token)
+{
+    JsonbValue val;
+
+    switch (object->objtype) {
+        case ObjTypeNull:
+            val.type = jbvNull;
+            val.estSize = sizeof(JEntry);
+            pushJsonbValue(&state, elem_token, &val);
+            break;
+
+        case ObjTypeString:
+            val.type = jbvString;
+            if (!object->value.string) {
+                elog(ERROR, "ObjTypeString value should not be empty");
+            }
+            val.string.len = strlen(object->value.string);
+            val.string.val = object->value.string;
+            val.estSize = sizeof(JEntry) + val.string.len;
+            pushJsonbValue(&state, elem_token, &val);
+            break;
+
+        case ObjTypeInteger:
+            val.type = jbvNumeric;
+            val.numeric = (Numeric)
+                DatumGetNumeric(DirectFunctionCall1(int8_numeric,
+                                                    object->value.integer));
+            val.estSize = 2 * sizeof(JEntry) + VARSIZE_ANY(val.numeric);
+            pushJsonbValue(&state, elem_token, &val);
+            break;
+
+        case ObjTypeFloat:
+            val.type = jbvNumeric;
+            val.numeric = (Numeric)
+                DatumGetNumeric(DirectFunctionCall1(float8_numeric,
+                                                    object->value.flt));
+            val.estSize = 2 * sizeof(JEntry) + VARSIZE_ANY(val.numeric);
+            pushJsonbValue(&state, elem_token, &val);
+            break;
+
+        case ObjTypeBool:
+            val.type = jbvBool;
+            val.boolean = object->value.boolean;
+            val.estSize = sizeof(JEntry);
+            pushJsonbValue(&state, elem_token, &val);
+            break;
+
+        case ObjTypeObject:
+            /* Recursively add the object into the existing parse state */
+            if (!object->value.object) {
+                elog(ERROR, "ObjTypeObject value should not be empty");
+            }
+            objtree_to_jsonb_rec(object->value.object, state, NULL);
+            break;
+
+        case ObjTypeArray: {
+            ListCell *cell;
+            if (!object->value.array) {
+                elog(ERROR, "ObjTypeArray value should not be empty");
+            }
+            pushJsonbValue(&state, WJB_BEGIN_ARRAY, NULL);
+            foreach(cell, object->value.array) {
+                ObjElem *elem = (ObjElem *) lfirst(cell);
+
+                objtree_to_jsonb_element(state, elem, WJB_ELEM);
+            }
+            pushJsonbValue(&state, WJB_END_ARRAY, NULL);
+        }
+            break;
+
+        default:
+            elog(ERROR, "unrecognized object type %d", object->objtype);
+            break;
+    }
+}
+
+/*
+ * Recursive helper for objtree_to_jsonb.
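+ *
+ * As a sketch of the output shape (illustrative values, not from the patch):
+ * a tree built with
+ *
+ *   new_objtree_VA("DROP TABLE %{objidentity}s", 1,
+ *                  "objidentity", ObjTypeString, "public.sales")
+ *
+ * serializes to {"fmt": "DROP TABLE %{objidentity}s",
+ * "objidentity": "public.sales"}, with a "myowner" pair first when an owner
+ * is supplied.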
+ */
+static JsonbValue *
+objtree_to_jsonb_rec(ObjTree *tree, JsonbParseState *state, char *owner)
+{
+    slist_iter iter;
+
+    pushJsonbValue(&state, WJB_BEGIN_OBJECT, NULL);
+
+    role_to_jsonb_element(state, owner);
+    objtree_fmt_to_jsonb_element(state, tree);
+
+    slist_foreach(iter, &tree->params) {
+        ObjElem *object = slist_container(ObjElem, node, iter.cur);
+        JsonbValue key;
+
+        /* Push the key first */
+        key.type = jbvString;
+        key.string.len = strlen(object->name);
+        key.string.val = object->name;
+        key.estSize = sizeof(JEntry) + key.string.len;
+        pushJsonbValue(&state, WJB_KEY, &key);
+
+        /* Then process the value according to its type */
+        objtree_to_jsonb_element(state, object, WJB_VALUE);
+    }
+
+    return pushJsonbValue(&state, WJB_END_OBJECT, NULL);
+}
+
+/*
+ * Subroutine for CREATE TABLE deparsing.
+ *
+ * Given a table OID, obtain its constraints and append them to the given
+ * elements list. The updated list is returned.
+ *
+ * This works for typed tables as well as regular tables.
+ *
+ * Note that CONSTRAINT_FOREIGN constraints are always ignored.
+ */
+static List *
+obtainConstraints(List *elements, Oid relationId)
+{
+    Relation conRel;
+    ScanKeyData skey[1];
+    SysScanDesc scan;
+    HeapTuple tuple;
+    ObjTree *constr;
+
+    Assert(OidIsValid(relationId));
+
+    /*
+     * Scan pg_constraint to fetch all constraints linked to the given
+     * relation.
+     */
+    conRel = relation_open(ConstraintRelationId, AccessShareLock);
+    ScanKeyInit(&skey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber,
+                F_OIDEQ, ObjectIdGetDatum(relationId));
+    scan = systable_beginscan(conRel, ConstraintRelidIndexId, true, NULL, 1, skey);
+
+    /*
+     * For each constraint, add a node to the list of table elements. In
+     * these nodes we include not only the printable information ("fmt"), but
+     * also separate attributes to indicate the type of constraint, for
+     * automatic processing.
+     */
+    while (HeapTupleIsValid(tuple = systable_getnext(scan)))
+    {
+        Form_pg_constraint constrForm;
+        char *contype = NULL;
+
+        constrForm = (Form_pg_constraint) GETSTRUCT(tuple);
+
+        switch (constrForm->contype) {
+            case CONSTRAINT_CHECK:
+                contype = "check";
+                break;
+            case CONSTRAINT_FOREIGN:
+                continue; /* not here */
+            case CONSTRAINT_PRIMARY:
+                contype = "primary key";
+                break;
+            case CONSTRAINT_UNIQUE:
+                contype = "unique";
+                break;
+            case CONSTRAINT_TRIGGER:
+                contype = "trigger";
+                break;
+            case CONSTRAINT_EXCLUSION:
+                contype = "exclusion";
+                break;
+            default:
+                elog(ERROR, "unrecognized constraint type %c", constrForm->contype);
+        }
+
+        /*
+         * "type" and "contype" are not part of the printable output, but are
+         * useful to programmatically distinguish these from columns and among
+         * different constraint types.
+         *
+         * XXX it might be useful to also list the column names in a PK, etc.
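+         *
+         * For instance (names illustrative), a primary key on column "id"
+         * becomes an element along the lines of:
+         *   {"fmt": "CONSTRAINT %{name}I %{definition}s",
+         *    "type": "constraint", "contype": "primary key",
+         *    "name": "t_pkey", "definition": "PRIMARY KEY (id)"}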
+         */
+        constr = new_objtree_VA("CONSTRAINT %{name}I %{definition}s", 4,
+                                "type", ObjTypeString, "constraint",
+                                "contype", ObjTypeString, contype,
+                                "name", ObjTypeString, NameStr(constrForm->conname),
+                                "definition", ObjTypeString,
+                                pg_get_constraintdef_part_string(HeapTupleGetOid(tuple)));
+
+        if (constrForm->conindid &&
+            (constrForm->contype == CONSTRAINT_PRIMARY ||
+             constrForm->contype == CONSTRAINT_UNIQUE ||
+             constrForm->contype == CONSTRAINT_EXCLUSION)) {
+            Oid tblspc = get_rel_tablespace(constrForm->conindid);
+
+            if (OidIsValid(tblspc)) {
+                char *tblspcname = get_tablespace_name(tblspc);
+
+                if (!tblspcname) {
+                    elog(ERROR, "cache lookup failed for tablespace %u", tblspc);
+                }
+
+                append_string_object(constr, "USING INDEX TABLESPACE %{tblspc}s", "tblspc", tblspcname);
+            }
+        }
+
+        elements = lappend(elements, new_object_object(constr));
+    }
+
+    systable_endscan(scan);
+    relation_close(conRel, AccessShareLock);
+
+    return elements;
+}
+
+/*
+ * Return an index definition, split into several pieces.
+ *
+ * A large amount of code is duplicated from pg_get_indexdef_worker, but
+ * control flow is different enough that it doesn't seem worth keeping them
+ * together.
+ */
+static void
+pg_get_indexdef_detailed(Oid indexrelid, bool global,
+                         char **index_am,
+                         char **definition,
+                         char **reloptions,
+                         char **tablespace,
+                         char **whereClause,
+                         bool *invisible)
+{
+    HeapTuple ht_idx;
+    HeapTuple ht_idxrel;
+    HeapTuple ht_am;
+    Form_pg_index idxrec;
+    Form_pg_class idxrelrec;
+    Form_pg_am amrec;
+    List *indexprs;
+    ListCell *indexpr_item;
+    List *context;
+    Oid indrelid;
+    int keyno;
+    Datum indcollDatum;
+    Datum indclassDatum;
+    Datum indoptionDatum;
+    bool isnull;
+    oidvector *indcollation;
+    oidvector *indclass;
+    int2vector *indoption;
+    StringInfoData definitionBuf;
+    int indnkeyatts;
+
+    *tablespace = NULL;
+    *whereClause = NULL;
+
+    /* Fetch the pg_index tuple by the Oid of the index */
+    ht_idx = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexrelid));
+    if (!HeapTupleIsValid(ht_idx))
+        elog(ERROR, "cache lookup failed for index with OID %u", indexrelid);
+    idxrec = (Form_pg_index) GETSTRUCT(ht_idx);
+
+    indrelid = idxrec->indrelid;
+    Assert(indexrelid == idxrec->indexrelid);
+    indnkeyatts = GetIndexKeyAttsByTuple(NULL, ht_idx);
+
+    /* Must get indcollation, indclass, and indoption the hard way */
+    indcollDatum = SysCacheGetAttr(INDEXRELID, ht_idx,
+                                   Anum_pg_index_indcollation, &isnull);
+    Assert(!isnull);
+    indcollation = (oidvector *) DatumGetPointer(indcollDatum);
+
+    indclassDatum = SysCacheGetAttr(INDEXRELID, ht_idx,
+                                    Anum_pg_index_indclass, &isnull);
+    Assert(!isnull);
+    indclass = (oidvector *) DatumGetPointer(indclassDatum);
+
+    indoptionDatum = SysCacheGetAttr(INDEXRELID, ht_idx,
+                                     Anum_pg_index_indoption, &isnull);
+    Assert(!isnull);
+    indoption = (int2vector *) DatumGetPointer(indoptionDatum);
+
+    /* Fetch the pg_class tuple of the index relation */
+    ht_idxrel = SearchSysCache1(RELOID, ObjectIdGetDatum(indexrelid));
+    if (!HeapTupleIsValid(ht_idxrel))
+        elog(ERROR, "cache lookup failed for relation with OID %u", indexrelid);
+    idxrelrec = (Form_pg_class) GETSTRUCT(ht_idxrel);
+
+    /* Fetch the pg_am tuple of the index' access method */
+    ht_am = SearchSysCache1(AMOID, ObjectIdGetDatum(idxrelrec->relam));
+    if (!HeapTupleIsValid(ht_am))
+        elog(ERROR, "cache lookup failed for access method with OID %u",
+             idxrelrec->relam);
+    amrec = (Form_pg_am) GETSTRUCT(ht_am);
+
+    /*
+     * Get the index expressions, if any.
+     * (NOTE: we do not use the relcache versions of the expressions and
+     * predicate, because we want to display non-const-folded expressions.)
+     */
+    if (!heap_attisnull(ht_idx, Anum_pg_index_indexprs, NULL)) {
+        Datum exprsDatum;
+        char *exprsString;
+
+        exprsDatum = SysCacheGetAttr(INDEXRELID, ht_idx,
+                                     Anum_pg_index_indexprs, &isnull);
+        Assert(!isnull);
+        exprsString = TextDatumGetCString(exprsDatum);
+        indexprs = (List *) stringToNode(exprsString);
+        pfree(exprsString);
+    } else {
+        indexprs = NIL;
+    }
+
+    indexpr_item = list_head(indexprs);
+
+    context = deparse_context_for(get_rel_name(indrelid), indrelid);
+
+    initStringInfo(&definitionBuf);
+
+    /* Output index AM */
+    *index_am = pstrdup(quote_identifier(NameStr(amrec->amname)));
+
+    /*
+     * Output index definition. Note the outer parens must be supplied by
+     * caller.
+     */
+    bool add_global = false;
+    appendStringInfoString(&definitionBuf, "(");
+    for (keyno = 0; keyno < idxrec->indnatts; keyno++) {
+        AttrNumber attnum = idxrec->indkey.values[keyno];
+        int16 opt = indoption->values[keyno];
+        Oid keycoltype;
+        Oid keycolcollation;
+
+        /* Print INCLUDE to divide key and non-key attrs. */
+        if (keyno == indnkeyatts) {
+            if (global) {
+                appendStringInfoString(&definitionBuf, ") GLOBAL ");
+                add_global = true;
+            } else {
+                appendStringInfoString(&definitionBuf, ") ");
+            }
+            appendStringInfoString(&definitionBuf, "INCLUDE (");
+        } else {
+            appendStringInfoString(&definitionBuf, keyno == 0 ? "" : ", ");
+        }
+
+        if (attnum != 0) {
+            /* Simple index column */
+            char *attname;
+            int32 keycoltypmod;
+
+            attname = get_attname(indrelid, attnum);
+            appendStringInfoString(&definitionBuf, quote_identifier(attname));
+            get_atttypetypmodcoll(indrelid, attnum,
+                                  &keycoltype, &keycoltypmod,
+                                  &keycolcollation);
+        } else {
+            /* Expressional index */
+            Node *indexkey;
+            char *str;
+
+            if (indexpr_item == NULL)
+                elog(ERROR, "too few entries in indexprs list");
+            indexkey = (Node *) lfirst(indexpr_item);
+            indexpr_item = lnext(indexpr_item);
+
+            /* Deparse */
+            str = deparse_expression(indexkey, context, false, false);
+
+            /* Need parens if it's not a bare function call */
+            if (indexkey && IsA(indexkey, FuncExpr) &&
+                ((FuncExpr *) indexkey)->funcformat == COERCE_EXPLICIT_CALL) {
+                appendStringInfoString(&definitionBuf, str);
+            } else {
+                appendStringInfo(&definitionBuf, "(%s)", str);
+            }
+
+            keycoltype = exprType(indexkey);
+            keycolcollation = exprCollation(indexkey);
+        }
+
+        /* Print additional decoration for (selected) key columns, even if default */
+        if (keyno < indnkeyatts) {
+            Oid indcoll = indcollation->values[keyno];
+            if (OidIsValid(indcoll))
+                appendStringInfo(&definitionBuf, " COLLATE %s",
+                                 generate_collation_name(indcoll));
+
+            /* Add the operator class name, even if default */
+            get_opclass_name(indclass->values[keyno], InvalidOid, &definitionBuf);
+
+            /* Add options if relevant */
+            if (amrec->amcanorder) {
+                /* If it supports sort ordering, report DESC and NULLS opts */
+                if (opt & INDOPTION_DESC) {
+                    appendStringInfoString(&definitionBuf, " DESC");
+                    /* NULLS FIRST is the default in this case */
+                    if (!(opt & INDOPTION_NULLS_FIRST))
+                        appendStringInfoString(&definitionBuf, " NULLS LAST");
+                } else {
+                    if (opt & INDOPTION_NULLS_FIRST)
+                        appendStringInfoString(&definitionBuf, " NULLS FIRST");
+                }
+            }
+
+            /* XXX excludeOps thingy was here; do we need anything?
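+             * (In this patch, exclusion operators are emitted as part of the
+             * constraint definition via obtainConstraints, so likely not.)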
+             */
+        }
+    }
+
+    appendStringInfoString(&definitionBuf, ")");
+    if (!add_global && global) {
+        appendStringInfoString(&definitionBuf, " GLOBAL");
+    }
+
+    if (idxrelrec->parttype == PARTTYPE_PARTITIONED_RELATION &&
+        idxrelrec->relkind != RELKIND_GLOBAL_INDEX) {
+        pg_get_indexdef_partitions(indexrelid, idxrec, true, &definitionBuf, true, true, true);
+    }
+
+    *definition = definitionBuf.data;
+
+    /* Output reloptions */
+    *reloptions = flatten_reloptions(indexrelid);
+
+    /* Output tablespace */
+    {
+        Oid tblspc;
+
+        tblspc = get_rel_tablespace(indexrelid);
+        if (OidIsValid(tblspc))
+            *tablespace = pstrdup(quote_identifier(get_tablespace_name(tblspc)));
+    }
+
+    *invisible = false;
+    if (!GetIndexVisibleStateByTuple(ht_idx)) {
+        *invisible = true;
+    }
+
+    /* Report index predicate, if any */
+    if (!heap_attisnull(ht_idx, Anum_pg_index_indpred, NULL))
+    {
+        Node *node;
+        Datum predDatum;
+        char *predString;
+
+        /* Convert text string to node tree */
+        predDatum = SysCacheGetAttr(INDEXRELID, ht_idx,
+                                    Anum_pg_index_indpred, &isnull);
+        Assert(!isnull);
+        predString = TextDatumGetCString(predDatum);
+        node = (Node *) stringToNode(predString);
+        pfree(predString);
+
+        /* Deparse */
+        *whereClause = deparse_expression(node, context, false, false);
+    }
+
+    /* Clean up */
+    ReleaseSysCache(ht_idx);
+    ReleaseSysCache(ht_idxrel);
+    ReleaseSysCache(ht_am);
+}
+
+/*
+ * Obtain the deparsed default value for the given column of the given table.
+ *
+ * Caller must have set a correct deparse context.
+ */
+static char *
+RelationGetColumnDefault(Relation rel, AttrNumber attno, List *dpcontext,
+                         List **exprs)
+{
+    Node *defval;
+    char *defstr;
+
+    defval = build_column_default(rel, attno);
+    defstr = deparse_expression(defval, dpcontext, false, false);
+
+    /* Collect the expression for later replication safety checks */
+    if (exprs)
+        *exprs = lappend(*exprs, defval);
+
+    return defstr;
+}
+
+static char *RelationGetColumnOnUpdate(Node *update_expr, List *dpcontext, List **exprs)
+{
+    StringInfoData buf;
+
+    initStringInfo(&buf);
+
+    if (IsA(update_expr, FuncCall)) {
+        FuncCall *n = (FuncCall *) update_expr;
+        Value *funcname = (Value *) lsecond(n->funcname);
+        if (!pg_strcasecmp(strVal(funcname), "text_date")) {
+            appendStringInfo(&buf, "CURRENT_DATE");
+        } else if (!pg_strcasecmp(strVal(funcname), "pg_systimestamp")) {
+            appendStringInfo(&buf, "CURRENT_TIMESTAMP");
+        } else if (!pg_strcasecmp(strVal(funcname), "sysdate")) {
+            appendStringInfo(&buf, "SYSDATE");
+        }
+    } else if (IsA(update_expr, TypeCast)) {
+        TypeCast *n = (TypeCast *) update_expr;
+        TypeName *t = n->typname;
+        Value *typname = (Value *) lsecond(t->names);
+        if (!pg_strcasecmp(strVal(typname), "timetz")) {
+            appendStringInfo(&buf, "CURRENT_TIME");
+        } else if (!pg_strcasecmp(strVal(typname), "timestamptz")) {
+            appendStringInfo(&buf, "CURRENT_TIMESTAMP");
+        } else if (!pg_strcasecmp(strVal(typname), "time")) {
+            appendStringInfo(&buf, "LOCALTIME");
+        } else if (!pg_strcasecmp(strVal(typname), "timestamp")) {
+            appendStringInfo(&buf, "LOCALTIMESTAMP");
+        }
+
+        if (t->typmods) {
+            A_Const *typmod = (A_Const *) linitial(t->typmods);
+            appendStringInfo(&buf, "(%ld)", intVal(&typmod->val));
+        }
+    } else if (IsA(update_expr, A_Expr)) {
+        A_Expr *a = (A_Expr *) update_expr;
+        Value *n = (Value *) linitial(a->name);
+        if (a->kind == AEXPR_OP && (!pg_strcasecmp(strVal(n), "+") ||
+            !pg_strcasecmp(strVal(n), "-"))) {
+            if (a->rexpr && IsA(a->rexpr, A_Const)) {
+                char *larg = RelationGetColumnOnUpdate(a->lexpr, dpcontext, exprs);
+                A_Const *con = (A_Const *) a->rexpr;
+                Value *v = &con->val;
+                if (nodeTag(v) == T_Integer) {
+                    appendStringInfo(&buf, "%s %s %ld", larg, strVal(n), intVal(v));
+                } else if (nodeTag(v) == T_String) {
+                    appendStringInfo(&buf, "%s %s '%s'", larg, strVal(n), strVal(v));
+                }
+            }
+        }
+    }
+
+    return buf.data;
+}
+
+/*
+ * Deparse a ColumnDef node within a regular (non-typed) table creation.
+ *
+ * NOT NULL constraints in the column definition are emitted directly in the
+ * column definition by this routine; other constraints must be emitted
+ * elsewhere (the info in the parse node is incomplete anyway).
+ *
+ * Verbose syntax
+ * %{name}I %{coltype}T %{compression}s %{default}s %{not_null}s %{collation}s
+ */
+static ObjTree *
+deparse_ColumnDef(Relation relation, List *dpcontext,
+                  ColumnDef *coldef, List **exprs)
+{
+    ObjTree *ret;
+    Oid relid = RelationGetRelid(relation);
+    HeapTuple attrTup;
+    Form_pg_attribute attrForm;
+    Oid typid;
+    int32 typmod;
+    Oid typcollation;
+    bool saw_notnull;
+    bool saw_autoincrement;
+    char *onupdate = NULL;
+    ListCell *cell;
+
+    /*
+     * Inherited columns without local definitions must not be emitted.
+     *
+     * XXX maybe it is useful to have them with "present = false" or some
+     * such?
+     */
+    if (!coldef->is_local)
+        return NULL;
+
+    attrTup = SearchSysCacheAttName(relid, coldef->colname);
+    if (!HeapTupleIsValid(attrTup))
+        elog(ERROR, "could not find cache entry for column \"%s\" of relation %u",
+             coldef->colname, relid);
+    attrForm = (Form_pg_attribute) GETSTRUCT(attrTup);
+
+    get_atttypetypmodcoll(relid, attrForm->attnum,
+                          &typid, &typmod, &typcollation);
+
+    ret = new_objtree_VA("%{name}I %{coltype}T", 3,
+                         "type", ObjTypeString, "column",
+                         "name", ObjTypeString, coldef->colname,
+                         "coltype", ObjTypeObject,
+                         new_objtree_for_type(typid, typmod));
+
+    if (OidIsValid(typcollation)) {
+        append_object_object(ret, "COLLATE %{collate}D",
+                             new_objtree_for_qualname_id(CollationRelationId,
+                                                         typcollation));
+    }
+
+    /*
+     * Emit a NOT NULL declaration if necessary. Note that we cannot
+     * trust pg_attribute.attnotnull here, because that bit is also set
+     * when primary keys are specified; we must not emit a NOT NULL
+     * constraint in that case, unless explicitly specified. Therefore,
+     * we scan the list of constraints attached to this column to
+     * determine whether we need to emit anything. (Fortunately, NOT NULL
+     * constraints cannot be table constraints.)
+     *
+     * In the ALTER TABLE cases, we also add a NOT NULL if the colDef is
+     * marked is_not_null.
+     */
+    saw_notnull = false;
+    saw_autoincrement = false;
+
+    foreach(cell, coldef->constraints) {
+        Constraint *constr = (Constraint *) lfirst(cell);
+
+        if (constr->contype == CONSTR_NOTNULL) {
+            saw_notnull = true;
+        } else if (constr->contype == CONSTR_AUTO_INCREMENT) {
+            saw_autoincrement = true;
+        } else if (constr->contype == CONSTR_DEFAULT && constr->update_expr) {
+            onupdate = RelationGetColumnOnUpdate(constr->update_expr, dpcontext, exprs);
+        }
+    }
+
+    append_string_object(ret, "%{auto_increment}s", "auto_increment",
+                         saw_autoincrement ? "AUTO_INCREMENT" : "");
+
+    append_string_object(ret, "%{not_null}s", "not_null",
+                         saw_notnull ? "NOT NULL" : "");
"NOT NULL" : ""); + + /* GENERATED COLUMN EXPRESSION */ + if (coldef->generatedCol == ATTRIBUTE_GENERATED_STORED) { + char *defstr = RelationGetColumnDefault(relation, attrForm->attnum, dpcontext, exprs); + append_string_object(ret, "GENERATED ALWAYS AS (%{generation_expr}s) STORED", + "generation_expr", defstr); + } + + if (attrForm->atthasdef && + coldef->generatedCol != ATTRIBUTE_GENERATED_STORED && + !saw_autoincrement) { + char *defstr; + + defstr = RelationGetColumnDefault(relation, attrForm->attnum, + dpcontext, exprs); + + append_string_object(ret, "DEFAULT %{default}s", "default", defstr); + } + + append_string_object(ret, "ON UPDATE %{on_update}s", "on_update", onupdate ? onupdate : ""); + + ReleaseSysCache(attrTup); + + return ret; +} + + +/* + * Deparse DefElems + * + * Verbose syntax + * %{label}s = %{value}L + */ +static ObjTree * +deparse_DefElem(DefElem *elem, bool is_reset) +{ + ObjTree *ret; + ObjTree *optname = new_objtree(""); + + if (elem->defnamespace != NULL) + append_string_object(optname, "%{schema}I.", "schema", + elem->defnamespace); + + append_string_object(optname, "%{label}I", "label", elem->defname); + + ret = new_objtree_VA("%{label}s", 1, + "label", ObjTypeObject, optname); + + if (!is_reset) { + if (!elem->arg) { + append_string_object(ret, "= %{value}s", "value", defGetBoolean(elem) ? "TRUE" : "FALSE"); + } else { + if (nodeTag(elem->arg) == T_String) { + append_string_object(ret, "= %{value}L", "value", defGetString(elem)); + } else { + append_string_object(ret, "= %{value}s", "value", defGetString(elem)); + } + } + } + return ret; +} + +/* + * Deparse the ON COMMIT ... clause for CREATE ... TEMPORARY ... + * + * Verbose syntax + * ON COMMIT %{on_commit_value}s + */ +static ObjTree * +deparse_OnCommitClause(OnCommitAction option) +{ + ObjTree *ret = new_objtree("ON COMMIT"); + switch (option) { + case ONCOMMIT_DROP: + append_string_object(ret, "%{on_commit_value}s", + "on_commit_value", "DROP"); + break; + + case ONCOMMIT_DELETE_ROWS: + append_string_object(ret, "%{on_commit_value}s", + "on_commit_value", "DELETE ROWS"); + break; + + case ONCOMMIT_PRESERVE_ROWS: + append_string_object(ret, "%{on_commit_value}s", + "on_commit_value", "PRESERVE ROWS"); + break; + + case ONCOMMIT_NOOP: + ret = NULL; + break; + } + + return ret; +} + +/* + * Deparse the sequence CACHE option. + * + * Verbose syntax + * SET CACHE %{value}s + * OR + * CACHE %{value} + */ +static inline ObjElem * +deparse_Seq_Cache(sequence_values * seqdata, bool alter_table) +{ + ObjTree *ret; + const char *fmt; + + fmt = alter_table ? "SET CACHE %{value}s" : "CACHE %{value}s"; + + ret = new_objtree_VA(fmt, 2, + "clause", ObjTypeString, "cache", + "value", ObjTypeString, seqdata->cache_value); + + return new_object_object(ret); +} + +/* + * Deparse the sequence CYCLE option. + * + * Verbose syntax + * SET %{no}s CYCLE + * OR + * %{no}s CYCLE + */ +static inline ObjElem * +deparse_Seq_Cycle(sequence_values * seqdata, bool alter_table) +{ + ObjTree *ret; + const char *fmt; + + fmt = alter_table ? "SET %{no}s CYCLE" : "%{no}s CYCLE"; + + ret = new_objtree_VA(fmt, 2, + "clause", ObjTypeString, "cycle", + "no", ObjTypeString, + seqdata->is_cycled ? "" : "NO"); + + return new_object_object(ret); +} + +/* + * Deparse the sequence INCREMENT BY option. + * + * Verbose syntax + * SET INCREMENT BY %{value}s + * OR + * INCREMENT BY %{value}s + */ +static inline ObjElem * +deparse_Seq_IncrementBy(sequence_values * seqdata, bool alter_table) +{ + ObjTree *ret; + const char *fmt; + + fmt = alter_table ? 
"SET INCREMENT BY %{value}s" : "INCREMENT BY %{value}s"; + + ret = new_objtree_VA(fmt, 2, + "clause", ObjTypeString, "seqincrement", + "value", ObjTypeString, seqdata->increment_by); + + return new_object_object(ret); +} + +/* + * Deparse the sequence MAXVALUE option. + * + * Verbose syntax + * SET MAXVALUE %{value}s + * OR + * MAXVALUE %{value}s + */ +static inline ObjElem * +deparse_Seq_Maxvalue(sequence_values * seqdata, bool alter_table) +{ + ObjTree *ret; + const char *fmt; + + fmt = alter_table ? "SET MAXVALUE %{value}s" : "MAXVALUE %{value}s"; + + ret = new_objtree_VA(fmt, 2, + "clause", ObjTypeString, "maxvalue", + "value", ObjTypeString, seqdata->max_value); + + return new_object_object(ret); +} + +/* + * Deparse the sequence MINVALUE option. + * + * Verbose syntax + * SET MINVALUE %{value}s + * OR + * MINVALUE %{value}s + */ +static inline ObjElem * +deparse_Seq_Minvalue(sequence_values * seqdata, bool alter_table) +{ + ObjTree *ret; + const char *fmt; + + fmt = alter_table ? "SET MINVALUE %{value}s" : "MINVALUE %{value}s"; + + ret = new_objtree_VA(fmt, 2, + "clause", ObjTypeString, "minvalue", + "value", ObjTypeString, seqdata->min_value); + + return new_object_object(ret); +} + +/* + * Deparse the sequence OWNED BY command. + * + * Verbose syntax + * OWNED BY %{owner}D + */ +static ObjElem * +deparse_Seq_OwnedBy(Oid sequenceId) +{ + ObjTree *ret = NULL; + Relation depRel; + SysScanDesc scan; + ScanKeyData keys[3]; + HeapTuple tuple; + + depRel = relation_open(DependRelationId, AccessShareLock); + ScanKeyInit(&keys[0], + Anum_pg_depend_classid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationRelationId)); + ScanKeyInit(&keys[1], + Anum_pg_depend_objid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(sequenceId)); + ScanKeyInit(&keys[2], + Anum_pg_depend_objsubid, + BTEqualStrategyNumber, F_INT4EQ, + Int32GetDatum(0)); + + scan = systable_beginscan(depRel, DependDependerIndexId, true, + NULL, 3, keys); + + while (HeapTupleIsValid(tuple = systable_getnext(scan))) { + Oid ownerId; + Form_pg_depend depform; + ObjTree *tmp_obj; + char *colname; + + depform = (Form_pg_depend) GETSTRUCT(tuple); + + /* Only consider AUTO dependencies on pg_class */ + if (depform->deptype != DEPENDENCY_AUTO) + continue; + if (depform->refclassid != RelationRelationId) + continue; + if(depform->refobjsubid <= 0) + continue; + + ownerId = depform->refobjid; + colname = get_attname(ownerId, depform->refobjsubid); + if (colname == NULL) + continue; + + tmp_obj = new_objtree_for_qualname_id(RelationRelationId, ownerId); + append_string_object(tmp_obj, "attrname", "attrname", colname); + ret = new_objtree_VA("OWNED BY %{owner}D", 2, + "clause", ObjTypeString, "owned", + "owner", ObjTypeObject, tmp_obj); + } + + systable_endscan(scan); + relation_close(depRel, AccessShareLock); + + /* + * If there's no owner column, emit an empty OWNED BY element, set up so + * that it won't print anything. + */ + if (!ret) + /* XXX this shouldn't happen */ + ret = new_objtree_VA("OWNED BY %{owner}D", 3, + "clause", ObjTypeString, "owned", + "owner", ObjTypeNull, + "present", ObjTypeBool, false); + + return new_object_object(ret); +} + +/* + * Deparse the sequence ORDER option. + */ +static inline ObjElem * +deparse_Seq_Order(DefElem *elem) +{ + ObjTree *ret; + + ret = new_objtree_VA("%{order}s", 2, + "clause", ObjTypeString, "order", + "order", ObjTypeString, defGetBoolean(elem) ? "ORDER" : "NOORDER"); + + return new_object_object(ret); +} + + +/* + * Deparse the sequence RESTART option. 
+ *
+ * Verbose syntax
+ * RESTART %{value}s
+ */
+static inline ObjElem *
+deparse_Seq_Restart(char *last_value)
+{
+    ObjTree *ret;
+    ret = new_objtree_VA("RESTART %{value}s", 2,
+                         "clause", ObjTypeString, "restart",
+                         "value", ObjTypeString, last_value);
+
+    return new_object_object(ret);
+}
+
+/*
+ * Deparse the sequence AS option.
+ *
+ * Verbose syntax
+ * AS %{identity}D
+ */
+static inline ObjElem *
+deparse_Seq_As(DefElem *elem)
+{
+    ObjTree *ret;
+    Type likeType;
+    Form_pg_type likeForm;
+
+    likeType = typenameType(NULL, defGetTypeName(elem), NULL);
+    likeForm = (Form_pg_type) GETSTRUCT(likeType);
+
+    ret = new_objtree_VA("AS %{identity}D", 1,
+                         "identity", ObjTypeObject,
+                         new_objtree_for_qualname(likeForm->typnamespace,
+                                                  NameStr(likeForm->typname)));
+    ReleaseSysCache(likeType);
+
+    return new_object_object(ret);
+}
+
+/*
+ * Deparse the sequence START WITH option.
+ *
+ * Verbose syntax
+ * SET START WITH %{value}s
+ * OR
+ * START WITH %{value}s
+ */
+static inline ObjElem *
+deparse_Seq_Startwith(sequence_values *seqdata, bool alter_table)
+{
+    ObjTree *ret;
+    const char *fmt;
+
+    fmt = alter_table ? "SET START WITH %{value}s" : "START WITH %{value}s";
+
+    ret = new_objtree_VA(fmt, 2,
+                         "clause", ObjTypeString, "start",
+                         "value", ObjTypeString, seqdata->start_value);
+
+    return new_object_object(ret);
+}
+
+/*
+ * Subroutine for CREATE TABLE deparsing.
+ *
+ * Deal with all the table elements (columns and constraints).
+ *
+ * Note we ignore constraints in the parse node here; they are extracted from
+ * system catalogs instead.
+ */
+static List *
+deparse_TableElements(Relation relation, List *tableElements, List *dpcontext)
+{
+    List *elements = NIL;
+    ListCell *lc;
+
+    foreach(lc, tableElements) {
+        Node *elt = (Node *) lfirst(lc);
+
+        switch (nodeTag(elt)) {
+            case T_ColumnDef: {
+                ObjTree *tree;
+                tree = deparse_ColumnDef(relation, dpcontext, (ColumnDef *) elt, NULL);
+                if (tree != NULL)
+                    elements = lappend(elements, new_object_object(tree));
+            }
+                break;
+            case T_Constraint:
+                break;
+            default:
+                elog(ERROR, "invalid node type %d", nodeTag(elt));
+        }
+    }
+
+    return elements;
+}
+
+/*
+ * Deparse a CreateSeqStmt.
+ *
+ * Given a sequence OID and the parse tree that created it, return an ObjTree
+ * representing the creation command.
+ *
+ * Verbose syntax
+ * CREATE %{persistence}s SEQUENCE %{identity}D
+ */
+static ObjTree *
+deparse_CreateSeqStmt(Oid objectId, Node *parsetree)
+{
+    ObjTree *ret;
+    Relation relation;
+    List *elems = NIL;
+    sequence_values *seqvalues;
+    CreateSeqStmt *createSeqStmt = (CreateSeqStmt *) parsetree;
+
+    if (createSeqStmt->is_autoinc)
+        return NULL;
+
+    seqvalues = get_sequence_values(objectId);
+
+    /* Definition elements */
+    elems = lappend(elems, deparse_Seq_Cache(seqvalues, false));
+    elems = lappend(elems, deparse_Seq_Cycle(seqvalues, false));
+    elems = lappend(elems, deparse_Seq_IncrementBy(seqvalues, false));
+    elems = lappend(elems, deparse_Seq_Minvalue(seqvalues, false));
+    elems = lappend(elems, deparse_Seq_Maxvalue(seqvalues, false));
+    elems = lappend(elems, deparse_Seq_Startwith(seqvalues, false));
+    elems = lappend(elems, deparse_Seq_Restart(seqvalues->last_value));
+
+    /* We purposefully do not emit OWNED BY here */
+
+    relation = relation_open(objectId, AccessShareLock);
+
+    ret = new_objtree_VA("CREATE %{persistence}s %{large}s SEQUENCE %{identity}D %{definition: }s", 4,
+                         "persistence", ObjTypeString, get_persistence_str(relation->rd_rel->relpersistence),
+                         "large", ObjTypeString, seqvalues->large ? "LARGE" : "",
"LARGE" : "", + "identity", ObjTypeObject, new_objtree_for_qualname(relation->rd_rel->relnamespace, + RelationGetRelationName(relation)), + "definition", ObjTypeArray, elems); + + relation_close(relation, AccessShareLock); + + return ret; +} + +/* + * Deparse an AlterSeqStmt + * Given a sequence OID and a parse tree that modified it, return an ObjTree + * representing the alter command + * + * Verbose syntax + * ALTER SEQUENCE %{identity}D %{definition: }s + */ +static ObjTree * +deparse_AlterSeqStmt(Oid objectId, Node *parsetree) +{ + ObjTree *ret; + Relation relation; + List *elems = NIL; + ListCell *cell; + sequence_values *seqvalues; + AlterSeqStmt *alterSeqStmt = (AlterSeqStmt *) parsetree; + + if (alterSeqStmt->is_autoinc) { + return NULL; + } + + if (!alterSeqStmt->options) { + return NULL; + } + + seqvalues = get_sequence_values(objectId); + + foreach(cell, alterSeqStmt->options) { + DefElem *elem = (DefElem *) lfirst(cell); + ObjElem *newelm = NULL; + + if (strcmp(elem->defname, "cache") == 0) + newelm = deparse_Seq_Cache(seqvalues, false); + else if (strcmp(elem->defname, "cycle") == 0) + newelm = deparse_Seq_Cycle(seqvalues, false); + else if (strcmp(elem->defname, "increment") == 0) + newelm = deparse_Seq_IncrementBy(seqvalues, false); + else if (strcmp(elem->defname, "minvalue") == 0) + newelm = deparse_Seq_Minvalue(seqvalues, false); + else if (strcmp(elem->defname, "maxvalue") == 0) + newelm = deparse_Seq_Maxvalue(seqvalues, false); + else if (strcmp(elem->defname, "start") == 0) + newelm = deparse_Seq_Startwith(seqvalues, false); + else if (strcmp(elem->defname, "restart") == 0) + newelm = deparse_Seq_Restart(seqvalues->last_value); + else if (strcmp(elem->defname, "owned_by") == 0) + newelm = deparse_Seq_OwnedBy(objectId); + else if (strcmp(elem->defname, "as") == 0) + newelm = deparse_Seq_As(elem); + else if (strcmp(elem->defname, "order") == 0) + newelm = deparse_Seq_Order(elem); + else + elog(WARNING, "unsupport sequence option %s for replication", elem->defname); + + elems = lappend(elems, newelm); + } + + relation = relation_open(objectId, AccessShareLock); + + ret = new_objtree_VA("ALTER %{large}s SEQUENCE %{identity}D %{definition: }s", 3, + "large", ObjTypeString, seqvalues->large ? "LARGE" : "", + "identity", ObjTypeObject, + new_objtree_for_qualname(relation->rd_rel->relnamespace, + RelationGetRelationName(relation)), + "definition", ObjTypeArray, elems); + + relation_close(relation, AccessShareLock); + + return ret; +} + +/* + * Deparse a CommentStmt when it pertains to a constraint. 
+ *
+ * Verbose syntax
+ * COMMENT ON CONSTRAINT %{identity}s ON [DOMAIN] %{parentobj}s IS %{comment}s
+ */
+static ObjTree *
+deparse_CommentOnConstraintStmt(Oid objectId, Node *parsetree)
+{
+    CommentStmt *node = (CommentStmt *) parsetree;
+    ObjTree *ret;
+    HeapTuple constrTup;
+    Form_pg_constraint constrForm;
+    ObjectAddress addr;
+
+    Assert(node->objtype == OBJECT_TABCONSTRAINT || node->objtype == OBJECT_DOMCONSTRAINT);
+
+    constrTup = SearchSysCache1(CONSTROID, ObjectIdGetDatum(objectId));
+    if (!HeapTupleIsValid(constrTup))
+        elog(ERROR, "cache lookup failed for constraint with OID %u", objectId);
+    constrForm = (Form_pg_constraint) GETSTRUCT(constrTup);
+
+    if (OidIsValid(constrForm->conrelid))
+        ObjectAddressSet(addr, RelationRelationId, constrForm->conrelid);
+    else
+        ObjectAddressSet(addr, TypeRelationId, constrForm->contypid);
+
+    ret = new_objtree_VA("COMMENT ON CONSTRAINT %{identity}s ON %{domain}s %{parentobj}s", 3,
+                         "identity", ObjTypeString, pstrdup(NameStr(constrForm->conname)),
+                         "domain", ObjTypeString,
+                         (node->objtype == OBJECT_DOMCONSTRAINT) ? "DOMAIN" : "",
+                         "parentobj", ObjTypeString,
+                         getObjectIdentity(&addr));
+
+    /* Add the comment clause */
+    append_literal_or_null(ret, "IS %{comment}s", node->comment);
+
+    ReleaseSysCache(constrTup);
+    return ret;
+}
+
+/*
+ * Deparse a CommentStmt (COMMENT ON ...).
+ *
+ * Given the object address and the parse tree that created it, return an
+ * ObjTree representing the comment command.
+ *
+ * Verbose syntax
+ * COMMENT ON %{objtype}s %{identity}s IS %{comment}s
+ */
+static ObjTree *
+deparse_CommentStmt(ObjectAddress address, Node *parsetree)
+{
+    CommentStmt *node = (CommentStmt *) parsetree;
+    ObjTree *ret;
+    char *identity;
+
+    /* Comment on subscription is not supported */
+    if (node->objtype == OBJECT_SUBSCRIPTION)
+        return NULL;
+
+    /*
+     * Constraints are sufficiently different that it is easier to handle them
+     * separately.
+     */
+    if (node->objtype == OBJECT_DOMCONSTRAINT ||
+        node->objtype == OBJECT_TABCONSTRAINT) {
+        Assert(address.classId == ConstraintRelationId);
+        return deparse_CommentOnConstraintStmt(address.objectId, parsetree);
+    }
+
+    ret = new_objtree_VA("COMMENT ON %{objtype}s", 1,
+                         "objtype", ObjTypeString,
+                         (char *) string_objtype(node->objtype, false));
+
+    /*
+     * Add the object identity clause. For zero argument aggregates we need
+     * to add the (*) bit; in all other cases we can just use
+     * getObjectIdentity.
+     *
+     * XXX shouldn't we instead fix the object identities for zero-argument
+     * aggregates?
+     */
+    if (node->objtype == OBJECT_AGGREGATE) {
+        HeapTuple procTup;
+        Form_pg_proc procForm;
+
+        procTup = SearchSysCache1(PROCOID, ObjectIdGetDatum(address.objectId));
+        if (!HeapTupleIsValid(procTup))
+            elog(ERROR, "cache lookup failed for procedure with OID %u",
+                 address.objectId);
+        procForm = (Form_pg_proc) GETSTRUCT(procTup);
+        if (procForm->pronargs == 0)
+            identity = psprintf("%s(*)",
+                                quote_qualified_identifier(get_namespace_name(procForm->pronamespace),
+                                                           NameStr(procForm->proname)));
+        else
+            identity = getObjectIdentity(&address);
+        ReleaseSysCache(procTup);
+    } else {
+        identity = getObjectIdentity(&address);
+    }
+
+    append_string_object(ret, "%{identity}s", "identity", identity);
+
+    /* Add the comment clause; can be either NULL or a quoted literal. */
+    append_literal_or_null(ret, "IS %{comment}s", node->comment);
+
+    return ret;
+}
+
+/*
+ * Deparse an IndexStmt.
+ *
+ * Given an index OID and the parse tree that created it, return an ObjTree
+ * representing the creation command.
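+ * (A sketch of the expanded result, with hypothetical names:
+ * CREATE UNIQUE INDEX i_idx ON public.t USING btree (id) WHERE id > 0.)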
+ *
+ * If the index corresponds to a constraint, NULL is returned.
+ *
+ * Verbose syntax
+ * CREATE %{unique}s INDEX %{concurrently}s %{if_not_exists}s %{name}I ON
+ * %{table}D USING %{index_am}s %{definition}s %{with}s %{tablespace}s
+ * %{where_clause}s
+ */
+static ObjTree *
+deparse_IndexStmt(Oid objectId, Node *parsetree)
+{
+    IndexStmt *node = (IndexStmt *) parsetree;
+    ObjTree *ret;
+    Relation idxrel;
+    Relation heaprel;
+    char *index_am;
+    char *definition;
+    char *reloptions;
+    char *tablespace;
+    char *whereClause;
+    bool invisible;
+
+    if (node->primary || node->isconstraint)
+    {
+        /*
+         * Indexes for PRIMARY KEY and other constraints are output
+         * separately; return empty here.
+         */
+        return NULL;
+    }
+
+    idxrel = relation_open(objectId, AccessShareLock);
+    heaprel = relation_open(idxrel->rd_index->indrelid, AccessShareLock);
+
+    pg_get_indexdef_detailed(objectId, node->isGlobal,
+                             &index_am, &definition, &reloptions,
+                             &tablespace, &whereClause, &invisible);
+
+    ret = new_objtree_VA("CREATE %{unique}s INDEX %{concurrently}s %{name}I ON %{table}D USING %{index_am}s %{definition}s", 6,
+                         "unique", ObjTypeString, node->unique ? "UNIQUE" : "",
+                         "concurrently", ObjTypeString, node->concurrent ? "CONCURRENTLY" : "",
+                         "name", ObjTypeString, RelationGetRelationName(idxrel),
+                         "table", ObjTypeObject,
+                         new_objtree_for_qualname(heaprel->rd_rel->relnamespace, RelationGetRelationName(heaprel)),
+                         "index_am", ObjTypeString, index_am,
+                         "definition", ObjTypeString, definition);
+
+    /* reloptions */
+    if (reloptions)
+        append_string_object(ret, "WITH (%{opts}s)", "opts", reloptions);
+
+    /* tablespace */
+    if (tablespace)
+        append_string_object(ret, "TABLESPACE %{tablespace}s", "tablespace", tablespace);
+
+    if (invisible)
+        append_format_string(ret, "INVISIBLE");
+
+    /* WHERE clause */
+    if (whereClause)
+        append_string_object(ret, "WHERE %{where}s", "where", whereClause);
+
+    relation_close(idxrel, AccessShareLock);
+    relation_close(heaprel, AccessShareLock);
+
+    return ret;
+}
+
+/*
+ * Deparse a CreateStmt (CREATE TABLE).
+ *
+ * Given a table OID and the parse tree that created it, return an ObjTree
+ * representing the creation command.
+ *
+ * Verbose syntax
+ * CREATE %{persistence}s TABLE %{if_not_exists}s %{identity}D [OF
+ * %{of_type}T | PARTITION OF %{parent_identity}D] %{table_elements}s
+ * %{inherits}s %{partition_by}s %{access_method}s %{with_clause}s
+ * %{on_commit}s %{tablespace}s
+ */
+static ObjTree *
+deparse_CreateStmt(Oid objectId, Node *parsetree)
+{
+    CreateStmt *node = (CreateStmt *) parsetree;
+    Relation relation = relation_open(objectId, AccessShareLock);
+    List *dpcontext;
+    ObjTree *ret;
+    ObjTree *tmp_obj;
+    List *list = NIL;
+    ListCell *cell;
+
+    ret = new_objtree_VA("CREATE %{persistence}s TABLE %{if_not_exists}s %{identity}D", 3,
+                         "persistence", ObjTypeString, get_persistence_str(relation->rd_rel->relpersistence),
+                         "if_not_exists", ObjTypeString, node->if_not_exists ? "IF NOT EXISTS" : "",
+                         "identity", ObjTypeObject, new_objtree_for_qualname(relation->rd_rel->relnamespace,
+                                                                             RelationGetRelationName(relation)));
+
+    dpcontext = deparse_context_for(RelationGetRelationName(relation),
+                                    objectId);
+
+    if (node->ofTypename) {
+        tmp_obj = new_objtree_for_type(relation->rd_rel->reloftype, -1);
+        append_object_object(ret, "OF %{of_type}T", tmp_obj);
+    } else {
+        List *tableelts = NIL;
+
+        /*
+         * There is no need to process LIKE clauses separately; they have
+         * already been transformed into columns and constraints.
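+         * (For example, CREATE TABLE t2 (LIKE t1 INCLUDING DEFAULTS) arrives
+         * here with t1's columns already expanded into tableElts.)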
+         */
+
+        /*
+         * Process table elements: column definitions and constraints. Only
+         * the column definitions are obtained from the parse node itself. To
+         * get constraints we rely on pg_constraint, because the parse node
+         * might be missing some things such as the name of the constraints.
+         */
+        tableelts = deparse_TableElements(relation, node->tableElts, dpcontext);
+        tableelts = obtainConstraints(tableelts, objectId);
+
+        if (tableelts)
+            append_array_object(ret, "(%{table_elements:, }s)", tableelts);
+        else
+            append_format_string(ret, "()");
+    }
+
+    /* AUTO_INCREMENT */
+    if (node->autoIncStart) {
+        DefElem *defel = (DefElem *) node->autoIncStart;
+        if (IsA(defel->arg, Integer)) {
+            append_int_object(ret, "AUTO_INCREMENT = %{autoincstart}n", (int32) intVal(defel->arg));
+        } else {
+            append_string_object(ret, "AUTO_INCREMENT = %{autoincstart}s", "autoincstart", strVal(defel->arg));
+        }
+    }
+
+    /* WITH clause */
+    foreach (cell, node->options) {
+        ObjTree *tmp_obj2;
+        DefElem *opt = (DefElem *) lfirst(cell);
+
+        /* filter out some options which we won't set in the subscription */
+        if (!pg_strcasecmp(opt->defname, "parallel_workers")) {
+            continue;
+        }
+
+        tmp_obj2 = deparse_DefElem(opt, false);
+        if (tmp_obj2)
+            list = lappend(list, new_object_object(tmp_obj2));
+    }
+    if (list)
+        append_array_object(ret, "WITH (%{with:, }s)", list);
+
+    if (node->oncommit != ONCOMMIT_NOOP)
+        append_object_object(ret, "%{on_commit}s",
+                             deparse_OnCommitClause(node->oncommit));
+
+    if (node->tablespacename)
+        append_string_object(ret, "TABLESPACE %{tablespace}I", "tablespace",
+                             node->tablespacename);
+
+    /* opt_table_options (COMMENT = XX) */
+    List *table_options = NIL;
+    foreach(cell, node->tableOptions) {
+        if (IsA(lfirst(cell), CommentStmt)) {
+            CommentStmt *commentStmt = (CommentStmt *) lfirst(cell);
+            if (commentStmt->comment) {
+                /* order in reverse: lcons prepends each option */
+                ObjTree *option_obj = new_objtree("");
+                append_string_object(option_obj, "COMMENT=%{comment}L", "comment", commentStmt->comment);
+                table_options = lcons(new_object_object(option_obj), table_options);
+            }
+        }
+    }
+    if (table_options)
+        append_array_object(ret, "%{options:, }s", table_options);
+
+    /* opt_table_partitioning_clause */
+    if (node->partTableState && relation->rd_rel->parttype != PARTTYPE_NON_PARTITIONED_RELATION) {
+        append_string_object(ret, "%{partition_by}s", "partition_by", pg_get_partkeydef_string(relation));
+    }
+
+    relation_close(relation, AccessShareLock);
+
+    return ret;
+}
+
+/*
+ * Handle deparsing of DROP commands.
+ *
+ * Verbose syntax
+ * DROP %{objtype}s %{concurrently}s %{if_exists}s %{objidentity}s %{cascade}s
+ */
+char *
+deparse_drop_command(const char *objidentity, const char *objecttype,
+                     Node *parsetree)
+{
+    DropStmt *node = (DropStmt *) parsetree;
+    StringInfoData str;
+    char *command;
+    char *identity = (char *) objidentity;
+    ObjTree *stmt;
+    Jsonb *jsonb;
+    bool concurrent = false;
+
+    initStringInfo(&str);
+
+    if ((!strcmp(objecttype, "index") && node->concurrent))
+        concurrent = true;
+
+    stmt = new_objtree_VA("DROP %{objtype}s %{concurrently}s %{if_exists}s %{objidentity}s %{cascade}s", 5,
+                          "objtype", ObjTypeString, objecttype,
+                          "concurrently", ObjTypeString, concurrent ? "CONCURRENTLY" : "",
+                          "if_exists", ObjTypeString, node->missing_ok ? "IF EXISTS" : "",
+                          "objidentity", ObjTypeString, identity,
+                          "cascade", ObjTypeString, node->behavior == DROP_CASCADE ? "CASCADE" : "");
"CASCADE" : ""); + + jsonb = objtree_to_jsonb(stmt, NULL); + command = JsonbToCString(&str, VARDATA(jsonb), JSONB_ESTIMATED_LEN); + + return command; +} + +/* + * Handle deparsing of simple commands. + * + * This function should cover all cases handled in ProcessUtilitySlow. + */ +static ObjTree * +deparse_simple_command(CollectedCommand *cmd, bool *include_owner) +{ + Oid objectId; + Node *parsetree; + + Assert(cmd->type == SCT_Simple); + + parsetree = cmd->parsetree; + objectId = cmd->d.simple.address.objectId; + + if (cmd->in_extension && (nodeTag(parsetree) != T_CreateExtensionStmt)) + return NULL; + + /* This switch needs to handle everything that ProcessUtilitySlow does */ + switch (nodeTag(parsetree)) { + case T_CreateStmt: + return deparse_CreateStmt(objectId, parsetree); + + case T_IndexStmt: + return deparse_IndexStmt(objectId, parsetree); + + case T_CreateSeqStmt: + return deparse_CreateSeqStmt(objectId, parsetree); + + case T_AlterSeqStmt: + *include_owner = false; + return deparse_AlterSeqStmt(objectId, parsetree); + + case T_CommentStmt: + *include_owner = false; + return deparse_CommentStmt(cmd->d.simple.address, parsetree); + + default: + elog(INFO, "unrecognized node type in deparse command: %d", + (int) nodeTag(parsetree)); + } + + return NULL; +} + +/* + * Workhorse to deparse a CollectedCommand. + */ +char * +deparse_utility_command(CollectedCommand *cmd, ddl_deparse_context *context) +{ + OverrideSearchPath *overridePath; + MemoryContext oldcxt; + MemoryContext tmpcxt; + ObjTree *tree; + char *command = NULL; + StringInfoData str; + + /* + * Allocate everything done by the deparsing routines into a temp context, + * to avoid having to sprinkle them with memory handling code, but + * allocate the output StringInfo before switching. + */ + initStringInfo(&str); + tmpcxt = AllocSetContextCreate(CurrentMemoryContext, + "deparse ctx", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + oldcxt = MemoryContextSwitchTo(tmpcxt); + + /* + * Many routines underlying this one will invoke ruleutils.c functionality + * to obtain deparsed versions of expressions. In such results, we want + * all object names to be qualified, so that results are "portable" to + * environments with different search_path settings. Rather than inject + * what would be repetitive calls to override search path all over the + * place, we do it centrally here. + */ + overridePath = GetOverrideSearchPath(CurrentMemoryContext); + overridePath->schemas = NIL; + overridePath->addCatalog = false; + overridePath->addTemp = true; + PushOverrideSearchPath(overridePath); + + switch (cmd->type) { + case SCT_Simple: + tree = deparse_simple_command(cmd, &context->include_owner); + break; + + default: + elog(ERROR, "unexpected deparse node type %d", cmd->type); + } + + PopOverrideSearchPath(); + + if (tree) { + Jsonb *jsonb; + + jsonb = objtree_to_jsonb(tree, context->include_owner ? cmd->role : NULL); + command = JsonbToCString(&str, VARDATA(jsonb), JSONB_ESTIMATED_LEN); + } + + /* + * Clean up. Note that since we created the StringInfo in the caller's + * context, the output string is not deleted here. 
+     */
+    MemoryContextSwitchTo(oldcxt);
+    MemoryContextDelete(tmpcxt);
+
+    return command;
+}
diff --git a/src/gausskernel/optimizer/commands/ddljson.cpp b/src/gausskernel/optimizer/commands/ddljson.cpp
new file mode 100644
index 000000000..0e4c99cbb
--- /dev/null
+++ b/src/gausskernel/optimizer/commands/ddljson.cpp
@@ -0,0 +1,723 @@
+/*-------------------------------------------------------------------------
+ *
+ * ddljson.cpp
+ *    JSON code related to DDL command deparsing
+ *
+ * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * NOTES
+ *
+ * Each JSONB object is supposed to have a "fmt" element which tells the
+ * expansion routines how the JSONB can be expanded to reconstruct the DDL
+ * command. An example snippet from the JSONB object for
+ * 'ALTER TABLE sales ADD col1 int':
+ *
+ * { *1-level*
+ *     "fmt": "ALTER %{objtype}s %{only}s %{identity}D %{subcmds:, }s",
+ *     "only": "",
+ *     "objtype": "TABLE",
+ *     "identity": {"objname": "sales", "schemaname": "public"},
+ *     "subcmds": [
+ *         { *2-level*
+ *             "fmt": "ADD %{objtype}s %{if_not_exists}s %{definition}s",
+ *             "type": "add column",
+ *             "objtype": "COLUMN",
+ *             "definition": {}
+ *             ...
+ *         }
+ *     ...
+ * }
+ *
+ * The snippet shows several kinds of key-value pairs. Level 1 is the ROOT
+ * object, with 'fmt', 'only', 'objtype', 'identity' and 'subcmds' as keys,
+ * each followed by its value after ":". A value can be a string, bool,
+ * numeric, array, or a nested object. For example, "objtype" carries a
+ * string value, while "subcmds" carries nested objects which in turn hold
+ * further key-value pairs.
+ *
+ * The value of "fmt" tells us how the expansion is carried out. It may
+ * contain zero or more %-escapes, each consisting of a key name enclosed
+ * in { }, followed by a conversion specifier which says how the value for
+ * that key should be expanded.
+ * Possible conversion specifiers are:
+ * %    expand to a literal %
+ * I    expand as a single, non-qualified identifier
+ * D    expand as a possibly-qualified identifier
+ * T    expand as a type name
+ * L    expand as a string literal (quote using single quotes)
+ * s    expand as a simple string (no quoting)
+ * n    expand as a simple number (no quoting)
+ *
+ * To build a DDL command back from the jsonb string, expansion first
+ * extracts the "fmt" node, then reads each key name enclosed in { } in the
+ * fmt string and replaces it with that key's value. Every name mentioned
+ * in { } must have a matching key-value pair; otherwise the expansion
+ * errors out. While expanding, the conversion specifier attached to each
+ * key determines how the value is rendered. This is how the DDL command is
+ * reconstructed from the jsonb string.
+ *
+ * IDENTIFICATION
+ *    src/gausskernel/optimizer/commands/ddljson.cpp
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "knl/knl_thread.h"
+#include "tcop/ddldeparse.h"
+#include "utils/builtins.h"
+#include "lib/stringinfo.h"
+#include "utils/jsonb.h"
+
+#define ADVANCE_PARSE_POINTER(ptr, end_ptr) \
+    do { \
+        if (++(ptr) >= (end_ptr)) \
+            ereport(ERROR, \
+                    (errcode(ERRCODE_INVALID_PARAMETER_VALUE), \
+                     errmsg("unterminated format specifier"))); \
+    } while (0)
+
+/*
+ * Conversion specifier which determines how to expand the JSON element
+ * into a string.
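+ * (These correspond one-to-one to the %-escapes listed in the file header:
+ * D, I, n, s, L and T respectively, in the order declared below.)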
+ */
+typedef enum
+{
+    SpecDottedName,
+    SpecIdentifier,
+    SpecNumber,
+    SpecString,
+    SpecStringLiteral,
+    SpecTypeName
+} convSpecifier;
+
+/*
+ * A ternary value that represents a boolean type JsonbValue.
+ */
+typedef enum
+{
+    tv_absent,
+    tv_true,
+    tv_false
+} json_trivalue;
+
+static bool expand_one_jsonb_element(StringInfo buf, char *param,
+                                     JsonbValue *jsonval, convSpecifier specifier,
+                                     const char *fmt);
+static void expand_jsonb_array(StringInfo buf, char *param,
+                               JsonbValue *jsonarr, char *arraysep,
+                               convSpecifier specifier, const char *fmt);
+static void fmtstr_error_callback(void *arg);
+
+static void expand_fmt_recursive(StringInfo buf, JsonbSuperHeader sheader);
+
+/*
+ * Given a JsonbContainer, find the JsonbValue with the given key name in it.
+ * If it's of a type other than jbvBool, an error is raised. If it doesn't
+ * exist, tv_absent is returned; otherwise return the actual json_trivalue.
+ */
+static json_trivalue
+find_bool_in_jsonbcontainer(JsonbSuperHeader sheader, char *keyname)
+{
+    JsonbValue key;
+    JsonbValue *value;
+    json_trivalue result;
+
+    key.type = jbvString;
+    key.string.val = keyname;
+    key.string.len = strlen(keyname);
+    value = findJsonbValueFromSuperHeader(sheader,
+                                          JB_FOBJECT, NULL, &key);
+    if (value == NULL)
+        return tv_absent;
+    if (value->type != jbvBool)
+        ereport(ERROR,
+                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                 errmsg("element \"%s\" is not of type boolean", keyname)));
+    result = value->boolean ? tv_true : tv_false;
+    pfree(value);
+
+    return result;
+}
+
+/*
+ * Given a JsonbContainer, find the JsonbValue with the given key name in it.
+ * If it's of a type other than jbvString, an error is raised. If it doesn't
+ * exist, an error is raised unless missing_ok is true, in which case NULL is
+ * returned.
+ *
+ * If it exists and is a string, a freshly palloc'ed copy is returned.
+ *
+ * If length is not NULL, *length is set to the length of the string.
+ */
+static char *
+find_string_in_jsonbcontainer(JsonbSuperHeader sheader, char *keyname,
+                              bool missing_ok, int *length)
+{
+    JsonbValue key;
+    JsonbValue *value;
+    char *str;
+
+    /* XXX verify that this is an object, not an array */
+
+    key.type = jbvString;
+    key.string.val = keyname;
+    key.string.len = strlen(keyname);
+    value = findJsonbValueFromSuperHeader(sheader,
+                                          JB_FOBJECT, NULL, &key);
+    if (value == NULL) {
+        if (missing_ok)
+            return NULL;
+        ereport(ERROR,
+                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                 errmsg("missing element \"%s\" in JSON object", keyname)));
+    }
+
+    if (value->type != jbvString)
+        ereport(ERROR,
+                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                 errmsg("element \"%s\" is not of type string", keyname)));
+
+    str = pnstrdup(value->string.val, value->string.len);
+    if (length)
+        *length = value->string.len;
+    pfree(value);
+    return str;
+}
+
+/*
+ * Expand a json value as a quoted identifier. The value must be of type string.
+ */
+static void
+expand_jsonval_identifier(StringInfo buf, JsonbValue *jsonval)
+{
+    char *str;
+
+    Assert(jsonval->type == jbvString);
+
+    str = pnstrdup(jsonval->string.val, jsonval->string.len);
+    appendStringInfoString(buf, quote_identifier(str));
+    pfree(str);
+}
+
+/*
+ * Expand a json value as a dot-separated-name. The value must be of type
+ * binary and may contain elements "schemaname" (optional), "objname"
+ * (mandatory), "attrname" (optional). Double quotes are added to each element
+ * as necessary, and dot separators where needed.
+ *
+ * One day we might need a "catalog" element as well, but no current use case
+ * needs that.
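+ *
+ * Example (illustrative): {"schemaname": "public", "objname": "sales"}
+ * expands to public.sales, with quotes added only where identifiers need
+ * them.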
+ */
+static void
+expand_jsonval_dottedname(StringInfo buf, JsonbValue *jsonval)
+{
+    char *str;
+    JsonbSuperHeader data = jsonval->binary.data;
+
+    Assert(jsonval->type == jbvBinary);
+
+    str = find_string_in_jsonbcontainer(data, "schemaname", true, NULL);
+    if (str) {
+        appendStringInfo(buf, "%s.", quote_identifier(str));
+        pfree(str);
+    }
+
+    str = find_string_in_jsonbcontainer(data, "objname", false, NULL);
+    appendStringInfo(buf, "%s", quote_identifier(str));
+    pfree(str);
+
+    str = find_string_in_jsonbcontainer(data, "attrname", true, NULL);
+    if (str) {
+        appendStringInfo(buf, ".%s", quote_identifier(str));
+        pfree(str);
+    }
+}
+
+/*
+ * Expand a JSON value as a type name.
+ */
+static void
+expand_jsonval_typename(StringInfo buf, JsonbValue *jsonval)
+{
+    char *schema = NULL;
+    char *typname = NULL;
+    char *typmodstr = NULL;
+    json_trivalue is_array;
+    char *array_decor = NULL;
+    JsonbSuperHeader data = jsonval->binary.data;
+
+    /*
+     * We omit schema-qualifying the output name if the schema element is
+     * either the empty string or NULL; the difference between those two cases
+     * is that in the latter we quote the type name, in the former we don't.
+     * This allows for types with special typmod needs, such as interval and
+     * timestamp (see format_type_detailed), while at the same time allowing
+     * for the schema name to be omitted from type names that require quotes
+     * but are to be obtained from a user schema.
+     */
+    schema = find_string_in_jsonbcontainer(data, "schemaname", true, NULL);
+    typname = find_string_in_jsonbcontainer(data, "typename", false, NULL);
+    typmodstr = find_string_in_jsonbcontainer(data, "typmod", true, NULL);
+    is_array = find_bool_in_jsonbcontainer(data, "typarray");
+    switch (is_array) {
+        case tv_true:
+            array_decor = "[]";
+            break;
+
+        case tv_false:
+            array_decor = "";
+            break;
+
+        case tv_absent:
+        default:
+            ereport(ERROR,
+                    (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                     errmsg("missing typarray element")));
+    }
+
+    if (schema == NULL)
+        appendStringInfo(buf, "%s", quote_identifier(typname));
+    else if (schema[0] == '\0')
+        appendStringInfo(buf, "%s", typname); /* Special typmod needs */
+    else
+        appendStringInfo(buf, "%s.%s", quote_identifier(schema),
+                         quote_identifier(typname));
+
+    appendStringInfo(buf, "%s%s", typmodstr ? typmodstr : "", array_decor);
+
+    if (schema)
+        pfree(schema);
+    if (typname)
+        pfree(typname);
+    if (typmodstr)
+        pfree(typmodstr);
+}
+
+/*
+ * Expand a JSON value as a string. The value must be of type string or of
+ * type binary. In the latter case, it must contain a "fmt" element which will
+ * be recursively expanded; also, if the object contains an element "present"
+ * and it is set to false, the expansion is the empty string.
+ *
+ * Returns false if no actual expansion was made due to the "present" flag
+ * being set to "false".
+ *
+ * The caller is responsible for checking that jsonval is of type jbvString
+ * or jbvBinary.
+ */
+static bool
+expand_jsonval_string(StringInfo buf, JsonbValue *jsonval)
+{
+    bool expanded = false;
+
+    Assert((jsonval->type == jbvString) || (jsonval->type == jbvBinary));
+
+    if (jsonval->type == jbvString) {
+        appendBinaryStringInfo(buf, jsonval->string.val,
+                               jsonval->string.len);
+        expanded = true;
+    } else if (jsonval->type == jbvBinary) {
+        json_trivalue present;
+
+        present = find_bool_in_jsonbcontainer((JsonbSuperHeader) jsonval->binary.data,
+                                              "present");
+
+        /*
+         * If "present" is set to false, this element expands to empty;
+         * otherwise (either true or absent), expand "fmt".
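+         * (E.g. an optional clause stored as {"present": false,
+         * "fmt": "TABLESPACE %{tblspc}s", ...} contributes nothing; the
+         * names here are illustrative.)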
+ */ + if (present != tv_false) { + expand_fmt_recursive(buf, (JsonbSuperHeader)jsonval->binary.data); + expanded = true; + } + } + + return expanded; +} + +/* + * Expand a JSON value as a string literal. + */ +static void +expand_jsonval_strlit(StringInfo buf, JsonbValue *jsonval) +{ + char *str; + StringInfoData dqdelim; + static const char dqsuffixes[] = "_XYZZYX_"; + int dqnextchar = 0; + + Assert(jsonval->type == jbvString); + + str = pnstrdup(jsonval->string.val, jsonval->string.len); + + /* Easy case: if there are no ' and no \, just use a single quote */ + if (strpbrk(str, "\'\\") == NULL) { + appendStringInfo(buf, "'%s'", str); + pfree(str); + return; + } + + /* Otherwise need to find a useful dollar-quote delimiter */ + initStringInfo(&dqdelim); + appendStringInfoString(&dqdelim, "$"); + while (strstr(str, dqdelim.data) != NULL) { + appendStringInfoChar(&dqdelim, dqsuffixes[dqnextchar++]); + dqnextchar = dqnextchar % (sizeof(dqsuffixes) - 1); + } + /* Add trailing $ */ + appendStringInfoChar(&dqdelim, '$'); + + /* And finally produce the quoted literal into the output StringInfo */ + appendStringInfo(buf, "%s%s%s", dqdelim.data, str, dqdelim.data); + pfree(dqdelim.data); + pfree(str); +} + +/* + * Expand a JSON value as an integer quantity. + */ +static void +expand_jsonval_number(StringInfo buf, JsonbValue *jsonval) +{ + char *strdatum; + + Assert(jsonval->type == jbvNumeric); + + strdatum = DatumGetCString(DirectFunctionCall1(numeric_out, + NumericGetDatum(jsonval->numeric))); + appendStringInfoString(buf, strdatum); + pfree(strdatum); +} + +/* + * Expand one JSON element into the output StringInfo according to the + * conversion specifier. The element type is validated, and an error is raised + * if it doesn't match what we expect for the conversion specifier. + * + * Returns true, except for the formatted string case if no actual expansion + * was made (due to the "present" flag being set to "false"). 
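+ *
+ * As a quick reference (restating the checks below): %I expects a JSON
+ * string, %D and %T expect a JSON object, %L expects a string, %n expects
+ * a numeric, and %s accepts either a string or an object carrying a "fmt"
+ * element.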
+ */ +static bool +expand_one_jsonb_element(StringInfo buf, char *param, JsonbValue *jsonval, + convSpecifier specifier, const char *fmt) +{ + bool string_expanded = true; + ErrorContextCallback sqlerrcontext; + + /* If we were given a format string, setup an ereport() context callback */ + if (fmt) { + sqlerrcontext.callback = fmtstr_error_callback; + sqlerrcontext.arg = (void *) fmt; + sqlerrcontext.previous = t_thrd.log_cxt.error_context_stack; + t_thrd.log_cxt.error_context_stack = &sqlerrcontext; + } + + if (!jsonval) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("element \"%s\" not found", param))); + + switch (specifier) { + case SpecIdentifier: + if (jsonval->type != jbvString) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("expected JSON string for %%I element \"%s\", got %d", + param, jsonval->type))); + expand_jsonval_identifier(buf, jsonval); + break; + + case SpecDottedName: + if (jsonval->type != jbvBinary) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("expected JSON struct for %%D element \"%s\", got %d", + param, jsonval->type))); + expand_jsonval_dottedname(buf, jsonval); + break; + + case SpecString: + if (jsonval->type != jbvString && jsonval->type != jbvBinary) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("expected JSON string or struct for %%s element \"%s\", got %d", + param, jsonval->type))); + string_expanded = expand_jsonval_string(buf, jsonval); + break; + + case SpecStringLiteral: + if (jsonval->type != jbvString) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("expected JSON string for %%L element \"%s\", got %d", + param, jsonval->type))); + expand_jsonval_strlit(buf, jsonval); + break; + + case SpecTypeName: + if (jsonval->type != jbvBinary) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("expected JSON struct for %%T element \"%s\", got %d", + param, jsonval->type))); + expand_jsonval_typename(buf, jsonval); + break; + + case SpecNumber: + if (jsonval->type != jbvNumeric) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("expected JSON numeric for %%n element \"%s\", got %d", + param, jsonval->type))); + expand_jsonval_number(buf, jsonval); + break; + } + + if (fmt) + t_thrd.log_cxt.error_context_stack = sqlerrcontext.previous; + + return string_expanded; +} + +/* + * Recursive helper for deparse_ddl_json_to_string. + * + * Find the "fmt" element in the given container, and expand it into the + * provided StringInfo. + */ +static void +expand_fmt_recursive(StringInfo buf, JsonbSuperHeader sheader) +{ + JsonbValue key; + JsonbValue *value; + const char *cp; + const char *start_ptr; + const char *end_ptr; + int len; + + start_ptr = find_string_in_jsonbcontainer(sheader, "fmt", false, &len); + end_ptr = start_ptr + len; + + for (cp = start_ptr; cp < end_ptr; cp++) { + convSpecifier specifier = SpecString; + bool is_array = false; + char *param = NULL; + char *arraysep = NULL; + + if (*cp != '%') { + appendStringInfoCharMacro(buf, *cp); + continue; + } + + ADVANCE_PARSE_POINTER(cp, end_ptr); + + /* Easy case: %% outputs a single % */ + if (*cp == '%') { + appendStringInfoCharMacro(buf, *cp); + continue; + } + + /* + * Scan the mandatory element name. Allow for an array separator + * (which may be the empty string) to be specified after a colon. 
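+ * For example (hypothetical format string): in "%{columns:, }I" the
+ * element name is "columns" and the array separator is ", ", so an array
+ * of three column names expands as: col1, col2, col3 (each quoted only
+ * if necessary).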
+ */ + if (*cp == '{') { + StringInfoData parbuf; + StringInfoData arraysepbuf; + StringInfo appendTo; + + initStringInfo(&parbuf); + appendTo = &parbuf; + + ADVANCE_PARSE_POINTER(cp, end_ptr); + while (cp < end_ptr) { + if (*cp == ':') { + /* + * Found array separator delimiter; element name is now + * complete, start filling the separator. + */ + initStringInfo(&arraysepbuf); + appendTo = &arraysepbuf; + is_array = true; + ADVANCE_PARSE_POINTER(cp, end_ptr); + continue; + } + + if (*cp == '}') { + ADVANCE_PARSE_POINTER(cp, end_ptr); + break; + } + appendStringInfoCharMacro(appendTo, *cp); + ADVANCE_PARSE_POINTER(cp, end_ptr); + } + param = parbuf.data; + if (is_array) + arraysep = arraysepbuf.data; + } + if (param == NULL) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("missing conversion name in conversion specifier"))); + + switch (*cp) { + case 'I': + specifier = SpecIdentifier; + break; + case 'D': + specifier = SpecDottedName; + break; + case 's': + specifier = SpecString; + break; + case 'L': + specifier = SpecStringLiteral; + break; + case 'T': + specifier = SpecTypeName; + break; + case 'n': + specifier = SpecNumber; + break; + default: + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid conversion specifier \"%c\"", *cp))); + } + + /* + * Obtain the element to be expanded. + */ + key.type = jbvString; + key.string.val = param; + key.string.len = strlen(param); + + value = findJsonbValueFromSuperHeader(sheader, JB_FOBJECT, NULL, &key); + Assert(value != NULL); + + /* + * Expand the data (possibly an array) into the output StringInfo. + */ + if (is_array) + expand_jsonb_array(buf, param, value, arraysep, specifier, start_ptr); + else + expand_one_jsonb_element(buf, param, value, specifier, start_ptr); + + pfree(value); + } +} + +/* + * Iterate on the elements of a JSON array, expanding each one into the output + * StringInfo per the given conversion specifier, separated by the given + * separator. 
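+ *
+ * Elements whose expansion is suppressed (a "%s" object whose "present"
+ * key is false) contribute neither output nor a separator; e.g.
+ * (illustrative) ["a", {"fmt": "b", "present": false}, "c"] with
+ * separator ", " expands to: a, c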
+ */ +static void +expand_jsonb_array(StringInfo buf, char *param, + JsonbValue *jsonarr, char *arraysep, convSpecifier specifier, + const char *fmt) +{ + ErrorContextCallback sqlerrcontext; + JsonbIterator *it; + JsonbValue v; + int type; + bool first = true; + StringInfoData arrayelem; + + /* If we were given a format string, setup an ereport() context callback */ + if (fmt) { + sqlerrcontext.callback = fmtstr_error_callback; + sqlerrcontext.arg = (void *) fmt; + sqlerrcontext.previous = t_thrd.log_cxt.error_context_stack; + t_thrd.log_cxt.error_context_stack = &sqlerrcontext; + } + + if (!jsonarr) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("element \"%s\" not found", param))); + + if (jsonarr->type != jbvBinary) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("element \"%s\" is not a JSON array", param))); + + JsonbSuperHeader container = jsonarr->binary.data; + if (!((*(uint32*)container) & JB_FARRAY)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("element \"%s\" is not a JSON array", param))); + + initStringInfo(&arrayelem); + + it = JsonbIteratorInit(container); + while ((type = JsonbIteratorNext(&it, &v, true)) != WJB_DONE) { + if (type == WJB_ELEM) { + resetStringInfo(&arrayelem); + + if (expand_one_jsonb_element(&arrayelem, param, &v, specifier, NULL)) { + if (!first) + appendStringInfoString(buf, arraysep); + + appendBinaryStringInfo(buf, arrayelem.data, arrayelem.len); + first = false; + } + } + } + + if (fmt) + t_thrd.log_cxt.error_context_stack = sqlerrcontext.previous; +} + +char * +deparse_ddl_json_to_string(char *json_str, char** owner) +{ + Datum d; + Jsonb *jsonb; + StringInfo buf = (StringInfo) palloc0(sizeof(StringInfoData)); + + initStringInfo(buf); + + d = DirectFunctionCall1(jsonb_in, PointerGetDatum(json_str)); + jsonb = (Jsonb *) DatumGetPointer(d); + + if (owner != NULL) { + JsonbValue key; + key.type = jbvString; + key.string.val = "myowner"; + key.string.len = strlen(key.string.val); + + JsonbValue *value; + JsonbSuperHeader data = VARDATA(jsonb); + value = findJsonbValueFromSuperHeader(data, JB_FOBJECT, NULL, &key); + if (value) { + errno_t rc = 0; + char *str = NULL; + + /* value->string.val may not be NULL terminated */ + str = (char*)palloc(value->string.len + 1); + rc = memcpy_s(str, value->string.len, value->string.val, value->string.len); + securec_check(rc, "\0", "\0"); + str[value->string.len] = '\0'; + *owner = str; + } + else + /* myowner is not given in this jsonb, e.g. for Drop Commands */ + *owner = NULL; + } + + expand_fmt_recursive(buf, VARDATA(jsonb)); + + return buf->data; +} + +/* + * Error context callback for JSON format string expansion. + * + * XXX: indicate which element we're expanding, if applicable. 
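+ *
+ * With this callback installed, an error raised while expanding, say,
+ * "ALTER TABLE %{identity}D" (a hypothetical fmt value) is reported with
+ * an extra context line:
+ * CONTEXT:  while expanding format string "ALTER TABLE %{identity}D"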
+ */
+static void
+fmtstr_error_callback(void *arg)
+{
+    errcontext("while expanding format string \"%s\"", (char *) arg);
+}
diff --git a/src/gausskernel/optimizer/commands/event_trigger.cpp b/src/gausskernel/optimizer/commands/event_trigger.cpp
index e72d74c2b..679b94dc9 100644
--- a/src/gausskernel/optimizer/commands/event_trigger.cpp
+++ b/src/gausskernel/optimizer/commands/event_trigger.cpp
@@ -31,6 +31,8 @@
 #include "commands/trigger.h"
 #include "funcapi.h"
 #include "parser/parse_func.h"
+#include "parser/parser.h"
+#include "parser/parse_relation.h"
 #include "pgstat.h"
 #include "miscadmin.h"
 #include "utils/acl.h"
@@ -42,24 +44,10 @@
 #include "utils/rel.h"
 #include "utils/syscache.h"
 #include "tcop/utility.h"
+#include "tcop/ddldeparse.h"
 #include "lib/ilist.h"
 #include "access/heapam.h"
 
-/* Support for dropped objects */
-typedef struct SQLDropObject {
-    ObjectAddress address;
-    const char *schemaname;
-    const char *objname;
-    const char *objidentity;
-    const char *objecttype;
-    List *addrnames;
-    List *addrargs;
-    bool original;
-    bool normal;
-    bool istemp;
-    slist_node next;
-} SQLDropObject;
-
 typedef struct {
     const char *obtypename;
     bool supported;
@@ -664,7 +652,7 @@ static List * EventTriggerCommonSetup(Node *parsetree,
     List *cachelist;
     ListCell *lc;
     List *runlist = NIL;
-
+    int pub_deparse_func_cnt = 0;
 #ifdef USE_ASSERT_CHECKING
     {
         const char *dbgtag;
@@ -701,8 +689,26 @@ static List * EventTriggerCommonSetup(Node *parsetree,
 
         /* Filter by session replication role. */
         if (filter_event_trigger(&tag, item)) {
-            /* We must plan to fire this trigger. */
-            runlist = lappend_oid(runlist, item->fnoid);
+            static const char *trigger_func_prefix = "publication_deparse_%s";
+            char trigger_func_name[NAMEDATALEN];
+            Oid pub_funcoid;
+            List *pubfuncname;
+            errno_t rc;
+            /* Get the function oid of the publication's ddl deparse event trigger */
+            rc = snprintf_s(trigger_func_name, sizeof(trigger_func_name), sizeof(trigger_func_name) - 1,
+                trigger_func_prefix, eventstr);
+            securec_check_ss(rc, "", "");
+            pubfuncname = SystemFuncName(trigger_func_name);
+            pub_funcoid = LookupFuncName(pubfuncname, 0, NULL, true);
+
+            if (item->fnoid != pub_funcoid) {
+                runlist = lappend_oid(runlist, item->fnoid);
+            } else {
+                /* Only the first ddl deparse event trigger needs to be invoked */
+                if (pub_deparse_func_cnt++ == 0) {
+                    runlist = lappend_oid(runlist, item->fnoid);
+                }
+            }
         }
 
         /* Filter by tags, if any were specified.
 */
@@ -1561,7 +1567,8 @@ void EventTriggerCollectSimpleCommand(ObjectAddress address,
 
     command->type = SCT_Simple;
     command->in_extension = creating_extension;
-
+    command->role = GetUserNameFromId(GetUserId());
+
     command->d.simple.address = address;
     command->d.simple.secondaryObject = secondaryObject;
     command->parsetree = (Node*)copyObject(parsetree);
@@ -1601,7 +1608,8 @@ void EventTriggerAlterTableStart(Node *parsetree)
 
     command->type = SCT_AlterTable;
     command->in_extension = creating_extension;
-
+    command->role = GetUserNameFromId(GetUserId());
+
     command->d.alterTable.classId = RelationRelationId;
     command->d.alterTable.objectId = InvalidOid;
     command->d.alterTable.subcmds = NIL;
diff --git a/src/gausskernel/optimizer/commands/indexcmds.cpp b/src/gausskernel/optimizer/commands/indexcmds.cpp
index 578693119..f0c706b0e 100644
--- a/src/gausskernel/optimizer/commands/indexcmds.cpp
+++ b/src/gausskernel/optimizer/commands/indexcmds.cpp
@@ -425,6 +425,9 @@ static List *ExtractSubPartitionIdf(IndexStmt* stmt, List *subPartitionOidList,
             errmsg("Cannot match subpartitions when create subpartition indexes.")));
     }
 
+    /* the node that stmt->partClause pointed to has been freed */
+    stmt->partClause = (Node*)partitionIndexdef;
+
     /* Count sum of subpartitions */
     foreach(lc1, backupIdxdef) {
         RangePartitionindexDefState *def = (RangePartitionindexDefState*)lfirst(lc1);
@@ -2084,7 +2087,8 @@ static bool columnIsExist(Relation rel, const Form_pg_attribute attTup, const Li
         HeapTuple tuple;
         Value* colValue = (Value*)linitial(ielem->collation);
         char* colName = colValue->val.str;
-        Oid colId = CollationGetCollid(colName);
+        /* ielem->collation is a list of Value nodes that may carry a schema name */
+        Oid colId = get_collation_oid(ielem->collation, true);
         Oid attColId = InvalidOid;
 
         if (0 == colId) {
diff --git a/src/gausskernel/optimizer/commands/publicationcmds.cpp b/src/gausskernel/optimizer/commands/publicationcmds.cpp
index 1019c967d..9b771e41e 100644
--- a/src/gausskernel/optimizer/commands/publicationcmds.cpp
+++ b/src/gausskernel/optimizer/commands/publicationcmds.cpp
@@ -33,10 +33,12 @@
 #include "catalog/pg_publication.h"
 #include "catalog/pg_publication_rel.h"
 #include "catalog/dependency.h"
+#include "parser/parser.h"
 #include "commands/dbcommands.h"
 #include "commands/defrem.h"
 #include "commands/publicationcmds.h"
+#include "nodes/makefuncs.h"
 #include "utils/array.h"
 #include "utils/builtins.h"
@@ -57,17 +59,24 @@ static void CloseTableList(List *rels);
 static void PublicationAddTables(Oid pubid, List *rels, bool if_not_exists, AlterPublicationStmt *stmt);
 static void PublicationDropTables(Oid pubid, List *rels, bool missing_ok);
 
-static void parse_publication_options(List *options, bool *publish_given, bool *publish_insert, bool *publish_update,
-    bool *publish_delete)
+static void parse_publication_options(List *options,
+                                      bool *publish_given,
+                                      bool *publish_insert,
+                                      bool *publish_update,
+                                      bool *publish_delete,
+                                      bool *publish_ddl_given,
+                                      int64 *pubddl)
 {
     ListCell *lc;
 
     *publish_given = false;
+    *publish_ddl_given = false;
 
     /* Defaults are true */
     *publish_insert = true;
     *publish_update = true;
     *publish_delete = true;
+    *pubddl = 0;
 
     /* Parse options */
     foreach (lc, options) {
@@ -108,12 +117,156 @@ static void parse_publication_options(List *options, bool *publish_given, bool *
                 ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("unrecognized \"publish\" value: \"%s\"", publish_opt)));
             }
+        } else if (pubddl && strcmp(defel->defname, "ddl") == 0) {
+            char *ddl_types;
+            List *ddl_type_list;
+            ListCell *lc2;
+
+            if
(*publish_ddl_given) + ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("conflicting or redundant options"))); + + *publish_ddl_given = true; + *pubddl = 0; + + /* + * If ddl option was given only the explicitly listed ddl types + * should be published. + */ + ddl_types = defGetString(defel); + + if (!SplitIdentifierString(ddl_types, ',', &ddl_type_list)) + ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("invalid list syntax for \"ddl\" option"))); + + /* Process the option list */ + foreach (lc2, ddl_type_list) { + char *ddl_opt = (char *)lfirst(lc2); + + if (strcmp(ddl_opt, "table") == 0) + *pubddl |= PUBDDL_TABLE; + else if (strcmp(ddl_opt, "none") == 0) + *pubddl = 0; + else if (strcmp(ddl_opt, "all") == 0) + *pubddl = PUBDDL_ALL; + else + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), errmsg("unrecognized \"ddl\" value: \"%s\"", ddl_opt))); + } } else ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("unrecognized publication parameter: %s", defel->defname))); } } +/* + * Create event trigger which is used for DDL replication + */ +static void +CreateDDLReplicaEventTrigger(char *eventname, List *commands, ObjectAddress pubaddress, Oid puboid) +{ + List *tags = NIL; + ListCell *lc; + Oid trigger_id; + ObjectAddress referenced; + CreateEventTrigStmt *ddl_trigger; + char trigger_name[NAMEDATALEN]; + char trigger_func_name[NAMEDATALEN]; + errno_t rc; + static const char *trigger_func_prefix = "publication_deparse_%s"; + + ddl_trigger = makeNode(CreateEventTrigStmt); + + rc = snprintf_s(trigger_name, sizeof(trigger_name), sizeof(trigger_name) - 1, PUB_EVENT_TRIG_FORMAT, eventname, puboid); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(trigger_func_name, sizeof(trigger_func_name), sizeof(trigger_func_name) - 1, trigger_func_prefix, eventname); + securec_check_ss(rc, "\0", "\0"); + + ddl_trigger->trigname = pstrdup(trigger_name); + ddl_trigger->eventname = eventname; + ddl_trigger->funcname = SystemFuncName(trigger_func_name); + + foreach(lc, commands) { + Value *tag = (Value*)lfirst(lc); + tags = lappend(tags, tag); + } + + ddl_trigger->whenclause = list_make1(makeDefElem("tag", (Node*)tags)); + + trigger_id = CreateEventTrigger(ddl_trigger); + + /* + * Register the event triggers as internally dependent on the publication. + */ + ObjectAddressSet(referenced, EventTriggerRelationId, trigger_id); + recordDependencyOn(&referenced, &pubaddress, DEPENDENCY_INTERNAL); +} + +static void +AddAllDDLReplicaEventTriggers(List *end_commands) +{ + end_commands = lappend(end_commands, makeString("CREATE INDEX")); + end_commands = lappend(end_commands, makeString("DROP INDEX")); +} + +/* + * If DDL replication is enabled, create event triggers to capture and log any + * relevant events. + */ +static void +CreateDDLReplicaEventTriggers(ObjectAddress pubaddress, Oid puboid) +{ + List *start_commands = list_make1(makeString("DROP TABLE")); + List *end_commands = NIL; + end_commands = lappend(end_commands, makeString("CREATE TABLE")); + end_commands = lappend(end_commands, makeString("DROP TABLE")); + AddAllDDLReplicaEventTriggers(end_commands); + + /* Create the ddl_command_end event trigger */ + CreateDDLReplicaEventTrigger(PUB_TRIG_DDL_CMD_END, end_commands, pubaddress, puboid); + + /* Create the ddl_command_start event trigger */ + CreateDDLReplicaEventTrigger(PUB_TRIG_DDL_CMD_START, start_commands, pubaddress, puboid); +} + +/* + * Helper function to drop an event trigger for DDL replication. 
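+ * The trigger name is reconstructed from PUB_EVENT_TRIG_FORMAT using the
+ * event name and the publication OID, so it matches the name generated by
+ * CreateDDLReplicaEventTrigger() above at CREATE PUBLICATION time.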
+ */ +static void +DropDDLReplicaEventTrigger(char *eventname, Oid puboid) +{ + char trigger_name[NAMEDATALEN]; + Oid evtoid; + ObjectAddress obj; + errno_t rc; + + rc = snprintf_s(trigger_name, sizeof(trigger_name), sizeof(trigger_name) - 1, PUB_EVENT_TRIG_FORMAT, eventname, puboid); + securec_check_ss(rc, "\0", "\0"); + + evtoid = get_event_trigger_oid(trigger_name, true); + if (!OidIsValid(evtoid)) + return; + + deleteDependencyRecordsForClass(EventTriggerRelationId, evtoid, PublicationRelationId, DEPENDENCY_INTERNAL); + + /* + * Ensure that the dependency removal is visible, so that we can drop the + * event trigger. + */ + CommandCounterIncrement(); + + ObjectAddressSet(obj, EventTriggerRelationId, evtoid); + performDeletion(&obj, DROP_RESTRICT, PERFORM_DELETION_INTERNAL); +} + +/* + * Drop all the event triggers which are used for DDL replication. + */ +static void +DropDDLReplicaEventTriggers(Oid puboid) +{ + DropDDLReplicaEventTrigger(PUB_TRIG_DDL_CMD_START, puboid); + DropDDLReplicaEventTrigger(PUB_TRIG_DDL_CMD_END, puboid); +} + /* * Create new publication. */ @@ -134,6 +287,8 @@ ObjectAddress CreatePublication(CreatePublicationStmt *stmt) bool publish_insert; bool publish_update; bool publish_delete; + bool publish_ddl_given; + int64 pubddl; AclResult aclresult; int rc; @@ -164,15 +319,24 @@ ObjectAddress CreatePublication(CreatePublicationStmt *stmt) values[Anum_pg_publication_pubname - 1] = DirectFunctionCall1(namein, CStringGetDatum(stmt->pubname)); values[Anum_pg_publication_pubowner - 1] = ObjectIdGetDatum(GetUserId()); - parse_publication_options(stmt->options, &publish_given, &publish_insert, &publish_update, &publish_delete); + parse_publication_options(stmt->options, &publish_given, + &publish_insert, &publish_update, &publish_delete, + &publish_ddl_given, &pubddl); values[Anum_pg_publication_puballtables - 1] = BoolGetDatum(stmt->for_all_tables); values[Anum_pg_publication_pubinsert - 1] = BoolGetDatum(publish_insert); values[Anum_pg_publication_pubupdate - 1] = BoolGetDatum(publish_update); values[Anum_pg_publication_pubdelete - 1] = BoolGetDatum(publish_delete); + values[Anum_pg_publication_pubddl - 1] = Int64GetDatum(pubddl); tup = heap_form_tuple(RelationGetDescr(rel), values, nulls); + if (pubddl == PUBDDL_ALL && !stmt->for_all_tables) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("DDL=all replication is only supported in FOR ALL TABLES publications"))); + } + /* Insert tuple into catalog. */ puboid = simple_heap_insert(rel, tup); CatalogUpdateIndexes(rel, tup); @@ -200,6 +364,13 @@ ObjectAddress CreatePublication(CreatePublicationStmt *stmt) CacheInvalidateRelcacheAll(); } + /* + * Create an event trigger to allow logging of DDL statements. 
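+     * (Illustrative SQL, assuming the option syntax added by this patch:
+     * CREATE PUBLICATION mypub FOR ALL TABLES WITH (ddl = 'all');
+     * ends up creating one ddl_command_start and one ddl_command_end
+     * event trigger owned by the publication.)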
+ */
+    if (pubddl != PUBDDL_NONE) {
+        CreateDDLReplicaEventTriggers(myself, puboid);
+    }
+
     heap_close(rel, RowExclusiveLock);
 
     InvokeObjectAccessHook(OAT_POST_CREATE, PublicationRelationId, puboid, 0, NULL);
@@ -225,10 +396,17 @@ static void AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel, He
     bool publish_insert;
     bool publish_update;
     bool publish_delete;
+    bool publish_ddl_given;
+    int64 pubddl;
+    bool pubddl_change = false;
     int rc;
     ObjectAddress obj;
+    Form_pg_publication pubform;
+    pubform = (Form_pg_publication)GETSTRUCT(tup);
 
-    parse_publication_options(stmt->options, &publish_given, &publish_insert, &publish_update, &publish_delete);
+    parse_publication_options(stmt->options, &publish_given,
+                              &publish_insert, &publish_update, &publish_delete,
+                              &publish_ddl_given, &pubddl);
 
     /* Everything ok, form a new tuple. */
     rc = memset_s(values, sizeof(values), 0, sizeof(values));
@@ -249,6 +427,23 @@ static void AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel, He
         replaces[Anum_pg_publication_pubdelete - 1] = true;
     }
 
+    if (publish_ddl_given) {
+        if (pubform->pubddl != pubddl) {
+            if (pubddl == PUBDDL_NONE || pubform->pubddl == PUBDDL_NONE) {
+                ereport(ERROR,
+                    (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                     errmsg("cannot enable or disable DDL replication for an existing publication, because there may be subscribers with a different compatibility.")));
+            }
+
+            pubddl_change = true;
+        }
+
+        values[Anum_pg_publication_pubddl - 1] = Int64GetDatum(pubddl);
+        replaces[Anum_pg_publication_pubddl - 1] = true;
+    } else {
+        pubddl = pubform->pubddl;
+    }
+
     tup = heap_modify_tuple(tup, RelationGetDescr(rel), values, nulls, replaces);
 
     /* Update the catalog. */
@@ -261,6 +456,12 @@ static void AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel, He
     if (((Form_pg_publication)GETSTRUCT(tup))->puballtables) {
         CacheInvalidateRelcacheAll();
     } else {
+        if (pubddl == PUBDDL_ALL) {
+            ereport(ERROR,
+                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                 errmsg("DDL=all replication is only supported in FOR ALL TABLES publications")));
+        }
+
         List *relids = GetPublicationRelations(HeapTupleGetOid(tup));
 
         /*
@@ -280,6 +481,12 @@ static void AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel, He
         }
     }
     ObjectAddressSet(obj, PublicationRelationId, HeapTupleGetOid(tup));
+    if (pubddl_change) {
+        DropDDLReplicaEventTriggers(HeapTupleGetOid(tup));
+        if (pubddl != PUBDDL_NONE) {
+            CreateDDLReplicaEventTriggers(obj, HeapTupleGetOid(tup));
+        }
+    }
     EventTriggerCollectSimpleCommand(obj, InvalidObjectAddress, (Node *) stmt);
 }
diff --git a/src/gausskernel/optimizer/commands/sequence/sequence.cpp b/src/gausskernel/optimizer/commands/sequence/sequence.cpp
index 8dd21db7c..aadf533f0 100644
--- a/src/gausskernel/optimizer/commands/sequence/sequence.cpp
+++ b/src/gausskernel/optimizer/commands/sequence/sequence.cpp
@@ -2445,6 +2445,58 @@ static void process_owned_by(const Relation seqrel, List* owned_by)
     relation_close(tablerel, NoLock);
 }
 
+/*
+ * Return the sequence parameters, in detail.
+ */
+sequence_values *get_sequence_values(Oid sequenceId)
+{
+    Relation seq_rel;
+    SeqTable elm = NULL;
+    HeapTupleData seqtuple;
+    int64 uuid;
+    Buffer buf;
+
+    sequence_values *seqvalues = NULL;
+
+    /*
+     * Read the old sequence. This does a bit more work than really
+     * necessary, but it's simple, and we do want to double-check that it's
+     * indeed a sequence.
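+     *
+     * (Illustrative note, not from the patch: the numeric fields filled in
+     * below are returned as strings, e.g. start_value = "1" for a default
+     * sequence, assuming Int8or16Out renders the stored values as text.)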
+ */ + init_sequence(sequenceId, &elm, &seq_rel); + + seqvalues = (sequence_values *)palloc(sizeof(sequence_values)); + seqvalues->large = (RelationGetRelkind(seq_rel) == RELKIND_LARGE_SEQUENCE); + + if (seqvalues->large) { + Form_pg_large_sequence seq = read_seq_tuple(elm, seq_rel, &buf, &seqtuple, &uuid); + seqvalues->sequence_name = pstrdup(seq->sequence_name.data); + seqvalues->is_cycled = seq->is_cycled; + seqvalues->last_value = Int8or16Out(seq->last_value); + seqvalues->start_value = Int8or16Out(seq->start_value); + seqvalues->increment_by = Int8or16Out(seq->increment_by); + seqvalues->max_value = Int8or16Out(seq->max_value); + seqvalues->min_value = Int8or16Out(seq->min_value); + seqvalues->cache_value = Int8or16Out(seq->cache_value); + UnlockReleaseBuffer(buf); + } else { + Form_pg_sequence seq = read_seq_tuple(elm, seq_rel, &buf, &seqtuple, &uuid); + seqvalues->sequence_name = pstrdup(seq->sequence_name.data); + seqvalues->is_cycled = seq->is_cycled; + seqvalues->last_value = Int8or16Out(seq->last_value); + seqvalues->start_value = Int8or16Out(seq->start_value); + seqvalues->increment_by = Int8or16Out(seq->increment_by); + seqvalues->max_value = Int8or16Out(seq->max_value); + seqvalues->min_value = Int8or16Out(seq->min_value); + seqvalues->cache_value = Int8or16Out(seq->cache_value); + UnlockReleaseBuffer(buf); + } + + relation_close(seq_rel, NoLock); + + return seqvalues; +} + /* * Return sequence parameters */ diff --git a/src/gausskernel/optimizer/commands/subscriptioncmds.cpp b/src/gausskernel/optimizer/commands/subscriptioncmds.cpp index c3d21f758..440eb0a96 100644 --- a/src/gausskernel/optimizer/commands/subscriptioncmds.cpp +++ b/src/gausskernel/optimizer/commands/subscriptioncmds.cpp @@ -63,6 +63,7 @@ #define SUBOPT_COPY_DATA 0x00000040 #define SUBOPT_CONNECT 0x00000080 #define SUBOPT_SKIPLSN 0x00000100 +#define SUBOPT_MATCHDDLOWNER 0x00000200 /* check if the 'val' has 'bits' set */ #define IsSet(val, bits) (((val) & (bits)) == (bits)) @@ -82,6 +83,7 @@ typedef struct SubOpts bool binary; bool copy_data; bool connect; + bool matchddlowner; XLogRecPtr skiplsn; } SubOpts; @@ -118,6 +120,9 @@ static void parse_subscription_options(const List *stmt_options, bits32 supporte if (IsSet(supported_opts, SUBOPT_CONNECT)) { opts->connect = true; } + if (IsSet(supported_opts, SUBOPT_MATCHDDLOWNER)) { + opts->matchddlowner = true; + } /* Parse options */ foreach (lc, stmt_options) { @@ -218,6 +223,16 @@ static void parse_subscription_options(const List *stmt_options, bits32 supporte (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid WAL location (LSN): %s", lsn_str))); } + } else if (IsSet(supported_opts, SUBOPT_MATCHDDLOWNER) && strcmp(defel->defname, "matchddlowner") == 0) { + if (IsSet(opts->specified_opts, SUBOPT_MATCHDDLOWNER)) { + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("conflicting or redundant options"))); + } + + opts->specified_opts |= SUBOPT_MATCHDDLOWNER; + opts->matchddlowner = defGetBoolean(defel); + } else { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("unrecognized subscription parameter: %s", defel->defname))); @@ -507,7 +522,7 @@ ObjectAddress CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel) * Connection and publication should not be specified here. 
*/ supported_opts = (SUBOPT_ENABLED | SUBOPT_SLOT_NAME | SUBOPT_SYNCHRONOUS_COMMIT | SUBOPT_BINARY | - SUBOPT_COPY_DATA | SUBOPT_CONNECT); + SUBOPT_COPY_DATA | SUBOPT_CONNECT | SUBOPT_MATCHDDLOWNER); parse_subscription_options(stmt->options, supported_opts, &opts); /* @@ -552,6 +567,7 @@ ObjectAddress CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel) values[Anum_pg_subscription_subowner - 1] = ObjectIdGetDatum(owner); values[Anum_pg_subscription_subenabled - 1] = BoolGetDatum(opts.enabled); values[Anum_pg_subscription_subbinary - 1] = BoolGetDatum(opts.binary); + values[Anum_pg_subscription_submatchddlowner - 1] = BoolGetDatum(opts.matchddlowner); /* encrypt conninfo */ char *encryptConninfo = EncryptOrDecryptConninfo(stmt->conninfo, 'E'); @@ -867,7 +883,7 @@ ObjectAddress AlterSubscription(AlterSubscriptionStmt *stmt, bool isTopLevel) /* Parse options. */ if (!stmt->refresh) { supported_opts = (SUBOPT_CONNINFO | SUBOPT_PUBLICATION | SUBOPT_ENABLED | SUBOPT_SLOT_NAME | - SUBOPT_SYNCHRONOUS_COMMIT | SUBOPT_BINARY | SUBOPT_SKIPLSN); + SUBOPT_SYNCHRONOUS_COMMIT | SUBOPT_BINARY | SUBOPT_SKIPLSN | SUBOPT_MATCHDDLOWNER); parse_subscription_options(stmt->options, supported_opts, &opts); } else { supported_opts = SUBOPT_COPY_DATA; @@ -956,6 +972,10 @@ ObjectAddress AlterSubscription(AlterSubscriptionStmt *stmt, bool isTopLevel) values[Anum_pg_subscription_subskiplsn - 1] = LsnGetTextDatum(opts.skiplsn); replaces[Anum_pg_subscription_subskiplsn - 1] = true; } + if (IsSet(opts.specified_opts, SUBOPT_MATCHDDLOWNER)) { + values[Anum_pg_subscription_submatchddlowner - 1] = BoolGetDatum(opts.matchddlowner); + replaces[Anum_pg_subscription_submatchddlowner - 1] = true; + } tup = heap_modify_tuple(tup, RelationGetDescr(rel), values, nulls, replaces); diff --git a/src/gausskernel/optimizer/rewrite/rewriteHandler.cpp b/src/gausskernel/optimizer/rewrite/rewriteHandler.cpp index 9f4fde5c7..41251bd41 100644 --- a/src/gausskernel/optimizer/rewrite/rewriteHandler.cpp +++ b/src/gausskernel/optimizer/rewrite/rewriteHandler.cpp @@ -5084,7 +5084,7 @@ List* QueryRewriteCTAS(Query* parsetree) proutility_cxt.readOnlyTree = false; proutility_cxt.params = NULL; proutility_cxt.is_top_level = true; - ProcessUtility(&proutility_cxt, NULL, false, NULL, PROCESS_UTILITY_GENERATED); + ProcessUtility(&proutility_cxt, NULL, false, NULL, PROCESS_UTILITY_GENERATED, true); } /* CREATE MATILIZED VIEW AS*/ @@ -5097,7 +5097,7 @@ List* QueryRewriteCTAS(Query* parsetree) proutility_cxt.readOnlyTree = false; proutility_cxt.params = NULL; proutility_cxt.is_top_level = true; - ProcessUtility(&proutility_cxt, NULL, false, NULL, PROCESS_UTILITY_GENERATED); + ProcessUtility(&proutility_cxt, NULL, false, NULL, PROCESS_UTILITY_GENERATED, true); create_matview_meta(query, stmt->into->rel, stmt->into->ivm); diff --git a/src/gausskernel/process/tcop/utility.cpp b/src/gausskernel/process/tcop/utility.cpp index 298f572ee..9b72e1b80 100755 --- a/src/gausskernel/process/tcop/utility.cpp +++ b/src/gausskernel/process/tcop/utility.cpp @@ -2326,7 +2326,8 @@ void CreateCommand(CreateStmt *parse_tree, const char *query_string, ParamListIn true, #endif /* PGXC */ NULL, - is_top_level ? PROCESS_UTILITY_TOPLEVEL : PROCESS_UTILITY_QUERY, + //is_top_level ? 
PROCESS_UTILITY_TOPLEVEL : PROCESS_UTILITY_QUERY, + PROCESS_UTILITY_SUBCOMMAND, isCTAS); } @@ -5141,9 +5142,15 @@ ProcessUtilitySlow(Node *parse_tree, ObjectAddress address; ObjectAddress secondaryObject = InvalidObjectAddress; + if (T_CreateStmt == nodeTag(parse_tree) && isCTAS) { + isCompleteQuery = true; + } else if (T_AlterTableStmt == nodeTag(parse_tree) && ((AlterTableStmt*)parse_tree)->fromCreate) { + isCompleteQuery = false; + } + /* All event trigger calls are done only when isCompleteQuery is true */ needCleanup = isCompleteQuery && EventTriggerBeginCompleteQuery(); - + /* PG_TRY block is to ensure we call EventTriggerEndCompleteQuery */ PG_TRY(); { diff --git a/src/gausskernel/storage/access/rmgrdesc/Makefile b/src/gausskernel/storage/access/rmgrdesc/Makefile index d0972cfab..422ca04b3 100644 --- a/src/gausskernel/storage/access/rmgrdesc/Makefile +++ b/src/gausskernel/storage/access/rmgrdesc/Makefile @@ -14,12 +14,12 @@ ifeq ($(enable_mot), yes) OBJS = barrierdesc.o clogdesc.o dbasedesc.o gindesc.o gistdesc.o \ hashdesc.o heapdesc.o motdesc.o mxactdesc.o nbtdesc.o relmapdesc.o \ replorigindesc.o seqdesc.o smgrdesc.o spgdesc.o standbydesc.o tblspcdesc.o \ - xactdesc.o xlogdesc.o slotdesc.o undologdesc.o uheapdesc.o segpagedesc.o + xactdesc.o xlogdesc.o slotdesc.o undologdesc.o uheapdesc.o segpagedesc.o logicalddlmsgdesc.o else OBJS = barrierdesc.o clogdesc.o dbasedesc.o gindesc.o gistdesc.o \ hashdesc.o heapdesc.o mxactdesc.o nbtdesc.o relmapdesc.o \ replorigindesc.o seqdesc.o smgrdesc.o spgdesc.o standbydesc.o tblspcdesc.o \ - xactdesc.o xlogdesc.o slotdesc.o undologdesc.o uheapdesc.o segpagedesc.o + xactdesc.o xlogdesc.o slotdesc.o undologdesc.o uheapdesc.o segpagedesc.o logicalddlmsgdesc.o endif include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/gausskernel/storage/access/rmgrdesc/logicalddlmsgdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/logicalddlmsgdesc.cpp new file mode 100644 index 000000000..e97426e46 --- /dev/null +++ b/src/gausskernel/storage/access/rmgrdesc/logicalddlmsgdesc.cpp @@ -0,0 +1,61 @@ +/*------------------------------------------------------------------------- + * + * logicalddlmsgdesc.cpp + * rmgr descriptor routines for replication/logical/ddlmessage.c + * + * Portions Copyright (c) 2015-2023, PostgreSQL Global Development Group + * + * + * IDENTIFICATION + * src/gausskernel/storage/access/rmgrdesc/logicalddlmsgdesc.cpp + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "replication/ddlmessage.h" + +void +logicalddlmsg_desc(StringInfo buf, XLogReaderState *record) +{ + char *rec = XLogRecGetData(record); + uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; + + if (info == XLOG_LOGICAL_DDL_MESSAGE) { + int cnt = 0; + xl_logical_ddl_message *xlrec = (xl_logical_ddl_message *) rec; + char *prefix = xlrec->message; + char *message = xlrec->message + xlrec->prefix_size; + + Assert(prefix[xlrec->prefix_size - 1] == '\0'); + + appendStringInfo(buf, "prefix \"%s\"; payload (%zu bytes): ", + prefix, xlrec->message_size); + appendStringInfo(buf, "relid %u cmdtype %u", xlrec->relid, xlrec->cmdtype); + /* Write message payload as a series of hex bytes */ + for (; cnt < xlrec->message_size - 1; cnt++) { + appendStringInfo(buf, "%02X ", (unsigned char) message[cnt]); + } + appendStringInfo(buf, "%02X", (unsigned char) message[cnt]); + } +} + +const char * +logicalddlmsg_identify(uint8 info) +{ + if ((info & ~XLR_INFO_MASK) == XLOG_LOGICAL_DDL_MESSAGE) + return "DDL"; + + return 
NULL;
+}
+
+const char *
+logicalddlmsg_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
+    if (info == XLOG_LOGICAL_DDL_MESSAGE) {
+        return "logical_ddl_message";
+    } else {
+        return "unknown_type";
+    }
+}
\ No newline at end of file
diff --git a/src/gausskernel/storage/access/transam/rmgr.cpp b/src/gausskernel/storage/access/transam/rmgr.cpp
index 6b0536c64..797f8bd6d 100644
--- a/src/gausskernel/storage/access/transam/rmgr.cpp
+++ b/src/gausskernel/storage/access/transam/rmgr.cpp
@@ -41,6 +41,7 @@
 #include "access/xlog_internal.h"
 #include "catalog/storage_xlog.h"
 #include "commands/dbcommands.h"
+#include "replication/ddlmessage.h"
 #include "commands/sequence.h"
 #include "commands/tablespace.h"
 #include "replication/slot.h"
diff --git a/src/gausskernel/storage/replication/logical/Makefile b/src/gausskernel/storage/replication/logical/Makefile
index 44963bc28..8395758e5 100644
--- a/src/gausskernel/storage/replication/logical/Makefile
+++ b/src/gausskernel/storage/replication/logical/Makefile
@@ -6,6 +6,6 @@ include $(top_builddir)/src/Makefile.global
 
 override CPPFLAGS := -I$(srcdir) $(CPPFLAGS)
 
-OBJS = decode.o launcher.o logical.o logicalfuncs.o origin.o proto.o relation.o reorderbuffer.o snapbuild.o worker.o parallel_decode_worker.o parallel_decode.o parallel_reorderbuffer.o logical_queue.o logical_parse.o tablesync.o
+OBJS = decode.o launcher.o logical.o logicalfuncs.o origin.o proto.o relation.o reorderbuffer.o snapbuild.o worker.o parallel_decode_worker.o parallel_decode.o parallel_reorderbuffer.o logical_queue.o logical_parse.o tablesync.o ddlmessage.o ddltrigger.o
 
 include $(top_srcdir)/src/gausskernel/common.mk
diff --git a/src/gausskernel/storage/replication/logical/ddlmessage.cpp b/src/gausskernel/storage/replication/logical/ddlmessage.cpp
new file mode 100644
index 000000000..c4d7982a3
--- /dev/null
+++ b/src/gausskernel/storage/replication/logical/ddlmessage.cpp
@@ -0,0 +1,91 @@
+/*-------------------------------------------------------------------------
+ *
+ * ddlmessage.cpp
+ *    Logical DDL messages.
+ *
+ * Copyright (c) 2023, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ *    src/gausskernel/storage/replication/logical/ddlmessage.cpp
+ *
+ * NOTES
+ *
+ * Logical DDL messages allow XLOG logging of DDL command strings that
+ * get passed to the logical decoding plugin. In normal XLOG processing they
+ * are the same as a NOOP.
+ *
+ * Unlike generic logical messages, these DDL messages are transactional
+ * only. Note that DDL in PostgreSQL is transactional by default.
+ *
+ * These messages are part of the current transaction and are sent to the
+ * decoding plugin in the same way as DML operations.
+ *
+ * Every message includes a prefix to avoid conflicts between different
+ * decoding plugins. Plugin authors must take special care to use a unique
+ * prefix (e.g., one idea is to include the name of the extension).
+ * ---------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "access/xact.h"
+#include "access/xloginsert.h"
+#include "catalog/namespace.h"
+#include "miscadmin.h"
+#include "nodes/execnodes.h"
+#include "replication/logical.h"
+#include "replication/ddlmessage.h"
+#include "tcop/ddldeparse.h"
+#include "utils/memutils.h"
+
+/*
+ * Write logical decoding DDL message into XLog.
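+ *
+ * The record payload mirrors the three XLogRegisterData calls below: the
+ * xl_logical_ddl_message header, then the NUL-terminated prefix
+ * (prefix_size bytes), then the message itself (message_size bytes).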
+ */
+XLogRecPtr
+LogLogicalDDLMessage(const char *prefix, Oid relid, DeparsedCommandType cmdtype,
+                     const char *message, size_t size)
+{
+    xl_logical_ddl_message xlrec;
+
+    /* Ensure we have a valid transaction id. */
+    Assert(IsTransactionState());
+    GetCurrentTransactionId();
+
+    elog(LOG, "LogLogicalDDLMessage : %s", message);
+    char *tmp = pstrdup(message);
+    char *owner = NULL;
+
+    if (cmdtype != DCT_TableDropStart) {
+        char *decodestring = deparse_ddl_json_to_string(tmp, &owner);
+        elog(LOG, "will decode to : %s, [owner %s]", decodestring, owner ? owner : "none");
+    }
+    pfree(tmp);
+
+    xlrec.dbId = u_sess->proc_cxt.MyDatabaseId;
+    /* Trailing zero is critical; see logicalddlmsg_desc */
+    xlrec.prefix_size = strlen(prefix) + 1;
+    xlrec.message_size = size;
+    xlrec.relid = relid;
+    xlrec.cmdtype = cmdtype;
+
+    XLogBeginInsert();
+    XLogRegisterData((char *) &xlrec, SizeOfLogicalDDLMessage);
+    XLogRegisterData((char *) prefix, xlrec.prefix_size);
+    XLogRegisterData((char *) message, size);
+
+    return XLogInsert(RM_LOGICALDDLMSG_ID, XLOG_LOGICAL_DDL_MESSAGE);
+}
+
+/*
+ * Redo is basically just noop for logical decoding DDL messages.
+ */
+void
+logicalddlmsg_redo(XLogReaderState *record)
+{
+    uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
+
+    if (info != XLOG_LOGICAL_DDL_MESSAGE)
+        elog(PANIC, "logicalddlmsg_redo: unknown op code %u", info);
+
+    /* This is only interesting for logical decoding, see decode.c. */
+}
diff --git a/src/gausskernel/storage/replication/logical/ddltrigger.cpp b/src/gausskernel/storage/replication/logical/ddltrigger.cpp
new file mode 100644
index 000000000..71f6cc760
--- /dev/null
+++ b/src/gausskernel/storage/replication/logical/ddltrigger.cpp
@@ -0,0 +1,227 @@
+/*-------------------------------------------------------------------------
+ *
+ * ddltrigger.cpp
+ *    Logical DDL triggers.
+ *
+ * Copyright (c) 2023, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ *    src/gausskernel/storage/replication/logical/ddltrigger.cpp
+ *
+ * NOTES
+ *
+ * Deparse the ddl command and log it.
+ *
+ * ---------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "catalog/pg_class.h"
+#include "catalog/pg_proc.h"
+#include "commands/event_trigger.h"
+#include "funcapi.h"
+#include "lib/ilist.h"
+#include "replication/ddlmessage.h"
+#include "tcop/ddldeparse.h"
+#include "utils/lsyscache.h"
+
+/*
+ * Check if the command can be published.
+ *
+ * XXX Executing a non-immutable expression during the table rewrite phase is
+ * not allowed, as it may result in different data between publisher and
+ * subscriber. Some may suggest converting the rewrite inserts to updates and
+ * replicating them after the ddl command to maintain data consistency, but
+ * that doesn't work if the replica identity column is altered in the command,
+ * because the rewrite inserts do not contain the old values and therefore
+ * cannot be converted to updates.
+ *
+ * Apart from that, commands containing volatile functions are not allowed,
+ * because the functions may themselves contain DDL/DML, in which case these
+ * operations would be executed twice and cause duplicate data. In addition,
+ * we don't know whether the tables being accessed by that DDL/DML are
+ * published or not. Blindly allowing such functions can thus have unintended
+ * consequences; for example, the tables accessed in those functions may not
+ * even exist on the subscriber.
+ */
+static void
+check_command_publishable(ddl_deparse_context context)
+{
+    if (context.max_volatility == PROVOLATILE_VOLATILE)
+        ereport(ERROR,
+                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                 errmsg("cannot use volatile function in this command because it cannot be replicated in DDL replication")));
+}
+
+bool relation_support_ddl_replication(Oid relid)
+{
+    bool support = false;
+
+    Relation rel = relation_open(relid, AccessShareLock);
+
+    /* temporary relations are not supported */
+    if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP) {
+        relation_close(rel, AccessShareLock);
+        return false;
+    }
+
+    if (RelationIsRowFormat(rel) && RelationIsAstoreFormat(rel)) {
+        support = true;
+    } else if (RelationIsIndex(rel)) {
+        if (IS_BTREE(rel) && !RelationAmIsBtree(rel)) {
+            relation_close(rel, AccessShareLock);
+            return false;
+        }
+        support = true;
+    } else if (RelationIsSequnce(rel)) {
+        support = true;
+    }
+
+    relation_close(rel, AccessShareLock);
+
+    return support;
+}
+
+/*
+ * Deparse the ddl command and log it prior to execution. Currently this is
+ * only used for the DROP TABLE command, so that the catalog can still be
+ * accessed before the entries are deleted; this is needed to check whether
+ * the table is part of the publication or not.
+ */
+Datum
+publication_deparse_ddl_command_start(PG_FUNCTION_ARGS)
+{
+    EventTriggerData *trigdata;
+    char *command = psprintf("Drop table command start");
+    DropStmt *stmt;
+    ListCell *cell1;
+
+    if (!CALLED_AS_EVENT_TRIGGER(fcinfo))
+        elog(ERROR, "not fired by event trigger manager");
+
+    trigdata = (EventTriggerData *) fcinfo->context;
+    stmt = (DropStmt *) trigdata->parsetree;
+
+    /* Extract the relid from the parse tree */
+    foreach(cell1, stmt->objects) {
+        bool support = true;
+        Node *object = (Node*)lfirst(cell1);
+        ObjectAddress address;
+        Relation relation = NULL;
+
+        address = get_object_address(stmt->removeType,
+                                     IsA(object, List) ? (List*)object : list_make1(object),
+                                     NULL,
+                                     &relation,
+                                     AccessExclusiveLock,
+                                     true);
+
+        /* Object does not exist, nothing to do */
+        if (!relation)
+            continue;
+
+        if (get_rel_relkind(address.objectId))
+            support = relation_support_ddl_replication(address.objectId);
+
+        /*
+         * Do not generate wal log for commands whose target table is a
+         * temporary or unlogged table.
+         *
+         * XXX We may generate wal logs for unlogged tables in the future so
+         * that unlogged tables can also be created and altered on the
+         * subscriber side. This makes it possible to directly replay the SET
+         * LOGGED command and the incoming rewrite message without creating a
+         * new table.
+         */
+        if (support)
+            LogLogicalDDLMessage("deparse", address.objectId, DCT_TableDropStart,
+                                 command, strlen(command) + 1);
+
+        relation_close(relation, NoLock);
+    }
+    return PointerGetDatum(NULL);
+}
+
+/*
+ * Deparse the ddl command and log it. This function
+ * is called after the execution of the command but before the
+ * transaction commits.
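+ *
+ * For a command collected as SCT_Simple (the only collected type handled
+ * below), the flow is: deparse the command to JSON, check that it is
+ * publishable, then WAL-log it with LogLogicalDDLMessage("deparse", relid,
+ * DCT_SimpleCmd, ...). Dropped objects are handled separately from the
+ * SQLDropList.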
+ */
+Datum
+publication_deparse_ddl_command_end(PG_FUNCTION_ARGS)
+{
+    ListCell *lc;
+    slist_iter iter;
+    DeparsedCommandType type;
+    Oid relid = InvalidOid;
+    ddl_deparse_context context;
+
+    if (!CALLED_AS_EVENT_TRIGGER(fcinfo))
+        elog(ERROR, "not fired by event trigger manager");
+
+    foreach(lc, currentEventTriggerState->commandList) {
+        bool support = true;
+
+        CollectedCommand *cmd = (CollectedCommand*)lfirst(lc);
+        char *json_string;
+
+        if (cmd->type == SCT_Simple &&
+            !OidIsValid(cmd->d.simple.address.objectId))
+            continue;
+
+        /* Only SCT_Simple for now */
+        relid = cmd->d.simple.address.objectId;
+        type = DCT_SimpleCmd;
+
+        if (get_rel_relkind(relid)) {
+            support = relation_support_ddl_replication(relid);
+        }
+
+        if (support) {
+            /*
+             * Deparse the DDL command and WAL log it to allow decoding of the
+             * same.
+             */
+            context.include_owner = true;
+            context.max_volatility = PROVOLATILE_IMMUTABLE;
+
+            json_string = deparse_utility_command(cmd, &context);
+
+            if (json_string != NULL) {
+                check_command_publishable(context);
+                LogLogicalDDLMessage("deparse", relid, type, json_string,
+                                     strlen(json_string) + 1);
+            }
+        }
+    }
+
+    /* Drop commands are not part of the command list, but are handled here via the SQLDropList */
+    slist_foreach(iter, &(currentEventTriggerState->SQLDropList)) {
+        SQLDropObject *obj;
+        EventTriggerData *trigdata;
+        char *command;
+        DeparsedCommandType cmdtype;
+
+        trigdata = (EventTriggerData *) fcinfo->context;
+
+        obj = slist_container(SQLDropObject, next, iter.cur);
+
+        if (obj->istemp || !obj->original) {
+            continue;
+        }
+
+        if (strcmp(obj->objecttype, "table") == 0) {
+            cmdtype = DCT_TableDropEnd;
+        } else if (strcmp(obj->objecttype, "index") == 0) {
+            cmdtype = DCT_ObjectDrop;
+        } else {
+            continue;
+        }
+
+        command = deparse_drop_command(obj->objidentity, obj->objecttype, (Node*)trigdata->parsetree);
+
+        if (command)
+            LogLogicalDDLMessage("deparse", obj->address.objectId, cmdtype,
+                                 command, strlen(command) + 1);
+    }
+
+    return PointerGetDatum(NULL);
+}
diff --git a/src/gausskernel/storage/replication/logical/decode.cpp b/src/gausskernel/storage/replication/logical/decode.cpp
index 3befcfc34..d7257f6af 100644
--- a/src/gausskernel/storage/replication/logical/decode.cpp
+++ b/src/gausskernel/storage/replication/logical/decode.cpp
@@ -41,7 +41,7 @@
 #include "replication/reorderbuffer.h"
 #include "replication/origin.h"
 #include "replication/snapbuild.h"
-
+#include "replication/ddlmessage.h"
 #include "storage/standby.h"
 #include "utils/memutils.h"
 
@@ -49,13 +49,6 @@
 #include "utils/lsyscache.h"
 
-typedef struct XLogRecordBuffer {
-    XLogRecPtr origptr;
-    XLogRecPtr endptr;
-    XLogReaderState *record;
-    char *record_data;
-} XLogRecordBuffer;
-
 /* RMGR Handlers */
 static void DecodeXLogOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
 static void DecodeHeapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
@@ -255,6 +248,9 @@ void LogicalDecodingProcessRecord(LogicalDecodingContext *ctx, XLogReaderState *
         case RM_UHEAP_ID:
             DecodeUheapOp(ctx, &buf);
             break;
+        case RM_LOGICALDDLMSG_ID:
+            logicalddl_decode(ctx, &buf);
+            break;
         default:
             break;
     }
@@ -1040,6 +1036,47 @@ static void AreaDecodingChange(ReorderBufferChange *change, LogicalDecodingConte
     RelationClose(relation);
 }
 
+/*
+ * Handle rmgr LOGICALDDLMSG_ID records for DecodeRecordIntoReorderBuffer()
+ */
+void
+logicalddl_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+    SnapBuild *builder = ctx->snapshot_builder;
+    XLogReaderState *r = buf->record;
+    TransactionId xid = XLogRecGetXid(r);
+    uint8
info = XLogRecGetInfo(r) & ~XLR_INFO_MASK; + RepOriginId origin_id = XLogRecGetOrigin(r); + xl_logical_ddl_message *message; + + if (info != XLOG_LOGICAL_DDL_MESSAGE) + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errmsg("unexpected RM_LOGICALDDLMSG_ID record type: %u", info))); + + ReorderBufferProcessXid(ctx->reorder, XLogRecGetXid(r), buf->origptr); + + /* + * If we don't have snapshot or we are just fast-forwarding, there is no + * point in decoding ddl messages. + */ + if (SnapBuildCurrentState(builder) < SNAPBUILD_FULL_SNAPSHOT || + ctx->fast_forward) + return; + + message = (xl_logical_ddl_message *)XLogRecGetData(r); + + if (message->dbId != ctx->slot->data.database || + FilterByOrigin(ctx, origin_id)) + return; + + if (SnapBuildProcessChange(builder, xid, buf->origptr)) + ReorderBufferQueueDDLMessage(ctx, xid, buf->endptr, + message->message, + message->message_size, + message->message + message->prefix_size, + message->relid, message->cmdtype); + +} + /* * Consolidated commit record handling between the different form of commit * records. diff --git a/src/gausskernel/storage/replication/logical/logical.cpp b/src/gausskernel/storage/replication/logical/logical.cpp index ea6ecf772..211690175 100644 --- a/src/gausskernel/storage/replication/logical/logical.cpp +++ b/src/gausskernel/storage/replication/logical/logical.cpp @@ -46,7 +46,7 @@ #include "replication/snapbuild.h" #include "replication/walreceiver.h" #include "replication/walsender.h" - +#include "replication/ddlmessage.h" #include "storage/proc.h" #include "storage/procarray.h" @@ -84,6 +84,10 @@ static void parallel_change_cb_wrapper(ParallelReorderBuffer *cache, ReorderBuff static void LoadOutputPlugin(OutputPluginCallbacks *callbacks, const char *plugin); static void LoadOutputPlugin(ParallelOutputPluginCallbacks *callbacks, const char *plugin); +static void ddl_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, + XLogRecPtr message_lsn, const char *prefix, + Oid relid, DeparsedCommandType cmdtype, + Size message_size, const char *message); /* Checkout aurgments whether coming from ALTER SYSTEM SET*/ bool QuoteCheckOut(char* newval) @@ -180,6 +184,7 @@ static LogicalDecodingContext *StartupDecodingContext(List *output_plugin_option ctx->reorder->begin = begin_cb_wrapper; ctx->reorder->apply_change = change_cb_wrapper; ctx->reorder->commit = commit_cb_wrapper; + ctx->reorder->ddl = ddl_cb_wrapper; ctx->out = makeStringInfo(); ctx->prepare_write = prepare_write; @@ -1030,6 +1035,42 @@ bool filter_by_origin_cb_wrapper(LogicalDecodingContext *ctx, RepOriginId origin return ret; } +static void +ddl_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, + XLogRecPtr message_lsn, const char *prefix, + Oid relid, DeparsedCommandType cmdtype, + Size message_size, const char *message) +{ + LogicalDecodingContext *ctx = (LogicalDecodingContext*)cache->private_data; + LogicalErrorCallbackState state; + ErrorContextCallback errcallback; + + Assert(!ctx->fast_forward); + + if (ctx->callbacks.ddl_cb == NULL) + return; + + /* Push callback + info on the error context stack */ + state.ctx = ctx; + state.callback_name = "ddl"; + state.report_location = message_lsn; + errcallback.callback = output_plugin_error_callback; + errcallback.arg = (void *)&state; + errcallback.previous = t_thrd.log_cxt.error_context_stack; + t_thrd.log_cxt.error_context_stack = &errcallback; + + /* set output state */ + ctx->accept_writes = true; + ctx->write_xid = txn != NULL ? 
txn->xid : InvalidTransactionId; + ctx->write_location = message_lsn; + + /* do the actual work: call callback */ + ctx->callbacks.ddl_cb(ctx, txn, message_lsn, prefix, relid, cmdtype, message_size, message); + + /* Pop the error context stack */ + t_thrd.log_cxt.error_context_stack = errcallback.previous; +} + /* * Set the required catalog xmin horizon for historic snapshots in the current * replication slot. diff --git a/src/gausskernel/storage/replication/logical/logicalfuncs.cpp b/src/gausskernel/storage/replication/logical/logicalfuncs.cpp index 833f05c36..cf6c44254 100755 --- a/src/gausskernel/storage/replication/logical/logicalfuncs.cpp +++ b/src/gausskernel/storage/replication/logical/logicalfuncs.cpp @@ -43,6 +43,7 @@ #include "replication/logicalfuncs.h" #include "replication/walsender_private.h" #include "access/xlog_internal.h" +#include "replication/ddlmessage.h" #include "storage/smgr/fd.h" #include "storage/file/fio_device.h" diff --git a/src/gausskernel/storage/replication/logical/proto.cpp b/src/gausskernel/storage/replication/logical/proto.cpp index a75b3b16b..03920cba3 100644 --- a/src/gausskernel/storage/replication/logical/proto.cpp +++ b/src/gausskernel/storage/replication/logical/proto.cpp @@ -629,3 +629,44 @@ void logicalrep_read_conninfo(StringInfo in, char** conninfo) int rc = strcpy_s(*conninfo, conninfoLen, conninfoTemp); securec_check(rc, "", ""); } + +/* + * Read DDL MESSAGE from stream + */ +char * +logicalrep_read_ddl(StringInfo in, XLogRecPtr *lsn, + const char **prefix, + Size *sz) +{ + uint8 flags; + char *msg; + + flags = pq_getmsgint(in, 1); + if (flags != 0) + elog(ERROR, "unrecognized flags %u in ddl message", flags); + + *lsn = pq_getmsgint64(in); + *prefix = pq_getmsgstring(in); + *sz = pq_getmsgint64(in); + msg = (char *)pq_getmsgbytes(in, *sz); + + return msg; +} + +/* + * Write DDL MESSAGE to stream + */ +void +logicalrep_write_ddl(StringInfo out, XLogRecPtr lsn, + const char *prefix, Size sz, const char *message) +{ + uint8 flags = 0; + + pq_sendbyte(out, LOGICAL_REP_MSG_DDL); + + pq_sendint8(out, flags); + pq_sendint64(out, lsn); + pq_sendstring(out, prefix); + pq_sendint64(out, sz); + pq_sendbytes(out, message, sz); +} diff --git a/src/gausskernel/storage/replication/logical/reorderbuffer.cpp b/src/gausskernel/storage/replication/logical/reorderbuffer.cpp index 4d6844768..394e189be 100644 --- a/src/gausskernel/storage/replication/logical/reorderbuffer.cpp +++ b/src/gausskernel/storage/replication/logical/reorderbuffer.cpp @@ -71,6 +71,7 @@ #include "replication/slot.h" #include "replication/snapbuild.h" /* just for SnapBuildSnapDecRefcount */ #include "access/xlog_internal.h" +#include "replication/ddlmessage.h" #include "storage/buf/bufmgr.h" #include "storage/smgr/fd.h" @@ -243,6 +244,8 @@ static ReorderBufferTXN *ReorderBufferGetTXN(ReorderBuffer *rb) dlist_init(&txn->tuplecids); dlist_init(&txn->subtxns); + txn->output_plugin_private = NULL; + return txn; } @@ -343,6 +346,13 @@ static Size ReorderBufferChangeSize(ReorderBufferChange *change) } break; } + case REORDER_BUFFER_CHANGE_DDL: { + Size prefix_size = strlen(change->data.ddl.prefix) + 1; + + sz += prefix_size + change->data.ddl.message_size + sizeof(Size) + sizeof(Size) + sizeof(Oid) + sizeof(DeparsedCommandType); + + break; + } } return sz; } @@ -393,6 +403,14 @@ void ReorderBufferReturnChange(ReorderBuffer *rb, ReorderBufferChange *change) change->data.tp.oldtuple = NULL; } break; + case REORDER_BUFFER_CHANGE_DDL: + if (change->data.ddl.prefix != NULL) + 
pfree(change->data.ddl.prefix); + change->data.ddl.prefix = NULL; + if (change->data.ddl.message != NULL) + pfree(change->data.ddl.message); + change->data.ddl.message = NULL; + break; case REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT: if (change->data.snapshot) { ReorderBufferFreeSnap(rb, change->data.snapshot); @@ -623,6 +641,35 @@ void ReorderBufferQueueChange(LogicalDecodingContext *ctx, TransactionId xid, XL ReorderBufferCheckSerializeTXN(ctx, txn); } +/* + * A transactional DDL message is queued to be processed upon commit. + */ +void +ReorderBufferQueueDDLMessage(LogicalDecodingContext *ctx, TransactionId xid, XLogRecPtr lsn, + const char*prefix, Size message_size, const char *message, Oid relid, DeparsedCommandType cmdtype) +{ + int rc = 0; + ReorderBuffer *rb = ctx->reorder; + MemoryContext oldcontext; + ReorderBufferChange *change; + + Assert(TransactionIdIsValid(xid)); + + oldcontext = MemoryContextSwitchTo(rb->context); + + change = ReorderBufferGetChange(rb); + change->action = REORDER_BUFFER_CHANGE_DDL; + change->data.ddl.prefix = pstrdup(prefix); + change->data.ddl.relid = relid; + change->data.ddl.cmdtype = cmdtype; + change->data.ddl.message_size = message_size; + change->data.ddl.message = (char*)palloc(message_size); + rc = memcpy_s(change->data.ddl.message, message_size, message, message_size); + securec_check(rc, "", ""); + + ReorderBufferQueueChange(ctx, xid, lsn, change); + MemoryContextSwitchTo(oldcontext); +} /* * AssertTXNLsnOrder * Verify LSN ordering of transaction lists in the reorderbuffer @@ -1303,6 +1350,21 @@ static void ReorderBufferFreeSnap(ReorderBuffer *rb, Snapshot snap) SnapBuildSnapDecRefcount(snap); } +/* + * Helper function for ReorderBufferProcessTXN for applying the DDL message. + */ +static inline void +ReorderBufferApplyDDLMessage(ReorderBuffer *rb, ReorderBufferTXN *txn, + ReorderBufferChange *change) +{ + rb->ddl(rb, txn, change->lsn, + change->data.ddl.prefix, + change->data.ddl.relid, + change->data.ddl.cmdtype, + change->data.ddl.message_size, + change->data.ddl.message); +} + /* * Perform the replay of a transaction and its non-aborted subtransactions. 
* @@ -1390,7 +1452,6 @@ void ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, XLogRecPtr commit Relation relation = NULL; Oid reloid; Oid partitionReltoastrelid = InvalidOid; - bool stopDecoding = false; bool isSegment = false; switch (change->action) { @@ -1500,11 +1561,6 @@ void ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, XLogRecPtr commit case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID: Assert(change->data.command_id != InvalidCommandId); - if (change->data.command_id != FirstCommandId) { - LogicalDecodeReportLostChanges(iterstate); - stopDecoding = true; - break; - } if (command_id < change->data.command_id) { command_id = change->data.command_id; @@ -1532,6 +1588,9 @@ void ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, XLogRecPtr commit ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("tuplecid value in changequeue"))); break; + case REORDER_BUFFER_CHANGE_DDL: + ReorderBufferApplyDDLMessage(rb, txn, change); + break; case REORDER_BUFFER_CHANGE_UINSERT: case REORDER_BUFFER_CHANGE_UDELETE: case REORDER_BUFFER_CHANGE_UUPDATE: @@ -1606,9 +1665,6 @@ void ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, XLogRecPtr commit RelationClose(relation); break; } - if (stopDecoding) { - break; - } } ReorderBufferIterTXNFinish(rb, iterstate); @@ -2220,6 +2276,44 @@ static void ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *tx } break; } + case REORDER_BUFFER_CHANGE_DDL: { + char *data; + Size prefix_size = strlen(change->data.ddl.prefix) + 1; + + sz += prefix_size + change->data.ddl.message_size + + sizeof(Size) + sizeof(Oid) + sizeof(DeparsedCommandType) + sizeof(Size); + ReorderBufferSerializeReserve(rb, sz); + + data = ((char *) rb->outbuf) + sizeof(ReorderBufferDiskChange); + + /* might have been reallocated above */ + ondisk = (ReorderBufferDiskChange *) rb->outbuf; + + /* write the prefix, relid and cmdtype including the size */ + rc = memcpy_s(data, sizeof(Size), &prefix_size, sizeof(Size)); + securec_check(rc, "", ""); + data += sizeof(Size); + rc = memcpy_s(data, sizeof(Oid), &change->data.ddl.relid, sizeof(Oid)); + securec_check(rc, "", ""); + data += sizeof(Oid); + rc = memcpy_s(data, sizeof(DeparsedCommandType), &change->data.ddl.cmdtype, sizeof(DeparsedCommandType)); + securec_check(rc, "", ""); + data += sizeof(DeparsedCommandType); + rc = memcpy_s(data, prefix_size, change->data.ddl.prefix, prefix_size); + securec_check(rc, "", ""); + data += prefix_size; + + /* write the message including the size */ + rc = memcpy_s(data, sizeof(Size), &change->data.ddl.message_size, sizeof(Size)); + securec_check(rc, "", ""); + data += sizeof(Size); + rc = memcpy_s(data, change->data.ddl.message_size, change->data.ddl.message, + change->data.ddl.message_size); + securec_check(rc, "", ""); + data += change->data.ddl.message_size; + + break; + } case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID: /* ReorderBufferChange contains everything important */ break; @@ -2440,6 +2534,39 @@ static void ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn, case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID: case REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID: break; + case REORDER_BUFFER_CHANGE_DDL: { + Size prefix_size; + + /* read prefix */ + rc = memcpy_s(&prefix_size, sizeof(Size), data, sizeof(Size)); + securec_check(rc, "", ""); + data += sizeof(Size); + rc = memcpy_s(&change->data.ddl.relid, sizeof(Oid), data, sizeof(Oid)); + securec_check(rc, "", ""); + data += sizeof(Oid); + rc = 
memcpy_s(&change->data.ddl.cmdtype, sizeof(DeparsedCommandType), data, sizeof(DeparsedCommandType));
+            securec_check(rc, "", "");
+            data += sizeof(DeparsedCommandType);
+
+            change->data.ddl.prefix = (char*)MemoryContextAlloc(rb->context, prefix_size);
+            rc = memcpy_s(change->data.ddl.prefix, prefix_size, data, prefix_size);
+            securec_check(rc, "", "");
+            Assert(change->data.ddl.prefix[prefix_size - 1] == '\0');
+            data += prefix_size;
+
+            /* read the message */
+            rc = memcpy_s(&change->data.ddl.message_size, sizeof(Size), data, sizeof(Size));
+            securec_check(rc, "", "");
+            data += sizeof(Size);
+            change->data.ddl.message = (char*)MemoryContextAlloc(rb->context,
+                change->data.ddl.message_size);
+            rc = memcpy_s(change->data.ddl.message, change->data.ddl.message_size, data,
+                change->data.ddl.message_size);
+            securec_check(rc, "", "");
+            data += change->data.ddl.message_size;
+
+            break;
+        }
         case REORDER_BUFFER_CHANGE_UINSERT:
         case REORDER_BUFFER_CHANGE_UDELETE:
         case REORDER_BUFFER_CHANGE_UUPDATE:
diff --git a/src/gausskernel/storage/replication/logical/worker.cpp b/src/gausskernel/storage/replication/logical/worker.cpp
index 0b6cc2aeb..1431b74e4 100644
--- a/src/gausskernel/storage/replication/logical/worker.cpp
+++ b/src/gausskernel/storage/replication/logical/worker.cpp
@@ -36,11 +36,16 @@
 #include "access/xact.h"
 #include "access/xlog_internal.h"
+#include "catalog/catalog.h"
 #include "catalog/namespace.h"
 #include "catalog/pg_subscription.h"
 #include "catalog/pg_partition_fn.h"
 #include "catalog/pg_subscription_rel.h"
-
+#include "parser/analyze.h"
+#include "tcop/ddldeparse.h"
+#include "tcop/pquery.h"
+#include "tcop/utility.h"
+#include "commands/tablecmds.h"
 #include "commands/trigger.h"
 #include "commands/subscriptioncmds.h"
@@ -89,6 +94,8 @@
 #include "utils/inval.h"
 #include "utils/lsyscache.h"
 #include "utils/memutils.h"
+#include "utils/rel.h"
+#include "utils/acl.h"
 #include "utils/syscache.h"
 #include "utils/postinit.h"
 #include "utils/ps_status.h"
@@ -1160,6 +1167,241 @@ static void apply_handle_delete(StringInfo s)
 }
 
+/*
+ * Handle CREATE TABLE command
+ *
+ * Call AddSubscriptionRelState for CREATE TABLE command to set the relstate to
+ * SUBREL_STATE_READY so DML changes on this new table can be replicated without
+ * having to manually run "ALTER SUBSCRIPTION ... REFRESH PUBLICATION"
+ */
+static void
+handle_create_table(Node *command)
+{
+    RangeVar *rv = NULL;
+    Oid relid;
+    Oid relnamespace = InvalidOid;
+    CreateStmt *cstmt;
+    char *schemaname = NULL;
+    char *relname = NULL;
+
+    /* Check the node tag before casting; only plain CREATE TABLE is handled. */
+    if (nodeTag(command) == T_CreateStmt) {
+        cstmt = (CreateStmt*)command;
+        rv = cstmt->relation;
+    } else {
+        return;
+    }
+
+    if (!rv)
+        return;
+
+    schemaname = rv->schemaname;
+    relname = rv->relname;
+
+    if (schemaname != NULL)
+        relnamespace = get_namespace_oid(schemaname, false);
+
+    if (relnamespace != InvalidOid)
+        relid = get_relname_relid(relname, relnamespace);
+    else
+        relid = RelnameGetRelid(relname);
+
+    if (OidIsValid(relid))
+    {
+        AddSubscriptionRelState(t_thrd.applyworker_cxt.mySubscription->oid, relid,
+                                SUBREL_STATE_READY);
+        ereport(DEBUG1,
+                (errmsg_internal("table \"%s\" added to subscription \"%s\"",
+                                 relname, t_thrd.applyworker_cxt.mySubscription->name)));
+    }
+}
+
+
+/*
+ * Begin one step (one INSERT, UPDATE, etc) of a replication transaction.
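+ *
+ * Callers pair this with end_replication_step(); an illustrative shape:
+ *
+ *     begin_replication_step();
+ *     ... apply exactly one change ...
+ *     end_replication_step();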
+ * + * Start a transaction, if this is the first step (else we keep using the + * existing transaction). + * Also provide a global snapshot and ensure we run in ApplyMessageContext. + */ +static void +begin_replication_step(void) +{ + SetCurrentStatementStartTimestamp(); + + if (!IsTransactionState()) + { + StartTransactionCommand(); + reread_subscription(); + } + + PushActiveSnapshot(GetTransactionSnapshot()); +} + +/* + * Finish up one step of a replication transaction. + * Callers of begin_replication_step() must also call this. + * + * We don't close out the transaction here, but we should increment + * the command counter to make the effects of this step visible. + */ +static void +end_replication_step(void) +{ + PopActiveSnapshot(); + + CommandCounterIncrement(); +} + +/* + * Handle DDL commands + * + * Handle DDL replication messages. Convert the json string into a query + * string and run it through the query portal. + */ +static void +apply_handle_ddl(StringInfo s) +{ + XLogRecPtr lsn; + const char *prefix = NULL; + char *message = NULL; + char *ddl_command; + char *owner; + Size sz; + List *parsetree_list; + ListCell *parsetree_item; + DestReceiver *receiver; + MemoryContext oldcontext; + Oid owner_oid = InvalidOid; + Oid saved_current_user = InvalidOid; + int save_sec_context = 0; + + const char *save_debug_query_string = t_thrd.postgres_cxt.debug_query_string; + + MemoryContext per_parsetree_context = NULL; + + ensure_transaction(); + + message = logicalrep_read_ddl(s, &lsn, &prefix, &sz); + + begin_replication_step(); + + ddl_command = deparse_ddl_json_to_string(message, &owner); + t_thrd.postgres_cxt.debug_query_string = ddl_command; + + ereport(LOG, (errmsg("apply [ddl] for %s [owner] %s", ddl_command, owner ? owner : "none"))); + + /* + * If requested, set the current role to the owner that executed the + * command on the publication server. + */ + GetUserIdAndSecContext(&saved_current_user, &save_sec_context); + if (t_thrd.applyworker_cxt.mySubscription->matchddlowner && owner) { + owner_oid = get_role_oid(owner, false); + SetUserIdAndSecContext(owner_oid, save_sec_context); + } + + /* DestNone for logical replication */ + receiver = CreateDestReceiver(DestNone); + parsetree_list = pg_parse_query(ddl_command); + + foreach(parsetree_item, parsetree_list) { + List *plantree_list; + List *querytree_list; + Node *command = (Node *) lfirst(parsetree_item); + const char *commandTag; + + Portal portal; + bool snapshot_set = false; + + commandTag = CreateCommandTag((Node *) command); + + /* If we got a cancel signal in parsing or prior command, quit */ + CHECK_FOR_INTERRUPTS(); + + /* + * Set up a snapshot if parse analysis/planning will need one. + */ + if (analyze_requires_snapshot(command)) { + PushActiveSnapshot(GetTransactionSnapshot()); + snapshot_set = true; + } + + /* + * We do the work for each parsetree in a short-lived context, to + * limit the memory used when there are many commands in the string. 
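+     * (A fresh context is created for each statement and deleted at the
+     * end of the loop iteration, so peak memory stays bounded by the
+     * largest single statement rather than by the whole command string.)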
+ */ + per_parsetree_context = + AllocSetContextCreate(CurrentMemoryContext, + "execute_sql_string per-statement context", + ALLOCSET_DEFAULT_SIZES); + oldcontext = MemoryContextSwitchTo(per_parsetree_context); + + querytree_list = pg_analyze_and_rewrite(command, ddl_command, NULL, 0); + + plantree_list = pg_plan_queries(querytree_list, 0, NULL); + + /* Done with the snapshot used for parsing/planning */ + if (snapshot_set) + PopActiveSnapshot(); + + portal = CreatePortal("logical replication", true, true); + + /* + * We don't have to copy anything into the portal, because everything + * we are passing here is in ApplyMessageContext or the + * per_parsetree_context, and so will outlive the portal anyway. + */ + PortalDefineQuery(portal, + NULL, + ddl_command, + commandTag, + plantree_list, + NULL); + + /* + * Start the portal. No parameters here. + */ + PortalStart(portal, NULL, 0, InvalidSnapshot); + + /* + * Switch back to transaction context for execution. + */ + MemoryContextSwitchTo(oldcontext); + + (void) PortalRun(portal, + FETCH_ALL, + true, + receiver, + receiver, + NULL); + + PortalDrop(portal, false); + + CommandCounterIncrement(); + + /* + * Table created by DDL replication (database level) is automatically + * added to the subscription here. + */ + handle_create_table(command); + + /* Now we may drop the per-parsetree context, if one was created. */ + MemoryContextDelete(per_parsetree_context); + } + + if (t_thrd.applyworker_cxt.mySubscription->matchddlowner && owner) { + SetUserIdAndSecContext(saved_current_user, save_sec_context); + } + + t_thrd.postgres_cxt.debug_query_string = save_debug_query_string; + end_replication_step(); +} + /* * Logical replication protocol message dispatcher. */ @@ -1203,6 +1445,9 @@ static void apply_dispatch(StringInfo s) case 'S': apply_handle_conninfo(s); break; + case LOGICAL_REP_MSG_DDL: + apply_handle_ddl(s); + break; default: ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid logical replication message type \"%c\"", action))); @@ -1730,6 +1975,14 @@ void ApplyWorkerMain() gs_signal_setmask(&t_thrd.libpq_cxt.UnBlockSig, NULL); (void)gs_signal_unblock_sigusr2(); + if (!t_thrd.mem_cxt.mask_password_mem_cxt) + t_thrd.mem_cxt.mask_password_mem_cxt = AllocSetContextCreate(t_thrd.top_mem_cxt, + "MaskPasswordCtx", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + + /* * If an exception is encountered, processing resumes here. 
 *
diff --git a/src/gausskernel/storage/replication/pgoutput/pgoutput.cpp b/src/gausskernel/storage/replication/pgoutput/pgoutput.cpp
index 4478a8a0a..44cbed1f8 100644
--- a/src/gausskernel/storage/replication/pgoutput/pgoutput.cpp
+++ b/src/gausskernel/storage/replication/pgoutput/pgoutput.cpp
@@ -21,6 +21,7 @@
 #include "replication/logicalproto.h"
 #include "replication/origin.h"
 #include "replication/pgoutput.h"
+#include "commands/publicationcmds.h"
 #include "utils/builtins.h"
 #include "utils/inval.h"
@@ -42,6 +43,12 @@ static void pgoutput_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *t
 static void pgoutput_abort_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn);
 static void pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, Relation rel,
     ReorderBufferChange *change);
+static void pgoutput_ddl(LogicalDecodingContext *ctx,
+                         ReorderBufferTXN *txn, XLogRecPtr message_lsn,
+                         const char *prefix, Oid relid,
+                         DeparsedCommandType cmdtype,
+                         Size sz, const char *message);
+
 static bool pgoutput_origin_filter(LogicalDecodingContext *ctx, RepOriginId origin_id);
 static List *LoadPublications(List *pubnames);
@@ -49,6 +56,12 @@ static void publication_invalidation_cb(Datum arg, int cacheid, uint32 hashvalue
 static bool ReplconninfoChanged();
 static bool GetConninfo(StringInfoData* standbysInfo);
 
+typedef struct PGOutputTxnData
+{
+    bool sent_begin_txn;   /* flag indicating whether BEGIN has been sent */
+    List *deleted_relids;  /* maintain list of deleted table oids */
+} PGOutputTxnData;
+
 /* Entry in the map used to remember which relation schemas we sent. */
 typedef struct RelationSyncEntry {
     Oid relid; /* relation oid */
@@ -58,6 +71,7 @@ typedef struct RelationSyncEntry {
 } RelationSyncEntry;
 
 static void init_rel_sync_cache(MemoryContext decoding_context);
+static bool NeedPublicationDDL(PGOutputData *data, bool needall);
 static RelationSyncEntry *get_rel_sync_entry(PGOutputData *data, Oid relid);
 static void rel_sync_cache_relation_cb(Datum arg, Oid relid);
 static void rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue);
@@ -76,6 +90,7 @@ void _PG_output_plugin_init(OutputPluginCallbacks *cb)
     cb->abort_cb = pgoutput_abort_txn;
     cb->filter_by_origin_cb = pgoutput_origin_filter;
     cb->shutdown_cb = pgoutput_shutdown;
+    cb->ddl_cb = pgoutput_ddl;
 }
 
 static void parse_output_parameters(List *options, PGOutputData *data)
@@ -186,12 +201,41 @@ static void pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *o
     }
 }
 
+/* Initialize the per-transaction private data for the given transaction. */
+static void
+init_txn_data(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
+{
+    PGOutputTxnData *txndata;
+
+    if (txn->output_plugin_private != NULL)
+        return;
+
+    txndata = (PGOutputTxnData*)MemoryContextAllocZero(ctx->context, sizeof(PGOutputTxnData));
+
+    txn->output_plugin_private = txndata;
+}
+
+/* Clean up the per-transaction private data for the given transaction.
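+ * The data is allocated lazily by init_txn_data() and released from
+ * pgoutput_commit_txn() once the transaction has been fully processed.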
+ */
+static void
+clean_txn_data(ReorderBufferTXN *txn)
+{
+    PGOutputTxnData *txndata = (PGOutputTxnData *) txn->output_plugin_private;
+
+    if (txndata == NULL)
+        return;
+
+    list_free(txndata->deleted_relids);
+    pfree(txndata);
+    txn->output_plugin_private = NULL;
+}
+
 /*
  * BEGIN callback
  */
 static void pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
 {
     bool send_replication_origin = txn->origin_id != InvalidRepOriginId;
+    init_txn_data(ctx, txn);
 
     OutputPluginPrepareWrite(ctx, !send_replication_origin);
     logicalrep_write_begin(ctx->out, txn);
@@ -228,6 +272,7 @@ static void pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *tx
  */
 static void pgoutput_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr commit_lsn)
 {
+    clean_txn_data(txn);
     OutputPluginPrepareWrite(ctx, true);
     logicalrep_write_commit(ctx->out, txn, commit_lsn);
     OutputPluginWrite(ctx, true);
@@ -405,6 +450,105 @@ static void pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
     MemoryContextReset(data->common.context);
 }
 
+
+/* Check if the given object is published. */
+static bool
+is_object_published_ddl(LogicalDecodingContext *ctx, Oid objid)
+{
+    RelationSyncEntry *relentry;
+    PGOutputData *data = (PGOutputData *) ctx->output_plugin_private;
+
+    /* First check the DDL command filter. */
+    switch (get_rel_relkind(objid)) {
+        case RELKIND_RELATION:
+            relentry = get_rel_sync_entry(data, objid);
+
+            /* Only send this ddl if we publish ddl messages. */
+            if (ENABLE_PUBDDL_TYPE(relentry->pubactions.pubddl, PUBDDL_TABLE))
+                return true;
+
+            break;
+
+        case RELKIND_SEQUENCE:
+            /* Send sequences */
+            return NeedPublicationDDL(data, false);
+        default:
+            /* unsupported objects */
+            return NeedPublicationDDL(data, true);
+    }
+
+    return false;
+}
+
+/*
+ * Send the decoded DDL message.
+ */
+static void
+pgoutput_ddl(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
+             XLogRecPtr message_lsn,
+             const char *prefix, Oid relid, DeparsedCommandType cmdtype,
+             Size sz, const char *message)
+{
+    PGOutputTxnData *txndata = (PGOutputTxnData *) txn->output_plugin_private;
+
+    /*
+     * Check if the given object is published. Note that for dropped objects,
+     * we cannot get the required information from the catalog, so we skip the
+     * check for them.
+     */
+    if (cmdtype != DCT_TableDropEnd && !is_object_published_ddl(ctx, relid)) {
+        return;
+    }
+
+    switch (cmdtype) {
+        case DCT_TableDropStart: {
+            MemoryContext oldctx;
+
+            init_txn_data(ctx, txn);
+
+            txndata = (PGOutputTxnData *) txn->output_plugin_private;
+
+            /*
+             * On DROP start, add the relid to the deleted_relids list if the
+             * relid is part of a publication that supports ddl
+             * publication. We need this because on DROP end, the relid
+             * will no longer be valid. Later, on DROP end, verify that the
+             * drop is for a relid that is on the deleted_relids list, and
+             * only then send the ddl message.
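+             *
+             * Illustrative sequence for a single "DROP TABLE t":
+             *   1. DCT_TableDropStart is decoded while t's catalog entry
+             *      still exists, so is_object_published_ddl() can be
+             *      evaluated and t's relid is recorded in deleted_relids.
+             *   2. The table itself is dropped.
+             *   3. DCT_TableDropEnd arrives with a relid that can no longer
+             *      be looked up; the message is sent only if that relid was
+             *      recorded in step 1.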
+ */ + oldctx = MemoryContextSwitchTo(ctx->context); + txndata->deleted_relids = lappend_oid(txndata->deleted_relids, + relid); + MemoryContextSwitchTo(oldctx); + } + return; + case DCT_TableDropEnd: + if (!list_member_oid(txndata->deleted_relids, relid)) + return; + + txndata->deleted_relids = list_delete_oid(txndata->deleted_relids, + relid); + break; + + case DCT_SimpleCmd: + case DCT_ObjectDrop: + case DCT_ObjectCreate: + /* do nothing */ + break; + + default: + elog(ERROR, "unsupported type %d", cmdtype); + break; + } + + /* Send BEGIN if we haven't yet */ + pgoutput_begin_txn(ctx, txn); + + OutputPluginPrepareWrite(ctx, true); + logicalrep_write_ddl(ctx->out, message_lsn, prefix, sz, message); + OutputPluginWrite(ctx, true); +} + /* * Currently we always forward. */ @@ -497,6 +641,32 @@ static void init_rel_sync_cache(MemoryContext cachectx) CacheRegisterThreadSyscacheCallback(PUBLICATIONRELMAP, rel_sync_cache_publication_cb, (Datum)0); } +static bool NeedPublicationDDL(PGOutputData *data, bool needall) +{ + ListCell *lc; + + /* Reload publications if needed before use. */ + if (!t_thrd.publication_cxt.publications_valid) { + MemoryContext oldctx = MemoryContextSwitchTo(u_sess->cache_mem_cxt); + if (data->publications) + list_free_deep(data->publications); + + data->publications = LoadPublications(data->publication_names); + MemoryContextSwitchTo(oldctx); + t_thrd.publication_cxt.publications_valid = true; + } + + foreach (lc, data->publications) { + Publication *pub = (Publication*)lfirst(lc); + if (pub->pubactions.pubddl == PUBDDL_ALL) { + return true; + } else if (!needall && ENABLE_PUBDDL_TYPE(pub->pubactions.pubddl, PUBDDL_TABLE)) { + return true; + } + } + return false; +} + static void RefreshRelationEntry(RelationSyncEntry *entry, PGOutputData *data, Oid relid) { List *pubids = GetRelationPublications(relid); @@ -521,6 +691,7 @@ static void RefreshRelationEntry(RelationSyncEntry *entry, PGOutputData *data, O entry->pubactions.pubinsert = false; entry->pubactions.pubupdate = false; entry->pubactions.pubdelete = false; + entry->pubactions.pubddl = 0; foreach (lc, data->publications) { Publication *pub = (Publication *)lfirst(lc); @@ -551,9 +722,13 @@ static void RefreshRelationEntry(RelationSyncEntry *entry, PGOutputData *data, O entry->pubactions.pubinsert |= pub->pubactions.pubinsert; entry->pubactions.pubupdate |= pub->pubactions.pubupdate; entry->pubactions.pubdelete |= pub->pubactions.pubdelete; + if (entry->pubactions.pubddl != PUBDDL_ALL) { + entry->pubactions.pubddl |= pub->pubactions.pubddl; + } } - if (entry->pubactions.pubinsert && entry->pubactions.pubupdate && entry->pubactions.pubdelete) + if (entry->pubactions.pubinsert && entry->pubactions.pubupdate && entry->pubactions.pubdelete && + pub->pubactions.pubddl == PUBDDL_ALL) break; } diff --git a/src/include/access/rmgrlist.h b/src/include/access/rmgrlist.h index 01ae82d15..65d427f07 100644 --- a/src/include/access/rmgrlist.h +++ b/src/include/access/rmgrlist.h @@ -80,4 +80,5 @@ PG_RMGR(RM_REPLORIGIN_ID, "ReplicationOrigin", replorigin_redo, replorigin_desc, replorigin_type_name) PG_RMGR(RM_COMPRESSION_REL_ID, "CompressionRelation", CfsShrinkRedo, CfsShrinkDesc, NULL, NULL, NULL, NULL, NULL, \ CfsShrinkTypeName) - +PG_RMGR(RM_LOGICALDDLMSG_ID, "LogicalDDLMessage", logicalddlmsg_redo, logicalddlmsg_desc, NULL, NULL, NULL, NULL, NULL, \ + logicalddlmsg_type_name) diff --git a/src/include/catalog/objectaddress.h b/src/include/catalog/objectaddress.h index 6d79b2d26..8a2986b8e 100644 --- 
a/src/include/catalog/objectaddress.h +++ b/src/include/catalog/objectaddress.h @@ -23,6 +23,25 @@ typedef enum TrDropMode { RB_DROP_MODE_PHYSICAL = 2, } TrDropMode; +typedef struct { + Oid class_oid; /* oid of catalog */ + Oid oid_index_oid; /* oid of index on system oid column */ + int oid_catcache_id; /* id of catcache on system oid column */ + int name_catcache_id; /* id of catcache on (name,namespace), or + * (name) if the object does not live in a + * namespace */ + AttrNumber attnum_name; /* attnum of name field */ + AttrNumber attnum_namespace; /* attnum of namespace field */ + AttrNumber attnum_owner; /* attnum of owner field */ + AttrNumber attnum_acl; /* attnum of acl field */ + ObjectType objtype; /* OBJECT_* of this object type */ + bool is_nsp_name_unique; /* can the nsp/name combination (or name + * alone, if there's no namespace) be + * considered a unique identifier for an + * object of this class? */ + +} ObjectPropertyType; + #define TrDropModeIsAlreadySet(obj) ((obj)->rbDropMode != RB_DROP_MODE_INVALID) #define TrObjIsEqual(objA, objB) ((objA)->classId == (objB)->classId && (objA)->objectId == (objB)->objectId) #define TrObjIsEqualEx(_classId, _objectId, objB) ((_classId) == (objB)->classId && (_objectId) == (objB)->objectId) @@ -90,6 +109,7 @@ extern char *getObjectTypeDescription(const ObjectAddress *object); extern bool is_objectclass_supported(Oid class_id); extern HeapTuple get_catalog_object_by_oid(Relation catalog, Oid objectId); +extern HeapTuple get_catalog_object_by_oid(Relation catalog, AttrNumber oidcol, Oid objectId); extern char *getObjectIdentity(const ObjectAddress *address); extern int read_objtype_from_string(const char *objtype); extern char *getObjectIdentityParts(const ObjectAddress *address, diff --git a/src/include/catalog/pg_publication.h b/src/include/catalog/pg_publication.h index 139d7c4fb..6a71b9eae 100644 --- a/src/include/catalog/pg_publication.h +++ b/src/include/catalog/pg_publication.h @@ -19,6 +19,15 @@ #include "catalog/genbki.h" #include "catalog/objectaddress.h" +#include "nodes/pg_list.h" + +/* Publication trigger events */ +#define PUB_TRIG_DDL_CMD_END "ddl_command_end" +#define PUB_TRIG_DDL_CMD_START "ddl_command_start" + +/* Publication event trigger prefix */ +#define PUB_EVENT_TRIG_FORMAT "pg_deparse_trig_%s_%u" +#define PUB_EVENT_TRIG_PREFIX "pg_deparse_trig_" /* ---------------- * pg_publication definition. 
 *        cpp turns this into
@@ -26,6 +35,9 @@
 *
 * ----------------
 */
+
+#define int8 int64
+
 #define PublicationRelationId 6130
 #define PublicationRelation_Rowtype_Id 6141
 CATALOG(pg_publication,6130) BKI_ROWTYPE_OID(6141) BKI_SCHEMA_MACRO
@@ -48,8 +60,11 @@ CATALOG(pg_publication,6130) BKI_ROWTYPE_OID(6141) BKI_SCHEMA_MACRO
     /* true if deletes are published */
     bool pubdelete;
+
+    int8 pubddl;
 } FormData_pg_publication;
 
+#undef int8
 
 /* ----------------
 *        Form_pg_publication corresponds to a pointer to a tuple with
@@ -63,18 +78,20 @@ typedef FormData_pg_publication *Form_pg_publication;
 * ----------------
 */
-#define Natts_pg_publication 6
+#define Natts_pg_publication 7
 #define Anum_pg_publication_pubname 1
 #define Anum_pg_publication_pubowner 2
 #define Anum_pg_publication_puballtables 3
 #define Anum_pg_publication_pubinsert 4
 #define Anum_pg_publication_pubupdate 5
 #define Anum_pg_publication_pubdelete 6
+#define Anum_pg_publication_pubddl 7
 
 typedef struct PublicationActions {
     bool pubinsert;
     bool pubupdate;
     bool pubdelete;
+    int64 pubddl;
 } PublicationActions;
 
 typedef struct Publication {
@@ -84,6 +101,13 @@ typedef struct Publication {
     PublicationActions pubactions;
 } Publication;
 
+#define PUBDDL_NONE 0
+#define PUBDDL_TABLE ((int64)1 << 0)
+#define PUBDDL_ALL ((int64)0xFFFFFFFFFFFFFFFF)
+
+#define ENABLE_PUBDDL_TYPE(pub, obj) \
+    ((bool)((int64)(pub) & (obj)))
+
 extern Publication *GetPublicationByName(const char *pubname, bool missing_ok);
 extern List *GetRelationPublications(Oid relid);
 extern List *GetPublicationRelations(Oid pubid);
diff --git a/src/include/catalog/pg_subscription.h b/src/include/catalog/pg_subscription.h
index 8b79927fd..a493c0472 100644
--- a/src/include/catalog/pg_subscription.h
+++ b/src/include/catalog/pg_subscription.h
@@ -54,13 +54,15 @@ CATALOG(pg_subscription,6126) BKI_SHARED_RELATION BKI_ROWTYPE_OID(6128) BKI_SCHE
                              * publisher to send data in binary */
     text subskiplsn;        /* All changes finished at this LSN are
                              * skipped */
+    bool submatchddlowner;  /* True if replicated objects by DDL replication
+                             * should match the original owner on the publisher */
 #endif
 } FormData_pg_subscription;
 
 typedef FormData_pg_subscription *Form_pg_subscription;
 
-#define Natts_pg_subscription 10
+#define Natts_pg_subscription 11
 #define Anum_pg_subscription_subdbid 1
 #define Anum_pg_subscription_subname 2
 #define Anum_pg_subscription_subowner 3
@@ -71,7 +73,7 @@ typedef FormData_pg_subscription *Form_pg_subscription;
 #define Anum_pg_subscription_subpublications 8
 #define Anum_pg_subscription_subbinary 9
 #define Anum_pg_subscription_subskiplsn 10
-
+#define Anum_pg_subscription_submatchddlowner 11
 typedef struct Subscription {
     Oid oid;            /* Oid of the subscription */
@@ -86,6 +88,8 @@ typedef struct Subscription {
     bool binary;        /* Indicates if the subscription wants data in binary format */
     XLogRecPtr skiplsn; /* All changes finished at this LSN are
                          * skipped */
+    bool matchddlowner; /* Indicates whether objects replicated by DDL replication
+                         * should match the original owner on the publisher */
 } Subscription;
 
diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_921.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_921.sql
new file mode 100644
index 000000000..205c722fe
--- /dev/null
+++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_921.sql
@@ -0,0 +1,3 @@
+DROP FUNCTION IF EXISTS pg_catalog.publication_deparse_ddl_command_end() CASCADE;
+DROP FUNCTION IF EXISTS
pg_catalog.publication_deparse_ddl_command_start() CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.pubddl_decode(pubddl bigint) CASCADE; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_921.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_921.sql new file mode 100644 index 000000000..205c722fe --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_921.sql @@ -0,0 +1,3 @@ +DROP FUNCTION IF EXISTS pg_catalog.publication_deparse_ddl_command_end() CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.publication_deparse_ddl_command_start() CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.pubddl_decode(pubddl bigint) CASCADE; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_921.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_921.sql new file mode 100644 index 000000000..c13984d65 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_921.sql @@ -0,0 +1,20 @@ +DROP FUNCTION IF EXISTS pg_catalog.publication_deparse_ddl_command_end() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4642; +CREATE FUNCTION pg_catalog.publication_deparse_ddl_command_end () +RETURNS event_trigger +LANGUAGE INTERNAL VOLATILE STRICT NOT FENCED +AS 'publication_deparse_ddl_command_end'; + +DROP FUNCTION IF EXISTS pg_catalog.publication_deparse_ddl_command_start() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4643; +CREATE FUNCTION pg_catalog.publication_deparse_ddl_command_start () +RETURNS event_trigger +LANGUAGE INTERNAL VOLATILE STRICT NOT FENCED +AS 'publication_deparse_ddl_command_start'; + +DROP FUNCTION IF EXISTS pg_catalog.pubddl_decode(pubddl bigint) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4648; +CREATE FUNCTION pg_catalog.pubddl_decode (pubddl bigint) +RETURNS text +LANGUAGE INTERNAL IMMUTABLE STRICT NOT FENCED +AS 'pubddl_decode'; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_921.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_921.sql new file mode 100644 index 000000000..c13984d65 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_921.sql @@ -0,0 +1,20 @@ +DROP FUNCTION IF EXISTS pg_catalog.publication_deparse_ddl_command_end() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4642; +CREATE FUNCTION pg_catalog.publication_deparse_ddl_command_end () +RETURNS event_trigger +LANGUAGE INTERNAL VOLATILE STRICT NOT FENCED +AS 'publication_deparse_ddl_command_end'; + +DROP FUNCTION IF EXISTS pg_catalog.publication_deparse_ddl_command_start() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4643; +CREATE FUNCTION pg_catalog.publication_deparse_ddl_command_start () +RETURNS event_trigger +LANGUAGE INTERNAL VOLATILE STRICT NOT FENCED +AS 'publication_deparse_ddl_command_start'; + +DROP FUNCTION IF EXISTS pg_catalog.pubddl_decode(pubddl bigint) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4648; +CREATE FUNCTION pg_catalog.pubddl_decode (pubddl bigint) +RETURNS text +LANGUAGE INTERNAL IMMUTABLE STRICT NOT FENCED +AS 'pubddl_decode'; \ No newline at end 
of file diff --git a/src/include/commands/event_trigger.h b/src/include/commands/event_trigger.h index 5311fdc9d..a4f817fff 100644 --- a/src/include/commands/event_trigger.h +++ b/src/include/commands/event_trigger.h @@ -14,6 +14,7 @@ #define EVENT_TRIGGER_H #include "catalog/pg_event_trigger.h" +#include "lib/ilist.h" #include "nodes/parsenodes.h" #include "knl/knl_session.h" #include "catalog/objectaddress.h" @@ -55,6 +56,22 @@ typedef struct EventTriggerQueryState #define AT_REWRITE_ALTER_COMPRESSION 0x0B #define currentEventTriggerState ((EventTriggerQueryState*)(u_sess->exec_cxt.EventTriggerState)) + +/* Support for dropped objects */ +typedef struct SQLDropObject { + ObjectAddress address; + const char *schemaname; + const char *objname; + const char *objidentity; + const char *objecttype; + List *addrnames; + List *addrargs; + bool original; + bool normal; + bool istemp; + slist_node next; +} SQLDropObject; + /* * EventTriggerData is the node type that is passed as fmgr "context" info * when a function is called by the event trigger manager. diff --git a/src/include/commands/sequence.h b/src/include/commands/sequence.h index 4c2067555..9fbf00852 100644 --- a/src/include/commands/sequence.h +++ b/src/include/commands/sequence.h @@ -136,6 +136,20 @@ typedef struct xl_seq_rec { /* SEQUENCE TUPLE DATA FOLLOWS AT THE END */ } xl_seq_rec; +/* Information needed to define a sequence. */ +typedef struct sequence_values +{ + bool large; + char *sequence_name; + char *last_value; + char *start_value; + char *increment_by; + char *max_value; + char *min_value; + char *cache_value; + bool is_cycled; +} sequence_values; + /* * We don't want to log each fetching of a value from a sequence, * so we pre-log a few fetches in advance. In the event of @@ -178,6 +192,7 @@ extern void seq_desc(StringInfo buf, XLogReaderState* record); extern const char* seq_type_name(uint8 subtype); extern GTM_UUID get_uuid_from_rel(Relation rel); extern void lockNextvalOnCn(Oid relid); +extern sequence_values *get_sequence_values(Oid sequenceId); extern void get_sequence_params(Relation rel, int64* uuid, int64* start, int64* increment, int64* maxvalue, int64* minvalue, int64* cache, bool* cycle); diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h index cef077333..00650021e 100755 --- a/src/include/nodes/parsenodes.h +++ b/src/include/nodes/parsenodes.h @@ -1148,6 +1148,7 @@ typedef struct CreateSeqStmt { bool canCreateTempSeq; /* create sequence when "create table (like )" */ bool is_large; bool missing_ok; /* skip error if a Sequence is exists */ + bool is_autoinc; } CreateSeqStmt; typedef struct AlterSeqStmt { @@ -1159,6 +1160,7 @@ typedef struct AlterSeqStmt { bool is_serial; /* Indicates if this sequence is part of SERIAL process */ #endif bool is_large; /* Indicates if this is a large or normal sequence */ + bool is_autoinc; } AlterSeqStmt; /* ---------------------- diff --git a/src/include/replication/ddlmessage.h b/src/include/replication/ddlmessage.h new file mode 100644 index 000000000..60135e9e6 --- /dev/null +++ b/src/include/replication/ddlmessage.h @@ -0,0 +1,60 @@ +/*------------------------------------------------------------------------- + * ddlmessage.h + * + * + * Copyright (c) 2023, PostgreSQL Global Development Group + * + * src/include/replication/ddlmessage.h + *------------------------------------------------------------------------- + */ +#ifndef PG_LOGICAL_DDL_MESSAGE_H +#define PG_LOGICAL_DDL_MESSAGE_H + +#include "access/xlog.h" +#include "access/xlogdefs.h" 
+#include "access/xlogreader.h" +#include "nodes/nodes.h" + + +/* + * Support for keeping track of deparsed commands. + */ +typedef enum DeparsedCommandType +{ + DCT_ObjectCreate, + DCT_ObjectDrop, + DCT_SimpleCmd, + DCT_TableDropEnd, + DCT_TableDropStart +} DeparsedCommandType; + +/* + * Generic logical decoding DDL message wal record. + */ +typedef struct xl_logical_ddl_message +{ + Oid dbId; /* database Oid emitted from */ + Size prefix_size; /* length of prefix including null terminator */ + Oid relid; /* id of the table */ + DeparsedCommandType cmdtype; /* type of sql command */ + Size message_size; /* size of the message */ + + /* + * payload, including null-terminated prefix of length prefix_size + */ + char message[FLEXIBLE_ARRAY_MEMBER]; +} xl_logical_ddl_message; + +#define SizeOfLogicalDDLMessage (offsetof(xl_logical_ddl_message, message)) + +extern XLogRecPtr LogLogicalDDLMessage(const char *prefix, Oid relid, DeparsedCommandType cmdtype, + const char *ddl_message, size_t size); + +/* RMGR API*/ +#define XLOG_LOGICAL_DDL_MESSAGE 0x00 +void logicalddlmsg_redo(XLogReaderState *record); +void logicalddlmsg_desc(StringInfo buf, XLogReaderState *record); +const char *logicalddlmsg_identify(uint8 info); +const char *logicalddlmsg_type_name(uint8 subtype); + +#endif diff --git a/src/include/replication/decode.h b/src/include/replication/decode.h index 17d934641..024bf71c4 100644 --- a/src/include/replication/decode.h +++ b/src/include/replication/decode.h @@ -14,6 +14,13 @@ #ifndef DECODE_H #define DECODE_H +typedef struct XLogRecordBuffer { + XLogRecPtr origptr; + XLogRecPtr endptr; + XLogReaderState *record; + char *record_data; +} XLogRecordBuffer; + #include "access/xlogreader.h" #include "replication/reorderbuffer.h" #include "replication/logical.h" @@ -33,7 +40,7 @@ void UpdateUndoBody(Size* addLenPtr, char* data, uint8 flag, uint32* toastLen); char *UpdateOldTupleCalc(bool isInplaceUpdate, XLogReaderState *r, char **tupleOld, Size *tuplelenOld, uint32* toastLen); void DecodeUHeapToastTuple(const char * toastData, Size len, ReorderBufferTupleBuf *tuple); - +extern void logicalddl_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); extern void ParallelDecodeWorkerMain(void* point); extern void LogicalReadWorkerMain(void* point); #endif diff --git a/src/include/replication/logicalproto.h b/src/include/replication/logicalproto.h index f74e9478a..aab505b16 100644 --- a/src/include/replication/logicalproto.h +++ b/src/include/replication/logicalproto.h @@ -53,6 +53,7 @@ typedef struct LogicalRepTupleData { #define LOGICALREP_COLUMN_UNCHANGED 'u' #define LOGICALREP_COLUMN_TEXT 't' #define LOGICALREP_COLUMN_BINARY 'b' /* added in PG14 */ +#define LOGICAL_REP_MSG_DDL 'L' typedef uint32 LogicalRepRelId; @@ -108,5 +109,8 @@ extern void logicalrep_write_typ(StringInfo out, Oid typoid); extern void logicalrep_read_typ(StringInfo out, LogicalRepTyp *ltyp); extern void logicalrep_write_conninfo(StringInfo out, char* conninfo); extern void logicalrep_read_conninfo(StringInfo in, char** conninfo); +extern void logicalrep_write_ddl(StringInfo out, XLogRecPtr lsn, + const char *prefix, Size sz, const char *message); +extern char *logicalrep_read_ddl(StringInfo in, XLogRecPtr *lsn, const char **prefix, Size *sz); #endif /* LOGICALREP_PROTO_H */ diff --git a/src/include/replication/output_plugin.h b/src/include/replication/output_plugin.h index c3b4535ae..0b19a8713 100644 --- a/src/include/replication/output_plugin.h +++ b/src/include/replication/output_plugin.h @@ -54,6 +54,18 @@ typedef 
void (*LogicalDecodeStartupCB)(struct LogicalDecodingContext* ctx, Outpu */ typedef void (*LogicalDecodeBeginCB)(struct LogicalDecodingContext* ctx, ReorderBufferTXN* txn); +/* + * Called for the logical decoding DDL message. + */ +typedef void (*LogicalDecodeDDLMessageCB) (struct LogicalDecodingContext *ctx, + ReorderBufferTXN *txn, + XLogRecPtr message_lsn, + const char *prefix, + Oid relid, + DeparsedCommandType cmdtype, + Size message_size, + const char *message); + /* * Callback for every individual change in a successful transaction. */ @@ -100,6 +112,7 @@ typedef struct OutputPluginCallbacks { LogicalDecodePrepareCB prepare_cb; LogicalDecodeShutdownCB shutdown_cb; LogicalDecodeFilterByOriginCB filter_by_origin_cb; + LogicalDecodeDDLMessageCB ddl_cb; } OutputPluginCallbacks; typedef struct ParallelOutputPluginCallbacks { diff --git a/src/include/replication/parallel_decode.h b/src/include/replication/parallel_decode.h index d3b68542d..6f0700356 100644 --- a/src/include/replication/parallel_decode.h +++ b/src/include/replication/parallel_decode.h @@ -44,13 +44,6 @@ #include "catalog/pg_control.h" -typedef struct XLogRecordBuffer { - XLogRecPtr origptr; - XLogRecPtr endptr; - XLogReaderState *record; - char *record_data; -} XLogRecordBuffer; - typedef enum { NOT_DECODE_THREAD, diff --git a/src/include/replication/pgoutput.h b/src/include/replication/pgoutput.h index 088e235af..546e0dbbc 100644 --- a/src/include/replication/pgoutput.h +++ b/src/include/replication/pgoutput.h @@ -24,6 +24,7 @@ typedef struct PGOutputData { List *publication_names; List *publications; + List *deleted_relids; bool binary; } PGOutputData; diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h index bd61898ce..8c8319bd3 100644 --- a/src/include/replication/reorderbuffer.h +++ b/src/include/replication/reorderbuffer.h @@ -16,6 +16,7 @@ #include "lib/ilist.h" #include "storage/sinval.h" +#include "replication/ddlmessage.h" #include "utils/hsearch.h" #include "utils/rel.h" @@ -71,6 +72,7 @@ enum ReorderBufferChangeType { REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT, REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID, REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID, + REORDER_BUFFER_CHANGE_DDL, REORDER_BUFFER_CHANGE_UINSERT, REORDER_BUFFER_CHANGE_UUPDATE, REORDER_BUFFER_CHANGE_UDELETE @@ -127,6 +129,14 @@ typedef struct ReorderBufferChange { CommitSeqNo snapshotcsn; } utp; + struct { + char *prefix; + Size message_size; + char *message; + Oid relid; + DeparsedCommandType cmdtype; + } ddl; + /* New snapshot, set when action == *_INTERNAL_SNAPSHOT */ Snapshot snapshot; @@ -294,6 +304,11 @@ typedef struct ReorderBufferTXN { */ dlist_node node; Size size; + + /* + * Private data pointer of the output plugin. 
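+     * (pgoutput, for instance, stores its per-transaction PGOutputTxnData
+     * here; see init_txn_data()/clean_txn_data() in pgoutput.cpp.)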
+     */
+    void *output_plugin_private;
 } ReorderBufferTXN;
 
 /* so we can define the callbacks used inside struct ReorderBuffer itself */
@@ -309,6 +324,16 @@ typedef void (*ReorderBufferBeginCB)(ReorderBuffer* rb, ReorderBufferTXN* txn);
 /* commit callback signature */
 typedef void (*ReorderBufferCommitCB)(ReorderBuffer* rb, ReorderBufferTXN* txn, XLogRecPtr commit_lsn);
 
+/* DDL message callback signature */
+typedef void (*ReorderBufferDDLMessageCB) (ReorderBuffer *rb,
+                                           ReorderBufferTXN *txn,
+                                           XLogRecPtr message_lsn,
+                                           const char *prefix,
+                                           Oid relid,
+                                           DeparsedCommandType cmdtype,
+                                           Size sz,
+                                           const char *message);
+
 /* abort callback signature */
 typedef void (*ReorderBufferAbortCB)(ReorderBuffer* rb, ReorderBufferTXN* txn);
 
@@ -352,6 +377,7 @@ struct ReorderBuffer {
     ReorderBufferCommitCB commit;
     ReorderBufferAbortCB abort;
     ReorderBufferPrepareCB prepare;
+    ReorderBufferDDLMessageCB ddl;
 
     /*
      * Pointer that will be passed untouched to the callbacks.
@@ -487,5 +513,6 @@ void ReorderBufferSetRestartPoint(ReorderBuffer*, XLogRecPtr ptr);
 void StartupReorderBuffer(void);
 
 void ReorderBufferClear(const char *slotname);
-
+extern void ReorderBufferQueueDDLMessage(LogicalDecodingContext *ctx, TransactionId xid, XLogRecPtr lsn,
+    const char *prefix, Size message_size, const char *message, Oid relid, DeparsedCommandType cmdtype);
 #endif
diff --git a/src/include/tcop/ddldeparse.h b/src/include/tcop/ddldeparse.h
new file mode 100644
index 000000000..4f44aabe9
--- /dev/null
+++ b/src/include/tcop/ddldeparse.h
@@ -0,0 +1,37 @@
+/*-------------------------------------------------------------------------
+ *
+ * ddldeparse.h
+ *
+ * Portions Copyright (c) 2023, PostgreSQL Global Development Group
+ *
+ * src/include/tcop/ddldeparse.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef DDL_DEPARSE_H
+#define DDL_DEPARSE_H
+
+#include "tcop/deparse_utility.h"
+
+/* Context info needed for deparsing ddl command */
+typedef struct
+{
+    /*
+     * include_owner indicates if the owner/role of the command should be
+     * included in the deparsed JSON output. It is set to false for any
+     * commands that don't CREATE database objects (ALTER commands, for
+     * example); this avoids encoding and sending the owner downstream for
+     * replay, where it is unnecessary for such commands.
+     */
+    bool include_owner;
+
+    /* The maximum volatility of functions in expressions of a DDL command.
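+     * Tracked so that callers can, for example, flag commands whose
+     * expressions might not replay deterministically on a subscriber.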
*/ + char max_volatility; +} ddl_deparse_context; + +extern char *deparse_utility_command(CollectedCommand *cmd, + ddl_deparse_context * context); +extern char *deparse_ddl_json_to_string(char *jsonb, char** owner); +extern char *deparse_drop_command(const char *objidentity, const char *objecttype, Node *parsetree); + +#endif /* DDL_DEPARSE_H */ diff --git a/src/include/tcop/deparse_utility.h b/src/include/tcop/deparse_utility.h index b862b96dc..8bd7f3926 100644 --- a/src/include/tcop/deparse_utility.h +++ b/src/include/tcop/deparse_utility.h @@ -44,6 +44,7 @@ typedef struct CollectedCommand { CollectedCommandType type; bool in_extension; + char *role; Node *parsetree; union diff --git a/src/include/tcop/utility.h b/src/include/tcop/utility.h index 9d9797b84..6c4cf880b 100644 --- a/src/include/tcop/utility.h +++ b/src/include/tcop/utility.h @@ -218,5 +218,6 @@ extern void ClearCreateSeqStmtUUID(CreateSeqStmt* stmt); extern void ClearCreateStmtUUIDS(CreateStmt* stmt); extern bool IsSchemaInDistribution(const Oid namespaceOid); extern Oid GetNamespaceIdbyRelId(const Oid relid); - +extern char *flatten_reloptions(Oid relid); +extern void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf); #endif /* UTILITY_H */ diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h index f276fa67d..593fe9037 100644 --- a/src/include/utils/builtins.h +++ b/src/include/utils/builtins.h @@ -24,6 +24,7 @@ #include "lib/stringinfo.h" #endif #include "utils/sortsupport.h" +#include "catalog/pg_index.h" /* * Defined in adt/ @@ -891,6 +892,10 @@ extern char* pg_get_triggerdef_string(Oid trigid); extern Datum pg_get_constraintdef(PG_FUNCTION_ARGS); extern Datum pg_get_constraintdef_ext(PG_FUNCTION_ARGS); extern char* pg_get_constraintdef_string(Oid constraintId); +extern char* pg_get_constraintdef_part_string(Oid constraintId); +extern char *pg_get_partkeydef_string(Relation relation); +extern void pg_get_indexdef_partitions(Oid indexrelid, Form_pg_index idxrec, bool showTblSpc, + StringInfoData *buf, bool dumpSchemaOnly, bool showPartitionLocal, bool showSubPartitionLocal); extern Datum pg_get_expr(PG_FUNCTION_ARGS); extern Datum pg_get_expr_ext(PG_FUNCTION_ARGS); extern Datum pg_get_userbyid(PG_FUNCTION_ARGS); @@ -909,7 +914,6 @@ extern void get_query_def(Query* query, StringInfo buf, List* parentnamespace, T bool qrw_phase = false, bool viewdef = false, bool is_fqs = false); extern char* deparse_create_sequence(Node* stmt, bool owned_by_none = false); extern char* deparse_alter_sequence(Node* stmt, bool owned_by_none = false); - #ifdef PGXC extern void get_hint_string(HintState* hstate, StringInfo buf); extern void deparse_query(Query* query, StringInfo buf, List* parentnamespace, bool finalise_aggs, bool sortgroup_colno, @@ -1874,7 +1878,7 @@ extern Datum dss_io_stat(PG_FUNCTION_ARGS); #endif extern char *pg_ultostr(char *str, uint32 value); extern char *pg_ultostr_zeropad(char *str, uint32 value, int32 minwidth); - +extern char *printTypmod(const char *typname, int32 typmod, Oid typmodout); /* float.cpp */ extern int float8_cmp_internal(float8 a, float8 b); diff --git a/src/include/utils/rel_gs.h b/src/include/utils/rel_gs.h index 130b1b5f8..2aea5ded9 100644 --- a/src/include/utils/rel_gs.h +++ b/src/include/utils/rel_gs.h @@ -314,6 +314,10 @@ static inline int8 get_indexsplit_from_reloptions(bytea *reloptions, Oid amoid) #define RelationGetCUDescRelId(r) ((r)->rd_rel->relcudescrelid) #define RelationGetDeltaRelId(r) ((r)->rd_rel->reldeltarelid) +#define IS_BTREE(r) 
((r)->rd_rel->relam == BTREE_AM_OID || \ + (r)->rd_rel->relam == CBTREE_AM_OID || \ + (r)->rd_rel->relam == UBTREE_AM_OID) + /* * @Description: get columnar relation's compress level. * @IN rel: columnar heap relation diff --git a/src/test/regress/expected/replication_ddl_publication.out b/src/test/regress/expected/replication_ddl_publication.out new file mode 100644 index 000000000..1dca9cd41 --- /dev/null +++ b/src/test/regress/expected/replication_ddl_publication.out @@ -0,0 +1,93 @@ +create schema ddl_publication; +create table ddl_publication.t1 (a int); +create table ddl_publication.t2 (a int); +create table ddl_publication.t3 (a int); +create publication regress_pub1 for all tables with (publish='insert,update', ddl='all'); +create publication regress_pub2 for table ddl_publication.t1, ddl_publication.t2 with (publish='insert,update', ddl='table'); +create publication regress_pub3 for table ddl_publication.t1, ddl_publication.t2, ddl_publication.t3; +select pubname, puballtables, pubinsert, pubupdate, pubdelete, pubddl_decode(pubddl) from pg_publication order by pubname; + pubname | puballtables | pubinsert | pubupdate | pubdelete | pubddl_decode +--------------+--------------+-----------+-----------+-----------+--------------- + regress_pub1 | t | t | t | f | all + regress_pub2 | f | t | t | f | table + regress_pub3 | f | t | t | t | none +(3 rows) + +select p.pubname, pubddl_decode(p.pubddl), t.evtfoid from pg_event_trigger t, pg_publication p where t.evtname='pg_deparse_trig_ddl_command_end_'|| p.oid and p.pubname ~ 'regress_pub' order by p.pubname; + pubname | pubddl_decode | evtfoid +--------------+---------------+--------- + regress_pub1 | all | 4642 + regress_pub2 | table | 4642 +(2 rows) + +drop publication regress_pub1, regress_pub2, regress_pub3; +create publication regress_pub for all tables with (ddl='all'); +SELECT * FROM pg_create_logical_replication_slot('test_decoding_slot', 'test_decoding'); + slotname | xlog_position +--------------------+--------------- +--? test_decoding_slot | .* +(1 row) + +begin; +create table ddl_publication.t4 (a int, b text); +create index ddl_publication.idx_t4_1 on ddl_publication.t4 (a); +insert into ddl_publication.t4 values (1, 'asd'), (2, 'asdd'); +commit; +begin; +create table ddl_publication.t5 (a int, b text); +drop table ddl_publication.t4; +rollback; +select data from pg_logical_slot_get_changes('test_decoding_slot', NULL, NULL); +--?.*data.* +--?.* +--? BEGIN .* +--? message: prefix deparse, relid .*, cmdtype: Simple, sz: .* content: {"fmt": "CREATE %{persistence}s TABLE %{if_not_exists}s %{identity}D (%{table_elements:, }s) WITH (%{with:, }s)", "with": [{"fmt": "%{label}s = %{value}L", "label": {"fmt": "%{label}I", "label": "orientation"}, "value": "row"}, {"fmt": "%{label}s = %{value}L", "label": {"fmt": "%{label}I", "label": "compression"}, "value": "no"}], "myowner": ".*", "identity": {"objname": "t4", "schemaname": "ddl_publication"}, "persistence": "", "if_not_exists": "", "table_elements": [{"fmt": "%{name}I %{coltype}T", "name": "a", "type": "column", "coltype": {"typmod": "", "typarray": false, "typename": "int4", "schemaname": "pg_catalog"}}, {"fmt": "%{name}I %{coltype}T COLLATE %{collate}D", "name": "b", "type": "column", "collate": {"objname": "default", "schemaname": "pg_catalog"}, "coltype": {"typmod": "", "typarray": false, "typename": "text", "schemaname": "pg_catalog"}}]}.*+ +--? 
decode to : CREATE TABLE ddl_publication.t4 (a pg_catalog.int4, b pg_catalog.text COLLATE pg_catalog."default") WITH (orientation = 'row', compression = 'no'), [owner .*] +--? message: prefix deparse, relid .*, cmdtype: Simple, sz: .* content: {"fmt": "CREATE %{unique}s INDEX %{concurrently}s %{name}I ON %{table}D USING %{index_am}s %{definition}s", "name": "idx_t4_1", "table": {"objname": "t4", "schemaname": "ddl_publication"}, "unique": "", "myowner": ".*", "index_am": "btree", "definition": "(a pg_catalog.int4_ops)", "concurrently": ""}.*+ +--? decode to : CREATE INDEX idx_t4_1 ON ddl_publication.t4 USING btree (a pg_catalog.int4_ops), [owner .*] + table ddl_publication.t4: INSERT: a[integer]:1 b[text]:'asd' + table ddl_publication.t4: INSERT: a[integer]:2 b[text]:'asdd' +--? COMMIT .* +(6 rows) + +begin; +insert into ddl_publication.t4 values (3, 'ddasd'), (4, 'asdd'); +drop table ddl_publication.t4; +commit; +select data from pg_logical_slot_get_changes('test_decoding_slot', NULL, NULL); +--?.*data.* +--?.* +--? BEGIN .* + table ddl_publication.t4: INSERT: a[integer]:3 b[text]:'ddasd' + table ddl_publication.t4: INSERT: a[integer]:4 b[text]:'asdd' +--? message: prefix deparse, relid .*, cmdtype: Drop table, sz: .* content: Drop table command start +--? message: prefix deparse, relid .*, cmdtype: Drop Table End, sz: .* content: {"fmt": "DROP %{objtype}s %{concurrently}s %{if_exists}s %{objidentity}s %{cascade}s", "cascade": "", "objtype": "table", "if_exists": "", "objidentity": "ddl_publication.t4", "concurrently": ""}.*+ + decode to : DROP table ddl_publication.t4 , [owner none] +--? COMMIT .* +(6 rows) + +select pg_drop_replication_slot('test_decoding_slot'); + pg_drop_replication_slot +-------------------------- + +(1 row) + +select pubname, puballtables, pubinsert, pubupdate, pubdelete, pubddl_decode(pubddl) from pg_publication order by pubname; + pubname | puballtables | pubinsert | pubupdate | pubdelete | pubddl_decode +-------------+--------------+-----------+-----------+-----------+--------------- + regress_pub | t | t | t | t | all +(1 row) + +alter publication regress_pub set (ddl='table'); +select pubname, puballtables, pubinsert, pubupdate, pubdelete, pubddl_decode(pubddl) from pg_publication order by pubname; + pubname | puballtables | pubinsert | pubupdate | pubdelete | pubddl_decode +-------------+--------------+-----------+-----------+-----------+--------------- + regress_pub | t | t | t | t | table +(1 row) + +drop publication regress_pub; +drop schema ddl_publication cascade; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to table ddl_publication.t1 +drop cascades to table ddl_publication.t2 +drop cascades to table ddl_publication.t3 diff --git a/src/test/regress/output/publication.source b/src/test/regress/output/publication.source index f1655df1f..c7234d93c 100644 --- a/src/test/regress/output/publication.source +++ b/src/test/regress/output/publication.source @@ -200,8 +200,8 @@ select pubname, tablename from pg_publication_tables where tablename='testpub_tb --- drop publication DROP PUBLICATION testpub_foralltables_rename; select * from pg_publication where pubname='testpub_foralltables_rename'; - pubname | pubowner | puballtables | pubinsert | pubupdate | pubdelete ----------+----------+--------------+-----------+-----------+----------- + pubname | pubowner | puballtables | pubinsert | pubupdate | pubdelete | pubddl +---------+----------+--------------+-----------+-----------+-----------+-------- (0 rows) DROP PUBLICATION IF EXISTS 
testpub_nonexists; diff --git a/src/test/regress/parallel_schedule0 b/src/test/regress/parallel_schedule0 index 225e5a09e..2200e3d85 100644 --- a/src/test/regress/parallel_schedule0 +++ b/src/test/regress/parallel_schedule0 @@ -1031,6 +1031,7 @@ test: reindex_concurrently_partition_parallel # publication and subscription, we need to record audit log for them, so seperate them into two test group test: publication test: subscription +test: replication_ddl_publication test: fdw_audit test: gs_global_config_audit test: detail declare_multiple_variable @@ -1102,4 +1103,4 @@ test: enable_expr_fusion_flatten # test for on update timestamp and generated column test: on_update_session1 on_update_session2 -test: ts_gb18030_utf8 +test: ts_gb18030_utf8 \ No newline at end of file diff --git a/src/test/regress/sql/replication_ddl_publication.sql b/src/test/regress/sql/replication_ddl_publication.sql new file mode 100644 index 000000000..a63150ccb --- /dev/null +++ b/src/test/regress/sql/replication_ddl_publication.sql @@ -0,0 +1,39 @@ +create schema ddl_publication; +create table ddl_publication.t1 (a int); +create table ddl_publication.t2 (a int); +create table ddl_publication.t3 (a int); + +create publication regress_pub1 for all tables with (publish='insert,update', ddl='all'); +create publication regress_pub2 for table ddl_publication.t1, ddl_publication.t2 with (publish='insert,update', ddl='table'); +create publication regress_pub3 for table ddl_publication.t1, ddl_publication.t2, ddl_publication.t3; +select pubname, puballtables, pubinsert, pubupdate, pubdelete, pubddl_decode(pubddl) from pg_publication order by pubname; +select p.pubname, pubddl_decode(p.pubddl), t.evtfoid from pg_event_trigger t, pg_publication p where t.evtname='pg_deparse_trig_ddl_command_end_'|| p.oid and p.pubname ~ 'regress_pub' order by p.pubname; + +drop publication regress_pub1, regress_pub2, regress_pub3; +create publication regress_pub for all tables with (ddl='all'); +SELECT * FROM pg_create_logical_replication_slot('test_decoding_slot', 'test_decoding'); +begin; +create table ddl_publication.t4 (a int, b text); +create index ddl_publication.idx_t4_1 on ddl_publication.t4 (a); +insert into ddl_publication.t4 values (1, 'asd'), (2, 'asdd'); +commit; + +begin; +create table ddl_publication.t5 (a int, b text); +drop table ddl_publication.t4; +rollback; + +select data from pg_logical_slot_get_changes('test_decoding_slot', NULL, NULL); + +begin; +insert into ddl_publication.t4 values (3, 'ddasd'), (4, 'asdd'); +drop table ddl_publication.t4; +commit; +select data from pg_logical_slot_get_changes('test_decoding_slot', NULL, NULL); + +select pg_drop_replication_slot('test_decoding_slot'); +select pubname, puballtables, pubinsert, pubupdate, pubdelete, pubddl_decode(pubddl) from pg_publication order by pubname; +alter publication regress_pub set (ddl='table'); +select pubname, puballtables, pubinsert, pubupdate, pubdelete, pubddl_decode(pubddl) from pg_publication order by pubname; +drop publication regress_pub; +drop schema ddl_publication cascade; diff --git a/src/test/subscription/env_utils.sh b/src/test/subscription/env_utils.sh index c0fa037d6..071299b91 100644 --- a/src/test/subscription/env_utils.sh +++ b/src/test/subscription/env_utils.sh @@ -35,6 +35,35 @@ function exec_sql(){ fi } +function exec_sql_with_user() { + local sql_user=$username + if [ -n "$test_username" ]; then + sql_user=$test_username + fi + result=$(gsql -U $sql_user -W $passwd -d $1 -p $2 -Atq -c "$3") + if [ "$result" != "" ]; then + echo 
"$result" + fi +} + +function exec_sql_file() { + local sql_user=$username + if [ -n "$test_username" ]; then + sql_user=$test_username + fi + + gsql -U $sql_user -W $passwd -d $1 -p $2 -Atq -f "$3" +} + +function exec_dump_db() { + echo "dump $1 to $3.." + if [ $# -eq 4 ]; then + gs_dump $1 -f $3 -p $2 + else + gs_dump $1 -f $3 -p $2 --no-publications --no-subscriptions + fi +} + function wait_for_subscription_sync(){ max_attempts=20 attempt=0 diff --git a/src/test/subscription/pubsub.py b/src/test/subscription/pubsub.py index 1f7ee145c..982eb86d4 100644 --- a/src/test/subscription/pubsub.py +++ b/src/test/subscription/pubsub.py @@ -242,13 +242,14 @@ def main(): pub_process = Process(target=real_run, args=(pub_ptdb,run_type,)) pub_process.start() + if pub_process.is_alive(): + pub_process.join() + sub_port_arr = [sub_node1_port, sub_node2_port, sub_node3_port]; sub_ptdb = Pterodb(datanode_num, sub_port_arr, data_dir, "sub_datanode"); sub_process = Process(target=real_run, args=(sub_ptdb,run_type,)) sub_process.start() - if pub_process.is_alive(): - pub_process.join() if sub_process.is_alive(): sub_process.join() diff --git a/src/test/subscription/run_check.sh b/src/test/subscription/run_check.sh index 90f1a8830..c3b324cfe 100644 --- a/src/test/subscription/run_check.sh +++ b/src/test/subscription/run_check.sh @@ -29,7 +29,7 @@ for line in `cat $1/schedule | grep -v ^#` do printf "%-50s" $line starttime=`date +"%Y-%m-%d %H:%M:%S"` - sh $1/testcase/$line.sh $1 $2 > $1/results/$line.log 2>&1 + sh $1/testcase/$line.sh $1 $2 $3 > $1/results/$line.log 2>&1 count=`expr $count + 1` endtime=`date +"%Y-%m-%d %H:%M:%S"` starttime1=`date -d "$starttime" +%s` diff --git a/src/test/subscription/schedule b/src/test/subscription/schedule index 0635e0941..77099239b 100644 --- a/src/test/subscription/schedule +++ b/src/test/subscription/schedule @@ -14,4 +14,6 @@ change_wal_level skiplsn disable pub_subconflict -bugs \ No newline at end of file +bugs +ddl_replication +dump \ No newline at end of file diff --git a/src/test/subscription/testcase/ddl_replication.sh b/src/test/subscription/testcase/ddl_replication.sh new file mode 100644 index 000000000..016c12d43 --- /dev/null +++ b/src/test/subscription/testcase/ddl_replication.sh @@ -0,0 +1,115 @@ +#!/bin/bash + +source $1/env_utils.sh $1 $2 $3 +case_db="ddl_repldb" +subscription_dir=$1 +base_port=$2 +sql_dir="${subscription_dir}/testcase/ddl_replication_sql/${dbcompatibility}" +results_dir="${subscription_dir}/results" +dump_result_dir="${results_dir}/dump_results" +mkdir -p $dump_result_dir + +function case_setup() { + local case_use_db=$1 + echo "create $case_use_db database and tables" + + exec_sql_with_user $db $pub_node1_port "CREATE DATABASE $case_use_db" + exec_sql_with_user $db $sub_node1_port "CREATE DATABASE $case_use_db" + + if [ -f "${case_use_db}.setup" ]; then + echo "execute setup ${case_use_db}.setup" + sh ${case_use_db}.setup $subscription_dir $base_port $case_use_db + fi + + # Setup logical replication + echo "create publication and subscription" + let ppp=pub_node1_port+1 + publisher_connstr="port=$ppp host=$g_local_ip dbname=$case_use_db user=$username password=$passwd" + exec_sql $case_use_db $pub_node1_port "CREATE PUBLICATION ${case_use_db}_pub FOR ALL TABLES with (ddl='all');" + + exec_sql $case_use_db $sub_node1_port "CREATE SUBSCRIPTION ${case_use_db}_sub CONNECTION '$publisher_connstr' PUBLICATION ${case_use_db}_pub" + + wait_for_subscription_sync $case_use_db $sub_node1_port "${case_use_db}_sub" +} + +function case_teardown() { 
+    local case_use_db=$1
+
+    exec_sql $case_use_db $sub_node1_port "DROP SUBSCRIPTION IF EXISTS ${case_use_db}_sub"
+    exec_sql $case_use_db $pub_node1_port "DROP PUBLICATION IF EXISTS ${case_use_db}_pub"
+
+    exec_sql $db $sub_node1_port "DROP DATABASE $case_use_db"
+    exec_sql $db $pub_node1_port "DROP DATABASE $case_use_db"
+
+    echo "$case_use_db tear down"
+}
+
+function run_test() {
+    local case_dir=$1
+    local totalerr=0
+
+    cd $case_dir
+    for testcase in $(ls -1 *.sql)
+    do
+        echo "run $testcase"
+        local err=0
+        cur_case=${testcase%%.sql}
+
+        case_setup $cur_case
+
+        exec_sql_file $cur_case $pub_node1_port "$case_dir/$testcase"
+
+        wait_for_catchup $cur_case $pub_node1_port "${cur_case}_sub"
+
+        if [ -f "${cur_case}.teardown" ]; then
+            echo "execute teardown ${cur_case}.teardown"
+            sh ${cur_case}.teardown $subscription_dir $base_port $cur_case
+        fi
+
+        # Dump both ends and compare: replication is considered correct when
+        # the publisher and subscriber dumps match, or when the only
+        # differences are the expected ones recorded in acceptable_diff/<case>.diff.
+        exec_dump_db $cur_case $pub_node1_port "$dump_result_dir/$cur_case.dump.pub"
+        exec_dump_db $cur_case $sub_node1_port "$dump_result_dir/$cur_case.dump.sub"
+
+        diff $dump_result_dir/$cur_case.dump.pub $dump_result_dir/$cur_case.dump.sub > ${results_dir}/$cur_case.dump.diff
+
+        if [ -s ${results_dir}/$cur_case.dump.diff ]; then
+            if [ -f $sql_dir/acceptable_diff/$cur_case.diff ]; then
+                diff ${results_dir}/$cur_case.dump.diff $sql_dir/acceptable_diff/$cur_case.diff > ${results_dir}/$cur_case.acceptable.diff
+                if [ -s ${results_dir}/$cur_case.acceptable.diff ]; then
+                    err=1
+                else
+                    rm ${results_dir}/$cur_case.acceptable.diff
+                    rm ${results_dir}/$cur_case.dump.diff
+                    err=0
+                fi
+            else
+                err=1
+            fi
+
+            if [ $err -eq 1 ]; then
+                echo "$cur_case dump compare failed, see ${results_dir}/$cur_case.dump.diff"
+                totalerr=1
+            fi
+        else
+            rm ${results_dir}/$cur_case.dump.diff
+        fi
+
+        if [ $err -eq 0 ]; then
+            case_teardown $cur_case
+        fi
+
+    done
+
+    if [ $totalerr -eq 1 ]; then
+        echo "$failed_keyword when comparing publisher and subscriber dumps after DDL replication"
+    fi
+
+}
+
+exec_sql $db $sub_node1_port "CREATE USER ddl_test_user PASSWORD '$passwd'"
+exec_sql $db $pub_node1_port "CREATE USER ddl_test_user PASSWORD '$passwd'"
+exec_sql $db $sub_node1_port "ALTER ROLE ddl_test_user sysadmin"
+exec_sql $db $pub_node1_port "ALTER ROLE ddl_test_user sysadmin"
+
+export test_username="ddl_test_user"
+
+run_test $sql_dir
\ No newline at end of file
diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/acceptable_diff/create_table.diff b/src/test/subscription/testcase/ddl_replication_sql/A/acceptable_diff/create_table.diff
new file mode 100644
index 000000000..8733e1f3c
--- /dev/null
+++ b/src/test/subscription/testcase/ddl_replication_sql/A/acceptable_diff/create_table.diff
@@ -0,0 +1,25 @@
+6105,6128d6104
+< -- Name: tab_foreign_child_col_a_fkey; Type: FK CONSTRAINT; Schema: public; Owner: ddl_test_user
+< --
+<
+< ALTER TABLE tab_foreign_child
+<     ADD CONSTRAINT tab_foreign_child_col_a_fkey FOREIGN KEY (col_a, col_b) REFERENCES tab_foreign_parent(col_1, col_2);
+<
+<
+< --
+< -- Name: tab_product2_currency_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: ddl_test_user
+< --
+<
+< ALTER TABLE tab_product2
+<     ADD CONSTRAINT tab_product2_currency_id_fkey FOREIGN KEY (currency_id) REFERENCES tab_currency(col_id);
+<
+<
+< --
+< -- Name: tab_product_currency_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: ddl_test_user
+< --
+<
+< ALTER TABLE tab_product
+<     ADD CONSTRAINT tab_product_currency_id_fkey FOREIGN KEY (currency_id) REFERENCES tab_currency(col_id);
+<
+<
+< --
diff --git
a/src/test/subscription/testcase/ddl_replication_sql/A/create_table.setup b/src/test/subscription/testcase/ddl_replication_sql/A/create_table.setup
new file mode 100644
index 000000000..e403c84b4
--- /dev/null
+++ b/src/test/subscription/testcase/ddl_replication_sql/A/create_table.setup
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+source $1/env_utils.sh $1 $2
+subscription_dir=$1
+case_use_db=$3
+
+exec_sql_with_user $case_use_db $pub_node1_port "CREATE SCHEMA \"SCHEMA_TEST\""
+exec_sql_with_user $case_use_db $sub_node1_port "CREATE SCHEMA \"SCHEMA_TEST\""
+
+
diff --git a/src/test/subscription/testcase/ddl_replication_sql/A/create_table.sql b/src/test/subscription/testcase/ddl_replication_sql/A/create_table.sql
new file mode 100644
index 000000000..39fa0676e
--- /dev/null
+++ b/src/test/subscription/testcase/ddl_replication_sql/A/create_table.sql
@@ -0,0 +1,822 @@
+--
+-- CREATE_TABLE
+--
+--
+-- CLASS DEFINITIONS
+--
+CREATE TABLE src(a int) with(autovacuum_enabled = off);
+insert into src values(1);
+
+CREATE TABLE hobbies_r (
+    name text,
+    person text
+) with(autovacuum_enabled = off);
+
+CREATE TABLE equipment_r (
+    name text,
+    hobby text
+) with(autovacuum_enabled = off);
+
+CREATE TABLE onek (
+    unique1 int4,
+    unique2 int4,
+    two int4,
+    four int4,
+    ten int4,
+    twenty int4,
+    hundred int4,
+    thousand int4,
+    twothousand int4,
+    fivethous int4,
+    tenthous int4,
+    odd int4,
+    even int4,
+    stringu1 name,
+    stringu2 name,
+    string4 name
+) with(autovacuum_enabled = off);
+
+CREATE TABLE tenk1 (
+    unique1 int4,
+    unique2 int4,
+    two int4,
+    four int4,
+    ten int4,
+    twenty int4,
+    hundred int4,
+    thousand int4,
+    twothousand int4,
+    fivethous int4,
+    tenthous int4,
+    odd int4,
+    even int4,
+    stringu1 name,
+    stringu2 name,
+    string4 name
+) with(autovacuum_enabled = off);
+
+CREATE TABLE tenk2 (
+    unique1 int4,
+    unique2 int4,
+    two int4,
+    four int4,
+    ten int4,
+    twenty int4,
+    hundred int4,
+    thousand int4,
+    twothousand int4,
+    fivethous int4,
+    tenthous int4,
+    odd int4,
+    even int4,
+    stringu1 name,
+    stringu2 name,
+    string4 name
+) with(autovacuum_enabled = off);
+
+
+CREATE TABLE person (
+    name text,
+    age int4,
+    location point
+);
+
+
+CREATE TABLE emp (
+    name text,
+    age int4,
+    location point,
+    salary int4,
+    manager name
+) with(autovacuum_enabled = off);
+
+
+CREATE TABLE student (
+    name text,
+    age int4,
+    location point,
+    gpa float8
+);
+
+
+CREATE TABLE stud_emp (
+    name text,
+    age int4,
+    location point,
+    salary int4,
+    manager name,
+    gpa float8,
+    percent int4
+) with(autovacuum_enabled = off);
+
+CREATE TABLE city (
+    name name,
+    location box,
+    budget city_budget
+) with(autovacuum_enabled = off);
+
+CREATE TABLE dept (
+    dname name,
+    mgrname text
+) with(autovacuum_enabled = off);
+
+CREATE TABLE slow_emp4000 (
+    home_base box
+) with(autovacuum_enabled = off);
+
+CREATE TABLE fast_emp4000 (
+    home_base box
+) with(autovacuum_enabled = off);
+
+CREATE TABLE road (
+    name text,
+    thepath path
+);
+
+CREATE TABLE ihighway(
+    name text,
+    thepath path
+) with(autovacuum_enabled = off);
+
+CREATE TABLE shighway (
+    surface text,
+    name text,
+    thepath path
+) with(autovacuum_enabled = off);
+
+CREATE TABLE real_city (
+    pop int4,
+    cname text,
+    outline path
+) with(autovacuum_enabled = off);
+
+--
+-- test the "star" operators a bit more thoroughly -- this time,
+-- throw in lots of NULL fields...
+-- +-- a is the type root +-- b and c inherit from a (one-level single inheritance) +-- d inherits from b and c (two-level multiple inheritance) +-- e inherits from c (two-level single inheritance) +-- f inherits from e (three-level single inheritance) +-- +CREATE TABLE a_star ( + class char, + a int4 +) with(autovacuum_enabled = off); + +CREATE TABLE b_star ( + b text, + class char, + a int4 +) with(autovacuum_enabled = off); + +CREATE TABLE c_star ( + c name, + class char, + a int4 +) with(autovacuum_enabled = off); + +CREATE TABLE d_star ( + d float8, + b text, + class char, + a int4, + c name +) with(autovacuum_enabled = off); + +CREATE TABLE e_star ( + e int2, + c name, + class char, + a int4 +) with(autovacuum_enabled = off); + +CREATE TABLE f_star ( + f polygon, + e int2, + c name, + class char, + a int4 +) with(autovacuum_enabled = off); + +CREATE TABLE aggtest ( + a int2, + b float4 +) with(autovacuum_enabled = off); + +CREATE TABLE hash_i4_heap ( + seqno int4, + random int4 +) with(autovacuum_enabled = off); + +CREATE TABLE hash_name_heap ( + seqno int4, + random name +) with(autovacuum_enabled = off); + +CREATE TABLE hash_txt_heap ( + seqno int4, + random text +) with(autovacuum_enabled = off); + +-- PGXC: Here replication is used to ensure correct index creation +-- when a non-shippable expression is used. +-- PGXCTODO: this should be removed once global constraints are supported +CREATE TABLE hash_f8_heap ( + seqno int4, + random float8 +) with(autovacuum_enabled = off) DISTRIBUTE BY REPLICATION; + +-- don't include the hash_ovfl_heap stuff in the distribution +-- the data set is too large for what it's worth +-- +-- CREATE TABLE hash_ovfl_heap ( +-- x int4, +-- y int4 +-- ); + +CREATE TABLE bt_i4_heap ( + seqno int4, + random int4 +) with(autovacuum_enabled = off); + +CREATE TABLE bt_name_heap ( + seqno name, + random int4 +) with(autovacuum_enabled = off); + +CREATE TABLE bt_txt_heap ( + seqno text, + random int4 +); + +CREATE TABLE bt_f8_heap ( + seqno float8, + random int4 +) with(autovacuum_enabled = off); + +CREATE TABLE array_op_test ( + seqno int4, + i int4[], + t text[] +) with(autovacuum_enabled = off); + +CREATE TABLE array_index_op_test ( + seqno int4, + i int4[], + t text[] +) with(autovacuum_enabled = off); + +CREATE TABLE IF NOT EXISTS test_tsvector( + t text, + a tsvector +) distribute by replication; + +CREATE TABLE IF NOT EXISTS test_tsvector( + t text +) with(autovacuum_enabled = off); + +CREATE UNLOGGED TABLE unlogged1 (a int primary key); -- OK +INSERT INTO unlogged1 VALUES (42); +CREATE UNLOGGED TABLE public.unlogged2 (a int primary key); -- also OK +CREATE UNLOGGED TABLE pg_temp.unlogged3 (a int primary key); -- not OK +CREATE TABLE pg_temp.implicitly_temp (a int primary key); -- OK +CREATE TEMP TABLE explicitly_temp (a int primary key); -- also OK +CREATE TEMP TABLE pg_temp.doubly_temp (a int primary key); -- also OK +CREATE TEMP TABLE public.temp_to_perm (a int primary key); -- not OK +DROP TABLE unlogged1, public.unlogged2; + +-- +-- CREATE TABLE AS TEST CASE: Expect the column typemod info is not lost on DN +-- +CREATE TABLE hw_create_as_1test1(C_CHAR CHAR(102400)); +CREATE TABLE hw_create_as_1test2(C_CHAR) as SELECT C_CHAR FROM hw_create_as_1test1; +CREATE TABLE hw_create_as_1test3 (C_CHAR CHAR(102400)); + + +-- DROP TABLE hw_create_as_test2; +-- DROP TABLE hw_create_as_test3; +DROP TABLE hw_create_as_1test1; + +CREATE TABLE hw_create_as_2test1(C_INT int); +CREATE TABLE hw_create_as_2test2(C_INT) as SELECT C_INT FROM hw_create_as_2test1; +CREATE 
TABLE hw_create_as_2test3 (C_INT int);
+
+
+-- DROP TABLE hw_create_as_test3;
+-- DROP TABLE hw_create_as_test2;
+DROP TABLE hw_create_as_2test1;
+
+CREATE TABLE hw_create_as_3test1(COL1 numeric(10,2));
+CREATE TABLE hw_create_as_3test2(COL1) as SELECT COL1 FROM hw_create_as_3test1;
+CREATE TABLE hw_create_as_3test3 (COL1 numeric(10,2));
+
+-- DROP TABLE hw_create_as_test3;
+-- DROP TABLE hw_create_as_test2;
+DROP TABLE hw_create_as_3test1;
+
+CREATE TABLE hw_create_as_4test1(COL1 timestamp(1));
+CREATE TABLE hw_create_as_4test2(COL1) as SELECT COL1 FROM hw_create_as_4test1;
+CREATE TABLE hw_create_as_4test3 (COL1 timestamp(1));
+
+-- DROP TABLE hw_create_as_test3;
+-- DROP TABLE hw_create_as_test2;
+DROP TABLE hw_create_as_4test1;
+
+CREATE TABLE hw_create_as_5test1(COL1 int[2][2]);
+CREATE TABLE hw_create_as_5test2(COL1) as SELECT COL1 FROM hw_create_as_5test1;
+CREATE TABLE hw_create_as_5test3 (COL1 int[2][2]);
+
+-- DROP TABLE hw_create_as_test3;
+-- DROP TABLE hw_create_as_test2;
+DROP TABLE hw_create_as_5test1;
+
+create table hw_create_as_6test1(col1 int);
+insert into hw_create_as_6test1 values(1);
+insert into hw_create_as_6test1 values(2);
+create table hw_create_as_6test2 as select * from hw_create_as_6test1 with no data;
+
+-- drop table hw_create_as_test1;
+-- drop table hw_create_as_test2;
+-- drop table hw_create_as_test3;
+
+CREATE TABLE hw_create_as_7test1(COL1 int);
+insert into hw_create_as_7test1 values(1);
+insert into hw_create_as_7test1 values(2);
+CREATE TABLE hw_create_as_7test2 as SELECT '001' col1, COL1 col2 FROM hw_create_as_7test1;
+select * from hw_create_as_7test2 order by 1, 2;
+
+-- DROP TABLE hw_create_as_test2;
+-- DROP TABLE hw_create_as_test1;
+
+-- Zero column table is not supported any more.
+CREATE TABLE zero_column_table_test1() DISTRIBUTE BY REPLICATION;
+CREATE TABLE zero_column_table_test2() DISTRIBUTE BY ROUNDROBIN;
+
+CREATE TABLE zero_column_table_test3(a INT) DISTRIBUTE BY REPLICATION;
+DROP TABLE zero_column_table_test3;
+
+CREATE FOREIGN TABLE zero_column_table_test4() SERVER gsmpp_server OPTIONS(format 'csv', location 'gsfs://127.0.0.1:8900/lineitem.data', delimiter '|', mode 'normal');
+CREATE FOREIGN TABLE zero_column_table_test5(a INT) SERVER gsmpp_server OPTIONS(format 'csv', location 'gsfs://127.0.0.1:8900/lineitem.data', delimiter '|', mode 'normal');
+ALTER FOREIGN TABLE zero_column_table_test5 DROP COLUMN a;
+DROP FOREIGN TABLE zero_column_table_test5;
+
+CREATE TABLE zero_column_table_test6() with (orientation = column);
+CREATE TABLE zero_column_table_test7() with (orientation = column) DISTRIBUTE BY ROUNDROBIN;
+CREATE TABLE zero_column_table_test8(a INT);
+insert into zero_column_table_test8 values (2),(3),(4),(5);
+DROP TABLE zero_column_table_test6, zero_column_table_test8;
+
+--test create table of pg_node_tree type
+create table pg_node_tree_tbl1(id int,name pg_node_tree);
+create table pg_node_tree_tbl2 as select * from pg_type;
+create table pg_node_tree_tbl3 as select * from pg_class;
+
+-- test unreserved keywords for table name
+CREATE TABLE app(a int);
+CREATE TABLE movement(a int);
+CREATE TABLE pool(a int);
+CREATE TABLE profile(a int);
+CREATE TABLE resource(a int);
+CREATE TABLE store(a int);
+CREATE TABLE than(a int);
+CREATE TABLE workload(a int);
+
+DROP TABLE app;
+DROP TABLE movement;
+DROP TABLE pool;
+DROP TABLE profile;
+DROP TABLE resource;
+DROP TABLE store;
+DROP TABLE than;
+DROP TABLE workload;
+
+-- test orientation
+CREATE TABLE orientation_test_1 (c1 int) WITH (orientation = column);
+CREATE TABLE
orientation_test_2 (c1 int) WITH (orientation = 'column'); +CREATE TABLE orientation_test_3 (c1 int) WITH (orientation = "column"); +CREATE TABLE orientation_test_4 (c1 int) WITH (orientation = row); +CREATE TABLE orientation_test_5 (c1 int) WITH (orientation = 'row'); +CREATE TABLE orientation_test_6 (c1 int) WITH (orientation = "row"); + +DROP TABLE orientation_test_1; +DROP TABLE orientation_test_2; +DROP TABLE orientation_test_3; +DROP TABLE orientation_test_4; +DROP TABLE orientation_test_5; +DROP TABLE orientation_test_6; + +CREATE TABLE "SCHEMA_TEST"."Table" ( + column1 bigint, + column2 bigint +); + + + +CREATE TABLE range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (time_id) +( + PARTITION time_2008 VALUES LESS THAN ('2009-01-01'), + PARTITION time_2009 VALUES LESS THAN ('2010-01-01'), + PARTITION time_2010 VALUES LESS THAN ('2011-01-01'), + PARTITION time_2011 VALUES LESS THAN ('2012-01-01') +); +CREATE INDEX range_sales_idx1 ON range_sales(product_id) LOCAL; +CREATE INDEX range_sales_idx2 ON range_sales(time_id) GLOBAL; + + +CREATE TABLE interval_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (time_id) INTERVAL ('1 year') +( + PARTITION time_2008 VALUES LESS THAN ('2009-01-01'), + PARTITION time_2009 VALUES LESS THAN ('2010-01-01'), + PARTITION time_2010 VALUES LESS THAN ('2011-01-01') +); +CREATE INDEX interval_sales_idx1 ON interval_sales(product_id) LOCAL; +CREATE INDEX interval_sales_idx2 ON interval_sales(time_id) GLOBAL; + +INSERT INTO interval_sales VALUES (1,1,'2013-01-01','A',1,1,1); +INSERT INTO interval_sales VALUES (2,2,'2012-01-01','B',2,2,2); + + +CREATE TABLE list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY LIST (channel_id) +( + PARTITION channel1 VALUES ('0', '1', '2'), + PARTITION channel2 VALUES ('3', '4', '5'), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') +); +CREATE INDEX list_sales_idx1 ON list_sales(product_id) LOCAL; +CREATE INDEX list_sales_idx2 ON list_sales(time_id) GLOBAL; + + +CREATE TABLE list_range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY LIST (channel_id) SUBPARTITION BY RANGE (customer_id) +( + PARTITION channel1 VALUES ('0', '1', '2') + ( + SUBPARTITION channel1_customer1 VALUES LESS THAN (200), + SUBPARTITION channel1_customer2 VALUES LESS THAN (500), + SUBPARTITION channel1_customer3 VALUES LESS THAN (800), + SUBPARTITION channel1_customer4 VALUES LESS THAN (1200) + ), + PARTITION channel2 VALUES ('3', '4', '5') + ( + SUBPARTITION channel2_customer1 VALUES LESS THAN (500), + SUBPARTITION channel2_customer2 VALUES LESS THAN (MAXVALUE) + ), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') + ( + SUBPARTITION channel4_customer1 VALUES LESS THAN (1200) + ) +); +CREATE INDEX list_range_sales_idx ON list_range_sales(product_id) GLOBAL; + +-- test DATE TIME +CREATE TABLE tab_date_time ( + col_id INT4 PRIMARY KEY, + col_t1 TIME, + col_t2 TIME WITH TIME ZONE, + col_t3 DATE, + col_t4 
TIMESTAMP,
+    col_t5 TIMESTAMP WITH TIME ZONE,
+    col_t6 INTERVAL HOUR TO MINUTE
+) with(autovacuum_enabled = off);
+
+-- test column DEFAULT/SERIAL
+CREATE TABLE tab_column_default (
+    col_id INT PRIMARY KEY,
+    con_str TEXT NOT NULL DEFAULT 'THIS IS DEFAULT STRING',
+    col_time TIMESTAMP WITHOUT TIME ZONE DEFAULT now()
+) with(autovacuum_enabled = off);
+
+CREATE TABLE tab_column_serial (
+    col_id SERIAL PRIMARY KEY,
+    con_str TEXT NOT NULL DEFAULT 'THIS IS DEFAULT STRING',
+    col_time TIMESTAMP WITHOUT TIME ZONE DEFAULT now()
+) with(autovacuum_enabled = off);
+
+CREATE TABLE tab_column_large_serial (
+    col_id BIGSERIAL PRIMARY KEY,
+    con_str TEXT NOT NULL DEFAULT 'THIS IS DEFAULT STRING',
+    col_time TIMESTAMP WITHOUT TIME ZONE DEFAULT now()
+) with(autovacuum_enabled = off);
+
+-- test GENERATED
+CREATE TABLE tab_generated (
+    col_id INT4 PRIMARY KEY,
+    first_name text,
+    last_name text,
+    full_name text GENERATED ALWAYS AS (first_name || ' ' || last_name) STORED
+) with(autovacuum_enabled = off);
+
+-- test TEMPORARY TABLE
+-- ON COMMIT DROP is not supported
+CREATE TEMPORARY TABLE tab_temp_drop (col_id int primary key, col_name text) ON COMMIT DROP;
+CREATE GLOBAL TEMPORARY TABLE tab_temp_drop2 (col_id int primary key, col_name text) ON COMMIT DROP;
+CREATE TEMPORARY TABLE tab_temp_delete (col_id int primary key, col_name text) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMPORARY TABLE tab_temp_delete2 (col_id int primary key, col_name text) ON COMMIT DELETE ROWS;
+CREATE TEMPORARY TABLE tab_temp_preserve (col_id int primary key, col_name text) ON COMMIT PRESERVE ROWS;
+CREATE GLOBAL TEMPORARY TABLE tab_temp_preserve2 (col_id int primary key, col_name text) ON COMMIT PRESERVE ROWS;
+
+-- test UNIQUE / FOREIGN KEY / EXCLUDE constraints
+CREATE TABLE tab_unique (col_id INT4 PRIMARY KEY, name text UNIQUE NOT NULL) with(autovacuum_enabled = off);
+CREATE TABLE tab_currency (col_id INT4 PRIMARY KEY, shortcut CHAR(3)) with(autovacuum_enabled = off);
+CREATE TABLE tab_product (col_id INT4 PRIMARY KEY, name TEXT, currency_id INT REFERENCES tab_currency (col_id)) with(autovacuum_enabled = off);
+CREATE TABLE tab_product2 (col_id INT4 PRIMARY KEY, name TEXT, currency_id INT REFERENCES tab_currency (col_id)) with(autovacuum_enabled = off);
+CREATE TABLE tab_foreign_parent(
+    col_1 INT NOT NULL,
+    col_2 INT,
+    col_3 TEXT not null default 'THIS IS DEFAULT STRING',
+CONSTRAINT uk_tbl_unique_col_1_col_2 UNIQUE(col_1,col_2)
+) with(autovacuum_enabled = off);
+CREATE TABLE tab_foreign_child(
+    col_id INT PRIMARY KEY,
+    col_a INT,
+    col_b INT,
+    FOREIGN KEY (col_a, col_b) REFERENCES tab_foreign_parent (col_1,col_2)
+) with(autovacuum_enabled = off);
+-- EXCLUDE is not supported
+CREATE TABLE t_exclude (
+    col_id INT PRIMARY KEY,
+    name TEXT,
+    age INT,
+    addresses CHAR(50),
+    salary REAL,
+    EXCLUDE USING gist (name WITH =, age WITH <>)
+) with(autovacuum_enabled = off);
+
+--
+-- CREATE_INDEX
+-- Create ancillary data structures (i.e.
indices) +-- + +-- +-- BTREE +-- +CREATE INDEX onek_unique1 ON onek USING btree(unique1 int4_ops); + +CREATE INDEX onek_unique2 ON onek USING btree(unique2 int4_ops); + +CREATE INDEX onek_hundred ON onek USING btree(hundred int4_ops); + +CREATE INDEX onek_stringu1 ON onek USING btree(stringu1 name_ops); + +CREATE INDEX tenk1_unique1 ON tenk1 USING btree(unique1 int4_ops); + +CREATE INDEX tenk1_unique2 ON tenk1 USING btree(unique2 int4_ops); + +CREATE INDEX tenk1_hundred ON tenk1 USING btree(hundred int4_ops); + +CREATE INDEX tenk1_thous_tenthous ON tenk1 (thousand, tenthous); + +CREATE INDEX tenk2_unique1 ON tenk2 USING btree(unique1 int4_ops); + +CREATE INDEX tenk2_unique2 ON tenk2 USING btree(unique2 int4_ops); + +CREATE INDEX tenk2_hundred ON tenk2 USING btree(hundred int4_ops); + +CREATE INDEX rix ON road USING btree (name text_ops); + +CREATE INDEX iix ON ihighway USING btree (name text_ops); + +CREATE INDEX six ON shighway USING btree (name text_ops); + + + +-- +-- BTREE ascending/descending cases +-- +-- we load int4/text from pure descending data (each key is a new +-- low key) and name/f8 from pure ascending data (each key is a new +-- high key). we had a bug where new low keys would sometimes be +-- "lost". +-- +CREATE INDEX bt_i4_index ON bt_i4_heap USING btree (seqno int4_ops); + +CREATE INDEX bt_name_index ON bt_name_heap USING btree (seqno name_ops); + +CREATE INDEX bt_txt_index ON bt_txt_heap USING btree (seqno text_ops); + +CREATE INDEX bt_f8_index ON bt_f8_heap USING btree (seqno float8_ops); + +-- +-- BTREE partial indices +-- +CREATE INDEX onek2_u1_prtl ON onek2 USING btree(unique1 int4_ops) + where unique1 < 20 or unique1 > 980; + +CREATE INDEX onek2_u2_prtl ON onek2 USING btree(unique2 int4_ops) + where stringu1 < 'B'; + +CREATE INDEX onek2_stu1_prtl ON onek2 USING btree(stringu1 name_ops) + where onek2.stringu1 >= 'J' and onek2.stringu1 < 'K'; + +-- +-- GiST (rtree-equivalent opclasses only) +-- +CREATE INDEX grect2ind ON fast_emp4000 USING gist (home_base); + +CREATE INDEX gpolygonind ON polygon_tbl USING gist (f1); + +CREATE INDEX gcircleind ON circle_tbl USING gist (f1); + +CREATE TABLE POINT_TBL(f1 point); + +INSERT INTO POINT_TBL(f1) VALUES ('(0.0,0.0)'); + +INSERT INTO POINT_TBL(f1) VALUES ('(-10.0,0.0)'); + +INSERT INTO POINT_TBL(f1) VALUES ('(-3.0,4.0)'); + +INSERT INTO POINT_TBL(f1) VALUES ('(5.1, 34.5)'); + +INSERT INTO POINT_TBL(f1) VALUES ('(-5.0,-12.0)'); +INSERT INTO POINT_TBL(f1) VALUES (NULL); + +CREATE INDEX gpointind ON point_tbl USING gist (f1); + +CREATE TEMP TABLE gpolygon_tbl AS + SELECT polygon(home_base) AS f1 FROM slow_emp4000; +INSERT INTO gpolygon_tbl VALUES ( '(1000,0,0,1000)' ); +INSERT INTO gpolygon_tbl VALUES ( '(0,1000,1000,1000)' ); + +CREATE TEMP TABLE gcircle_tbl AS + SELECT circle(home_base) AS f1 FROM slow_emp4000; + +CREATE INDEX ggpolygonind ON gpolygon_tbl USING gist (f1); + +CREATE INDEX ggcircleind ON gcircle_tbl USING gist (f1); + +-- +-- SP-GiST +-- + +CREATE TABLE quad_point_tbl AS + SELECT point(unique1,unique2) AS p FROM tenk1; + +INSERT INTO quad_point_tbl + SELECT '(333.0,400.0)'::point FROM generate_series(1,1000); + +INSERT INTO quad_point_tbl VALUES (NULL), (NULL), (NULL); + +CREATE INDEX sp_quad_ind ON quad_point_tbl USING spgist (p); + +CREATE TABLE kd_point_tbl AS SELECT * FROM quad_point_tbl; + +CREATE INDEX sp_kd_ind ON kd_point_tbl USING spgist (p kd_point_ops); + +CREATE TABLE suffix_text_tbl AS + SELECT name AS t FROM road WHERE name !~ '^[0-9]'; + +INSERT INTO suffix_text_tbl + SELECT 'P0123456789abcdef' 
FROM generate_series(1,1000);
+INSERT INTO suffix_text_tbl VALUES ('P0123456789abcde');
+INSERT INTO suffix_text_tbl VALUES ('P0123456789abcdefF');
+
+CREATE INDEX sp_suff_ind ON suffix_text_tbl USING spgist (t);
+
+CREATE INDEX intarrayidx ON array_index_op_test USING gin (i);
+
+CREATE INDEX textarrayidx ON array_index_op_test USING gin (t);
+
+DROP INDEX intarrayidx, textarrayidx;
+
+CREATE INDEX botharrayidx ON array_index_op_test USING gin (i, t);
+
+
+--
+-- HASH
+--
+CREATE INDEX hash_i4_index ON hash_i4_heap USING hash (random int4_ops);
+
+CREATE INDEX hash_name_index ON hash_name_heap USING hash (random name_ops);
+
+CREATE INDEX hash_txt_index ON hash_txt_heap USING hash (random text_ops);
+
+CREATE INDEX hash_f8_index ON hash_f8_heap USING hash (random float8_ops);
+
+-- CREATE INDEX hash_ovfl_index ON hash_ovfl_heap USING hash (x int4_ops);
+
+
+--
+-- Test functional index
+--
+CREATE TABLE func_index_heap (f1 text, f2 text);
+CREATE UNIQUE INDEX func_index_index on func_index_heap (textcat(f1,f2));
+
+INSERT INTO func_index_heap VALUES('ABC','DEF');
+INSERT INTO func_index_heap VALUES('AB','CDEFG');
+INSERT INTO func_index_heap VALUES('QWE','RTY');
+-- this should fail because of unique index:
+INSERT INTO func_index_heap VALUES('ABCD', 'EF');
+-- but this shouldn't:
+INSERT INTO func_index_heap VALUES('QWERTY');
+
+
+--
+-- Same test, expressional index
+--
+DROP TABLE func_index_heap;
+CREATE TABLE func_index_heap (f1 text, f2 text);
+CREATE UNIQUE INDEX func_index_index on func_index_heap ((f1 || f2) text_ops);
+
+INSERT INTO func_index_heap VALUES('ABC','DEF');
+INSERT INTO func_index_heap VALUES('AB','CDEFG');
+INSERT INTO func_index_heap VALUES('QWE','RTY');
+-- this should fail because of unique index:
+INSERT INTO func_index_heap VALUES('ABCD', 'EF');
+-- but this shouldn't:
+INSERT INTO func_index_heap VALUES('QWERTY');
+
+--
+-- Also try building functional, expressional, and partial indexes on
+-- tables that already contain data.
+--
+create unique index hash_f8_index_1 on hash_f8_heap(abs(random));
+create unique index hash_f8_index_2 on hash_f8_heap((seqno + 1), random);
+create unique index hash_f8_index_3 on hash_f8_heap(random) where seqno > 1000;
+
+--
+-- Try some concurrent index builds
+--
+-- Unfortunately this only tests about half the code paths because there are
+-- no concurrent updates happening to the table at the same time.
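+-- (CREATE INDEX CONCURRENTLY performs its build in multiple internal
+-- transactions, which is also why the attempt inside an explicit
+-- transaction block further down is expected to fail.)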
+
+CREATE TABLE concur_heap (f1 text, f2 text);
+-- empty table
+CREATE INDEX CONCURRENTLY concur_index1 ON concur_heap(f2,f1);
+INSERT INTO concur_heap VALUES ('a','b');
+INSERT INTO concur_heap VALUES ('b','b');
+-- unique index
+CREATE UNIQUE INDEX CONCURRENTLY concur_index2 ON concur_heap(f1);
+-- check if constraint is set up properly to be enforced
+INSERT INTO concur_heap VALUES ('b','x');
+-- check if constraint is enforced properly at build time
+CREATE UNIQUE INDEX CONCURRENTLY concur_index3 ON concur_heap(f2);
+-- test that expression indexes and partial indexes work concurrently
+CREATE INDEX CONCURRENTLY concur_index4 on concur_heap(f2) WHERE f1='a';
+CREATE INDEX CONCURRENTLY concur_index5 on concur_heap(f2) WHERE f1='x';
+-- here we also check that you can default the index name
+CREATE INDEX CONCURRENTLY on concur_heap((f2||f1));
+
+-- You can't do a concurrent index build in a transaction
+BEGIN;
+CREATE INDEX CONCURRENTLY concur_index7 ON concur_heap(f1);
+COMMIT;
+
+-- But you can do a regular index build in a transaction
+BEGIN;
+CREATE INDEX std_index on concur_heap(f2);
+COMMIT;
+
+--
+-- Try some concurrent index drops
+--
+DROP INDEX CONCURRENTLY "concur_index2"; -- works
+DROP INDEX CONCURRENTLY IF EXISTS "concur_index2"; -- notice
+
+-- successes
+DROP INDEX CONCURRENTLY IF EXISTS "concur_index3";
+DROP INDEX CONCURRENTLY "concur_index4";
+DROP INDEX CONCURRENTLY "concur_index5";
+DROP INDEX CONCURRENTLY "concur_index1";
+DROP INDEX CONCURRENTLY "concur_heap_expr_idx";
diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/create_table.setup b/src/test/subscription/testcase/ddl_replication_sql/B/create_table.setup
new file mode 100644
index 000000000..e403c84b4
--- /dev/null
+++ b/src/test/subscription/testcase/ddl_replication_sql/B/create_table.setup
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+source $1/env_utils.sh $1 $2
+subscription_dir=$1
+case_use_db=$3
+
+exec_sql_with_user $case_use_db $pub_node1_port "CREATE SCHEMA \"SCHEMA_TEST\""
+exec_sql_with_user $case_use_db $sub_node1_port "CREATE SCHEMA \"SCHEMA_TEST\""
+
+
diff --git a/src/test/subscription/testcase/ddl_replication_sql/B/create_table.sql b/src/test/subscription/testcase/ddl_replication_sql/B/create_table.sql
new file mode 100644
index 000000000..60fb85d61
--- /dev/null
+++ b/src/test/subscription/testcase/ddl_replication_sql/B/create_table.sql
@@ -0,0 +1,94 @@
+CREATE TABLE tab_auto_increment (
+    col_id INT PRIMARY KEY AUTO_INCREMENT,
+    col_name TEXT
+) AUTO_INCREMENT = 100 COMMENT = 'this is auto increment table';
+
+CREATE TABLE tab_constraint (
+    col_id INT PRIMARY KEY,
+    col_age INT,
+    CONSTRAINT check_age CHECK ( col_age > 18)
+);
+
+CREATE TABLE tab_on_update1 (
+    col_id INT,
+    col_name TEXT,
+    upd_time DATE DEFAULT CURRENT_DATE ON UPDATE CURRENT_DATE COMMENT 'update record date',
+    PRIMARY KEY (col_id)
+);
+
+CREATE TABLE tab_on_update2 (
+    col_id INT,
+    col_name TEXT,
+    upd_time TIME DEFAULT CURRENT_TIME ON UPDATE CURRENT_TIME COMMENT 'update record time',
+    PRIMARY KEY (col_id)
+);
+
+CREATE TABLE tab_on_update3 (
+    col_id INT,
+    col_name TEXT,
+    upd_time TIME DEFAULT CURRENT_TIME(2) ON UPDATE CURRENT_TIME(2) COMMENT 'update record time(2)',
+    PRIMARY KEY (col_id)
+);
+
+CREATE TABLE tab_on_update4 (
+    col_id INT,
+    col_name TEXT,
+    upd_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update record timestamp',
+    PRIMARY KEY (col_id)
+);
+
+CREATE TABLE tab_on_update5 (
+    col_id INT,
+    col_name TEXT,
+    upd_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP(2) ON UPDATE
CURRENT_TIMESTAMP(2) COMMENT 'update record timestamp(2)',
+    PRIMARY KEY (col_id)
+);
+
+CREATE TABLE tab_on_update6 (
+    col_id INT,
+    col_name TEXT,
+    upd_time TIMESTAMP ON UPDATE CURRENT_TIMESTAMP + 1 COMMENT 'update record timestamp + 1',
+    PRIMARY KEY (col_id)
+);
+
+CREATE TABLE tab_on_update7 (
+    col_id INT,
+    col_name TEXT,
+    upd_time TIMESTAMP ON UPDATE CURRENT_TIMESTAMP - 1 COMMENT 'update record timestamp - 1',
+    PRIMARY KEY (col_id)
+);
+
+CREATE TABLE tab_on_update8 (
+    col_id INT,
+    col_name TEXT,
+    upd_time TIMESTAMP ON UPDATE CURRENT_TIMESTAMP + '1 hour' COMMENT 'update record timestamp + 1 hour',
+    PRIMARY KEY (col_id)
+);
+
+CREATE TABLE tab_on_update9 (
+    col_id INT,
+    col_name TEXT,
+    upd_time TIME DEFAULT LOCALTIME ON UPDATE LOCALTIME COMMENT 'update record local time',
+    PRIMARY KEY (col_id)
+);
+
+CREATE TABLE tab_on_update10 (
+    col_id INT,
+    col_name TEXT,
+    upd_time TIME DEFAULT LOCALTIME(2) ON UPDATE LOCALTIME(2) COMMENT 'update record local time(2)',
+    PRIMARY KEY (col_id)
+);
+
+CREATE TABLE tab_on_update11 (
+    col_id INT,
+    col_name TEXT,
+    upd_time TIME DEFAULT LOCALTIMESTAMP ON UPDATE LOCALTIMESTAMP COMMENT 'update record local timestamp',
+    PRIMARY KEY (col_id)
+);
+
+CREATE TABLE tab_on_update12 (
+    col_id INT,
+    col_name TEXT,
+    upd_time TIME DEFAULT LOCALTIMESTAMP(2) ON UPDATE LOCALTIMESTAMP(2) COMMENT 'update record local timestamp(2)',
+    PRIMARY KEY (col_id)
+);
\ No newline at end of file
diff --git a/src/test/subscription/testcase/dump.sh b/src/test/subscription/testcase/dump.sh
new file mode 100644
index 000000000..bfa24f700
--- /dev/null
+++ b/src/test/subscription/testcase/dump.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+
+source $1/env_utils.sh $1 $2 $3
+
+case_db="dump_db"
+subscription_dir=$1
+dump_expected_dir="${subscription_dir}/testcase/dump_expected"
+results_dir="${subscription_dir}/results"
+dump_result_dir="${results_dir}/dump_results"
+mkdir -p $dump_result_dir
+
+function test_1() {
+    local pub_ddl=$1
+    echo "create database and tables."
+    exec_sql $db $pub_node1_port "CREATE DATABASE $case_db"
+    exec_sql $db $sub_node1_port "CREATE DATABASE $case_db"
+
+    echo "create publication and subscription."
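+    # The publication's ddl option controls which deparsed DDL is published:
+    # 'all' covers every supported command, while 'table' restricts
+    # replication to table-related DDL (compare the pubddl_decode() output
+    # checked in the replication_ddl_publication regression test).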
+    publisher_connstr="port=$pub_node1_port host=$g_local_ip dbname=$case_db user=$username password=$passwd"
+
+    if [ "${pub_ddl}" = "all" ]; then
+        exec_sql $case_db $pub_node1_port "CREATE PUBLICATION mypub FOR ALL TABLES WITH (ddl='all')"
+    else
+        exec_sql $case_db $pub_node1_port "CREATE PUBLICATION mypub FOR ALL TABLES WITH (ddl='table')"
+    fi
+
+    exec_sql $case_db $sub_node1_port "CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr' PUBLICATION mypub"
+
+    # Wait for initial sync to finish
+    wait_for_subscription_sync $case_db $sub_node1_port
+
+    exec_sql $case_db $pub_node1_port "CREATE TABLE test1 (a int, b text)"
+
+    exec_sql $case_db $pub_node1_port "INSERT INTO test1 (a, b) VALUES (1, 'one'), (2, 'two')"
+
+    wait_for_catchup $case_db $pub_node1_port "mysub"
+
+    if [ "$(exec_sql $case_db $sub_node1_port "SELECT a, b FROM test1")" = "1|one
+2|two" ]; then
+        echo "check initial data replicated to subscriber success"
+    else
+        echo "$failed_keyword when initial data replicated to subscriber"
+        exit 1
+    fi
+
+    exec_dump_db $case_db $pub_node1_port "$dump_result_dir/dump_db_pub${pub_ddl}.pub" "all"
+    diff $dump_result_dir/dump_db_pub${pub_ddl}.pub $dump_expected_dir/dump_db_pub${pub_ddl}.pub > ${dump_result_dir}/dump_pub${pub_ddl}_pub.diff
+    if [ -s ${dump_result_dir}/dump_pub${pub_ddl}_pub.diff ]; then
+        echo "$failed_keyword when dump publication"
+        exit 1
+    else
+        echo "check publication dump data success"
+    fi
+
+    # The subscriber dump embeds the subscription conninfo; its encrypted
+    # password differs per run, so ignore lines matching that prefix.
+    exec_dump_db $case_db $sub_node1_port "$dump_result_dir/dump_db_pub${pub_ddl}.sub" "all"
+    diff --ignore-matching-lines='password=encryptOpt' $dump_result_dir/dump_db_pub${pub_ddl}.sub $dump_expected_dir/dump_db_pub${pub_ddl}.sub > ${dump_result_dir}/dump_pub${pub_ddl}_sub.diff
+    if [ -s ${dump_result_dir}/dump_pub${pub_ddl}_sub.diff ]; then
+        echo "$failed_keyword when dump subscription"
+        exit 1
+    else
+        echo "check subscription dump data success"
+    fi
+}
+
+function tear_down() {
+    exec_sql $case_db $sub_node1_port "DROP SUBSCRIPTION IF EXISTS mysub"
+    exec_sql $case_db $pub_node1_port "DROP PUBLICATION IF EXISTS mypub"
+
+    exec_sql $db $sub_node1_port "DROP DATABASE $case_db"
+    exec_sql $db $pub_node1_port "DROP DATABASE $case_db"
+
+    echo "tear down"
+}
+
+test_1 'all'
+tear_down
+test_1 'table'
+tear_down
\ No newline at end of file
diff --git a/src/test/subscription/testcase/dump_expected/dump_db_puball.pub b/src/test/subscription/testcase/dump_expected/dump_db_puball.pub
new file mode 100644
index 000000000..d7febb574
--- /dev/null
+++ b/src/test/subscription/testcase/dump_expected/dump_db_puball.pub
@@ -0,0 +1,64 @@
+--
+-- openGauss database dump
+--
+
+SET statement_timeout = 0;
+SET xmloption = content;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = on;
+SET check_function_bodies = false;
+SET session_replication_role = replica;
+SET client_min_messages = warning;
+
+SET search_path = public;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: test1; Type: TABLE; Schema: public; Owner: gauss; Tablespace:
+--
+
+CREATE TABLE test1 (
+    a integer,
+    b text
+)
+WITH (orientation=row, compression=no);
+
+
+ALTER TABLE public.test1 OWNER TO gauss;
+
+--
+-- Data for Name: test1; Type: TABLE DATA; Schema: public; Owner: gauss
+--
+
+COPY test1 (a, b) FROM stdin;
+1	one
+2	two
+\.
+; + +-- +-- Name: mypub; Type: PUBLICATION; Schema: -; Owner: gauss +-- + +CREATE PUBLICATION mypub FOR ALL TABLES WITH (publish = 'insert, update, delete',ddl = 'all'); + + +ALTER PUBLICATION mypub OWNER TO gauss; + +-- +-- Name: public; Type: ACL; Schema: -; Owner: gauss +-- + +REVOKE ALL ON SCHEMA public FROM PUBLIC; +REVOKE ALL ON SCHEMA public FROM gauss; +GRANT CREATE,USAGE ON SCHEMA public TO gauss; +GRANT USAGE ON SCHEMA public TO PUBLIC; + + +-- +-- openGauss database dump complete +-- + diff --git a/src/test/subscription/testcase/dump_expected/dump_db_puball.sub b/src/test/subscription/testcase/dump_expected/dump_db_puball.sub new file mode 100644 index 000000000..c5be0a5e4 --- /dev/null +++ b/src/test/subscription/testcase/dump_expected/dump_db_puball.sub @@ -0,0 +1,64 @@ +-- +-- openGauss database dump +-- + +SET statement_timeout = 0; +SET xmloption = content; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET session_replication_role = replica; +SET client_min_messages = warning; + +SET search_path = public; + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: test1; Type: TABLE; Schema: public; Owner: gauss; Tablespace: +-- + +CREATE TABLE test1 ( + a integer, + b text +) +WITH (orientation=row, compression=no); + + +ALTER TABLE public.test1 OWNER TO gauss; + +-- +-- Data for Name: test1; Type: TABLE DATA; Schema: public; Owner: gauss +-- + +COPY test1 (a, b) FROM stdin; +1 one +2 two +\. +; + +-- +-- Name: mysub; Type: SUBSCRIPTION; Schema: -; Owner: gauss +-- + +CREATE SUBSCRIPTION mysub CONNECTION 'dbname=dump_db user=gauss password=encryptOptjE2MXqiXDNgPvWxcN5Aquu7xei4bJNFcxJodbu8jH3GC9i31q1H8plPam0fqj39x host=127.0.0.1,127.0.0.1,127.0.0.1 port=25800,25803,25806' PUBLICATION mypub WITH (enabled = false, slot_name = 'mysub'); + + +ALTER SUBSCRIPTION mysub OWNER TO gauss; + +-- +-- Name: public; Type: ACL; Schema: -; Owner: gauss +-- + +REVOKE ALL ON SCHEMA public FROM PUBLIC; +REVOKE ALL ON SCHEMA public FROM gauss; +GRANT CREATE,USAGE ON SCHEMA public TO gauss; +GRANT USAGE ON SCHEMA public TO PUBLIC; + + +-- +-- openGauss database dump complete +-- + diff --git a/src/test/subscription/testcase/dump_expected/dump_db_pubtable.pub b/src/test/subscription/testcase/dump_expected/dump_db_pubtable.pub new file mode 100644 index 000000000..0bf04ced5 --- /dev/null +++ b/src/test/subscription/testcase/dump_expected/dump_db_pubtable.pub @@ -0,0 +1,64 @@ +-- +-- openGauss database dump +-- + +SET statement_timeout = 0; +SET xmloption = content; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET session_replication_role = replica; +SET client_min_messages = warning; + +SET search_path = public; + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: test1; Type: TABLE; Schema: public; Owner: gauss; Tablespace: +-- + +CREATE TABLE test1 ( + a integer, + b text +) +WITH (orientation=row, compression=no); + + +ALTER TABLE public.test1 OWNER TO gauss; + +-- +-- Data for Name: test1; Type: TABLE DATA; Schema: public; Owner: gauss +-- + +COPY test1 (a, b) FROM stdin; +1 one +2 two +\. 
+; + +-- +-- Name: mypub; Type: PUBLICATION; Schema: -; Owner: gauss +-- + +CREATE PUBLICATION mypub FOR ALL TABLES WITH (publish = 'insert, update, delete',ddl = 'table'); + + +ALTER PUBLICATION mypub OWNER TO gauss; + +-- +-- Name: public; Type: ACL; Schema: -; Owner: gauss +-- + +REVOKE ALL ON SCHEMA public FROM PUBLIC; +REVOKE ALL ON SCHEMA public FROM gauss; +GRANT CREATE,USAGE ON SCHEMA public TO gauss; +GRANT USAGE ON SCHEMA public TO PUBLIC; + + +-- +-- openGauss database dump complete +-- + diff --git a/src/test/subscription/testcase/dump_expected/dump_db_pubtable.sub b/src/test/subscription/testcase/dump_expected/dump_db_pubtable.sub new file mode 100644 index 000000000..c5be0a5e4 --- /dev/null +++ b/src/test/subscription/testcase/dump_expected/dump_db_pubtable.sub @@ -0,0 +1,64 @@ +-- +-- openGauss database dump +-- + +SET statement_timeout = 0; +SET xmloption = content; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET session_replication_role = replica; +SET client_min_messages = warning; + +SET search_path = public; + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: test1; Type: TABLE; Schema: public; Owner: gauss; Tablespace: +-- + +CREATE TABLE test1 ( + a integer, + b text +) +WITH (orientation=row, compression=no); + + +ALTER TABLE public.test1 OWNER TO gauss; + +-- +-- Data for Name: test1; Type: TABLE DATA; Schema: public; Owner: gauss +-- + +COPY test1 (a, b) FROM stdin; +1 one +2 two +\. +; + +-- +-- Name: mysub; Type: SUBSCRIPTION; Schema: -; Owner: gauss +-- + +CREATE SUBSCRIPTION mysub CONNECTION 'dbname=dump_db user=gauss password=encryptOptjE2MXqiXDNgPvWxcN5Aquu7xei4bJNFcxJodbu8jH3GC9i31q1H8plPam0fqj39x host=127.0.0.1,127.0.0.1,127.0.0.1 port=25800,25803,25806' PUBLICATION mypub WITH (enabled = false, slot_name = 'mysub'); + + +ALTER SUBSCRIPTION mysub OWNER TO gauss; + +-- +-- Name: public; Type: ACL; Schema: -; Owner: gauss +-- + +REVOKE ALL ON SCHEMA public FROM PUBLIC; +REVOKE ALL ON SCHEMA public FROM gauss; +GRANT CREATE,USAGE ON SCHEMA public TO gauss; +GRANT USAGE ON SCHEMA public TO PUBLIC; + + +-- +-- openGauss database dump complete +-- +
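For reference, a minimal manual walkthrough of the decoding path exercised by the tests in this patch, condensed from src/test/regress/sql/replication_ddl_publication.sql above (the publication, slot and table names here are illustrative):

    create publication demo_pub for all tables with (ddl = 'all');
    select * from pg_create_logical_replication_slot('demo_slot', 'test_decoding');
    create table public.demo_t (a int, b text);  -- emitted by the slot as a deparsed DDL message
    insert into public.demo_t values (1, 'x');   -- emitted as an ordinary row change
    select data from pg_logical_slot_get_changes('demo_slot', NULL, NULL);
    select pg_drop_replication_slot('demo_slot');
    drop publication demo_pub;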