From 46e9fd015b5420247c8fe95a97ef30c01e96a071 Mon Sep 17 00:00:00 2001 From: yanghao Date: Tue, 7 Mar 2023 20:47:24 +0800 Subject: [PATCH] sync all inner code --- doc/src/sgml/ref/alter_schema.sgmlin | 4 +- doc/src/sgml/ref/alter_table.sgmlin | 2 +- doc/src/sgml/ref/create_schema.sgmlin | 4 +- doc/src/sgml/ref/create_table.sgmlin | 2 +- .../sgml/ref/create_table_partition.sgmlin | 2 +- .../sgml/ref/create_table_subpartition.sgmlin | 2 +- src/bin/pg_dump/pg_dump.cpp | 4 +- .../backend/catalog/gs_job_attribute.cpp | 16 ++- src/common/backend/catalog/pg_proc.cpp | 1 - src/common/backend/libpq/pqcomm.cpp | 3 + src/common/backend/nodes/equalfuncs.cpp | 1 + src/common/backend/nodes/outfuncs.cpp | 26 ++--- src/common/backend/nodes/readfuncs.cpp | 28 ++--- src/common/backend/parser/gram.y | 42 +++++-- src/common/backend/parser/parse_clause.cpp | 18 +-- src/common/backend/parser/parse_expr.cpp | 2 +- src/common/backend/parser/parse_utilcmd.cpp | 13 ++- src/common/backend/utils/adt/network.cpp | 3 +- src/common/backend/utils/adt/numeric.cpp | 9 +- src/common/backend/utils/adt/rangetypes.cpp | 2 - src/common/backend/utils/adt/ruleutils.cpp | 11 +- src/common/backend/utils/adt/varlena.cpp | 20 ++-- src/common/backend/utils/adt/xml.cpp | 5 +- .../backend/utils/misc/guc/guc_storage.cpp | 9 +- src/common/backend/utils/sort/tuplesort.cpp | 65 ++++++----- src/common/backend/utils/time/snapmgr.cpp | 1 - .../libpq/client_logic_fmt/numeric.cpp | 27 +++-- src/common/pl/plpgsql/src/gram.y | 8 +- src/common/pl/plpgsql/src/pl_comp.cpp | 19 +++- src/common/pl/plpgsql/src/pl_package.cpp | 10 +- .../cbb/instruments/event/instr_waitevent.cpp | 4 +- .../cbb/instruments/gs_stack/gs_stack.cpp | 54 +++++---- .../cbb/instruments/percentile/percentile.cpp | 2 + .../instruments/statement/instr_statement.cpp | 3 +- .../ddes/adapter/ss_dms_bufmgr.cpp | 5 +- .../ddes/adapter/ss_dms_callback.cpp | 10 +- .../ddes/adapter/ss_transaction.cpp | 2 +- src/gausskernel/optimizer/commands/copy.cpp | 17 ++- .../optimizer/commands/eventcmds.cpp | 21 ++++ .../optimizer/commands/explain.cpp | 15 ++- .../optimizer/commands/extension.cpp | 4 - .../optimizer/commands/indexcmds.cpp | 20 +++- .../optimizer/commands/tablecmds.cpp | 20 ++-- .../optimizer/commands/trigger.cpp | 1 - .../optimizer/commands/variable.cpp | 13 ++- src/gausskernel/optimizer/plan/createplan.cpp | 17 +-- .../optimizer/plan/planstartwith.cpp | 3 +- .../optimizer/rewrite/rewriteHandler.cpp | 1 + .../process/postmaster/autovacuum.cpp | 2 +- .../process/postmaster/barrier_arch.cpp | 1 + .../process/postmaster/barrier_creator.cpp | 1 + .../process/postmaster/barrier_preparse.cpp | 1 + .../process/postmaster/gaussdb_version.cpp | 3 +- .../process/postmaster/pagerepair.cpp | 2 +- .../process/postmaster/pagewriter.cpp | 3 +- .../process/postmaster/pgaudit.cpp | 1 + src/gausskernel/process/postmaster/pgstat.cpp | 2 +- src/gausskernel/runtime/executor/execQual.cpp | 9 +- .../runtime/executor/execTuples.cpp | 12 +- .../runtime/executor/execUtils.cpp | 2 +- src/gausskernel/runtime/executor/nodeHash.cpp | 62 +++++----- .../runtime/executor/nodeHashjoin.cpp | 11 +- .../runtime/executor/nodeIndexscan.cpp | 3 +- .../runtime/executor/nodeModifyTable.cpp | 4 +- .../runtime/executor/nodePartIterator.cpp | 2 +- .../runtime/executor/nodeSamplescan.cpp | 6 +- .../runtime/executor/nodeSubplan.cpp | 3 +- .../runtime/executor/nodeSubqueryscan.cpp | 1 - .../runtime/opfusion/opfusion_update.cpp | 8 +- .../vecexecutor/vecnode/vechashjoin.cpp | 1 + .../storage/access/common/heaptuple.cpp | 58 
+++++----- .../storage/access/common/tupdesc.cpp | 13 ++- .../storage/access/redo/redo_segpage.cpp | 15 +-- .../storage/access/table/tableam.cpp | 25 ++--- .../storage/access/ubtree/ubtree.cpp | 1 + .../storage/access/ustore/knl_uheap.cpp | 24 ++-- .../storage/access/ustore/knl_utuple.cpp | 5 +- src/gausskernel/storage/buffer/bufmgr.cpp | 7 +- .../storage/cstore/cstore_insert.cpp | 8 +- src/gausskernel/storage/ipc/procarray.cpp | 6 +- src/gausskernel/storage/ipc/standby.cpp | 4 +- src/gausskernel/storage/lmgr/proc.cpp | 72 +++++------- src/gausskernel/storage/page/pageparse.cpp | 18 ++- .../storage/replication/logical/launcher.cpp | 1 + .../storage/replication/logical/worker.cpp | 1 + .../xlog_share_storage/xlog_share_storage.cpp | 1 + src/include/access/tableam.h | 42 ++++--- src/include/access/tupdesc.h | 3 +- .../rollback-post_catalog_maindb_92_844.sql | 1 + .../rollback-post_catalog_otherdb_92_844.sql | 1 + src/include/ddes/dms/dms_api.h | 1 - src/include/executor/executor.h | 47 ++++---- src/include/executor/tuptable.h | 5 +- src/include/instruments/gs_stack.h | 1 + src/include/knl/knl_thread.h | 3 +- src/include/miscadmin.h | 3 +- src/include/nodes/execnodes.h | 2 +- src/include/parser/parse_node.h | 4 +- src/include/storage/buf/buf_internals.h | 1 - src/include/storage/proc.h | 7 +- src/include/tcop/utility.h | 2 +- src/include/utils/numeric_gs.h | 24 ++-- src/include/utils/partitionmap_gs.h | 52 ++++----- src/include/utils/plpgsql.h | 2 +- src/test/regress/expected/alter_table_000.out | 2 +- .../expected/alter_table_modify_ustore.out | 36 +++--- src/test/regress/expected/event.out | 39 +++++++ .../expected/hw_subpartition_createtable.out | 2 +- .../expected/plpgsql_cursor_rowtype.out | 68 ++++++++++- .../regress/expected/plsql_show_all_error.out | 106 ++++++++++++++++++ .../expected/test_b_format_collate.out | 36 ++++++ .../regress/expected/test_ustore_index.out | 46 +++++++- src/test/regress/input/db4ai_snapshots.source | 8 ++ .../regress/input/select_into_file.source | 4 + .../regress/output/charset_b_format.source | 16 +-- .../regress/output/db4ai_snapshots.source | 13 +++ .../regress/output/event_dump_audit.source | 2 +- .../regress/output/select_into_file.source | 11 +- src/test/regress/parallel_schedule0 | 2 +- src/test/regress/parallel_schedule0U | 7 ++ src/test/regress/pg_regress.cpp | 2 +- .../regress/sql/alter_table_modify_ustore.sql | 18 +-- src/test/regress/sql/event.sql | 30 +++++ .../regress/sql/plpgsql_cursor_rowtype.sql | 55 +++++++++ src/test/regress/sql/plsql_show_all_error.sql | 43 +++++++ .../regress/sql/test_b_format_collate.sql | 28 +++++ src/test/regress/sql/test_ustore_index.sql | 31 ++++- 127 files changed, 1220 insertions(+), 556 deletions(-) diff --git a/doc/src/sgml/ref/alter_schema.sgmlin b/doc/src/sgml/ref/alter_schema.sgmlin index b2937f349..5fa767c59 100644 --- a/doc/src/sgml/ref/alter_schema.sgmlin +++ b/doc/src/sgml/ref/alter_schema.sgmlin @@ -16,8 +16,8 @@ ALTER SCHEMA schema_name OWNER TO new_owner; ALTER SCHEMA schema_name {WITH | WITHOUT} BLOCKCHAIN; ALTER SCHEMA schema_name - [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ]; -NOTICE: '[ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ]' is only available in CENTRALIZED mode and B-format database! 
+ [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] default_charset ] [ [ DEFAULT ] COLLATE [ = ] default_collation ]; +NOTICE: '[ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] default_charset ] [ [ DEFAULT ] COLLATE [ = ] default_collation ]' is only available in CENTRALIZED mode and B-format database! \ No newline at end of file diff --git a/doc/src/sgml/ref/alter_table.sgmlin b/doc/src/sgml/ref/alter_table.sgmlin index a6ea942da..a18b894ce 100755 --- a/doc/src/sgml/ref/alter_table.sgmlin +++ b/doc/src/sgml/ref/alter_table.sgmlin @@ -53,7 +53,7 @@ column_clause | ENCRYPTION KEY ROTATION | AUTO_INCREMENT [ = ] value | ALTER INDEX index_name [ VISBLE | INVISIBLE ] - | [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ] + | [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] default_charset ] [ [ DEFAULT ] COLLATE [ = ] default_collation ] | CONVERT TO CHARACTER SET | CHARSET charset [ COLLATE collation ] NOTICE: '[ CHARACTER SET | CHARSET ]' is only available in CENTRALIZED mode and B-format database! NOTICE: '[ COLLATE ]' is only available in CENTRALIZED mode and B-format database! diff --git a/doc/src/sgml/ref/create_schema.sgmlin b/doc/src/sgml/ref/create_schema.sgmlin index fc1a9833c..dd3db609c 100644 --- a/doc/src/sgml/ref/create_schema.sgmlin +++ b/doc/src/sgml/ref/create_schema.sgmlin @@ -13,8 +13,8 @@ CREATE SCHEMA [ IF NOT EXISTS ] schema_name [ AUTHORIZATION user_name ] [WITH BLOCKCHAIN] [ schema_element [ ... ] ]; CREATE SCHEMA schema_name - [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ]; -NOTICE: '[ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ]' is only available in CENTRALIZED mode and B-format database! + [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] default_charset ] [ [ DEFAULT ] COLLATE [ = ] default_collation ]; +NOTICE: '[ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] default_charset ] [ [ DEFAULT ] COLLATE [ = ] default_collation ]' is only available in CENTRALIZED mode and B-format database! \ No newline at end of file diff --git a/doc/src/sgml/ref/create_table.sgmlin b/doc/src/sgml/ref/create_table.sgmlin index ed166b65c..25fc07121 100644 --- a/doc/src/sgml/ref/create_table.sgmlin +++ b/doc/src/sgml/ref/create_table.sgmlin @@ -17,7 +17,7 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI | LIKE source_table [ like_option [...] ] } [, ... ]) [ AUTO_INCREMENT [ = ] value ] - [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ] + [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] default_charset ] [ [ DEFAULT ] COLLATE [ = ] default_collation ] [ WITH ( {storage_parameter = value} [, ... ] ) ] [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ] [ COMPRESS | NOCOMPRESS ] diff --git a/doc/src/sgml/ref/create_table_partition.sgmlin b/doc/src/sgml/ref/create_table_partition.sgmlin index 57ce3dea1..f4e2d0993 100644 --- a/doc/src/sgml/ref/create_table_partition.sgmlin +++ b/doc/src/sgml/ref/create_table_partition.sgmlin @@ -19,7 +19,7 @@ CREATE TABLE [ IF NOT EXISTS ] partition_table_name [, ... ] ] ) [ AUTO_INCREMENT [ = ] value ] - [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ][ [ DEFAULT ] COLLATE [ = ] collation ] + [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] default_charset ][ [ DEFAULT ] COLLATE [ = ] default_collation ] [ WITH ( {storage_parameter = value} [, ... 
] ) ] [ COMPRESS | NOCOMPRESS ] [ TABLESPACE tablespace_name ] diff --git a/doc/src/sgml/ref/create_table_subpartition.sgmlin b/doc/src/sgml/ref/create_table_subpartition.sgmlin index 509a328d4..de8a3e81d 100644 --- a/doc/src/sgml/ref/create_table_subpartition.sgmlin +++ b/doc/src/sgml/ref/create_table_subpartition.sgmlin @@ -18,7 +18,7 @@ CREATE TABLE [ IF NOT EXISTS ] subpartition_table_name [, ... ] ) [ AUTO_INCREMENT [ = ] value ] - [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ][ [ DEFAULT ] COLLATE [ = ] collation ] + [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] default_charset ][ [ DEFAULT ] COLLATE [ = ] default_collation ] [ WITH ( {storage_parameter = value} [, ... ] ) ] [ COMPRESS | NOCOMPRESS ] [ TABLESPACE tablespace_name ] diff --git a/src/bin/pg_dump/pg_dump.cpp b/src/bin/pg_dump/pg_dump.cpp index d6b731940..5d047e76c 100644 --- a/src/bin/pg_dump/pg_dump.cpp +++ b/src/bin/pg_dump/pg_dump.cpp @@ -21939,7 +21939,8 @@ static void dumpEvent(Archive *fout, EventInfo *einfo) if (einfo->evdefiner) { appendPQExpBuffer(query, "definer=%s ", einfo->evdefiner); } - appendPQExpBuffer(query, "EVENT %s.%s ON SCHEDULE ", fmtId(einfo->nspname), fmtId(einfo->evname)); + appendPQExpBuffer(query, "EVENT %s.", fmtId(einfo->nspname)); + appendPQExpBuffer(query, "%s ON SCHEDULE ", fmtId(einfo->evname)); if (strcmp(einfo->intervaltime, "null") != 0) { int interval_len = 10; char* begin_pos = einfo->intervaltime + interval_len; @@ -22005,6 +22006,7 @@ static void dumpEvent(Archive *fout, EventInfo *einfo) NULL, NULL); destroyPQExpBuffer(query); + destroyPQExpBuffer(delqry); } /* * dumpRlsPolicy diff --git a/src/common/backend/catalog/gs_job_attribute.cpp b/src/common/backend/catalog/gs_job_attribute.cpp index 668e5f7f8..f2c6dc40c 100644 --- a/src/common/backend/catalog/gs_job_attribute.cpp +++ b/src/common/backend/catalog/gs_job_attribute.cpp @@ -1214,7 +1214,7 @@ void create_job_1_internal(PG_FUNCTION_ARGS) Datum program_name = CStringGetTextDatum(c_program_name); pfree_ext(c_program_name); - static const short nrgs_job = 17; + static const short nrgs_job = 19; FunctionCallInfoData fcinfo_job; InitFunctionCallInfoData(fcinfo_job, NULL, nrgs_job, InvalidOid, NULL, NULL); errno_t rc = memset_s(fcinfo_job.arg, nrgs_job * sizeof(Datum), 0, nrgs_job * sizeof(Datum)); @@ -1239,6 +1239,8 @@ void create_job_1_internal(PG_FUNCTION_ARGS) fcinfo_job.arg[14] = job_action; /* job action */ fcinfo_job.arg[15] = job_type; /* job type */ fcinfo_job.argnull[16] = true; + fcinfo_job.argnull[17] = true; + fcinfo_job.argnull[18] = true; create_job_raw(&fcinfo_job); } @@ -1324,7 +1326,7 @@ void create_job_2_internal(PG_FUNCTION_ARGS) Datum enabled; get_program_info(program_name, &job_type, &job_action, &num_of_args, &enabled); - static const short nrgs_job = 17; + static const short nrgs_job = 19; FunctionCallInfoData fcinfo_job; InitFunctionCallInfoData(fcinfo_job, NULL, nrgs_job, InvalidOid, NULL, NULL); errno_t rc = memset_s(fcinfo_job.arg, nrgs_job * sizeof(Datum), 0, nrgs_job * sizeof(Datum)); @@ -1351,6 +1353,8 @@ void create_job_2_internal(PG_FUNCTION_ARGS) fcinfo_job.arg[14] = job_action; /* job action */ fcinfo_job.arg[15] = job_type; /* job type */ fcinfo_job.argnull[16] = true; + fcinfo_job.argnull[17] = true; + fcinfo_job.argnull[18] = true; create_job_raw(&fcinfo_job); } @@ -1376,7 +1380,7 @@ void create_job_3_internal(PG_FUNCTION_ARGS) Datum enabled; get_program_info(program_name, &job_type, &job_action, &num_of_args, &enabled); - static const short nrgs_job = 17; + static const short nrgs_job = 19; 
FunctionCallInfoData fcinfo_job; InitFunctionCallInfoData(fcinfo_job, NULL, nrgs_job, InvalidOid, NULL, NULL); errno_t rc = memset_s(fcinfo_job.arg, nrgs_job * sizeof(Datum), 0, nrgs_job * sizeof(Datum)); @@ -1403,6 +1407,8 @@ void create_job_3_internal(PG_FUNCTION_ARGS) fcinfo_job.arg[14] = job_action; /* job action */ fcinfo_job.arg[15] = job_type; /* job type */ fcinfo_job.argnull[16] = true; + fcinfo_job.argnull[17] = true; + fcinfo_job.argnull[18] = true; create_job_raw(&fcinfo_job); } @@ -1430,7 +1436,7 @@ void create_job_4_internal(PG_FUNCTION_ARGS) Datum program_name = CStringGetTextDatum(c_program_name); pfree_ext(c_program_name); - static const short nrgs_job = 17; + static const short nrgs_job = 19; FunctionCallInfoData fcinfo_job; InitFunctionCallInfoData(fcinfo_job, NULL, nrgs_job, InvalidOid, NULL, NULL); @@ -1453,6 +1459,8 @@ void create_job_4_internal(PG_FUNCTION_ARGS) fcinfo_job.arg[14] = job_action; /* job action */ fcinfo_job.arg[15] = job_type; /* job type */ fcinfo_job.argnull[16] = true; + fcinfo_job.argnull[17] = true; + fcinfo_job.argnull[18] = true; create_job_raw(&fcinfo_job); } diff --git a/src/common/backend/catalog/pg_proc.cpp b/src/common/backend/catalog/pg_proc.cpp index 6fe618e11..a2a947ad2 100644 --- a/src/common/backend/catalog/pg_proc.cpp +++ b/src/common/backend/catalog/pg_proc.cpp @@ -2324,7 +2324,6 @@ void delete_file_handle(const char* library_path) AutoMutexLock libraryLock(&file_list_lock); libraryLock.lock(); - char* fullname = expand_dynamic_library_name(library_path); for (file_scanner = file_list; file_scanner != NULL; file_scanner = file_scanner->next) { if (strncmp(fullname, file_scanner->filename, strlen(fullname) + 1) == 0) { diff --git a/src/common/backend/libpq/pqcomm.cpp b/src/common/backend/libpq/pqcomm.cpp index dc63ac6d1..757486aea 100644 --- a/src/common/backend/libpq/pqcomm.cpp +++ b/src/common/backend/libpq/pqcomm.cpp @@ -1049,12 +1049,15 @@ int StreamConnection(pgsocket server_fd, Port* port) int opval = 0; on = 1; +#ifndef USE_LIBNET + /* libnet not support SO_PROTOCOL in lwip */ socklen_t oplen = sizeof(opval); /* CommProxy Support */ if (comm_getsockopt(port->sock, SOL_SOCKET, SO_PROTOCOL, &opval, &oplen) < 0) { ereport(LOG, (errmsg("comm_getsockopt(SO_PROTOCOL) failed: %m"))); return STATUS_ERROR; } +#endif /* CommProxy Support */ if (comm_setsockopt(port->sock, IPPROTO_TCP, TCP_NODELAY, (char*)&on, sizeof(on)) < 0) { diff --git a/src/common/backend/nodes/equalfuncs.cpp b/src/common/backend/nodes/equalfuncs.cpp index 8c9f1e1f0..b607f1c6e 100644 --- a/src/common/backend/nodes/equalfuncs.cpp +++ b/src/common/backend/nodes/equalfuncs.cpp @@ -4435,6 +4435,7 @@ bool equal(const void* a, const void* b) retval = _equalIndexHintRelationData((IndexHintRelationData *)a, (IndexHintRelationData *)b); break; + default: ereport(ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unrecognized node type: %d", (int)nodeTag(a)))); diff --git a/src/common/backend/nodes/outfuncs.cpp b/src/common/backend/nodes/outfuncs.cpp index 02a3f5ac5..5991c615b 100755 --- a/src/common/backend/nodes/outfuncs.cpp +++ b/src/common/backend/nodes/outfuncs.cpp @@ -155,24 +155,24 @@ } \ } while (0) -#define WRITE_BASE_TYPE_ARRAY(fldname, size, format) \ - do { \ - appendStringInfo(str, " :" CppAsString(fldname) " "); \ - if (size <= 0) { \ - appendStringInfo(str, "<>"); \ - } else { \ - for (int i = 0; i < size; i++) { \ - appendStringInfo(str, format, node->fldname[i]); \ - } \ - } \ - } while(0) +#define WRITE_BASE_TYPE_ARRAY(fldname, size, format) \ + do { \ 
+ appendStringInfo(str, " :" CppAsString(fldname) " "); \ + if ((size) <= 0) { \ + appendStringInfo(str, "<>"); \ + } else { \ + for (int i = 0; i < (size); i++) { \ + appendStringInfo(str, (format), node->fldname[i]); \ + } \ + } \ + } while (0) #define WRITE_NODE_ARRAY(fldname, size) \ do { \ - if (node->fldname == nullptr || size <= 0) { \ + if (node->fldname == nullptr || (size) <= 0) { \ appendStringInfo(str, " :" CppAsString(fldname) " <>"); \ } else { \ - for (int i = 0; i < size; i++) { \ + for (int i = 0; i < (size); i++) { \ WRITE_NODE_FIELD(fldname[i]); \ } \ } \ diff --git a/src/common/backend/nodes/readfuncs.cpp b/src/common/backend/nodes/readfuncs.cpp index 6b17100c4..f84bbae59 100755 --- a/src/common/backend/nodes/readfuncs.cpp +++ b/src/common/backend/nodes/readfuncs.cpp @@ -418,16 +418,16 @@ THR_LOCAL bool skip_read_extern_fields = false; securec_check(reterrno, "\0", "\0"); \ } while (0) -#define READ_NODE_ARRAY(fldname, size, itemtype) \ - do { \ - if (size <= 0) { \ - READ_NODE_FIELD(fldname); /* must be null */ \ - } else { \ - local_node->fldname = (itemtype*)palloc0(sizeof(itemtype) * (size)); \ - for (int i = 0; i < size; i++) { \ - READ_NODE_FIELD(fldname[i]); \ - } \ - } \ +#define READ_NODE_ARRAY(fldname, size, itemtype) \ + do { \ + if ((size) <= 0) { \ + READ_NODE_FIELD(fldname); /* must be null */ \ + } else { \ + local_node->fldname = (itemtype *)palloc0(sizeof(itemtype) * (size)); \ + for (int i = 0; i < (size); i++) { \ + READ_NODE_FIELD(fldname[i]); \ + } \ + } \ } while (0) /* Read a bitmapset field */ @@ -1461,8 +1461,8 @@ static RightRefState* _readRightRefState(Query* query) token = pg_strtok(&length); if (token == nullptr || token[0] != '}') { - ereport(ERROR, (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), - errmsg("did not find '}' at end of RightRefState node"))); + ereport(ERROR, + (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmsg("did not find '}' at end of RightRefState node"))); } READ_DONE(); @@ -1483,8 +1483,8 @@ static RightRefState* _readRightRefStateWrap(Query* query) token = pg_strtok(&length); /* left brace */ if (token == nullptr || token[0] != '{') { - ereport(ERROR, (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), - errmsg("did not find '{' at end of RightRefState node"))); + ereport(ERROR, + (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmsg("did not find '{' at end of RightRefState node"))); } token = pg_strtok(&length); /* read node name */ if (length != 13 && memcmp(token, "RIGHTREFSTATE", 13) != 0) { diff --git a/src/common/backend/parser/gram.y b/src/common/backend/parser/gram.y index 5e585ea30..e65bf124f 100644 --- a/src/common/backend/parser/gram.y +++ b/src/common/backend/parser/gram.y @@ -218,6 +218,7 @@ static char *pg_strsep(char **stringp, const char *delim); static long long get_pid(const char *strsid); static Node *MakeAnonyBlockFuncStmt(int flag, const char * str); static CharsetCollateOptions* MakeCharsetCollateOptions(CharsetCollateOptions *options, CharsetCollateOptions *option); +static Node *checkNullNode(Node *n); #define TYPE_LEN 4 /* strlen("TYPE") */ #define DATE_LEN 4 /* strlen("DATE") */ #define DECLARE_LEN 9 /* strlen(" DECLARE ") */ @@ -9272,9 +9273,10 @@ SnapshotStmt: $$ = makeCallFuncStmt( list_make2(makeString("db4ai"), makeString("create_snapshot")), - lcons(makeStringConst($4->schemaname, @4), list_make4(makeStringConst($4->relname, @4), + lcons(checkNullNode(makeStringConst($4->schemaname, @4)), + list_make4(checkNullNode(makeStringConst($4->relname, @4)), makeAArrayExpr(list_make1(make_node_from_scanbuf(@10, yylloc , 
yyscanner)), @10), - makeStringConst($5, @5),makeStringConst($8, @8)))); + checkNullNode(makeStringConst($5, @5)), checkNullNode(makeStringConst($8, @8))))); } | CREATE OptTemp SNAPSHOT qualified_name OptSnapshotVersion /* PGXC_BEGIN */ @@ -9338,8 +9340,11 @@ SnapshotStmt: $$ = makeCallFuncStmt( list_make2(makeString("db4ai"), makeString("prepare_snapshot")), - lcons(makeStringConst($4->schemaname, @4), list_make4(makeStringConst($4->relname, @4), - makeAArrayExpr($13, @13), makeStringConst($5, @5), makeStringConst($10, @10)))); + lcons(checkNullNode(makeStringConst($4->schemaname, @4)), + list_make4(checkNullNode(makeStringConst($4->relname, @4)), + makeAArrayExpr($13, @13), + checkNullNode(makeStringConst($5, @5)), + checkNullNode(makeStringConst($10, @10))))); } | SAMPLE SNAPSHOT qualified_name SnapshotVersion OptSnapshotStratify SnapshotSampleList { @@ -9358,7 +9363,8 @@ SnapshotStmt: List *stratify = NIL; foreach_cell (c, $5) { ColumnRef *r = (ColumnRef*)lfirst(c); - stratify = lappend(stratify, makeStringConst(((Value *)llast(r->fields))->val.str, @5)); + stratify = lappend(stratify, + checkNullNode(makeStringConst(((Value *)llast(r->fields))->val.str, @5))); } List *names = NIL, *ratios = NIL, *comments = NIL; @@ -9372,8 +9378,8 @@ SnapshotStmt: $$ = makeCallFuncStmt( list_make2(makeString("db4ai"), makeString("sample_snapshot")), - lcons(makeStringConst($3->schemaname, @3), - lcons(makeStringConst($3->relname, @3), + lcons(checkNullNode(makeStringConst($3->schemaname, @3)), + lcons(checkNullNode(makeStringConst($3->relname, @3)), list_make4(makeAArrayExpr(names, -1), makeAArrayExpr(ratios, -1), (stratify == NIL) ? makeNullAConst(-1) : makeAArrayExpr(stratify, @5), makeAArrayExpr(comments, -1))))); @@ -9394,7 +9400,8 @@ SnapshotStmt: $$ = makeCallFuncStmt( list_make2(makeString("db4ai"), makeString("archive_snapshot")), - list_make2(makeStringConst($3->schemaname, @3), makeStringConst($3->relname, @3))); + list_make2(checkNullNode(makeStringConst($3->schemaname, @3)), + checkNullNode(makeStringConst($3->relname, @3)))); } | PUBLISH SNAPSHOT qualified_name SnapshotVersion { @@ -9412,7 +9419,8 @@ SnapshotStmt: $$ = makeCallFuncStmt( list_make2(makeString("db4ai"), makeString("publish_snapshot")), - list_make2(makeStringConst($3->schemaname, @3), makeStringConst($3->relname, @3))); + list_make2(checkNullNode(makeStringConst($3->schemaname, @3)), + checkNullNode(makeStringConst($3->relname, @3)))); } | PURGE SNAPSHOT qualified_name SnapshotVersion {} { @@ -9430,7 +9438,8 @@ SnapshotStmt: $$ = makeCallFuncStmt( list_make2(makeString("db4ai"), makeString("purge_snapshot")), - list_make2(makeStringConst($3->schemaname, @3), makeStringConst($3->relname, @3))); + list_make2(checkNullNode(makeStringConst($3->schemaname, @3)), + checkNullNode(makeStringConst($3->relname, @3)))); } ; @@ -31324,6 +31333,19 @@ static void CheckUserHostIsValid() ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("user@host is only supported in b database when the test_user_host is on"))); } +static Node *checkNullNode(Node *n) +{ + A_Const *c = (A_Const *)n; + A_Const *r = makeNode(A_Const); + if (c->val.val.str == NULL || strlen(c->val.val.str) == 0) + { + r->val.type = T_Null; + r->val.val.str = c->val.val.str; + r->location = c->location; + return (Node *)r; + } + return n; +} /* * Must undefine this stuff before including scan.c, since it has different * definitions for these macros. 
diff --git a/src/common/backend/parser/parse_clause.cpp b/src/common/backend/parser/parse_clause.cpp index 4e3ccac8c..27bdcaa2f 100644 --- a/src/common/backend/parser/parse_clause.cpp +++ b/src/common/backend/parser/parse_clause.cpp @@ -2369,11 +2369,12 @@ List* transformDistinctClause(ParseState* pstate, List** targetlist, List* sortC if (allowOrderbyExpr) { continue; } else { - ereport(ERROR, + ereport( + ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), - is_agg ? errmsg("in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list") - : errmsg("for SELECT DISTINCT, ORDER BY expressions must appear in select list"), - parser_errposition(pstate, exprLocation((Node*)tle->expr)))); + is_agg ? errmsg("in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list") + : errmsg("for SELECT DISTINCT, ORDER BY expressions must appear in select list"), + parser_errposition(pstate, exprLocation((Node *)tle->expr)))); } } result = lappend(result, copyObject(scl)); @@ -2440,10 +2441,11 @@ static void CheckOrderbyColumns(ParseState* pstate, List* targetList, bool isAgg if (!isFound) { ereport(ERROR, - (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), - isAggregate ? errmsg("in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list") - : errmsg("for SELECT DISTINCT, ORDER BY expressions must appear in select list"), - parser_errposition(pstate, colRef->location))); + (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), + isAggregate + ? errmsg("in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list") + : errmsg("for SELECT DISTINCT, ORDER BY expressions must appear in select list"), + parser_errposition(pstate, colRef->location))); } } diff --git a/src/common/backend/parser/parse_expr.cpp b/src/common/backend/parser/parse_expr.cpp index b2c222f89..c188cbdad 100644 --- a/src/common/backend/parser/parse_expr.cpp +++ b/src/common/backend/parser/parse_expr.cpp @@ -181,7 +181,7 @@ static Const* BuildColumnBaseValue(Form_pg_attribute attTup) if (IsBaseRightRefSupportType(attTup->atttypid)) { Datum datum = GetTypeZeroValue(attTup); return makeConst(attTup->atttypid, - attTup->atttypmod, + attTup->atttypmod, attTup->attcollation, attTup->attlen, datum, diff --git a/src/common/backend/parser/parse_utilcmd.cpp b/src/common/backend/parser/parse_utilcmd.cpp index 1561fa3f7..c5af06a4c 100644 --- a/src/common/backend/parser/parse_utilcmd.cpp +++ b/src/common/backend/parser/parse_utilcmd.cpp @@ -1978,6 +1978,17 @@ static void transformTableLikeClause( * table can have different column numbers. */ attmap = (AttrNumber*)palloc0(sizeof(AttrNumber) * tupleDesc->natts); + int colCount = list_length(cxt->columns); + for (parent_attno = 1; parent_attno <= tupleDesc->natts; parent_attno++) { + Form_pg_attribute attribute = &tupleDesc->attrs[parent_attno - 1]; + if (attribute->attisdropped && (!u_sess->attr.attr_sql.enable_cluster_resize || RelationIsTsStore(relation))) + continue; + if (attribute->attkvtype == ATT_KV_HIDE && table_like_clause->options != CREATE_TABLE_LIKE_ALL) { + continue; + } + colCount++; + attmap[parent_attno - 1] = colCount; + } /* * Insert the copied attributes into the cxt for the new table definition. 
@@ -2089,8 +2100,6 @@ static void transformTableLikeClause( */ cxt->columns = lappend(cxt->columns, def); - attmap[parent_attno - 1] = list_length(cxt->columns); - /* * Copy default, if present and the default has been requested */ diff --git a/src/common/backend/utils/adt/network.cpp b/src/common/backend/utils/adt/network.cpp index 094b7af87..d1adf4645 100644 --- a/src/common/backend/utils/adt/network.cpp +++ b/src/common/backend/utils/adt/network.cpp @@ -73,7 +73,7 @@ static int ip_addrsize(inet* inetptr) static inet* network_in(char* src, bool is_cidr, bool can_ignore = false) { int bits; - inet* dst = (inet*)palloc0(sizeof(inet));; + inet *dst = (inet *)palloc0(sizeof(inet)); bool should_reset_base = false; int level = can_ignore ? WARNING : ERROR; @@ -98,7 +98,6 @@ static inet* network_in(char* src, bool is_cidr, bool can_ignore = false) should_reset_base = true; } - /* * Error check: CIDR values must not have any bits set beyond the masklen. */ diff --git a/src/common/backend/utils/adt/numeric.cpp b/src/common/backend/utils/adt/numeric.cpp index 051e940c9..5a753ae05 100644 --- a/src/common/backend/utils/adt/numeric.cpp +++ b/src/common/backend/utils/adt/numeric.cpp @@ -5920,8 +5920,9 @@ static void div_var(NumericVar* var1, NumericVar* var2, NumericVar* result, int idivisor = idivisor * NBASE + var2->digits[1]; idivisor_weight--; } - if (var2->sign == NUMERIC_NEG) + if (var2->sign == NUMERIC_NEG) { idivisor = -idivisor; + } div_var_int(var1, idivisor, idivisor_weight, result, rscale, round); return; @@ -6182,8 +6183,9 @@ static void div_var_fast(NumericVar* var1, NumericVar* var2, NumericVar* result, idivisor = idivisor * NBASE + var2->digits[1]; idivisor_weight--; } - if (var2->sign == NUMERIC_NEG) + if (var2->sign == NUMERIC_NEG) { idivisor = -idivisor; + } div_var_int(var1, idivisor, idivisor_weight, result, rscale, round); return; @@ -6451,7 +6453,7 @@ div_var_int(const NumericVar *var, int ival, int ival_weight, NumericVar *result res_ndigits++; res_buf = digitbuf_alloc(res_ndigits + 1); - res_buf[0] = 0; /* spare digit for later rounding */ + res_buf[0] = 0; /* spare digit for later rounding */ res_digits = res_buf + 1; /* @@ -6465,7 +6467,6 @@ div_var_int(const NumericVar *var, int ival, int ival_weight, NumericVar *result * integer if this exceeds UINT_MAX. 
*/ divisor = Abs(ival); - if (divisor <= UINT_MAX / NBASE) { /* carry cannot overflow 32 bits */ uint32 carry = 0; diff --git a/src/common/backend/utils/adt/rangetypes.cpp b/src/common/backend/utils/adt/rangetypes.cpp index 34144c884..88be58d86 100644 --- a/src/common/backend/utils/adt/rangetypes.cpp +++ b/src/common/backend/utils/adt/rangetypes.cpp @@ -1826,7 +1826,6 @@ static void range_parse(const char* string, bool can_ignore, char* flags, char** return; } - ptr = range_parse_bound(string, ptr, lbound_str, &infinite); if (infinite) *flags = (unsigned char)(*flags) | RANGE_LB_INF; @@ -1842,7 +1841,6 @@ static void range_parse(const char* string, bool can_ignore, char* flags, char** return; } - ptr = range_parse_bound(string, ptr, ubound_str, &infinite); if (infinite) *flags |= RANGE_UB_INF; diff --git a/src/common/backend/utils/adt/ruleutils.cpp b/src/common/backend/utils/adt/ruleutils.cpp index 74a3c3ded..4c8ff9ec4 100644 --- a/src/common/backend/utils/adt/ruleutils.cpp +++ b/src/common/backend/utils/adt/ruleutils.cpp @@ -3049,15 +3049,14 @@ static char* pg_get_triggerdef_worker(Oid trigid, bool pretty) tgfbody = TextDatumGetCString(value); value = fastgetattr(ht_trig, Anum_pg_trigger_tgowner, tgrel->rd_att, &isnull); if (DatumGetObjectId(value) != GetUserId()) { - appendStringInfo( - &buf, "CREATE DEFINER = %s TRIGGER %s ", GetUserNameFromId(DatumGetObjectId(value)), quote_identifier(tgname)); + appendStringInfo(&buf, "CREATE DEFINER = %s TRIGGER %s ", GetUserNameFromId(DatumGetObjectId(value)), + quote_identifier(tgname)); } else { - appendStringInfo( - &buf, "CREATE TRIGGER %s ", quote_identifier(tgname)); + appendStringInfo(&buf, "CREATE TRIGGER %s ", quote_identifier(tgname)); } } else { - appendStringInfo( - &buf, "CREATE %sTRIGGER %s ", OidIsValid(trigrec->tgconstraint) ? "CONSTRAINT " : "", quote_identifier(tgname)); + appendStringInfo(&buf, "CREATE %sTRIGGER %s ", OidIsValid(trigrec->tgconstraint) ? 
"CONSTRAINT " : "", + quote_identifier(tgname)); } if (TRIGGER_FOR_BEFORE(trigrec->tgtype)) diff --git a/src/common/backend/utils/adt/varlena.cpp b/src/common/backend/utils/adt/varlena.cpp index 756a4db26..fb72c9bc3 100644 --- a/src/common/backend/utils/adt/varlena.cpp +++ b/src/common/backend/utils/adt/varlena.cpp @@ -1920,19 +1920,19 @@ int text_cmp(text* arg1, text* arg2, Oid collid) bool texteq_with_collation(PG_FUNCTION_ARGS) { - Datum arg1 = PG_GETARG_DATUM(0); - Datum arg2 = PG_GETARG_DATUM(1); - bool result = false; + Datum arg1 = PG_GETARG_DATUM(0); + Datum arg2 = PG_GETARG_DATUM(1); + bool result = false; - text* targ1 = DatumGetTextPP(arg1); - text* targ2 = DatumGetTextPP(arg2); + text* targ1 = DatumGetTextPP(arg1); + text* targ2 = DatumGetTextPP(arg2); - /* text_cmp return 0 means equal */ - result = (text_cmp(targ1, targ2, PG_GET_COLLATION()) == 0); - PG_FREE_IF_COPY(targ1, 0); - PG_FREE_IF_COPY(targ2, 1); + /* text_cmp return 0 means equal */ + result = (text_cmp(targ1, targ2, PG_GET_COLLATION()) == 0); + PG_FREE_IF_COPY(targ1, 0); + PG_FREE_IF_COPY(targ2, 1); - return result; + return result; } /* diff --git a/src/common/backend/utils/adt/xml.cpp b/src/common/backend/utils/adt/xml.cpp index 78db5adff..f7cbbaf01 100644 --- a/src/common/backend/utils/adt/xml.cpp +++ b/src/common/backend/utils/adt/xml.cpp @@ -526,7 +526,6 @@ xmltype* xmlelement(XmlExprState* xmlExpr, ExprContext* econtext) xmltype* result = NULL; List* named_arg_strings = NIL; List* arg_strings = NIL; - int i; ListCell* arg = NULL; ListCell* narg = NULL; PgXmlErrorContext* xmlerrcxt = NULL; @@ -540,7 +539,6 @@ xmltype* xmlelement(XmlExprState* xmlExpr, ExprContext* econtext) * terms. */ named_arg_strings = NIL; - i = 0; foreach (arg, xmlExpr->named_args) { ExprState* e = (ExprState*)lfirst(arg); Datum value; @@ -553,7 +551,6 @@ xmltype* xmlelement(XmlExprState* xmlExpr, ExprContext* econtext) else str = map_sql_value_to_xml_value(value, exprType((Node*)e->expr), false); named_arg_strings = lappend(named_arg_strings, str); - i++; } arg_strings = NIL; @@ -826,7 +823,7 @@ void pg_xml_init_library(void) */ if (sizeof(char) != sizeof(xmlChar)) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("could not initialize XML library"), - errdetail("libxml2 has incompatible char type: sizeof(char)=%u, sizeof(xmlChar)=%u.", + errdetail("libxml2 has incompatible char type: sizeof(char)=%d, sizeof(xmlChar)=%d.", (int)sizeof(char), (int)sizeof(xmlChar)))); #ifdef USE_LIBXMLCONTEXT diff --git a/src/common/backend/utils/misc/guc/guc_storage.cpp b/src/common/backend/utils/misc/guc/guc_storage.cpp index 26f0f891e..e5bc80c3e 100755 --- a/src/common/backend/utils/misc/guc/guc_storage.cpp +++ b/src/common/backend/utils/misc/guc/guc_storage.cpp @@ -4902,8 +4902,6 @@ static int IsReplConnInfoChanged(const char* replConnInfo, const char* newval) char* temptok = NULL; char* toker = NULL; char* temp = NULL; - char* token = NULL; - char* tmpToken = NULL; char* oldReplStr = NULL; char* newReplStr = NULL; int repl_length = 0; @@ -4984,8 +4982,11 @@ static int IsReplConnInfoChanged(const char* replConnInfo, const char* newval) if (temptok == NULL) { /* Modify the replication info message, the new message does not carry disaster recovery information */ - token = strtok_r(oldReplStr, "d", &tmpToken); - if (strncasecmp(token, newReplStr, strlen(newReplStr)) == 0) { + if (strcmp(newReplStr, "") == 0) { + pfree_ext(oldReplStr); + pfree_ext(newReplStr); + return REMOVE_DISASTER_RECOVERY_INFO; + } else if (strncasecmp(oldReplStr, newReplStr, 
strlen(newReplStr)) == 0) { pfree_ext(oldReplStr); pfree_ext(newReplStr); return NO_CHANGE; diff --git a/src/common/backend/utils/sort/tuplesort.cpp b/src/common/backend/utils/sort/tuplesort.cpp index 51afe2c1f..e11eeac68 100644 --- a/src/common/backend/utils/sort/tuplesort.cpp +++ b/src/common/backend/utils/sort/tuplesort.cpp @@ -197,8 +197,7 @@ typedef struct { #define SLAB_SLOT_SIZE 1024 -typedef union SlabSlot -{ +typedef union SlabSlot { union SlabSlot *nextfree; char buffer[SLAB_SLOT_SIZE]; } SlabSlot; @@ -244,13 +243,13 @@ struct Tuplesortstate { * tuples to return? */ bool boundUsed; /* true if we made use of a bounded heap */ int bound; /* if bounded, the maximum number of tuples */ - bool tuples; /* Can SortTuple.tuple ever be set? */ + bool tuples; /* Can SortTuple.tuple ever be set? */ int64 availMem; /* remaining memory available, in bytes */ int64 allowedMem; /* total memory allowed, in bytes */ int maxTapes; /* number of tapes (Knuth's T) */ int tapeRange; /* maxTapes-1 (Knuth's P) */ MemoryContext sortcontext; /* memory context holding all sort data */ - MemoryContext tuplecontext; /* memory context holding tuple data */ + MemoryContext tuplecontext; /* memory context holding tuple data */ LogicalTapeSet* tapeset; /* logtape.c object for tapes in a temp file */ #ifdef PGXC Oid current_xcnode; /* node from where we are got last tuple */ @@ -343,9 +342,9 @@ struct Tuplesortstate { */ bool slabAllocatorUsed; - char *slabMemoryBegin; /* beginning of slab memory arena */ - char *slabMemoryEnd; /* end of slab memory arena */ - SlabSlot *slabFreeHead; /* head of free list */ + char *slabMemoryBegin; /* beginning of slab memory arena */ + char *slabMemoryEnd; /* end of slab memory arena */ + SlabSlot *slabFreeHead; /* head of free list */ /* * When we return a tuple to the caller in tuplesort_gettuple_XXX, that @@ -522,17 +521,16 @@ struct Tuplesortstate { * Return the given tuple to the slab memory free list, or free it * if it was palloc'd. */ -#define RELEASE_SLAB_SLOT(state, tuple) \ - do { \ - SlabSlot *buf = (SlabSlot *) tuple; \ - \ - if (IS_SLAB_SLOT((state), buf)) \ - { \ - buf->nextfree = (state)->slabFreeHead; \ - (state)->slabFreeHead = buf; \ - } else \ - pfree(buf); \ - } while(0) +#define RELEASE_SLAB_SLOT(state, tuple) \ + do { \ + SlabSlot *buf = (SlabSlot *)(tuple); \ + \ + if (IS_SLAB_SLOT((state), buf)) { \ + buf->nextfree = (state)->slabFreeHead; \ + (state)->slabFreeHead = buf; \ + } else \ + pfree(buf); \ + } while (0) #define COMPARETUP(state, a, b) ((*(state)->comparetup)(a, b, state)) #define COPYTUP(state, stup, tup) ((*(state)->copytup)(state, stup, tup)) @@ -553,9 +551,9 @@ static bool LACKMEM(Tuplesortstate* state) { int64 usedMem = state->allowedMem - state->availMem; - if ((state->availMem < 0 && !state->slabAllocatorUsed) || - gs_sysmemory_busy(usedMem * state->dop, true)) + if ((state->availMem < 0 && !state->slabAllocatorUsed) || gs_sysmemory_busy(usedMem * state->dop, true)) { return true; + } return false; } @@ -811,7 +809,7 @@ static Tuplesortstate* tuplesort_begin_common(int64 workMem, bool randomAccess, ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE, - STANDARD_CONTEXT, + STANDARD_CONTEXT, workMem * 1024L); /* @@ -2456,8 +2454,7 @@ static void selectnewtape(Tuplesortstate* state) /* * Initialize the slab allocation arena, for the given number of slots. 
*/ -static void -init_slab_allocator(Tuplesortstate *state, int numSlots) +static void init_slab_allocator(Tuplesortstate *state, int numSlots) { if (numSlots > 0) { char *p; @@ -2490,14 +2487,18 @@ init_slab_allocator(Tuplesortstate *state, int numSlots) * numInputTapes tapes, and one tape is used for output (unless we do an * on-the-fly final merge, in which case we don't have an output tape). */ -static void -init_tape_buffers(Tuplesortstate *state, int numInputTapes) +static void init_tape_buffers(Tuplesortstate *state, int numInputTapes) { int64 availBlocks; int64 blocksPerTape; int remainder; int tapenum; + if (numInputTapes == 0) { + elog(ERROR, "init_tape_buffers: numInputTapes can not be 0"); + return; + } + /* * Divide availMem evenly among the number of input tapes. */ @@ -2639,10 +2640,11 @@ static void mergeruns(Tuplesortstate* state) * From this point on, we no longer use the USEMEM()/LACKMEM() mechanism * to track memory usage of individual tuples. */ - if (state->tuples) + if (state->tuples) { init_slab_allocator(state, numInputTapes + 1); - else + } else { init_slab_allocator(state, 0); + } /* * Use all the spare memory we have available for read buffers for the @@ -2763,8 +2765,9 @@ static void mergeruns(Tuplesortstate* state) state->status = TSS_SORTEDONTAPE; for (tapenum = 0; tapenum < state->maxTapes; tapenum++) { - if (tapenum != state->result_tape) + if (tapenum != state->result_tape) { LogicalTapeRewindForWrite(state->tapeset, tapenum); + } } } @@ -3410,9 +3413,9 @@ static void* readtup_alloc(Tuplesortstate *state, Size tuplen) */ Assert(state->slabFreeHead); - if (tuplen > SLAB_SLOT_SIZE || !state->slabFreeHead) + if (tuplen > SLAB_SLOT_SIZE || !state->slabFreeHead) { return MemoryContextAlloc(state->sortcontext, tuplen); - else { + } else { buf = state->slabFreeHead; /* Reuse this slot */ state->slabFreeHead = buf->nextfree; @@ -3563,7 +3566,7 @@ static void copytup_heap(Tuplesortstate* state, SortTuple* stup, void* tup) /* set up first-column key value */ htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET; htup.t_data = (HeapTupleHeader)((char*)tuple - MINIMAL_TUPLE_OFFSET); - original = tableam_tops_tuple_getattr(&htup, state->sortKeys[0].ssup_attno, state->tupDesc, &stup->isnull1); + original = tableam_tops_tuple_getattr(&htup, state->sortKeys[0].ssup_attno, state->tupDesc, &stup->isnull1); MemoryContextSwitchTo(oldcontext); diff --git a/src/common/backend/utils/time/snapmgr.cpp b/src/common/backend/utils/time/snapmgr.cpp index 7c78e5390..b910010d9 100644 --- a/src/common/backend/utils/time/snapmgr.cpp +++ b/src/common/backend/utils/time/snapmgr.cpp @@ -372,7 +372,6 @@ loop: } else { csn = SSTransactionIdGetCommitSeqNo(xid, true, true, false, snapshot, NULL); } - } else { csn = TransactionIdGetCommitSeqNo(xid, true, true, false, snapshot); } diff --git a/src/common/interfaces/libpq/client_logic_fmt/numeric.cpp b/src/common/interfaces/libpq/client_logic_fmt/numeric.cpp index 6c2f57796..3b088b199 100644 --- a/src/common/interfaces/libpq/client_logic_fmt/numeric.cpp +++ b/src/common/interfaces/libpq/client_logic_fmt/numeric.cpp @@ -42,20 +42,20 @@ #define NUMERIC_IS_SHORT(n) (NUMERIC_FLAGBITS(n) == NUMERIC_SHORT) #define NUMERIC_SHORT_SIGN_MASK 0x2000 #define NUMERIC_DSCALE_MASK 0x3FFF -#define NUMERIC_SIGN(n) \ - (NUMERIC_HEADER_IS_SHORT(n) ? (((n)->choice.n_short.n_header & NUMERIC_SHORT_SIGN_MASK) ? NUMERIC_NEG : NUMERIC_POS) : \ - NUMERIC_FLAGBITS(n)) -#define NUMERIC_DSCALE(n) \ - (NUMERIC_HEADER_IS_SHORT((n)) ? 
\ - ((n)->choice.n_short.n_header & NUMERIC_SHORT_DSCALE_MASK) >> NUMERIC_SHORT_DSCALE_SHIFT : \ - ((n)->choice.n_long.n_sign_dscale & NUMERIC_DSCALE_MASK)) +#define NUMERIC_SIGN(n) \ + (NUMERIC_HEADER_IS_SHORT(n) \ + ? (((n)->choice.n_short.n_header & NUMERIC_SHORT_SIGN_MASK) ? NUMERIC_NEG : NUMERIC_POS) \ + : NUMERIC_FLAGBITS(n)) +#define NUMERIC_DSCALE(n) \ + (NUMERIC_HEADER_IS_SHORT((n)) \ + ? ((n)->choice.n_short.n_header & NUMERIC_SHORT_DSCALE_MASK) >> NUMERIC_SHORT_DSCALE_SHIFT \ + : ((n)->choice.n_long.n_sign_dscale & NUMERIC_DSCALE_MASK)) - -#define NUMERIC_WEIGHT(n) \ - (NUMERIC_HEADER_IS_SHORT((n)) ? \ - (((n)->choice.n_short.n_header & NUMERIC_SHORT_WEIGHT_SIGN_MASK ? ~NUMERIC_SHORT_WEIGHT_MASK : 0) | \ - ((n)->choice.n_short.n_header & NUMERIC_SHORT_WEIGHT_MASK)) : \ - ((n)->choice.n_long.n_weight)) +#define NUMERIC_WEIGHT(n) \ + (NUMERIC_HEADER_IS_SHORT((n)) \ + ? (((n)->choice.n_short.n_header & NUMERIC_SHORT_WEIGHT_SIGN_MASK ? ~NUMERIC_SHORT_WEIGHT_MASK : 0) | \ + ((n)->choice.n_short.n_header & NUMERIC_SHORT_WEIGHT_MASK)) \ + : ((n)->choice.n_long.n_weight)) #define NUMERIC_DIGITS(num) (NUMERIC_HEADER_IS_SHORT(num) ? (num)->choice.n_short.n_data : (num)->choice.n_long.n_data) #define NUMERIC_64 0xD000 @@ -83,7 +83,6 @@ #define NUMERIC_NDIGITS(num) ((VARSIZE(num) - NUMERIC_HEADER_SIZE(num)) / sizeof(NumericDigit)) #define VARSIZE(PTR) VARSIZE_4B(PTR) -#define NUMERIC_HEADER_SIZE(n) (sizeof(uint16) + (((NUMERIC_FLAGBITS(n) & 0x8000) == 0) ? sizeof(int16) : 0)) #define NUMERIC_NB_FLAGBITS(n) ((n)->choice.n_header & NUMERIC_BI_MASK) // nan or biginteger #define NUMERIC_IS_BI(n) (NUMERIC_NB_FLAGBITS(n) > NUMERIC_NAN) diff --git a/src/common/pl/plpgsql/src/gram.y b/src/common/pl/plpgsql/src/gram.y index 2e9990a3a..2a783fa43 100755 --- a/src/common/pl/plpgsql/src/gram.y +++ b/src/common/pl/plpgsql/src/gram.y @@ -3514,11 +3514,17 @@ for_control : for_variable K_IN newp->argquery = read_cursor_args(cursor, K_LOOP, "LOOP"); + TupleDesc tupleDesc = NULL; + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && ALLOW_PROCEDURE_COMPILE_CHECK && + cursor->cursor_explicit_expr->query != NULL) { + tupleDesc = getCursorTupleDesc(cursor->cursor_explicit_expr, false); + } /* create loop's private RECORD variable */ newp->rec = plpgsql_build_record($1.name, $1.lineno, - true); + true, + tupleDesc); $$ = (PLpgSQL_stmt *) newp; } diff --git a/src/common/pl/plpgsql/src/pl_comp.cpp b/src/common/pl/plpgsql/src/pl_comp.cpp index 0950c7452..a7da1738d 100644 --- a/src/common/pl/plpgsql/src/pl_comp.cpp +++ b/src/common/pl/plpgsql/src/pl_comp.cpp @@ -1048,11 +1048,11 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup, errhint("The arguments of the trigger can be accessed through TG_NARGS and TG_ARGV instead."))); } /* Add the record for referencing NEW */ - rec = plpgsql_build_record("new", 0, true); + rec = plpgsql_build_record("new", 0, true, NULL); func->new_varno = rec->dno; /* Add the record for referencing OLD */ - rec = plpgsql_build_record("old", 0, true); + rec = plpgsql_build_record("old", 0, true, NULL); func->old_varno = rec->dno; /* Add the variable tg_name */ @@ -3536,7 +3536,7 @@ PLpgSQL_variable* plpgsql_build_variable(const char* refname, int lineno, PLpgSQ /* "record" type -- build a record variable */ PLpgSQL_rec* rec = NULL; - rec = plpgsql_build_record(refname, lineno, add2namespace); + rec = plpgsql_build_record(refname, lineno, add2namespace, NULL); rec->addNamespace = add2namespace; rec->varname = varname == NULL ? 
NULL : pstrdup(varname); result = (PLpgSQL_variable*)rec; @@ -3656,7 +3656,7 @@ PLpgSQL_variable* plpgsql_build_tableType(const char* refname, int lineno, PLpgS /* * Build empty named record variable, and optionally add it to namespace */ -PLpgSQL_rec* plpgsql_build_record(const char* refname, int lineno, bool add2namespace) +PLpgSQL_rec* plpgsql_build_record(const char* refname, int lineno, bool add2namespace, TupleDesc tupleDesc) { PLpgSQL_rec* rec = NULL; int varno; @@ -3666,7 +3666,8 @@ PLpgSQL_rec* plpgsql_build_record(const char* refname, int lineno, bool add2name rec->refname = pstrdup(refname); rec->lineno = lineno; rec->tup = NULL; - rec->tupdesc = NULL; + rec->tupdesc = tupleDesc; + rec->freetupdesc = (tupleDesc != NULL) ? true : false; rec->freetup = false; varno = plpgsql_adddatum((PLpgSQL_datum*)rec); char* pkgname = NULL; @@ -4909,6 +4910,10 @@ TupleDesc getCursorTupleDesc(PLpgSQL_expr* expr, bool isOnlySelect, bool isOnlyP PG_TRY(); { List* parsetreeList = pg_parse_query(expr->query); + if (parsetreeList == NULL) { + ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("unexpected null parsetree list"))); + } ListCell* cell = NULL; List* queryList = NIL; foreach(cell, parsetreeList) { @@ -4938,6 +4943,10 @@ TupleDesc getCursorTupleDesc(PLpgSQL_expr* expr, bool isOnlySelect, bool isOnlyP queryList = pg_analyze_and_rewrite_params(parsetree, expr->query, (ParserSetupHook)plpgsql_parser_setup, (void*)expr); } + if (queryList == NULL) { + ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("unexpected null query list"))); + } Query* query = (Query*)linitial(queryList); Assert(IsA(query, Query)); if (!isOnlyParse) { diff --git a/src/common/pl/plpgsql/src/pl_package.cpp b/src/common/pl/plpgsql/src/pl_package.cpp index 31806c865..e3b25c5c2 100644 --- a/src/common/pl/plpgsql/src/pl_package.cpp +++ b/src/common/pl/plpgsql/src/pl_package.cpp @@ -341,7 +341,15 @@ int plpgsql_pkg_add_unknown_var_to_namespace(List* name) bool isSamePkg = false; PLpgSQL_datum* datum = GetPackageDatum(name, &isSamePkg); if (datum != NULL) { - return plpgsql_build_pkg_variable(name, datum, isSamePkg); + /* + * The current memory context is the temp context when this function is called by yylex_inparam etc., + * so we should switch to the function context. + * If adding a package var, plpgsql_ns_additem will switch to the package context.
+ */ + MemoryContext oldCxt = MemoryContextSwitchTo(u_sess->plsql_cxt.curr_compile_context->compile_cxt); + int varno = plpgsql_build_pkg_variable(name, datum, isSamePkg); + (void)MemoryContextSwitchTo(oldCxt); + return varno; } else { return -1; } diff --git a/src/gausskernel/cbb/instruments/event/instr_waitevent.cpp b/src/gausskernel/cbb/instruments/event/instr_waitevent.cpp index 5623788be..3bb1fcccc 100644 --- a/src/gausskernel/cbb/instruments/event/instr_waitevent.cpp +++ b/src/gausskernel/cbb/instruments/event/instr_waitevent.cpp @@ -33,8 +33,8 @@ #include "storage/lmgr.h" #include "workload/statctl.h" #include "instruments/instr_statement.h" -#include "instruments/instr_waitevent.h" #include "ddes/dms/ss_dms.h" +#include "instruments/instr_waitevent.h" const int MASK_CLASS_ID = 0xFF000000; const int MASK_EVENT_ID = 0x00FFFFFF; @@ -402,7 +402,7 @@ static void set_dms_event_tuple_value(WaitInfo* gsInstrWaitInfo, Datum* values, values[++i] = CStringGetTextDatum("DMS_EVENT"); values[++i] = CStringGetTextDatum(pgstat_get_wait_dms(WaitEventDMS(eventId + PG_WAIT_DMS))); unsigned long long cnt = 0; - unsigned long long time = 0; + unsigned long long time = 0; if (g_instance.dms_cxt.dmsInited) { dms_get_event(dms_wait_event_t(eventId), &cnt, &time); } diff --git a/src/gausskernel/cbb/instruments/gs_stack/gs_stack.cpp b/src/gausskernel/cbb/instruments/gs_stack/gs_stack.cpp index 0c732249e..09698371d 100644 --- a/src/gausskernel/cbb/instruments/gs_stack/gs_stack.cpp +++ b/src/gausskernel/cbb/instruments/gs_stack/gs_stack.cpp @@ -30,7 +30,7 @@ #include "utils/elf_parser.h" #include -#define MAX_LEN_KEY 20 +#define MAX_LEN_KEY 30 #define GS_STACK_HASHTBL "stack hash table" const int FINISH_STACK = 2; @@ -261,8 +261,11 @@ bool find_symbol_entry(const char *filename, HTAB *gs_stack_hashtbl, GsStackEntr char* base_name = basename((char*)filename); key_gs_stack.length_name = strlen(base_name); - n_ret = sprintf_s(key_gs_stack.elfname, MAX_LEN_KEY, "%s", base_name); - securec_check_ss(n_ret, "\0", "\0"); + n_ret = snprintf_s(key_gs_stack.elfname, MAX_LEN_KEY, MAX_LEN_KEY - 1, "%s", base_name); + if (n_ret < 0) { + /* if file name longer than keysize, we just choose first keysize byte from filename as key. 
*/ + ereport(LOG, (errmsg("elf name %s is too long.", base_name))); + } *entry = (GsStackEntry *)hash_search(gs_stack_hashtbl, (const void*)(&key_gs_stack), HASH_ENTER, &found); @@ -421,28 +424,37 @@ void get_stack_according_to_tid(ThreadId tid, StringInfoData* call_stack return; } (void)LWLockAcquire(GsStackLock, LW_EXCLUSIVE); - init_backtrace_info(); + PG_TRY(); + { + init_backtrace_info(); - signal_child(tid, SIGURG, -1); - for (i = 0; i < MAX_WAIT; i++) { - if (ready_to_get_backtrace()) { - break; + signal_child(tid, SIGURG, -1); + for (i = 0; i < MAX_WAIT; i++) { + if (ready_to_get_backtrace()) { + break; + } + pg_usleep(US_PER_WAIT); } - pg_usleep(US_PER_WAIT); + ereport(LOG, (errmsg("wait %d times.", i))); + if (i == MAX_WAIT) { + ereport(WARNING, + (errmodule(MOD_GSSTACK), + errcode(ERRCODE_INVALID_STATUS), + (errmsg("can not get backtrace for thread %lu.", tid), + errdetail("This thread maybe finished," + "or the signal handler of this thread had not been registed.")))); + appendStringInfo(call_stack, "thread %lu not available\n", tid); + } else { + get_stack(call_stack); + } + finish_backtrace(); } - ereport(LOG, (errmsg("wait %d times.", i))); - if (i == MAX_WAIT) { - ereport(WARNING, - (errmodule(MOD_GSSTACK), - errcode(ERRCODE_INVALID_STATUS), - (errmsg("can not get backtrace for thread %lu.", tid), - errdetail("This thread maybe finished, or the signal handler of this thread had not been registed.")))); - appendStringInfo(call_stack, "thread %lu not available\n", tid); - } else { - get_stack(call_stack); + PG_CATCH(); + { + LWLockRelease(GsStackLock); + PG_RE_THROW(); } - - finish_backtrace(); + PG_END_TRY(); LWLockRelease(GsStackLock); } diff --git a/src/gausskernel/cbb/instruments/percentile/percentile.cpp b/src/gausskernel/cbb/instruments/percentile/percentile.cpp index b8af55149..85069f650 100755 --- a/src/gausskernel/cbb/instruments/percentile/percentile.cpp +++ b/src/gausskernel/cbb/instruments/percentile/percentile.cpp @@ -304,6 +304,7 @@ void PercentileSpace::calculatePercentileOfSingleNode(void) { (void)MemoryContextSwitchTo(oldcxt); pfree_ext(sqlRT); + LWLockReleaseAll(); FlushErrorState(); elog(WARNING, "Percentile job failed"); } @@ -323,6 +324,7 @@ void PercentileSpace::calculatePercentileOfMultiNode(void) /* free all handles */ release_pgxc_handles(t_thrd.percentile_cxt.pgxc_all_handles); t_thrd.percentile_cxt.pgxc_all_handles = NULL; + LWLockReleaseAll(); FlushErrorState(); elog(WARNING, "Percentile job failed"); } diff --git a/src/gausskernel/cbb/instruments/statement/instr_statement.cpp b/src/gausskernel/cbb/instruments/statement/instr_statement.cpp index 5e0b7fe29..afccc4b6b 100755 --- a/src/gausskernel/cbb/instruments/statement/instr_statement.cpp +++ b/src/gausskernel/cbb/instruments/statement/instr_statement.cpp @@ -990,6 +990,7 @@ static void SetupSignal(void) (void)gspqsignal(SIGTTOU, SIG_DFL); (void)gspqsignal(SIGCONT, SIG_DFL); (void)gspqsignal(SIGWINCH, SIG_DFL); + (void)gspqsignal(SIGURG, print_stack); gs_signal_setmask(&t_thrd.libpq_cxt.UnBlockSig, NULL); (void)gs_signal_unblock_sigusr2(); @@ -2417,7 +2418,7 @@ static void get_wait_events_full_info(StatementStatContext *statement_stat, Stri event_idx = virt_event_idx - wait_event_io_event_max_index; event_str = pgstat_get_wait_dms(WaitEventDMS(event_idx + PG_WAIT_DMS)); event_type = STATEMENT_EVENT_TYPE_DMS; - }else if (virt_event_idx < wait_event_lock_event_max_index) { + } else if (virt_event_idx < wait_event_lock_event_max_index) { event_idx = virt_event_idx - wait_event_dms_event_max_index; 
event_str = GetLockNameFromTagType(event_idx); event_type = STATEMENT_EVENT_TYPE_LOCK; diff --git a/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp b/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp index 4a0deec3c..f8323e2af 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_bufmgr.cpp @@ -221,10 +221,11 @@ void SmgrNetPageCheckDiskLSN(BufferDesc *buf_desc, ReadBufferMode read_mode, con SMGR_READ_STATUS rdStatus; if (pblk != NULL) { rdStatus = SmgrNetPageCheckRead(smgr->smgr_rnode.node.spcNode, smgr->smgr_rnode.node.dbNode, pblk->relNode, - buf_desc->tag.forkNum, pblk->block, (char *)temp_buf); + buf_desc->tag.forkNum, pblk->block, (char *)temp_buf); } else if (buf_desc->extra->seg_fileno != EXTENT_INVALID) { rdStatus = SmgrNetPageCheckRead(smgr->smgr_rnode.node.spcNode, smgr->smgr_rnode.node.dbNode, - buf_desc->extra->seg_fileno, buf_desc->tag.forkNum, buf_desc->extra->seg_blockno, (char *)temp_buf); + buf_desc->extra->seg_fileno, buf_desc->tag.forkNum, + buf_desc->extra->seg_blockno, (char *)temp_buf); } else { rdStatus = smgrread(smgr, buf_desc->tag.forkNum, buf_desc->tag.blockNum, (char *)temp_buf); } diff --git a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp index c96897b3f..6fd448f32 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp @@ -683,11 +683,12 @@ static void CBVerifyPage(dms_buf_ctrl_t *buf_ctrl, char *new_page) if (buf_desc->extra->seg_fileno == EXTENT_INVALID) { buf_desc->extra->seg_fileno = buf_ctrl->seg_fileno; buf_desc->extra->seg_blockno = buf_ctrl->seg_blockno; - } else if (buf_desc->extra->seg_fileno != buf_ctrl->seg_fileno || buf_desc->extra->seg_blockno != buf_ctrl->seg_blockno) { + } else if (buf_desc->extra->seg_fileno != buf_ctrl->seg_fileno || + buf_desc->extra->seg_blockno != buf_ctrl->seg_blockno) { ereport(PANIC, (errmsg("[%u/%u/%u/%d/%d %d-%u] location mismatch, seg_fileno:%d, seg_blockno:%u", - buf_desc->tag.rnode.spcNode, buf_desc->tag.rnode.dbNode, buf_desc->tag.rnode.relNode, - buf_desc->tag.rnode.bucketNode, buf_desc->tag.rnode.opt, buf_desc->tag.forkNum, buf_desc->tag.blockNum, - buf_desc->extra->seg_fileno, buf_desc->extra->seg_blockno))); + buf_desc->tag.rnode.spcNode, buf_desc->tag.rnode.dbNode, buf_desc->tag.rnode.relNode, + buf_desc->tag.rnode.bucketNode, buf_desc->tag.rnode.opt, buf_desc->tag.forkNum, + buf_desc->tag.blockNum, buf_desc->extra->seg_fileno, buf_desc->extra->seg_blockno))); } /* page content is not valid */ @@ -1331,7 +1332,6 @@ static int CBFlushCopy(void *db_handle, char *pageid) LockBuffer(buffer, BUFFER_LOCK_SHARE); BufferDesc* buf_desc = GetBufferDescriptor(buffer - 1); XLogRecPtr pagelsn = BufferGetLSN(buf_desc); - if (XLByteLT(g_instance.dms_cxt.ckptRedo, pagelsn)) { dms_buf_ctrl_t *buf_ctrl = GetDmsBufCtrl(buffer - 1); buf_ctrl->state |= BUF_DIRTY_NEED_FLUSH; diff --git a/src/gausskernel/ddes/adapter/ss_transaction.cpp b/src/gausskernel/ddes/adapter/ss_transaction.cpp index d62ef10b4..36ef6cc15 100644 --- a/src/gausskernel/ddes/adapter/ss_transaction.cpp +++ b/src/gausskernel/ddes/adapter/ss_transaction.cpp @@ -229,7 +229,7 @@ void SSTransactionIdIsInProgress(TransactionId transactionId, bool *in_progress) { dms_context_t dms_ctx; InitDmsContext(&dms_ctx); - dms_ctx.xid_ctx.xid = *(uint64 *)(&transactionId); + dms_ctx.xid_ctx.xid = *(uint64 *)(&transactionId); do { dms_ctx.xid_ctx.inst_id = (unsigned char)SS_PRIMARY_ID; diff --git 
a/src/gausskernel/optimizer/commands/copy.cpp b/src/gausskernel/optimizer/commands/copy.cpp index 8f898ff44..2ad65be5b 100644 --- a/src/gausskernel/optimizer/commands/copy.cpp +++ b/src/gausskernel/optimizer/commands/copy.cpp @@ -463,6 +463,11 @@ static void CopySendData(CopyState cstate, const void* databuf, int datasize) appendBinaryStringInfo(cstate->fe_msgbuf, (const char*)databuf, datasize); } +static void CopySendDumpFileNullStr(CopyState cstate, const char* str) +{ + appendBinaryStringInfo(cstate->fe_msgbuf, str, 1); +} + void CopySendString(CopyState cstate, const char* str) { appendBinaryStringInfo(cstate->fe_msgbuf, str, strlen(str)); @@ -1686,6 +1691,12 @@ void ProcessCopyOptions(CopyState cstate, bool is_from, List* options) cstate->without_escaping = defGetBoolean(defel); noescapingSpecified = true; } else if (strcmp(defel->defname, "formatter") == 0) { + if (IS_PGXC_COORDINATOR || IS_SINGLE_NODE) { + if (cstate->fileformat != FORMAT_FIXED) { + ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("The formatter must be used together with the fixed length."))); + } + } TransformFormatter(cstate, (List*)(defel->arg)); } else if (strcmp(defel->defname, "fileheader") == 0) { if (cstate->headerFilename) @@ -3361,7 +3372,11 @@ void CopyOneRowTo(CopyState cstate, Oid tupleOid, Datum* values, const bool* nul switch (cstate->fileformat) { case FORMAT_CSV: case FORMAT_TEXT: - CopySendString(cstate, cstate->null_print_client); + if (cstate->is_dumpfile) { + CopySendDumpFileNullStr(cstate, cstate->null_print_client); + } else { + CopySendString(cstate, cstate->null_print_client); + } break; case FORMAT_BINARY: CopySendInt32(cstate, -1); diff --git a/src/gausskernel/optimizer/commands/eventcmds.cpp b/src/gausskernel/optimizer/commands/eventcmds.cpp index d0fb20353..a7aee72ac 100755 --- a/src/gausskernel/optimizer/commands/eventcmds.cpp +++ b/src/gausskernel/optimizer/commands/eventcmds.cpp @@ -327,10 +327,29 @@ Datum TranslateArg(char *act_name, Node *act_node) return result; } +void CheckDefinerPriviledge(char *user_name) +{ + Oid user_oid = GetSysCacheOid1(AUTHNAME, CStringGetDatum(user_name)); + int init_user_id = 10; + if (user_oid != GetUserId()) { + if (g_instance.attr.attr_security.enablePrivilegesSeparate) { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("definer_name cannot be specified when PrivilegesSeparate is enabled."))); + } else if (user_oid == init_user_id) { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("definer_name cannot be specified as the initial user."))); + } else if (is_role_independent(user_oid) || isMonitoradmin(user_oid) || isOperatoradmin(user_oid)) { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("definer_name cannot be specified as a private user, operator admin, or monitoradmin."))); + } + } +} + Datum SetDefinerName(char *def_name, Datum program_name, char** definer_oid) { Datum curuser = get_priv_user(program_name, CharGetDatum(JOB_INTYPE_PLAIN)); if (def_name) { + CheckDefinerPriviledge(def_name); if (!superuser()) { ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("The current user does not have sufficient permissions to specify the definer."))); @@ -794,6 +813,8 @@ void AlterEventCommand(AlterEventStmt *stmt) /* Check if object is visible for current user. 
*/ check_object_is_visible(ev_name, false); if (stmt->def_name) { + Value *ev_definer_node = (Value *)stmt->def_name->arg; + CheckDefinerPriviledge(ev_definer_node->val.str); if (!superuser()) { ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("The current user does not have sufficient permissions to specify the definer."))); diff --git a/src/gausskernel/optimizer/commands/explain.cpp b/src/gausskernel/optimizer/commands/explain.cpp index 841aa7989..f639fe019 100755 --- a/src/gausskernel/optimizer/commands/explain.cpp +++ b/src/gausskernel/optimizer/commands/explain.cpp @@ -216,7 +216,8 @@ static void show_datanode_time(ExplainState* es, PlanState* planstate); static void ShowStreamRunNodeInfo(Stream* stream, ExplainState* es); static void ShowRunNodeInfo(const ExecNodes* en, ExplainState* es, const char* qlabel); template -static void show_datanode_hash_info(ExplainState* es, int nbatch, int nbuckets_original, int nbatch_original, int nbuckets, long spacePeakKb); +static void show_datanode_hash_info(ExplainState *es, int nbatch, int nbuckets_original, int nbatch_original, + int nbuckets, long spacePeakKb); static void ShowRoughCheckInfo(ExplainState* es, Instrumentation* instrument, int nodeIdx, int smpIdx); static void show_hashAgg_info(AggState* hashaggstate, ExplainState* es); static void ExplainPrettyList(List* data, ExplainState* es); @@ -4321,7 +4322,8 @@ static void show_sort_info(SortState* sortstate, ExplainState* es) } template -static void show_datanode_hash_info(ExplainState* es, int nbatch, int nbuckets_original, int nbatch_original, int nbuckets, long spacePeakKb) +static void show_datanode_hash_info(ExplainState *es, int nbatch, int nbuckets_original, int nbatch_original, + int nbuckets, long spacePeakKb) { if (es->format != EXPLAIN_FORMAT_TEXT) { ExplainPropertyLong("Hash Buckets", nbuckets, es); @@ -4334,7 +4336,7 @@ static void show_datanode_hash_info(ExplainState* es, int nbatch, int nbuckets_o if (nbatch_original != nbatch) { appendStringInfo(es->planinfo->m_staticInfo->info_str, " Buckets: %d (originally %d) Batches: %d (originally %d) Memory Usage: %ldkB\n", - nbuckets, + nbuckets, nbuckets_original, nbatch, nbatch_original, @@ -4350,7 +4352,7 @@ static void show_datanode_hash_info(ExplainState* es, int nbatch, int nbuckets_o if (nbatch_original != nbatch) { appendStringInfo(es->str, " Buckets: %d (originally %d) Batches: %d (originally %d) Memory Usage: %ldkB\n", - nbuckets, + nbuckets, nbuckets_original, nbatch, nbatch_original, @@ -4924,10 +4926,11 @@ static void show_hash_info(HashState* hashstate, ExplainState* es) nbatch = hashinfo.nbatch; nbatch_original = hashinfo.nbatch_original; nbuckets = hashinfo.nbuckets; - if (es->analyze) + if (es->analyze) { nbuckets_original = hashtable ? 
hashtable->nbuckets_original : nbuckets; - else + } else { nbuckets_original = nbuckets; + } /* wlm_statistics_plan_max_digit: this variable is used to judge, isn't it a active sql */ if (es->wlm_statistics_plan_max_digit == NULL) { if (es->format == EXPLAIN_FORMAT_TEXT) diff --git a/src/gausskernel/optimizer/commands/extension.cpp b/src/gausskernel/optimizer/commands/extension.cpp index c9907e6a4..805c07d0b 100644 --- a/src/gausskernel/optimizer/commands/extension.cpp +++ b/src/gausskernel/optimizer/commands/extension.cpp @@ -58,10 +58,6 @@ #include "utils/snapmgr.h" #include "access/heapam.h" -#if (!defined(ENABLE_MULTIPLE_NODES)) && (!defined(ENABLE_PRIVATEGAUSS)) -extern void InitBSqlPluginHookIfNeeded(); -#endif - /* Globally visible state variables */ THR_LOCAL bool creating_extension = false; diff --git a/src/gausskernel/optimizer/commands/indexcmds.cpp b/src/gausskernel/optimizer/commands/indexcmds.cpp index 07ad87841..c81ed349d 100644 --- a/src/gausskernel/optimizer/commands/indexcmds.cpp +++ b/src/gausskernel/optimizer/commands/indexcmds.cpp @@ -474,15 +474,18 @@ static bool CheckLedgerIndex_walker(Node* node, int* context) { if (IsA(node, Var)) { Var *var = (Var*)node; - if (var->varattno == *context) + if (var->varattno == *context) { return true; + } } if (IsA(node, IndexElem)) { IndexElem *elem = (IndexElem *)node; - if (elem->name && strcmp(elem->name, "hash") == 0) + if (elem->name && strcmp(elem->name, "hash") == 0) { return true; - if (elem->expr) + } + if (elem->expr) { return CheckLedgerIndex_walker(elem->expr, context); + } return false; } @@ -1804,6 +1807,13 @@ ObjectAddress DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, return address; } + if (RELATION_IS_PARTITIONED(rel)) { + releasePartitionOidList(&partitionOidList); + } + if (RelationIsSubPartitioned(rel)) { + ReleaseSubPartitionOidList(&subPartitionOidList); + } + /* Roll back any GUC changes executed by index functions. 
*/ AtEOXact_GUC(false, root_save_nestlevel); @@ -5376,11 +5386,11 @@ static void CheckIndexParamsNumber(IndexStmt* stmt) { } } - static bool CheckIdxParamsOwnPartKey(Relation rel, const List* indexParams) { - if (!PartExprKeyIsNull(rel, NULL)) + if (!PartExprKeyIsNull(rel, NULL)) { return false; + } int2vector* partKey = ((RangePartitionMap*)rel->partMap)->partitionKey; for (int i = 0; i < partKey->dim1; i++) { int2 attNum = partKey->values[i]; diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index bcf3334a7..267913be4 100755 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -16113,7 +16113,6 @@ static ObjectAddress ATExecAlterColumnType(AlteredTableInfo* tab, Relation rel, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter type of column \"%s\" twice", colName))); } - /* Look up the target type (should not fail, since prep found it) */ typeTuple = typenameType(NULL, typname, &targettypmod); tform = (Form_pg_type)GETSTRUCT(typeTuple); @@ -21987,7 +21986,6 @@ static void CheckListPartitionKeyType(FormData_pg_attribute* attrs, List* pos) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("column %s cannot serve as a list partitioning column because of its datatype", NameStr(attrs[location].attname)))); - } } } @@ -22276,8 +22274,8 @@ static void FillListPartitionValueList(List** result, RowExpr* row, const List* /* transform the const to target datatype */ targetExpr = (Const*)GetTargetValue(&attrs[lfirst_int(posCell)], (Const*)lfirst(keyCell), false, false); if (targetExpr == NULL) { - for (int i = 0; i <= boundId; i++) { - list_free_ext(result[boundId]); + for (int i = 0; i < boundId; i++) { + list_free_ext(result[i]); } pfree_ext(result); ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), @@ -22288,7 +22286,8 @@ static void FillListPartitionValueList(List** result, RowExpr* row, const List* } } -static List** GetListPartitionValueLists(const List* keyPos, FormData_pg_attribute* attrs, List* value, bool partkeyIsFunc) +static List **GetListPartitionValueLists(const List *keyPos, FormData_pg_attribute *attrs, List *value, + bool partkeyIsFunc) { Node* cell = NULL; ListCell* valueCell = NULL; @@ -22315,8 +22314,8 @@ static List** GetListPartitionValueLists(const List* keyPos, FormData_pg_attribu /* transform the const to target datatype */ targetExpr = (Const*)GetTargetValue(&attrs[lfirst_int(keyPos->head)], (Const*)cell, false, partkeyIsFunc); if (targetExpr == NULL) { - for (int i = 0; i <= count; i++) { - list_free_ext(result[count]); + for (int i = 0; i < count; i++) { + list_free_ext(result[i]); } pfree_ext(result); ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), @@ -22336,7 +22335,6 @@ static List** GetListPartitionValueLists(const List* keyPos, FormData_pg_attribu return result; } - /* * @@GaussDB@@ * Target : data partition @@ -22718,7 +22716,8 @@ void CompareListValue(const List* pos, FormData_pg_attribute* attrs, List *parti * Description : * Notes : */ -void ComparePartitionValue(List* pos, FormData_pg_attribute* attrs, List *partitionList, bool isPartition, bool partkeyIsFunc) +void ComparePartitionValue(List *pos, FormData_pg_attribute *attrs, List *partitionList, bool isPartition, + bool partkeyIsFunc) { Const* pre_value = NULL; Const* cur_value = NULL; @@ -24088,7 +24087,8 @@ void ATExecSetIndexVisibleState(Oid objOid, bool newState) replaces[Anum_pg_index_indisvisible - 1] = true; values[Anum_pg_index_indisvisible - 1] = 
DatumGetBool(newState); - newitup = (HeapTuple)tableam_tops_modify_tuple(sys_tuple, RelationGetDescr(sys_table), values, nulls, replaces); + newitup = + (HeapTuple)tableam_tops_modify_tuple(sys_tuple, RelationGetDescr(sys_table), values, nulls, replaces); simple_heap_update(sys_table, &(sys_tuple->t_self), newitup); CatalogUpdateIndexes(sys_table, newitup); tableam_tops_free_tuple(newitup); diff --git a/src/gausskernel/optimizer/commands/trigger.cpp b/src/gausskernel/optimizer/commands/trigger.cpp index 377e0936d..d1676dfbb 100644 --- a/src/gausskernel/optimizer/commands/trigger.cpp +++ b/src/gausskernel/optimizer/commands/trigger.cpp @@ -661,7 +661,6 @@ ObjectAddress CreateTrigger(CreateTrigStmt* stmt, const char* queryString, Oid r NULL); /* @hdfs informational constraint */ } - values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel)); values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein, CStringGetDatum(trigname)); values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid); diff --git a/src/gausskernel/optimizer/commands/variable.cpp b/src/gausskernel/optimizer/commands/variable.cpp index bf2d0c439..e622da2c3 100644 --- a/src/gausskernel/optimizer/commands/variable.cpp +++ b/src/gausskernel/optimizer/commands/variable.cpp @@ -718,10 +718,10 @@ bool check_mix_replication_param(bool* newval, void** extra, GucSource source) return true; } -static void check_danger_character(const char *inputEnvValue) +static bool check_danger_character(const char *inputEnvValue) { if (inputEnvValue == NULL) { - return; + return true; } const char *dangerCharacterList[] = { ";", "`", "\\", "'", "\"", ">", "<", "&", "|", "!", NULL }; @@ -729,9 +729,11 @@ static void check_danger_character(const char *inputEnvValue) for (i = 0; dangerCharacterList[i] != NULL; i++) { if (strstr(inputEnvValue, dangerCharacterList[i]) != NULL) { - ereport(ERROR, (errmsg("Failed to check input value: invalid token \"%s\".\n", dangerCharacterList[i]))); + ereport(LOG, (errmsg("Failed to check input value: invalid token \"%s\".\n", dangerCharacterList[i]))); + return false; } } + return true; } bool check_security_path(char **newval, void **extra, GucSource source) @@ -741,12 +743,11 @@ bool check_security_path(char **newval, void **extra, GucSource source) } // judge length if (strlen(*newval) > PATH_MAX) { - ereport(ERROR, (errmsg("The length of path cannot be more than \"%d\".\n", PATH_MAX))); + ereport(LOG, (errmsg("The length of path cannot be more than \"%d\".\n", PATH_MAX))); return false; } // danger character - check_danger_character(*const_cast(newval)); - return true; + return check_danger_character(*const_cast(newval)); } /* diff --git a/src/gausskernel/optimizer/plan/createplan.cpp b/src/gausskernel/optimizer/plan/createplan.cpp index 43a41899d..d00c16a0d 100755 --- a/src/gausskernel/optimizer/plan/createplan.cpp +++ b/src/gausskernel/optimizer/plan/createplan.cpp @@ -168,7 +168,7 @@ static BitmapOr* make_bitmap_or(List* bitmapplans); static NestLoop* make_nestloop(List* tlist, List* joinclauses, List* otherclauses, List* nestParams, Plan* lefttree, Plan* righttree, JoinType jointype, bool inner_unique); static HashJoin* make_hashjoin(List* tlist, List* joinclauses, List* otherclauses, List* hashclauses, Plan* lefttree, - Plan* righttree, JoinType jointype, List *hashcollations, bool inner_unique); + Plan* righttree, JoinType jointype, bool inner_unique); static Hash* make_hash( Plan* lefttree, Oid skewTable, AttrNumber skewColumn, bool skewInherit, Oid skewColType, int32 
skewColTypmod); static MergeJoin* make_mergejoin(List* tlist, List* joinclauses, List* otherclauses, List* mergeclauses, @@ -4812,7 +4812,6 @@ static HashJoin* create_hashjoin_plan(PlannerInfo* root, HashPath* best_path, Pl HashJoin* join_plan = NULL; Hash* hash_plan = NULL; Relids left_relids = NULL; - List *hashcollations = NIL; /* Sort join qual clauses into best execution order */ joinclauses = order_qual_clauses(root, best_path->jpath.joinrestrictinfo); @@ -4888,19 +4887,12 @@ static HashJoin* create_hashjoin_plan(PlannerInfo* root, HashPath* best_path, Pl } } - ListCell *lc; - foreach(lc, hashclauses) - { - OpExpr *hclause = lfirst_node(OpExpr, lc); - hashcollations = lappend_oid(hashcollations, hclause->inputcollid); - } - /* * Build the hash node and hash join node. */ hash_plan = make_hash(inner_plan, skewTable, skewColumn, skewInherit, skewColType, skewColTypmod); join_plan = make_hashjoin(tlist, joinclauses, otherclauses, hashclauses, outer_plan, (Plan*)hash_plan, - best_path->jpath.jointype, hashcollations, best_path->jpath.inner_unique); + best_path->jpath.jointype, best_path->jpath.inner_unique); /* * @hdfs @@ -6615,7 +6607,7 @@ HashJoin* create_direct_hashjoin( } hash_plan = (Plan*)make_hash(innerPlan, skewTable, skewColumn, skewInherit, skewColType, skewColTypmod); - join_plan = make_hashjoin(tlist, joinClauses, NIL, hashclauses, outerPlan, hash_plan, joinType, NULL, false); + join_plan = make_hashjoin(tlist, joinClauses, NIL, hashclauses, outerPlan, hash_plan, joinType, false); /* estimate the mem_info for join_plan, refered to the function initial_cost_hashjoin */ estimate_directHashjoin_Cost(root, hashclauses, outerPlan, hash_plan, join_plan); @@ -6828,7 +6820,7 @@ Plan* create_direct_righttree( } static HashJoin* make_hashjoin(List* tlist, List* joinclauses, List* otherclauses, List* hashclauses, Plan* lefttree, - Plan* righttree, JoinType jointype, List *hashcollations, bool inner_unique) + Plan* righttree, JoinType jointype, bool inner_unique) { HashJoin* node = makeNode(HashJoin); Plan* plan = &node->join.plan; @@ -6842,7 +6834,6 @@ static HashJoin* make_hashjoin(List* tlist, List* joinclauses, List* otherclause node->join.jointype = jointype; node->join.inner_unique = inner_unique; node->join.joinqual = joinclauses; - node->hash_collations = hashcollations; return node; } diff --git a/src/gausskernel/optimizer/plan/planstartwith.cpp b/src/gausskernel/optimizer/plan/planstartwith.cpp index fda5e25b4..4ac937485 100644 --- a/src/gausskernel/optimizer/plan/planstartwith.cpp +++ b/src/gausskernel/optimizer/plan/planstartwith.cpp @@ -1187,8 +1187,7 @@ static bool PullUpConnectByFuncExprWalker(Node *node, PullUpConnectByFuncExprCon } } - return expression_tree_walker(node, - (bool (*)())PullUpConnectByFuncExprWalker, (void*)context); + return expression_tree_walker(node, (bool (*)())PullUpConnectByFuncExprWalker, (void *)context); } List *pullUpConnectByFuncExprs(Node* node) diff --git a/src/gausskernel/optimizer/rewrite/rewriteHandler.cpp b/src/gausskernel/optimizer/rewrite/rewriteHandler.cpp index 97f9fb536..16a8e672b 100644 --- a/src/gausskernel/optimizer/rewrite/rewriteHandler.cpp +++ b/src/gausskernel/optimizer/rewrite/rewriteHandler.cpp @@ -4657,6 +4657,7 @@ static void _copy_top_HintState(HintState *dest, HintState *src) } char* GetInsertIntoStmt(CreateTableAsStmt* stmt, bool hasNewColumn) + { /* Get the SELECT query string */ /* diff --git a/src/gausskernel/process/postmaster/autovacuum.cpp b/src/gausskernel/process/postmaster/autovacuum.cpp index 
276f5b2f6..59f3d9c74 100755 --- a/src/gausskernel/process/postmaster/autovacuum.cpp +++ b/src/gausskernel/process/postmaster/autovacuum.cpp @@ -1212,7 +1212,7 @@ NON_EXEC_STATIC void AutoVacWorkerMain() (void)gspqsignal(SIGFPE, FloatExceptionHandler); (void)gspqsignal(SIGCHLD, SIG_DFL); (void)gspqsignal(SIGHUP, SIG_IGN); - + (void)gspqsignal(SIGURG, print_stack); /* Early initialization */ BaseInit(); diff --git a/src/gausskernel/process/postmaster/barrier_arch.cpp b/src/gausskernel/process/postmaster/barrier_arch.cpp index b6d74aa68..96c38565b 100755 --- a/src/gausskernel/process/postmaster/barrier_arch.cpp +++ b/src/gausskernel/process/postmaster/barrier_arch.cpp @@ -196,6 +196,7 @@ static void BarrierArchSetupSignalHook(void) (void)gspqsignal(SIGTTOU, SIG_DFL); (void)gspqsignal(SIGCONT, SIG_DFL); (void)gspqsignal(SIGWINCH, SIG_DFL); + (void)gspqsignal(SIGURG, print_stack); /* We allow SIGQUIT (quickdie) at all times */ (void)sigdelset(&t_thrd.libpq_cxt.BlockSig, SIGQUIT); diff --git a/src/gausskernel/process/postmaster/barrier_creator.cpp b/src/gausskernel/process/postmaster/barrier_creator.cpp index 6a4f51600..d130fb60d 100755 --- a/src/gausskernel/process/postmaster/barrier_creator.cpp +++ b/src/gausskernel/process/postmaster/barrier_creator.cpp @@ -143,6 +143,7 @@ static void barrier_creator_setup_signal_hook(void) (void)gspqsignal(SIGTTOU, SIG_DFL); (void)gspqsignal(SIGCONT, SIG_DFL); (void)gspqsignal(SIGWINCH, SIG_DFL); + (void)gspqsignal(SIGURG, print_stack); /* We allow SIGQUIT (quickdie) at all times */ (void)sigdelset(&t_thrd.libpq_cxt.BlockSig, SIGQUIT); diff --git a/src/gausskernel/process/postmaster/barrier_preparse.cpp b/src/gausskernel/process/postmaster/barrier_preparse.cpp index dee4ee89c..c3e86df3a 100644 --- a/src/gausskernel/process/postmaster/barrier_preparse.cpp +++ b/src/gausskernel/process/postmaster/barrier_preparse.cpp @@ -198,6 +198,7 @@ void BarrierPreParseMain(void) (void)gspqsignal(SIGPIPE, SIG_IGN); (void)gspqsignal(SIGUSR1, BarrierPreParseSigUsr1Handler); (void)gspqsignal(SIGUSR2, SIG_IGN); + (void)gspqsignal(SIGURG, print_stack); /* * Reset some signals that are accepted by postmaster but not here diff --git a/src/gausskernel/process/postmaster/gaussdb_version.cpp b/src/gausskernel/process/postmaster/gaussdb_version.cpp index 7d54c400f..7c158c061 100644 --- a/src/gausskernel/process/postmaster/gaussdb_version.cpp +++ b/src/gausskernel/process/postmaster/gaussdb_version.cpp @@ -120,7 +120,8 @@ static bool isInitialized = false; * And reset after the package operation. * Please do not modify it. */ -const char *sha256_digests[SHA256_DIGESTS_COUNT] = {"5237e9ad5b6ecf8d0abba664972bdcb106595b9ec2f52083915e7c829d348f0d", "06354c2857fbf21e5862005a7e60ad210dc4b635dbde891d6e60cbddea465b16"}; +const char *sha256_digests[SHA256_DIGESTS_COUNT] = {"5237e9ad5b6ecf8d0abba664972bdcb106595b9ec2f52083915e7c829d348f0d", + "06354c2857fbf21e5862005a7e60ad210dc4b635dbde891d6e60cbddea465b16"}; /* The product control file information. */ static LicenseControl versionControl = {PRODUCT_VERSION_FILE, PRODUCT_VERSION_UNKNOWN, {{0}}, false}; /* The license control file information. 
*/ diff --git a/src/gausskernel/process/postmaster/pagerepair.cpp b/src/gausskernel/process/postmaster/pagerepair.cpp index a5ce26c51..eb0264f7d 100644 --- a/src/gausskernel/process/postmaster/pagerepair.cpp +++ b/src/gausskernel/process/postmaster/pagerepair.cpp @@ -600,7 +600,7 @@ static void SetupPageRepairSignalHook(void) (void)gspqsignal(SIGPIPE, SIG_IGN); (void)gspqsignal(SIGUSR1, PageRepairSigUsr1Handler); (void)gspqsignal(SIGUSR2, PageRepairSigUsr2Handler); - + (void)gspqsignal(SIGURG, print_stack); /* * Reset some signals that are accepted by postmaster but not here */ diff --git a/src/gausskernel/process/postmaster/pagewriter.cpp b/src/gausskernel/process/postmaster/pagewriter.cpp index 27e6f4cc6..64210d4ab 100755 --- a/src/gausskernel/process/postmaster/pagewriter.cpp +++ b/src/gausskernel/process/postmaster/pagewriter.cpp @@ -2153,7 +2153,8 @@ static bool check_buffer_dirty_flag(BufferDesc* buf_desc) XLByteLT(buf_desc->extra->lsn_on_disk, PageGetLSN(tmpBlock)) && RecoveryInProgress() && !segment_buf; if (ENABLE_DMS && check_lsn_not_match && - (XLogRecPtrIsInvalid(buf_desc->extra->lsn_on_disk) || GetDmsBufCtrl(buf_desc->buf_id)->state & BUF_DIRTY_NEED_FLUSH)) { + (XLogRecPtrIsInvalid(buf_desc->extra->lsn_on_disk) || + GetDmsBufCtrl(buf_desc->buf_id)->state & BUF_DIRTY_NEED_FLUSH)) { return false; } diff --git a/src/gausskernel/process/postmaster/pgaudit.cpp b/src/gausskernel/process/postmaster/pgaudit.cpp index 4b1d05e77..aae9dfda5 100755 --- a/src/gausskernel/process/postmaster/pgaudit.cpp +++ b/src/gausskernel/process/postmaster/pgaudit.cpp @@ -460,6 +460,7 @@ static void init_audit_signal_handlers() (void)gspqsignal(SIGTTOU, SIG_DFL); (void)gspqsignal(SIGCONT, SIG_DFL); (void)gspqsignal(SIGWINCH, SIG_DFL); + (void)gspqsignal(SIGURG, print_stack); gs_signal_setmask(&t_thrd.libpq_cxt.UnBlockSig, NULL); (void)gs_signal_unblock_sigusr2(); diff --git a/src/gausskernel/process/postmaster/pgstat.cpp b/src/gausskernel/process/postmaster/pgstat.cpp index 3d26b9776..08f2cfc0c 100644 --- a/src/gausskernel/process/postmaster/pgstat.cpp +++ b/src/gausskernel/process/postmaster/pgstat.cpp @@ -4729,7 +4729,7 @@ const char* pgstat_get_wait_dms(WaitEventDMS w) break; case WAIT_EVENT_LATCH_S_REMOTE: event_name = "LatchSRemote"; - break; + break; default: event_name = "unknown wait event"; break; diff --git a/src/gausskernel/runtime/executor/execQual.cpp b/src/gausskernel/runtime/executor/execQual.cpp index 0a4337f57..741682014 100644 --- a/src/gausskernel/runtime/executor/execQual.cpp +++ b/src/gausskernel/runtime/executor/execQual.cpp @@ -630,17 +630,18 @@ static Datum ExecEvalScalarVar(ExprState* exprstate, ExprContext* econtext, bool RightRefState* refState = econtext->rightRefState; int index = attnum - 1; - if (refState && refState->values && (IS_ENABLE_INSERT_RIGHT_REF(refState) || - (IS_ENABLE_UPSERT_RIGHT_REF(refState) && refState->hasExecs[index] && index < refState->colCnt))) { + if (refState && refState->values && + (IS_ENABLE_INSERT_RIGHT_REF(refState) || + (IS_ENABLE_UPSERT_RIGHT_REF(refState) && refState->hasExecs[index] && index < refState->colCnt))) { *isNull = refState->isNulls[index]; return refState->values[index]; } if (slot == nullptr) { - ereport(ERROR,(errcode(ERRCODE_INVALID_ATTRIBUTE), errmodule(MOD_EXECUTOR), + ereport(ERROR, (errcode(ERRCODE_INVALID_ATTRIBUTE), errmodule(MOD_EXECUTOR), errmsg("attribute number %d does not exists.", attnum))); } - + /* * If it's a user attribute, check validity (bogus system attnums will be * caught inside table's getattr). 
What we have to check for here is the diff --git a/src/gausskernel/runtime/executor/execTuples.cpp b/src/gausskernel/runtime/executor/execTuples.cpp index 87b86148a..741c8e516 100644 --- a/src/gausskernel/runtime/executor/execTuples.cpp +++ b/src/gausskernel/runtime/executor/execTuples.cpp @@ -205,7 +205,8 @@ void ExecResetTupleTable(List* tuple_table, /* tuple table */ } } -TupleTableSlot* ExecMakeTupleSlot(Tuple tuple, TableScanDesc tableScan, TupleTableSlot* slot, const TableAmRoutine* tam_ops) +TupleTableSlot *ExecMakeTupleSlot(Tuple tuple, TableScanDesc tableScan, TupleTableSlot *slot, + const TableAmRoutine *tam_ops) { if (unlikely(RELATION_CREATE_BUCKET(tableScan->rs_rd))) { tableScan = ((HBktTblScanDesc)tableScan)->currBktScan; @@ -673,8 +674,7 @@ Datum ExecFetchSlotTupleDatum(TupleTableSlot* slot) * to scribble on. * -------------------------------- */ -static FORCE_INLINE -HeapTuple ExecMaterializeSlot_impl(TupleTableSlot* slot) +static FORCE_INLINE HeapTuple ExecMaterializeSlot_impl(TupleTableSlot *slot) { /* * sanity checks @@ -685,7 +685,6 @@ HeapTuple ExecMaterializeSlot_impl(TupleTableSlot* slot) return tableam_tslot_materialize(slot); } - HeapTuple ExecMaterializeSlot(TupleTableSlot* slot) { return ExecMaterializeSlot_impl(slot); @@ -1150,10 +1149,11 @@ TupleTableSlot* ExecStoreDataRowTuple(char* msg, size_t len, Oid msgnode_oid, Tu slot->tts_flags &= ~TTS_FLAG_EMPTY; slot->tts_flags &= ~TTS_FLAG_SHOULDFREE; slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN; - if(should_free) + if (should_free) { slot->tts_flags |= TTS_FLAG_SHOULDFREE_ROW; - else + } else { slot->tts_flags &= ~TTS_FLAG_SHOULDFREE_ROW; + } slot->tts_tuple = NULL; slot->tts_mintuple = NULL; slot->tts_dataRow = msg; diff --git a/src/gausskernel/runtime/executor/execUtils.cpp b/src/gausskernel/runtime/executor/execUtils.cpp index 22eb185c5..8a3df3ebd 100644 --- a/src/gausskernel/runtime/executor/execUtils.cpp +++ b/src/gausskernel/runtime/executor/execUtils.cpp @@ -2716,7 +2716,7 @@ Datum GetTypeZeroValue(Form_pg_attribute att_tup) Type targetType = typeidType(att_tup->atttypid); result = stringTypeDatum(targetType, "", att_tup->atttypmod, true); ReleaseSysCache(targetType); - break; + break; } default: { bool typeIsVarlena = (!att_tup->attbyval) && (att_tup->attlen == -1); diff --git a/src/gausskernel/runtime/executor/nodeHash.cpp b/src/gausskernel/runtime/executor/nodeHash.cpp index a9a75f6d2..32babb28f 100644 --- a/src/gausskernel/runtime/executor/nodeHash.cpp +++ b/src/gausskernel/runtime/executor/nodeHash.cpp @@ -387,9 +387,8 @@ HashJoinTable ExecHashTableCreate(Hash* node, List* hashOperators, bool keepNull hashtable->outer_hashfunctions = (FmgrInfo*)palloc(nkeys * sizeof(FmgrInfo)); hashtable->inner_hashfunctions = (FmgrInfo*)palloc(nkeys * sizeof(FmgrInfo)); hashtable->hashStrict = (bool*)palloc(nkeys * sizeof(bool)); - hashtable->collations = (Oid *)palloc(nkeys * sizeof(Oid)); i = 0; - forboth (ho, hashOperators, hc, hash_collations) { + foreach (ho, hashOperators) { Oid hashop = lfirst_oid(ho); Oid left_hashfn; Oid right_hashfn; @@ -402,10 +401,19 @@ HashJoinTable ExecHashTableCreate(Hash* node, List* hashOperators, bool keepNull fmgr_info(left_hashfn, &hashtable->outer_hashfunctions[i]); fmgr_info(right_hashfn, &hashtable->inner_hashfunctions[i]); hashtable->hashStrict[i] = op_strict(hashop); - hashtable->collations[i] = lfirst_oid(hc); i++; } + if (hash_collations != NULL) { + int nums = list_length(hash_collations); + hashtable->collations = (Oid *)palloc(nums * sizeof(Oid)); + i = 0; + foreach(hc, 
hash_collations) { + hashtable->collations[i] = lfirst_oid(hc); + i++; + } + } + /* * Create temporary memory contexts in which to keep the hashtable working * storage. See notes in executor/hashjoin.h. @@ -1037,8 +1045,8 @@ static void ExecHashIncreaseNumBatches(HashJoinTable hashtable) hashtable->nbuckets = hashtable->nbuckets_optimal; hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal; - hashtable->buckets = (struct HashJoinTupleData**) repalloc( - hashtable->buckets, sizeof(HashJoinTuple) * hashtable->nbuckets); + hashtable->buckets = + (struct HashJoinTupleData **)repalloc(hashtable->buckets, sizeof(HashJoinTuple) * hashtable->nbuckets); } /* @@ -1133,8 +1141,9 @@ static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable) errno_t rc; /* do nothing if not an increase (it's called increase for a reason) */ - if (hashtable->nbuckets >= hashtable->nbuckets_optimal) + if (hashtable->nbuckets >= hashtable->nbuckets_optimal) { return; + } /* * We already know the optimal number of buckets, so let's just @@ -1159,10 +1168,7 @@ static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable) */ hashtable->buckets = (HashJoinTuple *)repalloc(hashtable->buckets, hashtable->nbuckets * sizeof(HashJoinTuple)); - rc = memset_s(hashtable->buckets, - sizeof(void *) * hashtable->nbuckets, - 0, - sizeof(void *) * hashtable->nbuckets); + rc = memset_s(hashtable->buckets, sizeof(void *) * hashtable->nbuckets, 0, sizeof(void *) * hashtable->nbuckets); securec_check(rc, "\0", "\0"); /* scan through all tuples in all chunks to rebuild the hash table */ @@ -1202,8 +1208,8 @@ static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable) * case by not forcing the slot contents into minimal form; not clear if it's * worth the messiness required. */ -void ExecHashTableInsert( - HashJoinTable hashtable, TupleTableSlot* slot, uint32 hashvalue, int planid, int dop, Instrumentation* instrument) +void ExecHashTableInsert(HashJoinTable hashtable, TupleTableSlot *slot, uint32 hashvalue, int planid, int dop, + Instrumentation *instrument) { MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot); int bucketno; @@ -1264,46 +1270,38 @@ void ExecHashTableInsert( hashtable->spacePeak = hashtable->spaceUsed; } bool sysBusy = gs_sysmemory_busy(hashtable->spaceUsed * dop, false); - if (hashtable->spaceUsed + int64(hashtable->nbuckets_optimal * sizeof(HashJoinTuple)) > hashtable->spaceAllowed - || sysBusy) { - AllocSetContext* set = (AllocSetContext*)(hashtable->hashCxt); + if (hashtable->spaceUsed + int64(hashtable->nbuckets_optimal * sizeof(HashJoinTuple)) > + hashtable->spaceAllowed || + sysBusy) { + AllocSetContext *set = (AllocSetContext *)(hashtable->hashCxt); if (sysBusy) { hashtable->causedBySysRes = true; hashtable->spaceAllowed = hashtable->spaceUsed; set->maxSpaceSize = hashtable->spaceUsed; /* if hashtable failed to grow, this branch can be kicked many times */ if (hashtable->growEnabled) { - MEMCTL_LOG(LOG, - "HashJoin(%d) early spilled, workmem: %ldKB, usedmem: %ldKB", - planid, - hashtable->spaceAllowed / 1024L, - hashtable->spaceUsed / 1024L); + MEMCTL_LOG(LOG, "HashJoin(%d) early spilled, workmem: %ldKB, usedmem: %ldKB", planid, + hashtable->spaceAllowed / 1024L, hashtable->spaceUsed / 1024L); pgstat_add_warning_early_spill(); } - /* try to auto spread memory if possible */ + /* try to auto spread memory if possible */ } else if (hashtable->curbatch == 0 && hashtable->maxMem > hashtable->spaceAllowed) { hashtable->spaceAllowed = hashtable->spaceUsed; int64 spreadMem = 
Min(Min(dywlm_client_get_memory() * 1024L, hashtable->spaceAllowed), - hashtable->maxMem - hashtable->spaceAllowed); + hashtable->maxMem - hashtable->spaceAllowed); if (spreadMem > hashtable->spaceAllowed * MEM_AUTO_SPREAD_MIN_RATIO) { hashtable->spaceAllowed += spreadMem; hashtable->spreadNum++; ExecHashIncreaseBuckets(hashtable); set->maxSpaceSize += spreadMem; - MEMCTL_LOG(DEBUG2, - "HashJoin(%d) auto mem spread %ldKB succeed, and work mem is %ldKB.", - planid, - spreadMem / 1024L, - hashtable->spaceAllowed / 1024L); + MEMCTL_LOG(DEBUG2, "HashJoin(%d) auto mem spread %ldKB succeed, and work mem is %ldKB.", planid, + spreadMem / 1024L, hashtable->spaceAllowed / 1024L); return; } /* if hashtable failed to grow, this branch can be kicked many times */ if (hashtable->growEnabled) { - MEMCTL_LOG(LOG, - "HashJoin(%d) auto mem spread %ldKB failed, and work mem is %ldKB.", - planid, - spreadMem / 1024L, - hashtable->spaceAllowed / 1024L); + MEMCTL_LOG(LOG, "HashJoin(%d) auto mem spread %ldKB failed, and work mem is %ldKB.", planid, + spreadMem / 1024L, hashtable->spaceAllowed / 1024L); if (hashtable->spreadNum) { pgstat_add_warning_spill_on_memory_spread(); } diff --git a/src/gausskernel/runtime/executor/nodeHashjoin.cpp b/src/gausskernel/runtime/executor/nodeHashjoin.cpp index 28e3a9b40..5a8c2c317 100755 --- a/src/gausskernel/runtime/executor/nodeHashjoin.cpp +++ b/src/gausskernel/runtime/executor/nodeHashjoin.cpp @@ -181,15 +181,15 @@ static TupleTableSlot* ExecHashJoin(PlanState* state) * create the hash table, sometimes we should keep nulls */ if (hashNode->ps.nodeContext) { - /*enable_memory_limit*/ + /* enable_memory_limit */ oldcxt = MemoryContextSwitchTo(hashNode->ps.nodeContext); } hashtable = ExecHashTableCreate((Hash*)hashNode->ps.plan, node->hj_HashOperators, - HJ_FILL_INNER(node) || node->js.nulleqqual != NIL, node->hj_hash_collations); + HJ_FILL_INNER(node) || node->js.nulleqqual != NIL, node->hj_hashCollations); if (oldcxt) { - /*enable_memory_limit*/ + /* enable_memory_limit */ MemoryContextSwitchTo(oldcxt); } @@ -525,6 +525,7 @@ HashJoinState* ExecInitHashJoin(HashJoin* node, EState* estate, int eflags) List* lclauses = NIL; List* rclauses = NIL; List* hoperators = NIL; + List* hcollations = NIL; ListCell* l = NULL; /* check for unsupported flags */ @@ -649,6 +650,7 @@ HashJoinState* ExecInitHashJoin(HashJoin* node, EState* estate, int eflags) lclauses = NIL; rclauses = NIL; hoperators = NIL; + hcollations = NIL; foreach (l, hjstate->hashclauses) { FuncExprState* fstate = (FuncExprState*)lfirst(l); OpExpr* hclause = NULL; @@ -659,11 +661,12 @@ HashJoinState* ExecInitHashJoin(HashJoin* node, EState* estate, int eflags) lclauses = lappend(lclauses, linitial(fstate->args)); rclauses = lappend(rclauses, lsecond(fstate->args)); hoperators = lappend_oid(hoperators, hclause->opno); + hcollations = lappend_oid(hcollations, hclause->inputcollid); } hjstate->hj_OuterHashKeys = lclauses; hjstate->hj_InnerHashKeys = rclauses; hjstate->hj_HashOperators = hoperators; - hjstate->hj_hash_collations = node->hash_collations; + hjstate->hj_hashCollations = hcollations; /* child Hash node needs to evaluate inner hash keys, too */ ((HashState*)innerPlanState(hjstate))->hashkeys = rclauses; diff --git a/src/gausskernel/runtime/executor/nodeIndexscan.cpp b/src/gausskernel/runtime/executor/nodeIndexscan.cpp index c49020bac..ff808cccf 100644 --- a/src/gausskernel/runtime/executor/nodeIndexscan.cpp +++ b/src/gausskernel/runtime/executor/nodeIndexscan.cpp @@ -89,7 +89,6 @@ static TupleTableSlot* 
IndexNext(IndexScanState* node) // we should change abs_idx_getnext to call IdxScanAm(scan)->idx_getnext and channge .idx_getnext in g_HeapIdxAm to // IndexGetnextSlot while (true) { - CHECK_FOR_INTERRUPTS(); IndexScanDesc indexScan = GetIndexScanDesc(scandesc); @@ -163,7 +162,7 @@ static bool IndexRecheck(IndexScanState* node, TupleTableSlot* slot) * ExecIndexScan(node) * ---------------------------------------------------------------- */ -TupleTableSlot* ExecIndexScan(PlanState* state) +static TupleTableSlot* ExecIndexScan(PlanState* state) { IndexScanState* node = castNode(IndexScanState, state); /* diff --git a/src/gausskernel/runtime/executor/nodeModifyTable.cpp b/src/gausskernel/runtime/executor/nodeModifyTable.cpp index 8ba0599d4..5e36663a1 100644 --- a/src/gausskernel/runtime/executor/nodeModifyTable.cpp +++ b/src/gausskernel/runtime/executor/nodeModifyTable.cpp @@ -4080,8 +4080,8 @@ ModifyTableState* ExecInitModifyTable(ModifyTable* node, EState* estate, int efl * Initialize result tuple slot and assign its rowtype using the first * RETURNING list. We assume the rest will look the same. */ - tup_desc = ExecTypeFromTL((List*)linitial(node->returningLists), false, false, - mt_state->resultRelInfo->ri_RelationDesc->rd_tam_ops); + tup_desc = ExecTypeFromTL((List *)linitial(node->returningLists), false, false, + mt_state->resultRelInfo->ri_RelationDesc->rd_tam_ops); /* Set up a slot for the output of the RETURNING projection(s) */ ExecInitResultTupleSlot(estate, &mt_state->ps); diff --git a/src/gausskernel/runtime/executor/nodePartIterator.cpp b/src/gausskernel/runtime/executor/nodePartIterator.cpp index 89a239f21..dd33f78ca 100755 --- a/src/gausskernel/runtime/executor/nodePartIterator.cpp +++ b/src/gausskernel/runtime/executor/nodePartIterator.cpp @@ -48,7 +48,7 @@ PartIteratorState* ExecInitPartIterator(PartIterator* node, EState* estate, int state = makeNode(PartIteratorState); state->ps.plan = (Plan*)node; state->ps.state = estate; - state->ps.ExecProcNode = ExecPartIterator; + state->ps.ExecProcNode = ExecPartIterator; /* initiate sub node */ state->ps.lefttree = ExecInitNode(node->plan.lefttree, estate, eflags); diff --git a/src/gausskernel/runtime/executor/nodeSamplescan.cpp b/src/gausskernel/runtime/executor/nodeSamplescan.cpp index dfd9392f3..6ed3d360c 100644 --- a/src/gausskernel/runtime/executor/nodeSamplescan.cpp +++ b/src/gausskernel/runtime/executor/nodeSamplescan.cpp @@ -118,10 +118,8 @@ TupleTableSlot* HeapSeqSampleNext(SeqScanState* node) TupleTableSlot* slot = node->ss_ScanTupleSlot; node->ss_ScanTupleSlot->tts_tupleDescriptor->td_tam_ops = node->ss_currentRelation->rd_tam_ops; HeapTuple tuple = SampleFetchNextTuple(node); - return ExecMakeTupleSlot(tuple, - GetTableScanDesc(node->ss_currentScanDesc, node->ss_currentRelation), - slot, - node->ss_currentRelation->rd_tam_ops); + return ExecMakeTupleSlot(tuple, GetTableScanDesc(node->ss_currentScanDesc, node->ss_currentRelation), slot, + node->ss_currentRelation->rd_tam_ops); } TupleTableSlot* UHeapSeqSampleNext(SeqScanState* node) diff --git a/src/gausskernel/runtime/executor/nodeSubplan.cpp b/src/gausskernel/runtime/executor/nodeSubplan.cpp index 5fe955f52..244303671 100644 --- a/src/gausskernel/runtime/executor/nodeSubplan.cpp +++ b/src/gausskernel/runtime/executor/nodeSubplan.cpp @@ -575,9 +575,8 @@ bool findPartialMatch(TupleHashTable hashtable, TupleTableSlot* slot, FmgrInfo* InitTupleHashIterator(hashtable, &hashiter); while ((entry = ScanTupleHashTable(&hashiter)) != NULL) { - CHECK_FOR_INTERRUPTS(); - + 
ExecStoreMinimalTuple(entry->firstTuple, hashtable->tableslot, false); if (!execTuplesUnequal(slot, hashtable->tableslot, num_cols, key_col_idx, eqfunctions, hashtable->tempcxt, hashtable->tab_collations)) { diff --git a/src/gausskernel/runtime/executor/nodeSubqueryscan.cpp b/src/gausskernel/runtime/executor/nodeSubqueryscan.cpp index 3b0e40ead..05e80403d 100644 --- a/src/gausskernel/runtime/executor/nodeSubqueryscan.cpp +++ b/src/gausskernel/runtime/executor/nodeSubqueryscan.cpp @@ -105,7 +105,6 @@ SubqueryScanState* ExecInitSubqueryScan(SubqueryScan* node, EState* estate, int sub_query_state->ss.ps.state = estate; sub_query_state->ss.ps.ExecProcNode = ExecSubqueryScan; - /* * Miscellaneous initialization * diff --git a/src/gausskernel/runtime/opfusion/opfusion_update.cpp b/src/gausskernel/runtime/opfusion/opfusion_update.cpp index 069859bed..0eb260c48 100644 --- a/src/gausskernel/runtime/opfusion/opfusion_update.cpp +++ b/src/gausskernel/runtime/opfusion/opfusion_update.cpp @@ -366,16 +366,18 @@ lreplace: result_rel_info = result_rel_info + m_c_local.m_estate->result_rel_index; if (relkind == RELKIND_RELATION || RELKIND_IS_SEQUENCE(relkind)) { if (result_rel_info->ri_junkFilter != NULL) { - tupleid = (ItemPointer)DatumGetPointer(ExecGetJunkAttribute(m_local.m_reslot, result_rel_info->ri_junkFilter->jf_junkAttNo, &isNull)); + tupleid = (ItemPointer)DatumGetPointer( + ExecGetJunkAttribute(m_local.m_reslot, result_rel_info->ri_junkFilter->jf_junkAttNo, &isNull)); } else { - tupleid = (ItemPointer)&(((HeapTuple)tup)->t_self); + tupleid = (ItemPointer) & (((HeapTuple)tup)->t_self); } } temp_isnull = m_local.m_reslot->tts_isnull; m_local.m_reslot->tts_isnull = m_local.m_isnull; temp_values = m_local.m_reslot->tts_values; m_local.m_reslot->tts_values = m_local.m_values; - bool update_fix_result = ExecComputeStoredUpdateExpr(result_rel_info, m_c_local.m_estate, m_local.m_reslot, tup, CMD_UPDATE, tupleid, InvalidOid, bucketid); + bool update_fix_result = ExecComputeStoredUpdateExpr(result_rel_info, m_c_local.m_estate, m_local.m_reslot, + tup, CMD_UPDATE, tupleid, InvalidOid, bucketid); if (!update_fix_result) { if (tup != m_local.m_reslot->tts_tuple) { tableam_tops_free_tuple(tup); diff --git a/src/gausskernel/runtime/vecexecutor/vecnode/vechashjoin.cpp b/src/gausskernel/runtime/vecexecutor/vecnode/vechashjoin.cpp index f5ba51b82..0a2b1a78b 100644 --- a/src/gausskernel/runtime/vecexecutor/vecnode/vechashjoin.cpp +++ b/src/gausskernel/runtime/vecexecutor/vecnode/vechashjoin.cpp @@ -211,6 +211,7 @@ VecHashJoinState* ExecInitVecHashJoin(VecHashJoin* node, EState* estate, int efl hash_state->hj_OuterHashKeys = lclauses; hash_state->hj_InnerHashKeys = rclauses; hash_state->hj_HashOperators = hoperators; + hash_state->hj_hashCollations = NIL; hash_state->eqfunctions = eqfunctions; hash_state->js.ps.ps_TupFromTlist = false; diff --git a/src/gausskernel/storage/access/common/heaptuple.cpp b/src/gausskernel/storage/access/common/heaptuple.cpp index c25cfe852..33bc78fc2 100644 --- a/src/gausskernel/storage/access/common/heaptuple.cpp +++ b/src/gausskernel/storage/access/common/heaptuple.cpp @@ -292,11 +292,13 @@ bool heap_attisnull_impl(HeapTuple tup, int attnum, TupleDesc tupDesc) return false; } -bool heap_attisnull(HeapTuple tup, int attnum, TupleDesc tupDesc) { +bool heap_attisnull(HeapTuple tup, int attnum, TupleDesc tupDesc) +{ return heap_attisnull_impl(tup, attnum, tupDesc); } -bool heapam_attisnull(Tuple tup, int attnum, TupleDesc tuple_desc) { +bool heapam_attisnull(Tuple tup, int attnum, 
TupleDesc tuple_desc) +{ return heap_attisnull_impl((HeapTuple)tup, attnum, tuple_desc); } @@ -552,8 +554,7 @@ Datum nocachegetattr(HeapTuple tuple, uint32 attnum, TupleDesc tupleDesc) * has already determined that the attnum refers to a system attribute. * ---------------- */ -static FORCE_INLINE -Datum heap_getsysattr_impl(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull) +static FORCE_INLINE Datum heap_getsysattr_impl(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull) { Datum result; @@ -610,11 +611,13 @@ Datum heap_getsysattr_impl(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool return result; } -Datum heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull) { +Datum heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull) +{ return heap_getsysattr_impl(tup, attnum, tupleDesc, isnull); } -Datum heapam_getsysattr(Tuple tup, int attnum, TupleDesc tuple_desc, bool* isnull, Buffer buff) { +Datum heapam_getsysattr(Tuple tup, int attnum, TupleDesc tuple_desc, bool *isnull, Buffer buff) +{ return heap_getsysattr_impl((HeapTuple)tup, attnum, tuple_desc, isnull); } @@ -704,11 +707,13 @@ void heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest) securec_check(rc, "\0", "\0"); } -HeapTuple heap_copytuple(HeapTuple tuple) { +HeapTuple heap_copytuple(HeapTuple tuple) +{ return heap_copytuple_impl(tuple); } -Tuple heapam_copytuple(Tuple tuple) { +Tuple heapam_copytuple(Tuple tuple) +{ Assert(TUPLE_IS_HEAP_TUPLE(HeapTuple(tuple))); return heap_copytuple((HeapTuple)tuple); } @@ -733,10 +738,10 @@ HeapTuple heap_form_tuple_impl(TupleDesc tupleDescriptor, Datum *values, bool *i int i; if (numberOfAttributes > MaxTupleAttributeNumber) { - ereport(ERROR, - (errcode(ERRCODE_TOO_MANY_COLUMNS), - errmsg("number of columns (%d) exceeds limit (%d), AM type (%d), type id (%u)", numberOfAttributes, - MaxTupleAttributeNumber, GetTableAmType(tupleDescriptor->td_tam_ops), tupleDescriptor->tdtypeid))); + ereport(ERROR, (errcode(ERRCODE_TOO_MANY_COLUMNS), + errmsg("number of columns (%d) exceeds limit (%d), AM type (%d), type id (%u)", + numberOfAttributes, MaxTupleAttributeNumber, GetTableAmType(tupleDescriptor->td_tam_ops), + tupleDescriptor->tdtypeid))); } /* @@ -878,9 +883,8 @@ HeapTuple heap_formtuple(TupleDesc tupleDescriptor, Datum *values, const char *n * * The result is allocated in the current memory context. 
*/ -static FORCE_INLINE -HeapTuple heap_modify_tuple_impl(HeapTuple tuple, TupleDesc tupleDesc, Datum *replValues, const bool *replIsnull, - const bool *doReplace) +static FORCE_INLINE HeapTuple heap_modify_tuple_impl(HeapTuple tuple, TupleDesc tupleDesc, Datum *replValues, + const bool *replIsnull, const bool *doReplace) { int numberOfAttributes = tupleDesc->natts; int attoff; @@ -944,12 +948,12 @@ HeapTuple heap_modify_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *replVal return heap_modify_tuple_impl(tuple, tupleDesc, replValues, replIsnull, doReplace); } -Tuple heapam_modify_tuple(Tuple tuple, TupleDesc tuple_desc, Datum* repl_values, const bool* repl_isnull, const bool* do_replace) +Tuple heapam_modify_tuple(Tuple tuple, TupleDesc tuple_desc, Datum *repl_values, const bool *repl_isnull, + const bool *do_replace) { return (Tuple)heap_modify_tuple_impl((HeapTuple)tuple, tuple_desc, repl_values, repl_isnull, do_replace); } - /* * heap_modifytuple * @@ -1000,8 +1004,7 @@ HeapTuple heap_modifytuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *replValu * heap_getattr; the loop will become O(N^2) as soon as any * noncacheable attribute offsets are involved. */ -static FORCE_INLINE void -heap_deform_tuple_impl(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull) +static FORCE_INLINE void heap_deform_tuple_impl(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull) { HeapTupleHeader tup = tuple->t_data; bool hasnulls = HeapTupleHasNulls(tuple); @@ -2611,10 +2614,10 @@ HeapTuple heap_form_cmprs_tuple(TupleDesc tupleDescriptor, FormCmprTupleData *cm int i; if (numberOfAttributes > MaxTupleAttributeNumber) { - ereport(ERROR, - (errcode(ERRCODE_TOO_MANY_COLUMNS), - errmsg("number of columns (%d) exceeds limit (%d), AM type (%d), type id (%u)", numberOfAttributes, - MaxTupleAttributeNumber, GetTableAmType(tupleDescriptor->td_tam_ops), tupleDescriptor->tdtypeid))); + ereport(ERROR, (errcode(ERRCODE_TOO_MANY_COLUMNS), + errmsg("number of columns (%d) exceeds limit (%d), AM type (%d), type id (%u)", + numberOfAttributes, MaxTupleAttributeNumber, GetTableAmType(tupleDescriptor->td_tam_ops), + tupleDescriptor->tdtypeid))); } /* @@ -2729,8 +2732,8 @@ HeapTuple heap_form_cmprs_tuple(TupleDesc tupleDescriptor, FormCmprTupleData *cm * heap_getattr; the loop will become O(N^2) as soon as any * noncacheable attribute offsets are involved. 
*/ -static FORCE_INLINE void -heap_deform_cmprs_tuple_impl(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull, char *cmprsInfo) +static FORCE_INLINE void heap_deform_cmprs_tuple_impl(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull, + char *cmprsInfo) { HeapTupleHeader tup = tuple->t_data; bool hasnulls = HeapTupleHasNulls(tuple); @@ -2825,9 +2828,8 @@ void heapam_deform_cmprs_tuple(Tuple tuple, TupleDesc tuple_desc, Datum* values, heap_deform_cmprs_tuple_impl((HeapTuple)tuple, tuple_desc, values, isnull, cmprs_info); } - -static FORCE_INLINE void -heap_deform_tuple2_impl(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull, Buffer buffer) +static FORCE_INLINE void heap_deform_tuple2_impl(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull, + Buffer buffer) { Assert((tuple != NULL) && (tuple->t_data != NULL)); if (!HEAP_TUPLE_IS_COMPRESSED(tuple->t_data)) { diff --git a/src/gausskernel/storage/access/common/tupdesc.cpp b/src/gausskernel/storage/access/common/tupdesc.cpp index c0c62b849..d0cc0741f 100644 --- a/src/gausskernel/storage/access/common/tupdesc.cpp +++ b/src/gausskernel/storage/access/common/tupdesc.cpp @@ -86,15 +86,18 @@ TupleDesc CreateTemplateTupleDesc(int natts, bool hasoid, const TableAmRoutine* * Tuple type ID information is initially set for an anonymous record type; * caller can overwrite this if needed. */ -TupleDesc CreateTupleDesc(int natts, bool hasoid, Form_pg_attribute* attrs, const TableAmRoutine* tam_ops) +TupleDesc CreateTupleDesc(int natts, bool hasoid, Form_pg_attribute *attrs, const TableAmRoutine *tam_ops) { TupleDesc desc; int i; + errno_t rc; desc = CreateTemplateTupleDesc(natts, hasoid, tam_ops); - for (i = 0; i < natts; ++i) - memcpy(TupleDescAttr(desc, i), attrs[i], ATTRIBUTE_FIXED_PART_SIZE); + for (i = 0; i < natts; ++i) { + rc = memcpy_s(TupleDescAttr(desc, i), ATTRIBUTE_FIXED_PART_SIZE, attrs[i], ATTRIBUTE_FIXED_PART_SIZE); + securec_check(rc, "\0", "\0"); + } return desc; } @@ -132,13 +135,15 @@ TupleDesc CreateTupleDescCopy(TupleDesc tupdesc) { TupleDesc desc; int i; + errno_t rc; desc = CreateTemplateTupleDesc(tupdesc->natts, tupdesc->tdhasoid, tupdesc->td_tam_ops); for (i = 0; i < desc->natts; i++) { Form_pg_attribute att = TupleDescAttr(desc, i); - memcpy(att, &tupdesc->attrs[i], ATTRIBUTE_FIXED_PART_SIZE); + rc = memcpy_s(att, ATTRIBUTE_FIXED_PART_SIZE, &tupdesc->attrs[i], ATTRIBUTE_FIXED_PART_SIZE); + securec_check(rc, "\0", "\0"); att->attnotnull = false; att->atthasdef = false; } diff --git a/src/gausskernel/storage/access/redo/redo_segpage.cpp b/src/gausskernel/storage/access/redo/redo_segpage.cpp index 8644411d6..2bfc1adfe 100644 --- a/src/gausskernel/storage/access/redo/redo_segpage.cpp +++ b/src/gausskernel/storage/access/redo/redo_segpage.cpp @@ -468,14 +468,15 @@ void MarkSegPageRedoChildPageDirty(RedoBufferInfo *bufferinfo) mode = PANIC; #endif const uint32 shiftSz = 32; - ereport(mode, (errmsg("extreme_rto segment page not mark dirty:lsn %X/%X, lsn_disk %X/%X, \ + ereport(mode, + (errmsg("extreme_rto segment page not mark dirty:lsn %X/%X, lsn_disk %X/%X, \ lsn_page %X/%X, page %u/%u/%u %u", - (uint32)(bufferinfo->lsn >> shiftSz), (uint32)(bufferinfo->lsn), - (uint32)(bufDesc->extra->lsn_on_disk >> shiftSz), (uint32)(bufDesc->extra->lsn_on_disk), - (uint32)(PageGetLSN(bufferinfo->pageinfo.page) >> shiftSz), - (uint32)(PageGetLSN(bufferinfo->pageinfo.page)), - bufferinfo->blockinfo.rnode.spcNode, bufferinfo->blockinfo.rnode.dbNode, - bufferinfo->blockinfo.rnode.relNode, 
bufferinfo->blockinfo.blkno))); + (uint32)(bufferinfo->lsn >> shiftSz), (uint32)(bufferinfo->lsn), + (uint32)(bufDesc->extra->lsn_on_disk >> shiftSz), (uint32)(bufDesc->extra->lsn_on_disk), + (uint32)(PageGetLSN(bufferinfo->pageinfo.page) >> shiftSz), + (uint32)(PageGetLSN(bufferinfo->pageinfo.page)), bufferinfo->blockinfo.rnode.spcNode, + bufferinfo->blockinfo.rnode.dbNode, bufferinfo->blockinfo.rnode.relNode, + bufferinfo->blockinfo.blkno))); } #ifdef USE_ASSERT_CHECKING bufDesc->lsn_dirty = PageGetLSN(bufferinfo->pageinfo.page); diff --git a/src/gausskernel/storage/access/table/tableam.cpp b/src/gausskernel/storage/access/table/tableam.cpp index f2b7029a8..7962512ee 100644 --- a/src/gausskernel/storage/access/table/tableam.cpp +++ b/src/gausskernel/storage/access/table/tableam.cpp @@ -135,7 +135,7 @@ static void HeapamTopsDestroyTuple(Tuple tuple) pfree_ext(tup); } -static void HeapamTopsUpdateTupleWithOid (Relation rel, Tuple tuple, TupleTableSlot *slot) +static void HeapamTopsUpdateTupleWithOid(Relation rel, Tuple tuple, TupleTableSlot *slot) { return; } @@ -228,23 +228,20 @@ void HeapamTupleGetLatestTid(Relation relation, Snapshot snapshot, * ------------------------------------------------------------------------ */ -Oid HeapamTupleInsert (Relation relation, Tuple tup, CommandId cid, - int options, struct BulkInsertStateData *bistate) { +Oid HeapamTupleInsert(Relation relation, Tuple tup, CommandId cid, int options, struct BulkInsertStateData *bistate) +{ return heap_insert(relation, (HeapTuple)tup, cid, options, bistate); } -int HeapamTupleMultiInsert(Relation relation, Relation parent, - Tuple* tuples, int ntuples, CommandId cid, - int options, struct BulkInsertStateData *bistate, - HeapMultiInsertExtraArgs *args) { - return heap_multi_insert(relation, parent, (HeapTuple*)tuples, ntuples, cid, options, bistate, args); +int HeapamTupleMultiInsert(Relation relation, Relation parent, Tuple *tuples, int ntuples, CommandId cid, int options, + struct BulkInsertStateData *bistate, HeapMultiInsertExtraArgs *args) +{ + return heap_multi_insert(relation, parent, (HeapTuple *)tuples, ntuples, cid, options, bistate, args); } -TM_Result HeapamTupleDelete(Relation relation, ItemPointer tid, - CommandId cid, Snapshot crosscheck, Snapshot snapshot, - bool wait, TupleTableSlot** oldslot, TM_FailureData *tmfd, - bool allow_delete_self) { - +TM_Result HeapamTupleDelete(Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, Snapshot snapshot, + bool wait, TupleTableSlot **oldslot, TM_FailureData *tmfd, bool allow_delete_self) +{ return heap_delete(relation, tid, cid, crosscheck, wait, tmfd, allow_delete_self); } @@ -403,7 +400,7 @@ void HeapamAbortSpeculative(Relation relation, Tuple tuple) heap_abort_speculative(relation, (HeapTuple) tuple); } -void HeapamTcapPromoteLock (Relation relation, LOCKMODE *lockmode) +void HeapamTcapPromoteLock(Relation relation, LOCKMODE *lockmode) { /* Protect old versions from recycling during timecapsule. 
*/ *lockmode = AccessExclusiveLock; diff --git a/src/gausskernel/storage/access/ubtree/ubtree.cpp b/src/gausskernel/storage/access/ubtree/ubtree.cpp index af1e8f85d..2798bac5f 100644 --- a/src/gausskernel/storage/access/ubtree/ubtree.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtree.cpp @@ -329,6 +329,7 @@ Datum ubtgetbitmap(PG_FUNCTION_ARGS) } /* Save tuple ID, and continue scanning */ + scan->xs_recheck_itup = so->currPos.items[so->currPos.itemIndex].needRecheck; heapTid = &so->currPos.items[so->currPos.itemIndex].heapTid; currPartOid = so->currPos.items[so->currPos.itemIndex].partitionOid; tbm_add_tuples(tbm, heapTid, 1, scan->xs_recheck_itup, currPartOid); diff --git a/src/gausskernel/storage/access/ustore/knl_uheap.cpp b/src/gausskernel/storage/access/ustore/knl_uheap.cpp index f2db7db31..10eae062f 100644 --- a/src/gausskernel/storage/access/ustore/knl_uheap.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uheap.cpp @@ -402,18 +402,20 @@ Datum UHeapFastGetAttr(UHeapTuple tup, int attnum, TupleDesc tupleDesc, bool *is * See comments in att_align_pointer() */ char *tp = (char *)(tup)->disk_tuple + (tup)->disk_tuple->t_hoff; - char *dp = ((tupleDesc)->attrs[0].attlen >= 0) ? - tp : - (char *)att_align_pointer(tp, (tupleDesc)->attrs[(attnum)-1].attalign, -1, tp); + char *dp = ((tupleDesc)->attrs[0].attlen >= 0) + ? tp + : (char *)att_align_pointer(tp, (tupleDesc)->attrs[(attnum)-1].attalign, -1, tp); - return ((attnum) > 0 ? ((*(isnull) = false), UHeapDiskTupNoNulls(tup->disk_tuple) ? - ( TupleDescAttr((tupleDesc), (attnum)-1)->attcacheoff >= 0 ? - (fetchatt( TupleDescAttr((tupleDesc), (attnum)-1), (dp + TupleDescAttr((tupleDesc), (attnum)-1)->attcacheoff)) - ) : - (UHeapNoCacheGetAttr((tup), (attnum), (tupleDesc)))) : - (att_isnull((attnum)-1, (tup)->disk_tuple->data) ? ((*(isnull) = true), (Datum)NULL) : - (UHeapNoCacheGetAttr((tup), (attnum), (tupleDesc))))) : - ((Datum)NULL)); + return ((attnum) > 0 ? ((*(isnull) = false), + UHeapDiskTupNoNulls(tup->disk_tuple) + ? (TupleDescAttr((tupleDesc), (attnum)-1)->attcacheoff >= 0 + ? (fetchatt(TupleDescAttr((tupleDesc), (attnum)-1), + (dp + TupleDescAttr((tupleDesc), (attnum)-1)->attcacheoff))) + : (UHeapNoCacheGetAttr((tup), (attnum), (tupleDesc)))) + : (att_isnull((attnum)-1, (tup)->disk_tuple->data) + ? 
((*(isnull) = true), (Datum)NULL) + : (UHeapNoCacheGetAttr((tup), (attnum), (tupleDesc))))) + : ((Datum)NULL)); } enum UHeapDMLType { diff --git a/src/gausskernel/storage/access/ustore/knl_utuple.cpp b/src/gausskernel/storage/access/ustore/knl_utuple.cpp index cb1db90eb..8db7eda05 100644 --- a/src/gausskernel/storage/access/ustore/knl_utuple.cpp +++ b/src/gausskernel/storage/access/ustore/knl_utuple.cpp @@ -1804,10 +1804,11 @@ void UHeapSlotStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot, bool sh */ slot->tts_flags &= ~TTS_FLAG_EMPTY; slot->tts_flags &= ~TTS_FLAG_SHOULDFREE; - if (shouldFree) + if (shouldFree) { slot->tts_flags |= TTS_FLAG_SHOULDFREEMIN; - else + } else { slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN; + } slot->tts_tuple = &slot->tts_minhdr; slot->tts_mintuple = mtup; diff --git a/src/gausskernel/storage/buffer/bufmgr.cpp b/src/gausskernel/storage/buffer/bufmgr.cpp index 0253baf7f..87de77358 100644 --- a/src/gausskernel/storage/buffer/bufmgr.cpp +++ b/src/gausskernel/storage/buffer/bufmgr.cpp @@ -2592,8 +2592,8 @@ void PageCheckIfCanEliminate(BufferDesc *buf, uint32 *oldFlags, bool *needGetLoc Block tmpBlock = BufHdrGetBlock(buf); - if ((*oldFlags & BM_TAG_VALID) && !XLByteEQ(buf->extra->lsn_on_disk, PageGetLSN(tmpBlock)) && !(*oldFlags & BM_DIRTY) && - RecoveryInProgress()) { + if ((*oldFlags & BM_TAG_VALID) && !XLByteEQ(buf->extra->lsn_on_disk, PageGetLSN(tmpBlock)) && + !(*oldFlags & BM_DIRTY) && RecoveryInProgress()) { int mode = DEBUG1; #ifdef USE_ASSERT_CHECKING mode = PANIC; @@ -4767,7 +4767,8 @@ void FlushBuffer(void *buf, SMgrRelation reln, ReadBufferMethod flushmethod, boo iocb_ptr->data = (void *)bufdesc; DSSAioAppendIOCB(aio_cxt, iocb_ptr); } else { - seg_physical_write(spc, fakenode, bufferinfo.blockinfo.forknum, bufdesc->extra->seg_blockno, bufToWrite, false); + seg_physical_write(spc, fakenode, bufferinfo.blockinfo.forknum, bufdesc->extra->seg_blockno, bufToWrite, + false); } } else { SegmentCheck(!IsSegmentFileNode(bufdesc->tag.rnode)); diff --git a/src/gausskernel/storage/cstore/cstore_insert.cpp b/src/gausskernel/storage/cstore/cstore_insert.cpp index 3fa95fca8..f166c28b3 100644 --- a/src/gausskernel/storage/cstore/cstore_insert.cpp +++ b/src/gausskernel/storage/cstore/cstore_insert.cpp @@ -2032,6 +2032,7 @@ void CStoreInsert::InitIndexInsertArg(Relation heap_rel, const int* key_map, int { /* plus TID system attribute */ int nkeys_plus_tid = nkeys + 1; + errno_t rc; struct tupleDesc *index_tupdesc = CreateTemplateTupleDesc(nkeys_plus_tid, false); @@ -2046,13 +2047,16 @@ void CStoreInsert::InitIndexInsertArg(Relation heap_rel, const int* key_map, int /* set attribute point exlcuding TID field */ for (int i = 0; i < nkeys; ++i) { - memcpy(&index_tupdesc->attrs[i], &heap_rel->rd_att->attrs[key_map[i]], ATTRIBUTE_FIXED_PART_SIZE); + rc = memcpy_s(&index_tupdesc->attrs[i], ATTRIBUTE_FIXED_PART_SIZE, &heap_rel->rd_att->attrs[key_map[i]], + ATTRIBUTE_FIXED_PART_SIZE); + securec_check(rc, "\0", "\0"); } /* set TID attribute */ FormData_pg_attribute tid_attr; init_tid_attinfo(&tid_attr); - memcpy(&index_tupdesc->attrs[nkeys], &tid_attr, ATTRIBUTE_FIXED_PART_SIZE); + rc = memcpy_s(&index_tupdesc->attrs[nkeys], ATTRIBUTE_FIXED_PART_SIZE, &tid_attr, ATTRIBUTE_FIXED_PART_SIZE); + securec_check(rc, "\0", "\0"); args.es_result_relations = NULL; /* psort index will use tuple sort */ diff --git a/src/gausskernel/storage/ipc/procarray.cpp b/src/gausskernel/storage/ipc/procarray.cpp index b742fb305..ac72ac86d 100755 --- a/src/gausskernel/storage/ipc/procarray.cpp +++ 
b/src/gausskernel/storage/ipc/procarray.cpp @@ -1998,10 +1998,10 @@ RETRY: snapshot->snapshotcsn = pg_atomic_read_u64(&t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo); } else { result = SSGetSnapshotData(snapshot); - } + } } else { - result = GetLocalSnapshotData(snapshot); - snapshot->snapshotcsn = pg_atomic_read_u64(&t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo); + result = GetLocalSnapshotData(snapshot); + snapshot->snapshotcsn = pg_atomic_read_u64(&t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo); } if (result) { diff --git a/src/gausskernel/storage/ipc/standby.cpp b/src/gausskernel/storage/ipc/standby.cpp index 5f90131e7..eac45526e 100755 --- a/src/gausskernel/storage/ipc/standby.cpp +++ b/src/gausskernel/storage/ipc/standby.cpp @@ -1031,13 +1031,13 @@ XLogRecPtr LogStandbySnapshot(void) * record. Fortunately this routine isn't executed frequently, and it's * only a shared lock. */ - if (g_instance.attr.attr_storage.wal_level < WAL_LEVEL_LOGICAL) + if (g_instance.attr.attr_storage.wal_level < WAL_LEVEL_LOGICAL || g_instance.streaming_dr_cxt.isInSwitchover) LWLockRelease(ProcArrayLock); recptr = LogCurrentRunningXacts(running); /* Release lock if we kept it longer ... */ - if (g_instance.attr.attr_storage.wal_level >= WAL_LEVEL_LOGICAL) + if (g_instance.attr.attr_storage.wal_level >= WAL_LEVEL_LOGICAL && !g_instance.streaming_dr_cxt.isInSwitchover) LWLockRelease(ProcArrayLock); /* GetRunningTransactionData() acquired XidGenLock, we must release it */ diff --git a/src/gausskernel/storage/lmgr/proc.cpp b/src/gausskernel/storage/lmgr/proc.cpp index ec614e2ce..1016016a0 100755 --- a/src/gausskernel/storage/lmgr/proc.cpp +++ b/src/gausskernel/storage/lmgr/proc.cpp @@ -250,7 +250,7 @@ int GetThreadPoolStreamProcNum() void InitProcGlobal(void) { PGPROC **procs = NULL; - int i, j; + int i, j, cmaProcCount; uint32 TotalProcs = (uint32)(GLOBAL_ALL_PROCS); bool needPalloc = false; @@ -384,7 +384,7 @@ void InitProcGlobal(void) ereport(LOG, (errmsg("Get stream thread proc num [%d].", thread_pool_stream_proc_num))); } - for (i = 0; (unsigned int)(i) < TotalProcs; i++) { + for (i = 0, cmaProcCount = 0; (unsigned int)(i) < TotalProcs; i++) { /* Common initialization for all PGPROCs, regardless of type. * * Set up per-PGPROC semaphore, latch, and backendLock. 
Prepared xact @@ -442,6 +442,10 @@ void InitProcGlobal(void) */ procs[i]->links.next = (SHM_QUEUE*)g_instance.proc_base->cmAgentFreeProcs; g_instance.proc_base->cmAgentFreeProcs = procs[i]; + /* Record all cm_agent procs so that their information can be printed when necessary */ + Assert(cmaProcCount < NUM_CMAGENT_PROCS); + g_instance.proc_base->cmAgentAllProcs[cmaProcCount] = procs[i]; + cmaProcCount++; } else if (i < g_instance.shmem_cxt.MaxConnections + thread_pool_stream_proc_num + AUXILIARY_BACKENDS + g_instance.attr.attr_sql.job_queue_processes + 1 + NUM_CMAGENT_PROCS + g_max_worker_processes + NUM_DCF_CALLBACK_PROCS + NUM_DMS_CALLBACK_PROCS) { @@ -526,46 +530,19 @@ PGPROC *GetFreeProc() */ void PgStatCMAThreadStatus() { - const char* appName = "cm_agent"; - MemoryContext oldContext = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_EXECUTOR)); - /* get all threads from global status entries which name is 'cm_agent' */ - PgBackendStatusNode* result = pgstat_get_backend_status_by_appname(appName, NULL); - - if (result == NULL) { - (void)MemoryContextSwitchTo(oldContext); - return; - } - - PgBackendStatusNode* tempNode = result; - tempNode = tempNode->next; - - while (tempNode != NULL) { - PgBackendStatus* beentry = tempNode->data; - tempNode = tempNode->next; - if (beentry == NULL) { - continue; + StringInfoData callStack; + initStringInfo(&callStack); + for (int i = 0; i < NUM_CMAGENT_PROCS; i++) { + ThreadId pid = g_instance.proc_base->cmAgentAllProcs[i]->pid; + if (pid != 0) { + resetStringInfo(&callStack); + get_stack_according_to_tid(pid, &callStack); + ereport(LOG, (errmsg("Print cm_agent thread stack when proc is going to be not available, pid is %lu, " + "stack is \n%s", + pid, callStack.data))); } - - char* wait_status = getThreadWaitStatusDesc(beentry); - ereport(LOG, (errmsg("Print cm_agent thread information when proc is going to be not available, node_name<%s>," - " datid<%u>, app_name<%s>, query_id<%lu>, tid<%lu>, lwtid<%d>, parent_sessionid<%lu>, " - "thread_level<%d>, wait_status<%s>", - g_instance.attr.attr_common.PGXCNodeName, - beentry->st_databaseid, - beentry->st_appname ? beentry->st_appname : "unnamed thread", - beentry->st_queryid, - beentry->st_procpid, - beentry->st_tid, - beentry->st_parent_sessionid, - beentry->st_thread_level, - wait_status))); - - pfree_ext(wait_status); } - - /* Free node list memory */ - FreeBackendStatusNodeMemory(result); - (void)MemoryContextSwitchTo(oldContext); + FreeStringInfo(&callStack); } /* @@ -623,7 +600,7 @@ static void CheckCMAReservedProc() */ if (curCMAProcCount >= (int)procWarningCount) { ereport(WARNING, (errmsg("Get free proc from CMA-proc list, proc location is %p." - " Current proc count %d is more than threshold %u. Ready to print thread wait status", + " Current proc count %d is more than threshold %u. Ready to print thread stack", cmaProc, curCMAProcCount, procWarningCount))); PgStatCMAThreadStatus(); } @@ -813,10 +790,6 @@ void InitProcess(void) (u_sess->libpq_cxt.IsConnFromCmAgent) ? cmaConnNumInfo : ""))); } - if (u_sess->libpq_cxt.IsConnFromCmAgent) { - CheckCMAReservedProc(); - } - #ifdef __USE_NUMA if (g_instance.shmem_cxt.numaNodeNum > 1) { if (!g_instance.numa_cxt.inheritThreadPool) { @@ -994,6 +967,15 @@ void InitProcess(void) */ on_shmem_exit(ProcKill, 0); + /* + * We call this function to check the reserved procs and print their stacks. + * It must be called after ProcKill is registered, so that ProcKill will release all lwlocks and put the proc back to + * g_instance.proc_base->cmAgentFreeProcs if proc_exit happens.
+ */ + if (u_sess->libpq_cxt.IsConnFromCmAgent) { + CheckCMAReservedProc(); + } + init_proc_dw_buf(); /* diff --git a/src/gausskernel/storage/page/pageparse.cpp b/src/gausskernel/storage/page/pageparse.cpp index 0521f88aa..3e23160fe 100644 --- a/src/gausskernel/storage/page/pageparse.cpp +++ b/src/gausskernel/storage/page/pageparse.cpp @@ -355,6 +355,15 @@ static void ParseTupleHeader(const PageHeader page, uint lineno, char *strOutput static void ParseHeapPage(const PageHeader page, BlockNumber blockNum, char *strOutput, BlockNumber block_endpoint) { errno_t rc = EOK; + if (PageIsNew(page)) { + rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, + "Page information of block %u/%u : new page\n", blockNum, block_endpoint); + securec_check_ss(rc, "\0", "\0"); + ParseHeapHeader(page, strOutput, blockNum, block_endpoint); + rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, "\n"); + securec_check_ss(rc, "\0", "\0"); + return; + } if (page->pd_lower < GetPageHeaderSize(page) || page->pd_lower > page->pd_upper || page->pd_upper > page->pd_special || page->pd_special > BLCKSZ || page->pd_special != MAXALIGN(page->pd_special)) { @@ -404,7 +413,8 @@ static void ParseOnePage(const PageHeader page, BlockNumber blockNum, char *strO { errno_t rc = EOK; if (strcmp(relation_type, "heap") == 0) { - if (PG_HEAP_PAGE_LAYOUT_VERSION != (uint16)PageGetPageLayoutVersion(page) || PageGetSpecialSize(page) != 0) { + if ((PG_HEAP_PAGE_LAYOUT_VERSION != (uint16)PageGetPageLayoutVersion(page) && + (uint16)PageGetPageLayoutVersion(page) != 0) || PageGetSpecialSize(page) != 0) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), (errmsg("The target page is not heap, the given page version is: %u", (uint16)PageGetPageLayoutVersion(page))))); @@ -427,14 +437,16 @@ static void ParseOnePage(const PageHeader page, BlockNumber blockNum, char *strO } ParseUHeapPage((void *)page, blockNum, block_endpoint, strOutput, dumpUndo); } else if (strcmp(relation_type, "btree") == 0) { - if (PG_COMM_PAGE_LAYOUT_VERSION != (uint16)PageGetPageLayoutVersion(page) || PageGetSpecialSize(page) == 0) { + if ((PG_COMM_PAGE_LAYOUT_VERSION != (uint16)PageGetPageLayoutVersion(page) && + (uint16)PageGetPageLayoutVersion(page) != 0) || PageGetSpecialSize(page) == 0) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), (errmsg("The target page is not btree, the given page version is: %u", (uint16)PageGetPageLayoutVersion(page))))); } ParseIndexPage((void *)page, BTREE_INDEX, blockNum, block_endpoint, strOutput); } else if (strcmp(relation_type, "ubtree") == 0) { - if (PG_COMM_PAGE_LAYOUT_VERSION != (uint16)PageGetPageLayoutVersion(page) || PageGetSpecialSize(page) == 0) { + if ((PG_COMM_PAGE_LAYOUT_VERSION != (uint16)PageGetPageLayoutVersion(page) && + (uint16)PageGetPageLayoutVersion(page) != 0) || PageGetSpecialSize(page) == 0) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), (errmsg("The target page is not ubtree, the given page version is: %u", (uint16)PageGetPageLayoutVersion(page))))); diff --git a/src/gausskernel/storage/replication/logical/launcher.cpp b/src/gausskernel/storage/replication/logical/launcher.cpp index c24fab519..9bffe943b 100644 --- a/src/gausskernel/storage/replication/logical/launcher.cpp +++ b/src/gausskernel/storage/replication/logical/launcher.cpp @@ -680,6 +680,7 @@ void ApplyLauncherMain() (void)gspqsignal(SIGUSR2, logicalrep_launcher_sigusr2); (void)gspqsignal(SIGFPE, FloatExceptionHandler); (void)gspqsignal(SIGCHLD, SIG_DFL); + 
(void)gspqsignal(SIGURG, print_stack); /* Early initialization */ BaseInit(); diff --git a/src/gausskernel/storage/replication/logical/worker.cpp b/src/gausskernel/storage/replication/logical/worker.cpp index e7dea482b..85118c51b 100644 --- a/src/gausskernel/storage/replication/logical/worker.cpp +++ b/src/gausskernel/storage/replication/logical/worker.cpp @@ -1440,6 +1440,7 @@ void ApplyWorkerMain() gspqsignal(SIGUSR2, SIG_IGN); gspqsignal(SIGFPE, FloatExceptionHandler); gspqsignal(SIGCHLD, SIG_DFL); + (void)gspqsignal(SIGURG, print_stack); /* Early initialization */ BaseInit(); diff --git a/src/gausskernel/storage/xlog_share_storage/xlog_share_storage.cpp b/src/gausskernel/storage/xlog_share_storage/xlog_share_storage.cpp index 6a8dd6838..be07e183f 100644 --- a/src/gausskernel/storage/xlog_share_storage/xlog_share_storage.cpp +++ b/src/gausskernel/storage/xlog_share_storage/xlog_share_storage.cpp @@ -593,6 +593,7 @@ static void InitThreadSignal() (void)gspqsignal(SIGTTOU, SIG_DFL); (void)gspqsignal(SIGCONT, SIG_DFL); (void)gspqsignal(SIGWINCH, SIG_DFL); + (void)gspqsignal(SIGURG, print_stack); gs_signal_setmask(&t_thrd.libpq_cxt.UnBlockSig, NULL); (void)gs_signal_unblock_sigusr2(); diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h index e4308bd3a..9d9bb9ecf 100644 --- a/src/include/access/tableam.h +++ b/src/include/access/tableam.h @@ -581,7 +581,8 @@ static inline void tableam_tslot_formbatch(TupleTableSlot* slot, VectorBatch* ba slot->tts_tam_ops->tslot_formbatch(slot, batch, cur_rows, natts); } -static inline Datum tableam_tslot_getattr(TupleTableSlot *slot, int attnum, bool *isnull, bool need_transform_anyarray = false) +static inline Datum tableam_tslot_getattr(TupleTableSlot *slot, int attnum, bool *isnull, + bool need_transform_anyarray = false) { return slot->tts_tam_ops->tslot_getattr(slot, attnum, isnull, need_transform_anyarray); } @@ -635,7 +636,8 @@ static inline void tableam_tops_deform_tuple(Tuple tuple, TupleDesc tuple_desc, return tuple_desc->td_tam_ops->tops_deform_tuple(tuple, tuple_desc, values, isnull); } -static inline void tableam_tops_deform_tuple2(Tuple tuple, TupleDesc tuple_desc, Datum *values, bool *isnull, Buffer buffer) +static inline void tableam_tops_deform_tuple2(Tuple tuple, TupleDesc tuple_desc, Datum *values, bool *isnull, + Buffer buffer) { AssertValidTuple(tuple); Assert(g_tableam_routines[GetTabelAmIndexTuple(tuple)] == tuple_desc->td_tam_ops); @@ -695,7 +697,6 @@ static inline Tuple tableam_tops_opfusion_modify_tuple(Tuple tuple, TupleDesc tu static inline Datum tableam_tops_tuple_getattr(Tuple tuple, int att_num, TupleDesc tuple_desc, bool *is_null) { AssertValidTuple(tuple); - //FIXME: Assert(g_tableam_routines[GetTabelAmIndexTuple(tuple)] == tuple_desc->td_tam_ops); return GetTableAmRoutine((TableAmType)(GetTabelAmIndexTuple(tuple)))->tops_tuple_getattr(tuple, att_num, tuple_desc, is_null); } @@ -714,7 +715,8 @@ static inline bool tableam_tops_tuple_attisnull(Tuple tuple, int attnum, TupleDe * We allow a NULL tupledesc for relations not expected to have missing * values, such as catalog relations and indexes. 
*/ - return GetTableAmRoutine((TableAmType)(GetTabelAmIndexTuple(tuple)))->tops_tuple_attisnull(tuple, attnum, tuple_desc); + return GetTableAmRoutine((TableAmType)(GetTabelAmIndexTuple(tuple))) + ->tops_tuple_attisnull(tuple, attnum, tuple_desc); } static inline Tuple tableam_tops_copy_tuple(Tuple tuple) @@ -931,10 +933,11 @@ static inline TableScanDesc tableam_scan_begin_bm(Relation relation, Snapshot sn } static inline TableScanDesc tableam_scan_begin_sampling(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, - bool allow_strat, bool allow_sync, RangeScanInRedis rangeScanInRedis = { false, 0, 0 } ) + bool allow_strat, bool allow_sync, + RangeScanInRedis rangeScanInRedis = {false, 0, 0}) { - return relation->rd_tam_ops->scan_begin_sampling(relation, snapshot, nkeys, key, allow_strat, - allow_sync, rangeScanInRedis); + return relation->rd_tam_ops->scan_begin_sampling(relation, snapshot, nkeys, key, allow_strat, allow_sync, + rangeScanInRedis); } static inline TableScanDesc tableam_scan_begin_parallel(Relation relation, ParallelHeapScanDesc parallel_scan) @@ -942,7 +945,8 @@ static inline TableScanDesc tableam_scan_begin_parallel(Relation relation, Paral return relation->rd_tam_ops->scan_begin_parallel(relation, parallel_scan); } -static inline Tuple tableam_scan_getnexttuple(TableScanDesc sscan, ScanDirection direction, bool* has_cur_xact_write = NULL) +static inline Tuple tableam_scan_getnexttuple(TableScanDesc sscan, ScanDirection direction, + bool *has_cur_xact_write = NULL) { return sscan->rs_rd->rd_tam_ops->scan_getnexttuple(sscan, direction, has_cur_xact_write); } @@ -957,10 +961,10 @@ static inline void tableam_scan_getpage(TableScanDesc sscan, BlockNumber page) return sscan->rs_rd->rd_tam_ops->scan_getpage(sscan, page); } -static inline Tuple tableam_scan_gettuple_for_verify(TableScanDesc sscan, ScanDirection direction, bool isValidRelationPage) +static inline Tuple tableam_scan_gettuple_for_verify(TableScanDesc sscan, ScanDirection direction, + bool isValidRelationPage) { - return sscan->rs_rd->rd_tam_ops->scan_gettuple_for_verify(sscan, - direction, isValidRelationPage); + return sscan->rs_rd->rd_tam_ops->scan_gettuple_for_verify(sscan, direction, isValidRelationPage); } static inline void tableam_scan_end(TableScanDesc sscan) @@ -1037,14 +1041,16 @@ extern TM_Result HeapamTupleUpdate(Relation relation, Relation parentRelation, I * HEAP AM APIs * ------------------------------------------------------------------------ */ -extern Tuple heap_slot_get_tuple_from_slot(TupleTableSlot* slot); -extern Datum heapam_getsysattr(Tuple tup, int attnum, TupleDesc tuple_desc, bool* isnull, Buffer buff); -extern Tuple heapam_form_tuple(TupleDesc tuple_descriptor, Datum* values, bool* isnull); -extern void heapam_deform_tuple(Tuple tuple, TupleDesc tuple_desc, Datum* values, bool* isnull); +extern Tuple heap_slot_get_tuple_from_slot(TupleTableSlot *slot); +extern Datum heapam_getsysattr(Tuple tup, int attnum, TupleDesc tuple_desc, bool *isnull, Buffer buff); +extern Tuple heapam_form_tuple(TupleDesc tuple_descriptor, Datum *values, bool *isnull); +extern void heapam_deform_tuple(Tuple tuple, TupleDesc tuple_desc, Datum *values, bool *isnull); extern void heapam_deform_tuple2(Tuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull, Buffer buffer); -extern void heapam_deform_cmprs_tuple(Tuple tuple, TupleDesc tuple_desc, Datum* values, bool* isnull, char* cmprs_info); -extern void heapam_fill_tuple(TupleDesc tuple_desc, Datum* values, const bool* isnull, char* data, Size 
data_size, uint16* infomask, bits8* bit); -extern Tuple heapam_modify_tuple(Tuple tuple, TupleDesc tuple_desc, Datum* repl_values, const bool* repl_isnull, const bool* do_replace); +extern void heapam_deform_cmprs_tuple(Tuple tuple, TupleDesc tuple_desc, Datum *values, bool *isnull, char *cmprs_info); +extern void heapam_fill_tuple(TupleDesc tuple_desc, Datum *values, const bool *isnull, char *data, Size data_size, + uint16 *infomask, bits8 *bit); +extern Tuple heapam_modify_tuple(Tuple tuple, TupleDesc tuple_desc, Datum *repl_values, const bool *repl_isnull, + const bool *do_replace); extern bool heapam_attisnull(Tuple tup, int attnum, TupleDesc tuple_desc); extern Tuple heapam_copytuple(Tuple tuple); diff --git a/src/include/access/tupdesc.h b/src/include/access/tupdesc.h index b7a066230..42b8146dc 100644 --- a/src/include/access/tupdesc.h +++ b/src/include/access/tupdesc.h @@ -170,7 +170,8 @@ typedef struct tupleDesc { extern TupleDesc CreateTemplateTupleDesc(int natts, bool hasoid, const TableAmRoutine* tam_ops = TableAmHeap); -extern TupleDesc CreateTupleDesc(int natts, bool hasoid, Form_pg_attribute* attrs, const TableAmRoutine* tam_ops = TableAmHeap); +extern TupleDesc CreateTupleDesc(int natts, bool hasoid, Form_pg_attribute *attrs, + const TableAmRoutine *tam_ops = TableAmHeap); extern TupleDesc CreateTupleDescCopy(TupleDesc tupdesc); diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_844.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_844.sql index cb4e042cc..cbab56539 100644 --- a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_844.sql +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_844.sql @@ -622,3 +622,4 @@ BEGIN LANGUAGE 'plpgsql' NOT FENCED; END IF; END$DO$; + diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_844.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_844.sql index cb4e042cc..cbab56539 100644 --- a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_844.sql +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_844.sql @@ -622,3 +622,4 @@ BEGIN LANGUAGE 'plpgsql' NOT FENCED; END IF; END$DO$; + diff --git a/src/include/ddes/dms/dms_api.h b/src/include/ddes/dms/dms_api.h index 1a9f0a38b..0be392aa5 100644 --- a/src/include/ddes/dms/dms_api.h +++ b/src/include/ddes/dms/dms_api.h @@ -431,7 +431,6 @@ typedef enum en_dms_wait_event { DMS_EVT_LATCH_X_REMOTE, DMS_EVT_LATCH_S_REMOTE, - DMS_EVT_COUNT, } dms_wait_event_t; diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h index 6e5463e39..00c435c6d 100755 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h @@ -193,14 +193,15 @@ extern TupleHashEntry FindTupleHashEntry( /* * prototypes from functions in execJunk.c */ -extern JunkFilter* ExecInitJunkFilter(List* targetList, bool hasoid, TupleTableSlot* slot, const TableAmRoutine* tam_ops); -extern void ExecInitJunkAttr(EState* estate, CmdType operation, List* targetlist, ResultRelInfo* result_rel_info); -extern JunkFilter* ExecInitJunkFilterConversion(List* targetList, TupleDesc cleanTupType, TupleTableSlot* slot); -extern AttrNumber ExecFindJunkAttribute(JunkFilter* junkfilter, const char* attrName); -extern AttrNumber ExecFindJunkAttributeInTlist(List* targetlist, const 
char* attrName); -extern Datum ExecGetJunkAttribute(TupleTableSlot* slot, AttrNumber attno, bool* isNull); -extern TupleTableSlot* ExecFilterJunk(JunkFilter* junkfilter, TupleTableSlot* slot); -extern void ExecSetjunkFilteDescriptor(JunkFilter* junkfilter, TupleDesc tupdesc); +extern JunkFilter *ExecInitJunkFilter(List *targetList, bool hasoid, TupleTableSlot *slot, + const TableAmRoutine *tam_ops); +extern void ExecInitJunkAttr(EState *estate, CmdType operation, List *targetlist, ResultRelInfo *result_rel_info); +extern JunkFilter *ExecInitJunkFilterConversion(List *targetList, TupleDesc cleanTupType, TupleTableSlot *slot); +extern AttrNumber ExecFindJunkAttribute(JunkFilter *junkfilter, const char *attrName); +extern AttrNumber ExecFindJunkAttributeInTlist(List *targetlist, const char *attrName); +extern Datum ExecGetJunkAttribute(TupleTableSlot *slot, AttrNumber attno, bool *isNull); +extern TupleTableSlot *ExecFilterJunk(JunkFilter *junkfilter, TupleTableSlot *slot); +extern void ExecSetjunkFilteDescriptor(JunkFilter *junkfilter, TupleDesc tupdesc); #ifdef PGXC extern List* ExecFindJunkPrimaryKeys(List* targetlist); @@ -290,17 +291,16 @@ static inline TupleTableSlot *ExecProcNode(PlanState *node) node->ps_rownum++; return result; } -#else /*ENABLE_MULTIPLE_NODES*/ +#else /* ENABLE_MULTIPLE_NODES */ static inline TupleTableSlot *ExecProcNode(PlanState *node) { - //TODO: FIX ENABLE_MULTIPLE_NODES return NULL; } -#endif /*ENABLE_MULTIPLE_NODES*/ +#endif /* ENABLE_MULTIPLE_NODES */ -#endif /*FRONTEND*/ +#endif /* FRONTEND */ /* @@ -333,17 +333,18 @@ extern bool is_huge_clob(Oid type_oid, bool is_null, Datum value); /* * prototypes from functions in execTuples.c */ -extern void ExecInitResultTupleSlot(EState* estate, PlanState* planstate, const TableAmRoutine* tam_ops = TableAmHeap); -extern void ExecInitScanTupleSlot(EState* estate, ScanState* scanstate, const TableAmRoutine* tam_ops = TableAmHeap); -extern TupleTableSlot* ExecInitExtraTupleSlot(EState* estate, const TableAmRoutine* tam_ops = TableAmHeap); -extern TupleTableSlot* ExecInitNullTupleSlot(EState* estate, TupleDesc tupType); -extern TupleDesc ExecTypeFromTL(List* targetList, bool hasoid, bool markdropped = false, const TableAmRoutine* tam_ops = TableAmHeap); -extern TupleDesc ExecCleanTypeFromTL(List* targetList, bool hasoid, const TableAmRoutine* tam_ops = TableAmHeap); -extern TupleDesc ExecTypeFromExprList(List* exprList, List* namesList, const TableAmRoutine* tam_ops = TableAmHeap); -extern void UpdateChangedParamSet(PlanState* node, Bitmapset* newchg); -extern void InitOutputValues(RightRefState* refState, GenericExprState* targetArr[], - Datum* values, bool* isnull, int targetCount, bool* hasExecs); -extern void SortTargetListAsArray(RightRefState* refState, List* targetList, GenericExprState* targetArr[]); +extern void ExecInitResultTupleSlot(EState *estate, PlanState *planstate, const TableAmRoutine *tam_ops = TableAmHeap); +extern void ExecInitScanTupleSlot(EState *estate, ScanState *scanstate, const TableAmRoutine *tam_ops = TableAmHeap); +extern TupleTableSlot *ExecInitExtraTupleSlot(EState *estate, const TableAmRoutine *tam_ops = TableAmHeap); +extern TupleTableSlot *ExecInitNullTupleSlot(EState *estate, TupleDesc tupType); +extern TupleDesc ExecTypeFromTL(List *targetList, bool hasoid, bool markdropped = false, + const TableAmRoutine *tam_ops = TableAmHeap); +extern TupleDesc ExecCleanTypeFromTL(List *targetList, bool hasoid, const TableAmRoutine *tam_ops = TableAmHeap); +extern TupleDesc ExecTypeFromExprList(List 
*exprList, List *namesList, const TableAmRoutine *tam_ops = TableAmHeap); +extern void UpdateChangedParamSet(PlanState *node, Bitmapset *newchg); +extern void InitOutputValues(RightRefState *refState, GenericExprState *targetArr[], Datum *values, bool *isnull, + int targetCount, bool *hasExecs); +extern void SortTargetListAsArray(RightRefState *refState, List *targetList, GenericExprState *targetArr[]); typedef struct TupOutputState { TupleTableSlot* slot; diff --git a/src/include/executor/tuptable.h b/src/include/executor/tuptable.h index 437541893..170e97506 100644 --- a/src/include/executor/tuptable.h +++ b/src/include/executor/tuptable.h @@ -128,9 +128,7 @@ #define TTS_FLAG_SLOW (1 << 4) #define TTS_SLOW(slot) (((slot)->tts_flags & TTS_FLAG_SLOW) != 0) -/* - * openGauss flags - */ +/* openGauss flags */ /* should pfree should pfree tts_dataRow? */ #define TTS_FLAG_SHOULDFREE_ROW (1 << 12) @@ -165,7 +163,6 @@ typedef struct TupleTableSlot { Oid tts_xcnodeoid; /* Oid of node from where the datarow is fetched */ MemoryContext tts_per_tuple_mcxt; #endif - } TupleTableSlot; #define TTS_HAS_PHYSICAL_TUPLE(slot) ((slot)->tts_tuple != NULL && (slot)->tts_tuple != &((slot)->tts_minhdr)) diff --git a/src/include/instruments/gs_stack.h b/src/include/instruments/gs_stack.h index b9e03b8f1..ee632c58b 100644 --- a/src/include/instruments/gs_stack.h +++ b/src/include/instruments/gs_stack.h @@ -36,6 +36,7 @@ extern void print_stack(SIGNAL_ARGS); void InitGsStack(); void get_stack_and_write_result(); void check_and_process_gs_stack(); +void get_stack_according_to_tid(ThreadId tid, StringInfoData* call_stack); NON_EXEC_STATIC void stack_perf_main(); #endif diff --git a/src/include/knl/knl_thread.h b/src/include/knl/knl_thread.h index 91053cb90..2045261e2 100755 --- a/src/include/knl/knl_thread.h +++ b/src/include/knl/knl_thread.h @@ -2823,7 +2823,8 @@ typedef enum { OLD_REPL_CHANGE_IP_OR_PORT, ADD_REPL_CONN_INFO_WITH_OLD_LOCAL_IP_PORT, ADD_REPL_CONN_INFO_WITH_NEW_LOCAL_IP_PORT, - ADD_DISASTER_RECOVERY_INFO + ADD_DISASTER_RECOVERY_INFO, + REMOVE_DISASTER_RECOVERY_INFO, } ReplConnInfoChangeType; diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h index 50a3bc216..f745b1a94 100644 --- a/src/include/miscadmin.h +++ b/src/include/miscadmin.h @@ -146,7 +146,8 @@ extern bool contain_backend_version(uint32 version_number); ((u_sess->utils_cxt.b_format_behavior_compat_flags & B_FORMAT_OPT_ENABLE_SET_SESSION_TRANSACTION) && \ u_sess->attr.attr_sql.sql_compatibility == B_FORMAT) #define ENABLE_SET_VARIABLES (u_sess->utils_cxt.b_format_behavior_compat_flags & B_FORMAT_OPT_ENABLE_SET_VARIABLES) -#define USE_DEFAULT_COLLATION (u_sess->utils_cxt.b_format_behavior_compat_flags & B_FORMAT_OPT_DEFAULT_COLLATION) +#define USE_DEFAULT_COLLATION (u_sess->utils_cxt.b_format_behavior_compat_flags & B_FORMAT_OPT_DEFAULT_COLLATION && \ + t_thrd.proc->workingVersionNum >= CHARACTER_SET_VERSION_NUM && u_sess->attr.attr_common.upgrade_mode == 0) #define ENABLE_MODIFY_COLUMN \ ((u_sess->utils_cxt.b_format_behavior_compat_flags & B_FORMAT_OPT_ENABLE_MODIFY_COLUMN) && \ u_sess->attr.attr_sql.sql_compatibility == B_FORMAT) diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index f497ac136..fb1d9311f 100755 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -2309,7 +2309,7 @@ typedef struct HashJoinState { bool hj_OuterNotEmpty; bool hj_streamBothSides; bool hj_rebuildHashtable; - List* hj_hash_collations; /* list of collations OIDs */ + List* hj_hashCollations; /* list of 
collations OIDs */ } HashJoinState; /* ---------------------------------------------------------------- diff --git a/src/include/parser/parse_node.h b/src/include/parser/parse_node.h index 79678735c..b27b9f5cb 100644 --- a/src/include/parser/parse_node.h +++ b/src/include/parser/parse_node.h @@ -271,10 +271,10 @@ struct ParseState { */ List* p_updateRangeVars; /* For multiple-update, use relationClase to generate RangeVar list. */ - RightRefState* rightRefState; + RightRefState* rightRefState; /* - * whether to record the columns referenced by the ORDER BY statement + * whether to record the columns referenced by the ORDER BY statement * when transforming the SortClause. */ bool shouldCheckOrderbyCol; diff --git a/src/include/storage/buf/buf_internals.h b/src/include/storage/buf/buf_internals.h index f42d14a16..b5a2d24b6 100644 --- a/src/include/storage/buf/buf_internals.h +++ b/src/include/storage/buf/buf_internals.h @@ -245,7 +245,6 @@ typedef struct BufferDesc { * platform with either 32 or 128 byte line sizes, it's good to align to * boundaries and avoid false sharing. */ -//#define BUFFERDESC_PAD_TO_SIZE (SIZEOF_VOID_P == 8 ? 128 : 1) #define BUFFERDESC_PAD_TO_SIZE (SIZEOF_VOID_P == 8 ? 64 : 1) typedef union BufferDescPadded { diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h index 96caef04f..b4c772915 100755 --- a/src/include/storage/proc.h +++ b/src/include/storage/proc.h @@ -353,6 +353,9 @@ typedef struct PGXACT { /* the offset of the last padding if exists*/ #define PROC_HDR_PAD_OFFSET 112 +/* max number of CMA's connections */ +#define NUM_CMAGENT_PROCS (10) + /* * There is one ProcGlobal struct for the whole database cluster. */ @@ -373,6 +376,8 @@ typedef struct PROC_HDR { PGPROC* autovacFreeProcs; /* Head of list of cm agent's free PGPROC structures */ PGPROC* cmAgentFreeProcs; + /* Head of list of cm agent's all PGPROC structures */ + PGPROC* cmAgentAllProcs[NUM_CMAGENT_PROCS]; /* Head of list of pg_job's free PGPROC structures */ PGPROC* pgjobfreeProcs; /* Head of list of bgworker free PGPROC structures */ @@ -435,8 +440,6 @@ const int MAX_COMPACTION_THREAD_NUM = 10; #define NUM_AUXILIARY_PROCS (NUM_SINGLE_AUX_PROC + NUM_MULTI_AUX_PROC) -/* max number of CMA's connections */ -#define NUM_CMAGENT_PROCS (10) /* buffer length of information when no free proc available for cm_agent */ #define CONNINFOLEN (64) diff --git a/src/include/tcop/utility.h b/src/include/tcop/utility.h index e6b1bfe12..9d9797b84 100644 --- a/src/include/tcop/utility.h +++ b/src/include/tcop/utility.h @@ -49,7 +49,7 @@ typedef struct processutility_context { } processutility_context; /* Hook for plugins to get control in ProcessUtility() */ -typedef void (*ProcessUtility_hook_type)(processutility_context* processutility_cxt, +typedef void (*ProcessUtility_hook_type)(processutility_context* processutility_cxt, DestReceiver* dest, #ifdef PGXC bool sentToRemote, diff --git a/src/include/utils/numeric_gs.h b/src/include/utils/numeric_gs.h index 88d8dbf1d..c3211c869 100644 --- a/src/include/utils/numeric_gs.h +++ b/src/include/utils/numeric_gs.h @@ -95,17 +95,19 @@ */ #define NUMERIC_DSCALE_MASK 0x3FFF -#define NUMERIC_SIGN(n) \ - (NUMERIC_HEADER_IS_SHORT(n) ? (((n)->choice.n_short.n_header & NUMERIC_SHORT_SIGN_MASK) ? NUMERIC_NEG : NUMERIC_POS) \ - : NUMERIC_FLAGBITS(n)) -#define NUMERIC_DSCALE(n) \ - (NUMERIC_HEADER_IS_SHORT((n)) ? 
((n)->choice.n_short.n_header & NUMERIC_SHORT_DSCALE_MASK) >> NUMERIC_SHORT_DSCALE_SHIFT \ - : ((n)->choice.n_long.n_sign_dscale & NUMERIC_DSCALE_MASK)) -#define NUMERIC_WEIGHT(n) \ - (NUMERIC_HEADER_IS_SHORT((n)) \ - ? (((n)->choice.n_short.n_header & NUMERIC_SHORT_WEIGHT_SIGN_MASK ? ~NUMERIC_SHORT_WEIGHT_MASK : 0) | \ - ((n)->choice.n_short.n_header & NUMERIC_SHORT_WEIGHT_MASK)) \ - : ((n)->choice.n_long.n_weight)) +#define NUMERIC_SIGN(n) \ + (NUMERIC_HEADER_IS_SHORT(n) \ + ? (((n)->choice.n_short.n_header & NUMERIC_SHORT_SIGN_MASK) ? NUMERIC_NEG : NUMERIC_POS) \ + : NUMERIC_FLAGBITS(n)) +#define NUMERIC_DSCALE(n) \ + (NUMERIC_HEADER_IS_SHORT((n)) \ + ? ((n)->choice.n_short.n_header & NUMERIC_SHORT_DSCALE_MASK) >> NUMERIC_SHORT_DSCALE_SHIFT \ + : ((n)->choice.n_long.n_sign_dscale & NUMERIC_DSCALE_MASK)) +#define NUMERIC_WEIGHT(n) \ + (NUMERIC_HEADER_IS_SHORT((n)) \ + ? (((n)->choice.n_short.n_header & NUMERIC_SHORT_WEIGHT_SIGN_MASK ? ~NUMERIC_SHORT_WEIGHT_MASK : 0) | \ + ((n)->choice.n_short.n_header & NUMERIC_SHORT_WEIGHT_MASK)) \ + : ((n)->choice.n_long.n_weight)) #define NUMERIC_DIGITS(num) (NUMERIC_HEADER_IS_SHORT(num) ? (num)->choice.n_short.n_data : (num)->choice.n_long.n_data) #define NUMERIC_NDIGITS(num) ((VARSIZE(num) - NUMERIC_HEADER_SIZE(num)) / sizeof(NumericDigit)) diff --git a/src/include/utils/partitionmap_gs.h b/src/include/utils/partitionmap_gs.h index dc50a1c65..ea893f9b6 100755 --- a/src/include/utils/partitionmap_gs.h +++ b/src/include/utils/partitionmap_gs.h @@ -263,32 +263,32 @@ typedef struct HashPartitionMap { } \ } while (0) -#define partitionRoutingForValueEqual(rel, keyValue, valueLen, topClosed, result) \ - do { \ - (keyValue) = transformConstIntoPartkeyType(((rel)->rd_att->attrs), GetPartitionKey((rel)->partMap), (keyValue), \ - (valueLen)); \ - if ((rel)->partMap->type == PART_TYPE_LIST) { \ - (result)->partArea = PART_AREA_LIST; \ - (result)->partitionId = \ - getListPartitionOid(((rel)->partMap), (keyValue), (valueLen), &((result)->partSeq), topClosed); \ - if ((result)->partSeq < 0) { \ - (result)->fileExist = false; \ - } else { \ - (result)->fileExist = true; \ - } \ - } else if ((rel)->partMap->type == PART_TYPE_HASH) { \ - (result)->partArea = PART_AREA_HASH; \ - (result)->partitionId = \ - getHashPartitionOid(((rel)->partMap), (keyValue), &((result)->partSeq), topClosed); \ - if ((result)->partSeq < 0) { \ - (result)->fileExist = false; \ - } else { \ - (result)->fileExist = true; \ - } \ - } else { \ - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), \ - errmsg("Unsupported partition strategy:%d", (rel)->partMap->type))); \ - } \ +#define partitionRoutingForValueEqual(rel, keyValue, valueLen, topClosed, result) \ + do { \ + (keyValue) = transformConstIntoPartkeyType(((rel)->rd_att->attrs), GetPartitionKey((rel)->partMap), \ + (keyValue), (valueLen)); \ + if ((rel)->partMap->type == PART_TYPE_LIST) { \ + (result)->partArea = PART_AREA_LIST; \ + (result)->partitionId = \ + getListPartitionOid(((rel)->partMap), (keyValue), (valueLen), &((result)->partSeq), topClosed); \ + if ((result)->partSeq < 0) { \ + (result)->fileExist = false; \ + } else { \ + (result)->fileExist = true; \ + } \ + } else if ((rel)->partMap->type == PART_TYPE_HASH) { \ + (result)->partArea = PART_AREA_HASH; \ + (result)->partitionId = \ + getHashPartitionOid(((rel)->partMap), (keyValue), &((result)->partSeq), topClosed); \ + if ((result)->partSeq < 0) { \ + (result)->fileExist = false; \ + } else { \ + (result)->fileExist = true; \ + } \ + } else { \ + ereport(ERROR, 
(errcode(ERRCODE_INTERNAL_ERROR), \ + errmsg("Unsupported partition strategy:%d", (rel)->partMap->type))); \ + } \ } while (0) typedef enum PruningResultState { PRUNING_RESULT_EMPTY, PRUNING_RESULT_SUBSET, PRUNING_RESULT_FULL } PruningResultState; diff --git a/src/include/utils/plpgsql.h b/src/include/utils/plpgsql.h index f063d6686..e329af9bf 100644 --- a/src/include/utils/plpgsql.h +++ b/src/include/utils/plpgsql.h @@ -1590,7 +1590,7 @@ extern PLpgSQL_variable* plpgsql_build_variable(const char* refname, int lineno, PLpgSQL_variable* plpgsql_build_varrayType(const char* refname, int lineno, PLpgSQL_type* dtype, bool add2namespace); PLpgSQL_variable* plpgsql_build_tableType(const char* refname, int lineno, PLpgSQL_type* dtype, bool add2namespace); extern PLpgSQL_rec_type* plpgsql_build_rec_type(const char* typname, int lineno, List* list, bool add2namespace); -extern PLpgSQL_rec* plpgsql_build_record(const char* refname, int lineno, bool add2namespace); +extern PLpgSQL_rec* plpgsql_build_record(const char* refname, int lineno, bool add2namespace, TupleDesc tupleDesc); extern int plpgsql_recognize_err_condition(const char* condname, bool allow_sqlstate); extern PLpgSQL_condition* plpgsql_parse_err_condition(char* condname); extern PLpgSQL_condition* plpgsql_parse_err_condition_b(const char* condname); diff --git a/src/test/regress/expected/alter_table_000.out b/src/test/regress/expected/alter_table_000.out index 73c2a85d6..7f6c12dae 100644 --- a/src/test/regress/expected/alter_table_000.out +++ b/src/test/regress/expected/alter_table_000.out @@ -49,7 +49,7 @@ column_clause | ENCRYPTION KEY ROTATION | AUTO_INCREMENT [ = ] value | ALTER INDEX index_name [ VISBLE | INVISIBLE ] - | [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ] + | [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] default_charset ] [ [ DEFAULT ] COLLATE [ = ] default_collation ] | CONVERT TO CHARACTER SET | CHARSET charset [ COLLATE collation ] NOTICE: '[ CHARACTER SET | CHARSET ]' is only available in CENTRALIZED mode and B-format database! NOTICE: '[ COLLATE ]' is only available in CENTRALIZED mode and B-format database! diff --git a/src/test/regress/expected/alter_table_modify_ustore.out b/src/test/regress/expected/alter_table_modify_ustore.out index ff30fd730..7ea642a15 100644 --- a/src/test/regress/expected/alter_table_modify_ustore.out +++ b/src/test/regress/expected/alter_table_modify_ustore.out @@ -992,19 +992,19 @@ DETAIL: modify or change a column used by materialized view or rule is not supp DROP RULE test_at_modify_rule ON test_at_modify_depend; -- --RLSPOLICY reference column. 
DROP TABLE test_at_modify_depend; -CREATE ROLE at_modify_role PASSWORD 'Gauss@123'; +CREATE ROLE at_modify_role_ustore PASSWORD 'Gauss@123'; CREATE TABLE test_at_modify_depend( a int, b int NOT NULL ) WITH(STORAGE_TYPE=USTORE); INSERT INTO test_at_modify_depend VALUES(0,0); -GRANT USAGE ON SCHEMA atbdb_ustore_schema TO at_modify_role; -GRANT SELECT ON test_at_modify_depend TO at_modify_role; +GRANT USAGE ON SCHEMA atbdb_ustore_schema TO at_modify_role_ustore; +GRANT SELECT ON test_at_modify_depend TO at_modify_role_ustore; ALTER TABLE test_at_modify_depend ENABLE ROW LEVEL SECURITY; -CREATE ROW LEVEL SECURITY POLICY test_at_modify_rls ON test_at_modify_depend AS RESTRICTIVE FOR SELECT TO at_modify_role USING(b >= 20); +CREATE ROW LEVEL SECURITY POLICY test_at_modify_rls ON test_at_modify_depend AS RESTRICTIVE FOR SELECT TO at_modify_role_ustore USING(b >= 20); ALTER TABLE test_at_modify_depend MODIFY COLUMN b int not null; INSERT INTO test_at_modify_depend VALUES(21,21); -SET ROLE at_modify_role PASSWORD 'Gauss@123'; +SET ROLE at_modify_role_ustore PASSWORD 'Gauss@123'; SELECT * FROM test_at_modify_depend ORDER BY 1,2; a | b ----+---- @@ -1022,7 +1022,7 @@ SELECT * FROM test_at_modify_depend ORDER BY 1,2; ALTER TABLE test_at_modify_depend MODIFY COLUMN b bool not null; ALTER TABLE test_at_modify_depend MODIFY COLUMN b int not null; INSERT INTO test_at_modify_depend VALUES(22,22); -SET ROLE at_modify_role PASSWORD 'Gauss@123'; +SET ROLE at_modify_role_ustore PASSWORD 'Gauss@123'; SELECT * FROM test_at_modify_depend ORDER BY 1,2; a | b ----+---- @@ -1039,8 +1039,8 @@ SELECT * FROM test_at_modify_depend ORDER BY 1,2; (3 rows) DROP TABLE test_at_modify_depend; -REVOKE ALL PRIVILEGES ON SCHEMA atbdb_ustore_schema FROM at_modify_role; -DROP ROLE at_modify_role; +REVOKE ALL PRIVILEGES ON SCHEMA atbdb_ustore_schema FROM at_modify_role_ustore; +DROP ROLE at_modify_role_ustore; -- ------------------------------------------------------ test ALTER TABLE CHANGE -- test change column without data CREATE TABLE test_at_change( @@ -1165,8 +1165,8 @@ select pg_get_tabledef('test_at_change'::regclass); CONSTRAINT test_at_change_b1_check CHECK (((b)::text < 'a'::text)) + ) + WITH (orientation=row, storage_type=ustore, compression=no); + - ALTER TABLE test_at_change ADD CONSTRAINT test_at_change_b_key UNIQUE USING ubtree (b) WITH (storage_type=ustore); + - ALTER TABLE test_at_change ADD CONSTRAINT test_at_change_pkey PRIMARY KEY USING ubtree (b) WITH (storage_type=ustore); +--?.* +--?.* (1 row) INSERT INTO test_at_change VALUES(1,1); @@ -1866,19 +1866,19 @@ DETAIL: modify or change a column used by materialized view or rule is not supp DROP RULE test_at_change_rule ON test_at_change_depend; -- --RLSPOLICY reference column. 
DROP TABLE test_at_change_depend; -CREATE ROLE at_change_role PASSWORD 'Gauss@123'; +CREATE ROLE at_change_ustore_role PASSWORD 'Gauss@123'; CREATE TABLE test_at_change_depend( a int, b int NOT NULL ) WITH(STORAGE_TYPE=USTORE); INSERT INTO test_at_change_depend VALUES(0,0); -GRANT USAGE ON SCHEMA atbdb_ustore_schema TO at_change_role; -GRANT SELECT ON test_at_change_depend TO at_change_role; +GRANT USAGE ON SCHEMA atbdb_ustore_schema TO at_change_ustore_role; +GRANT SELECT ON test_at_change_depend TO at_change_ustore_role; ALTER TABLE test_at_change_depend ENABLE ROW LEVEL SECURITY; -CREATE ROW LEVEL SECURITY POLICY test_at_change_rls ON test_at_change_depend AS RESTRICTIVE FOR SELECT TO at_change_role USING(b >= 20); +CREATE ROW LEVEL SECURITY POLICY test_at_change_rls ON test_at_change_depend AS RESTRICTIVE FOR SELECT TO at_change_ustore_role USING(b >= 20); ALTER TABLE test_at_change_depend CHANGE COLUMN b b1 int not null; INSERT INTO test_at_change_depend VALUES(21,21); -SET ROLE at_change_role PASSWORD 'Gauss@123'; +SET ROLE at_change_ustore_role PASSWORD 'Gauss@123'; SELECT * FROM test_at_change_depend ORDER BY 1,2; a | b1 ----+---- @@ -1896,7 +1896,7 @@ SELECT * FROM test_at_change_depend ORDER BY 1,2; ALTER TABLE test_at_change_depend CHANGE COLUMN b1 b2 bool not null; ALTER TABLE test_at_change_depend CHANGE COLUMN b2 b3 int not null; INSERT INTO test_at_change_depend VALUES(22,22); -SET ROLE at_change_role PASSWORD 'Gauss@123'; +SET ROLE at_change_ustore_role PASSWORD 'Gauss@123'; SELECT * FROM test_at_change_depend ORDER BY 1,2; a | b3 ----+---- @@ -1913,8 +1913,8 @@ SELECT * FROM test_at_change_depend ORDER BY 1,2; (3 rows) DROP TABLE test_at_change_depend; -REVOKE ALL PRIVILEGES ON SCHEMA atbdb_ustore_schema FROM at_change_role; -DROP ROLE at_change_role; +REVOKE ALL PRIVILEGES ON SCHEMA atbdb_ustore_schema FROM at_change_ustore_role; +DROP ROLE at_change_ustore_role; -- test alter command order CREATE TABLE test_at_pass( a int, diff --git a/src/test/regress/expected/event.out b/src/test/regress/expected/event.out index dff252e43..10eb27285 100644 --- a/src/test/regress/expected/event.out +++ b/src/test/regress/expected/event.out @@ -845,6 +845,45 @@ show events where job_name='e1'; (1 row) drop event if exists e1; +--security check +drop user if exists event_se_a cascade; +NOTICE: role "event_se_a" does not exist, skipping +drop user if exists event_se_b cascade; +NOTICE: role "event_se_b" does not exist, skipping +drop user if exists event_se_c cascade; +NOTICE: role "event_se_c" does not exist, skipping +drop user if exists event_se_d cascade; +NOTICE: role "event_se_d" does not exist, skipping +create user event_se_a with MONADMIN password 'event_123'; +create user event_se_b with OPRADMIN password 'event_123'; +create user event_se_c with INDEPENDENT password 'event_123'; +WARNING: Please carefully use independent user as it need more self-management. +HINT: Self-management include logical backup, password manage and so on. +create user event_se_d with SYSADMIN password 'event_123'; +drop event if exists e; +NOTICE: event "e" is not exists, skipping +create definer=event_se_a event e on schedule at sysdate do select 1; +ERROR: definer_name cannot be specified as a private user, operator admin, or monitoradmin. +create definer=event_se_b event e on schedule at sysdate do select 1; +ERROR: definer_name cannot be specified as a private user, operator admin, or monitoradmin. 
+create definer=event_se_c event e on schedule at sysdate do select 1; +ERROR: definer_name cannot be specified as a private user, operator admin, or monitoradmin. +create definer=event_se_d event e on schedule at sysdate do select 1; +drop event if exists e; +create event e on schedule at sysdate disable do select 1; +alter definer=event_se_a event e; +ERROR: definer_name cannot be specified as a private user, operator admin, or monitoradmin. +alter definer=event_se_b event e; +ERROR: definer_name cannot be specified as a private user, operator admin, or monitoradmin. +alter definer=event_se_c event e; +ERROR: definer_name cannot be specified as a private user, operator admin, or monitoradmin. +alter definer=event_se_d event e; +drop event if exists e; +\c event_b +drop user if exists event_se_a cascade; +drop user if exists event_se_b cascade; +drop user if exists event_se_c cascade; +drop user if exists event_se_d cascade; --test sql help drop table if exists event_a.a; NOTICE: table "a" does not exist, skipping diff --git a/src/test/regress/expected/hw_subpartition_createtable.out b/src/test/regress/expected/hw_subpartition_createtable.out index a516dc261..2b2c6651c 100644 --- a/src/test/regress/expected/hw_subpartition_createtable.out +++ b/src/test/regress/expected/hw_subpartition_createtable.out @@ -2292,7 +2292,7 @@ CREATE TABLE [ IF NOT EXISTS ] subpartition_table_name [, ... ] ) [ AUTO_INCREMENT [ = ] value ] - [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ][ [ DEFAULT ] COLLATE [ = ] collation ] + [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] default_charset ][ [ DEFAULT ] COLLATE [ = ] default_collation ] [ WITH ( {storage_parameter = value} [, ... ] ) ] [ COMPRESS | NOCOMPRESS ] [ TABLESPACE tablespace_name ] diff --git a/src/test/regress/expected/plpgsql_cursor_rowtype.out b/src/test/regress/expected/plpgsql_cursor_rowtype.out index 0b27032c6..7c071a89c 100644 --- a/src/test/regress/expected/plpgsql_cursor_rowtype.out +++ b/src/test/regress/expected/plpgsql_cursor_rowtype.out @@ -640,6 +640,32 @@ INFO: cursor alread closed drop procedure pro_cs_trans_1; drop table cs_trans_1; +-- test for rec in cursor loop +show behavior_compat_options; + behavior_compat_options +------------------------------- + allow_procedure_compile_check +(1 row) + +create table test_table(col1 varchar2(10)); +create or replace package test_pckg as + procedure test_proc(v01 in varchar2); +end test_pckg; +/ +create or replace package body test_pckg as + procedure test_proc(v01 in varchar2) as + cursor cur(vcol1 varchar2) is select col1 from test_table where col1 = vcol1; + v02 varchar2; + begin + for rec in cur(v01) loop + v02 := 'a'; + end loop; + end; +end test_pckg; +/ +drop table test_table; +drop package test_pckg; +NOTICE: drop cascades to function plpgsql_cursor_rowtype.test_proc(character varying) -- test for rec in select loop when rec is defined set behavior_compat_options='proc_implicit_for_loop_variable'; create table t1(a int, b int); @@ -780,6 +806,45 @@ drop table t1; drop table t2; set behavior_compat_options=''; set plsql_compile_check_options=''; +create or replace procedure check_compile() as +declare + cursor c1 is select sysdate a; + v_a varchar2; +begin + for rec in c1 loop + select 'aa' into v_a from sys_dummy where sysdate = rec.a; + raise info '%' ,v_a; + end loop; +end; +/ +call check_compile(); +INFO: aa + check_compile +--------------- + +(1 row) + +set behavior_compat_options='allow_procedure_compile_check'; +create or replace procedure check_compile_1() as +declare + 
cursor c1 is select sysdate a; + v_a varchar2; +begin + for rec in c1 loop + select 'aa' into v_a from sys_dummy where sysdate = rec.a; + raise info '%' ,v_a; + end loop; +end; +/ +call check_compile_1(); +INFO: aa + check_compile_1 +----------------- + +(1 row) + +set behavior_compat_options=''; +drop procedure check_compile; ---- clean ---- drop package pck1; NOTICE: drop cascades to function plpgsql_cursor_rowtype.p1() @@ -809,7 +874,7 @@ NOTICE: drop cascades to function plpgsql_cursor_rowtype.p1() drop package pckg_test2; NOTICE: drop cascades to function plpgsql_cursor_rowtype.p1() drop schema plpgsql_cursor_rowtype cascade; -NOTICE: drop cascades to 12 other objects +NOTICE: drop cascades to 13 other objects DETAIL: drop cascades to table emp drop cascades to function pro_cursor_no_args_1() drop cascades to function pro_cursor_no_args_2() @@ -822,5 +887,6 @@ drop cascades to table for_loop_test_002 drop cascades to function test_forloop_001() drop cascades to function pro_close_cursor1() drop cascades to function pro_close_cursor2() +drop cascades to function check_compile_1() drop schema schema1 cascade; NOTICE: drop cascades to table schema1.t11 diff --git a/src/test/regress/expected/plsql_show_all_error.out b/src/test/regress/expected/plsql_show_all_error.out index 8145f4f6b..1ace0ffe9 100644 --- a/src/test/regress/expected/plsql_show_all_error.out +++ b/src/test/regress/expected/plsql_show_all_error.out @@ -1673,6 +1673,112 @@ drop procedure pro_tblof_pro_013_1(); ERROR: function pro_tblof_pro_013_1 does not exist drop type pro_tblof_013; drop table pro_tblof_tbl_013; +set behavior_compat_options = 'allow_procedure_compile_check'; +create or replace procedure pro19 +as +a int; +begin + select c1 from t1 into ; + select c1 from t1 limit 1 a; + select c1 from t1 where c2=1 intoo a; + select c1 from t1 into b; +end; +/ +NOTICE: syntax error at or near ";" +LINE 4: select c1 from t1 into ; + ^ +QUERY: DECLARE +a int; +begin + select c1 from t1 into ; + select c1 from t1 limit 1 a; + select c1 from t1 where c2=1 intoo a; + select c1 from t1 into b; +end +ERROR: relation "t1" does not exist +LINE 4: select c1 from t1 into ; + ^ +DETAIL: +QUERY: DECLARE +a int; +begin + select c1 from t1 into ; + select c1 from t1 limit 1 a; + select c1 from t1 where c2=1 intoo a; + select c1 from t1 into b; +end +create or replace procedure pro20 +as +a int; +begin + select into a from t1.c1; + select 1 into ab; +end; +/ +NOTICE: syntax error at or near "from" +LINE 4: select into a from t1.c1; + ^ +QUERY: DECLARE +a int; +begin + select into a from t1.c1; + select 1 into ab; +end +NOTICE: syntax error at or near "from" +LINE 2: a int; + ^ +QUERY: DECLARE +a int; +begin + select into a from t1.c1; + select 1 into ab; +end +ERROR: compile failed when parse the query: select from t1.c1 +DETAIL: unexpected null parsetree list +CONTEXT: compilation of PL/pgSQL function "pro20" near line 4 +create or replace procedure pro21 +as +a int; +begin + select 1 into ab; + select 1 intoo a; + dbe_output.print_line('a is:'||a); +end; +/ +NOTICE: "ab" is not a known variable +LINE 4: select 1 into ab; + ^ +QUERY: DECLARE +a int; +begin + select 1 into ab; + select 1 intoo a; + dbe_output.print_line('a is:'||a); +end +ERROR: Invalid use of identifiers. 
+LINE 5: select 1 intoo a; + ^ +DETAIL: Syntax error found near token "intoo" +QUERY: DECLARE +a int; +begin + select 1 into ab; + select 1 intoo a; + dbe_output.print_line('a is:'||a); +end +create or replace procedure pro22 +as +a int; +b char(1); +begin + b=:1; + select 1 into a; + dbe_output.print_line('a is:'||a); +end; +/ +ERROR: schema "dbe_output" does not exist +CONTEXT: compilation of PL/pgSQL function "pro22" near line 6 +set behavior_compat_options = ''; drop package if exists package_020; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to function public.pro1(integer,integer,character varying) diff --git a/src/test/regress/expected/test_b_format_collate.out b/src/test/regress/expected/test_b_format_collate.out index 708bdd196..05bd77ccc 100644 --- a/src/test/regress/expected/test_b_format_collate.out +++ b/src/test/regress/expected/test_b_format_collate.out @@ -48,6 +48,24 @@ DETAIL: this collation only support in B-format database create unique index idx_2 on t1(a collate "utf8mb4_unicode_ci"); ERROR: Un-support feature DETAIL: this collation only support in B-format database +create table hashjoin1(id int, f1 text, f2 text); +create table hashjoin2(id int, f3 text, f4 text); + +insert into hashjoin1 select generate_series(1,100), 'a', 'a'; +insert into hashjoin2 select generate_series(1,100), 'a', 'a'; + +select f1, f3 from hashjoin1 as h1 inner join hashjoin2 as h2 +on (h1.f2 = h2.f4) +where (('a','a') in (select h1.f2, h2.f4 +from (hashjoin1 inner join hashjoin2 on hashjoin1.id = hashjoin2.id) order by 1, 2)) +group by h1.f1, h2.f3 +order by 1,2 limit 10; + f1 | f3 +----+---- + a | a +(1 row) + +drop table if exists hashjoin1, hashjoin2; -- test binary drop table if exists t1; create table t1(a blob collate binary); @@ -880,6 +898,24 @@ select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f S | S (1 row) +create table hashjoin1(id int, f1 text, f2 text) collate 'utf8mb4_bin'; +create table hashjoin2(id int, f3 text, f4 text) collate 'utf8mb4_bin'; + +insert into hashjoin1 select generate_series(1,100), 'a', 'a'; +insert into hashjoin2 select generate_series(1,100), 'a', 'a'; + +select f1, f3 from hashjoin1 as h1 inner join hashjoin2 as h2 +on (h1.f2 = h2.f4) +where (('a','a') in (select h1.f2, h2.f4 +from (hashjoin1 inner join hashjoin2 on hashjoin1.id = hashjoin2.id) order by 1, 2)) +group by h1.f1, h2.f3 +order by 1,2 limit 10; + f1 | f3 +----+---- + a | a +(1 row) + +drop table if exists hashjoin1, hashjoin2; -- test nestloop set enable_hashjoin=off; set enable_nestloop=on; diff --git a/src/test/regress/expected/test_ustore_index.out b/src/test/regress/expected/test_ustore_index.out index 8f3d5a2d3..7587a4d87 100644 --- a/src/test/regress/expected/test_ustore_index.out +++ b/src/test/regress/expected/test_ustore_index.out @@ -609,4 +609,48 @@ select /*+ indexscan(t) */ * from t where a = 1; (1 row) set ustore_attr="index_trace_level=no;enable_log_tuple=off"; -drop table t; \ No newline at end of file +drop table t; +-- test ustore querying with bitmapindexscan when updated in the same transaction +set enable_indexscan to off; +set enable_indexonlyscan to off; +set enable_seqscan to off; +set enable_bitmapscan to on; +drop table if exists test; +NOTICE: table "test" does not exist, skipping +create table test(a int); +create index test_idx on test(a); +insert into test values(2); +insert into test values(2); +insert into test values(1); +insert into test values(1); +begin; +declare c1 cursor for select a from test where a = 2; 
+update test set a = 2;
+fetch next from c1;
+ a 
+---
+ 2
+(1 row)
+
+fetch next from c1;
+ a 
+---
+ 2
+(1 row)
+
+fetch next from c1;
+ a 
+---
+(0 rows)
+
+fetch next from c1;
+ a 
+---
+(0 rows)
+
+rollback;
+drop table if exists test;
+reset enable_indexscan;
+reset enable_indexonlyscan;
+reset enable_seqscan;
+reset enable_bitmapscan;
diff --git a/src/test/regress/input/db4ai_snapshots.source b/src/test/regress/input/db4ai_snapshots.source
index e08318142..2b74212f9 100644
--- a/src/test/regress/input/db4ai_snapshots.source
+++ b/src/test/regress/input/db4ai_snapshots.source
@@ -1,2 +1,10 @@
 --run
 \! sh @abs_srcdir@/snapshots_test/test.sh -r -p @portstring@ -d regression
+
+drop database if exists test1;
+create database test1 dbcompatibility 'b';
+\c test1
+create table t1(id int);
+insert into t1 values(1);
+create snapshot qwer as select * from t1;
+\c regression
\ No newline at end of file
diff --git a/src/test/regress/input/select_into_file.source b/src/test/regress/input/select_into_file.source
index bbbb43c48..4ad06846d 100644
--- a/src/test/regress/input/select_into_file.source
+++ b/src/test/regress/input/select_into_file.source
@@ -25,6 +25,10 @@ select * from t into outfile '@abs_srcdir@/data/select_into_file.data' FIELDS TE
 select * from t into dumpfile '@abs_srcdir@/data/select_into_file.data';
 select * from t limit 1 into dumpfile '@abs_srcdir@/data/select_into_file.data';
 \! cat @abs_srcdir@/data/select_into_file.data
+create table test_null(a int, b int, c int);
+insert into test_null values(null,null,null);
+select * from test_null;
+select * from test_null into dumpfile '@abs_srcdir@/data/select_into_file.data';
 \c regression;
 drop database test_select_into_file;
 \! rm @abs_srcdir@/data/select_into_file.data
\ No newline at end of file
diff --git a/src/test/regress/output/charset_b_format.source b/src/test/regress/output/charset_b_format.source
index 01034c5c7..3ac810b7f 100755
--- a/src/test/regress/output/charset_b_format.source
+++ b/src/test/regress/output/charset_b_format.source
@@ -415,8 +415,8 @@ Syntax:
 CREATE SCHEMA [ IF NOT EXISTS ] schema_name
     [ AUTHORIZATION user_name ] [WITH BLOCKCHAIN] [ schema_element [ ... ] ];
 CREATE SCHEMA schema_name
-    [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ];
-NOTICE: '[ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ]' is only available in CENTRALIZED mode and B-format database!
+    [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] default_charset ] [ [ DEFAULT ] COLLATE [ = ] default_collation ];
+NOTICE: '[ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] default_charset ] [ [ DEFAULT ] COLLATE [ = ] default_collation ]' is only available in CENTRALIZED mode and B-format database!
 
 \h alter schema;
 Command: ALTER SCHEMA
@@ -428,8 +428,8 @@ ALTER SCHEMA schema_name
     OWNER TO new_owner;
 ALTER SCHEMA schema_name
     {WITH | WITHOUT} BLOCKCHAIN;
 ALTER SCHEMA schema_name
-    [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ];
-NOTICE: '[ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ]' is only available in CENTRALIZED mode and B-format database!
+    [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] default_charset ] [ [ DEFAULT ] COLLATE [ = ] default_collation ];
+NOTICE: '[ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] default_charset ] [ [ DEFAULT ] COLLATE [ = ] default_collation ]' is only available in CENTRALIZED mode and B-format database!
 
 \h create table;
 Command: CREATE TABLE
@@ -442,7 +442,7 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI
     | LIKE source_table [ like_option [...] ] }
     [, ... ])
     [ AUTO_INCREMENT [ = ] value ]
-    [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ]
+    [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] default_charset ] [ [ DEFAULT ] COLLATE [ = ] default_collation ]
     [ WITH ( {storage_parameter = value} [, ... ] ) ]
     [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ]
     [ COMPRESS | NOCOMPRESS ]
@@ -516,7 +516,7 @@ CREATE TABLE [ IF NOT EXISTS ] partition_table_name
     [, ... ] ] )
     [ AUTO_INCREMENT [ = ] value ]
-    [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ][ [ DEFAULT ] COLLATE [ = ] collation ]
+    [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] default_charset ][ [ DEFAULT ] COLLATE [ = ] default_collation ]
     [ WITH ( {storage_parameter = value} [, ... ] ) ]
     [ COMPRESS | NOCOMPRESS ]
     [ TABLESPACE tablespace_name ]
@@ -593,7 +593,7 @@ CREATE TABLE [ IF NOT EXISTS ] subpartition_table_name
     [, ... ] )
     [ AUTO_INCREMENT [ = ] value ]
-    [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ][ [ DEFAULT ] COLLATE [ = ] collation ]
+    [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] default_charset ][ [ DEFAULT ] COLLATE [ = ] default_collation ]
     [ WITH ( {storage_parameter = value} [, ... ] ) ]
     [ COMPRESS | NOCOMPRESS ]
     [ TABLESPACE tablespace_name ]
@@ -695,7 +695,7 @@ column_clause
     | ENCRYPTION KEY ROTATION
     | AUTO_INCREMENT [ = ] value
     | ALTER INDEX index_name [ VISBLE | INVISIBLE ]
-    | [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] charset ] [ [ DEFAULT ] COLLATE [ = ] collation ]
+    | [ [ DEFAULT ] CHARACTER SET | CHARSET [ = ] default_charset ] [ [ DEFAULT ] COLLATE [ = ] default_collation ]
     | CONVERT TO CHARACTER SET | CHARSET charset [ COLLATE collation ]
 NOTICE: '[ CHARACTER SET | CHARSET ]' is only available in CENTRALIZED mode and B-format database!
 NOTICE: '[ COLLATE ]' is only available in CENTRALIZED mode and B-format database!
diff --git a/src/test/regress/output/db4ai_snapshots.source b/src/test/regress/output/db4ai_snapshots.source
index 95f80c8d4..14cf2a75a 100644
--- a/src/test/regress/output/db4ai_snapshots.source
+++ b/src/test/regress/output/db4ai_snapshots.source
@@ -18,3 +18,16 @@ # RUNNING TEST #
 case: 14 SQLSyntax.sql ............ PASS
 ### TEARDOWN ### harness: TeardownTestHarness.sql .. PASS
 ##### DONE ##### DB4AI Snapshots regression test complete
+drop database if exists test1;
+NOTICE: database "test1" does not exist, skipping
+create database test1 dbcompatibility 'b';
+\c test1
+create table t1(id int);
+insert into t1 values(1);
+create snapshot qwer as select * from t1;
+ schema |    name    
+--------+------------
+ public | qwer@1.0.0
+(1 row)
+
+\c regression
diff --git a/src/test/regress/output/event_dump_audit.source b/src/test/regress/output/event_dump_audit.source
index fd960c507..be73d42c3 100644
--- a/src/test/regress/output/event_dump_audit.source
+++ b/src/test/regress/output/event_dump_audit.source
@@ -60,7 +60,7 @@ create database restore_event_dump_db with dbcompatibility 'b';
 select job_name, job_status,failure_msg from pg_job where dbname='restore_event_dump_db';
  job_name | job_status | failure_msg 
 ----------+------------+-------------
---?.*
+ ea       | s          | 
 (1 row)
 
 \c regression
diff --git a/src/test/regress/output/select_into_file.source b/src/test/regress/output/select_into_file.source
index 0a1d8fb9e..809fd3d65 100644
--- a/src/test/regress/output/select_into_file.source
+++ b/src/test/regress/output/select_into_file.source
@@ -48,6 +48,15 @@ select * from t into dumpfile '@abs_srcdir@/data/select_into_file.data';
 ERROR: Result consisted of more than one row
 select * from t limit 1 into dumpfile '@abs_srcdir@/data/select_into_file.data';
 \! cat @abs_srcdir@/data/select_into_file.data
-1c1text1ENUM101DEADBEEF\\xdeadbeef\c regression;
+1c1text1ENUM101DEADBEEF\\xdeadbeefcreate table test_null(a int, b int, c int);
+insert into test_null values(null,null,null);
+select * from test_null;
+ a | b | c 
+---+---+---
+   |   |  
+(1 row)
+
+select * from test_null into dumpfile '@abs_srcdir@/data/select_into_file.data';
+\c regression;
 drop database test_select_into_file;
 \! rm @abs_srcdir@/data/select_into_file.data
diff --git a/src/test/regress/parallel_schedule0 b/src/test/regress/parallel_schedule0
index d2ffe2e56..724f901ca 100644
--- a/src/test/regress/parallel_schedule0
+++ b/src/test/regress/parallel_schedule0
@@ -649,6 +649,7 @@ test: holdable_cursor
 test: alter_table_000 alter_table_002 alter_table_003 alter_table_modify
 #test: alter_table_001 alter_table_modify_ustore
 test: alter_table_modify_ltt alter_table_modify_gtt
+test: alter_table_modify alter_table_modify_ustore alter_table_modify_ltt alter_table_modify_gtt
 
 #test: with
 
@@ -1087,4 +1088,3 @@ test: alter_foreign_schema
 test: slow_sql
 # test user@host
 test: user_host_test
-
diff --git a/src/test/regress/parallel_schedule0U b/src/test/regress/parallel_schedule0U
index 27a78231c..7e1f97698 100644
--- a/src/test/regress/parallel_schedule0U
+++ b/src/test/regress/parallel_schedule0U
@@ -485,6 +485,13 @@ test: char_truncation_common char_truncation_cast
 
 #this case is dispatched from schedule10(gin_test)
 test: gin_test1 gin_test2 gin_test3
+test: db4ai_snapshots
+test: db4ai_gd_train_predict
+test: db4ai_gd_houses
+test: db4ai_gd_snapshots
+test: db4ai_gd_pca_train_predict
+test: db4ai_kmeans_train_predict
+test: db4ai_xgboost_train_predict
 
 #the fallowing part is dispatched from schedule15
 # FIXME: move me back to the parallel test when the refcnt issue is fixed
diff --git a/src/test/regress/pg_regress.cpp b/src/test/regress/pg_regress.cpp
index cb5f5e917..507a3f1d5 100644
--- a/src/test/regress/pg_regress.cpp
+++ b/src/test/regress/pg_regress.cpp
@@ -5461,7 +5461,7 @@ static void check_global_variables()
     }
 }
 
-#define BASE_PGXC_LIKE_MACRO_NUM 1417
+#define BASE_PGXC_LIKE_MACRO_NUM 1418
 static void check_pgxc_like_macros()
 {
 #ifdef BUILD_BY_CMAKE
diff --git a/src/test/regress/sql/alter_table_modify_ustore.sql b/src/test/regress/sql/alter_table_modify_ustore.sql
index 176d345ea..277ac20cc 100644
--- a/src/test/regress/sql/alter_table_modify_ustore.sql
+++ b/src/test/regress/sql/alter_table_modify_ustore.sql
@@ -390,32 +390,32 @@ DROP RULE test_at_modify_rule ON test_at_modify_depend;
 --
 --RLSPOLICY reference column.
 DROP TABLE test_at_modify_depend;
-CREATE ROLE at_modify_role PASSWORD 'Gauss@123';
+CREATE ROLE at_modify_role_ustore PASSWORD 'Gauss@123';
 CREATE TABLE test_at_modify_depend(
     a int,
     b int NOT NULL
 ) WITH(STORAGE_TYPE=USTORE);
 INSERT INTO test_at_modify_depend VALUES(0,0);
-GRANT USAGE ON SCHEMA atbdb_ustore_schema TO at_modify_role;
-GRANT SELECT ON test_at_modify_depend TO at_modify_role;
+GRANT USAGE ON SCHEMA atbdb_ustore_schema TO at_modify_role_ustore;
+GRANT SELECT ON test_at_modify_depend TO at_modify_role_ustore;
 ALTER TABLE test_at_modify_depend ENABLE ROW LEVEL SECURITY;
-CREATE ROW LEVEL SECURITY POLICY test_at_modify_rls ON test_at_modify_depend AS RESTRICTIVE FOR SELECT TO at_modify_role USING(b >= 20);
+CREATE ROW LEVEL SECURITY POLICY test_at_modify_rls ON test_at_modify_depend AS RESTRICTIVE FOR SELECT TO at_modify_role_ustore USING(b >= 20);
 ALTER TABLE test_at_modify_depend MODIFY COLUMN b int not null;
 INSERT INTO test_at_modify_depend VALUES(21,21);
-SET ROLE at_modify_role PASSWORD 'Gauss@123';
+SET ROLE at_modify_role_ustore PASSWORD 'Gauss@123';
 SELECT * FROM test_at_modify_depend ORDER BY 1,2;
 RESET ROLE;
 SELECT * FROM test_at_modify_depend ORDER BY 1,2;
 ALTER TABLE test_at_modify_depend MODIFY COLUMN b bool not null;
 ALTER TABLE test_at_modify_depend MODIFY COLUMN b int not null;
 INSERT INTO test_at_modify_depend VALUES(22,22);
-SET ROLE at_modify_role PASSWORD 'Gauss@123';
+SET ROLE at_modify_role_ustore PASSWORD 'Gauss@123';
 SELECT * FROM test_at_modify_depend ORDER BY 1,2;
 RESET ROLE;
 SELECT * FROM test_at_modify_depend ORDER BY 1,2;
 DROP TABLE test_at_modify_depend;
-REVOKE ALL PRIVILEGES ON SCHEMA atbdb_ustore_schema FROM at_modify_role;
-DROP ROLE at_modify_role;
+REVOKE ALL PRIVILEGES ON SCHEMA atbdb_ustore_schema FROM at_modify_role_ustore;
+DROP ROLE at_modify_role_ustore;
 
 -- ------------------------------------------------------ test ALTER TABLE CHANGE
 -- test change column without data
@@ -1243,4 +1243,4 @@ RESET CURRENT_SCHEMA;
 DROP SCHEMA atbdb_ustore_schema CASCADE;
 \c regression
 clean connection to all force for database atbdb_ustore;
-drop database if exists atbdb_ustore;
\ No newline at end of file
+drop database if exists atbdb_ustore;
diff --git a/src/test/regress/sql/event.sql b/src/test/regress/sql/event.sql
index b5a2355f0..963e38d74 100644
--- a/src/test/regress/sql/event.sql
+++ b/src/test/regress/sql/event.sql
@@ -317,6 +317,36 @@ show events like 'e_';
 show events where job_name='e1';
 drop event if exists e1;
 
+--security check
+drop user if exists event_se_a cascade;
+drop user if exists event_se_b cascade;
+drop user if exists event_se_c cascade;
+drop user if exists event_se_d cascade;
+
+create user event_se_a with MONADMIN password 'event_123';
+create user event_se_b with OPRADMIN password 'event_123';
+create user event_se_c with INDEPENDENT password 'event_123';
+create user event_se_d with SYSADMIN password 'event_123';
+drop event if exists e;
+create definer=event_se_a event e on schedule at sysdate do select 1;
+create definer=event_se_b event e on schedule at sysdate do select 1;
+create definer=event_se_c event e on schedule at sysdate do select 1;
+create definer=event_se_d event e on schedule at sysdate do select 1;
+drop event if exists e;
+
+create event e on schedule at sysdate disable do select 1;
+alter definer=event_se_a event e;
+alter definer=event_se_b event e;
+alter definer=event_se_c event e;
+alter definer=event_se_d event e;
+drop event if exists e;
+
+\c event_b
+drop user if exists event_se_a cascade;
+drop user if exists event_se_b cascade;
+drop user if exists event_se_c cascade;
+drop user if exists event_se_d cascade;
+
 --test sql help
 drop table if exists event_a.a;
 
diff --git a/src/test/regress/sql/plpgsql_cursor_rowtype.sql b/src/test/regress/sql/plpgsql_cursor_rowtype.sql
index 56e039dd9..9b82a565f 100644
--- a/src/test/regress/sql/plpgsql_cursor_rowtype.sql
+++ b/src/test/regress/sql/plpgsql_cursor_rowtype.sql
@@ -547,6 +547,29 @@ call pro_cs_trans_1();
 drop procedure pro_cs_trans_1;
 drop table cs_trans_1;
 
+-- test for rec in cursor loop
+show behavior_compat_options;
+create table test_table(col1 varchar2(10));
+create or replace package test_pckg as
+    procedure test_proc(v01 in varchar2);
+end test_pckg;
+/
+
+
+create or replace package body test_pckg as
+    procedure test_proc(v01 in varchar2) as
+    cursor cur(vcol1 varchar2) is select col1 from test_table where col1 = vcol1;
+    v02 varchar2;
+    begin
+        for rec in cur(v01) loop
+            v02 := 'a';
+        end loop;
+    end;
+end test_pckg;
+/
+drop table test_table;
+drop package test_pckg;
+
 -- test for rec in select loop when rec is defined
 set behavior_compat_options='proc_implicit_for_loop_variable';
 create table t1(a int, b int);
@@ -639,6 +662,38 @@ drop table t2;
 set behavior_compat_options='';
 set plsql_compile_check_options='';
 
+create or replace procedure check_compile() as
+declare
+    cursor c1 is select sysdate a;
+    v_a varchar2;
+begin
+    for rec in c1 loop
+        select 'aa' into v_a from sys_dummy where sysdate = rec.a;
+        raise info '%' ,v_a;
+    end loop;
+end;
+/
+
+call check_compile();
+
+set behavior_compat_options='allow_procedure_compile_check';
+create or replace procedure check_compile_1() as
+declare
+    cursor c1 is select sysdate a;
+    v_a varchar2;
+begin
+    for rec in c1 loop
+        select 'aa' into v_a from sys_dummy where sysdate = rec.a;
+        raise info '%' ,v_a;
+    end loop;
+end;
+/
+
+call check_compile_1();
+set behavior_compat_options='';
+
+drop procedure check_compile;
+
 ---- clean ----
 drop package pck1;
 drop package pck2;
diff --git a/src/test/regress/sql/plsql_show_all_error.sql b/src/test/regress/sql/plsql_show_all_error.sql
index 45be5d94a..eef6c6045 100644
--- a/src/test/regress/sql/plsql_show_all_error.sql
+++ b/src/test/regress/sql/plsql_show_all_error.sql
@@ -1098,6 +1098,49 @@ drop procedure pro_tblof_pro_013_1();
 drop type pro_tblof_013;
 drop table pro_tblof_tbl_013;
 
+set behavior_compat_options = 'allow_procedure_compile_check';
+create or replace procedure pro19
+as
+a int;
+begin
+    select c1 from t1 into ;
+    select c1 from t1 limit 1 a;
+    select c1 from t1 where c2=1 intoo a;
+    select c1 from t1 into b;
+end;
+/
+
+create or replace procedure pro20
+as
+a int;
+begin
+    select into a from t1.c1;
+    select 1 into ab;
+end;
+/
+
+create or replace procedure pro21
+as
+a int;
+begin
+    select 1 into ab;
+    select 1 intoo a;
+    dbe_output.print_line('a is:'||a);
+end;
+/
+
+create or replace procedure pro22
+as
+a int;
+b char(1);
+begin
+    b=:1;
+    select 1 into a;
+    dbe_output.print_line('a is:'||a);
+end;
+/
+set behavior_compat_options = '';
+
 drop package if exists package_020;
 drop package if exists z_pk2;
 drop package if exists aa;
diff --git a/src/test/regress/sql/test_b_format_collate.sql b/src/test/regress/sql/test_b_format_collate.sql
index 3a467556d..acc282d1a 100644
--- a/src/test/regress/sql/test_b_format_collate.sql
+++ b/src/test/regress/sql/test_b_format_collate.sql
@@ -15,6 +15,20 @@ create table t1(a text);
 create index idx_1 on t1(a collate "utf8mb4_unicode_ci");
 create unique index idx_2 on t1(a collate "utf8mb4_unicode_ci");
 
+create table hashjoin1(id int, f1 text, f2 text);
+create table hashjoin2(id int, f3 text, f4 text);
+
+insert into hashjoin1 select generate_series(1,100), 'a', 'a';
+insert into hashjoin2 select generate_series(1,100), 'a', 'a';
+
+select f1, f3 from hashjoin1 as h1 inner join hashjoin2 as h2
+on (h1.f2 = h2.f4)
+where (('a','a') in (select h1.f2, h2.f4
+from (hashjoin1 inner join hashjoin2 on hashjoin1.id = hashjoin2.id) order by 1, 2))
+group by h1.f1, h2.f3
+order by 1,2 limit 10;
+drop table if exists hashjoin1, hashjoin2;
+
 -- test binary
 drop table if exists t1;
 create table t1(a blob collate binary);
@@ -203,6 +217,20 @@ select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f
 select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1 collate "utf8mb4_general_ci";
 select tab1.f1, tab3.f1 from test_join1 as tab1, test_join3 as tab3 where tab1.f1 = tab3.f1; --fail
 
+create table hashjoin1(id int, f1 text, f2 text) collate 'utf8mb4_bin';
+create table hashjoin2(id int, f3 text, f4 text) collate 'utf8mb4_bin';
+
+insert into hashjoin1 select generate_series(1,100), 'a', 'a';
+insert into hashjoin2 select generate_series(1,100), 'a', 'a';
+
+select f1, f3 from hashjoin1 as h1 inner join hashjoin2 as h2
+on (h1.f2 = h2.f4)
+where (('a','a') in (select h1.f2, h2.f4
+from (hashjoin1 inner join hashjoin2 on hashjoin1.id = hashjoin2.id) order by 1, 2))
+group by h1.f1, h2.f3
+order by 1,2 limit 10;
+drop table if exists hashjoin1, hashjoin2;
+
 -- test nestloop
 set enable_hashjoin=off;
 set enable_nestloop=on;
diff --git a/src/test/regress/sql/test_ustore_index.sql b/src/test/regress/sql/test_ustore_index.sql
index 9d707c530..909f9370e 100644
--- a/src/test/regress/sql/test_ustore_index.sql
+++ b/src/test/regress/sql/test_ustore_index.sql
@@ -258,4 +258,33 @@ set ustore_attr="index_trace_level=all;enable_log_tuple=on";
 select /*+ indexscan(t) */ * from t where a = 1;
 set ustore_attr="index_trace_level=no;enable_log_tuple=off";
 
-drop table t;
\ No newline at end of file
+drop table t;
+
+-- test ustore querying with bitmapindexscan when updated in the same transaction
+set enable_indexscan to off;
+set enable_indexonlyscan to off;
+set enable_seqscan to off;
+set enable_bitmapscan to on;
+
+drop table if exists test;
+create table test(a int);
+create index test_idx on test(a);
+insert into test values(2);
+insert into test values(2);
+insert into test values(1);
+insert into test values(1);
+
+begin;
+declare c1 cursor for select a from test where a = 2;
+update test set a = 2;
+fetch next from c1;
+fetch next from c1;
+fetch next from c1;
+fetch next from c1;
+rollback;
+
+drop table if exists test;
+reset enable_indexscan;
+reset enable_indexonlyscan;
+reset enable_seqscan;
+reset enable_bitmapscan;
\ No newline at end of file