diff --git a/contrib/gauss_connector/gc_fdw.cpp b/contrib/gauss_connector/gc_fdw.cpp index a68e1dcb2..172e4b203 100644 --- a/contrib/gauss_connector/gc_fdw.cpp +++ b/contrib/gauss_connector/gc_fdw.cpp @@ -747,8 +747,8 @@ static void gcBeginForeignScan(ForeignScanState* node, int eflags) fsstate->resultSlot->tts_isnull[i] = true; } - fsstate->resultSlot->tts_isempty = false; - fsstate->scanSlot->tts_isempty = false; + fsstate->resultSlot->tts_flags &= ~TTS_FLAG_EMPTY; + fsstate->scanSlot->tts_flags &= ~TTS_FLAG_EMPTY; fsstate->attinmeta = TupleDescGetAttInMetadata(fsstate->tupdesc); @@ -919,7 +919,7 @@ static void postgresConstructResultSlotWithArray(ForeignScanState* node) } resultSlot->tts_nvalid = resultDesc->natts; - resultSlot->tts_isempty = false; + resultSlot->tts_flags &= ~TTS_FLAG_EMPTY; } static void postgresMapResultFromScanSlot(ForeignScanState* node) @@ -958,7 +958,7 @@ static TupleTableSlot* gcIterateNormalForeignScan(ForeignScanState* node) /* reset tupleslot on the begin */ (void)ExecClearTuple(fsstate->resultSlot); - fsstate->resultSlot->tts_isempty = false; + fsstate->resultSlot->tts_flags &= ~TTS_FLAG_EMPTY; TupleTableSlot* slot = node->ss.ss_ScanTupleSlot; diff --git a/src/common/backend/catalog/index.cpp b/src/common/backend/catalog/index.cpp index 7c0d38972..a616fe3f8 100644 --- a/src/common/backend/catalog/index.cpp +++ b/src/common/backend/catalog/index.cpp @@ -4210,7 +4210,7 @@ double IndexBuildUHeapScan(Relation heapRelation, Relation indexRelation, IndexI */ estate = CreateExecutorState(); econtext = GetPerTupleExprContext(estate); - slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation), false, TAM_USTORE); + slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation), false, TableAmUstore); /* Arrange for econtext's scan tuple to be the tuple under test */ econtext->ecxt_scantuple = slot; @@ -6320,7 +6320,7 @@ void ScanHeapInsertCBI(Relation parentRel, Relation heapRel, Relation idxRel, Oi tupleDesc = heapRel->rd_att; estate = CreateExecutorState(); econtext = GetPerTupleExprContext(estate); - slot = MakeSingleTupleTableSlot(RelationGetDescr(parentRel), false, parentRel->rd_tam_type); + slot = MakeSingleTupleTableSlot(RelationGetDescr(parentRel), false, GetTableAmRoutine(parentRel->rd_tam_type)); econtext->ecxt_scantuple = slot; /* Set up execution state for predicate, if any. 
*/ predicate = (List*)ExecPrepareExpr((Expr*)idxInfo->ii_Predicate, estate); @@ -6548,7 +6548,7 @@ void ScanPartitionInsertIndex(Relation partTableRel, Relation partRel, const Lis if (PointerIsValid(indexRelList)) { estate = CreateExecutorState(); - slot = MakeSingleTupleTableSlot(RelationGetDescr(partTableRel), false, partTableRel->rd_tam_type); + slot = MakeSingleTupleTableSlot(RelationGetDescr(partTableRel), false, GetTableAmRoutine(partTableRel->rd_tam_type)); } scan = scan_handler_tbl_beginscan(partRel, SnapshotNow, 0, NULL); @@ -6768,7 +6768,7 @@ void ScanPartitionDeleteGPITuples(Relation partTableRel, Relation partRel, const if (PointerIsValid(indexRelList)) { estate = CreateExecutorState(); - slot = MakeSingleTupleTableSlot(RelationGetDescr(partTableRel), false, partTableRel->rd_tam_type); + slot = MakeSingleTupleTableSlot(RelationGetDescr(partTableRel), false, GetTableAmRoutine(partTableRel->rd_tam_type)); } scan = scan_handler_tbl_beginscan(partRel, SnapshotNow, 0, NULL); diff --git a/src/gausskernel/optimizer/commands/analyze.cpp b/src/gausskernel/optimizer/commands/analyze.cpp index c2cc9b27b..6d2bdf3ee 100755 --- a/src/gausskernel/optimizer/commands/analyze.cpp +++ b/src/gausskernel/optimizer/commands/analyze.cpp @@ -2571,7 +2571,7 @@ retry: } /* TO DO: Need to switch this to inplaceheapam_scan_analyze_next_block after we have tableam. */ - TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(onerel), false, onerel->rd_tam_type); + TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(onerel), false, GetTableAmRoutine(onerel->rd_tam_type)); maxoffset = UHeapPageGetMaxOffsetNumber(targpage); /* Inner loop over all tuples on the selected page */ diff --git a/src/gausskernel/optimizer/commands/cluster.cpp b/src/gausskernel/optimizer/commands/cluster.cpp index 5eaad2488..86ace69d0 100755 --- a/src/gausskernel/optimizer/commands/cluster.cpp +++ b/src/gausskernel/optimizer/commands/cluster.cpp @@ -1699,7 +1699,7 @@ double CopyUHeapDataInternal(Relation oldHeap, Relation oldIndex, Relation newHe values = (Datum *)palloc(natts * sizeof(Datum)); isnull = (bool *)palloc(natts * sizeof(bool)); - slot = MakeSingleTupleTableSlot(oldTupDesc, false, oldTupDesc->tdTableAmType); + slot = MakeSingleTupleTableSlot(oldTupDesc, false, GetTableAmRoutine(oldTupDesc->tdTableAmType)); /* Initialize the rewrite operation */ rwstate = begin_heap_rewrite(oldHeap, newHeap, oldestXmin, freezeXid, useWal); diff --git a/src/gausskernel/optimizer/commands/copy.cpp b/src/gausskernel/optimizer/commands/copy.cpp index c16140edd..aac612543 100644 --- a/src/gausskernel/optimizer/commands/copy.cpp +++ b/src/gausskernel/optimizer/commands/copy.cpp @@ -4037,7 +4037,7 @@ uint64 CopyFrom(CopyState cstate) estate->es_range_table = cstate->range_table; /* Set up a tuple slot too */ - myslot = ExecInitExtraTupleSlot(estate, cstate->rel->rd_tam_type); + myslot = ExecInitExtraTupleSlot(estate, GetTableAmRoutine(cstate->rel->rd_tam_type)); ExecSetSlotDescriptor(myslot, tupDesc); /* Triggers might need a slot as well */ estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate); @@ -4738,7 +4738,7 @@ uint64 CopyFrom(CopyState cstate) * Global Partition Index stores the partition's tableOid with the index * tuple which is extracted from the tuple of the slot. Make sure it is set. 
*/ - if (slot->tts_tupslotTableAm != TAM_USTORE) { + if (!TTS_TABLEAM_IS_USTORE(slot)) { ((HeapTuple)slot->tts_tuple)->t_tableOid = RelationGetRelid(targetRel); } else { ((UHeapTuple)slot->tts_tuple)->table_oid = RelationGetRelid(targetRel); @@ -5230,7 +5230,7 @@ void UHeapCopyFromInsertBatch(Relation rel, EState* estate, CommandId mycid, int * Global Partition Index stores the partition's tableOid with the index * tuple which is extracted from the tuple of the slot. Make sure it is set. */ - if (myslot->tts_tupslotTableAm != TAM_USTORE) { + if (!TTS_TABLEAM_IS_USTORE(myslot)) { ((HeapTuple)myslot->tts_tuple)->t_tableOid = RelationGetRelid(rel); } else { ((UHeapTuple)myslot->tts_tuple)->table_oid = RelationGetRelid(rel); @@ -10232,7 +10232,7 @@ static void BatchInsertCopyLog(LogInsertState copyLogInfo, int nBufferedTuples, return; } -static LogInsertState InitInsertCopyLogInfo(CopyState cstate, TableAmType tam) +static LogInsertState InitInsertCopyLogInfo(CopyState cstate, const TableAmRoutine* tam_ops) { LogInsertState copyLogInfo = NULL; ResultRelInfo *resultRelInfo = NULL; @@ -10255,7 +10255,7 @@ static LogInsertState InitInsertCopyLogInfo(CopyState cstate, TableAmType tam) estate->es_result_relation_info = resultRelInfo; copyLogInfo->estate = estate; - copyLogInfo->myslot = ExecInitExtraTupleSlot(estate, tam); + copyLogInfo->myslot = ExecInitExtraTupleSlot(estate, tam_ops); ExecSetSlotDescriptor(copyLogInfo->myslot, RelationGetDescr(copyLogInfo->rel)); copyLogInfo->bistate = GetBulkInsertState(); @@ -10297,7 +10297,7 @@ static void LogCopyErrorLogBulk(CopyState cstate) /* Reset the offset of the logger. Read from 0. */ cstate->logger->Reset(); - copyLogInfo = InitInsertCopyLogInfo(cstate, TAM_HEAP); + copyLogInfo = InitInsertCopyLogInfo(cstate, TableAmHeap); bufferedTuples = (HeapTuple *)palloc0(MAX_TUPLES * sizeof(HeapTuple)); for (;;) { @@ -10355,7 +10355,7 @@ static void LogCopyUErrorLogBulk(CopyState cstate) /* Reset the offset of the logger. Read from 0. */ cstate->logger->Reset(); - copyLogInfo = InitInsertCopyLogInfo(cstate, TAM_USTORE); + copyLogInfo = InitInsertCopyLogInfo(cstate, TableAmUstore); bufferedUTuples = (UHeapTuple *)palloc0(MAX_TUPLES * sizeof(UHeapTuple)); for (;;) { diff --git a/src/gausskernel/optimizer/commands/matview.cpp b/src/gausskernel/optimizer/commands/matview.cpp index 551138cf7..4c8cfa249 100755 --- a/src/gausskernel/optimizer/commands/matview.cpp +++ b/src/gausskernel/optimizer/commands/matview.cpp @@ -395,7 +395,7 @@ static void ExecHandleMatData(TupleTableSlot *slot, Relation matview, Oid mapid, HeapTuple tuple; Oid matid = RelationGetRelid(matview); - if (slot == NULL || slot->tts_isempty) { + if (slot == NULL || TTS_EMPTY(slot)) { return; } @@ -455,7 +455,7 @@ static void ExecHandleIncData(TupleTableSlot *slot, Relation matview, Oid mapid, HeapTuple tuple; Oid mvid = RelationGetRelid(matview); - if (slot == NULL || slot->tts_isempty) { + if (slot == NULL || TTS_EMPTY(slot)) { return; } diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index 33788f63e..ff57ec77c 100644 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -9096,8 +9096,8 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat * tuples are the same, the tupDescs might not be (consider ADD COLUMN * without a default). 
*/ - oldslot = MakeSingleTupleTableSlot(oldTupDesc, false, oldrel->rd_tam_type); - newslot = MakeSingleTupleTableSlot(newTupDesc, false, oldrel->rd_tam_type); + oldslot = MakeSingleTupleTableSlot(oldTupDesc, false, GetTableAmRoutine(oldrel->rd_tam_type)); + newslot = MakeSingleTupleTableSlot(newTupDesc, false, GetTableAmRoutine(oldrel->rd_tam_type)); /* Preallocate values/isnull arrays */ i = Max(newTupDesc->natts, oldTupDesc->natts); @@ -9258,7 +9258,7 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat * will not try to clear it after we reset the context. Note that we don't explicitly pfree its * tuple since the per-tuple memory context will be reset shortly. */ - oldslot->tts_shouldFree = false; + oldslot->tts_flags &= ~TTS_FLAG_SHOULDFREE; UHeapTuple backUpTup = BackUpScanCuTup(((UHeapScanDesc) scan)->rs_cutup); ResetExprContext(econtext); @@ -12879,7 +12879,7 @@ static void validateCheckConstraint(Relation rel, HeapTuple constrtup) List* exprstate = (List*)ExecPrepareExpr((Expr*)make_ands_implicit(origexpr), estate); ExprContext* econtext = GetPerTupleExprContext(estate); TupleDesc tupdesc = RelationGetDescr(rel); - TupleTableSlot* slot = MakeSingleTupleTableSlot(tupdesc, false, rel->rd_tam_type); + TupleTableSlot* slot = MakeSingleTupleTableSlot(tupdesc, false, GetTableAmRoutine(rel->rd_tam_type)); econtext->ecxt_scantuple = slot; diff --git a/src/gausskernel/optimizer/commands/trigger.cpp b/src/gausskernel/optimizer/commands/trigger.cpp index 58b998fb4..ce9683846 100644 --- a/src/gausskernel/optimizer/commands/trigger.cpp +++ b/src/gausskernel/optimizer/commands/trigger.cpp @@ -3024,7 +3024,7 @@ HeapTuple GetTupleForTrigger(EState* estate, EPQState* epqstate, ResultRelInfo* } if (RelationIsUstoreFormat(relation)) { - TupleTableSlot *slot = MakeSingleTupleTableSlot(relation->rd_att, false, TAM_USTORE); + TupleTableSlot *slot = MakeSingleTupleTableSlot(relation->rd_att, false, TableAmUstore); UHeapTuple utuple; UHeapTupleData uheaptupdata; diff --git a/src/gausskernel/optimizer/util/dataskew.cpp b/src/gausskernel/optimizer/util/dataskew.cpp index 17e9a0324..36ae2ab9f 100755 --- a/src/gausskernel/optimizer/util/dataskew.cpp +++ b/src/gausskernel/optimizer/util/dataskew.cpp @@ -48,6 +48,7 @@ #include "utils/syscache.h" #include "utils/typcache.h" #include "vecexecutor/vecexecutor.h" +#include "access/tableam.h" /* We only create optimized path only when the skew ratio is large than the limit. 
*/ #define SKEW_RATIO_LIMIT 3.0 @@ -1398,7 +1399,7 @@ bool SkewInfo::canValuePassQual(List* varList, List* valueList, Expr* expr) rte = planner_rt_fetch(rel->relid, m_root); heaprel = heap_open(rte->relid, NoLock); tupdesc = RelationGetDescr(heaprel); - slot = MakeSingleTupleTableSlot(tupdesc, false, heaprel->rd_tam_type); + slot = MakeSingleTupleTableSlot(tupdesc, false, GetTableAmRoutine(heaprel->rd_tam_type)); slot->tts_nvalid = tupdesc->natts; heap_close(heaprel, NoLock); diff --git a/src/gausskernel/runtime/executor/execJunk.cpp b/src/gausskernel/runtime/executor/execJunk.cpp index 796e6d1ec..d250f3566 100644 --- a/src/gausskernel/runtime/executor/execJunk.cpp +++ b/src/gausskernel/runtime/executor/execJunk.cpp @@ -208,7 +208,7 @@ void ExecInitJunkAttr(EState* estate, CmdType operation, List* targetlist, Resul j = ExecInitJunkFilter(targetlist, result_rel_info->ri_RelationDesc->rd_att->tdhasoid, - ExecInitExtraTupleSlot(estate, result_rel_info->ri_RelationDesc->rd_tam_type)); + ExecInitExtraTupleSlot(estate, GetTableAmRoutine(result_rel_info->ri_RelationDesc->rd_tam_type))); if (operation == CMD_UPDATE || operation == CMD_DELETE || operation == CMD_MERGE) { /* For UPDATE/DELETE, find the appropriate junk attr now */ diff --git a/src/gausskernel/runtime/executor/execMain.cpp b/src/gausskernel/runtime/executor/execMain.cpp index 797be87ab..255e4299a 100755 --- a/src/gausskernel/runtime/executor/execMain.cpp +++ b/src/gausskernel/runtime/executor/execMain.cpp @@ -3315,7 +3315,7 @@ TupleTableSlot *EvalPlanQualUSlot(EPQState *epqstate, Relation relation, Index r if (*slot == NULL) { MemoryContext oldcontext = MemoryContextSwitchTo(epqstate->parentestate->es_query_cxt); - *slot = ExecAllocTableSlot(&epqstate->estate->es_tupleTable, TAM_USTORE); + *slot = ExecAllocTableSlot(&epqstate->estate->es_tupleTable, TableAmUstore); if (relation) ExecSetSlotDescriptor(*slot, RelationGetDescr(relation)); else @@ -3324,7 +3324,7 @@ TupleTableSlot *EvalPlanQualUSlot(EPQState *epqstate, Relation relation, Index r MemoryContextSwitchTo(oldcontext); } - (*slot)->tts_tupslotTableAm = TAM_USTORE; + (*slot)->tts_tam_ops = TableAmUstore; return *slot; } diff --git a/src/gausskernel/runtime/executor/execTuples.cpp b/src/gausskernel/runtime/executor/execTuples.cpp index 12a7fff01..752d47b29 100644 --- a/src/gausskernel/runtime/executor/execTuples.cpp +++ b/src/gausskernel/runtime/executor/execTuples.cpp @@ -112,18 +112,15 @@ static TupleDesc ExecTypeFromTLInternal(List* target_list, bool has_oid, bool sk * Basic routine to make an empty TupleTableSlot. 
* -------------------------------- */ -TupleTableSlot* MakeTupleTableSlot(bool has_tuple_mcxt, TableAmType tupslotTableAm) +TupleTableSlot* MakeTupleTableSlot(bool has_tuple_mcxt, const TableAmRoutine* tam_ops) { TupleTableSlot* slot = makeNode(TupleTableSlot); - Assert(tupslotTableAm == TAM_HEAP || tupslotTableAm == TAM_USTORE); + Assert(tam_ops == TableAmHeap || tam_ops == TableAmUstore); - slot->tts_isempty = true; - slot->tts_shouldFree = false; - slot->tts_shouldFreeMin = false; + slot->tts_flags |= TTS_FLAG_EMPTY; slot->tts_tuple = NULL; slot->tts_tupleDescriptor = NULL; #ifdef PGXC - slot->tts_shouldFreeRow = false; slot->tts_dataRow = NULL; slot->tts_dataLen = -1; slot->tts_attinmeta = NULL; @@ -141,7 +138,7 @@ TupleTableSlot* MakeTupleTableSlot(bool has_tuple_mcxt, TableAmType tupslotTable ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE) : NULL; #endif - slot->tts_tupslotTableAm = tupslotTableAm; + slot->tts_tam_ops = tam_ops; return slot; } @@ -152,7 +149,7 @@ TupleTableSlot* MakeTupleTableSlot(bool has_tuple_mcxt, TableAmType tupslotTable * Create a tuple table slot within a tuple table (which is just a List). * -------------------------------- */ -TupleTableSlot* ExecAllocTableSlot(List** tuple_table, TableAmType tupslotTableAm) +TupleTableSlot* ExecAllocTableSlot(List** tuple_table, const TableAmRoutine* tam_ops) { TupleTableSlot* slot; @@ -160,7 +157,7 @@ TupleTableSlot* ExecAllocTableSlot(List** tuple_table, TableAmType tupslotTableA *tuple_table = lappend(*tuple_table, slot); - slot->tts_tupslotTableAm = tupslotTableAm; + slot->tts_tam_ops = tam_ops; return slot; } @@ -208,7 +205,7 @@ void ExecResetTupleTable(List* tuple_table, /* tuple table */ } } -TupleTableSlot* ExecMakeTupleSlot(Tuple tuple, TableScanDesc tableScan, TupleTableSlot* slot, TableAmType tableAm) +TupleTableSlot* ExecMakeTupleSlot(Tuple tuple, TableScanDesc tableScan, TupleTableSlot* slot, const TableAmRoutine* tam_ops) { if (unlikely(RELATION_CREATE_BUCKET(tableScan->rs_rd))) { tableScan = ((HBktTblScanDesc)tableScan)->currBktScan; @@ -216,7 +213,7 @@ TupleTableSlot* ExecMakeTupleSlot(Tuple tuple, TableScanDesc tableScan, TupleTab if (tuple != NULL) { Assert(tableScan != NULL); - slot->tts_tupslotTableAm = tableAm; + slot->tts_tam_ops = tam_ops; return ExecStoreTuple(tuple, /* tuple to store */ slot, /* slot to store in */ tableScan->rs_cbuf, /* buffer associated with this tuple */ @@ -235,9 +232,9 @@ TupleTableSlot* ExecMakeTupleSlot(Tuple tuple, TableScanDesc tableScan, TupleTab * to use the given tuple descriptor. 
* -------------------------------- */ -TupleTableSlot* MakeSingleTupleTableSlot(TupleDesc tup_desc, bool allocSlotCxt, TableAmType tupslotTableAm) +TupleTableSlot* MakeSingleTupleTableSlot(TupleDesc tup_desc, bool allocSlotCxt, const TableAmRoutine* tam_ops) { - TupleTableSlot* slot = MakeTupleTableSlot(allocSlotCxt, tupslotTableAm); + TupleTableSlot* slot = MakeTupleTableSlot(allocSlotCxt, tam_ops); ExecSetSlotDescriptor(slot, tup_desc); return slot; } @@ -377,9 +374,9 @@ TupleTableSlot* ExecStoreTuple(Tuple tuple, TupleTableSlot* slot, Buffer buffer, Assert(slot->tts_tupleDescriptor != NULL); HeapTuple htup = (HeapTuple)tuple; - if (slot->tts_tupslotTableAm == TAM_USTORE && htup->tupTableType == HEAP_TUPLE) { + if (TTS_TABLEAM_IS_USTORE(slot) && htup->tupTableType == HEAP_TUPLE) { tuple = (Tuple)HeapToUHeap(slot->tts_tupleDescriptor, (HeapTuple)tuple); - } else if (slot->tts_tupslotTableAm == TAM_HEAP && htup->tupTableType == UHEAP_TUPLE) { + } else if (TTS_TABLEAM_IS_HEAP(slot) && htup->tupTableType == UHEAP_TUPLE) { tuple = (Tuple)UHeapToHeap(slot->tts_tupleDescriptor, (UHeapTuple)tuple); } @@ -431,7 +428,7 @@ TupleTableSlot* ExecClearTuple(TupleTableSlot* slot) /* return: slot passed slot /* * clear the physical tuple or minimal tuple if present via TableAm. */ - if (slot->tts_shouldFree || slot->tts_shouldFreeMin) { + if (TTS_SHOULDFREE(slot) || TTS_SHOULDFREEMIN(slot)) { Assert(slot->tts_tupleDescriptor != NULL); tableam_tslot_clear(slot); } @@ -441,14 +438,14 @@ TupleTableSlot* ExecClearTuple(TupleTableSlot* slot) /* return: slot passed slot */ slot->tts_tuple = NULL; slot->tts_mintuple = NULL; - slot->tts_shouldFree = false; - slot->tts_shouldFreeMin = false; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN; #ifdef ENABLE_MULTIPLE_NODES - if (slot->tts_shouldFreeRow) { + if (TTS_SHOULDFREE_ROW(slot)) { pfree_ext(slot->tts_dataRow); } - slot->tts_shouldFreeRow = false; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE_ROW; slot->tts_dataRow = NULL; slot->tts_dataLen = -1; slot->tts_xcnodeoid = 0; @@ -465,7 +462,7 @@ TupleTableSlot* ExecClearTuple(TupleTableSlot* slot) /* return: slot passed slot /* * Mark it empty.
*/ - slot->tts_isempty = true; + slot->tts_flags |= TTS_FLAG_EMPTY; slot->tts_nvalid = 0; // Row uncompression use slot->tts_per_tuple_mcxt in some case, So we need @@ -503,9 +500,9 @@ TupleTableSlot* ExecStoreVirtualTuple(TupleTableSlot* slot) */ Assert(slot != NULL); Assert(slot->tts_tupleDescriptor != NULL); - Assert(slot->tts_isempty); + Assert(TTS_EMPTY(slot)); - slot->tts_isempty = false; + slot->tts_flags &= ~TTS_FLAG_EMPTY; slot->tts_nvalid = slot->tts_tupleDescriptor->natts; return slot; @@ -565,7 +562,7 @@ HeapTuple ExecCopySlotTuple(TupleTableSlot* slot) * sanity checks */ Assert(slot != NULL); - Assert(!slot->tts_isempty); + Assert(!TTS_EMPTY(slot)); return tableam_tslot_copy_heap_tuple(slot); } @@ -583,7 +580,7 @@ MinimalTuple ExecCopySlotMinimalTuple(TupleTableSlot* slot, bool need_transform_ * sanity checks */ Assert(slot != NULL); - Assert(!slot->tts_isempty); + Assert(!TTS_EMPTY(slot)); return tableam_tslot_copy_minimal_tuple(slot); } @@ -609,7 +606,7 @@ HeapTuple ExecFetchSlotTuple(TupleTableSlot* slot) * sanity checks */ Assert(slot != NULL); - Assert(!slot->tts_isempty); + Assert(!TTS_EMPTY(slot)); return tableam_tslot_get_heap_tuple(slot); } @@ -682,7 +679,7 @@ HeapTuple ExecMaterializeSlot(TupleTableSlot* slot) * sanity checks */ Assert(slot != NULL); - Assert(!slot->tts_isempty); + Assert(!TTS_EMPTY(slot)); return tableam_tslot_materialize(slot); } @@ -730,27 +727,27 @@ TupleTableSlot* ExecCopySlot(TupleTableSlot* dst_slot, TupleTableSlot* src_slot) * ExecInitResultTupleSlot * ---------------- */ -void ExecInitResultTupleSlot(EState* estate, PlanState* plan_state, TableAmType tam) +void ExecInitResultTupleSlot(EState* estate, PlanState* plan_state, const TableAmRoutine* tam_ops) { - plan_state->ps_ResultTupleSlot = ExecAllocTableSlot(&estate->es_tupleTable, tam); + plan_state->ps_ResultTupleSlot = ExecAllocTableSlot(&estate->es_tupleTable, tam_ops); } /* ---------------- * ExecInitScanTupleSlot * ---------------- */ -void ExecInitScanTupleSlot(EState* estate, ScanState* scan_state, TableAmType tam) +void ExecInitScanTupleSlot(EState* estate, ScanState* scan_state, const TableAmRoutine* tam_ops) { - scan_state->ss_ScanTupleSlot = ExecAllocTableSlot(&estate->es_tupleTable, tam); + scan_state->ss_ScanTupleSlot = ExecAllocTableSlot(&estate->es_tupleTable, tam_ops); } /* ---------------- * ExecInitExtraTupleSlot * ---------------- */ -TupleTableSlot* ExecInitExtraTupleSlot(EState* estate, TableAmType tam) +TupleTableSlot* ExecInitExtraTupleSlot(EState* estate, const TableAmRoutine* tam_ops) { - return ExecAllocTableSlot(&estate->es_tupleTable, tam); + return ExecAllocTableSlot(&estate->es_tupleTable, tam_ops); } /* ---------------- @@ -1104,12 +1101,12 @@ TupleTableSlot* ExecStoreDataRowTuple(char* msg, size_t len, Oid msgnode_oid, Tu /* * Free any old physical tuple belonging to the slot. */ - if (slot->tts_shouldFree && (HeapTuple)slot->tts_tuple != NULL) { + if (TTS_SHOULDFREE(slot) && (HeapTuple)slot->tts_tuple != NULL) { heap_freetuple((HeapTuple)slot->tts_tuple); slot->tts_tuple = NULL; - slot->tts_shouldFree = false; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE; } - if (slot->tts_shouldFreeMin) { + if (TTS_SHOULDFREEMIN(slot)) { heap_free_minimal_tuple(slot->tts_mintuple); } /* @@ -1118,9 +1115,9 @@ TupleTableSlot* ExecStoreDataRowTuple(char* msg, size_t len, Oid msgnode_oid, Tu * to reset shouldFreeRow, since it will be overwritten just below. 
*/ if (msg == slot->tts_dataRow) { - slot->tts_shouldFreeRow = false; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE_ROW; } - if (slot->tts_shouldFreeRow) { + if (TTS_SHOULDFREE_ROW(slot)) { pfree_ext(slot->tts_dataRow); } ResetSlotPerTupleContext(slot); @@ -1136,10 +1133,13 @@ TupleTableSlot* ExecStoreDataRowTuple(char* msg, size_t len, Oid msgnode_oid, Tu /* * Store the new tuple into the specified slot. */ - slot->tts_isempty = false; - slot->tts_shouldFree = false; - slot->tts_shouldFreeMin = false; - slot->tts_shouldFreeRow = should_free; + slot->tts_flags &= ~TTS_FLAG_EMPTY; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN; + if (should_free) + slot->tts_flags |= TTS_FLAG_SHOULDFREE_ROW; + else + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE_ROW; slot->tts_tuple = NULL; slot->tts_mintuple = NULL; slot->tts_dataRow = msg; diff --git a/src/gausskernel/runtime/executor/execUtils.cpp b/src/gausskernel/runtime/executor/execUtils.cpp index eb97dca0d..2dec62fa4 100644 --- a/src/gausskernel/runtime/executor/execUtils.cpp +++ b/src/gausskernel/runtime/executor/execUtils.cpp @@ -2138,7 +2138,7 @@ bool check_violation(Relation heap, Relation index, IndexInfo *indexInfo, ItemPo * to this slot. Be sure to save and restore caller's value for * scantuple. */ - existing_slot = MakeSingleTupleTableSlot(RelationGetDescr(heap), false, heap->rd_tam_type); + existing_slot = MakeSingleTupleTableSlot(RelationGetDescr(heap), false, GetTableAmRoutine(heap->rd_tam_type)); econtext = GetPerTupleExprContext(estate); save_scantuple = econtext->ecxt_scantuple; econtext->ecxt_scantuple = existing_slot; diff --git a/src/gausskernel/runtime/executor/nodeAgg.cpp b/src/gausskernel/runtime/executor/nodeAgg.cpp index 9632afa27..4b7772e2b 100644 --- a/src/gausskernel/runtime/executor/nodeAgg.cpp +++ b/src/gausskernel/runtime/executor/nodeAgg.cpp @@ -1015,7 +1015,7 @@ static void prepare_projection_slot(AggState* aggstate, TupleTableSlot* slot, in aggstate->grouped_cols = grouped_cols; - if (slot->tts_isempty) { + if (TTS_EMPTY(slot)) { /* * Force all values to be NULL if working on an empty input tuple * (i.e.
an empty grouping set for which no input rows were diff --git a/src/gausskernel/runtime/executor/nodeBitmapHeapscan.cpp b/src/gausskernel/runtime/executor/nodeBitmapHeapscan.cpp index c911bb683..9d6aa6b78 100644 --- a/src/gausskernel/runtime/executor/nodeBitmapHeapscan.cpp +++ b/src/gausskernel/runtime/executor/nodeBitmapHeapscan.cpp @@ -807,8 +807,8 @@ BitmapHeapScanState* ExecInitBitmapHeapScan(BitmapHeapScan* node, EState* estate /* * tuple table initialization */ - ExecInitResultTupleSlot(estate, &scanstate->ss.ps, currentRelation->rd_tam_type); - ExecInitScanTupleSlot(estate, &scanstate->ss, currentRelation->rd_tam_type); + ExecInitResultTupleSlot(estate, &scanstate->ss.ps, GetTableAmRoutine(currentRelation->rd_tam_type)); + ExecInitScanTupleSlot(estate, &scanstate->ss, GetTableAmRoutine(currentRelation->rd_tam_type)); InitBitmapHeapScanNextMtd(scanstate); diff --git a/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp b/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp index 5dd07ffed..222bdfbc1 100644 --- a/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp +++ b/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp @@ -139,7 +139,7 @@ static TupleTableSlot* IndexOnlyNext(IndexOnlyScanState* node) slot = node->ss.ss_ScanTupleSlot; isUHeap = RelationIsUstoreFormat(node->ss.ss_currentRelation); tmpslot = MakeSingleTupleTableSlot(RelationGetDescr(scandesc->heapRelation), - false, scandesc->heapRelation->rd_tam_type); + false, GetTableAmRoutine(scandesc->heapRelation->rd_tam_type)); /* * OK, now that we have what we need, fetch the next tuple. @@ -589,8 +589,8 @@ IndexOnlyScanState* ExecInitIndexOnlyScan(IndexOnlyScan* node, EState* estate, i /* * tuple table initialization */ - ExecInitResultTupleSlot(estate, &indexstate->ss.ps, currentRelation->rd_tam_type); - ExecInitScanTupleSlot(estate, &indexstate->ss, currentRelation->rd_tam_type); + ExecInitResultTupleSlot(estate, &indexstate->ss.ps, GetTableAmRoutine(currentRelation->rd_tam_type)); + ExecInitScanTupleSlot(estate, &indexstate->ss, GetTableAmRoutine(currentRelation->rd_tam_type)); /* * Build the scan tuple type using the indextlist generated by the diff --git a/src/gausskernel/runtime/executor/nodeIndexscan.cpp b/src/gausskernel/runtime/executor/nodeIndexscan.cpp index 37df68c3d..f4af00871 100644 --- a/src/gausskernel/runtime/executor/nodeIndexscan.cpp +++ b/src/gausskernel/runtime/executor/nodeIndexscan.cpp @@ -693,8 +693,8 @@ IndexScanState* ExecInitIndexScan(IndexScan* node, EState* estate, int eflags) /* * tuple table initialization */ - ExecInitResultTupleSlot(estate, &index_state->ss.ps, current_relation->rd_tam_type); - ExecInitScanTupleSlot(estate, &index_state->ss, current_relation->rd_tam_type); + ExecInitResultTupleSlot(estate, &index_state->ss.ps, GetTableAmRoutine(current_relation->rd_tam_type)); + ExecInitScanTupleSlot(estate, &index_state->ss, GetTableAmRoutine(current_relation->rd_tam_type)); /* * get the scan type from the relation descriptor. 
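Note for reviewers: the slot changes above and in the remaining files rely on header infrastructure that is not part of this diff — a `tts_flags` bitmask with `TTS_FLAG_*` bits and `TTS_*()` accessor macros replacing the old per-field booleans, plus a `tts_tam_ops` routine pointer replacing `tts_tupslotTableAm`. A minimal sketch of what those helpers are assumed to look like (names match the call sites in this patch; the exact bit values are illustrative only):

```cpp
/* Sketch only: assumed tuptable.h additions, not part of this diff. */
#define TTS_FLAG_EMPTY          (1 << 1)
#define TTS_FLAG_SHOULDFREE     (1 << 2)
#define TTS_FLAG_SHOULDFREEMIN  (1 << 3)
#define TTS_FLAG_SHOULDFREE_ROW (1 << 4)
#define TTS_FLAG_SLOW           (1 << 5)

/* Accessors read the bitmask instead of the removed boolean fields. */
#define TTS_EMPTY(slot)          (((slot)->tts_flags & TTS_FLAG_EMPTY) != 0)
#define TTS_SHOULDFREE(slot)     (((slot)->tts_flags & TTS_FLAG_SHOULDFREE) != 0)
#define TTS_SHOULDFREEMIN(slot)  (((slot)->tts_flags & TTS_FLAG_SHOULDFREEMIN) != 0)
#define TTS_SHOULDFREE_ROW(slot) (((slot)->tts_flags & TTS_FLAG_SHOULDFREE_ROW) != 0)
#define TTS_SLOW(slot)           (((slot)->tts_flags & TTS_FLAG_SLOW) != 0)

/* Table-AM checks compare the routine pointer instead of the old enum. */
#define TTS_TABLEAM_IS_HEAP(slot)   ((slot)->tts_tam_ops == TableAmHeap)
#define TTS_TABLEAM_IS_USTORE(slot) ((slot)->tts_tam_ops == TableAmUstore)
```
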
diff --git a/src/gausskernel/runtime/executor/nodeModifyTable.cpp b/src/gausskernel/runtime/executor/nodeModifyTable.cpp index 5fdc5ba59..902d19e43 100644 --- a/src/gausskernel/runtime/executor/nodeModifyTable.cpp +++ b/src/gausskernel/runtime/executor/nodeModifyTable.cpp @@ -1220,7 +1220,7 @@ TupleTableSlot* ExecInsertT(ModifyTableState* state, TupleTableSlot* slot, Tuple } else #endif if (useHeapMultiInsert) { - TupleTableSlot* tmp_slot = MakeSingleTupleTableSlot(slot->tts_tupleDescriptor, false, result_relation_desc->rd_tam_type); + TupleTableSlot* tmp_slot = MakeSingleTupleTableSlot(slot->tts_tupleDescriptor, false, GetTableAmRoutine(result_relation_desc->rd_tam_type)); bool is_partition_rel = result_relation_desc->rd_rel->parttype == PARTTYPE_PARTITIONED_RELATION; Oid targetOid = InvalidOid; @@ -1694,7 +1694,7 @@ TupleTableSlot* ExecDelete(ItemPointer tupleid, Oid deletePartitionOid, int2 buc return NULL; } - if (slot->tts_isempty) { + if (TTS_EMPTY(slot)) { (void)ExecStoreAllNullTuple(slot); } } else { @@ -1939,9 +1939,9 @@ end:; if (slot->tts_tupleDescriptor != RelationGetDescr(result_relation_desc)) { ExecSetSlotDescriptor(slot, RelationGetDescr(result_relation_desc)); } - slot->tts_tupslotTableAm = result_relation_desc->rd_tam_type; + slot->tts_tam_ops = GetTableAmRoutine(result_relation_desc->rd_tam_type); if (oldtuple != NULL) { - Assert(slot->tts_tupslotTableAm != TAM_USTORE); + Assert(!TTS_TABLEAM_IS_USTORE(slot)); del_tuple.t_data = oldtuple; del_tuple.t_len = HeapTupleHeaderGetDatumLength(oldtuple); ItemPointerSetInvalid(&(del_tuple.t_self)); @@ -4112,7 +4112,7 @@ ModifyTableState* ExecInitModifyTable(ModifyTable* node, EState* estate, int efl /* initialize slot for the existing tuple */ upsertState->us_existing = - ExecInitExtraTupleSlot(mt_state->ps.state, result_rel_info->ri_RelationDesc->rd_tam_type); + ExecInitExtraTupleSlot(mt_state->ps.state, GetTableAmRoutine(result_rel_info->ri_RelationDesc->rd_tam_type)); ExecSetSlotDescriptor(upsertState->us_existing, result_rel_info->ri_RelationDesc->rd_att); upsertState->us_excludedtlist = node->exclRelTlist; @@ -4120,7 +4120,7 @@ ModifyTableState* ExecInitModifyTable(ModifyTable* node, EState* estate, int efl /* create target slot for UPDATE SET projection */ tupDesc = ExecTypeFromTL((List*)node->updateTlist, result_rel_info->ri_RelationDesc->rd_rel->relhasoids); upsertState->us_updateproj = - ExecInitExtraTupleSlot(mt_state->ps.state, result_rel_info->ri_RelationDesc->rd_tam_type); + ExecInitExtraTupleSlot(mt_state->ps.state, GetTableAmRoutine(result_rel_info->ri_RelationDesc->rd_tam_type)); ExecSetSlotDescriptor(upsertState->us_updateproj, tupDesc); /* build UPDATE SET expression and projection state */ @@ -4274,7 +4274,7 @@ ModifyTableState* ExecInitModifyTable(ModifyTable* node, EState* estate, int efl */ if (estate->es_trig_tuple_slot == NULL) { result_rel_info = mt_state->resultRelInfo; - estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate, result_rel_info->ri_RelationDesc->rd_tam_type); + estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate, GetTableAmRoutine(result_rel_info->ri_RelationDesc->rd_tam_type)); } /* diff --git a/src/gausskernel/runtime/executor/nodeRecursiveunion.cpp b/src/gausskernel/runtime/executor/nodeRecursiveunion.cpp index 3a7e66d10..c894b288b 100644 --- a/src/gausskernel/runtime/executor/nodeRecursiveunion.cpp +++ b/src/gausskernel/runtime/executor/nodeRecursiveunion.cpp @@ -2061,13 +2061,13 @@ static void ExecInitRecursiveResultTupleSlot(EState* estate, PlanState* planstat { 
TupleTableSlot* slot = makeNode(TupleTableSlot); - slot->tts_isempty = true; - slot->tts_shouldFree = false; - slot->tts_shouldFreeMin = false; + slot->tts_flags |= TTS_FLAG_EMPTY; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN; slot->tts_tuple = NULL; slot->tts_tupleDescriptor = NULL; #ifdef PGXC - slot->tts_shouldFreeRow = false; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE_ROW; slot->tts_dataRow = NULL; slot->tts_dataLen = -1; slot->tts_attinmeta = NULL; diff --git a/src/gausskernel/runtime/executor/nodeSamplescan.cpp b/src/gausskernel/runtime/executor/nodeSamplescan.cpp index f696f017e..630df3815 100644 --- a/src/gausskernel/runtime/executor/nodeSamplescan.cpp +++ b/src/gausskernel/runtime/executor/nodeSamplescan.cpp @@ -118,7 +118,10 @@ TupleTableSlot* HeapSeqSampleNext(SeqScanState* node) TupleTableSlot* slot = node->ss_ScanTupleSlot; node->ss_ScanTupleSlot->tts_tupleDescriptor->tdTableAmType = node->ss_currentRelation->rd_tam_type; HeapTuple tuple = SampleFetchNextTuple(node); - return ExecMakeTupleSlot(tuple, GetTableScanDesc(node->ss_currentScanDesc, node->ss_currentRelation), slot, node->ss_currentRelation->rd_tam_type); + return ExecMakeTupleSlot(tuple, + GetTableScanDesc(node->ss_currentScanDesc, node->ss_currentRelation), + slot, + GetTableAmRoutine(node->ss_currentRelation->rd_tam_type)); } TupleTableSlot* UHeapSeqSampleNext(SeqScanState* node) @@ -175,7 +178,7 @@ TupleTableSlot* HbktSeqSampleNext(SeqScanState* node) return ExecMakeTupleSlot( (Tuple) tuple, GetTableScanDesc(node->ss_currentScanDesc, node->ss_currentRelation), slot, - node->ss_currentRelation->rd_tam_type); + GetTableAmRoutine(node->ss_currentRelation->rd_tam_type)); } /* diff --git a/src/gausskernel/runtime/executor/nodeSeqscan.cpp b/src/gausskernel/runtime/executor/nodeSeqscan.cpp index 1844feb39..6cbe203db 100644 --- a/src/gausskernel/runtime/executor/nodeSeqscan.cpp +++ b/src/gausskernel/runtime/executor/nodeSeqscan.cpp @@ -210,7 +210,7 @@ void seq_scan_getnext_template(TableScanDesc scan, TupleTableSlot* slot, ScanDi if (tuple != NULL) { Assert(slot != NULL); Assert(slot->tts_tupleDescriptor != NULL); - slot->tts_tupslotTableAm = type; + slot->tts_tam_ops = GetTableAmRoutine(type); if (type == TAM_USTORE) { UHeapSlotStoreUHeapTuple((UHeapTuple)tuple, slot, false, false); } else { @@ -347,7 +347,7 @@ static ScanBatchResult *SeqNextBatchMode(SeqScanState *node) scanDesc->rs_maxScanRows = node->scanBatchState->scanTupleSlotMaxNum; node->scanBatchState->scanfinished = tableam_scan_gettuplebatchmode(scanDesc, direction); - if (slot[0]->tts_tupslotTableAm == TAM_USTORE) { + if (TTS_TABLEAM_IS_USTORE(slot[0])) { ExecStoreTupleBatchMode(scanDesc, slot); } else { ExecStoreTupleBatchMode(scanDesc, slot); @@ -554,8 +554,8 @@ void InitScanRelation(SeqScanState* node, EState* estate, int eflags) /* * tuple table initialization */ - ExecInitResultTupleSlot(estate, &node->ps, current_relation->rd_tam_type); - ExecInitScanTupleSlot(estate, node, current_relation->rd_tam_type); + ExecInitResultTupleSlot(estate, &node->ps, GetTableAmRoutine(current_relation->rd_tam_type)); + ExecInitScanTupleSlot(estate, node, GetTableAmRoutine(current_relation->rd_tam_type)); if (((Scan*)node->ps.plan)->tablesample && node->sampleScanInfo.tsm_state == NULL) { if (isUstoreRel) { @@ -755,7 +755,7 @@ static SeqScanState *ExecInitSeqScanBatchMode(SeqScan *node, SeqScanState* scans (TupleTableSlot**)palloc(sizeof(TupleTableSlot*) * BatchMaxSize); for (i = 0; i < BatchMaxSize; i++) { TupleTableSlot* slot = 
ExecAllocTableSlot(&estate->es_tupleTable, - scanstate->ss_currentRelation->rd_tam_type); + GetTableAmRoutine(scanstate->ss_currentRelation->rd_tam_type)); ExecSetSlotDescriptor(slot, scanstate->ss_ScanTupleSlot->tts_tupleDescriptor); scanBatchState->scanBatch.scanTupleSlotInBatch[i] = slot; } diff --git a/src/gausskernel/runtime/executor/nodeStartWithOp.cpp b/src/gausskernel/runtime/executor/nodeStartWithOp.cpp index c506535c0..12cf91ee3 100644 --- a/src/gausskernel/runtime/executor/nodeStartWithOp.cpp +++ b/src/gausskernel/runtime/executor/nodeStartWithOp.cpp @@ -338,7 +338,7 @@ StartWithOpState* ExecInitStartWithOp(StartWithOp* node, EState* estate, int efl false, false, u_sess->attr.attr_memory.work_mem); /* create the working TupleTableslot */ - state->sw_workingSlot = ExecAllocTableSlot(&estate->es_tupleTable, TAM_HEAP); + state->sw_workingSlot = ExecAllocTableSlot(&estate->es_tupleTable, TableAmHeap); ExecSetSlotDescriptor(state->sw_workingSlot, ExecTypeFromTL(targetlist, false)); int natts = list_length(node->plan.targetlist); diff --git a/src/gausskernel/runtime/executor/nodeTidscan.cpp b/src/gausskernel/runtime/executor/nodeTidscan.cpp index 5444b6d39..fb6d4594f 100644 --- a/src/gausskernel/runtime/executor/nodeTidscan.cpp +++ b/src/gausskernel/runtime/executor/nodeTidscan.cpp @@ -625,8 +625,8 @@ TidScanState* ExecInitTidScan(TidScan* node, EState* estate, int eflags) /* * tuple table initialization */ - ExecInitResultTupleSlot(estate, &tidstate->ss.ps, current_relation->rd_tam_type); - ExecInitScanTupleSlot(estate, &tidstate->ss, current_relation->rd_tam_type); + ExecInitResultTupleSlot(estate, &tidstate->ss.ps, GetTableAmRoutine(current_relation->rd_tam_type)); + ExecInitScanTupleSlot(estate, &tidstate->ss, GetTableAmRoutine(current_relation->rd_tam_type)); /* deal with partitioned table branch */ if (node->scan.isPartTbl) { diff --git a/src/gausskernel/runtime/opfusion/opfusion_delete.cpp b/src/gausskernel/runtime/opfusion/opfusion_delete.cpp index 9b81450a1..df27a42fd 100644 --- a/src/gausskernel/runtime/opfusion/opfusion_delete.cpp +++ b/src/gausskernel/runtime/opfusion/opfusion_delete.cpp @@ -52,7 +52,7 @@ void DeleteFusion::InitLocals(ParamListInfo params) m_local.m_reslot = MakeSingleTupleTableSlot(m_global->m_tupDesc); if (m_global->m_table_type == TAM_USTORE) { - m_local.m_reslot->tts_tupslotTableAm = TAM_USTORE; + m_local.m_reslot->tts_tam_ops = TableAmUstore; } m_local.m_values = (Datum*)palloc0(m_global->m_natts * sizeof(Datum)); m_local.m_isnull = (bool*)palloc0(m_global->m_natts * sizeof(bool)); diff --git a/src/gausskernel/runtime/opfusion/opfusion_indexonlyscan.cpp b/src/gausskernel/runtime/opfusion/opfusion_indexonlyscan.cpp index 144989bac..4dd85a812 100644 --- a/src/gausskernel/runtime/opfusion/opfusion_indexonlyscan.cpp +++ b/src/gausskernel/runtime/opfusion/opfusion_indexonlyscan.cpp @@ -155,7 +155,7 @@ void IndexOnlyScanFusion::Init(long max_rows) *m_direction = NoMovementScanDirection; } - m_reslot = MakeSingleTupleTableSlot(m_tupDesc, false, m_tupDesc->tdTableAmType); + m_reslot = MakeSingleTupleTableSlot(m_tupDesc, false, GetTableAmRoutine(m_tupDesc->tdTableAmType)); ScanState* scanstate = makeNode(ScanState); // need release scanstate->ps.plan = (Plan *)m_node; @@ -223,7 +223,7 @@ TupleTableSlot *IndexOnlyScanFusion::getTupleSlotInternal() bool bucket_changed = false; TupleTableSlot* tmpreslot = NULL; tmpreslot = MakeSingleTupleTableSlot(RelationGetDescr(m_scandesc->heapRelation), - false, m_scandesc->heapRelation->rd_tam_type); + false, 
GetTableAmRoutine(m_scandesc->heapRelation->rd_tam_type)); while ((tid = scan_handler_idx_getnext_tid(m_scandesc, *m_direction, &bucket_changed)) != NULL) { HeapTuple tuple = NULL; diff --git a/src/gausskernel/runtime/opfusion/opfusion_indexscan.cpp b/src/gausskernel/runtime/opfusion/opfusion_indexscan.cpp index 79510cd01..7a47a7e0f 100644 --- a/src/gausskernel/runtime/opfusion/opfusion_indexscan.cpp +++ b/src/gausskernel/runtime/opfusion/opfusion_indexscan.cpp @@ -168,7 +168,7 @@ void IndexScanFusion::Init(long max_rows) } m_epq_indexqual = m_node->indexqualorig; - m_reslot = MakeSingleTupleTableSlot(m_tupDesc, false, m_rel->rd_tam_type); + m_reslot = MakeSingleTupleTableSlot(m_tupDesc, false, GetTableAmRoutine(m_rel->rd_tam_type)); } HeapTuple IndexScanFusion::getTuple() diff --git a/src/gausskernel/runtime/opfusion/opfusion_insert.cpp b/src/gausskernel/runtime/opfusion/opfusion_insert.cpp index abb385533..9bdabaaa1 100644 --- a/src/gausskernel/runtime/opfusion/opfusion_insert.cpp +++ b/src/gausskernel/runtime/opfusion/opfusion_insert.cpp @@ -111,7 +111,7 @@ void InsertFusion::InitLocals(ParamListInfo params) m_c_local.m_estate->es_plannedstmt = m_global->m_planstmt; m_local.m_reslot = MakeSingleTupleTableSlot(m_global->m_tupDesc); if (m_global->m_table_type == TAM_USTORE) { - m_local.m_reslot->tts_tupslotTableAm = TAM_USTORE; + m_local.m_reslot->tts_tam_ops = TableAmUstore; } m_local.m_values = (Datum*)palloc0(m_global->m_natts * sizeof(Datum)); m_local.m_isnull = (bool*)palloc0(m_global->m_natts * sizeof(bool)); diff --git a/src/gausskernel/runtime/opfusion/opfusion_selectforupdate.cpp b/src/gausskernel/runtime/opfusion/opfusion_selectforupdate.cpp index c7b9d861a..ee553f5b2 100644 --- a/src/gausskernel/runtime/opfusion/opfusion_selectforupdate.cpp +++ b/src/gausskernel/runtime/opfusion/opfusion_selectforupdate.cpp @@ -47,7 +47,7 @@ void SelectForUpdateFusion::InitLocals(ParamListInfo params) { m_local.m_reslot = MakeSingleTupleTableSlot(m_global->m_tupDesc); if (m_global->m_table_type == TAM_USTORE) { - m_local.m_reslot->tts_tupslotTableAm = TAM_USTORE; + m_local.m_reslot->tts_tam_ops = TableAmUstore; } m_c_local.m_estate = CreateExecutorState(); m_c_local.m_estate->es_range_table = m_global->m_planstmt->rtable; diff --git a/src/gausskernel/runtime/opfusion/opfusion_sort.cpp b/src/gausskernel/runtime/opfusion/opfusion_sort.cpp index 77fc7bf74..c1c1e0c2f 100644 --- a/src/gausskernel/runtime/opfusion/opfusion_sort.cpp +++ b/src/gausskernel/runtime/opfusion/opfusion_sort.cpp @@ -54,7 +54,7 @@ void SortFusion::InitLocals(ParamListInfo params) if (!IsGlobal()) m_global->m_tupDesc->tdTableAmType = m_local.m_scan->m_tupDesc->tdTableAmType; - m_local.m_reslot = MakeSingleTupleTableSlot(m_global->m_tupDesc, false, m_local.m_scan->m_tupDesc->tdTableAmType); + m_local.m_reslot = MakeSingleTupleTableSlot(m_global->m_tupDesc, false, GetTableAmRoutine(m_local.m_scan->m_tupDesc->tdTableAmType)); m_local.m_values = (Datum*)palloc0(m_global->m_tupDesc->natts * sizeof(Datum)); m_local.m_isnull = (bool*)palloc0(m_global->m_tupDesc->natts * sizeof(bool)); } diff --git a/src/gausskernel/runtime/opfusion/opfusion_update.cpp b/src/gausskernel/runtime/opfusion/opfusion_update.cpp index c6c76abf1..320df81ef 100644 --- a/src/gausskernel/runtime/opfusion/opfusion_update.cpp +++ b/src/gausskernel/runtime/opfusion/opfusion_update.cpp @@ -209,7 +209,7 @@ void UpdateFusion::InitLocals(ParamListInfo params) m_local.m_reslot = MakeSingleTupleTableSlot(m_global->m_tupDesc); if (m_global->m_table_type == TAM_USTORE) { 
- m_local.m_reslot->tts_tupslotTableAm = TAM_USTORE; + m_local.m_reslot->tts_tam_ops = TableAmUstore; } m_local.m_values = (Datum*)palloc0(m_global->m_natts * sizeof(Datum)); m_local.m_isnull = (bool*)palloc0(m_global->m_natts * sizeof(bool)); diff --git a/src/gausskernel/runtime/vecexecutor/vecnode/vechashtable.cpp b/src/gausskernel/runtime/vecexecutor/vecnode/vechashtable.cpp index 375571fee..729ea8635 100644 --- a/src/gausskernel/runtime/vecexecutor/vecnode/vechashtable.cpp +++ b/src/gausskernel/runtime/vecexecutor/vecnode/vechashtable.cpp @@ -659,7 +659,7 @@ hashFileSource::hashFileSource(VectorBatch* batch, MemoryContext context, int ce m_tupleSize = 100; m_tuple = (MinimalTuple)palloc(m_tupleSize); m_tuple->t_len = m_tupleSize; - m_hashTupleSlot = MakeTupleTableSlot(true, tuple_desc->tdTableAmType); + m_hashTupleSlot = MakeTupleTableSlot(true, GetTableAmRoutine(tuple_desc->tdTableAmType)); ExecSetSlotDescriptor(m_hashTupleSlot, tuple_desc); } @@ -675,7 +675,7 @@ hashFileSource::hashFileSource(TupleTableSlot* hash_slot, int file_num) m_context = NULL; if (m_hashTupleSlot->tts_tupleDescriptor == NULL) { ExecSetSlotDescriptor(m_hashTupleSlot, hash_slot->tts_tupleDescriptor); - m_hashTupleSlot->tts_tupslotTableAm = hash_slot->tts_tupleDescriptor->tdTableAmType; + m_hashTupleSlot->tts_tam_ops = GetTableAmRoutine(hash_slot->tts_tupleDescriptor->tdTableAmType); } m_cols = 0; diff --git a/src/gausskernel/runtime/vecexecutor/vecnode/vectortorow.cpp b/src/gausskernel/runtime/vecexecutor/vecnode/vectortorow.cpp index 96d43c574..4e077e81e 100644 --- a/src/gausskernel/runtime/vecexecutor/vecnode/vectortorow.cpp +++ b/src/gausskernel/runtime/vecexecutor/vecnode/vectortorow.cpp @@ -267,7 +267,7 @@ VecToRowState* ExecInitVecToRow(VecToRow* node, EState* estate, int eflags) state->tts = state->ps.ps_ResultTupleSlot; (void)ExecClearTuple(state->tts); state->tts->tts_nvalid = state->nattrs; - state->tts->tts_isempty = false; + state->tts->tts_flags &= ~TTS_FLAG_EMPTY; state->devectorizeFunRuntime = (DevectorizeFun*)palloc0(state->nattrs * sizeof(DevectorizeFun)); for (int i = 0; i < state->nattrs; i++) { state->tts->tts_isnull[i] = false; diff --git a/src/gausskernel/storage/access/common/heaptuple.cpp b/src/gausskernel/storage/access/common/heaptuple.cpp index c49a26d9c..27a0a5682 100644 --- a/src/gausskernel/storage/access/common/heaptuple.cpp +++ b/src/gausskernel/storage/access/common/heaptuple.cpp @@ -1157,7 +1157,7 @@ static void slot_deform_tuple(TupleTableSlot *slot, uint32 natts) } else { /* Restore state from previous execution */ off = slot->tts_off; - slow = slot->tts_slow; + slow = TTS_SLOW(slot); } /* @@ -1196,7 +1196,10 @@ static void slot_deform_tuple(TupleTableSlot *slot, uint32 natts) */ slot->tts_nvalid = attnum; slot->tts_off = off; - slot->tts_slow = slow; + if (slow) + slot->tts_flags |= TTS_FLAG_SLOW; + else + slot->tts_flags &= ~TTS_FLAG_SLOW; } @@ -1440,7 +1443,7 @@ Datum heap_slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull, bool nee /* sanity checks */ Assert(slot != NULL); Assert(slot->tts_tupleDescriptor != NULL); - Assert(slot->tts_tupslotTableAm == TAM_HEAP); + Assert(TTS_TABLEAM_IS_HEAP(slot)); HeapTuple tuple = (HeapTuple)slot->tts_tuple; TupleDesc tupleDesc = slot->tts_tupleDescriptor; @@ -1560,7 +1563,7 @@ Datum heap_slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull, bool nee */ void heap_slot_getallattrs(TupleTableSlot *slot, bool need_transform_anyarray) { - Assert(slot->tts_tupslotTableAm == TAM_HEAP); + Assert(TTS_TABLEAM_IS_HEAP(slot)); int 
tdesc_natts = slot->tts_tupleDescriptor->natts; int attnum; @@ -1671,7 +1674,7 @@ void heap_slot_formbatch(TupleTableSlot* slot, VectorBatch* batch, int cur_rows, */ void heap_slot_getsomeattrs(TupleTableSlot *slot, int attnum) { - Assert(slot->tts_tupslotTableAm == TAM_HEAP); + Assert(TTS_TABLEAM_IS_HEAP(slot)); /* Quick out if we have 'em all already */ if (slot->tts_nvalid >= attnum) { @@ -1715,7 +1718,7 @@ bool heap_slot_attisnull(TupleTableSlot *slot, int attnum) HeapTuple tuple = (HeapTuple)slot->tts_tuple; TupleDesc tupleDesc = slot->tts_tupleDescriptor; - Assert(slot->tts_tupslotTableAm == TAM_HEAP); + Assert(TTS_TABLEAM_IS_HEAP(slot)); /* * system attributes are handled by heap_attisnull @@ -2984,7 +2987,7 @@ static void slot_deform_cmprs_tuple(TupleTableSlot *slot, uint32 natts) slot->tts_nvalid = attnum; slot->tts_off = off; slot->tts_meta_off = cmprsOff; - slot->tts_slow = true; + slot->tts_flags |= TTS_FLAG_SLOW; } /* @@ -2996,20 +2999,20 @@ void heap_slot_clear(TupleTableSlot *slot) * sanity checks */ Assert(slot != NULL); - Assert(slot->tts_tupslotTableAm == TAM_HEAP); + Assert(TTS_TABLEAM_IS_HEAP(slot)); /* * Free any old physical tuple belonging to the slot. */ - if (slot->tts_shouldFree) { + if (TTS_SHOULDFREE(slot)) { heap_freetuple((HeapTuple)slot->tts_tuple); slot->tts_tuple = NULL; - slot->tts_shouldFree = false; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE; } - if (slot->tts_shouldFreeMin) { + if (TTS_SHOULDFREEMIN(slot)) { heap_free_minimal_tuple(slot->tts_mintuple); - slot->tts_shouldFreeMin = false; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN; } } @@ -3025,14 +3028,14 @@ void heap_slot_materialize(TupleTableSlot *slot) * sanity checks */ Assert(slot != NULL); - Assert(!slot->tts_isempty); + Assert(!TTS_EMPTY(slot)); Assert(slot->tts_tupleDescriptor != NULL); /* * If we have a regular physical tuple, and it's locally palloc'd, we have * nothing to do. */ - if (slot->tts_tuple && slot->tts_shouldFree && !HEAP_TUPLE_IS_COMPRESSED(((HeapTuple)slot->tts_tuple)->t_data)) + if (slot->tts_tuple && TTS_SHOULDFREE(slot) && !HEAP_TUPLE_IS_COMPRESSED(((HeapTuple)slot->tts_tuple)->t_data)) return ; /* @@ -3044,7 +3047,7 @@ void heap_slot_materialize(TupleTableSlot *slot) */ MemoryContext old_context = MemoryContextSwitchTo(slot->tts_mcxt); slot->tts_tuple = heap_slot_copy_heap_tuple(slot); - slot->tts_shouldFree = true; + slot->tts_flags |= TTS_FLAG_SHOULDFREE; MemoryContextSwitchTo(old_context); /* @@ -3071,11 +3074,11 @@ void heap_slot_materialize(TupleTableSlot *slot) * storage, we must not pfree it now, since callers might have already * fetched datum pointers referencing it.) */ - if (!slot->tts_shouldFreeMin) { + if (!TTS_SHOULDFREEMIN(slot)) { slot->tts_mintuple = NULL; } #ifdef PGXC - if (!slot->tts_shouldFreeRow) { + if (!TTS_SHOULDFREE_ROW(slot)) { slot->tts_dataRow = NULL; slot->tts_dataLen = -1; } @@ -3097,8 +3100,8 @@ MinimalTuple heap_slot_get_minimal_tuple(TupleTableSlot *slot) { * sanity checks */ Assert(slot != NULL); - Assert(!slot->tts_isempty); - Assert(slot->tts_tupslotTableAm == TAM_HEAP); + Assert(!TTS_EMPTY(slot)); + Assert(TTS_TABLEAM_IS_HEAP(slot)); /* * If we have a minimal physical tuple (local or not) then just return it. 
@@ -3115,7 +3118,7 @@ MinimalTuple heap_slot_get_minimal_tuple(TupleTableSlot *slot) { */ MemoryContext old_context = MemoryContextSwitchTo(slot->tts_mcxt); slot->tts_mintuple = heap_slot_copy_minimal_tuple(slot); - slot->tts_shouldFreeMin = true; + slot->tts_flags |= TTS_FLAG_SHOULDFREEMIN; MemoryContextSwitchTo(old_context); /* @@ -3144,9 +3147,9 @@ MinimalTuple heap_slot_copy_minimal_tuple(TupleTableSlot *slot) * sanity checks. */ Assert(slot != NULL); - Assert(!slot->tts_isempty); + Assert(!TTS_EMPTY(slot)); Assert(slot->tts_tupleDescriptor != NULL); - Assert(slot->tts_tupslotTableAm == TAM_HEAP); + Assert(TTS_TABLEAM_IS_HEAP(slot)); /* * If we have a physical tuple then just copy it. Prefer to copy @@ -3193,24 +3196,24 @@ void heap_slot_store_minimal_tuple(MinimalTuple mtup, TupleTableSlot *slot, bool Assert(mtup != NULL); Assert(slot != NULL); Assert(slot->tts_tupleDescriptor != NULL); - Assert(slot->tts_tupslotTableAm == TAM_HEAP); + Assert(TTS_TABLEAM_IS_HEAP(slot)); /* * Free any old physical tuple belonging to the slot. */ - if (slot->tts_shouldFree && (HeapTuple)slot->tts_tuple != NULL) { + if (TTS_SHOULDFREE(slot) && (HeapTuple)slot->tts_tuple != NULL) { heap_freetuple((HeapTuple)slot->tts_tuple); slot->tts_tuple = NULL; } - if (slot->tts_shouldFreeMin) { + if (TTS_SHOULDFREEMIN(slot)) { heap_free_minimal_tuple(slot->tts_mintuple); } #ifdef PGXC -if (slot->tts_shouldFreeRow) { +if (TTS_SHOULDFREE_ROW(slot)) { pfree_ext(slot->tts_dataRow); } -slot->tts_shouldFreeRow = false; +slot->tts_flags &= ~TTS_FLAG_SHOULDFREE_ROW; slot->tts_dataRow = NULL; slot->tts_dataLen = -1; #endif @@ -3226,9 +3229,12 @@ slot->tts_buffer = InvalidBuffer; /* * Store the new tuple into the specified slot. */ - slot->tts_isempty = false; - slot->tts_shouldFree = false; - slot->tts_shouldFreeMin = shouldFree; + slot->tts_flags &= ~TTS_FLAG_EMPTY; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE; + if (shouldFree) + slot->tts_flags |= TTS_FLAG_SHOULDFREEMIN; + else + slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN; slot->tts_tuple = &slot->tts_minhdr; slot->tts_mintuple = mtup; @@ -3255,7 +3261,7 @@ HeapTuple heap_slot_get_heap_tuple(TupleTableSlot* slot) * sanity checks */ Assert(slot != NULL); - Assert(!slot->tts_isempty); + Assert(!TTS_EMPTY(slot)); Assert(slot->tts_tupleDescriptor != NULL); /* @@ -3289,7 +3295,7 @@ HeapTuple heap_slot_copy_heap_tuple(TupleTableSlot *slot) * sanity checks */ Assert(slot != NULL); - Assert(!slot->tts_isempty); + Assert(!TTS_EMPTY(slot)); Assert(slot->tts_tupleDescriptor != NULL); /* @@ -3340,19 +3346,19 @@ void heap_slot_store_heap_tuple(HeapTuple tuple, TupleTableSlot* slot, Buffer bu /* * Free any old physical tuple belonging to the slot. */ - if (slot->tts_shouldFree && (HeapTuple)slot->tts_tuple != NULL) { + if (TTS_SHOULDFREE(slot) && (HeapTuple)slot->tts_tuple != NULL) { heap_freetuple((HeapTuple)slot->tts_tuple); slot->tts_tuple = NULL; } - if (slot->tts_shouldFreeMin) { + if (TTS_SHOULDFREEMIN(slot)) { heap_free_minimal_tuple(slot->tts_mintuple); } #ifdef ENABLE_MULTIPLE_NODES #ifdef PGXC - if (slot->tts_shouldFreeRow) { + if (TTS_SHOULDFREE_ROW(slot)) { pfree_ext(slot->tts_dataRow); } - slot->tts_shouldFreeRow = false; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE_ROW; slot->tts_dataRow = NULL; slot->tts_dataLen = -1; @@ -3371,9 +3377,12 @@ void heap_slot_store_heap_tuple(HeapTuple tuple, TupleTableSlot* slot, Buffer bu /* * Store the new tuple into the specified slot. 
*/ - slot->tts_isempty = false; - slot->tts_shouldFree = should_free; - slot->tts_shouldFreeMin = false; + slot->tts_flags &= ~TTS_FLAG_EMPTY; + if (should_free) + slot->tts_flags |= TTS_FLAG_SHOULDFREE; + else + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN; slot->tts_tuple = tuple; slot->tts_mintuple = NULL; diff --git a/src/gausskernel/storage/access/index/indexam.cpp b/src/gausskernel/storage/access/index/indexam.cpp index 6ec232b36..ebd4ad3ec 100644 --- a/src/gausskernel/storage/access/index/indexam.cpp +++ b/src/gausskernel/storage/access/index/indexam.cpp @@ -767,7 +767,7 @@ bool IndexGetnextSlot(IndexScanDesc scan, ScanDirection direction, TupleTableSlo ItemPointer tid; TupleTableSlot* tmpslot = NULL; tmpslot = MakeSingleTupleTableSlot(RelationGetDescr(scan->heapRelation), - false, scan->heapRelation->rd_tam_type); + false, GetTableAmRoutine(scan->heapRelation->rd_tam_type)); for (;;) { /* IO collector and IO scheduler */ #ifdef ENABLE_MULTIPLE_NODES diff --git a/src/gausskernel/storage/access/table/tableam.cpp b/src/gausskernel/storage/access/table/tableam.cpp index 93ae344b0..952d46c56 100644 --- a/src/gausskernel/storage/access/table/tableam.cpp +++ b/src/gausskernel/storage/access/table/tableam.cpp @@ -56,81 +56,6 @@ * ------------------------------------------------------------------------ */ - -const TableAmRoutine *GetTableAmRoutine(TableAmType type) -{ - return g_tableam_routines[type]; -} - -/* - * Clears the contents of the table slot that contains heap table tuple data. - */ -void tableam_tslot_clear(TupleTableSlot *slot) -{ - return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_clear(slot); -} - -HeapTuple tableam_tslot_materialize(TupleTableSlot *slot) -{ - return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_materialize(slot); -} - -MinimalTuple tableam_tslot_get_minimal_tuple(TupleTableSlot *slot) -{ - return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_get_minimal_tuple(slot); -} - - -MinimalTuple tableam_tslot_copy_minimal_tuple(TupleTableSlot *slot) -{ - return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_copy_minimal_tuple(slot); -} - -void tableam_tslot_store_minimal_tuple(MinimalTuple mtup, TupleTableSlot *slot, bool shouldFree) -{ - g_tableam_routines[slot->tts_tupslotTableAm]->tslot_store_minimal_tuple(mtup, slot, shouldFree); -} - -HeapTuple tableam_tslot_get_heap_tuple(TupleTableSlot *slot) -{ - return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_get_heap_tuple(slot); -} - -HeapTuple tableam_tslot_copy_heap_tuple(TupleTableSlot *slot) -{ - return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_copy_heap_tuple(slot); -} - -void tableam_tslot_store_tuple(Tuple tuple, TupleTableSlot *slot, Buffer buffer, bool shouldFree, bool batchMode) -{ - g_tableam_routines[GetTabelAmIndexTuple(tuple)]->tslot_store_tuple(tuple, slot, buffer, shouldFree, batchMode); -} - -void tableam_tslot_getsomeattrs(TupleTableSlot *slot, int natts) -{ - g_tableam_routines[slot->tts_tupslotTableAm]->tslot_getsomeattrs(slot, natts); -} - -void tableam_tslot_formbatch(TupleTableSlot* slot, VectorBatch* batch, int cur_rows, int natts) -{ - g_tableam_routines[slot->tts_tupslotTableAm]->tslot_formbatch(slot, batch, cur_rows, natts); -} - -Datum tableam_tslot_getattr(TupleTableSlot *slot, int attnum, bool *isnull) -{ - return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_getattr(slot, attnum, isnull); -} - -void tableam_tslot_getallattrs(TupleTableSlot *slot) -{ - return 
g_tableam_routines[slot->tts_tupslotTableAm]->tslot_getallattrs(slot); -} - -bool tableam_tslot_attisnull(TupleTableSlot *slot, int attnum) -{ - return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_attisnull(slot, attnum); -} - Tuple tableam_tslot_get_tuple_from_slot(Relation relation, TupleTableSlot *slot) { slot->tts_tupleDescriptor->tdhasuids = RELATION_HAS_UIDS(relation); @@ -1041,7 +966,7 @@ void HeapamTcapInsertLost(Relation relation, Snapshot snap) TvInsertLost(RelationGetRelid(relation), snap); } -const TableAmRoutine g_heapam_methods = { +static const TableAmRoutine g_heapam_methods = { /* ------------------------------------------------------------------------ * TABLE SLOT AM APIs * ------------------------------------------------------------------------ @@ -1233,11 +1158,11 @@ bool UHeapamTslotAttisnull(TupleTableSlot *slot, int attnum) Tuple uheapam_tslot_get_tuple_from_slot(TupleTableSlot* slot) { UHeapTuple utuple = NULL; - if (slot->tts_tupslotTableAm != TAM_USTORE) { + if (!TTS_TABLEAM_IS_USTORE(slot)) { tableam_tslot_getallattrs(slot); // here has some main difference. utuple = (UHeapTuple)tableam_tops_form_tuple(slot->tts_tupleDescriptor, slot->tts_values, slot->tts_isnull, UHEAP_TUPLE); - slot->tts_tupslotTableAm = TAM_USTORE; + slot->tts_tam_ops = TableAmUstore; utuple->tupInfo = 1; ExecStoreTuple((Tuple)utuple, slot, InvalidBuffer, true); } else { @@ -1417,7 +1342,7 @@ void UHeapamTopsUpdateTupleWithOid (Relation rel, Tuple tuple, TupleTableSlot *s if (RelationGetRelid(rel) != InvalidOid) ((UHeapTuple)tuple)->table_oid = RelationGetRelid(rel); - if (slot->tts_tupslotTableAm != TAM_USTORE) { + if (!TTS_TABLEAM_IS_USTORE(slot)) { /* * Global Partition Index stores the partition's tableOid with the index * tuple which is extracted from heap tuple of the slot in this case. @@ -1742,7 +1667,7 @@ void UheapamTcapInsertLost(Relation relation, Snapshot snap) /* All the function is pointer to heap function now, need to abstract the logic and replace with ustore function * after. */ -const TableAmRoutine g_ustoream_methods = { +static const TableAmRoutine g_ustoream_methods = { // XXXTAM: Currently heapam* methods are hacked to deal with uheap table methods. // separate them out into uheapam* and assign them below to the right am function pointer. 
@@ -1861,3 +1786,6 @@ const TableAmRoutine * const g_tableam_routines[] = { &g_heapam_methods, &g_ustoream_methods }; + +const TableAmRoutine* TableAmHeap = &g_heapam_methods; +const TableAmRoutine* TableAmUstore = &g_ustoream_methods; \ No newline at end of file diff --git a/src/gausskernel/storage/access/ustore/knl_uheap.cpp b/src/gausskernel/storage/access/ustore/knl_uheap.cpp index 8b3b787b2..2b9c31999 100644 --- a/src/gausskernel/storage/access/ustore/knl_uheap.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uheap.cpp @@ -41,6 +41,7 @@ #include "access/ustore/knl_utuple.h" #include "access/ustore/knl_utuptoaster.h" #include "access/ustore/knl_whitebox_test.h" +#include "access/tableam.h" #include static Bitmapset *UHeapDetermineModifiedColumns(Relation relation, Bitmapset *interesting_cols, UHeapTuple oldtup, @@ -2156,7 +2157,7 @@ check_tup_satisfies_update: /* create the old tuple for caller */ if (oldslot) { - *oldslot = MakeSingleTupleTableSlot(relation->rd_att, false, TAM_USTORE); + *oldslot = MakeSingleTupleTableSlot(relation->rd_att, false, TableAmUstore); TupleDesc rowDesc = (*oldslot)->tts_tupleDescriptor; UHeapTuple oldtupCopy = UHeapCopyTuple(&utuple); @@ -2964,7 +2965,7 @@ check_tup_satisfies_update: /* Till now, we know whether we will delete the old index */ if (oldslot && (*modifiedIdxAttrs != NULL || !useInplaceUpdate)) { - *oldslot = MakeSingleTupleTableSlot(relation->rd_att, false, TAM_USTORE); + *oldslot = MakeSingleTupleTableSlot(relation->rd_att, false, TableAmUstore); TupleDesc rowDesc = (*oldslot)->tts_tupleDescriptor; UHeapTuple oldtupCopy = UHeapCopyTuple(&oldtup); @@ -3673,13 +3674,13 @@ static void TtsUHeapMaterialize(TupleTableSlot *slot) { MemoryContext oldContext; - Assert(!slot->tts_isempty); + Assert(!TTS_EMPTY(slot)); /* If already materialized nothing to do. */ - if (slot->tts_shouldFree) + if (TTS_SHOULDFREE(slot)) return; - slot->tts_shouldFree = true; + slot->tts_flags |= TTS_FLAG_SHOULDFREE; oldContext = MemoryContextSwitchTo(slot->tts_mcxt); @@ -3691,7 +3692,7 @@ static void TtsUHeapMaterialize(TupleTableSlot *slot) slot->tts_tuple = UHeapFormTuple(slot->tts_tupleDescriptor, slot->tts_values, slot->tts_isnull); /* Let the caller know this contains a UHeap tuple now */ - slot->tts_tupslotTableAm = TAM_USTORE; + slot->tts_tam_ops = TableAmUstore; MemoryContextSwitchTo(oldContext); diff --git a/src/gausskernel/storage/access/ustore/knl_utuple.cpp b/src/gausskernel/storage/access/ustore/knl_utuple.cpp index 3fcd54914..73395ea2c 100644 --- a/src/gausskernel/storage/access/ustore/knl_utuple.cpp +++ b/src/gausskernel/storage/access/ustore/knl_utuple.cpp @@ -1351,8 +1351,8 @@ HeapTuple UHeapCopyHeapTuple(TupleTableSlot *slot) { HeapTuple tuple; - Assert(!slot->tts_isempty); - Assert(slot->tts_tupslotTableAm == TAM_USTORE); + Assert(!TTS_EMPTY(slot)); + Assert(TTS_TABLEAM_IS_USTORE(slot)); UHeapSlotGetAllAttrs(slot); @@ -1379,20 +1379,20 @@ void UHeapSlotClear(TupleTableSlot *slot) * sanity checks */ Assert(slot != NULL); - Assert(slot->tts_tupslotTableAm == TAM_USTORE); + Assert(TTS_TABLEAM_IS_USTORE(slot)); /* * Free any old physical tuple belonging to the slot. 
*/ - if (slot->tts_shouldFree && (UHeapTuple)slot->tts_tuple != NULL) { + if (TTS_SHOULDFREE(slot) && (UHeapTuple)slot->tts_tuple != NULL) { UHeapFreeTuple(slot->tts_tuple); slot->tts_tuple = NULL; - slot->tts_shouldFree = false; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE; } - if (slot->tts_shouldFreeMin) { + if (TTS_SHOULDFREEMIN(slot)) { heap_free_minimal_tuple(slot->tts_mintuple); - slot->tts_shouldFreeMin = false; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN; } } @@ -1406,7 +1406,7 @@ void UHeapSlotClear(TupleTableSlot *slot) */ void UHeapSlotGetSomeAttrs(TupleTableSlot *slot, int attnum) { - Assert(slot->tts_tupslotTableAm == TAM_USTORE); + Assert(TTS_TABLEAM_IS_USTORE(slot)); /* Quick out if we have 'em all already */ if (slot->tts_nvalid >= attnum) { @@ -1418,7 +1418,7 @@ void UHeapSlotGetSomeAttrs(TupleTableSlot *slot, int attnum) void UHeapSlotFormBatch(TupleTableSlot* slot, VectorBatch* batch, int cur_rows, int attnum) { - Assert(slot->tts_tupslotTableAm == TAM_USTORE); + Assert(TTS_TABLEAM_IS_USTORE(slot)); /* Quick out if we have all already */ if (slot->tts_nvalid >= attnum) { @@ -1514,7 +1514,7 @@ bool UHeapSlotAttIsNull(const TupleTableSlot *slot, int attnum) TupleDesc tupleDesc = slot->tts_tupleDescriptor; UHeapTuple uhtup = (UHeapTuple)slot->tts_tuple; - Assert(slot->tts_tupslotTableAm == TAM_USTORE); + Assert(TTS_TABLEAM_IS_USTORE(slot)); /* * system attributes are handled by heap_attisnull @@ -1571,7 +1571,7 @@ bool UHeapSlotAttIsNull(const TupleTableSlot *slot, int attnum) */ void UHeapSlotGetAllAttrs(TupleTableSlot *slot) { - Assert(slot->tts_tupslotTableAm == TAM_USTORE); + Assert(TTS_TABLEAM_IS_USTORE(slot)); /* Quick out if we have 'em all already */ if (slot->tts_nvalid == slot->tts_tupleDescriptor->natts) { @@ -1708,9 +1708,9 @@ MinimalTuple UHeapSlotCopyMinimalTuple(TupleTableSlot *slot) * sanity checks. */ Assert(slot != NULL); - Assert(!slot->tts_isempty); + Assert(!TTS_EMPTY(slot)); Assert(slot->tts_tupleDescriptor != NULL); - Assert(slot->tts_tupslotTableAm == TAM_USTORE); + Assert(TTS_TABLEAM_IS_USTORE(slot)); UHeapSlotGetAllAttrs(slot); @@ -1733,7 +1733,7 @@ MinimalTuple UHeapSlotGetMinimalTuple(TupleTableSlot *slot) * sanity checks */ Assert(slot != NULL); - Assert(!slot->tts_isempty); + Assert(!TTS_EMPTY(slot)); /* * If we have a minimal physical tuple (local or not) then just return it. @@ -1750,7 +1750,7 @@ MinimalTuple UHeapSlotGetMinimalTuple(TupleTableSlot *slot) */ MemoryContext oldContext = MemoryContextSwitchTo(slot->tts_mcxt); slot->tts_mintuple = UHeapSlotCopyMinimalTuple(slot); - slot->tts_shouldFreeMin = true; + slot->tts_flags |= TTS_FLAG_SHOULDFREEMIN; MemoryContextSwitchTo(oldContext); /* @@ -1778,16 +1778,16 @@ void UHeapSlotStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot, bool sh Assert(mtup != NULL); Assert(slot != NULL); Assert(slot->tts_tupleDescriptor != NULL); - Assert(slot->tts_tupslotTableAm == TAM_USTORE); + Assert(TTS_TABLEAM_IS_USTORE(slot)); /* * Free any old physical tuple belonging to the slot. */ - if (slot->tts_shouldFree && (UHeapTuple)slot->tts_tuple != NULL) { + if (TTS_SHOULDFREE(slot) && (UHeapTuple)slot->tts_tuple != NULL) { UHeapFreeTuple(slot->tts_tuple); slot->tts_tuple = NULL; } - if (slot->tts_shouldFreeMin) { + if (TTS_SHOULDFREEMIN(slot)) { heap_free_minimal_tuple(slot->tts_mintuple); } @@ -1802,9 +1802,13 @@ void UHeapSlotStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot, bool sh /* * Store the new tuple into the specified slot. 
*/ - slot->tts_isempty = false; - slot->tts_shouldFree = false; - slot->tts_shouldFreeMin = shouldFree; + slot->tts_flags &= ~TTS_FLAG_EMPTY; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE; + if (shouldFree) + slot->tts_flags |= TTS_FLAG_SHOULDFREEMIN; + else + slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN; + slot->tts_tuple = &slot->tts_minhdr; slot->tts_mintuple = mtup; @@ -1813,7 +1817,7 @@ void UHeapSlotStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot, bool sh slot->tts_minhdr.t_data = (HeapTupleHeader)((char *)mtup - MINIMAL_TUPLE_OFFSET); /* This slot now contains a HEAP_TUPLE so make sure to let callers know how to read it */ - slot->tts_tupslotTableAm = TAM_HEAP; + slot->tts_tam_ops = TableAmHeap; /* no need to set t_self or t_tableOid since we won't allow access */ /* Mark extracted state invalid */ @@ -1833,12 +1837,12 @@ void UHeapSlotStoreUHeapTuple(UHeapTuple utuple, TupleTableSlot *slot, bool shou * sanity checks */ Assert(utuple != NULL && utuple->tupTableType == UHEAP_TUPLE); - Assert(slot != NULL && slot->tts_tupslotTableAm == TAM_USTORE); + Assert(slot != NULL && TTS_TABLEAM_IS_USTORE(slot)); Assert(slot->tts_tupleDescriptor != NULL); - if (slot->tts_shouldFreeMin) { + if (TTS_SHOULDFREEMIN(slot)) { heap_free_minimal_tuple(slot->tts_mintuple); - slot->tts_shouldFreeMin = false; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN; } UHeapSlotClear(slot); @@ -1846,9 +1850,12 @@ void UHeapSlotStoreUHeapTuple(UHeapTuple utuple, TupleTableSlot *slot, bool shou /* * Store the new tuple into the specified slot. */ - slot->tts_isempty = false; - slot->tts_shouldFree = shouldFree; - slot->tts_shouldFreeMin = false; + slot->tts_flags &= ~TTS_FLAG_EMPTY; + if (shouldFree) + slot->tts_flags |= TTS_FLAG_SHOULDFREE; + else + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE; + slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN; slot->tts_tuple = utuple; slot->tts_mintuple = NULL; @@ -1864,14 +1871,14 @@ void UHeapSlotStoreUHeapTuple(UHeapTuple utuple, TupleTableSlot *slot, bool shou */ Tuple UHeapMaterialize(TupleTableSlot *slot) { - Assert(!slot->tts_isempty); - Assert(slot->tts_tupslotTableAm == TAM_USTORE); + Assert(!TTS_EMPTY(slot)); + Assert(TTS_TABLEAM_IS_USTORE(slot)); Assert(slot->tts_tupleDescriptor != NULL); /* * If we have a regular physical tuple, and it's locally palloc'd, we have * nothing to do. 
*/ - if (slot->tts_tuple && slot->tts_shouldFree) { + if (slot->tts_tuple && TTS_SHOULDFREE(slot)) { return slot->tts_tuple; } @@ -1888,7 +1895,7 @@ Tuple UHeapMaterialize(TupleTableSlot *slot) } else { slot->tts_tuple = UHeapFormTuple(slot->tts_tupleDescriptor, slot->tts_values, slot->tts_isnull); } - slot->tts_shouldFree = true; + slot->tts_flags |= TTS_FLAG_SHOULDFREE; MemoryContextSwitchTo(old_context); /* diff --git a/src/gausskernel/storage/access/ustore/knl_utuptoaster.cpp b/src/gausskernel/storage/access/ustore/knl_utuptoaster.cpp index f46e3af63..079060bff 100644 --- a/src/gausskernel/storage/access/ustore/knl_utuptoaster.cpp +++ b/src/gausskernel/storage/access/ustore/knl_utuptoaster.cpp @@ -19,6 +19,7 @@ #include "access/genam.h" #include "access/heapam.h" +#include "access/tableam.h" #include "nodes/relation.h" #include "access/tuptoaster.h" #include "access/ustore/knl_utuptoaster.h" @@ -56,7 +57,7 @@ Oid UHeapGetNewOidWithIndex(Relation relation, Oid indexId, AttrNumber oidcolumn ScanKeyData key; bool collides = false; Assert(RelationIsUstoreFormat(relation) || RelationIsToast(relation)); - TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(relation), false, relation->rd_tam_type); + TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(relation), false, GetTableAmRoutine(relation->rd_tam_type)); /* Generate new OIDs until we find one not in the table */ do { CHECK_FOR_INTERRUPTS(); @@ -897,7 +898,7 @@ static void UHeapToastDeleteDatum(Relation rel, Datum value, int options) /* The toast table of ustore table should also be of ustore type */ Assert(RelationIsUstoreFormat(toastrel)); /* should index must be ustore format ? */ - TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(toastrel), false, toastrel->rd_tam_type); + TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(toastrel), false, GetTableAmRoutine(toastrel->rd_tam_type)); /* * Setup a scan key to find chunks with matching va_valueid @@ -960,7 +961,7 @@ struct varlena *UHeapInternalToastFetchDatum(struct varatt_external toastPointer SET_VARSIZE(result, ressize + VARHDRSZ); toastTupDesc = toastrel->rd_att; - TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(toastrel), false, toastrel->rd_tam_type); + TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(toastrel), false, GetTableAmRoutine(toastrel->rd_tam_type)); /* * Setup a scan key to fetch from the index by va_valueid @@ -1132,7 +1133,7 @@ struct varlena *UHeapInternalToastFetchDatumSlice(struct varatt_external toastPo * Open the toast relation and its index */ toastTupDesc = toastrel->rd_att; - TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(toastrel), false, toastrel->rd_tam_type); + TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(toastrel), false, GetTableAmRoutine(toastrel->rd_tam_type)); /* * Setup a scan key to fetch from the index. 
This is either two keys or @@ -1267,7 +1268,7 @@ static bool UHeapToastRelValueidExists(Relation toastrel, Oid valueid) SysScanDesc toastscan; TupleTableSlot *slot = NULL; Assert(RelationIsUstoreFormat(toastrel)); - slot = MakeSingleTupleTableSlot(RelationGetDescr(toastrel), false, toastrel->rd_tam_type); + slot = MakeSingleTupleTableSlot(RelationGetDescr(toastrel), false, GetTableAmRoutine(toastrel->rd_tam_type)); /* * Setup a scan key to find chunks with matching va_valueid diff --git a/src/gausskernel/storage/bulkload/dist_fdw.cpp b/src/gausskernel/storage/bulkload/dist_fdw.cpp index 917bb5eab..1538ab4c5 100644 --- a/src/gausskernel/storage/bulkload/dist_fdw.cpp +++ b/src/gausskernel/storage/bulkload/dist_fdw.cpp @@ -526,7 +526,7 @@ ForeignScanState *buildRelatedStateInfo(Relation relation, DistFdwFileSegment *s ; /* setup tuple slot */ - scanTupleSlot = MakeTupleTableSlot(true, tupleDescriptor->tdTableAmType); + scanTupleSlot = MakeTupleTableSlot(true, GetTableAmRoutine(tupleDescriptor->tdTableAmType)); scanTupleSlot->tts_tupleDescriptor = tupleDescriptor; scanTupleSlot->tts_values = columnValues; scanTupleSlot->tts_isnull = columnNulls; @@ -641,7 +641,7 @@ static int distAcquireSampleRows(Relation relation, int logLevel, HeapTuple *sam (void)MemoryContextSwitchTo(oldContext); /* if there are no more records to read, break */ - if (scanTupleSlot->tts_isempty) { + if (TTS_EMPTY(scanTupleSlot)) { break; } diff --git a/src/gausskernel/storage/bulkload/foreignroutine.cpp b/src/gausskernel/storage/bulkload/foreignroutine.cpp index fc3657346..e5946a56e 100644 --- a/src/gausskernel/storage/bulkload/foreignroutine.cpp +++ b/src/gausskernel/storage/bulkload/foreignroutine.cpp @@ -1612,7 +1612,7 @@ retry: /* * Optimize foreign scan by using informational constraint. */ - if (((ForeignScan *)node->ss.ps.plan)->scan.predicate_pushdown_optimized && false == slot->tts_isempty) { + if (((ForeignScan *)node->ss.ps.plan)->scan.predicate_pushdown_optimized && !TTS_EMPTY(slot)) { /* * If we find a suitable tuple, set is_scan_end value is true. 
* It means that we do not find suitable tuple in the next iteration, diff --git a/src/gausskernel/storage/mot/jit_exec/jit_helpers.cpp b/src/gausskernel/storage/mot/jit_exec/jit_helpers.cpp index 10c1ebb09..f882adb73 100644 --- a/src/gausskernel/storage/mot/jit_exec/jit_helpers.cpp +++ b/src/gausskernel/storage/mot/jit_exec/jit_helpers.cpp @@ -3432,7 +3432,7 @@ static bool RecompilePlanIfNeeded( static void PrintResultSlot(TupleTableSlot* resultSlot) { - if (resultSlot->tts_isempty) { + if (TTS_EMPTY(resultSlot)) { MOT_LOG_DEBUG("Query result is empty"); } else { MOT_LOG_BEGIN(MOT::LogLevel::LL_DEBUG, "Query result slot:"); @@ -3861,7 +3861,7 @@ int JitExecSubQuery(int subQueryId, int tcount) if (slot == subQueryExecState->m_resultSlot) { // we need to copy slot if there is a minimal tuple or SPI push was called and the tuple is not // empty - if ((slot->tts_mintuple != nullptr) || (isSpiPushed && !slot->tts_isempty)) { + if ((slot->tts_mintuple != nullptr) || (isSpiPushed && !TTS_EMPTY(slot))) { CopyTupleTableSlot(subQueryExecState, subQueryExecState->m_resultSlot); } } diff --git a/src/gausskernel/storage/replication/logical/worker.cpp b/src/gausskernel/storage/replication/logical/worker.cpp index 28488e151..2925c730c 100644 --- a/src/gausskernel/storage/replication/logical/worker.cpp +++ b/src/gausskernel/storage/replication/logical/worker.cpp @@ -201,7 +201,7 @@ static EState *create_estate_for_relation(LogicalRepRelMapEntry *rel) /* Triggers might need a slot */ if (resultRelInfo->ri_TrigDesc) - estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate, rel->localrel->rd_tam_type); + estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate, GetTableAmRoutine(rel->localrel->rd_tam_type)); /* Prepare to catch AFTER triggers. */ AfterTriggerBeginQuery(); @@ -598,7 +598,7 @@ static void apply_handle_insert(StringInfo s) /* Initialize the executor state. */ estate = create_estate_for_relation(rel); - remoteslot = ExecInitExtraTupleSlot(estate, rel->localrel->rd_tam_type); + remoteslot = ExecInitExtraTupleSlot(estate, GetTableAmRoutine(rel->localrel->rd_tam_type)); ExecSetSlotDescriptor(remoteslot, RelationGetDescr(rel->localrel)); /* Input functions may need an active snapshot, so get one */ @@ -724,9 +724,9 @@ static void apply_handle_update(StringInfo s) /* Initialize the executor state. */ estate = create_estate_for_relation(rel); - remoteslot = ExecInitExtraTupleSlot(estate, rel->localrel->rd_tam_type); + remoteslot = ExecInitExtraTupleSlot(estate, GetTableAmRoutine(rel->localrel->rd_tam_type)); ExecSetSlotDescriptor(remoteslot, RelationGetDescr(rel->localrel)); - localslot = ExecInitExtraTupleSlot(estate, rel->localrel->rd_tam_type); + localslot = ExecInitExtraTupleSlot(estate, GetTableAmRoutine(rel->localrel->rd_tam_type)); ExecSetSlotDescriptor(localslot, RelationGetDescr(rel->localrel)); EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1); @@ -842,9 +842,9 @@ static void apply_handle_delete(StringInfo s) /* Initialize the executor state. 
*/ estate = create_estate_for_relation(rel); - remoteslot = ExecInitExtraTupleSlot(estate, rel->localrel->rd_tam_type); + remoteslot = ExecInitExtraTupleSlot(estate, GetTableAmRoutine(rel->localrel->rd_tam_type)); ExecSetSlotDescriptor(remoteslot, RelationGetDescr(rel->localrel)); - localslot = ExecInitExtraTupleSlot(estate, rel->localrel->rd_tam_type); + localslot = ExecInitExtraTupleSlot(estate, GetTableAmRoutine(rel->localrel->rd_tam_type)); ExecSetSlotDescriptor(localslot, RelationGetDescr(rel->localrel)); EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1); diff --git a/src/gausskernel/storage/tcap/tcap_version.cpp b/src/gausskernel/storage/tcap/tcap_version.cpp index 5d41fad47..6c5c7fbf1 100644 --- a/src/gausskernel/storage/tcap/tcap_version.cpp +++ b/src/gausskernel/storage/tcap/tcap_version.cpp @@ -808,7 +808,7 @@ static void TvUheapInsertLostImpl(Relation rel, Relation partRel, Partition p, Relation relRel = (partRel != NULL) ? partRel : rel; /* Set up a tuple slot too */ - myslot = ExecInitExtraTupleSlot(estate, TAM_USTORE); + myslot = ExecInitExtraTupleSlot(estate, TableAmUstore); ExecSetSlotDescriptor(myslot, RelationGetDescr(relRel)); /* Switch into its memory context */ diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h index 0d38968eb..1de4e5ccc 100644 --- a/src/include/access/tableam.h +++ b/src/include/access/tableam.h @@ -498,25 +498,93 @@ typedef struct TableAmRoutine { void (*tcap_insert_lost)(Relation relation, Snapshot snap); } TableAmRoutine; + extern const TableAmRoutine * const g_tableam_routines[]; extern void HeapamScanIndexFetchEnd(IndexFetchTableData *scan); extern void heapam_index_fetch_reset(IndexFetchTableData *scan); extern IndexFetchTableData *HeapamScanIndexFetchBegin(Relation rel); -extern const TableAmRoutine *GetTableAmRoutine(TableAmType type); -extern void tableam_tslot_clear(TupleTableSlot *slot); -extern HeapTuple tableam_tslot_materialize(TupleTableSlot *slot); -extern MinimalTuple tableam_tslot_get_minimal_tuple(TupleTableSlot *slot); -extern MinimalTuple tableam_tslot_copy_minimal_tuple(TupleTableSlot *slot); -extern void tableam_tslot_store_minimal_tuple(MinimalTuple mtup, TupleTableSlot *slot, bool shouldFree); -extern HeapTuple tableam_tslot_get_heap_tuple(TupleTableSlot *slot); -extern HeapTuple tableam_tslot_copy_heap_tuple(TupleTableSlot *slot); -extern void tableam_tslot_store_tuple(Tuple tuple, TupleTableSlot *slot, Buffer buffer, bool shouldFree, bool batchMode); -extern void tableam_tslot_getsomeattrs(TupleTableSlot *slot, int natts); -extern Datum tableam_tslot_getattr(TupleTableSlot *slot, int attnum, bool *isnull); -extern void tableam_tslot_getallattrs(TupleTableSlot *slot); -extern void tableam_tslot_formbatch(TupleTableSlot* slot, VectorBatch* batch, int cur_rows, int natts); -extern bool tableam_tslot_attisnull(TupleTableSlot *slot, int attnum); +static inline const TableAmRoutine* GetTableAmRoutine(TableAmType type) +{ + Assert(type == TAM_HEAP || type == TAM_USTORE); + return type == TAM_HEAP ? TableAmHeap : TableAmUstore; +} + +static inline TableAmType GetTableAmType(const TableAmRoutine* ops) +{ + Assert(ops == TableAmHeap || ops == TableAmUstore); + return ops == TableAmHeap ? TAM_HEAP : TAM_USTORE; +} + +/* + * Clears the contents of the table slot that contains heap table tuple data. 
+ */ +static inline void tableam_tslot_clear(TupleTableSlot *slot) +{ + return slot->tts_tam_ops->tslot_clear(slot); +} + +static inline HeapTuple tableam_tslot_materialize(TupleTableSlot *slot) +{ + return slot->tts_tam_ops->tslot_materialize(slot); +} + +static inline MinimalTuple tableam_tslot_get_minimal_tuple(TupleTableSlot *slot) +{ + return slot->tts_tam_ops->tslot_get_minimal_tuple(slot); +} + +static inline MinimalTuple tableam_tslot_copy_minimal_tuple(TupleTableSlot *slot) +{ + return slot->tts_tam_ops->tslot_copy_minimal_tuple(slot); +} + +static inline void tableam_tslot_store_minimal_tuple(MinimalTuple mtup, TupleTableSlot *slot, bool shouldFree) +{ + slot->tts_tam_ops->tslot_store_minimal_tuple(mtup, slot, shouldFree); +} + +static inline HeapTuple tableam_tslot_get_heap_tuple(TupleTableSlot *slot) +{ + return slot->tts_tam_ops->tslot_get_heap_tuple(slot); +} + +static inline HeapTuple tableam_tslot_copy_heap_tuple(TupleTableSlot *slot) +{ + return slot->tts_tam_ops->tslot_copy_heap_tuple(slot); +} + +static inline void tableam_tslot_store_tuple(Tuple tuple, TupleTableSlot *slot, Buffer buffer, bool shouldFree, bool batchMode) +{ + Assert(slot->tts_tam_ops == GetTableAmRoutine(TableAmType(GetTabelAmIndexTuple(tuple)))); + slot->tts_tam_ops->tslot_store_tuple(tuple, slot, buffer, shouldFree, batchMode); +} + +static inline void tableam_tslot_getsomeattrs(TupleTableSlot *slot, int natts) +{ + slot->tts_tam_ops->tslot_getsomeattrs(slot, natts); +} + +static inline void tableam_tslot_formbatch(TupleTableSlot* slot, VectorBatch* batch, int cur_rows, int natts) +{ + slot->tts_tam_ops->tslot_formbatch(slot, batch, cur_rows, natts); +} + +static inline Datum tableam_tslot_getattr(TupleTableSlot *slot, int attnum, bool *isnull) +{ + return slot->tts_tam_ops->tslot_getattr(slot, attnum, isnull); +} + +static inline void tableam_tslot_getallattrs(TupleTableSlot *slot) +{ + return slot->tts_tam_ops->tslot_getallattrs(slot); +} + +static inline bool tableam_tslot_attisnull(TupleTableSlot *slot, int attnum) +{ + return slot->tts_tam_ops->tslot_attisnull(slot, attnum); +} + extern Tuple tableam_tslot_get_tuple_from_slot(Relation relation, TupleTableSlot *slot); extern Datum tableam_tops_getsysattr(Tuple tup, int attnum, TupleDesc tuple_desc, bool *isnull, Buffer buf = InvalidBuffer); diff --git a/src/include/access/tupdesc.h b/src/include/access/tupdesc.h index 4ccc82c04..96ba3bcf2 100644 --- a/src/include/access/tupdesc.h +++ b/src/include/access/tupdesc.h @@ -41,6 +41,14 @@ typedef enum tableAmType TAM_USTORE = 1, } TableAmType; +/* + * Predefined TableAmRoutine for various types of table AM. The + * same are used to identify the table AM of a given slot. 
+ */ +struct TableAmRoutine; +extern const TableAmRoutine* TableAmHeap; +extern const TableAmRoutine* TableAmUstore; + /* index page split methods */ #define INDEXSPLIT_NO_DEFAULT 0 /* default split method, aimed at equal split */ #define INDEXSPLIT_NO_INSERTPT 1 /* insertpt */ diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h index 45cfda856..e4240ee4a 100644 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h @@ -330,9 +330,9 @@ extern bool is_huge_clob(Oid type_oid, bool is_null, Datum value); /* * prototypes from functions in execTuples.c */ -extern void ExecInitResultTupleSlot(EState* estate, PlanState* planstate, TableAmType tam = TAM_HEAP); -extern void ExecInitScanTupleSlot(EState* estate, ScanState* scanstate, TableAmType tam = TAM_HEAP); -extern TupleTableSlot* ExecInitExtraTupleSlot(EState* estate, TableAmType tam = TAM_HEAP); +extern void ExecInitResultTupleSlot(EState* estate, PlanState* planstate, const TableAmRoutine* tam_ops = TableAmHeap); +extern void ExecInitScanTupleSlot(EState* estate, ScanState* scanstate, const TableAmRoutine* tam_ops = TableAmHeap); +extern TupleTableSlot* ExecInitExtraTupleSlot(EState* estate, const TableAmRoutine* tam_ops = TableAmHeap); extern TupleTableSlot* ExecInitNullTupleSlot(EState* estate, TupleDesc tupType); extern TupleDesc ExecTypeFromTL(List* targetList, bool hasoid, bool markdropped = false, TableAmType tam = TAM_HEAP); extern TupleDesc ExecCleanTypeFromTL(List* targetList, bool hasoid, TableAmType tam = TAM_HEAP); diff --git a/src/include/executor/tuptable.h b/src/include/executor/tuptable.h index cd6b068b2..65c85f71a 100644 --- a/src/include/executor/tuptable.h +++ b/src/include/executor/tuptable.h @@ -65,11 +65,11 @@ * ie, only as needed. This serves to avoid repeated extraction of data * from the physical tuple. * - * A TupleTableSlot can also be "empty", holding no valid data. This is - * the only valid state for a freshly-created slot that has not yet had a - * tuple descriptor assigned to it. In this state, tts_isempty must be - * TRUE, tts_shouldFree FALSE, tts_tuple NULL, tts_buffer InvalidBuffer, - * and tts_nvalid zero. + * A TupleTableSlot can also be "empty", indicated by flag TTS_FLAG_EMPTY set in + * tts_flags, holding no valid data. This is the only valid state for a + * freshly-created slot that has not yet had a tuple descriptor assigned to it. + * In this state, TTS_SHOULDFREE should not be set in tts_flags, tts_tuple must + * be NULL, tts_buffer InvalidBuffer, and tts_nvalid zero. * * The tupleDescriptor is simply referenced, not copied, by the TupleTableSlot * code. The caller of ExecSetSlotDescriptor() is responsible for providing @@ -79,8 +79,9 @@ * mechanism to do more. However, the slot will increment the tupdesc * reference count if a reference-counted tupdesc is supplied.) * - * When tts_shouldFree is true, the physical tuple is "owned" by the slot - * and should be freed when the slot's reference to the tuple is dropped. + * When TTS_SHOULDFREE is set in tts_flags, the physical tuple is "owned" by + * the slot and should be freed when the slot's reference to the tuple is + * dropped. * * If tts_buffer is not InvalidBuffer, then the slot is holding a pin * on the indicated buffer page; drop the pin when we release the @@ -106,55 +107,83 @@ * MINIMAL_TUPLE_OFFSET bytes before tts_mintuple. This allows column * extraction to treat the case identically to regular physical tuples.
* - * tts_slow/tts_off are saved state for slot_deform_tuple, and should not - * be touched by any other code. + * TTS_SLOW flag in tts_flags and tts_off are saved state for + * slot_deform_tuple, and should not be touched by any other code. * ---------- */ + +/* true = slot is empty */ +#define TTS_FLAG_EMPTY (1 << 1) +#define TTS_EMPTY(slot) (((slot)->tts_flags & TTS_FLAG_EMPTY) != 0) + +/* should pfree tts_tuple? */ +#define TTS_FLAG_SHOULDFREE (1 << 2) +#define TTS_SHOULDFREE(slot) (((slot)->tts_flags & TTS_FLAG_SHOULDFREE) != 0) + +/* should pfree tts_mintuple? */ +#define TTS_FLAG_SHOULDFREEMIN (1 << 3) +#define TTS_SHOULDFREEMIN(slot) (((slot)->tts_flags & TTS_FLAG_SHOULDFREEMIN) != 0) + +/* saved state for slot_deform_tuple */ +#define TTS_FLAG_SLOW (1 << 4) +#define TTS_SLOW(slot) (((slot)->tts_flags & TTS_FLAG_SLOW) != 0) + +/* + * openGauss flags + */ + +/* should pfree tts_dataRow? */ +#define TTS_FLAG_SHOULDFREE_ROW (1 << 12) +#define TTS_SHOULDFREE_ROW(slot) (((slot)->tts_flags & TTS_FLAG_SHOULDFREE_ROW) != 0) + typedef struct TupleTableSlot { NodeTag type; - bool tts_isempty; /* true = slot is empty */ - bool tts_shouldFree; /* should pfree tts_tuple? */ - bool tts_shouldFreeMin; /* should pfree tts_mintuple? */ - bool tts_slow; /* saved state for slot_deform_tuple */ - + uint16 tts_flags; /* Boolean states */ + int tts_nvalid; /* # of valid values in tts_values */ + const TableAmRoutine* tts_tam_ops; /* implementation of table AM */ Tuple tts_tuple; /* physical tuple, or NULL if virtual */ + + TupleDesc tts_tupleDescriptor; /* slot's tuple descriptor */ + MemoryContext tts_mcxt; /* slot itself is in this context */ + Buffer tts_buffer; /* tuple's buffer, or InvalidBuffer */ + long tts_off; /* saved state for slot_deform_tuple */ + Datum* tts_values; /* current per-attribute values */ + bool* tts_isnull; /* current per-attribute isnull flags */ + + MinimalTuple tts_mintuple; /* minimal tuple, or NULL if none */ + HeapTupleData tts_minhdr; /* workspace for minimal-tuple-only case */ + + long tts_meta_off; /* saved state for slot_deform_cmpr_tuple */ + Datum* tts_lobPointers; #ifdef PGXC /* * PGXC extension to support tuples sent from remote Datanode. */ char* tts_dataRow; /* Tuple data in DataRow format */ int tts_dataLen; /* Actual length of the data row */ - bool tts_shouldFreeRow; /* should pfree tts_dataRow?
*/ struct AttInMetadata* tts_attinmeta; /* store here info to extract values from the DataRow */ Oid tts_xcnodeoid; /* Oid of node from where the datarow is fetched */ MemoryContext tts_per_tuple_mcxt; #endif - TupleDesc tts_tupleDescriptor; /* slot's tuple descriptor */ - MemoryContext tts_mcxt; /* slot itself is in this context */ - Buffer tts_buffer; /* tuple's buffer, or InvalidBuffer */ - int tts_nvalid; /* # of valid values in tts_values */ - Datum* tts_values; /* current per-attribute values */ - bool* tts_isnull; /* current per-attribute isnull flags */ - Datum* tts_lobPointers; - MinimalTuple tts_mintuple; /* minimal tuple, or NULL if none */ - HeapTupleData tts_minhdr; /* workspace for minimal-tuple-only case */ - long tts_off; /* saved state for slot_deform_tuple */ - long tts_meta_off; /* saved state for slot_deform_cmpr_tuple */ - TableAmType tts_tupslotTableAm; /* slots's tuple table type */ + } TupleTableSlot; #define TTS_HAS_PHYSICAL_TUPLE(slot) ((slot)->tts_tuple != NULL && (slot)->tts_tuple != &((slot)->tts_minhdr)) + +#define TTS_TABLEAM_IS_HEAP(slot) ((slot)->tts_tam_ops == TableAmHeap) +#define TTS_TABLEAM_IS_USTORE(slot) ((slot)->tts_tam_ops == TableAmUstore) + /* * TupIsNull -- is a TupleTableSlot empty? */ -#define TupIsNull(slot) ((slot) == NULL || (slot)->tts_isempty) +#define TupIsNull(slot) ((slot) == NULL || TTS_EMPTY(slot)) /* in executor/execTuples.c */ -extern TupleTableSlot* MakeTupleTableSlot(bool has_tuple_mcxt = false, TableAmType tupslotTableAm = TAM_HEAP); -extern TupleTableSlot* ExecAllocTableSlot(List** tupleTable, TableAmType tupslotTableAm = TAM_HEAP); +extern TupleTableSlot* MakeTupleTableSlot(bool has_tuple_mcxt = false, const TableAmRoutine* tam_ops = TableAmHeap); +extern TupleTableSlot* ExecAllocTableSlot(List** tupleTable, const TableAmRoutine* tam_ops = TableAmHeap); extern void ExecResetTupleTable(List* tupleTable, bool shouldFree); -extern TupleTableSlot* MakeSingleTupleTableSlot(TupleDesc tupdesc, bool allocSlotCxt = false, TableAmType tupslotTableAm = TAM_HEAP); +extern TupleTableSlot* MakeSingleTupleTableSlot(TupleDesc tupdesc, bool allocSlotCxt = false, const TableAmRoutine* tam_ops = TableAmHeap); extern void ExecDropSingleTupleTableSlot(TupleTableSlot* slot); extern void ExecSetSlotDescriptor(TupleTableSlot* slot, TupleDesc tupdesc); extern TupleTableSlot* ExecStoreTuple(Tuple tuple, TupleTableSlot* slot, Buffer buffer, bool shouldFree); diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index 70b5cd11b..7a0c55919 100755 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -2647,7 +2647,7 @@ typedef struct GroupingIdExprState { } \ } while (0) -extern TupleTableSlot* ExecMakeTupleSlot(Tuple tuple, TableScanDesc tableScan, TupleTableSlot* slot, TableAmType tableAm); +extern TupleTableSlot* ExecMakeTupleSlot(Tuple tuple, TableScanDesc tableScan, TupleTableSlot* slot, const TableAmRoutine* tam_ops); /* * When the global partition index is used for bitmap scanning,
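Editorial note (illustrative, not part of the patch): the tuptable.h hunk above folds the old per-state booleans (tts_isempty, tts_shouldFree, tts_shouldFreeMin, tts_slow, tts_shouldFreeRow) into a single uint16 tts_flags guarded by TTS_FLAG_* bit masks and TTS_*(slot) test macros; the rest of the diff then converts assignments such as slot->tts_isempty = false into the clear/set idioms slot->tts_flags &= ~TTS_FLAG_EMPTY and slot->tts_flags |= TTS_FLAG_SHOULDFREE. Below is a minimal, self-contained C++ sketch of that idiom; the DemoSlot type, demo_store function, and DEMO_* macros are hypothetical stand-ins for the real slot code.

#include <cassert>
#include <cstdint>

struct DemoSlot {
    std::uint16_t tts_flags;                    /* replaces the old per-state booleans */
};

/* bit numbering starts at 1 to follow the patch's TTS_FLAG_EMPTY = (1 << 1) */
#define DEMO_FLAG_EMPTY       (1 << 1)          /* was tts_isempty */
#define DEMO_FLAG_SHOULDFREE  (1 << 2)          /* was tts_shouldFree */
#define DEMO_EMPTY(slot)      (((slot)->tts_flags & DEMO_FLAG_EMPTY) != 0)
#define DEMO_SHOULDFREE(slot) (((slot)->tts_flags & DEMO_FLAG_SHOULDFREE) != 0)

/* mirrors the converted store paths: clear EMPTY, then set or clear SHOULDFREE */
static void demo_store(DemoSlot *slot, bool should_free)
{
    slot->tts_flags &= ~DEMO_FLAG_EMPTY;        /* slot now holds data */
    if (should_free)
        slot->tts_flags |= DEMO_FLAG_SHOULDFREE;    /* slot owns the tuple */
    else
        slot->tts_flags &= ~DEMO_FLAG_SHOULDFREE;
}

int main()
{
    DemoSlot s = { DEMO_FLAG_EMPTY };           /* freshly created: only EMPTY set */
    demo_store(&s, true);
    assert(!DEMO_EMPTY(&s) && DEMO_SHOULDFREE(&s));
    return 0;
}

Packing the states into one word keeps the slot smaller and lets a store path reset several states in a couple of mask operations, at the cost of the slightly more verbose set/clear syntax seen throughout the converted call sites.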