Revise TupleTableSlot code to avoid unnecessary construction and disassembly of tuples when passing data up through multiple plan nodes. A slot can now hold either a normal "physical" HeapTuple, or a "virtual" tuple consisting of Datum/isnull arrays. Upper plan levels can usually just copy the Datum arrays, avoiding heap_formtuple() and possible subsequent nocachegetattr() calls to extract the data again. This work extends Atsushi Ogawa's earlier patch, which provided the key idea of adding Datum arrays to TupleTableSlots. (I believe however that something like this was foreseen way back in Berkeley days --- see the old comment on ExecProject.) A test case involving many levels of join of fairly wide tables (about 80 columns altogether) showed about 3x overall speedup, though simple queries will probably not be helped very much. I have also duplicated some code in heaptuple.c in order to provide versions of heap_formtuple and friends that use "bool" arrays to indicate null attributes, instead of the old convention of "char" arrays containing either 'n' or ' '. This provides a better match to the convention used by ExecEvalExpr. While I have not made a concerted effort to get rid of uses of the old routines, I think they should be deprecated and eventually removed.
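A minimal sketch (not part of nodeMaterial.c) of the bool-array null convention the commit message describes, contrasted with the older heap_formtuple() convention of 'n'/' ' chars. The helper name is hypothetical; the descriptor setup assumes the 8.1-era CreateTemplateTupleDesc(natts, hasoid) signature, and header locations vary by branch (heap_form_tuple is reached via access/heapam.h here, access/htup_details.h on newer releases).

/*
 * Illustrative only: needs postgres.h, access/heapam.h (or
 * access/htup_details.h on newer branches), and catalog/pg_type.h.
 */
static HeapTuple
form_example_tuple(void)            /* hypothetical helper */
{
    TupleDesc   tupdesc = CreateTemplateTupleDesc(2, false);
    Datum       values[2];
    bool        isnull[2];

    TupleDescInitEntry(tupdesc, (AttrNumber) 1, "id", INT4OID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 2, "note", TEXTOID, -1, 0);

    values[0] = Int32GetDatum(42);
    isnull[0] = false;              /* new style: plain bool ...          */
    values[1] = (Datum) 0;
    isnull[1] = true;               /* ... instead of 'n' / ' ' chars     */

    /* bool-array constructor introduced alongside the old heap_formtuple */
    return heap_form_tuple(tupdesc, values, isnull);
}

ExecStoreTuple(), used below, then puts such a physical tuple into a slot; per the commit message, the virtual-tuple path lets upper plan nodes skip building the physical tuple at all.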
/*-------------------------------------------------------------------------
 *
 * nodeMaterial.c
 *    Routines to handle materialization nodes.
 *
 * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.49 2005/03/16 21:38:07 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
/*
 * INTERFACE ROUTINES
 *    ExecMaterial      - materialize the result of a subplan
 *    ExecInitMaterial  - initialize node and subnodes
 *    ExecEndMaterial   - shutdown node and subnodes
 *
 */
#include "postgres.h"

#include "access/heapam.h"
#include "executor/executor.h"
#include "executor/nodeMaterial.h"
#include "miscadmin.h"
#include "utils/tuplestore.h"

/* ----------------------------------------------------------------
 *    ExecMaterial
 *
 *    As long as we are at the end of the data collected in the tuplestore,
 *    we collect one new row from the subplan on each call, and stash it
 *    aside in the tuplestore before returning it.  The tuplestore is
 *    only read if we are asked to scan backwards, rescan, or mark/restore.
 *
 * ----------------------------------------------------------------
 */
TupleTableSlot *                /* result tuple from subplan */
ExecMaterial(MaterialState *node)
{
    EState     *estate;
    ScanDirection dir;
    bool        forward;
    Tuplestorestate *tuplestorestate;
    HeapTuple   heapTuple = NULL;
    bool        should_free = false;
    bool        eof_tuplestore;
    TupleTableSlot *slot;

    /*
     * get state info from node
     */
    estate = node->ss.ps.state;
    dir = estate->es_direction;
    forward = ScanDirectionIsForward(dir);
    tuplestorestate = (Tuplestorestate *) node->tuplestorestate;

    /*
     * If first time through, initialize the tuplestore.
     */
    if (tuplestorestate == NULL)
    {
        tuplestorestate = tuplestore_begin_heap(true, false, work_mem);

        node->tuplestorestate = (void *) tuplestorestate;
    }

    /*
     * If we are not at the end of the tuplestore, or are going backwards,
     * try to fetch a tuple from tuplestore.
     */
    eof_tuplestore = tuplestore_ateof(tuplestorestate);

    if (!forward && eof_tuplestore)
    {
        if (!node->eof_underlying)
        {
            /*
             * When reversing direction at tuplestore EOF, the first
             * getheaptuple call will fetch the last-added tuple; but we
             * want to return the one before that, if possible.  So do an
             * extra fetch.
             */
            heapTuple = tuplestore_getheaptuple(tuplestorestate,
                                                forward,
                                                &should_free);
            if (heapTuple == NULL)
                return NULL;    /* the tuplestore must be empty */
            if (should_free)
                heap_freetuple(heapTuple);
        }
        eof_tuplestore = false;
    }

    if (!eof_tuplestore)
    {
        heapTuple = tuplestore_getheaptuple(tuplestorestate,
                                            forward,
                                            &should_free);
        if (heapTuple == NULL && forward)
            eof_tuplestore = true;
    }

    /*
     * If necessary, try to fetch another row from the subplan.
     *
     * Note: the eof_underlying state variable exists to short-circuit
     * further subplan calls.  It's not optional, unfortunately, because
     * some plan node types are not robust about being called again when
     * they've already returned NULL.
     */
    if (eof_tuplestore && !node->eof_underlying)
    {
        PlanState  *outerNode;
        TupleTableSlot *outerslot;

        /*
         * We can only get here with forward==true, so no need to worry
         * about which direction the subplan will go.
         */
        outerNode = outerPlanState(node);
        outerslot = ExecProcNode(outerNode);
        if (TupIsNull(outerslot))
        {
            node->eof_underlying = true;
            return NULL;
        }
        heapTuple = ExecFetchSlotTuple(outerslot);
        should_free = false;

        /*
         * Append returned tuple to tuplestore, too.  NOTE: because the
         * tuplestore is certainly in EOF state, its read position will
         * move forward over the added tuple.  This is what we want.
         */
        tuplestore_puttuple(tuplestorestate, (void *) heapTuple);
    }

    /*
     * Return the obtained tuple, if any.
     */
    slot = (TupleTableSlot *) node->ss.ps.ps_ResultTupleSlot;
    if (heapTuple)
        return ExecStoreTuple(heapTuple, slot, InvalidBuffer, should_free);
    else
        return ExecClearTuple(slot);
}

/* ----------------------------------------------------------------
 *    ExecInitMaterial
 * ----------------------------------------------------------------
 */
MaterialState *
ExecInitMaterial(Material *node, EState *estate)
{
    MaterialState *matstate;
    Plan       *outerPlan;

    /*
     * create state structure
     */
    matstate = makeNode(MaterialState);
    matstate->ss.ps.plan = (Plan *) node;
    matstate->ss.ps.state = estate;

    matstate->tuplestorestate = NULL;
    matstate->eof_underlying = false;

    /*
     * Miscellaneous initialization
     *
     * Materialization nodes don't need ExprContexts because they never call
     * ExecQual or ExecProject.
     */

#define MATERIAL_NSLOTS 2

    /*
     * tuple table initialization
     *
     * material nodes only return tuples from their materialized relation.
     */
    ExecInitResultTupleSlot(estate, &matstate->ss.ps);
    ExecInitScanTupleSlot(estate, &matstate->ss);

    /*
     * initialize child nodes
     */
    outerPlan = outerPlan(node);
    outerPlanState(matstate) = ExecInitNode(outerPlan, estate);

    /*
     * initialize tuple type.  no need to initialize projection info
     * because this node doesn't do projections.
     */
    ExecAssignResultTypeFromOuterPlan(&matstate->ss.ps);
    ExecAssignScanTypeFromOuterPlan(&matstate->ss);
    matstate->ss.ps.ps_ProjInfo = NULL;

    return matstate;
}

int
ExecCountSlotsMaterial(Material *node)
{
    return ExecCountSlotsNode(outerPlan((Plan *) node)) +
        ExecCountSlotsNode(innerPlan((Plan *) node)) +
        MATERIAL_NSLOTS;
}

/* ----------------------------------------------------------------
 *    ExecEndMaterial
 * ----------------------------------------------------------------
 */
void
ExecEndMaterial(MaterialState *node)
{
    /*
     * clean out the tuple table
     */
    ExecClearTuple(node->ss.ss_ScanTupleSlot);

    /*
     * Release tuplestore resources
     */
    if (node->tuplestorestate != NULL)
        tuplestore_end((Tuplestorestate *) node->tuplestorestate);
    node->tuplestorestate = NULL;

    /*
     * shut down the subplan
     */
    ExecEndNode(outerPlanState(node));
}

/* ----------------------------------------------------------------
 *    ExecMaterialMarkPos
 *
 *    Calls tuplestore to save the current position in the stored file.
 * ----------------------------------------------------------------
 */
void
ExecMaterialMarkPos(MaterialState *node)
{
    /*
     * if we haven't materialized yet, just return.
     */
    if (!node->tuplestorestate)
        return;

    tuplestore_markpos((Tuplestorestate *) node->tuplestorestate);
}

/* ----------------------------------------------------------------
 *    ExecMaterialRestrPos
 *
 *    Calls tuplestore to restore the last saved file position.
 * ----------------------------------------------------------------
 */
void
ExecMaterialRestrPos(MaterialState *node)
{
    /*
     * if we haven't materialized yet, just return.
     */
    if (!node->tuplestorestate)
        return;

    /*
     * restore the scan to the previously marked position
     */
    tuplestore_restorepos((Tuplestorestate *) node->tuplestorestate);
}

/* ----------------------------------------------------------------
 *    ExecMaterialReScan
 *
 *    Rescans the materialized relation.
 * ----------------------------------------------------------------
 */
void
ExecMaterialReScan(MaterialState *node, ExprContext *exprCtxt)
{
    /*
     * If we haven't materialized yet, just return.  If the outer plan's
     * chgParam is not NULL, it will be re-scanned by ExecProcNode;
     * otherwise there is no reason to re-scan it at all.
     */
    if (!node->tuplestorestate)
        return;

    ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);

    /*
     * If the subnode is to be rescanned then we forget previous stored
     * results; we have to re-read the subplan and re-store.
     *
     * Otherwise we can just rewind and rescan the stored output.  The state
     * of the subnode does not change.
     */
    if (((PlanState *) node)->lefttree->chgParam != NULL)
    {
        tuplestore_end((Tuplestorestate *) node->tuplestorestate);
        node->tuplestorestate = NULL;
        node->eof_underlying = false;
    }
    else
        tuplestore_rescan((Tuplestorestate *) node->tuplestorestate);
}