#
#
# add_dir "tests/test_a_merge_8"
#
# add_file "tests/test_a_merge_8/__driver__.lua"
#  content [fbeb6c265cb40e96fbe3760f4c59e3acc4dc025b]
#
# add_file "tests/test_a_merge_8/correct"
#  content [5f0b7aad22f9002748fba840d9bc612413887b8c]
#
# add_file "tests/test_a_merge_8/left"
#  content [3ad78dd8d181bd08c699214692923da58cdcdda4]
#
# add_file "tests/test_a_merge_8/parent"
#  content [300798a2fc26fbf1e3b63d2e313a823d9226f183]
#
# add_file "tests/test_a_merge_8/right"
#  content [13cb3ac0b7c84eaf7aa7d8ba22670bd3cd22060a]
#
# patch "ChangeLog"
#  from [76b87bded2668c398a2b7b880a8dce10ee2ac269]
#    to [3d7447330bb3e54087255fc337ff8cba0bc43741]
#
# patch "testsuite.lua"
#  from [76181ad2963afcf0b47c431e129a2dd7b7c1ed83]
#    to [3cd1fd512cde3e786e1b98e8f173226c259a2a5f]
#
============================================================
--- tests/test_a_merge_8/__driver__.lua	fbeb6c265cb40e96fbe3760f4c59e3acc4dc025b
+++ tests/test_a_merge_8/__driver__.lua	fbeb6c265cb40e96fbe3760f4c59e3acc4dc025b
@@ -0,0 +1,29 @@
+
+mtn_setup()
+
+-- This tests a real world case from PostgreSQL
+
+-- kdiff3 and merge(1) can handle this merge correctly.
+
+check(get("parent"))
+check(get("left"))
+check(get("right"))
+check(get("correct"))
+
+copy("parent", "testfile")
+check(mtn("add", "testfile"), 0, false, false)
+commit()
+parent = base_revision()
+
+copy("left", "testfile")
+commit()
+
+revert_to(parent)
+
+copy("right", "testfile")
+commit()
+
+check(mtn("--branch=testbranch", "merge"), 0, false, false)
+
+check(mtn("update"), 0, false, false)
+check(samefile("testfile", "correct"))
============================================================
--- tests/test_a_merge_8/correct	5f0b7aad22f9002748fba840d9bc612413887b8c
+++ tests/test_a_merge_8/correct	5f0b7aad22f9002748fba840d9bc612413887b8c
@@ -0,0 +1,1792 @@
+#include "postgres.h"
+
+#include "access/heapam.h"
+#include "access/reloptions.h"
+#include "access/transam.h"
+#include "access/xact.h"
+#include "catalog/heap.h"
+#include "catalog/namespace.h"
+#include "catalog/toasting.h"
+#include "commands/tablespace.h"
+#include "commands/trigger.h"
+#include "executor/execdebug.h"
+#include "executor/instrument.h"
+#include "executor/nodeSubplan.h"
+#include "miscadmin.h"
+#include "optimizer/clauses.h"
+#include "parser/parse_clause.h"
+#include "parser/parsetree.h"
+#include "storage/smgr.h"
+#include "utils/acl.h"
+#include "utils/lsyscache.h"
+#include "utils/memutils.h"
+
+
+typedef struct evalPlanQual
+{
+ Index rti;
+ EState *estate;
+ PlanState *planstate;
+ struct evalPlanQual *next; /* stack of active PlanQual plans */
+ struct evalPlanQual *free; /* list of free PlanQual plans */
+} evalPlanQual;
+
+/* decls for local routines only used within this module */
+static void InitPlan(QueryDesc *queryDesc, int eflags);
+static void initResultRelInfo(ResultRelInfo *resultRelInfo,
+ Index resultRelationIndex,
+ List *rangeTable,
+ CmdType operation,
+ bool doInstrument);
+static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
+ CmdType operation,
+ long numberTuples,
+ ScanDirection direction,
+ DestReceiver *dest);
+static void ExecSelect(TupleTableSlot *slot,
+ DestReceiver *dest, EState *estate);
+static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
+ TupleTableSlot *planSlot,
+ DestReceiver *dest, EState *estate);
+static void ExecDelete(ItemPointer tupleid,
+ TupleTableSlot *planSlot,
+ DestReceiver *dest, EState *estate);
+static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
+ TupleTableSlot *planSlot,
+ DestReceiver
*dest, EState *estate); +static void ExecProcessReturning(ProjectionInfo *projectReturning, + TupleTableSlot *tupleSlot, + TupleTableSlot *planSlot, + DestReceiver *dest); +static TupleTableSlot *EvalPlanQualNext(EState *estate); +static void EndEvalPlanQual(EState *estate); +static void ExecCheckRTEPerms(RangeTblEntry *rte); +static void ExecCheckXactReadOnly(Query *parsetree); +static void EvalPlanQualStart(evalPlanQual *epq, EState *estate, + evalPlanQual *priorepq); +static void EvalPlanQualStop(evalPlanQual *epq); +static void OpenIntoRel(QueryDesc *queryDesc); +static void CloseIntoRel(QueryDesc *queryDesc); +static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo); +static void intorel_receive(TupleTableSlot *slot, DestReceiver *self); +static void intorel_shutdown(DestReceiver *self); +static void intorel_destroy(DestReceiver *self); + +/* end of local decls */ + + +/* ---------------------------------------------------------------- + * ExecutorStart + * + * This routine must be called at the beginning of any execution of any + * query plan + * + * Takes a QueryDesc previously created by CreateQueryDesc (it's not real + * clear why we bother to separate the two functions, but...). The tupDesc + * field of the QueryDesc is filled in to describe the tuples that will be + * returned, and the internal fields (estate and planstate) are set up. + * + * eflags contains flag bits as described in executor.h. + * + * NB: the CurrentMemoryContext when this is called will become the parent + * of the per-query context used for this Executor invocation. + * ---------------------------------------------------------------- + */ +void +ExecutorStart(QueryDesc *queryDesc, int eflags) +{ + EState *estate; + MemoryContext oldcontext; + + /* sanity checks: queryDesc must not be started already */ + Assert(queryDesc != NULL); + Assert(queryDesc->estate == NULL); + + /* + * If the transaction is read-only, we need to check if any writes are + * planned to non-temporary tables. EXPLAIN is considered read-only. + */ + if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY)) + ExecCheckXactReadOnly(queryDesc->parsetree); + + /* + * Build EState, switch into per-query memory context for startup. + */ + estate = CreateExecutorState(); + queryDesc->estate = estate; + + oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); + + /* + * Fill in parameters, if any, from queryDesc + */ + estate->es_param_list_info = queryDesc->params; + + if (queryDesc->plantree->nParamExec > 0) + estate->es_param_exec_vals = (ParamExecData *) + palloc0(queryDesc->plantree->nParamExec * sizeof(ParamExecData)); + + /* + * Copy other important information into the EState + */ + estate->es_snapshot = queryDesc->snapshot; + estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot; + estate->es_instrument = queryDesc->doInstrument; + + /* + * Initialize the plan state tree + */ + InitPlan(queryDesc, eflags); + + MemoryContextSwitchTo(oldcontext); +} + +/* ---------------------------------------------------------------- + * ExecutorRun + * + * This is the main routine of the executor module. It accepts + * the query descriptor from the traffic cop and executes the + * query plan. + * + * ExecutorStart must have been called already. + * + * If direction is NoMovementScanDirection then nothing is done + * except to start up/shut down the destination. Otherwise, + * we retrieve up to 'count' tuples in the specified direction. 
+ * + * Note: count = 0 is interpreted as no portal limit, i.e., run to + * completion. + * + * ---------------------------------------------------------------- + */ +TupleTableSlot * +ExecutorRun(QueryDesc *queryDesc, + ScanDirection direction, long count) +{ + EState *estate; + CmdType operation; + DestReceiver *dest; + bool sendTuples; + TupleTableSlot *result; + MemoryContext oldcontext; + + /* sanity checks */ + Assert(queryDesc != NULL); + + estate = queryDesc->estate; + + Assert(estate != NULL); + + /* + * Switch into per-query memory context + */ + oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); + + /* + * extract information from the query descriptor and the query feature. + */ + operation = queryDesc->operation; + dest = queryDesc->dest; + + /* + * startup tuple receiver, if we will be emitting tuples + */ + estate->es_processed = 0; + estate->es_lastoid = InvalidOid; + + sendTuples = (operation == CMD_SELECT || + queryDesc->parsetree->returningList); + + if (sendTuples) + (*dest->rStartup) (dest, operation, queryDesc->tupDesc); + + /* + * run plan + */ + if (ScanDirectionIsNoMovement(direction)) + result = NULL; + else + result = ExecutePlan(estate, + queryDesc->planstate, + operation, + count, + direction, + dest); + + /* + * shutdown tuple receiver, if we started it + */ + if (sendTuples) + (*dest->rShutdown) (dest); + + MemoryContextSwitchTo(oldcontext); + + return result; +} + +/* ---------------------------------------------------------------- + * ExecutorEnd + * + * This routine must be called at the end of execution of any + * query plan + * ---------------------------------------------------------------- + */ +void +ExecutorEnd(QueryDesc *queryDesc) +{ + EState *estate; + MemoryContext oldcontext; + + /* sanity checks */ + Assert(queryDesc != NULL); + + estate = queryDesc->estate; + + Assert(estate != NULL); + + /* + * Switch into per-query memory context to run ExecEndPlan + */ + oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); + + ExecEndPlan(queryDesc->planstate, estate); + + /* + * Close the SELECT INTO relation if any + */ + if (estate->es_select_into) + CloseIntoRel(queryDesc); + + /* + * Must switch out of context before destroying it + */ + MemoryContextSwitchTo(oldcontext); + + /* + * Release EState and per-query memory context. This should release + * everything the executor has allocated. + */ + FreeExecutorState(estate); + + /* Reset queryDesc fields that no longer point to anything */ + queryDesc->tupDesc = NULL; + queryDesc->estate = NULL; + queryDesc->planstate = NULL; +} + +/* ---------------------------------------------------------------- + * ExecutorRewind + * + * This routine may be called on an open queryDesc to rewind it + * to the start. + * ---------------------------------------------------------------- + */ +void +ExecutorRewind(QueryDesc *queryDesc) +{ + EState *estate; + MemoryContext oldcontext; + + /* sanity checks */ + Assert(queryDesc != NULL); + + estate = queryDesc->estate; + + Assert(estate != NULL); + + /* It's probably not sensible to rescan updating queries */ + Assert(queryDesc->operation == CMD_SELECT); + + /* + * Switch into per-query memory context + */ + oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); + + /* + * rescan plan + */ + ExecReScan(queryDesc->planstate, NULL); + + MemoryContextSwitchTo(oldcontext); +} + + +/* + * ExecCheckRTPerms + * Check access permissions for all relations listed in a range table. 
+ */ +void +ExecCheckRTPerms(List *rangeTable) +{ + ListCell *l; + + foreach(l, rangeTable) + { + RangeTblEntry *rte = lfirst(l); + + ExecCheckRTEPerms(rte); + } +} + +/* + * ExecCheckRTEPerms + * Check access permissions for a single RTE. + */ +static void +ExecCheckRTEPerms(RangeTblEntry *rte) +{ + AclMode requiredPerms; + Oid relOid; + Oid userid; + + /* + * Only plain-relation RTEs need to be checked here. Subquery RTEs are + * checked by ExecInitSubqueryScan if the subquery is still a separate + * subquery --- if it's been pulled up into our query level then the RTEs + * are in our rangetable and will be checked here. Function RTEs are + * checked by init_fcache when the function is prepared for execution. + * Join and special RTEs need no checks. + */ + if (rte->rtekind != RTE_RELATION) + return; + + /* + * No work if requiredPerms is empty. + */ + requiredPerms = rte->requiredPerms; + if (requiredPerms == 0) + return; + + relOid = rte->relid; + + /* + * userid to check as: current user unless we have a setuid indication. + * + * Note: GetUserId() is presently fast enough that there's no harm in + * calling it separately for each RTE. If that stops being true, we could + * call it once in ExecCheckRTPerms and pass the userid down from there. + * But for now, no need for the extra clutter. + */ + userid = rte->checkAsUser ? rte->checkAsUser : GetUserId(); + + /* + * We must have *all* the requiredPerms bits, so use aclmask not aclcheck. + */ + if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL) + != requiredPerms) + aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS, + get_rel_name(relOid)); +} + +/* + * Check that the query does not imply any writes to non-temp tables. + */ +static void +ExecCheckXactReadOnly(Query *parsetree) +{ + ListCell *l; + + /* + * CREATE TABLE AS or SELECT INTO? + * + * XXX should we allow this if the destination is temp? + */ + if (parsetree->into != NULL) + goto fail; + + /* Fail if write permissions are requested on any non-temp table */ + foreach(l, parsetree->rtable) + { + RangeTblEntry *rte = lfirst(l); + + if (rte->rtekind == RTE_SUBQUERY) + { + ExecCheckXactReadOnly(rte->subquery); + continue; + } + + if (rte->rtekind != RTE_RELATION) + continue; + + if ((rte->requiredPerms & (~ACL_SELECT)) == 0) + continue; + + if (isTempNamespace(get_rel_namespace(rte->relid))) + continue; + + goto fail; + } + + return; + +fail: + ereport(ERROR, + (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION), + errmsg("transaction is read-only"))); +} + + +/* ---------------------------------------------------------------- + * InitPlan + * + * Initializes the query plan: open files, allocate storage + * and start up the rule manager + * ---------------------------------------------------------------- + */ +static void +InitPlan(QueryDesc *queryDesc, int eflags) +{ + CmdType operation = queryDesc->operation; + Query *parseTree = queryDesc->parsetree; + Plan *plan = queryDesc->plantree; + EState *estate = queryDesc->estate; + PlanState *planstate; + List *rangeTable; + TupleDesc tupType; + ListCell *l; + + /* + * Do permissions checks. It's sufficient to examine the query's top + * rangetable here --- subplan RTEs will be checked during + * ExecInitSubPlan(). 
+ */ + ExecCheckRTPerms(parseTree->rtable); + + /* + * get information from query descriptor + */ + rangeTable = parseTree->rtable; + + /* + * initialize the node's execution state + */ + estate->es_range_table = rangeTable; + + /* + * if there is a result relation, initialize result relation stuff + */ + if (parseTree->resultRelation) + { + List *resultRelations = parseTree->resultRelations; + int numResultRelations; + ResultRelInfo *resultRelInfos; + + if (resultRelations != NIL) + { + /* + * Multiple result relations (due to inheritance) + * parseTree->resultRelations identifies them all + */ + ResultRelInfo *resultRelInfo; + + numResultRelations = list_length(resultRelations); + resultRelInfos = (ResultRelInfo *) + palloc(numResultRelations * sizeof(ResultRelInfo)); + resultRelInfo = resultRelInfos; + foreach(l, resultRelations) + { + initResultRelInfo(resultRelInfo, + lfirst_int(l), + rangeTable, + operation, + estate->es_instrument); + resultRelInfo++; + } + } + else + { + /* + * Single result relation identified by parseTree->resultRelation + */ + numResultRelations = 1; + resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo)); + initResultRelInfo(resultRelInfos, + parseTree->resultRelation, + rangeTable, + operation, + estate->es_instrument); + } + + estate->es_result_relations = resultRelInfos; + estate->es_num_result_relations = numResultRelations; + /* Initialize to first or only result rel */ + estate->es_result_relation_info = resultRelInfos; + } + else + { + /* + * if no result relation, then set state appropriately + */ + estate->es_result_relations = NULL; + estate->es_num_result_relations = 0; + estate->es_result_relation_info = NULL; + } + + /* + * Detect whether we're doing SELECT INTO. If so, set the es_into_oids + * flag appropriately so that the plan tree will be initialized with the + * correct tuple descriptors. (Other SELECT INTO stuff comes later.) + */ + estate->es_select_into = false; + if (operation == CMD_SELECT && parseTree->into != NULL) + { + estate->es_select_into = true; + estate->es_into_oids = interpretOidsOption(parseTree->intoOptions); + } + + /* + * Have to lock relations selected FOR UPDATE/FOR SHARE before we + * initialize the plan tree, else we'd be doing a lock upgrade. + * While we are at it, build the ExecRowMark list. + */ + estate->es_rowMarks = NIL; + foreach(l, parseTree->rowMarks) + { + RowMarkClause *rc = (RowMarkClause *) lfirst(l); + Oid relid = getrelid(rc->rti, rangeTable); + Relation relation; + ExecRowMark *erm; + + relation = heap_open(relid, RowShareLock); + erm = (ExecRowMark *) palloc(sizeof(ExecRowMark)); + erm->relation = relation; + erm->rti = rc->rti; + erm->forUpdate = rc->forUpdate; + erm->noWait = rc->noWait; + /* We'll set up ctidAttno below */ + erm->ctidAttNo = InvalidAttrNumber; + estate->es_rowMarks = lappend(estate->es_rowMarks, erm); + } + + /* + * initialize the executor "tuple" table. We need slots for all the plan + * nodes, plus possibly output slots for the junkfilter(s). At this point + * we aren't sure if we need junkfilters, so just add slots for them + * unconditionally. Also, if it's not a SELECT, set up a slot for use for + * trigger output tuples. 
+ */ + { + int nSlots = ExecCountSlotsNode(plan); + + if (parseTree->resultRelations != NIL) + nSlots += list_length(parseTree->resultRelations); + else + nSlots += 1; + if (operation != CMD_SELECT) + nSlots++; /* for es_trig_tuple_slot */ + if (parseTree->returningLists) + nSlots++; /* for RETURNING projection */ + + estate->es_tupleTable = ExecCreateTupleTable(nSlots); + + if (operation != CMD_SELECT) + estate->es_trig_tuple_slot = + ExecAllocTableSlot(estate->es_tupleTable); + } + + /* mark EvalPlanQual not active */ + estate->es_topPlan = plan; + estate->es_evalPlanQual = NULL; + estate->es_evTupleNull = NULL; + estate->es_evTuple = NULL; + estate->es_useEvalPlan = false; + + /* + * initialize the private state information for all the nodes in the query + * tree. This opens files, allocates storage and leaves us ready to start + * processing tuples. + */ + planstate = ExecInitNode(plan, estate, eflags); + + /* + * Get the tuple descriptor describing the type of tuples to return. (this + * is especially important if we are creating a relation with "SELECT + * INTO") + */ + tupType = ExecGetResultType(planstate); + + /* + * Initialize the junk filter if needed. SELECT and INSERT queries need a + * filter if there are any junk attrs in the tlist. INSERT and SELECT + * INTO also need a filter if the plan may return raw disk tuples (else + * heap_insert will be scribbling on the source relation!). UPDATE and + * DELETE always need a filter, since there's always a junk 'ctid' + * attribute present --- no need to look first. + */ + { + bool junk_filter_needed = false; + ListCell *tlist; + + switch (operation) + { + case CMD_SELECT: + case CMD_INSERT: + foreach(tlist, plan->targetlist) + { + TargetEntry *tle = (TargetEntry *) lfirst(tlist); + + if (tle->resjunk) + { + junk_filter_needed = true; + break; + } + } + if (!junk_filter_needed && + (operation == CMD_INSERT || estate->es_select_into) && + ExecMayReturnRawTuples(planstate)) + junk_filter_needed = true; + break; + case CMD_UPDATE: + case CMD_DELETE: + junk_filter_needed = true; + break; + default: + break; + } + + if (junk_filter_needed) + { + /* + * If there are multiple result relations, each one needs its own + * junk filter. Note this is only possible for UPDATE/DELETE, so + * we can't be fooled by some needing a filter and some not. + */ + if (parseTree->resultRelations != NIL) + { + PlanState **appendplans; + int as_nplans; + ResultRelInfo *resultRelInfo; + int i; + + /* Top plan had better be an Append here. */ + Assert(IsA(plan, Append)); + Assert(((Append *) plan)->isTarget); + Assert(IsA(planstate, AppendState)); + appendplans = ((AppendState *) planstate)->appendplans; + as_nplans = ((AppendState *) planstate)->as_nplans; + Assert(as_nplans == estate->es_num_result_relations); + resultRelInfo = estate->es_result_relations; + for (i = 0; i < as_nplans; i++) + { + PlanState *subplan = appendplans[i]; + JunkFilter *j; + + j = ExecInitJunkFilter(subplan->plan->targetlist, + resultRelInfo->ri_RelationDesc->rd_att->tdhasoid, + ExecAllocTableSlot(estate->es_tupleTable)); + /* + * Since it must be UPDATE/DELETE, there had better be + * a "ctid" junk attribute in the tlist ... but ctid could + * be at a different resno for each result relation. + * We look up the ctid resnos now and save them in the + * junkfilters. 
+ */ + j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid"); + if (!AttributeNumberIsValid(j->jf_junkAttNo)) + elog(ERROR, "could not find junk ctid column"); + resultRelInfo->ri_junkFilter = j; + resultRelInfo++; + } + + /* + * Set active junkfilter too; at this point ExecInitAppend has + * already selected an active result relation... + */ + estate->es_junkFilter = + estate->es_result_relation_info->ri_junkFilter; + } + else + { + /* Normal case with just one JunkFilter */ + JunkFilter *j; + + j = ExecInitJunkFilter(planstate->plan->targetlist, + tupType->tdhasoid, + ExecAllocTableSlot(estate->es_tupleTable)); + estate->es_junkFilter = j; + if (estate->es_result_relation_info) + estate->es_result_relation_info->ri_junkFilter = j; + + if (operation == CMD_SELECT) + { + /* For SELECT, want to return the cleaned tuple type */ + tupType = j->jf_cleanTupType; + /* For SELECT FOR UPDATE/SHARE, find the ctid attrs now */ + foreach(l, estate->es_rowMarks) + { + ExecRowMark *erm = (ExecRowMark *) lfirst(l); + char resname[32]; + + snprintf(resname, sizeof(resname), "ctid%u", erm->rti); + erm->ctidAttNo = ExecFindJunkAttribute(j, resname); + if (!AttributeNumberIsValid(erm->ctidAttNo)) + elog(ERROR, "could not find junk \"%s\" column", + resname); + } + } + else if (operation == CMD_UPDATE || operation == CMD_DELETE) + { + /* For UPDATE/DELETE, find the ctid junk attr now */ + j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid"); + if (!AttributeNumberIsValid(j->jf_junkAttNo)) + elog(ERROR, "could not find junk ctid column"); + } + } + } + else + estate->es_junkFilter = NULL; + } + + /* + * Initialize RETURNING projections if needed. + */ + if (parseTree->returningLists) + { + TupleTableSlot *slot; + ExprContext *econtext; + ResultRelInfo *resultRelInfo; + + /* + * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case. + * We assume all the sublists will generate the same output tupdesc. + */ + tupType = ExecTypeFromTL((List *) linitial(parseTree->returningLists), + false); + + /* Set up a slot for the output of the RETURNING projection(s) */ + slot = ExecAllocTableSlot(estate->es_tupleTable); + ExecSetSlotDescriptor(slot, tupType); + /* Need an econtext too */ + econtext = CreateExprContext(estate); + + /* + * Build a projection for each result rel. Note that any SubPlans in + * the RETURNING lists get attached to the topmost plan node. + */ + Assert(list_length(parseTree->returningLists) == estate->es_num_result_relations); + resultRelInfo = estate->es_result_relations; + foreach(l, parseTree->returningLists) + { + List *rlist = (List *) lfirst(l); + List *rliststate; + + rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate); + resultRelInfo->ri_projectReturning = + ExecBuildProjectionInfo(rliststate, econtext, slot, + resultRelInfo->ri_RelationDesc->rd_att); + resultRelInfo++; + } + + /* + * Because we already ran ExecInitNode() for the top plan node, any + * subplans we just attached to it won't have been initialized; so we + * have to do it here. (Ugly, but the alternatives seem worse.) + */ + foreach(l, planstate->subPlan) + { + SubPlanState *sstate = (SubPlanState *) lfirst(l); + + Assert(IsA(sstate, SubPlanState)); + if (sstate->planstate == NULL) /* already inited? */ + ExecInitSubPlan(sstate, estate, eflags); + } + } + + queryDesc->tupDesc = tupType; + queryDesc->planstate = planstate; + + /* + * If doing SELECT INTO, initialize the "into" relation. We must wait + * till now so we have the "clean" result tuple type to create the new + * table from. 
+ * + * If EXPLAIN, skip creating the "into" relation. + */ + if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY)) + OpenIntoRel(queryDesc); +} + +/* + * Initialize ResultRelInfo data for one result relation + */ +static void +initResultRelInfo(ResultRelInfo *resultRelInfo, + Index resultRelationIndex, + List *rangeTable, + CmdType operation, + bool doInstrument) +{ + Oid resultRelationOid; + Relation resultRelationDesc; + + resultRelationOid = getrelid(resultRelationIndex, rangeTable); + resultRelationDesc = heap_open(resultRelationOid, RowExclusiveLock); + + switch (resultRelationDesc->rd_rel->relkind) + { + case RELKIND_SEQUENCE: + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot change sequence \"%s\"", + RelationGetRelationName(resultRelationDesc)))); + break; + case RELKIND_TOASTVALUE: + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot change TOAST relation \"%s\"", + RelationGetRelationName(resultRelationDesc)))); + break; + case RELKIND_VIEW: + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot change view \"%s\"", + RelationGetRelationName(resultRelationDesc)))); + break; + } + + MemSet(resultRelInfo, 0, sizeof(ResultRelInfo)); + resultRelInfo->type = T_ResultRelInfo; + resultRelInfo->ri_RangeTableIndex = resultRelationIndex; + resultRelInfo->ri_RelationDesc = resultRelationDesc; + resultRelInfo->ri_NumIndices = 0; + resultRelInfo->ri_IndexRelationDescs = NULL; + resultRelInfo->ri_IndexRelationInfo = NULL; + /* make a copy so as not to depend on relcache info not changing... */ + resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc); + if (resultRelInfo->ri_TrigDesc) + { + int n = resultRelInfo->ri_TrigDesc->numtriggers; + + resultRelInfo->ri_TrigFunctions = (FmgrInfo *) + palloc0(n * sizeof(FmgrInfo)); + if (doInstrument) + resultRelInfo->ri_TrigInstrument = InstrAlloc(n); + else + resultRelInfo->ri_TrigInstrument = NULL; + } + else + { + resultRelInfo->ri_TrigFunctions = NULL; + resultRelInfo->ri_TrigInstrument = NULL; + } + resultRelInfo->ri_ConstraintExprs = NULL; + resultRelInfo->ri_junkFilter = NULL; + resultRelInfo->ri_projectReturning = NULL; + + /* + * If there are indices on the result relation, open them and save + * descriptors in the result relation info, so that we can add new index + * entries for the tuples we add/update. We need not do this for a + * DELETE, however, since deletion doesn't affect indexes. + */ + if (resultRelationDesc->rd_rel->relhasindex && + operation != CMD_DELETE) + ExecOpenIndices(resultRelInfo); +} + +/* + * ExecContextForcesOids + * + * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO, + * we need to ensure that result tuples have space for an OID iff they are + * going to be stored into a relation that has OIDs. In other contexts + * we are free to choose whether to leave space for OIDs in result tuples + * (we generally don't want to, but we do if a physical-tlist optimization + * is possible). This routine checks the plan context and returns TRUE if the + * choice is forced, FALSE if the choice is not forced. In the TRUE case, + * *hasoids is set to the required value. + * + * One reason this is ugly is that all plan nodes in the plan tree will emit + * tuples with space for an OID, though we really only need the topmost node + * to do so. However, node types like Sort don't project new tuples but just + * return their inputs, and in those cases the requirement propagates down + * to the input node. 
Eventually we might make this code smart enough to + * recognize how far down the requirement really goes, but for now we just + * make all plan nodes do the same thing if the top level forces the choice. + * + * We assume that estate->es_result_relation_info is already set up to + * describe the target relation. Note that in an UPDATE that spans an + * inheritance tree, some of the target relations may have OIDs and some not. + * We have to make the decisions on a per-relation basis as we initialize + * each of the child plans of the topmost Append plan. + * + * SELECT INTO is even uglier, because we don't have the INTO relation's + * descriptor available when this code runs; we have to look aside at a + * flag set by InitPlan(). + */ +bool +ExecContextForcesOids(PlanState *planstate, bool *hasoids) +{ + if (planstate->state->es_select_into) + { + *hasoids = planstate->state->es_into_oids; + return true; + } + else + { + ResultRelInfo *ri = planstate->state->es_result_relation_info; + + if (ri != NULL) + { + Relation rel = ri->ri_RelationDesc; + + if (rel != NULL) + { + *hasoids = rel->rd_rel->relhasoids; + return true; + } + } + } + + return false; +} + +/* ---------------------------------------------------------------- + * ExecEndPlan + * + * Cleans up the query plan -- closes files and frees up storage + * + * NOTE: we are no longer very worried about freeing storage per se + * in this code; FreeExecutorState should be guaranteed to release all + * memory that needs to be released. What we are worried about doing + * is closing relations and dropping buffer pins. Thus, for example, + * tuple tables must be cleared or dropped to ensure pins are released. + * ---------------------------------------------------------------- + */ +void +ExecEndPlan(PlanState *planstate, EState *estate) +{ + ResultRelInfo *resultRelInfo; + int i; + ListCell *l; + + /* + * shut down any PlanQual processing we were doing + */ + if (estate->es_evalPlanQual != NULL) + EndEvalPlanQual(estate); + + /* + * shut down the node-type-specific query processing + */ + ExecEndNode(planstate); + + /* + * destroy the executor "tuple" table. + */ + ExecDropTupleTable(estate->es_tupleTable, true); + estate->es_tupleTable = NULL; + + /* + * close the result relation(s) if any, but hold locks until xact commit. + */ + resultRelInfo = estate->es_result_relations; + for (i = estate->es_num_result_relations; i > 0; i--) + { + /* Close indices and then the relation itself */ + ExecCloseIndices(resultRelInfo); + heap_close(resultRelInfo->ri_RelationDesc, NoLock); + resultRelInfo++; + } + + /* + * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks + */ + foreach(l, estate->es_rowMarks) + { + ExecRowMark *erm = lfirst(l); + + heap_close(erm->relation, NoLock); + } +} + +/* ---------------------------------------------------------------- + * ExecutePlan + * + * processes the query plan to retrieve 'numberTuples' tuples in the + * direction specified. + * + * Retrieves all tuples if numberTuples is 0 + * + * result is either a slot containing the last tuple in the case + * of a SELECT or NULL otherwise. 
+ * + * Note: the ctid attribute is a 'junk' attribute that is removed before the + * user can see it + * ---------------------------------------------------------------- + */ +static TupleTableSlot * +ExecutePlan(EState *estate, + PlanState *planstate, + CmdType operation, + long numberTuples, + ScanDirection direction, + DestReceiver *dest) +{ + JunkFilter *junkfilter; + TupleTableSlot *planSlot; + TupleTableSlot *slot; + ItemPointer tupleid = NULL; + ItemPointerData tuple_ctid; + long current_tuple_count; + TupleTableSlot *result; + + /* + * initialize local variables + */ + current_tuple_count = 0; + result = NULL; + + /* + * Set the direction. + */ + estate->es_direction = direction; + + /* + * Process BEFORE EACH STATEMENT triggers + */ + switch (operation) + { + case CMD_UPDATE: + ExecBSUpdateTriggers(estate, estate->es_result_relation_info); + break; + case CMD_DELETE: + ExecBSDeleteTriggers(estate, estate->es_result_relation_info); + break; + case CMD_INSERT: + ExecBSInsertTriggers(estate, estate->es_result_relation_info); + break; + default: + /* do nothing */ + break; + } + + /* + * Loop until we've processed the proper number of tuples from the plan. + */ + + for (;;) + { + /* Reset the per-output-tuple exprcontext */ + ResetPerTupleExprContext(estate); + + /* + * Execute the plan and obtain a tuple + */ +lnext: ; + if (estate->es_useEvalPlan) + { + planSlot = EvalPlanQualNext(estate); + if (TupIsNull(planSlot)) + planSlot = ExecProcNode(planstate); + } + else + planSlot = ExecProcNode(planstate); + + /* + * if the tuple is null, then we assume there is nothing more to + * process so we just return null... + */ + if (TupIsNull(planSlot)) + { + result = NULL; + break; + } + slot = planSlot; + + /* + * if we have a junk filter, then project a new tuple with the junk + * removed. + * + * Store this new "clean" tuple in the junkfilter's resultSlot. + * (Formerly, we stored it back over the "dirty" tuple, which is WRONG + * because that tuple slot has the wrong descriptor.) + * + * Also, extract all the junk information we need. + */ + if ((junkfilter = estate->es_junkFilter) != NULL) + { + Datum datum; + bool isNull; + + /* + * extract the 'ctid' junk attribute. + */ + if (operation == CMD_UPDATE || operation == CMD_DELETE) + { + datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo, + &isNull); + /* shouldn't ever get a null result... */ + if (isNull) + elog(ERROR, "ctid is NULL"); + + tupleid = (ItemPointer) DatumGetPointer(datum); + tuple_ctid = *tupleid; /* make sure we don't free the ctid!! */ + tupleid = &tuple_ctid; + } + + /* + * Process any FOR UPDATE or FOR SHARE locking requested. + */ + else if (estate->es_rowMarks != NIL) + { + ListCell *l; + + lmark: ; + foreach(l, estate->es_rowMarks) + { + ExecRowMark *erm = lfirst(l); + HeapTupleData tuple; + Buffer buffer; + ItemPointerData update_ctid; + TransactionId update_xmax; + TupleTableSlot *newSlot; + LockTupleMode lockmode; + HTSU_Result test; + + datum = ExecGetJunkAttribute(slot, + erm->ctidAttNo, + &isNull); + /* shouldn't ever get a null result... 
*/ + if (isNull) + elog(ERROR, "ctid is NULL"); + + tuple.t_self = *((ItemPointer) DatumGetPointer(datum)); + + if (erm->forUpdate) + lockmode = LockTupleExclusive; + else + lockmode = LockTupleShared; + + test = heap_lock_tuple(erm->relation, &tuple, &buffer, + &update_ctid, &update_xmax, + estate->es_snapshot->curcid, + lockmode, erm->noWait); + ReleaseBuffer(buffer); + switch (test) + { + case HeapTupleSelfUpdated: + /* treat it as deleted; do not process */ + goto lnext; + + case HeapTupleMayBeUpdated: + break; + + case HeapTupleUpdated: + if (IsXactIsoLevelSerializable) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + if (!ItemPointerEquals(&update_ctid, + &tuple.t_self)) + { + /* updated, so look at updated version */ + newSlot = EvalPlanQual(estate, + erm->rti, + &update_ctid, + update_xmax, + estate->es_snapshot->curcid); + if (!TupIsNull(newSlot)) + { + slot = planSlot = newSlot; + estate->es_useEvalPlan = true; + goto lmark; + } + } + + /* + * if tuple was deleted or PlanQual failed for + * updated tuple - we must not return this tuple! + */ + goto lnext; + + default: + elog(ERROR, "unrecognized heap_lock_tuple status: %u", + test); + return NULL; + } + } + } + + /* + * Create a new "clean" tuple with all junk attributes removed. We + * don't need to do this for DELETE, however (there will in fact + * be no non-junk attributes in a DELETE!) + */ + if (operation != CMD_DELETE) + slot = ExecFilterJunk(junkfilter, slot); + } + + /* + * now that we have a tuple, do the appropriate thing with it.. either + * return it to the user, add it to a relation someplace, delete it + * from a relation, or modify some of its attributes. + */ + switch (operation) + { + case CMD_SELECT: + ExecSelect(slot, dest, estate); + result = slot; + break; + + case CMD_INSERT: + ExecInsert(slot, tupleid, planSlot, dest, estate); + result = NULL; + break; + + case CMD_DELETE: + ExecDelete(tupleid, planSlot, dest, estate); + result = NULL; + break; + + case CMD_UPDATE: + ExecUpdate(slot, tupleid, planSlot, dest, estate); + result = NULL; + break; + + default: + elog(ERROR, "unrecognized operation code: %d", + (int) operation); + result = NULL; + break; + } + + /* + * check our tuple count.. if we've processed the proper number then + * quit, else loop again and process more tuples. Zero numberTuples + * means no limit. + */ + current_tuple_count++; + if (numberTuples && numberTuples == current_tuple_count) + break; + } + + /* + * Process AFTER EACH STATEMENT triggers + */ + switch (operation) + { + case CMD_UPDATE: + ExecASUpdateTriggers(estate, estate->es_result_relation_info); + break; + case CMD_DELETE: + ExecASDeleteTriggers(estate, estate->es_result_relation_info); + break; + case CMD_INSERT: + ExecASInsertTriggers(estate, estate->es_result_relation_info); + break; + default: + /* do nothing */ + break; + } + + /* + * here, result is either a slot containing a tuple in the case of a + * SELECT or NULL otherwise. + */ + return result; +} + +/* ---------------------------------------------------------------- + * ExecSelect + * + * SELECTs are easy.. we just pass the tuple to the appropriate + * output function. 
+ * ---------------------------------------------------------------- + */ +static void +ExecSelect(TupleTableSlot *slot, + DestReceiver *dest, + EState *estate) +{ + (*dest->receiveSlot) (slot, dest); + IncrRetrieved(); + (estate->es_processed)++; +} + +/* ---------------------------------------------------------------- + * ExecInsert + * + * INSERTs are trickier.. we have to insert the tuple into + * the base relation and insert appropriate tuples into the + * index relations. + * ---------------------------------------------------------------- + */ +static void +ExecInsert(TupleTableSlot *slot, + ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, + EState *estate) +{ + HeapTuple tuple; + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + Oid newId; + + /* + * get the heap tuple out of the tuple table slot, making sure we have a + * writable copy + */ + tuple = ExecMaterializeSlot(slot); + + /* + * get information on the (current) result relation + */ + resultRelInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelInfo->ri_RelationDesc; + + /* BEFORE ROW INSERT Triggers */ + if (resultRelInfo->ri_TrigDesc && +#ifdef REPLICATION + (txn_type != REPLICATED_REMOTE) && +#endif + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0) + { + HeapTuple newtuple; + + newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple); + + if (newtuple == NULL) /* "do nothing" */ + return; + + if (newtuple != tuple) /* modified by Trigger(s) */ + { + /* + * Put the modified tuple into a slot for convenience of routines + * below. We assume the tuple was allocated in per-tuple memory + * context, and therefore will go away by itself. The tuple table + * slot should not try to clear it. + */ + TupleTableSlot *newslot = estate->es_trig_tuple_slot; + + if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor) + ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor); + ExecStoreTuple(newtuple, newslot, InvalidBuffer, false); + slot = newslot; + tuple = newtuple; + } + } + + /* + * Check the constraints of the tuple + */ +#ifndef REPLICATION + if (resultRelationDesc->rd_att->constr) +#else + if ((txn_type != REPLICATED_REMOTE) && + (resultRelationDesc->rd_att->constr)) +#endif + ExecConstraints(resultRelInfo, slot, estate); + + /* + * insert the tuple + * + * Note: heap_insert returns the tid (location) of the new tuple in the + * t_self field. 
+ */ + newId = heap_insert(resultRelationDesc, tuple, + estate->es_snapshot->curcid, + true, true); + + IncrAppended(); + (estate->es_processed)++; + estate->es_lastoid = newId; + setLastTid(&(tuple->t_self)); + + /* + * insert index entries for tuple + */ + if (resultRelInfo->ri_NumIndices > 0) + ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false); + +#ifdef REPLICATION + if (txn_type != REPLICATED_REMOTE) + { +#endif + + /* AFTER ROW INSERT Triggers */ + ExecARInsertTriggers(estate, resultRelInfo, tuple); + + /* Process RETURNING if present */ + if (resultRelInfo->ri_projectReturning) + ExecProcessReturning(resultRelInfo->ri_projectReturning, + slot, planSlot, dest); + +#ifdef REPLICATION + } + + if ( txn_type == REPLICATED_LOCAL ) + { + Oid resultRelationOid; + TupleCollection *tcoll; + + tcoll = &(((QueryInfo *) CurrentWriteSet->currQuery)->tcoll); + resultRelationOid = RelationGetRelid(resultRelationDesc); + if(resultRelationOid == tcoll->rel->relOid) + WriteSetCollectTuple(tupleid, slot, CurrentWriteSet->currQuery, + estate->es_snapshot); + } +#endif +} + +/* ---------------------------------------------------------------- + * ExecDelete + * + * DELETE is like UPDATE, except that we delete the tuple and no + * index modifications are needed + * ---------------------------------------------------------------- + */ +static void +ExecDelete(ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, + EState *estate) +{ + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + HTSU_Result result; + ItemPointerData update_ctid; + TransactionId update_xmax; + + /* + * get information on the (current) result relation + */ + resultRelInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelInfo->ri_RelationDesc; + + /* BEFORE ROW DELETE Triggers */ + if (resultRelInfo->ri_TrigDesc && +#ifdef REPLICATION + (txn_type != REPLICATED_REMOTE) && +#endif + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0) + { + bool dodelete; + + dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid, + estate->es_snapshot->curcid); + + if (!dodelete) /* "do nothing" */ + return; + } + +#ifdef REPLICATION + /* initialize the TxnToAbort return value */ + TxnToAbort = InvalidTransactionId; + + /* + * Add the tuple info to the WriteSet. + */ + if ( txn_type == REPLICATED_LOCAL ) + { + Oid resultRelationOid; + TupleCollection *tcoll; + + tcoll = &(((QueryInfo *) CurrentWriteSet->currQuery)->tcoll); + resultRelationOid = RelationGetRelid(resultRelationDesc); + if (resultRelationOid == tcoll->rel->relOid) + WriteSetCollectTuple(tupleid, planSlot, + CurrentWriteSet->currQuery, + estate->es_snapshot); + } +#endif + + /* + * delete the tuple + * + * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that + * the row to be deleted is visible to that snapshot, and throw a can't- + * serialize error if not. This is a special-case behavior needed for + * referential integrity updates in serializable transactions. 
+ */ +ldelete:; + result = heap_delete(resultRelationDesc, tupleid, + &update_ctid, &update_xmax, + estate->es_snapshot->curcid, + estate->es_crosscheck_snapshot, +#ifndef REPLICATION + true /* wait for commit */ ); +#else + /* remote transaction don't wait */ + (txn_type != REPLICATED_REMOTE)); +#endif + + switch (result) + { + case HeapTupleSelfUpdated: + /* already deleted by self; nothing to do */ + return; + + case HeapTupleMayBeUpdated: + break; + +#ifdef REPLICATION + case HeapTupleBeingUpdated: + if (txn_type == REPLICATED_REMOTE) + { + /* + * A running local transaction has a lock on the tuple. Abort + * that local transaction and return, signaling that we must + * wait until the other transaction releases the lock. + */ +#ifdef RMGR_DEBUG + elog(DEBUG5, + "ExecDelete: need to terminate a local transaction %d", update_xmax); +#endif + TxnToAbort = update_xmax; + return; + } +#ifdef RMGR_DEBUG + else + /* should be impossible */ + Assert(result != HeapTupleBeingUpdated); +#endif +#endif + + case HeapTupleUpdated: +#ifdef RMGR_DEBUG + if (txn_type == REPLICATED_REMOTE) + { + elog(DEBUG5, "ExecDelete: a concurrent update has committed before. Abort this transaction."); + //FIXME + } +#endif + if (IsXactIsoLevelSerializable) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + else if (!ItemPointerEquals(tupleid, &update_ctid)) + { + TupleTableSlot *epqslot; + + epqslot = EvalPlanQual(estate, + resultRelInfo->ri_RangeTableIndex, + &update_ctid, + update_xmax, + estate->es_snapshot->curcid); + if (!TupIsNull(epqslot)) + { + *tupleid = update_ctid; + goto ldelete; + } + } + /* tuple already deleted; nothing to do */ + return; + + default: + elog(ERROR, "unrecognized heap_delete status: %u", result); + return; + } + + IncrDeleted(); + (estate->es_processed)++; + + /* + * Note: Normally one would think that we have to delete index tuples + * associated with the heap tuple now... + * + * ... but in POSTGRES, we have no need to do this because VACUUM will + * take care of it later. We can't delete index tuples immediately + * anyway, since the tuple is still visible to other transactions. + */ + + /* AFTER ROW DELETE Triggers */ + ExecARDeleteTriggers(estate, resultRelInfo, tupleid); + + /* Process RETURNING if present */ + if (resultRelInfo->ri_projectReturning) + { + /* + * We have to put the target tuple into a slot, which means first we + * gotta fetch it. We can use the trigger tuple slot. + */ + TupleTableSlot *slot = estate->es_trig_tuple_slot; + HeapTupleData deltuple; + Buffer delbuffer; + + deltuple.t_self = *tupleid; + if (!heap_fetch(resultRelationDesc, SnapshotAny, + &deltuple, &delbuffer, false, NULL)) + elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING"); + + if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc)) + ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc)); + ExecStoreTuple(&deltuple, slot, InvalidBuffer, false); + + ExecProcessReturning(resultRelInfo->ri_projectReturning, + slot, planSlot, dest); + + ExecClearTuple(slot); + ReleaseBuffer(delbuffer); + } +} + +/* ---------------------------------------------------------------- + * ExecUpdate + * + * note: we can't run UPDATE queries with transactions + * off because UPDATEs are actually INSERTs and our + * scan will mistakenly loop forever, updating the tuple + * it just inserted.. 
This should be fixed but until it + * is, we don't want to get stuck in an infinite loop + * which corrupts your database.. + * ---------------------------------------------------------------- + */ +static void +ExecUpdate(TupleTableSlot *slot, + ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, + EState *estate) +{ + HeapTuple tuple; + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + HTSU_Result result; + ItemPointerData update_ctid; + TransactionId update_xmax; + + /* + * abort the operation if not running transactions + */ + if (IsBootstrapProcessingMode()) + elog(ERROR, "cannot UPDATE during bootstrap"); + + /* + * get the heap tuple out of the tuple table slot, making sure we have a + * writable copy + */ + tuple = ExecMaterializeSlot(slot); + + /* + * get information on the (current) result relation + */ + resultRelInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelInfo->ri_RelationDesc; + + /* BEFORE ROW UPDATE Triggers */ + if (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0) + { + HeapTuple newtuple; + + newtuple = ExecBRUpdateTriggers(estate, resultRelInfo, + tupleid, tuple, + estate->es_snapshot->curcid); + + if (newtuple == NULL) /* "do nothing" */ + return; + + if (newtuple != tuple) /* modified by Trigger(s) */ + { + /* + * Put the modified tuple into a slot for convenience of routines + * below. We assume the tuple was allocated in per-tuple memory + * context, and therefore will go away by itself. The tuple table + * slot should not try to clear it. + */ + TupleTableSlot *newslot = estate->es_trig_tuple_slot; + + if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor) + ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor); + ExecStoreTuple(newtuple, newslot, InvalidBuffer, false); + slot = newslot; + tuple = newtuple; + } + } + + /* + * Check the constraints of the tuple + * + * If we generate a new candidate tuple after EvalPlanQual testing, we + * must loop back here and recheck constraints. (We don't need to redo + * triggers, however. If there are any BEFORE triggers then trigger.c + * will have done heap_lock_tuple to lock the correct tuple, so there's no + * need to do them again.) + */ +lreplace:; + if (resultRelationDesc->rd_att->constr) + ExecConstraints(resultRelInfo, slot, estate); + + /* + * replace the heap tuple + * + * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that + * the row to be updated is visible to that snapshot, and throw a can't- + * serialize error if not. This is a special-case behavior needed for + * referential integrity updates in serializable transactions. 
+ */ + result = heap_update(resultRelationDesc, tupleid, tuple, + &update_ctid, &update_xmax, + estate->es_snapshot->curcid, + estate->es_crosscheck_snapshot, + true /* wait for commit */ ); + + switch (result) + { + case HeapTupleSelfUpdated: + /* already deleted by self; nothing to do */ + return; + + case HeapTupleMayBeUpdated: + break; + + case HeapTupleUpdated: + if (IsXactIsoLevelSerializable) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + else if (!ItemPointerEquals(tupleid, &update_ctid)) + { + TupleTableSlot *epqslot; + + epqslot = EvalPlanQual(estate, + resultRelInfo->ri_RangeTableIndex, + &update_ctid, + update_xmax, + estate->es_snapshot->curcid); + if (!TupIsNull(epqslot)) + { + *tupleid = update_ctid; + slot = ExecFilterJunk(estate->es_junkFilter, epqslot); + tuple = ExecMaterializeSlot(slot); + goto lreplace; + } + } + /* tuple already deleted; nothing to do */ + return; + + default: + elog(ERROR, "unrecognized heap_update status: %u", result); + return; + } + + IncrReplaced(); + (estate->es_processed)++; + + /* + * Note: instead of having to update the old index tuples associated with + * the heap tuple, all we do is form and insert new index tuples. This is + * because UPDATEs are actually DELETEs and INSERTs, and index tuple + * deletion is done later by VACUUM (see notes in ExecDelete). All we do + * here is insert new index tuples. -cim 9/27/89 + */ + + /* + * insert index entries for tuple + * + * Note: heap_update returns the tid (location) of the new tuple in the + * t_self field. + */ + if (resultRelInfo->ri_NumIndices > 0) + ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false); + + /* AFTER ROW UPDATE Triggers */ + ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple); + + /* Process RETURNING if present */ + if (resultRelInfo->ri_projectReturning) + ExecProcessReturning(resultRelInfo->ri_projectReturning, + slot, planSlot, dest); +} ============================================================ --- tests/test_a_merge_8/left 3ad78dd8d181bd08c699214692923da58cdcdda4 +++ tests/test_a_merge_8/left 3ad78dd8d181bd08c699214692923da58cdcdda4 @@ -0,0 +1,1702 @@ +#include "postgres.h" + +#include "access/heapam.h" +#include "access/reloptions.h" +#include "access/transam.h" +#include "access/xact.h" +#include "catalog/heap.h" +#include "catalog/namespace.h" +#include "catalog/toasting.h" +#include "commands/tablespace.h" +#include "commands/trigger.h" +#include "executor/execdebug.h" +#include "executor/instrument.h" +#include "executor/nodeSubplan.h" +#include "miscadmin.h" +#include "optimizer/clauses.h" +#include "parser/parse_clause.h" +#include "parser/parsetree.h" +#include "storage/smgr.h" +#include "utils/acl.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" + + +typedef struct evalPlanQual +{ + Index rti; + EState *estate; + PlanState *planstate; + struct evalPlanQual *next; /* stack of active PlanQual plans */ + struct evalPlanQual *free; /* list of free PlanQual plans */ +} evalPlanQual; + +/* decls for local routines only used within this module */ +static void InitPlan(QueryDesc *queryDesc, int eflags); +static void initResultRelInfo(ResultRelInfo *resultRelInfo, + Index resultRelationIndex, + List *rangeTable, + CmdType operation, + bool doInstrument); +static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate, + CmdType operation, + long numberTuples, + ScanDirection direction, + DestReceiver *dest); +static void 
ExecSelect(TupleTableSlot *slot, + DestReceiver *dest, EState *estate); +static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, EState *estate); +static void ExecDelete(ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, EState *estate); +static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, EState *estate); +static void ExecProcessReturning(ProjectionInfo *projectReturning, + TupleTableSlot *tupleSlot, + TupleTableSlot *planSlot, + DestReceiver *dest); +static TupleTableSlot *EvalPlanQualNext(EState *estate); +static void EndEvalPlanQual(EState *estate); +static void ExecCheckRTEPerms(RangeTblEntry *rte); +static void ExecCheckXactReadOnly(Query *parsetree); +static void EvalPlanQualStart(evalPlanQual *epq, EState *estate, + evalPlanQual *priorepq); +static void EvalPlanQualStop(evalPlanQual *epq); +static void OpenIntoRel(QueryDesc *queryDesc); +static void CloseIntoRel(QueryDesc *queryDesc); +static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo); +static void intorel_receive(TupleTableSlot *slot, DestReceiver *self); +static void intorel_shutdown(DestReceiver *self); +static void intorel_destroy(DestReceiver *self); + +/* end of local decls */ + + +/* ---------------------------------------------------------------- + * ExecutorStart + * + * This routine must be called at the beginning of any execution of any + * query plan + * + * Takes a QueryDesc previously created by CreateQueryDesc (it's not real + * clear why we bother to separate the two functions, but...). The tupDesc + * field of the QueryDesc is filled in to describe the tuples that will be + * returned, and the internal fields (estate and planstate) are set up. + * + * eflags contains flag bits as described in executor.h. + * + * NB: the CurrentMemoryContext when this is called will become the parent + * of the per-query context used for this Executor invocation. + * ---------------------------------------------------------------- + */ +void +ExecutorStart(QueryDesc *queryDesc, int eflags) +{ + EState *estate; + MemoryContext oldcontext; + + /* sanity checks: queryDesc must not be started already */ + Assert(queryDesc != NULL); + Assert(queryDesc->estate == NULL); + + /* + * If the transaction is read-only, we need to check if any writes are + * planned to non-temporary tables. EXPLAIN is considered read-only. + */ + if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY)) + ExecCheckXactReadOnly(queryDesc->parsetree); + + /* + * Build EState, switch into per-query memory context for startup. 
+ */ + estate = CreateExecutorState(); + queryDesc->estate = estate; + + oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); + + /* + * Fill in parameters, if any, from queryDesc + */ + estate->es_param_list_info = queryDesc->params; + + if (queryDesc->plantree->nParamExec > 0) + estate->es_param_exec_vals = (ParamExecData *) + palloc0(queryDesc->plantree->nParamExec * sizeof(ParamExecData)); + + /* + * Copy other important information into the EState + */ + estate->es_snapshot = queryDesc->snapshot; + estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot; + estate->es_instrument = queryDesc->doInstrument; + + /* + * Initialize the plan state tree + */ + InitPlan(queryDesc, eflags); + + MemoryContextSwitchTo(oldcontext); +} + +/* ---------------------------------------------------------------- + * ExecutorRun + * + * This is the main routine of the executor module. It accepts + * the query descriptor from the traffic cop and executes the + * query plan. + * + * ExecutorStart must have been called already. + * + * If direction is NoMovementScanDirection then nothing is done + * except to start up/shut down the destination. Otherwise, + * we retrieve up to 'count' tuples in the specified direction. + * + * Note: count = 0 is interpreted as no portal limit, i.e., run to + * completion. + * + * ---------------------------------------------------------------- + */ +TupleTableSlot * +ExecutorRun(QueryDesc *queryDesc, + ScanDirection direction, long count) +{ + EState *estate; + CmdType operation; + DestReceiver *dest; + bool sendTuples; + TupleTableSlot *result; + MemoryContext oldcontext; + + /* sanity checks */ + Assert(queryDesc != NULL); + + estate = queryDesc->estate; + + Assert(estate != NULL); + + /* + * Switch into per-query memory context + */ + oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); + + /* + * extract information from the query descriptor and the query feature. + */ + operation = queryDesc->operation; + dest = queryDesc->dest; + + /* + * startup tuple receiver, if we will be emitting tuples + */ + estate->es_processed = 0; + estate->es_lastoid = InvalidOid; + + sendTuples = (operation == CMD_SELECT || + queryDesc->parsetree->returningList); + + if (sendTuples) + (*dest->rStartup) (dest, operation, queryDesc->tupDesc); + + /* + * run plan + */ + if (ScanDirectionIsNoMovement(direction)) + result = NULL; + else + result = ExecutePlan(estate, + queryDesc->planstate, + operation, + count, + direction, + dest); + + /* + * shutdown tuple receiver, if we started it + */ + if (sendTuples) + (*dest->rShutdown) (dest); + + MemoryContextSwitchTo(oldcontext); + + return result; +} + +/* ---------------------------------------------------------------- + * ExecutorEnd + * + * This routine must be called at the end of execution of any + * query plan + * ---------------------------------------------------------------- + */ +void +ExecutorEnd(QueryDesc *queryDesc) +{ + EState *estate; + MemoryContext oldcontext; + + /* sanity checks */ + Assert(queryDesc != NULL); + + estate = queryDesc->estate; + + Assert(estate != NULL); + + /* + * Switch into per-query memory context to run ExecEndPlan + */ + oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); + + ExecEndPlan(queryDesc->planstate, estate); + + /* + * Close the SELECT INTO relation if any + */ + if (estate->es_select_into) + CloseIntoRel(queryDesc); + + /* + * Must switch out of context before destroying it + */ + MemoryContextSwitchTo(oldcontext); + + /* + * Release EState and per-query memory context. 
This should release + * everything the executor has allocated. + */ + FreeExecutorState(estate); + + /* Reset queryDesc fields that no longer point to anything */ + queryDesc->tupDesc = NULL; + queryDesc->estate = NULL; + queryDesc->planstate = NULL; +} + +/* ---------------------------------------------------------------- + * ExecutorRewind + * + * This routine may be called on an open queryDesc to rewind it + * to the start. + * ---------------------------------------------------------------- + */ +void +ExecutorRewind(QueryDesc *queryDesc) +{ + EState *estate; + MemoryContext oldcontext; + + /* sanity checks */ + Assert(queryDesc != NULL); + + estate = queryDesc->estate; + + Assert(estate != NULL); + + /* It's probably not sensible to rescan updating queries */ + Assert(queryDesc->operation == CMD_SELECT); + + /* + * Switch into per-query memory context + */ + oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); + + /* + * rescan plan + */ + ExecReScan(queryDesc->planstate, NULL); + + MemoryContextSwitchTo(oldcontext); +} + + +/* + * ExecCheckRTPerms + * Check access permissions for all relations listed in a range table. + */ +void +ExecCheckRTPerms(List *rangeTable) +{ + ListCell *l; + + foreach(l, rangeTable) + { + RangeTblEntry *rte = lfirst(l); + + ExecCheckRTEPerms(rte); + } +} + +/* + * ExecCheckRTEPerms + * Check access permissions for a single RTE. + */ +static void +ExecCheckRTEPerms(RangeTblEntry *rte) +{ + AclMode requiredPerms; + Oid relOid; + Oid userid; + + /* + * Only plain-relation RTEs need to be checked here. Subquery RTEs are + * checked by ExecInitSubqueryScan if the subquery is still a separate + * subquery --- if it's been pulled up into our query level then the RTEs + * are in our rangetable and will be checked here. Function RTEs are + * checked by init_fcache when the function is prepared for execution. + * Join and special RTEs need no checks. + */ + if (rte->rtekind != RTE_RELATION) + return; + + /* + * No work if requiredPerms is empty. + */ + requiredPerms = rte->requiredPerms; + if (requiredPerms == 0) + return; + + relOid = rte->relid; + + /* + * userid to check as: current user unless we have a setuid indication. + * + * Note: GetUserId() is presently fast enough that there's no harm in + * calling it separately for each RTE. If that stops being true, we could + * call it once in ExecCheckRTPerms and pass the userid down from there. + * But for now, no need for the extra clutter. + */ + userid = rte->checkAsUser ? rte->checkAsUser : GetUserId(); + + /* + * We must have *all* the requiredPerms bits, so use aclmask not aclcheck. + */ + if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL) + != requiredPerms) + aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS, + get_rel_name(relOid)); +} + +/* + * Check that the query does not imply any writes to non-temp tables. + */ +static void +ExecCheckXactReadOnly(Query *parsetree) +{ + ListCell *l; + + /* + * CREATE TABLE AS or SELECT INTO? + * + * XXX should we allow this if the destination is temp? 
+ */ + if (parsetree->into != NULL) + goto fail; + + /* Fail if write permissions are requested on any non-temp table */ + foreach(l, parsetree->rtable) + { + RangeTblEntry *rte = lfirst(l); + + if (rte->rtekind == RTE_SUBQUERY) + { + ExecCheckXactReadOnly(rte->subquery); + continue; + } + + if (rte->rtekind != RTE_RELATION) + continue; + + if ((rte->requiredPerms & (~ACL_SELECT)) == 0) + continue; + + if (isTempNamespace(get_rel_namespace(rte->relid))) + continue; + + goto fail; + } + + return; + +fail: + ereport(ERROR, + (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION), + errmsg("transaction is read-only"))); +} + + +/* ---------------------------------------------------------------- + * InitPlan + * + * Initializes the query plan: open files, allocate storage + * and start up the rule manager + * ---------------------------------------------------------------- + */ +static void +InitPlan(QueryDesc *queryDesc, int eflags) +{ + CmdType operation = queryDesc->operation; + Query *parseTree = queryDesc->parsetree; + Plan *plan = queryDesc->plantree; + EState *estate = queryDesc->estate; + PlanState *planstate; + List *rangeTable; + TupleDesc tupType; + ListCell *l; + + /* + * Do permissions checks. It's sufficient to examine the query's top + * rangetable here --- subplan RTEs will be checked during + * ExecInitSubPlan(). + */ + ExecCheckRTPerms(parseTree->rtable); + + /* + * get information from query descriptor + */ + rangeTable = parseTree->rtable; + + /* + * initialize the node's execution state + */ + estate->es_range_table = rangeTable; + + /* + * if there is a result relation, initialize result relation stuff + */ + if (parseTree->resultRelation) + { + List *resultRelations = parseTree->resultRelations; + int numResultRelations; + ResultRelInfo *resultRelInfos; + + if (resultRelations != NIL) + { + /* + * Multiple result relations (due to inheritance) + * parseTree->resultRelations identifies them all + */ + ResultRelInfo *resultRelInfo; + + numResultRelations = list_length(resultRelations); + resultRelInfos = (ResultRelInfo *) + palloc(numResultRelations * sizeof(ResultRelInfo)); + resultRelInfo = resultRelInfos; + foreach(l, resultRelations) + { + initResultRelInfo(resultRelInfo, + lfirst_int(l), + rangeTable, + operation, + estate->es_instrument); + resultRelInfo++; + } + } + else + { + /* + * Single result relation identified by parseTree->resultRelation + */ + numResultRelations = 1; + resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo)); + initResultRelInfo(resultRelInfos, + parseTree->resultRelation, + rangeTable, + operation, + estate->es_instrument); + } + + estate->es_result_relations = resultRelInfos; + estate->es_num_result_relations = numResultRelations; + /* Initialize to first or only result rel */ + estate->es_result_relation_info = resultRelInfos; + } + else + { + /* + * if no result relation, then set state appropriately + */ + estate->es_result_relations = NULL; + estate->es_num_result_relations = 0; + estate->es_result_relation_info = NULL; + } + + /* + * Detect whether we're doing SELECT INTO. If so, set the es_into_oids + * flag appropriately so that the plan tree will be initialized with the + * correct tuple descriptors. (Other SELECT INTO stuff comes later.) 
+ */ + estate->es_select_into = false; + if (operation == CMD_SELECT && parseTree->into != NULL) + { + estate->es_select_into = true; + estate->es_into_oids = interpretOidsOption(parseTree->intoOptions); + } + + /* + * Have to lock relations selected FOR UPDATE/FOR SHARE before we + * initialize the plan tree, else we'd be doing a lock upgrade. + * While we are at it, build the ExecRowMark list. + */ + estate->es_rowMarks = NIL; + foreach(l, parseTree->rowMarks) + { + RowMarkClause *rc = (RowMarkClause *) lfirst(l); + Oid relid = getrelid(rc->rti, rangeTable); + Relation relation; + ExecRowMark *erm; + + relation = heap_open(relid, RowShareLock); + erm = (ExecRowMark *) palloc(sizeof(ExecRowMark)); + erm->relation = relation; + erm->rti = rc->rti; + erm->forUpdate = rc->forUpdate; + erm->noWait = rc->noWait; + /* We'll set up ctidAttno below */ + erm->ctidAttNo = InvalidAttrNumber; + estate->es_rowMarks = lappend(estate->es_rowMarks, erm); + } + + /* + * initialize the executor "tuple" table. We need slots for all the plan + * nodes, plus possibly output slots for the junkfilter(s). At this point + * we aren't sure if we need junkfilters, so just add slots for them + * unconditionally. Also, if it's not a SELECT, set up a slot for use for + * trigger output tuples. + */ + { + int nSlots = ExecCountSlotsNode(plan); + + if (parseTree->resultRelations != NIL) + nSlots += list_length(parseTree->resultRelations); + else + nSlots += 1; + if (operation != CMD_SELECT) + nSlots++; /* for es_trig_tuple_slot */ + if (parseTree->returningLists) + nSlots++; /* for RETURNING projection */ + + estate->es_tupleTable = ExecCreateTupleTable(nSlots); + + if (operation != CMD_SELECT) + estate->es_trig_tuple_slot = + ExecAllocTableSlot(estate->es_tupleTable); + } + + /* mark EvalPlanQual not active */ + estate->es_topPlan = plan; + estate->es_evalPlanQual = NULL; + estate->es_evTupleNull = NULL; + estate->es_evTuple = NULL; + estate->es_useEvalPlan = false; + + /* + * initialize the private state information for all the nodes in the query + * tree. This opens files, allocates storage and leaves us ready to start + * processing tuples. + */ + planstate = ExecInitNode(plan, estate, eflags); + + /* + * Get the tuple descriptor describing the type of tuples to return. (this + * is especially important if we are creating a relation with "SELECT + * INTO") + */ + tupType = ExecGetResultType(planstate); + + /* + * Initialize the junk filter if needed. SELECT and INSERT queries need a + * filter if there are any junk attrs in the tlist. INSERT and SELECT + * INTO also need a filter if the plan may return raw disk tuples (else + * heap_insert will be scribbling on the source relation!). UPDATE and + * DELETE always need a filter, since there's always a junk 'ctid' + * attribute present --- no need to look first. + */ + { + bool junk_filter_needed = false; + ListCell *tlist; + + switch (operation) + { + case CMD_SELECT: + case CMD_INSERT: + foreach(tlist, plan->targetlist) + { + TargetEntry *tle = (TargetEntry *) lfirst(tlist); + + if (tle->resjunk) + { + junk_filter_needed = true; + break; + } + } + if (!junk_filter_needed && + (operation == CMD_INSERT || estate->es_select_into) && + ExecMayReturnRawTuples(planstate)) + junk_filter_needed = true; + break; + case CMD_UPDATE: + case CMD_DELETE: + junk_filter_needed = true; + break; + default: + break; + } + + if (junk_filter_needed) + { + /* + * If there are multiple result relations, each one needs its own + * junk filter. 
Note this is only possible for UPDATE/DELETE, so + * we can't be fooled by some needing a filter and some not. + */ + if (parseTree->resultRelations != NIL) + { + PlanState **appendplans; + int as_nplans; + ResultRelInfo *resultRelInfo; + int i; + + /* Top plan had better be an Append here. */ + Assert(IsA(plan, Append)); + Assert(((Append *) plan)->isTarget); + Assert(IsA(planstate, AppendState)); + appendplans = ((AppendState *) planstate)->appendplans; + as_nplans = ((AppendState *) planstate)->as_nplans; + Assert(as_nplans == estate->es_num_result_relations); + resultRelInfo = estate->es_result_relations; + for (i = 0; i < as_nplans; i++) + { + PlanState *subplan = appendplans[i]; + JunkFilter *j; + + j = ExecInitJunkFilter(subplan->plan->targetlist, + resultRelInfo->ri_RelationDesc->rd_att->tdhasoid, + ExecAllocTableSlot(estate->es_tupleTable)); + /* + * Since it must be UPDATE/DELETE, there had better be + * a "ctid" junk attribute in the tlist ... but ctid could + * be at a different resno for each result relation. + * We look up the ctid resnos now and save them in the + * junkfilters. + */ + j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid"); + if (!AttributeNumberIsValid(j->jf_junkAttNo)) + elog(ERROR, "could not find junk ctid column"); + resultRelInfo->ri_junkFilter = j; + resultRelInfo++; + } + + /* + * Set active junkfilter too; at this point ExecInitAppend has + * already selected an active result relation... + */ + estate->es_junkFilter = + estate->es_result_relation_info->ri_junkFilter; + } + else + { + /* Normal case with just one JunkFilter */ + JunkFilter *j; + + j = ExecInitJunkFilter(planstate->plan->targetlist, + tupType->tdhasoid, + ExecAllocTableSlot(estate->es_tupleTable)); + estate->es_junkFilter = j; + if (estate->es_result_relation_info) + estate->es_result_relation_info->ri_junkFilter = j; + + if (operation == CMD_SELECT) + { + /* For SELECT, want to return the cleaned tuple type */ + tupType = j->jf_cleanTupType; + /* For SELECT FOR UPDATE/SHARE, find the ctid attrs now */ + foreach(l, estate->es_rowMarks) + { + ExecRowMark *erm = (ExecRowMark *) lfirst(l); + char resname[32]; + + snprintf(resname, sizeof(resname), "ctid%u", erm->rti); + erm->ctidAttNo = ExecFindJunkAttribute(j, resname); + if (!AttributeNumberIsValid(erm->ctidAttNo)) + elog(ERROR, "could not find junk \"%s\" column", + resname); + } + } + else if (operation == CMD_UPDATE || operation == CMD_DELETE) + { + /* For UPDATE/DELETE, find the ctid junk attr now */ + j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid"); + if (!AttributeNumberIsValid(j->jf_junkAttNo)) + elog(ERROR, "could not find junk ctid column"); + } + } + } + else + estate->es_junkFilter = NULL; + } + + /* + * Initialize RETURNING projections if needed. + */ + if (parseTree->returningLists) + { + TupleTableSlot *slot; + ExprContext *econtext; + ResultRelInfo *resultRelInfo; + + /* + * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case. + * We assume all the sublists will generate the same output tupdesc. + */ + tupType = ExecTypeFromTL((List *) linitial(parseTree->returningLists), + false); + + /* Set up a slot for the output of the RETURNING projection(s) */ + slot = ExecAllocTableSlot(estate->es_tupleTable); + ExecSetSlotDescriptor(slot, tupType); + /* Need an econtext too */ + econtext = CreateExprContext(estate); + + /* + * Build a projection for each result rel. Note that any SubPlans in + * the RETURNING lists get attached to the topmost plan node. 
+ */ + Assert(list_length(parseTree->returningLists) == estate->es_num_result_relations); + resultRelInfo = estate->es_result_relations; + foreach(l, parseTree->returningLists) + { + List *rlist = (List *) lfirst(l); + List *rliststate; + + rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate); + resultRelInfo->ri_projectReturning = + ExecBuildProjectionInfo(rliststate, econtext, slot, + resultRelInfo->ri_RelationDesc->rd_att); + resultRelInfo++; + } + + /* + * Because we already ran ExecInitNode() for the top plan node, any + * subplans we just attached to it won't have been initialized; so we + * have to do it here. (Ugly, but the alternatives seem worse.) + */ + foreach(l, planstate->subPlan) + { + SubPlanState *sstate = (SubPlanState *) lfirst(l); + + Assert(IsA(sstate, SubPlanState)); + if (sstate->planstate == NULL) /* already inited? */ + ExecInitSubPlan(sstate, estate, eflags); + } + } + + queryDesc->tupDesc = tupType; + queryDesc->planstate = planstate; + + /* + * If doing SELECT INTO, initialize the "into" relation. We must wait + * till now so we have the "clean" result tuple type to create the new + * table from. + * + * If EXPLAIN, skip creating the "into" relation. + */ + if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY)) + OpenIntoRel(queryDesc); +} + +/* + * Initialize ResultRelInfo data for one result relation + */ +static void +initResultRelInfo(ResultRelInfo *resultRelInfo, + Index resultRelationIndex, + List *rangeTable, + CmdType operation, + bool doInstrument) +{ + Oid resultRelationOid; + Relation resultRelationDesc; + + resultRelationOid = getrelid(resultRelationIndex, rangeTable); + resultRelationDesc = heap_open(resultRelationOid, RowExclusiveLock); + + switch (resultRelationDesc->rd_rel->relkind) + { + case RELKIND_SEQUENCE: + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot change sequence \"%s\"", + RelationGetRelationName(resultRelationDesc)))); + break; + case RELKIND_TOASTVALUE: + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot change TOAST relation \"%s\"", + RelationGetRelationName(resultRelationDesc)))); + break; + case RELKIND_VIEW: + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot change view \"%s\"", + RelationGetRelationName(resultRelationDesc)))); + break; + } + + MemSet(resultRelInfo, 0, sizeof(ResultRelInfo)); + resultRelInfo->type = T_ResultRelInfo; + resultRelInfo->ri_RangeTableIndex = resultRelationIndex; + resultRelInfo->ri_RelationDesc = resultRelationDesc; + resultRelInfo->ri_NumIndices = 0; + resultRelInfo->ri_IndexRelationDescs = NULL; + resultRelInfo->ri_IndexRelationInfo = NULL; + /* make a copy so as not to depend on relcache info not changing... */ + resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc); + if (resultRelInfo->ri_TrigDesc) + { + int n = resultRelInfo->ri_TrigDesc->numtriggers; + + resultRelInfo->ri_TrigFunctions = (FmgrInfo *) + palloc0(n * sizeof(FmgrInfo)); + if (doInstrument) + resultRelInfo->ri_TrigInstrument = InstrAlloc(n); + else + resultRelInfo->ri_TrigInstrument = NULL; + } + else + { + resultRelInfo->ri_TrigFunctions = NULL; + resultRelInfo->ri_TrigInstrument = NULL; + } + resultRelInfo->ri_ConstraintExprs = NULL; + resultRelInfo->ri_junkFilter = NULL; + resultRelInfo->ri_projectReturning = NULL; + + /* + * If there are indices on the result relation, open them and save + * descriptors in the result relation info, so that we can add new index + * entries for the tuples we add/update. 
We need not do this for a + * DELETE, however, since deletion doesn't affect indexes. + */ + if (resultRelationDesc->rd_rel->relhasindex && + operation != CMD_DELETE) + ExecOpenIndices(resultRelInfo); +} + +/* + * ExecContextForcesOids + * + * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO, + * we need to ensure that result tuples have space for an OID iff they are + * going to be stored into a relation that has OIDs. In other contexts + * we are free to choose whether to leave space for OIDs in result tuples + * (we generally don't want to, but we do if a physical-tlist optimization + * is possible). This routine checks the plan context and returns TRUE if the + * choice is forced, FALSE if the choice is not forced. In the TRUE case, + * *hasoids is set to the required value. + * + * One reason this is ugly is that all plan nodes in the plan tree will emit + * tuples with space for an OID, though we really only need the topmost node + * to do so. However, node types like Sort don't project new tuples but just + * return their inputs, and in those cases the requirement propagates down + * to the input node. Eventually we might make this code smart enough to + * recognize how far down the requirement really goes, but for now we just + * make all plan nodes do the same thing if the top level forces the choice. + * + * We assume that estate->es_result_relation_info is already set up to + * describe the target relation. Note that in an UPDATE that spans an + * inheritance tree, some of the target relations may have OIDs and some not. + * We have to make the decisions on a per-relation basis as we initialize + * each of the child plans of the topmost Append plan. + * + * SELECT INTO is even uglier, because we don't have the INTO relation's + * descriptor available when this code runs; we have to look aside at a + * flag set by InitPlan(). + */ +bool +ExecContextForcesOids(PlanState *planstate, bool *hasoids) +{ + if (planstate->state->es_select_into) + { + *hasoids = planstate->state->es_into_oids; + return true; + } + else + { + ResultRelInfo *ri = planstate->state->es_result_relation_info; + + if (ri != NULL) + { + Relation rel = ri->ri_RelationDesc; + + if (rel != NULL) + { + *hasoids = rel->rd_rel->relhasoids; + return true; + } + } + } + + return false; +} + +/* ---------------------------------------------------------------- + * ExecEndPlan + * + * Cleans up the query plan -- closes files and frees up storage + * + * NOTE: we are no longer very worried about freeing storage per se + * in this code; FreeExecutorState should be guaranteed to release all + * memory that needs to be released. What we are worried about doing + * is closing relations and dropping buffer pins. Thus, for example, + * tuple tables must be cleared or dropped to ensure pins are released. + * ---------------------------------------------------------------- + */ +void +ExecEndPlan(PlanState *planstate, EState *estate) +{ + ResultRelInfo *resultRelInfo; + int i; + ListCell *l; + + /* + * shut down any PlanQual processing we were doing + */ + if (estate->es_evalPlanQual != NULL) + EndEvalPlanQual(estate); + + /* + * shut down the node-type-specific query processing + */ + ExecEndNode(planstate); + + /* + * destroy the executor "tuple" table. + */ + ExecDropTupleTable(estate->es_tupleTable, true); + estate->es_tupleTable = NULL; + + /* + * close the result relation(s) if any, but hold locks until xact commit. 
+ */ + resultRelInfo = estate->es_result_relations; + for (i = estate->es_num_result_relations; i > 0; i--) + { + /* Close indices and then the relation itself */ + ExecCloseIndices(resultRelInfo); + heap_close(resultRelInfo->ri_RelationDesc, NoLock); + resultRelInfo++; + } + + /* + * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks + */ + foreach(l, estate->es_rowMarks) + { + ExecRowMark *erm = lfirst(l); + + heap_close(erm->relation, NoLock); + } +} + +/* ---------------------------------------------------------------- + * ExecutePlan + * + * processes the query plan to retrieve 'numberTuples' tuples in the + * direction specified. + * + * Retrieves all tuples if numberTuples is 0 + * + * result is either a slot containing the last tuple in the case + * of a SELECT or NULL otherwise. + * + * Note: the ctid attribute is a 'junk' attribute that is removed before the + * user can see it + * ---------------------------------------------------------------- + */ +static TupleTableSlot * +ExecutePlan(EState *estate, + PlanState *planstate, + CmdType operation, + long numberTuples, + ScanDirection direction, + DestReceiver *dest) +{ + JunkFilter *junkfilter; + TupleTableSlot *planSlot; + TupleTableSlot *slot; + ItemPointer tupleid = NULL; + ItemPointerData tuple_ctid; + long current_tuple_count; + TupleTableSlot *result; + + /* + * initialize local variables + */ + current_tuple_count = 0; + result = NULL; + + /* + * Set the direction. + */ + estate->es_direction = direction; + + /* + * Process BEFORE EACH STATEMENT triggers + */ + switch (operation) + { + case CMD_UPDATE: + ExecBSUpdateTriggers(estate, estate->es_result_relation_info); + break; + case CMD_DELETE: + ExecBSDeleteTriggers(estate, estate->es_result_relation_info); + break; + case CMD_INSERT: + ExecBSInsertTriggers(estate, estate->es_result_relation_info); + break; + default: + /* do nothing */ + break; + } + + /* + * Loop until we've processed the proper number of tuples from the plan. + */ + + for (;;) + { + /* Reset the per-output-tuple exprcontext */ + ResetPerTupleExprContext(estate); + + /* + * Execute the plan and obtain a tuple + */ +lnext: ; + if (estate->es_useEvalPlan) + { + planSlot = EvalPlanQualNext(estate); + if (TupIsNull(planSlot)) + planSlot = ExecProcNode(planstate); + } + else + planSlot = ExecProcNode(planstate); + + /* + * if the tuple is null, then we assume there is nothing more to + * process so we just return null... + */ + if (TupIsNull(planSlot)) + { + result = NULL; + break; + } + slot = planSlot; + + /* + * if we have a junk filter, then project a new tuple with the junk + * removed. + * + * Store this new "clean" tuple in the junkfilter's resultSlot. + * (Formerly, we stored it back over the "dirty" tuple, which is WRONG + * because that tuple slot has the wrong descriptor.) + * + * Also, extract all the junk information we need. + */ + if ((junkfilter = estate->es_junkFilter) != NULL) + { + Datum datum; + bool isNull; + + /* + * extract the 'ctid' junk attribute. + */ + if (operation == CMD_UPDATE || operation == CMD_DELETE) + { + datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo, + &isNull); + /* shouldn't ever get a null result... */ + if (isNull) + elog(ERROR, "ctid is NULL"); + + tupleid = (ItemPointer) DatumGetPointer(datum); + tuple_ctid = *tupleid; /* make sure we don't free the ctid!! */ + tupleid = &tuple_ctid; + } + + /* + * Process any FOR UPDATE or FOR SHARE locking requested. 
+ */ + else if (estate->es_rowMarks != NIL) + { + ListCell *l; + + lmark: ; + foreach(l, estate->es_rowMarks) + { + ExecRowMark *erm = lfirst(l); + HeapTupleData tuple; + Buffer buffer; + ItemPointerData update_ctid; + TransactionId update_xmax; + TupleTableSlot *newSlot; + LockTupleMode lockmode; + HTSU_Result test; + + datum = ExecGetJunkAttribute(slot, + erm->ctidAttNo, + &isNull); + /* shouldn't ever get a null result... */ + if (isNull) + elog(ERROR, "ctid is NULL"); + + tuple.t_self = *((ItemPointer) DatumGetPointer(datum)); + + if (erm->forUpdate) + lockmode = LockTupleExclusive; + else + lockmode = LockTupleShared; + + test = heap_lock_tuple(erm->relation, &tuple, &buffer, + &update_ctid, &update_xmax, + estate->es_snapshot->curcid, + lockmode, erm->noWait); + ReleaseBuffer(buffer); + switch (test) + { + case HeapTupleSelfUpdated: + /* treat it as deleted; do not process */ + goto lnext; + + case HeapTupleMayBeUpdated: + break; + + case HeapTupleUpdated: + if (IsXactIsoLevelSerializable) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + if (!ItemPointerEquals(&update_ctid, + &tuple.t_self)) + { + /* updated, so look at updated version */ + newSlot = EvalPlanQual(estate, + erm->rti, + &update_ctid, + update_xmax, + estate->es_snapshot->curcid); + if (!TupIsNull(newSlot)) + { + slot = planSlot = newSlot; + estate->es_useEvalPlan = true; + goto lmark; + } + } + + /* + * if tuple was deleted or PlanQual failed for + * updated tuple - we must not return this tuple! + */ + goto lnext; + + default: + elog(ERROR, "unrecognized heap_lock_tuple status: %u", + test); + return NULL; + } + } + } + + /* + * Create a new "clean" tuple with all junk attributes removed. We + * don't need to do this for DELETE, however (there will in fact + * be no non-junk attributes in a DELETE!) + */ + if (operation != CMD_DELETE) + slot = ExecFilterJunk(junkfilter, slot); + } + + /* + * now that we have a tuple, do the appropriate thing with it.. either + * return it to the user, add it to a relation someplace, delete it + * from a relation, or modify some of its attributes. + */ + switch (operation) + { + case CMD_SELECT: + ExecSelect(slot, dest, estate); + result = slot; + break; + + case CMD_INSERT: + ExecInsert(slot, tupleid, planSlot, dest, estate); + result = NULL; + break; + + case CMD_DELETE: + ExecDelete(tupleid, planSlot, dest, estate); + result = NULL; + break; + + case CMD_UPDATE: + ExecUpdate(slot, tupleid, planSlot, dest, estate); + result = NULL; + break; + + default: + elog(ERROR, "unrecognized operation code: %d", + (int) operation); + result = NULL; + break; + } + + /* + * check our tuple count.. if we've processed the proper number then + * quit, else loop again and process more tuples. Zero numberTuples + * means no limit. + */ + current_tuple_count++; + if (numberTuples && numberTuples == current_tuple_count) + break; + } + + /* + * Process AFTER EACH STATEMENT triggers + */ + switch (operation) + { + case CMD_UPDATE: + ExecASUpdateTriggers(estate, estate->es_result_relation_info); + break; + case CMD_DELETE: + ExecASDeleteTriggers(estate, estate->es_result_relation_info); + break; + case CMD_INSERT: + ExecASInsertTriggers(estate, estate->es_result_relation_info); + break; + default: + /* do nothing */ + break; + } + + /* + * here, result is either a slot containing a tuple in the case of a + * SELECT or NULL otherwise. 
+ */ + return result; +} + +/* ---------------------------------------------------------------- + * ExecSelect + * + * SELECTs are easy.. we just pass the tuple to the appropriate + * output function. + * ---------------------------------------------------------------- + */ +static void +ExecSelect(TupleTableSlot *slot, + DestReceiver *dest, + EState *estate) +{ + (*dest->receiveSlot) (slot, dest); + IncrRetrieved(); + (estate->es_processed)++; +} + +/* ---------------------------------------------------------------- + * ExecInsert + * + * INSERTs are trickier.. we have to insert the tuple into + * the base relation and insert appropriate tuples into the + * index relations. + * ---------------------------------------------------------------- + */ +static void +ExecInsert(TupleTableSlot *slot, + ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, + EState *estate) +{ + HeapTuple tuple; + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + Oid newId; + + /* + * get the heap tuple out of the tuple table slot, making sure we have a + * writable copy + */ + tuple = ExecMaterializeSlot(slot); + + /* + * get information on the (current) result relation + */ + resultRelInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelInfo->ri_RelationDesc; + + /* BEFORE ROW INSERT Triggers */ + if (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0) + { + HeapTuple newtuple; + + newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple); + + if (newtuple == NULL) /* "do nothing" */ + return; + + if (newtuple != tuple) /* modified by Trigger(s) */ + { + /* + * Put the modified tuple into a slot for convenience of routines + * below. We assume the tuple was allocated in per-tuple memory + * context, and therefore will go away by itself. The tuple table + * slot should not try to clear it. + */ + TupleTableSlot *newslot = estate->es_trig_tuple_slot; + + if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor) + ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor); + ExecStoreTuple(newtuple, newslot, InvalidBuffer, false); + slot = newslot; + tuple = newtuple; + } + } + + /* + * Check the constraints of the tuple + */ + if (resultRelationDesc->rd_att->constr) + ExecConstraints(resultRelInfo, slot, estate); + + /* + * insert the tuple + * + * Note: heap_insert returns the tid (location) of the new tuple in the + * t_self field. 
+ */ + newId = heap_insert(resultRelationDesc, tuple, + estate->es_snapshot->curcid, + true, true); + + IncrAppended(); + (estate->es_processed)++; + estate->es_lastoid = newId; + setLastTid(&(tuple->t_self)); + + /* + * insert index entries for tuple + */ + if (resultRelInfo->ri_NumIndices > 0) + ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false); + + /* AFTER ROW INSERT Triggers */ + ExecARInsertTriggers(estate, resultRelInfo, tuple); + + /* Process RETURNING if present */ + if (resultRelInfo->ri_projectReturning) + ExecProcessReturning(resultRelInfo->ri_projectReturning, + slot, planSlot, dest); +} + +/* ---------------------------------------------------------------- + * ExecDelete + * + * DELETE is like UPDATE, except that we delete the tuple and no + * index modifications are needed + * ---------------------------------------------------------------- + */ +static void +ExecDelete(ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, + EState *estate) +{ + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + HTSU_Result result; + ItemPointerData update_ctid; + TransactionId update_xmax; + + /* + * get information on the (current) result relation + */ + resultRelInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelInfo->ri_RelationDesc; + + /* BEFORE ROW DELETE Triggers */ + if (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0) + { + bool dodelete; + + dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid, + estate->es_snapshot->curcid); + + if (!dodelete) /* "do nothing" */ + return; + } + + /* + * delete the tuple + * + * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that + * the row to be deleted is visible to that snapshot, and throw a can't- + * serialize error if not. This is a special-case behavior needed for + * referential integrity updates in serializable transactions. + */ +ldelete:; + result = heap_delete(resultRelationDesc, tupleid, + &update_ctid, &update_xmax, + estate->es_snapshot->curcid, + estate->es_crosscheck_snapshot, + true /* wait for commit */ ); + switch (result) + { + case HeapTupleSelfUpdated: + /* already deleted by self; nothing to do */ + return; + + case HeapTupleMayBeUpdated: + break; + + case HeapTupleUpdated: + if (IsXactIsoLevelSerializable) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + else if (!ItemPointerEquals(tupleid, &update_ctid)) + { + TupleTableSlot *epqslot; + + epqslot = EvalPlanQual(estate, + resultRelInfo->ri_RangeTableIndex, + &update_ctid, + update_xmax, + estate->es_snapshot->curcid); + if (!TupIsNull(epqslot)) + { + *tupleid = update_ctid; + goto ldelete; + } + } + /* tuple already deleted; nothing to do */ + return; + + default: + elog(ERROR, "unrecognized heap_delete status: %u", result); + return; + } + + IncrDeleted(); + (estate->es_processed)++; + + /* + * Note: Normally one would think that we have to delete index tuples + * associated with the heap tuple now... + * + * ... but in POSTGRES, we have no need to do this because VACUUM will + * take care of it later. We can't delete index tuples immediately + * anyway, since the tuple is still visible to other transactions. 
+ */ + + /* AFTER ROW DELETE Triggers */ + ExecARDeleteTriggers(estate, resultRelInfo, tupleid); + + /* Process RETURNING if present */ + if (resultRelInfo->ri_projectReturning) + { + /* + * We have to put the target tuple into a slot, which means first we + * gotta fetch it. We can use the trigger tuple slot. + */ + TupleTableSlot *slot = estate->es_trig_tuple_slot; + HeapTupleData deltuple; + Buffer delbuffer; + + deltuple.t_self = *tupleid; + if (!heap_fetch(resultRelationDesc, SnapshotAny, + &deltuple, &delbuffer, false, NULL)) + elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING"); + + if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc)) + ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc)); + ExecStoreTuple(&deltuple, slot, InvalidBuffer, false); + + ExecProcessReturning(resultRelInfo->ri_projectReturning, + slot, planSlot, dest); + + ExecClearTuple(slot); + ReleaseBuffer(delbuffer); + } +} + +/* ---------------------------------------------------------------- + * ExecUpdate + * + * note: we can't run UPDATE queries with transactions + * off because UPDATEs are actually INSERTs and our + * scan will mistakenly loop forever, updating the tuple + * it just inserted.. This should be fixed but until it + * is, we don't want to get stuck in an infinite loop + * which corrupts your database.. + * ---------------------------------------------------------------- + */ +static void +ExecUpdate(TupleTableSlot *slot, + ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, + EState *estate) +{ + HeapTuple tuple; + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + HTSU_Result result; + ItemPointerData update_ctid; + TransactionId update_xmax; + + /* + * abort the operation if not running transactions + */ + if (IsBootstrapProcessingMode()) + elog(ERROR, "cannot UPDATE during bootstrap"); + + /* + * get the heap tuple out of the tuple table slot, making sure we have a + * writable copy + */ + tuple = ExecMaterializeSlot(slot); + + /* + * get information on the (current) result relation + */ + resultRelInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelInfo->ri_RelationDesc; + + /* BEFORE ROW UPDATE Triggers */ + if (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0) + { + HeapTuple newtuple; + + newtuple = ExecBRUpdateTriggers(estate, resultRelInfo, + tupleid, tuple, + estate->es_snapshot->curcid); + + if (newtuple == NULL) /* "do nothing" */ + return; + + if (newtuple != tuple) /* modified by Trigger(s) */ + { + /* + * Put the modified tuple into a slot for convenience of routines + * below. We assume the tuple was allocated in per-tuple memory + * context, and therefore will go away by itself. The tuple table + * slot should not try to clear it. + */ + TupleTableSlot *newslot = estate->es_trig_tuple_slot; + + if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor) + ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor); + ExecStoreTuple(newtuple, newslot, InvalidBuffer, false); + slot = newslot; + tuple = newtuple; + } + } + + /* + * Check the constraints of the tuple + * + * If we generate a new candidate tuple after EvalPlanQual testing, we + * must loop back here and recheck constraints. (We don't need to redo + * triggers, however. If there are any BEFORE triggers then trigger.c + * will have done heap_lock_tuple to lock the correct tuple, so there's no + * need to do them again.) 
+ */ +lreplace:; + if (resultRelationDesc->rd_att->constr) + ExecConstraints(resultRelInfo, slot, estate); + + /* + * replace the heap tuple + * + * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that + * the row to be updated is visible to that snapshot, and throw a can't- + * serialize error if not. This is a special-case behavior needed for + * referential integrity updates in serializable transactions. + */ + result = heap_update(resultRelationDesc, tupleid, tuple, + &update_ctid, &update_xmax, + estate->es_snapshot->curcid, + estate->es_crosscheck_snapshot, + true /* wait for commit */ ); + switch (result) + { + case HeapTupleSelfUpdated: + /* already deleted by self; nothing to do */ + return; + + case HeapTupleMayBeUpdated: + break; + + case HeapTupleUpdated: + if (IsXactIsoLevelSerializable) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + else if (!ItemPointerEquals(tupleid, &update_ctid)) + { + TupleTableSlot *epqslot; + + epqslot = EvalPlanQual(estate, + resultRelInfo->ri_RangeTableIndex, + &update_ctid, + update_xmax, + estate->es_snapshot->curcid); + if (!TupIsNull(epqslot)) + { + *tupleid = update_ctid; + slot = ExecFilterJunk(estate->es_junkFilter, epqslot); + tuple = ExecMaterializeSlot(slot); + goto lreplace; + } + } + /* tuple already deleted; nothing to do */ + return; + + default: + elog(ERROR, "unrecognized heap_update status: %u", result); + return; + } + + IncrReplaced(); + (estate->es_processed)++; + + /* + * Note: instead of having to update the old index tuples associated with + * the heap tuple, all we do is form and insert new index tuples. This is + * because UPDATEs are actually DELETEs and INSERTs, and index tuple + * deletion is done later by VACUUM (see notes in ExecDelete). All we do + * here is insert new index tuples. -cim 9/27/89 + */ + + /* + * insert index entries for tuple + * + * Note: heap_update returns the tid (location) of the new tuple in the + * t_self field. 
+ */ + if (resultRelInfo->ri_NumIndices > 0) + ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false); + + /* AFTER ROW UPDATE Triggers */ + ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple); + + /* Process RETURNING if present */ + if (resultRelInfo->ri_projectReturning) + ExecProcessReturning(resultRelInfo->ri_projectReturning, + slot, planSlot, dest); +}
============================================================
--- tests/test_a_merge_8/parent 300798a2fc26fbf1e3b63d2e313a823d9226f183
+++ tests/test_a_merge_8/parent 300798a2fc26fbf1e3b63d2e313a823d9226f183
@@ -0,0 +1,1701 @@
+#include "postgres.h" + +#include "access/heapam.h" +#include "access/reloptions.h" +#include "access/transam.h" +#include "access/xact.h" +#include "catalog/heap.h" +#include "catalog/namespace.h" +#include "catalog/toasting.h" +#include "commands/tablespace.h" +#include "commands/trigger.h" +#include "executor/execdebug.h" +#include "executor/instrument.h" +#include "executor/nodeSubplan.h" +#include "miscadmin.h" +#include "optimizer/clauses.h" +#include "parser/parse_clause.h" +#include "parser/parsetree.h" +#include "storage/smgr.h" +#include "utils/acl.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" + + +typedef struct evalPlanQual +{ + Index rti; + EState *estate; + PlanState *planstate; + struct evalPlanQual *next; /* stack of active PlanQual plans */ + struct evalPlanQual *free; /* list of free PlanQual plans */ +} evalPlanQual; + +/* decls for local routines only used within this module */ +static void InitPlan(QueryDesc *queryDesc, int eflags); +static void initResultRelInfo(ResultRelInfo *resultRelInfo, + Index resultRelationIndex, + List *rangeTable, + CmdType operation, + bool doInstrument); +static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate, + CmdType operation, + long numberTuples, + ScanDirection direction, + DestReceiver *dest); +static void ExecSelect(TupleTableSlot *slot, + DestReceiver *dest, EState *estate); +static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, EState *estate); +static void ExecDelete(ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, EState *estate); +static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, EState *estate); +static void ExecProcessReturning(ProjectionInfo *projectReturning, + TupleTableSlot *tupleSlot, + TupleTableSlot *planSlot, + DestReceiver *dest); +static TupleTableSlot *EvalPlanQualNext(EState *estate); +static void EndEvalPlanQual(EState *estate); +static void ExecCheckRTEPerms(RangeTblEntry *rte); +static void ExecCheckXactReadOnly(Query *parsetree); +static void EvalPlanQualStart(evalPlanQual *epq, EState *estate, + evalPlanQual *priorepq); +static void EvalPlanQualStop(evalPlanQual *epq); +static void OpenIntoRel(QueryDesc *queryDesc); +static void CloseIntoRel(QueryDesc *queryDesc); +static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo); +static void intorel_receive(TupleTableSlot *slot, DestReceiver *self); +static void intorel_shutdown(DestReceiver *self); +static void intorel_destroy(DestReceiver *self); + +/* end of local decls */ + + +/* ---------------------------------------------------------------- + * ExecutorStart + * + * This routine must be called at the beginning of any execution of any + * query plan + * + * Takes a QueryDesc previously created by CreateQueryDesc (it's not real + * clear why we bother to separate
the two functions, but...). The tupDesc + * field of the QueryDesc is filled in to describe the tuples that will be + * returned, and the internal fields (estate and planstate) are set up. + * + * eflags contains flag bits as described in executor.h. + * + * NB: the CurrentMemoryContext when this is called will become the parent + * of the per-query context used for this Executor invocation. + * ---------------------------------------------------------------- + */ +void +ExecutorStart(QueryDesc *queryDesc, int eflags) +{ + EState *estate; + MemoryContext oldcontext; + + /* sanity checks: queryDesc must not be started already */ + Assert(queryDesc != NULL); + Assert(queryDesc->estate == NULL); + + /* + * If the transaction is read-only, we need to check if any writes are + * planned to non-temporary tables. EXPLAIN is considered read-only. + */ + if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY)) + ExecCheckXactReadOnly(queryDesc->parsetree); + + /* + * Build EState, switch into per-query memory context for startup. + */ + estate = CreateExecutorState(); + queryDesc->estate = estate; + + oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); + + /* + * Fill in parameters, if any, from queryDesc + */ + estate->es_param_list_info = queryDesc->params; + + if (queryDesc->plantree->nParamExec > 0) + estate->es_param_exec_vals = (ParamExecData *) + palloc0(queryDesc->plantree->nParamExec * sizeof(ParamExecData)); + + /* + * Copy other important information into the EState + */ + estate->es_snapshot = queryDesc->snapshot; + estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot; + estate->es_instrument = queryDesc->doInstrument; + + /* + * Initialize the plan state tree + */ + InitPlan(queryDesc, eflags); + + MemoryContextSwitchTo(oldcontext); +} + +/* ---------------------------------------------------------------- + * ExecutorRun + * + * This is the main routine of the executor module. It accepts + * the query descriptor from the traffic cop and executes the + * query plan. + * + * ExecutorStart must have been called already. + * + * If direction is NoMovementScanDirection then nothing is done + * except to start up/shut down the destination. Otherwise, + * we retrieve up to 'count' tuples in the specified direction. + * + * Note: count = 0 is interpreted as no portal limit, i.e., run to + * completion. + * + * ---------------------------------------------------------------- + */ +TupleTableSlot * +ExecutorRun(QueryDesc *queryDesc, + ScanDirection direction, long count) +{ + EState *estate; + CmdType operation; + DestReceiver *dest; + bool sendTuples; + TupleTableSlot *result; + MemoryContext oldcontext; + + /* sanity checks */ + Assert(queryDesc != NULL); + + estate = queryDesc->estate; + + Assert(estate != NULL); + + /* + * Switch into per-query memory context + */ + oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); + + /* + * extract information from the query descriptor and the query feature. 
+ */ + operation = queryDesc->operation; + dest = queryDesc->dest; + + /* + * startup tuple receiver, if we will be emitting tuples + */ + estate->es_processed = 0; + estate->es_lastoid = InvalidOid; + + sendTuples = (operation == CMD_SELECT || + queryDesc->parsetree->returningList); + + if (sendTuples) + (*dest->rStartup) (dest, operation, queryDesc->tupDesc); + + /* + * run plan + */ + if (ScanDirectionIsNoMovement(direction)) + result = NULL; + else + result = ExecutePlan(estate, + queryDesc->planstate, + operation, + count, + direction, + dest); + + /* + * shutdown tuple receiver, if we started it + */ + if (sendTuples) + (*dest->rShutdown) (dest); + + MemoryContextSwitchTo(oldcontext); + + return result; +} + +/* ---------------------------------------------------------------- + * ExecutorEnd + * + * This routine must be called at the end of execution of any + * query plan + * ---------------------------------------------------------------- + */ +void +ExecutorEnd(QueryDesc *queryDesc) +{ + EState *estate; + MemoryContext oldcontext; + + /* sanity checks */ + Assert(queryDesc != NULL); + + estate = queryDesc->estate; + + Assert(estate != NULL); + + /* + * Switch into per-query memory context to run ExecEndPlan + */ + oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); + + ExecEndPlan(queryDesc->planstate, estate); + + /* + * Close the SELECT INTO relation if any + */ + if (estate->es_select_into) + CloseIntoRel(queryDesc); + + /* + * Must switch out of context before destroying it + */ + MemoryContextSwitchTo(oldcontext); + + /* + * Release EState and per-query memory context. This should release + * everything the executor has allocated. + */ + FreeExecutorState(estate); + + /* Reset queryDesc fields that no longer point to anything */ + queryDesc->tupDesc = NULL; + queryDesc->estate = NULL; + queryDesc->planstate = NULL; +} + +/* ---------------------------------------------------------------- + * ExecutorRewind + * + * This routine may be called on an open queryDesc to rewind it + * to the start. + * ---------------------------------------------------------------- + */ +void +ExecutorRewind(QueryDesc *queryDesc) +{ + EState *estate; + MemoryContext oldcontext; + + /* sanity checks */ + Assert(queryDesc != NULL); + + estate = queryDesc->estate; + + Assert(estate != NULL); + + /* It's probably not sensible to rescan updating queries */ + Assert(queryDesc->operation == CMD_SELECT); + + /* + * Switch into per-query memory context + */ + oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); + + /* + * rescan plan + */ + ExecReScan(queryDesc->planstate, NULL); + + MemoryContextSwitchTo(oldcontext); +} + + +/* + * ExecCheckRTPerms + * Check access permissions for all relations listed in a range table. + */ +void +ExecCheckRTPerms(List *rangeTable) +{ + ListCell *l; + + foreach(l, rangeTable) + { + RangeTblEntry *rte = lfirst(l); + + ExecCheckRTEPerms(rte); + } +} + +/* + * ExecCheckRTEPerms + * Check access permissions for a single RTE. + */ +static void +ExecCheckRTEPerms(RangeTblEntry *rte) +{ + AclMode requiredPerms; + Oid relOid; + Oid userid; + + /* + * Only plain-relation RTEs need to be checked here. Subquery RTEs are + * checked by ExecInitSubqueryScan if the subquery is still a separate + * subquery --- if it's been pulled up into our query level then the RTEs + * are in our rangetable and will be checked here. Function RTEs are + * checked by init_fcache when the function is prepared for execution. + * Join and special RTEs need no checks. 
+ */ + if (rte->rtekind != RTE_RELATION) + return; + + /* + * No work if requiredPerms is empty. + */ + requiredPerms = rte->requiredPerms; + if (requiredPerms == 0) + return; + + relOid = rte->relid; + + /* + * userid to check as: current user unless we have a setuid indication. + * + * Note: GetUserId() is presently fast enough that there's no harm in + * calling it separately for each RTE. If that stops being true, we could + * call it once in ExecCheckRTPerms and pass the userid down from there. + * But for now, no need for the extra clutter. + */ + userid = rte->checkAsUser ? rte->checkAsUser : GetUserId(); + + /* + * We must have *all* the requiredPerms bits, so use aclmask not aclcheck. + */ + if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL) + != requiredPerms) + aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS, + get_rel_name(relOid)); +} + +/* + * Check that the query does not imply any writes to non-temp tables. + */ +static void +ExecCheckXactReadOnly(Query *parsetree) +{ + ListCell *l; + + /* + * CREATE TABLE AS or SELECT INTO? + * + * XXX should we allow this if the destination is temp? + */ + if (parsetree->into != NULL) + goto fail; + + /* Fail if write permissions are requested on any non-temp table */ + foreach(l, parsetree->rtable) + { + RangeTblEntry *rte = lfirst(l); + + if (rte->rtekind == RTE_SUBQUERY) + { + ExecCheckXactReadOnly(rte->subquery); + continue; + } + + if (rte->rtekind != RTE_RELATION) + continue; + + if ((rte->requiredPerms & (~ACL_SELECT)) == 0) + continue; + + if (isTempNamespace(get_rel_namespace(rte->relid))) + continue; + + goto fail; + } + + return; + +fail: + ereport(ERROR, + (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION), + errmsg("transaction is read-only"))); +} + + +/* ---------------------------------------------------------------- + * InitPlan + * + * Initializes the query plan: open files, allocate storage + * and start up the rule manager + * ---------------------------------------------------------------- + */ +static void +InitPlan(QueryDesc *queryDesc, int eflags) +{ + CmdType operation = queryDesc->operation; + Query *parseTree = queryDesc->parsetree; + Plan *plan = queryDesc->plantree; + EState *estate = queryDesc->estate; + PlanState *planstate; + List *rangeTable; + TupleDesc tupType; + ListCell *l; + + /* + * Do permissions checks. It's sufficient to examine the query's top + * rangetable here --- subplan RTEs will be checked during + * ExecInitSubPlan(). 
+ */ + ExecCheckRTPerms(parseTree->rtable); + + /* + * get information from query descriptor + */ + rangeTable = parseTree->rtable; + + /* + * initialize the node's execution state + */ + estate->es_range_table = rangeTable; + + /* + * if there is a result relation, initialize result relation stuff + */ + if (parseTree->resultRelation) + { + List *resultRelations = parseTree->resultRelations; + int numResultRelations; + ResultRelInfo *resultRelInfos; + + if (resultRelations != NIL) + { + /* + * Multiple result relations (due to inheritance) + * parseTree->resultRelations identifies them all + */ + ResultRelInfo *resultRelInfo; + + numResultRelations = list_length(resultRelations); + resultRelInfos = (ResultRelInfo *) + palloc(numResultRelations * sizeof(ResultRelInfo)); + resultRelInfo = resultRelInfos; + foreach(l, resultRelations) + { + initResultRelInfo(resultRelInfo, + lfirst_int(l), + rangeTable, + operation, + estate->es_instrument); + resultRelInfo++; + } + } + else + { + /* + * Single result relation identified by parseTree->resultRelation + */ + numResultRelations = 1; + resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo)); + initResultRelInfo(resultRelInfos, + parseTree->resultRelation, + rangeTable, + operation, + estate->es_instrument); + } + + estate->es_result_relations = resultRelInfos; + estate->es_num_result_relations = numResultRelations; + /* Initialize to first or only result rel */ + estate->es_result_relation_info = resultRelInfos; + } + else + { + /* + * if no result relation, then set state appropriately + */ + estate->es_result_relations = NULL; + estate->es_num_result_relations = 0; + estate->es_result_relation_info = NULL; + } + + /* + * Detect whether we're doing SELECT INTO. If so, set the es_into_oids + * flag appropriately so that the plan tree will be initialized with the + * correct tuple descriptors. (Other SELECT INTO stuff comes later.) + */ + estate->es_select_into = false; + if (operation == CMD_SELECT && parseTree->into != NULL) + { + estate->es_select_into = true; + estate->es_into_oids = interpretOidsOption(parseTree->intoOptions); + } + + /* + * Have to lock relations selected FOR UPDATE/FOR SHARE before we + * initialize the plan tree, else we'd be doing a lock upgrade. + * While we are at it, build the ExecRowMark list. + */ + estate->es_rowMarks = NIL; + foreach(l, parseTree->rowMarks) + { + RowMarkClause *rc = (RowMarkClause *) lfirst(l); + Oid relid = getrelid(rc->rti, rangeTable); + Relation relation; + ExecRowMark *erm; + + relation = heap_open(relid, RowShareLock); + erm = (ExecRowMark *) palloc(sizeof(ExecRowMark)); + erm->relation = relation; + erm->rti = rc->rti; + erm->forUpdate = rc->forUpdate; + erm->noWait = rc->noWait; + /* We'll set up ctidAttno below */ + erm->ctidAttNo = InvalidAttrNumber; + estate->es_rowMarks = lappend(estate->es_rowMarks, erm); + } + + /* + * initialize the executor "tuple" table. We need slots for all the plan + * nodes, plus possibly output slots for the junkfilter(s). At this point + * we aren't sure if we need junkfilters, so just add slots for them + * unconditionally. Also, if it's not a SELECT, set up a slot for use for + * trigger output tuples. 
+ */ + { + int nSlots = ExecCountSlotsNode(plan); + + if (parseTree->resultRelations != NIL) + nSlots += list_length(parseTree->resultRelations); + else + nSlots += 1; + if (operation != CMD_SELECT) + nSlots++; /* for es_trig_tuple_slot */ + if (parseTree->returningLists) + nSlots++; /* for RETURNING projection */ + + estate->es_tupleTable = ExecCreateTupleTable(nSlots); + + if (operation != CMD_SELECT) + estate->es_trig_tuple_slot = + ExecAllocTableSlot(estate->es_tupleTable); + } + + /* mark EvalPlanQual not active */ + estate->es_topPlan = plan; + estate->es_evalPlanQual = NULL; + estate->es_evTupleNull = NULL; + estate->es_evTuple = NULL; + estate->es_useEvalPlan = false; + + /* + * initialize the private state information for all the nodes in the query + * tree. This opens files, allocates storage and leaves us ready to start + * processing tuples. + */ + planstate = ExecInitNode(plan, estate, eflags); + + /* + * Get the tuple descriptor describing the type of tuples to return. (this + * is especially important if we are creating a relation with "SELECT + * INTO") + */ + tupType = ExecGetResultType(planstate); + + /* + * Initialize the junk filter if needed. SELECT and INSERT queries need a + * filter if there are any junk attrs in the tlist. INSERT and SELECT + * INTO also need a filter if the plan may return raw disk tuples (else + * heap_insert will be scribbling on the source relation!). UPDATE and + * DELETE always need a filter, since there's always a junk 'ctid' + * attribute present --- no need to look first. + */ + { + bool junk_filter_needed = false; + ListCell *tlist; + + switch (operation) + { + case CMD_SELECT: + case CMD_INSERT: + foreach(tlist, plan->targetlist) + { + TargetEntry *tle = (TargetEntry *) lfirst(tlist); + + if (tle->resjunk) + { + junk_filter_needed = true; + break; + } + } + if (!junk_filter_needed && + (operation == CMD_INSERT || estate->es_select_into) && + ExecMayReturnRawTuples(planstate)) + junk_filter_needed = true; + break; + case CMD_UPDATE: + case CMD_DELETE: + junk_filter_needed = true; + break; + default: + break; + } + + if (junk_filter_needed) + { + /* + * If there are multiple result relations, each one needs its own + * junk filter. Note this is only possible for UPDATE/DELETE, so + * we can't be fooled by some needing a filter and some not. + */ + if (parseTree->resultRelations != NIL) + { + PlanState **appendplans; + int as_nplans; + ResultRelInfo *resultRelInfo; + int i; + + /* Top plan had better be an Append here. */ + Assert(IsA(plan, Append)); + Assert(((Append *) plan)->isTarget); + Assert(IsA(planstate, AppendState)); + appendplans = ((AppendState *) planstate)->appendplans; + as_nplans = ((AppendState *) planstate)->as_nplans; + Assert(as_nplans == estate->es_num_result_relations); + resultRelInfo = estate->es_result_relations; + for (i = 0; i < as_nplans; i++) + { + PlanState *subplan = appendplans[i]; + JunkFilter *j; + + j = ExecInitJunkFilter(subplan->plan->targetlist, + resultRelInfo->ri_RelationDesc->rd_att->tdhasoid, + ExecAllocTableSlot(estate->es_tupleTable)); + /* + * Since it must be UPDATE/DELETE, there had better be + * a "ctid" junk attribute in the tlist ... but ctid could + * be at a different resno for each result relation. + * We look up the ctid resnos now and save them in the + * junkfilters. 
+ */ + j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid"); + if (!AttributeNumberIsValid(j->jf_junkAttNo)) + elog(ERROR, "could not find junk ctid column"); + resultRelInfo->ri_junkFilter = j; + resultRelInfo++; + } + + /* + * Set active junkfilter too; at this point ExecInitAppend has + * already selected an active result relation... + */ + estate->es_junkFilter = + estate->es_result_relation_info->ri_junkFilter; + } + else + { + /* Normal case with just one JunkFilter */ + JunkFilter *j; + + j = ExecInitJunkFilter(planstate->plan->targetlist, + tupType->tdhasoid, + ExecAllocTableSlot(estate->es_tupleTable)); + estate->es_junkFilter = j; + if (estate->es_result_relation_info) + estate->es_result_relation_info->ri_junkFilter = j; + + if (operation == CMD_SELECT) + { + /* For SELECT, want to return the cleaned tuple type */ + tupType = j->jf_cleanTupType; + /* For SELECT FOR UPDATE/SHARE, find the ctid attrs now */ + foreach(l, estate->es_rowMarks) + { + ExecRowMark *erm = (ExecRowMark *) lfirst(l); + char resname[32]; + + snprintf(resname, sizeof(resname), "ctid%u", erm->rti); + erm->ctidAttNo = ExecFindJunkAttribute(j, resname); + if (!AttributeNumberIsValid(erm->ctidAttNo)) + elog(ERROR, "could not find junk \"%s\" column", + resname); + } + } + else if (operation == CMD_UPDATE || operation == CMD_DELETE) + { + /* For UPDATE/DELETE, find the ctid junk attr now */ + j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid"); + if (!AttributeNumberIsValid(j->jf_junkAttNo)) + elog(ERROR, "could not find junk ctid column"); + } + } + } + else + estate->es_junkFilter = NULL; + } + + /* + * Initialize RETURNING projections if needed. + */ + if (parseTree->returningLists) + { + TupleTableSlot *slot; + ExprContext *econtext; + ResultRelInfo *resultRelInfo; + + /* + * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case. + * We assume all the sublists will generate the same output tupdesc. + */ + tupType = ExecTypeFromTL((List *) linitial(parseTree->returningLists), + false); + + /* Set up a slot for the output of the RETURNING projection(s) */ + slot = ExecAllocTableSlot(estate->es_tupleTable); + ExecSetSlotDescriptor(slot, tupType); + /* Need an econtext too */ + econtext = CreateExprContext(estate); + + /* + * Build a projection for each result rel. Note that any SubPlans in + * the RETURNING lists get attached to the topmost plan node. + */ + Assert(list_length(parseTree->returningLists) == estate->es_num_result_relations); + resultRelInfo = estate->es_result_relations; + foreach(l, parseTree->returningLists) + { + List *rlist = (List *) lfirst(l); + List *rliststate; + + rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate); + resultRelInfo->ri_projectReturning = + ExecBuildProjectionInfo(rliststate, econtext, slot); + resultRelInfo++; + } + + /* + * Because we already ran ExecInitNode() for the top plan node, any + * subplans we just attached to it won't have been initialized; so we + * have to do it here. (Ugly, but the alternatives seem worse.) + */ + foreach(l, planstate->subPlan) + { + SubPlanState *sstate = (SubPlanState *) lfirst(l); + + Assert(IsA(sstate, SubPlanState)); + if (sstate->planstate == NULL) /* already inited? */ + ExecInitSubPlan(sstate, estate, eflags); + } + } + + queryDesc->tupDesc = tupType; + queryDesc->planstate = planstate; + + /* + * If doing SELECT INTO, initialize the "into" relation. We must wait + * till now so we have the "clean" result tuple type to create the new + * table from. + * + * If EXPLAIN, skip creating the "into" relation. 
+ */ + if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY)) + OpenIntoRel(queryDesc); +} + +/* + * Initialize ResultRelInfo data for one result relation + */ +static void +initResultRelInfo(ResultRelInfo *resultRelInfo, + Index resultRelationIndex, + List *rangeTable, + CmdType operation, + bool doInstrument) +{ + Oid resultRelationOid; + Relation resultRelationDesc; + + resultRelationOid = getrelid(resultRelationIndex, rangeTable); + resultRelationDesc = heap_open(resultRelationOid, RowExclusiveLock); + + switch (resultRelationDesc->rd_rel->relkind) + { + case RELKIND_SEQUENCE: + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot change sequence \"%s\"", + RelationGetRelationName(resultRelationDesc)))); + break; + case RELKIND_TOASTVALUE: + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot change TOAST relation \"%s\"", + RelationGetRelationName(resultRelationDesc)))); + break; + case RELKIND_VIEW: + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot change view \"%s\"", + RelationGetRelationName(resultRelationDesc)))); + break; + } + + MemSet(resultRelInfo, 0, sizeof(ResultRelInfo)); + resultRelInfo->type = T_ResultRelInfo; + resultRelInfo->ri_RangeTableIndex = resultRelationIndex; + resultRelInfo->ri_RelationDesc = resultRelationDesc; + resultRelInfo->ri_NumIndices = 0; + resultRelInfo->ri_IndexRelationDescs = NULL; + resultRelInfo->ri_IndexRelationInfo = NULL; + /* make a copy so as not to depend on relcache info not changing... */ + resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc); + if (resultRelInfo->ri_TrigDesc) + { + int n = resultRelInfo->ri_TrigDesc->numtriggers; + + resultRelInfo->ri_TrigFunctions = (FmgrInfo *) + palloc0(n * sizeof(FmgrInfo)); + if (doInstrument) + resultRelInfo->ri_TrigInstrument = InstrAlloc(n); + else + resultRelInfo->ri_TrigInstrument = NULL; + } + else + { + resultRelInfo->ri_TrigFunctions = NULL; + resultRelInfo->ri_TrigInstrument = NULL; + } + resultRelInfo->ri_ConstraintExprs = NULL; + resultRelInfo->ri_junkFilter = NULL; + resultRelInfo->ri_projectReturning = NULL; + + /* + * If there are indices on the result relation, open them and save + * descriptors in the result relation info, so that we can add new index + * entries for the tuples we add/update. We need not do this for a + * DELETE, however, since deletion doesn't affect indexes. + */ + if (resultRelationDesc->rd_rel->relhasindex && + operation != CMD_DELETE) + ExecOpenIndices(resultRelInfo); +} + +/* + * ExecContextForcesOids + * + * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO, + * we need to ensure that result tuples have space for an OID iff they are + * going to be stored into a relation that has OIDs. In other contexts + * we are free to choose whether to leave space for OIDs in result tuples + * (we generally don't want to, but we do if a physical-tlist optimization + * is possible). This routine checks the plan context and returns TRUE if the + * choice is forced, FALSE if the choice is not forced. In the TRUE case, + * *hasoids is set to the required value. + * + * One reason this is ugly is that all plan nodes in the plan tree will emit + * tuples with space for an OID, though we really only need the topmost node + * to do so. However, node types like Sort don't project new tuples but just + * return their inputs, and in those cases the requirement propagates down + * to the input node. 
Eventually we might make this code smart enough to + * recognize how far down the requirement really goes, but for now we just + * make all plan nodes do the same thing if the top level forces the choice. + * + * We assume that estate->es_result_relation_info is already set up to + * describe the target relation. Note that in an UPDATE that spans an + * inheritance tree, some of the target relations may have OIDs and some not. + * We have to make the decisions on a per-relation basis as we initialize + * each of the child plans of the topmost Append plan. + * + * SELECT INTO is even uglier, because we don't have the INTO relation's + * descriptor available when this code runs; we have to look aside at a + * flag set by InitPlan(). + */ +bool +ExecContextForcesOids(PlanState *planstate, bool *hasoids) +{ + if (planstate->state->es_select_into) + { + *hasoids = planstate->state->es_into_oids; + return true; + } + else + { + ResultRelInfo *ri = planstate->state->es_result_relation_info; + + if (ri != NULL) + { + Relation rel = ri->ri_RelationDesc; + + if (rel != NULL) + { + *hasoids = rel->rd_rel->relhasoids; + return true; + } + } + } + + return false; +} + +/* ---------------------------------------------------------------- + * ExecEndPlan + * + * Cleans up the query plan -- closes files and frees up storage + * + * NOTE: we are no longer very worried about freeing storage per se + * in this code; FreeExecutorState should be guaranteed to release all + * memory that needs to be released. What we are worried about doing + * is closing relations and dropping buffer pins. Thus, for example, + * tuple tables must be cleared or dropped to ensure pins are released. + * ---------------------------------------------------------------- + */ +void +ExecEndPlan(PlanState *planstate, EState *estate) +{ + ResultRelInfo *resultRelInfo; + int i; + ListCell *l; + + /* + * shut down any PlanQual processing we were doing + */ + if (estate->es_evalPlanQual != NULL) + EndEvalPlanQual(estate); + + /* + * shut down the node-type-specific query processing + */ + ExecEndNode(planstate); + + /* + * destroy the executor "tuple" table. + */ + ExecDropTupleTable(estate->es_tupleTable, true); + estate->es_tupleTable = NULL; + + /* + * close the result relation(s) if any, but hold locks until xact commit. + */ + resultRelInfo = estate->es_result_relations; + for (i = estate->es_num_result_relations; i > 0; i--) + { + /* Close indices and then the relation itself */ + ExecCloseIndices(resultRelInfo); + heap_close(resultRelInfo->ri_RelationDesc, NoLock); + resultRelInfo++; + } + + /* + * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks + */ + foreach(l, estate->es_rowMarks) + { + ExecRowMark *erm = lfirst(l); + + heap_close(erm->relation, NoLock); + } +} + +/* ---------------------------------------------------------------- + * ExecutePlan + * + * processes the query plan to retrieve 'numberTuples' tuples in the + * direction specified. + * + * Retrieves all tuples if numberTuples is 0 + * + * result is either a slot containing the last tuple in the case + * of a SELECT or NULL otherwise. 
+ * + * Note: the ctid attribute is a 'junk' attribute that is removed before the + * user can see it + * ---------------------------------------------------------------- + */ +static TupleTableSlot * +ExecutePlan(EState *estate, + PlanState *planstate, + CmdType operation, + long numberTuples, + ScanDirection direction, + DestReceiver *dest) +{ + JunkFilter *junkfilter; + TupleTableSlot *planSlot; + TupleTableSlot *slot; + ItemPointer tupleid = NULL; + ItemPointerData tuple_ctid; + long current_tuple_count; + TupleTableSlot *result; + + /* + * initialize local variables + */ + current_tuple_count = 0; + result = NULL; + + /* + * Set the direction. + */ + estate->es_direction = direction; + + /* + * Process BEFORE EACH STATEMENT triggers + */ + switch (operation) + { + case CMD_UPDATE: + ExecBSUpdateTriggers(estate, estate->es_result_relation_info); + break; + case CMD_DELETE: + ExecBSDeleteTriggers(estate, estate->es_result_relation_info); + break; + case CMD_INSERT: + ExecBSInsertTriggers(estate, estate->es_result_relation_info); + break; + default: + /* do nothing */ + break; + } + + /* + * Loop until we've processed the proper number of tuples from the plan. + */ + + for (;;) + { + /* Reset the per-output-tuple exprcontext */ + ResetPerTupleExprContext(estate); + + /* + * Execute the plan and obtain a tuple + */ +lnext: ; + if (estate->es_useEvalPlan) + { + planSlot = EvalPlanQualNext(estate); + if (TupIsNull(planSlot)) + planSlot = ExecProcNode(planstate); + } + else + planSlot = ExecProcNode(planstate); + + /* + * if the tuple is null, then we assume there is nothing more to + * process so we just return null... + */ + if (TupIsNull(planSlot)) + { + result = NULL; + break; + } + slot = planSlot; + + /* + * if we have a junk filter, then project a new tuple with the junk + * removed. + * + * Store this new "clean" tuple in the junkfilter's resultSlot. + * (Formerly, we stored it back over the "dirty" tuple, which is WRONG + * because that tuple slot has the wrong descriptor.) + * + * Also, extract all the junk information we need. + */ + if ((junkfilter = estate->es_junkFilter) != NULL) + { + Datum datum; + bool isNull; + + /* + * extract the 'ctid' junk attribute. + */ + if (operation == CMD_UPDATE || operation == CMD_DELETE) + { + datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo, + &isNull); + /* shouldn't ever get a null result... */ + if (isNull) + elog(ERROR, "ctid is NULL"); + + tupleid = (ItemPointer) DatumGetPointer(datum); + tuple_ctid = *tupleid; /* make sure we don't free the ctid!! */ + tupleid = &tuple_ctid; + } + + /* + * Process any FOR UPDATE or FOR SHARE locking requested. + */ + else if (estate->es_rowMarks != NIL) + { + ListCell *l; + + lmark: ; + foreach(l, estate->es_rowMarks) + { + ExecRowMark *erm = lfirst(l); + HeapTupleData tuple; + Buffer buffer; + ItemPointerData update_ctid; + TransactionId update_xmax; + TupleTableSlot *newSlot; + LockTupleMode lockmode; + HTSU_Result test; + + datum = ExecGetJunkAttribute(slot, + erm->ctidAttNo, + &isNull); + /* shouldn't ever get a null result... 
*/ + if (isNull) + elog(ERROR, "ctid is NULL"); + + tuple.t_self = *((ItemPointer) DatumGetPointer(datum)); + + if (erm->forUpdate) + lockmode = LockTupleExclusive; + else + lockmode = LockTupleShared; + + test = heap_lock_tuple(erm->relation, &tuple, &buffer, + &update_ctid, &update_xmax, + estate->es_snapshot->curcid, + lockmode, erm->noWait); + ReleaseBuffer(buffer); + switch (test) + { + case HeapTupleSelfUpdated: + /* treat it as deleted; do not process */ + goto lnext; + + case HeapTupleMayBeUpdated: + break; + + case HeapTupleUpdated: + if (IsXactIsoLevelSerializable) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + if (!ItemPointerEquals(&update_ctid, + &tuple.t_self)) + { + /* updated, so look at updated version */ + newSlot = EvalPlanQual(estate, + erm->rti, + &update_ctid, + update_xmax, + estate->es_snapshot->curcid); + if (!TupIsNull(newSlot)) + { + slot = planSlot = newSlot; + estate->es_useEvalPlan = true; + goto lmark; + } + } + + /* + * if tuple was deleted or PlanQual failed for + * updated tuple - we must not return this tuple! + */ + goto lnext; + + default: + elog(ERROR, "unrecognized heap_lock_tuple status: %u", + test); + return NULL; + } + } + } + + /* + * Create a new "clean" tuple with all junk attributes removed. We + * don't need to do this for DELETE, however (there will in fact + * be no non-junk attributes in a DELETE!) + */ + if (operation != CMD_DELETE) + slot = ExecFilterJunk(junkfilter, slot); + } + + /* + * now that we have a tuple, do the appropriate thing with it.. either + * return it to the user, add it to a relation someplace, delete it + * from a relation, or modify some of its attributes. + */ + switch (operation) + { + case CMD_SELECT: + ExecSelect(slot, dest, estate); + result = slot; + break; + + case CMD_INSERT: + ExecInsert(slot, tupleid, planSlot, dest, estate); + result = NULL; + break; + + case CMD_DELETE: + ExecDelete(tupleid, planSlot, dest, estate); + result = NULL; + break; + + case CMD_UPDATE: + ExecUpdate(slot, tupleid, planSlot, dest, estate); + result = NULL; + break; + + default: + elog(ERROR, "unrecognized operation code: %d", + (int) operation); + result = NULL; + break; + } + + /* + * check our tuple count.. if we've processed the proper number then + * quit, else loop again and process more tuples. Zero numberTuples + * means no limit. + */ + current_tuple_count++; + if (numberTuples && numberTuples == current_tuple_count) + break; + } + + /* + * Process AFTER EACH STATEMENT triggers + */ + switch (operation) + { + case CMD_UPDATE: + ExecASUpdateTriggers(estate, estate->es_result_relation_info); + break; + case CMD_DELETE: + ExecASDeleteTriggers(estate, estate->es_result_relation_info); + break; + case CMD_INSERT: + ExecASInsertTriggers(estate, estate->es_result_relation_info); + break; + default: + /* do nothing */ + break; + } + + /* + * here, result is either a slot containing a tuple in the case of a + * SELECT or NULL otherwise. + */ + return result; +} + +/* ---------------------------------------------------------------- + * ExecSelect + * + * SELECTs are easy.. we just pass the tuple to the appropriate + * output function. 
+ * ---------------------------------------------------------------- + */ +static void +ExecSelect(TupleTableSlot *slot, + DestReceiver *dest, + EState *estate) +{ + (*dest->receiveSlot) (slot, dest); + IncrRetrieved(); + (estate->es_processed)++; +} + +/* ---------------------------------------------------------------- + * ExecInsert + * + * INSERTs are trickier.. we have to insert the tuple into + * the base relation and insert appropriate tuples into the + * index relations. + * ---------------------------------------------------------------- + */ +static void +ExecInsert(TupleTableSlot *slot, + ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, + EState *estate) +{ + HeapTuple tuple; + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + Oid newId; + + /* + * get the heap tuple out of the tuple table slot, making sure we have a + * writable copy + */ + tuple = ExecMaterializeSlot(slot); + + /* + * get information on the (current) result relation + */ + resultRelInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelInfo->ri_RelationDesc; + + /* BEFORE ROW INSERT Triggers */ + if (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0) + { + HeapTuple newtuple; + + newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple); + + if (newtuple == NULL) /* "do nothing" */ + return; + + if (newtuple != tuple) /* modified by Trigger(s) */ + { + /* + * Put the modified tuple into a slot for convenience of routines + * below. We assume the tuple was allocated in per-tuple memory + * context, and therefore will go away by itself. The tuple table + * slot should not try to clear it. + */ + TupleTableSlot *newslot = estate->es_trig_tuple_slot; + + if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor) + ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor); + ExecStoreTuple(newtuple, newslot, InvalidBuffer, false); + slot = newslot; + tuple = newtuple; + } + } + + /* + * Check the constraints of the tuple + */ + if (resultRelationDesc->rd_att->constr) + ExecConstraints(resultRelInfo, slot, estate); + + /* + * insert the tuple + * + * Note: heap_insert returns the tid (location) of the new tuple in the + * t_self field. 
+ */ + newId = heap_insert(resultRelationDesc, tuple, + estate->es_snapshot->curcid, + true, true); + + IncrAppended(); + (estate->es_processed)++; + estate->es_lastoid = newId; + setLastTid(&(tuple->t_self)); + + /* + * insert index entries for tuple + */ + if (resultRelInfo->ri_NumIndices > 0) + ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false); + + /* AFTER ROW INSERT Triggers */ + ExecARInsertTriggers(estate, resultRelInfo, tuple); + + /* Process RETURNING if present */ + if (resultRelInfo->ri_projectReturning) + ExecProcessReturning(resultRelInfo->ri_projectReturning, + slot, planSlot, dest); +} + +/* ---------------------------------------------------------------- + * ExecDelete + * + * DELETE is like UPDATE, except that we delete the tuple and no + * index modifications are needed + * ---------------------------------------------------------------- + */ +static void +ExecDelete(ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, + EState *estate) +{ + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + HTSU_Result result; + ItemPointerData update_ctid; + TransactionId update_xmax; + + /* + * get information on the (current) result relation + */ + resultRelInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelInfo->ri_RelationDesc; + + /* BEFORE ROW DELETE Triggers */ + if (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0) + { + bool dodelete; + + dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid, + estate->es_snapshot->curcid); + + if (!dodelete) /* "do nothing" */ + return; + } + + /* + * delete the tuple + * + * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that + * the row to be deleted is visible to that snapshot, and throw a can't- + * serialize error if not. This is a special-case behavior needed for + * referential integrity updates in serializable transactions. + */ +ldelete:; + result = heap_delete(resultRelationDesc, tupleid, + &update_ctid, &update_xmax, + estate->es_snapshot->curcid, + estate->es_crosscheck_snapshot, + true /* wait for commit */ ); + switch (result) + { + case HeapTupleSelfUpdated: + /* already deleted by self; nothing to do */ + return; + + case HeapTupleMayBeUpdated: + break; + + case HeapTupleUpdated: + if (IsXactIsoLevelSerializable) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + else if (!ItemPointerEquals(tupleid, &update_ctid)) + { + TupleTableSlot *epqslot; + + epqslot = EvalPlanQual(estate, + resultRelInfo->ri_RangeTableIndex, + &update_ctid, + update_xmax, + estate->es_snapshot->curcid); + if (!TupIsNull(epqslot)) + { + *tupleid = update_ctid; + goto ldelete; + } + } + /* tuple already deleted; nothing to do */ + return; + + default: + elog(ERROR, "unrecognized heap_delete status: %u", result); + return; + } + + IncrDeleted(); + (estate->es_processed)++; + + /* + * Note: Normally one would think that we have to delete index tuples + * associated with the heap tuple now... + * + * ... but in POSTGRES, we have no need to do this because VACUUM will + * take care of it later. We can't delete index tuples immediately + * anyway, since the tuple is still visible to other transactions. 
+ */ + + /* AFTER ROW DELETE Triggers */ + ExecARDeleteTriggers(estate, resultRelInfo, tupleid); + + /* Process RETURNING if present */ + if (resultRelInfo->ri_projectReturning) + { + /* + * We have to put the target tuple into a slot, which means first we + * gotta fetch it. We can use the trigger tuple slot. + */ + TupleTableSlot *slot = estate->es_trig_tuple_slot; + HeapTupleData deltuple; + Buffer delbuffer; + + deltuple.t_self = *tupleid; + if (!heap_fetch(resultRelationDesc, SnapshotAny, + &deltuple, &delbuffer, false, NULL)) + elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING"); + + if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc)) + ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc)); + ExecStoreTuple(&deltuple, slot, InvalidBuffer, false); + + ExecProcessReturning(resultRelInfo->ri_projectReturning, + slot, planSlot, dest); + + ExecClearTuple(slot); + ReleaseBuffer(delbuffer); + } +} + +/* ---------------------------------------------------------------- + * ExecUpdate + * + * note: we can't run UPDATE queries with transactions + * off because UPDATEs are actually INSERTs and our + * scan will mistakenly loop forever, updating the tuple + * it just inserted.. This should be fixed but until it + * is, we don't want to get stuck in an infinite loop + * which corrupts your database.. + * ---------------------------------------------------------------- + */ +static void +ExecUpdate(TupleTableSlot *slot, + ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, + EState *estate) +{ + HeapTuple tuple; + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + HTSU_Result result; + ItemPointerData update_ctid; + TransactionId update_xmax; + + /* + * abort the operation if not running transactions + */ + if (IsBootstrapProcessingMode()) + elog(ERROR, "cannot UPDATE during bootstrap"); + + /* + * get the heap tuple out of the tuple table slot, making sure we have a + * writable copy + */ + tuple = ExecMaterializeSlot(slot); + + /* + * get information on the (current) result relation + */ + resultRelInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelInfo->ri_RelationDesc; + + /* BEFORE ROW UPDATE Triggers */ + if (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0) + { + HeapTuple newtuple; + + newtuple = ExecBRUpdateTriggers(estate, resultRelInfo, + tupleid, tuple, + estate->es_snapshot->curcid); + + if (newtuple == NULL) /* "do nothing" */ + return; + + if (newtuple != tuple) /* modified by Trigger(s) */ + { + /* + * Put the modified tuple into a slot for convenience of routines + * below. We assume the tuple was allocated in per-tuple memory + * context, and therefore will go away by itself. The tuple table + * slot should not try to clear it. + */ + TupleTableSlot *newslot = estate->es_trig_tuple_slot; + + if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor) + ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor); + ExecStoreTuple(newtuple, newslot, InvalidBuffer, false); + slot = newslot; + tuple = newtuple; + } + } + + /* + * Check the constraints of the tuple + * + * If we generate a new candidate tuple after EvalPlanQual testing, we + * must loop back here and recheck constraints. (We don't need to redo + * triggers, however. If there are any BEFORE triggers then trigger.c + * will have done heap_lock_tuple to lock the correct tuple, so there's no + * need to do them again.) 
+ */ +lreplace:; + if (resultRelationDesc->rd_att->constr) + ExecConstraints(resultRelInfo, slot, estate); + + /* + * replace the heap tuple + * + * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that + * the row to be updated is visible to that snapshot, and throw a can't- + * serialize error if not. This is a special-case behavior needed for + * referential integrity updates in serializable transactions. + */ + result = heap_update(resultRelationDesc, tupleid, tuple, + &update_ctid, &update_xmax, + estate->es_snapshot->curcid, + estate->es_crosscheck_snapshot, + true /* wait for commit */ ); + switch (result) + { + case HeapTupleSelfUpdated: + /* already deleted by self; nothing to do */ + return; + + case HeapTupleMayBeUpdated: + break; + + case HeapTupleUpdated: + if (IsXactIsoLevelSerializable) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + else if (!ItemPointerEquals(tupleid, &update_ctid)) + { + TupleTableSlot *epqslot; + + epqslot = EvalPlanQual(estate, + resultRelInfo->ri_RangeTableIndex, + &update_ctid, + update_xmax, + estate->es_snapshot->curcid); + if (!TupIsNull(epqslot)) + { + *tupleid = update_ctid; + slot = ExecFilterJunk(estate->es_junkFilter, epqslot); + tuple = ExecMaterializeSlot(slot); + goto lreplace; + } + } + /* tuple already deleted; nothing to do */ + return; + + default: + elog(ERROR, "unrecognized heap_update status: %u", result); + return; + } + + IncrReplaced(); + (estate->es_processed)++; + + /* + * Note: instead of having to update the old index tuples associated with + * the heap tuple, all we do is form and insert new index tuples. This is + * because UPDATEs are actually DELETEs and INSERTs, and index tuple + * deletion is done later by VACUUM (see notes in ExecDelete). All we do + * here is insert new index tuples. -cim 9/27/89 + */ + + /* + * insert index entries for tuple + * + * Note: heap_update returns the tid (location) of the new tuple in the + * t_self field. 
+ */ + if (resultRelInfo->ri_NumIndices > 0) + ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false); + + /* AFTER ROW UPDATE Triggers */ + ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple); + + /* Process RETURNING if present */ + if (resultRelInfo->ri_projectReturning) + ExecProcessReturning(resultRelInfo->ri_projectReturning, + slot, planSlot, dest); +} ============================================================ --- tests/test_a_merge_8/right 13cb3ac0b7c84eaf7aa7d8ba22670bd3cd22060a +++ tests/test_a_merge_8/right 13cb3ac0b7c84eaf7aa7d8ba22670bd3cd22060a @@ -0,0 +1,1791 @@ +#include "postgres.h" + +#include "access/heapam.h" +#include "access/reloptions.h" +#include "access/transam.h" +#include "access/xact.h" +#include "catalog/heap.h" +#include "catalog/namespace.h" +#include "catalog/toasting.h" +#include "commands/tablespace.h" +#include "commands/trigger.h" +#include "executor/execdebug.h" +#include "executor/instrument.h" +#include "executor/nodeSubplan.h" +#include "miscadmin.h" +#include "optimizer/clauses.h" +#include "parser/parse_clause.h" +#include "parser/parsetree.h" +#include "storage/smgr.h" +#include "utils/acl.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" + + +typedef struct evalPlanQual +{ + Index rti; + EState *estate; + PlanState *planstate; + struct evalPlanQual *next; /* stack of active PlanQual plans */ + struct evalPlanQual *free; /* list of free PlanQual plans */ +} evalPlanQual; + +/* decls for local routines only used within this module */ +static void InitPlan(QueryDesc *queryDesc, int eflags); +static void initResultRelInfo(ResultRelInfo *resultRelInfo, + Index resultRelationIndex, + List *rangeTable, + CmdType operation, + bool doInstrument); +static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate, + CmdType operation, + long numberTuples, + ScanDirection direction, + DestReceiver *dest); +static void ExecSelect(TupleTableSlot *slot, + DestReceiver *dest, EState *estate); +static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, EState *estate); +static void ExecDelete(ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, EState *estate); +static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, EState *estate); +static void ExecProcessReturning(ProjectionInfo *projectReturning, + TupleTableSlot *tupleSlot, + TupleTableSlot *planSlot, + DestReceiver *dest); +static TupleTableSlot *EvalPlanQualNext(EState *estate); +static void EndEvalPlanQual(EState *estate); +static void ExecCheckRTEPerms(RangeTblEntry *rte); +static void ExecCheckXactReadOnly(Query *parsetree); +static void EvalPlanQualStart(evalPlanQual *epq, EState *estate, + evalPlanQual *priorepq); +static void EvalPlanQualStop(evalPlanQual *epq); +static void OpenIntoRel(QueryDesc *queryDesc); +static void CloseIntoRel(QueryDesc *queryDesc); +static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo); +static void intorel_receive(TupleTableSlot *slot, DestReceiver *self); +static void intorel_shutdown(DestReceiver *self); +static void intorel_destroy(DestReceiver *self); + +/* end of local decls */ + + +/* ---------------------------------------------------------------- + * ExecutorStart + * + * This routine must be called at the beginning of any execution of any + * query plan + * + * Takes a QueryDesc previously created by CreateQueryDesc (it's not real + * clear why we bother to separate the 
two functions, but...). The tupDesc + * field of the QueryDesc is filled in to describe the tuples that will be + * returned, and the internal fields (estate and planstate) are set up. + * + * eflags contains flag bits as described in executor.h. + * + * NB: the CurrentMemoryContext when this is called will become the parent + * of the per-query context used for this Executor invocation. + * ---------------------------------------------------------------- + */ +void +ExecutorStart(QueryDesc *queryDesc, int eflags) +{ + EState *estate; + MemoryContext oldcontext; + + /* sanity checks: queryDesc must not be started already */ + Assert(queryDesc != NULL); + Assert(queryDesc->estate == NULL); + + /* + * If the transaction is read-only, we need to check if any writes are + * planned to non-temporary tables. EXPLAIN is considered read-only. + */ + if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY)) + ExecCheckXactReadOnly(queryDesc->parsetree); + + /* + * Build EState, switch into per-query memory context for startup. + */ + estate = CreateExecutorState(); + queryDesc->estate = estate; + + oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); + + /* + * Fill in parameters, if any, from queryDesc + */ + estate->es_param_list_info = queryDesc->params; + + if (queryDesc->plantree->nParamExec > 0) + estate->es_param_exec_vals = (ParamExecData *) + palloc0(queryDesc->plantree->nParamExec * sizeof(ParamExecData)); + + /* + * Copy other important information into the EState + */ + estate->es_snapshot = queryDesc->snapshot; + estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot; + estate->es_instrument = queryDesc->doInstrument; + + /* + * Initialize the plan state tree + */ + InitPlan(queryDesc, eflags); + + MemoryContextSwitchTo(oldcontext); +} + +/* ---------------------------------------------------------------- + * ExecutorRun + * + * This is the main routine of the executor module. It accepts + * the query descriptor from the traffic cop and executes the + * query plan. + * + * ExecutorStart must have been called already. + * + * If direction is NoMovementScanDirection then nothing is done + * except to start up/shut down the destination. Otherwise, + * we retrieve up to 'count' tuples in the specified direction. + * + * Note: count = 0 is interpreted as no portal limit, i.e., run to + * completion. + * + * ---------------------------------------------------------------- + */ +TupleTableSlot * +ExecutorRun(QueryDesc *queryDesc, + ScanDirection direction, long count) +{ + EState *estate; + CmdType operation; + DestReceiver *dest; + bool sendTuples; + TupleTableSlot *result; + MemoryContext oldcontext; + + /* sanity checks */ + Assert(queryDesc != NULL); + + estate = queryDesc->estate; + + Assert(estate != NULL); + + /* + * Switch into per-query memory context + */ + oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); + + /* + * extract information from the query descriptor and the query feature. 
+ */ + operation = queryDesc->operation; + dest = queryDesc->dest; + + /* + * startup tuple receiver, if we will be emitting tuples + */ + estate->es_processed = 0; + estate->es_lastoid = InvalidOid; + + sendTuples = (operation == CMD_SELECT || + queryDesc->parsetree->returningList); + + if (sendTuples) + (*dest->rStartup) (dest, operation, queryDesc->tupDesc); + + /* + * run plan + */ + if (ScanDirectionIsNoMovement(direction)) + result = NULL; + else + result = ExecutePlan(estate, + queryDesc->planstate, + operation, + count, + direction, + dest); + + /* + * shutdown tuple receiver, if we started it + */ + if (sendTuples) + (*dest->rShutdown) (dest); + + MemoryContextSwitchTo(oldcontext); + + return result; +} + +/* ---------------------------------------------------------------- + * ExecutorEnd + * + * This routine must be called at the end of execution of any + * query plan + * ---------------------------------------------------------------- + */ +void +ExecutorEnd(QueryDesc *queryDesc) +{ + EState *estate; + MemoryContext oldcontext; + + /* sanity checks */ + Assert(queryDesc != NULL); + + estate = queryDesc->estate; + + Assert(estate != NULL); + + /* + * Switch into per-query memory context to run ExecEndPlan + */ + oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); + + ExecEndPlan(queryDesc->planstate, estate); + + /* + * Close the SELECT INTO relation if any + */ + if (estate->es_select_into) + CloseIntoRel(queryDesc); + + /* + * Must switch out of context before destroying it + */ + MemoryContextSwitchTo(oldcontext); + + /* + * Release EState and per-query memory context. This should release + * everything the executor has allocated. + */ + FreeExecutorState(estate); + + /* Reset queryDesc fields that no longer point to anything */ + queryDesc->tupDesc = NULL; + queryDesc->estate = NULL; + queryDesc->planstate = NULL; +} + +/* ---------------------------------------------------------------- + * ExecutorRewind + * + * This routine may be called on an open queryDesc to rewind it + * to the start. + * ---------------------------------------------------------------- + */ +void +ExecutorRewind(QueryDesc *queryDesc) +{ + EState *estate; + MemoryContext oldcontext; + + /* sanity checks */ + Assert(queryDesc != NULL); + + estate = queryDesc->estate; + + Assert(estate != NULL); + + /* It's probably not sensible to rescan updating queries */ + Assert(queryDesc->operation == CMD_SELECT); + + /* + * Switch into per-query memory context + */ + oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); + + /* + * rescan plan + */ + ExecReScan(queryDesc->planstate, NULL); + + MemoryContextSwitchTo(oldcontext); +} + + +/* + * ExecCheckRTPerms + * Check access permissions for all relations listed in a range table. + */ +void +ExecCheckRTPerms(List *rangeTable) +{ + ListCell *l; + + foreach(l, rangeTable) + { + RangeTblEntry *rte = lfirst(l); + + ExecCheckRTEPerms(rte); + } +} + +/* + * ExecCheckRTEPerms + * Check access permissions for a single RTE. + */ +static void +ExecCheckRTEPerms(RangeTblEntry *rte) +{ + AclMode requiredPerms; + Oid relOid; + Oid userid; + + /* + * Only plain-relation RTEs need to be checked here. Subquery RTEs are + * checked by ExecInitSubqueryScan if the subquery is still a separate + * subquery --- if it's been pulled up into our query level then the RTEs + * are in our rangetable and will be checked here. Function RTEs are + * checked by init_fcache when the function is prepared for execution. + * Join and special RTEs need no checks. 
+ */ + if (rte->rtekind != RTE_RELATION) + return; + + /* + * No work if requiredPerms is empty. + */ + requiredPerms = rte->requiredPerms; + if (requiredPerms == 0) + return; + + relOid = rte->relid; + + /* + * userid to check as: current user unless we have a setuid indication. + * + * Note: GetUserId() is presently fast enough that there's no harm in + * calling it separately for each RTE. If that stops being true, we could + * call it once in ExecCheckRTPerms and pass the userid down from there. + * But for now, no need for the extra clutter. + */ + userid = rte->checkAsUser ? rte->checkAsUser : GetUserId(); + + /* + * We must have *all* the requiredPerms bits, so use aclmask not aclcheck. + */ + if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL) + != requiredPerms) + aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS, + get_rel_name(relOid)); +} + +/* + * Check that the query does not imply any writes to non-temp tables. + */ +static void +ExecCheckXactReadOnly(Query *parsetree) +{ + ListCell *l; + + /* + * CREATE TABLE AS or SELECT INTO? + * + * XXX should we allow this if the destination is temp? + */ + if (parsetree->into != NULL) + goto fail; + + /* Fail if write permissions are requested on any non-temp table */ + foreach(l, parsetree->rtable) + { + RangeTblEntry *rte = lfirst(l); + + if (rte->rtekind == RTE_SUBQUERY) + { + ExecCheckXactReadOnly(rte->subquery); + continue; + } + + if (rte->rtekind != RTE_RELATION) + continue; + + if ((rte->requiredPerms & (~ACL_SELECT)) == 0) + continue; + + if (isTempNamespace(get_rel_namespace(rte->relid))) + continue; + + goto fail; + } + + return; + +fail: + ereport(ERROR, + (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION), + errmsg("transaction is read-only"))); +} + + +/* ---------------------------------------------------------------- + * InitPlan + * + * Initializes the query plan: open files, allocate storage + * and start up the rule manager + * ---------------------------------------------------------------- + */ +static void +InitPlan(QueryDesc *queryDesc, int eflags) +{ + CmdType operation = queryDesc->operation; + Query *parseTree = queryDesc->parsetree; + Plan *plan = queryDesc->plantree; + EState *estate = queryDesc->estate; + PlanState *planstate; + List *rangeTable; + TupleDesc tupType; + ListCell *l; + + /* + * Do permissions checks. It's sufficient to examine the query's top + * rangetable here --- subplan RTEs will be checked during + * ExecInitSubPlan(). 
+ */ + ExecCheckRTPerms(parseTree->rtable); + + /* + * get information from query descriptor + */ + rangeTable = parseTree->rtable; + + /* + * initialize the node's execution state + */ + estate->es_range_table = rangeTable; + + /* + * if there is a result relation, initialize result relation stuff + */ + if (parseTree->resultRelation) + { + List *resultRelations = parseTree->resultRelations; + int numResultRelations; + ResultRelInfo *resultRelInfos; + + if (resultRelations != NIL) + { + /* + * Multiple result relations (due to inheritance) + * parseTree->resultRelations identifies them all + */ + ResultRelInfo *resultRelInfo; + + numResultRelations = list_length(resultRelations); + resultRelInfos = (ResultRelInfo *) + palloc(numResultRelations * sizeof(ResultRelInfo)); + resultRelInfo = resultRelInfos; + foreach(l, resultRelations) + { + initResultRelInfo(resultRelInfo, + lfirst_int(l), + rangeTable, + operation, + estate->es_instrument); + resultRelInfo++; + } + } + else + { + /* + * Single result relation identified by parseTree->resultRelation + */ + numResultRelations = 1; + resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo)); + initResultRelInfo(resultRelInfos, + parseTree->resultRelation, + rangeTable, + operation, + estate->es_instrument); + } + + estate->es_result_relations = resultRelInfos; + estate->es_num_result_relations = numResultRelations; + /* Initialize to first or only result rel */ + estate->es_result_relation_info = resultRelInfos; + } + else + { + /* + * if no result relation, then set state appropriately + */ + estate->es_result_relations = NULL; + estate->es_num_result_relations = 0; + estate->es_result_relation_info = NULL; + } + + /* + * Detect whether we're doing SELECT INTO. If so, set the es_into_oids + * flag appropriately so that the plan tree will be initialized with the + * correct tuple descriptors. (Other SELECT INTO stuff comes later.) + */ + estate->es_select_into = false; + if (operation == CMD_SELECT && parseTree->into != NULL) + { + estate->es_select_into = true; + estate->es_into_oids = interpretOidsOption(parseTree->intoOptions); + } + + /* + * Have to lock relations selected FOR UPDATE/FOR SHARE before we + * initialize the plan tree, else we'd be doing a lock upgrade. + * While we are at it, build the ExecRowMark list. + */ + estate->es_rowMarks = NIL; + foreach(l, parseTree->rowMarks) + { + RowMarkClause *rc = (RowMarkClause *) lfirst(l); + Oid relid = getrelid(rc->rti, rangeTable); + Relation relation; + ExecRowMark *erm; + + relation = heap_open(relid, RowShareLock); + erm = (ExecRowMark *) palloc(sizeof(ExecRowMark)); + erm->relation = relation; + erm->rti = rc->rti; + erm->forUpdate = rc->forUpdate; + erm->noWait = rc->noWait; + /* We'll set up ctidAttno below */ + erm->ctidAttNo = InvalidAttrNumber; + estate->es_rowMarks = lappend(estate->es_rowMarks, erm); + } + + /* + * initialize the executor "tuple" table. We need slots for all the plan + * nodes, plus possibly output slots for the junkfilter(s). At this point + * we aren't sure if we need junkfilters, so just add slots for them + * unconditionally. Also, if it's not a SELECT, set up a slot for use for + * trigger output tuples. 
+ */ + { + int nSlots = ExecCountSlotsNode(plan); + + if (parseTree->resultRelations != NIL) + nSlots += list_length(parseTree->resultRelations); + else + nSlots += 1; + if (operation != CMD_SELECT) + nSlots++; /* for es_trig_tuple_slot */ + if (parseTree->returningLists) + nSlots++; /* for RETURNING projection */ + + estate->es_tupleTable = ExecCreateTupleTable(nSlots); + + if (operation != CMD_SELECT) + estate->es_trig_tuple_slot = + ExecAllocTableSlot(estate->es_tupleTable); + } + + /* mark EvalPlanQual not active */ + estate->es_topPlan = plan; + estate->es_evalPlanQual = NULL; + estate->es_evTupleNull = NULL; + estate->es_evTuple = NULL; + estate->es_useEvalPlan = false; + + /* + * initialize the private state information for all the nodes in the query + * tree. This opens files, allocates storage and leaves us ready to start + * processing tuples. + */ + planstate = ExecInitNode(plan, estate, eflags); + + /* + * Get the tuple descriptor describing the type of tuples to return. (this + * is especially important if we are creating a relation with "SELECT + * INTO") + */ + tupType = ExecGetResultType(planstate); + + /* + * Initialize the junk filter if needed. SELECT and INSERT queries need a + * filter if there are any junk attrs in the tlist. INSERT and SELECT + * INTO also need a filter if the plan may return raw disk tuples (else + * heap_insert will be scribbling on the source relation!). UPDATE and + * DELETE always need a filter, since there's always a junk 'ctid' + * attribute present --- no need to look first. + */ + { + bool junk_filter_needed = false; + ListCell *tlist; + + switch (operation) + { + case CMD_SELECT: + case CMD_INSERT: + foreach(tlist, plan->targetlist) + { + TargetEntry *tle = (TargetEntry *) lfirst(tlist); + + if (tle->resjunk) + { + junk_filter_needed = true; + break; + } + } + if (!junk_filter_needed && + (operation == CMD_INSERT || estate->es_select_into) && + ExecMayReturnRawTuples(planstate)) + junk_filter_needed = true; + break; + case CMD_UPDATE: + case CMD_DELETE: + junk_filter_needed = true; + break; + default: + break; + } + + if (junk_filter_needed) + { + /* + * If there are multiple result relations, each one needs its own + * junk filter. Note this is only possible for UPDATE/DELETE, so + * we can't be fooled by some needing a filter and some not. + */ + if (parseTree->resultRelations != NIL) + { + PlanState **appendplans; + int as_nplans; + ResultRelInfo *resultRelInfo; + int i; + + /* Top plan had better be an Append here. */ + Assert(IsA(plan, Append)); + Assert(((Append *) plan)->isTarget); + Assert(IsA(planstate, AppendState)); + appendplans = ((AppendState *) planstate)->appendplans; + as_nplans = ((AppendState *) planstate)->as_nplans; + Assert(as_nplans == estate->es_num_result_relations); + resultRelInfo = estate->es_result_relations; + for (i = 0; i < as_nplans; i++) + { + PlanState *subplan = appendplans[i]; + JunkFilter *j; + + j = ExecInitJunkFilter(subplan->plan->targetlist, + resultRelInfo->ri_RelationDesc->rd_att->tdhasoid, + ExecAllocTableSlot(estate->es_tupleTable)); + /* + * Since it must be UPDATE/DELETE, there had better be + * a "ctid" junk attribute in the tlist ... but ctid could + * be at a different resno for each result relation. + * We look up the ctid resnos now and save them in the + * junkfilters. 
+ */ + j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid"); + if (!AttributeNumberIsValid(j->jf_junkAttNo)) + elog(ERROR, "could not find junk ctid column"); + resultRelInfo->ri_junkFilter = j; + resultRelInfo++; + } + + /* + * Set active junkfilter too; at this point ExecInitAppend has + * already selected an active result relation... + */ + estate->es_junkFilter = + estate->es_result_relation_info->ri_junkFilter; + } + else + { + /* Normal case with just one JunkFilter */ + JunkFilter *j; + + j = ExecInitJunkFilter(planstate->plan->targetlist, + tupType->tdhasoid, + ExecAllocTableSlot(estate->es_tupleTable)); + estate->es_junkFilter = j; + if (estate->es_result_relation_info) + estate->es_result_relation_info->ri_junkFilter = j; + + if (operation == CMD_SELECT) + { + /* For SELECT, want to return the cleaned tuple type */ + tupType = j->jf_cleanTupType; + /* For SELECT FOR UPDATE/SHARE, find the ctid attrs now */ + foreach(l, estate->es_rowMarks) + { + ExecRowMark *erm = (ExecRowMark *) lfirst(l); + char resname[32]; + + snprintf(resname, sizeof(resname), "ctid%u", erm->rti); + erm->ctidAttNo = ExecFindJunkAttribute(j, resname); + if (!AttributeNumberIsValid(erm->ctidAttNo)) + elog(ERROR, "could not find junk \"%s\" column", + resname); + } + } + else if (operation == CMD_UPDATE || operation == CMD_DELETE) + { + /* For UPDATE/DELETE, find the ctid junk attr now */ + j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid"); + if (!AttributeNumberIsValid(j->jf_junkAttNo)) + elog(ERROR, "could not find junk ctid column"); + } + } + } + else + estate->es_junkFilter = NULL; + } + + /* + * Initialize RETURNING projections if needed. + */ + if (parseTree->returningLists) + { + TupleTableSlot *slot; + ExprContext *econtext; + ResultRelInfo *resultRelInfo; + + /* + * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case. + * We assume all the sublists will generate the same output tupdesc. + */ + tupType = ExecTypeFromTL((List *) linitial(parseTree->returningLists), + false); + + /* Set up a slot for the output of the RETURNING projection(s) */ + slot = ExecAllocTableSlot(estate->es_tupleTable); + ExecSetSlotDescriptor(slot, tupType); + /* Need an econtext too */ + econtext = CreateExprContext(estate); + + /* + * Build a projection for each result rel. Note that any SubPlans in + * the RETURNING lists get attached to the topmost plan node. + */ + Assert(list_length(parseTree->returningLists) == estate->es_num_result_relations); + resultRelInfo = estate->es_result_relations; + foreach(l, parseTree->returningLists) + { + List *rlist = (List *) lfirst(l); + List *rliststate; + + rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate); + resultRelInfo->ri_projectReturning = + ExecBuildProjectionInfo(rliststate, econtext, slot); + resultRelInfo++; + } + + /* + * Because we already ran ExecInitNode() for the top plan node, any + * subplans we just attached to it won't have been initialized; so we + * have to do it here. (Ugly, but the alternatives seem worse.) + */ + foreach(l, planstate->subPlan) + { + SubPlanState *sstate = (SubPlanState *) lfirst(l); + + Assert(IsA(sstate, SubPlanState)); + if (sstate->planstate == NULL) /* already inited? */ + ExecInitSubPlan(sstate, estate, eflags); + } + } + + queryDesc->tupDesc = tupType; + queryDesc->planstate = planstate; + + /* + * If doing SELECT INTO, initialize the "into" relation. We must wait + * till now so we have the "clean" result tuple type to create the new + * table from. + * + * If EXPLAIN, skip creating the "into" relation. 
+ */ + if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY)) + OpenIntoRel(queryDesc); +} + +/* + * Initialize ResultRelInfo data for one result relation + */ +static void +initResultRelInfo(ResultRelInfo *resultRelInfo, + Index resultRelationIndex, + List *rangeTable, + CmdType operation, + bool doInstrument) +{ + Oid resultRelationOid; + Relation resultRelationDesc; + + resultRelationOid = getrelid(resultRelationIndex, rangeTable); + resultRelationDesc = heap_open(resultRelationOid, RowExclusiveLock); + + switch (resultRelationDesc->rd_rel->relkind) + { + case RELKIND_SEQUENCE: + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot change sequence \"%s\"", + RelationGetRelationName(resultRelationDesc)))); + break; + case RELKIND_TOASTVALUE: + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot change TOAST relation \"%s\"", + RelationGetRelationName(resultRelationDesc)))); + break; + case RELKIND_VIEW: + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot change view \"%s\"", + RelationGetRelationName(resultRelationDesc)))); + break; + } + + MemSet(resultRelInfo, 0, sizeof(ResultRelInfo)); + resultRelInfo->type = T_ResultRelInfo; + resultRelInfo->ri_RangeTableIndex = resultRelationIndex; + resultRelInfo->ri_RelationDesc = resultRelationDesc; + resultRelInfo->ri_NumIndices = 0; + resultRelInfo->ri_IndexRelationDescs = NULL; + resultRelInfo->ri_IndexRelationInfo = NULL; + /* make a copy so as not to depend on relcache info not changing... */ + resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc); + if (resultRelInfo->ri_TrigDesc) + { + int n = resultRelInfo->ri_TrigDesc->numtriggers; + + resultRelInfo->ri_TrigFunctions = (FmgrInfo *) + palloc0(n * sizeof(FmgrInfo)); + if (doInstrument) + resultRelInfo->ri_TrigInstrument = InstrAlloc(n); + else + resultRelInfo->ri_TrigInstrument = NULL; + } + else + { + resultRelInfo->ri_TrigFunctions = NULL; + resultRelInfo->ri_TrigInstrument = NULL; + } + resultRelInfo->ri_ConstraintExprs = NULL; + resultRelInfo->ri_junkFilter = NULL; + resultRelInfo->ri_projectReturning = NULL; + + /* + * If there are indices on the result relation, open them and save + * descriptors in the result relation info, so that we can add new index + * entries for the tuples we add/update. We need not do this for a + * DELETE, however, since deletion doesn't affect indexes. + */ + if (resultRelationDesc->rd_rel->relhasindex && + operation != CMD_DELETE) + ExecOpenIndices(resultRelInfo); +} + +/* + * ExecContextForcesOids + * + * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO, + * we need to ensure that result tuples have space for an OID iff they are + * going to be stored into a relation that has OIDs. In other contexts + * we are free to choose whether to leave space for OIDs in result tuples + * (we generally don't want to, but we do if a physical-tlist optimization + * is possible). This routine checks the plan context and returns TRUE if the + * choice is forced, FALSE if the choice is not forced. In the TRUE case, + * *hasoids is set to the required value. + * + * One reason this is ugly is that all plan nodes in the plan tree will emit + * tuples with space for an OID, though we really only need the topmost node + * to do so. However, node types like Sort don't project new tuples but just + * return their inputs, and in those cases the requirement propagates down + * to the input node. 
Eventually we might make this code smart enough to + * recognize how far down the requirement really goes, but for now we just + * make all plan nodes do the same thing if the top level forces the choice. + * + * We assume that estate->es_result_relation_info is already set up to + * describe the target relation. Note that in an UPDATE that spans an + * inheritance tree, some of the target relations may have OIDs and some not. + * We have to make the decisions on a per-relation basis as we initialize + * each of the child plans of the topmost Append plan. + * + * SELECT INTO is even uglier, because we don't have the INTO relation's + * descriptor available when this code runs; we have to look aside at a + * flag set by InitPlan(). + */ +bool +ExecContextForcesOids(PlanState *planstate, bool *hasoids) +{ + if (planstate->state->es_select_into) + { + *hasoids = planstate->state->es_into_oids; + return true; + } + else + { + ResultRelInfo *ri = planstate->state->es_result_relation_info; + + if (ri != NULL) + { + Relation rel = ri->ri_RelationDesc; + + if (rel != NULL) + { + *hasoids = rel->rd_rel->relhasoids; + return true; + } + } + } + + return false; +} + +/* ---------------------------------------------------------------- + * ExecEndPlan + * + * Cleans up the query plan -- closes files and frees up storage + * + * NOTE: we are no longer very worried about freeing storage per se + * in this code; FreeExecutorState should be guaranteed to release all + * memory that needs to be released. What we are worried about doing + * is closing relations and dropping buffer pins. Thus, for example, + * tuple tables must be cleared or dropped to ensure pins are released. + * ---------------------------------------------------------------- + */ +void +ExecEndPlan(PlanState *planstate, EState *estate) +{ + ResultRelInfo *resultRelInfo; + int i; + ListCell *l; + + /* + * shut down any PlanQual processing we were doing + */ + if (estate->es_evalPlanQual != NULL) + EndEvalPlanQual(estate); + + /* + * shut down the node-type-specific query processing + */ + ExecEndNode(planstate); + + /* + * destroy the executor "tuple" table. + */ + ExecDropTupleTable(estate->es_tupleTable, true); + estate->es_tupleTable = NULL; + + /* + * close the result relation(s) if any, but hold locks until xact commit. + */ + resultRelInfo = estate->es_result_relations; + for (i = estate->es_num_result_relations; i > 0; i--) + { + /* Close indices and then the relation itself */ + ExecCloseIndices(resultRelInfo); + heap_close(resultRelInfo->ri_RelationDesc, NoLock); + resultRelInfo++; + } + + /* + * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks + */ + foreach(l, estate->es_rowMarks) + { + ExecRowMark *erm = lfirst(l); + + heap_close(erm->relation, NoLock); + } +} + +/* ---------------------------------------------------------------- + * ExecutePlan + * + * processes the query plan to retrieve 'numberTuples' tuples in the + * direction specified. + * + * Retrieves all tuples if numberTuples is 0 + * + * result is either a slot containing the last tuple in the case + * of a SELECT or NULL otherwise. 
+ * + * Note: the ctid attribute is a 'junk' attribute that is removed before the + * user can see it + * ---------------------------------------------------------------- + */ +static TupleTableSlot * +ExecutePlan(EState *estate, + PlanState *planstate, + CmdType operation, + long numberTuples, + ScanDirection direction, + DestReceiver *dest) +{ + JunkFilter *junkfilter; + TupleTableSlot *planSlot; + TupleTableSlot *slot; + ItemPointer tupleid = NULL; + ItemPointerData tuple_ctid; + long current_tuple_count; + TupleTableSlot *result; + + /* + * initialize local variables + */ + current_tuple_count = 0; + result = NULL; + + /* + * Set the direction. + */ + estate->es_direction = direction; + + /* + * Process BEFORE EACH STATEMENT triggers + */ + switch (operation) + { + case CMD_UPDATE: + ExecBSUpdateTriggers(estate, estate->es_result_relation_info); + break; + case CMD_DELETE: + ExecBSDeleteTriggers(estate, estate->es_result_relation_info); + break; + case CMD_INSERT: + ExecBSInsertTriggers(estate, estate->es_result_relation_info); + break; + default: + /* do nothing */ + break; + } + + /* + * Loop until we've processed the proper number of tuples from the plan. + */ + + for (;;) + { + /* Reset the per-output-tuple exprcontext */ + ResetPerTupleExprContext(estate); + + /* + * Execute the plan and obtain a tuple + */ +lnext: ; + if (estate->es_useEvalPlan) + { + planSlot = EvalPlanQualNext(estate); + if (TupIsNull(planSlot)) + planSlot = ExecProcNode(planstate); + } + else + planSlot = ExecProcNode(planstate); + + /* + * if the tuple is null, then we assume there is nothing more to + * process so we just return null... + */ + if (TupIsNull(planSlot)) + { + result = NULL; + break; + } + slot = planSlot; + + /* + * if we have a junk filter, then project a new tuple with the junk + * removed. + * + * Store this new "clean" tuple in the junkfilter's resultSlot. + * (Formerly, we stored it back over the "dirty" tuple, which is WRONG + * because that tuple slot has the wrong descriptor.) + * + * Also, extract all the junk information we need. + */ + if ((junkfilter = estate->es_junkFilter) != NULL) + { + Datum datum; + bool isNull; + + /* + * extract the 'ctid' junk attribute. + */ + if (operation == CMD_UPDATE || operation == CMD_DELETE) + { + datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo, + &isNull); + /* shouldn't ever get a null result... */ + if (isNull) + elog(ERROR, "ctid is NULL"); + + tupleid = (ItemPointer) DatumGetPointer(datum); + tuple_ctid = *tupleid; /* make sure we don't free the ctid!! */ + tupleid = &tuple_ctid; + } + + /* + * Process any FOR UPDATE or FOR SHARE locking requested. + */ + else if (estate->es_rowMarks != NIL) + { + ListCell *l; + + lmark: ; + foreach(l, estate->es_rowMarks) + { + ExecRowMark *erm = lfirst(l); + HeapTupleData tuple; + Buffer buffer; + ItemPointerData update_ctid; + TransactionId update_xmax; + TupleTableSlot *newSlot; + LockTupleMode lockmode; + HTSU_Result test; + + datum = ExecGetJunkAttribute(slot, + erm->ctidAttNo, + &isNull); + /* shouldn't ever get a null result... 
*/ + if (isNull) + elog(ERROR, "ctid is NULL"); + + tuple.t_self = *((ItemPointer) DatumGetPointer(datum)); + + if (erm->forUpdate) + lockmode = LockTupleExclusive; + else + lockmode = LockTupleShared; + + test = heap_lock_tuple(erm->relation, &tuple, &buffer, + &update_ctid, &update_xmax, + estate->es_snapshot->curcid, + lockmode, erm->noWait); + ReleaseBuffer(buffer); + switch (test) + { + case HeapTupleSelfUpdated: + /* treat it as deleted; do not process */ + goto lnext; + + case HeapTupleMayBeUpdated: + break; + + case HeapTupleUpdated: + if (IsXactIsoLevelSerializable) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + if (!ItemPointerEquals(&update_ctid, + &tuple.t_self)) + { + /* updated, so look at updated version */ + newSlot = EvalPlanQual(estate, + erm->rti, + &update_ctid, + update_xmax, + estate->es_snapshot->curcid); + if (!TupIsNull(newSlot)) + { + slot = planSlot = newSlot; + estate->es_useEvalPlan = true; + goto lmark; + } + } + + /* + * if tuple was deleted or PlanQual failed for + * updated tuple - we must not return this tuple! + */ + goto lnext; + + default: + elog(ERROR, "unrecognized heap_lock_tuple status: %u", + test); + return NULL; + } + } + } + + /* + * Create a new "clean" tuple with all junk attributes removed. We + * don't need to do this for DELETE, however (there will in fact + * be no non-junk attributes in a DELETE!) + */ + if (operation != CMD_DELETE) + slot = ExecFilterJunk(junkfilter, slot); + } + + /* + * now that we have a tuple, do the appropriate thing with it.. either + * return it to the user, add it to a relation someplace, delete it + * from a relation, or modify some of its attributes. + */ + switch (operation) + { + case CMD_SELECT: + ExecSelect(slot, dest, estate); + result = slot; + break; + + case CMD_INSERT: + ExecInsert(slot, tupleid, planSlot, dest, estate); + result = NULL; + break; + + case CMD_DELETE: + ExecDelete(tupleid, planSlot, dest, estate); + result = NULL; + break; + + case CMD_UPDATE: + ExecUpdate(slot, tupleid, planSlot, dest, estate); + result = NULL; + break; + + default: + elog(ERROR, "unrecognized operation code: %d", + (int) operation); + result = NULL; + break; + } + + /* + * check our tuple count.. if we've processed the proper number then + * quit, else loop again and process more tuples. Zero numberTuples + * means no limit. + */ + current_tuple_count++; + if (numberTuples && numberTuples == current_tuple_count) + break; + } + + /* + * Process AFTER EACH STATEMENT triggers + */ + switch (operation) + { + case CMD_UPDATE: + ExecASUpdateTriggers(estate, estate->es_result_relation_info); + break; + case CMD_DELETE: + ExecASDeleteTriggers(estate, estate->es_result_relation_info); + break; + case CMD_INSERT: + ExecASInsertTriggers(estate, estate->es_result_relation_info); + break; + default: + /* do nothing */ + break; + } + + /* + * here, result is either a slot containing a tuple in the case of a + * SELECT or NULL otherwise. + */ + return result; +} + +/* ---------------------------------------------------------------- + * ExecSelect + * + * SELECTs are easy.. we just pass the tuple to the appropriate + * output function. 
+ * ---------------------------------------------------------------- + */ +static void +ExecSelect(TupleTableSlot *slot, + DestReceiver *dest, + EState *estate) +{ + (*dest->receiveSlot) (slot, dest); + IncrRetrieved(); + (estate->es_processed)++; +} + +/* ---------------------------------------------------------------- + * ExecInsert + * + * INSERTs are trickier.. we have to insert the tuple into + * the base relation and insert appropriate tuples into the + * index relations. + * ---------------------------------------------------------------- + */ +static void +ExecInsert(TupleTableSlot *slot, + ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, + EState *estate) +{ + HeapTuple tuple; + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + Oid newId; + + /* + * get the heap tuple out of the tuple table slot, making sure we have a + * writable copy + */ + tuple = ExecMaterializeSlot(slot); + + /* + * get information on the (current) result relation + */ + resultRelInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelInfo->ri_RelationDesc; + + /* BEFORE ROW INSERT Triggers */ + if (resultRelInfo->ri_TrigDesc && +#ifdef REPLICATION + (txn_type != REPLICATED_REMOTE) && +#endif + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0) + { + HeapTuple newtuple; + + newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple); + + if (newtuple == NULL) /* "do nothing" */ + return; + + if (newtuple != tuple) /* modified by Trigger(s) */ + { + /* + * Put the modified tuple into a slot for convenience of routines + * below. We assume the tuple was allocated in per-tuple memory + * context, and therefore will go away by itself. The tuple table + * slot should not try to clear it. + */ + TupleTableSlot *newslot = estate->es_trig_tuple_slot; + + if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor) + ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor); + ExecStoreTuple(newtuple, newslot, InvalidBuffer, false); + slot = newslot; + tuple = newtuple; + } + } + + /* + * Check the constraints of the tuple + */ +#ifndef REPLICATION + if (resultRelationDesc->rd_att->constr) +#else + if ((txn_type != REPLICATED_REMOTE) && + (resultRelationDesc->rd_att->constr)) +#endif + ExecConstraints(resultRelInfo, slot, estate); + + /* + * insert the tuple + * + * Note: heap_insert returns the tid (location) of the new tuple in the + * t_self field. 
+ */ + newId = heap_insert(resultRelationDesc, tuple, + estate->es_snapshot->curcid, + true, true); + + IncrAppended(); + (estate->es_processed)++; + estate->es_lastoid = newId; + setLastTid(&(tuple->t_self)); + + /* + * insert index entries for tuple + */ + if (resultRelInfo->ri_NumIndices > 0) + ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false); + +#ifdef REPLICATION + if (txn_type != REPLICATED_REMOTE) + { +#endif + + /* AFTER ROW INSERT Triggers */ + ExecARInsertTriggers(estate, resultRelInfo, tuple); + + /* Process RETURNING if present */ + if (resultRelInfo->ri_projectReturning) + ExecProcessReturning(resultRelInfo->ri_projectReturning, + slot, planSlot, dest); + +#ifdef REPLICATION + } + + if ( txn_type == REPLICATED_LOCAL ) + { + Oid resultRelationOid; + TupleCollection *tcoll; + + tcoll = &(((QueryInfo *) CurrentWriteSet->currQuery)->tcoll); + resultRelationOid = RelationGetRelid(resultRelationDesc); + if(resultRelationOid == tcoll->rel->relOid) + WriteSetCollectTuple(tupleid, slot, CurrentWriteSet->currQuery, + estate->es_snapshot); + } +#endif +} + +/* ---------------------------------------------------------------- + * ExecDelete + * + * DELETE is like UPDATE, except that we delete the tuple and no + * index modifications are needed + * ---------------------------------------------------------------- + */ +static void +ExecDelete(ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, + EState *estate) +{ + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + HTSU_Result result; + ItemPointerData update_ctid; + TransactionId update_xmax; + + /* + * get information on the (current) result relation + */ + resultRelInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelInfo->ri_RelationDesc; + + /* BEFORE ROW DELETE Triggers */ + if (resultRelInfo->ri_TrigDesc && +#ifdef REPLICATION + (txn_type != REPLICATED_REMOTE) && +#endif + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0) + { + bool dodelete; + + dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid, + estate->es_snapshot->curcid); + + if (!dodelete) /* "do nothing" */ + return; + } + +#ifdef REPLICATION + /* initialize the TxnToAbort return value */ + TxnToAbort = InvalidTransactionId; + + /* + * Add the tuple info to the WriteSet. + */ + if ( txn_type == REPLICATED_LOCAL ) + { + Oid resultRelationOid; + TupleCollection *tcoll; + + tcoll = &(((QueryInfo *) CurrentWriteSet->currQuery)->tcoll); + resultRelationOid = RelationGetRelid(resultRelationDesc); + if (resultRelationOid == tcoll->rel->relOid) + WriteSetCollectTuple(tupleid, planSlot, + CurrentWriteSet->currQuery, + estate->es_snapshot); + } +#endif + + /* + * delete the tuple + * + * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that + * the row to be deleted is visible to that snapshot, and throw a can't- + * serialize error if not. This is a special-case behavior needed for + * referential integrity updates in serializable transactions. 
+ */ +ldelete:; + result = heap_delete(resultRelationDesc, tupleid, + &update_ctid, &update_xmax, + estate->es_snapshot->curcid, + estate->es_crosscheck_snapshot, +#ifndef REPLICATION + true /* wait for commit */ ); +#else + /* remote transaction don't wait */ + (txn_type != REPLICATED_REMOTE)); +#endif + + switch (result) + { + case HeapTupleSelfUpdated: + /* already deleted by self; nothing to do */ + return; + + case HeapTupleMayBeUpdated: + break; + +#ifdef REPLICATION + case HeapTupleBeingUpdated: + if (txn_type == REPLICATED_REMOTE) + { + /* + * A running local transaction has a lock on the tuple. Abort + * that local transaction and return, signaling that we must + * wait until the other transaction releases the lock. + */ +#ifdef RMGR_DEBUG + elog(DEBUG5, + "ExecDelete: need to terminate a local transaction %d", update_xmax); +#endif + TxnToAbort = update_xmax; + return; + } +#ifdef RMGR_DEBUG + else + /* should be impossible */ + Assert(result != HeapTupleBeingUpdated); +#endif +#endif + + case HeapTupleUpdated: +#ifdef RMGR_DEBUG + if (txn_type == REPLICATED_REMOTE) + { + elog(DEBUG5, "ExecDelete: a concurrent update has committed before. Abort this transaction."); + //FIXME + } +#endif + if (IsXactIsoLevelSerializable) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + else if (!ItemPointerEquals(tupleid, &update_ctid)) + { + TupleTableSlot *epqslot; + + epqslot = EvalPlanQual(estate, + resultRelInfo->ri_RangeTableIndex, + &update_ctid, + update_xmax, + estate->es_snapshot->curcid); + if (!TupIsNull(epqslot)) + { + *tupleid = update_ctid; + goto ldelete; + } + } + /* tuple already deleted; nothing to do */ + return; + + default: + elog(ERROR, "unrecognized heap_delete status: %u", result); + return; + } + + IncrDeleted(); + (estate->es_processed)++; + + /* + * Note: Normally one would think that we have to delete index tuples + * associated with the heap tuple now... + * + * ... but in POSTGRES, we have no need to do this because VACUUM will + * take care of it later. We can't delete index tuples immediately + * anyway, since the tuple is still visible to other transactions. + */ + + /* AFTER ROW DELETE Triggers */ + ExecARDeleteTriggers(estate, resultRelInfo, tupleid); + + /* Process RETURNING if present */ + if (resultRelInfo->ri_projectReturning) + { + /* + * We have to put the target tuple into a slot, which means first we + * gotta fetch it. We can use the trigger tuple slot. + */ + TupleTableSlot *slot = estate->es_trig_tuple_slot; + HeapTupleData deltuple; + Buffer delbuffer; + + deltuple.t_self = *tupleid; + if (!heap_fetch(resultRelationDesc, SnapshotAny, + &deltuple, &delbuffer, false, NULL)) + elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING"); + + if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc)) + ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc)); + ExecStoreTuple(&deltuple, slot, InvalidBuffer, false); + + ExecProcessReturning(resultRelInfo->ri_projectReturning, + slot, planSlot, dest); + + ExecClearTuple(slot); + ReleaseBuffer(delbuffer); + } +} + +/* ---------------------------------------------------------------- + * ExecUpdate + * + * note: we can't run UPDATE queries with transactions + * off because UPDATEs are actually INSERTs and our + * scan will mistakenly loop forever, updating the tuple + * it just inserted.. 
This should be fixed but until it + * is, we don't want to get stuck in an infinite loop + * which corrupts your database.. + * ---------------------------------------------------------------- + */ +static void +ExecUpdate(TupleTableSlot *slot, + ItemPointer tupleid, + TupleTableSlot *planSlot, + DestReceiver *dest, + EState *estate) +{ + HeapTuple tuple; + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + HTSU_Result result; + ItemPointerData update_ctid; + TransactionId update_xmax; + + /* + * abort the operation if not running transactions + */ + if (IsBootstrapProcessingMode()) + elog(ERROR, "cannot UPDATE during bootstrap"); + + /* + * get the heap tuple out of the tuple table slot, making sure we have a + * writable copy + */ + tuple = ExecMaterializeSlot(slot); + + /* + * get information on the (current) result relation + */ + resultRelInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelInfo->ri_RelationDesc; + + /* BEFORE ROW UPDATE Triggers */ + if (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0) + { + HeapTuple newtuple; + + newtuple = ExecBRUpdateTriggers(estate, resultRelInfo, + tupleid, tuple, + estate->es_snapshot->curcid); + + if (newtuple == NULL) /* "do nothing" */ + return; + + if (newtuple != tuple) /* modified by Trigger(s) */ + { + /* + * Put the modified tuple into a slot for convenience of routines + * below. We assume the tuple was allocated in per-tuple memory + * context, and therefore will go away by itself. The tuple table + * slot should not try to clear it. + */ + TupleTableSlot *newslot = estate->es_trig_tuple_slot; + + if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor) + ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor); + ExecStoreTuple(newtuple, newslot, InvalidBuffer, false); + slot = newslot; + tuple = newtuple; + } + } + + /* + * Check the constraints of the tuple + * + * If we generate a new candidate tuple after EvalPlanQual testing, we + * must loop back here and recheck constraints. (We don't need to redo + * triggers, however. If there are any BEFORE triggers then trigger.c + * will have done heap_lock_tuple to lock the correct tuple, so there's no + * need to do them again.) + */ +lreplace:; + if (resultRelationDesc->rd_att->constr) + ExecConstraints(resultRelInfo, slot, estate); + + /* + * replace the heap tuple + * + * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that + * the row to be updated is visible to that snapshot, and throw a can't- + * serialize error if not. This is a special-case behavior needed for + * referential integrity updates in serializable transactions. 
+ */ + result = heap_update(resultRelationDesc, tupleid, tuple, + &update_ctid, &update_xmax, + estate->es_snapshot->curcid, + estate->es_crosscheck_snapshot, + true /* wait for commit */ ); + + switch (result) + { + case HeapTupleSelfUpdated: + /* already deleted by self; nothing to do */ + return; + + case HeapTupleMayBeUpdated: + break; + + case HeapTupleUpdated: + if (IsXactIsoLevelSerializable) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + else if (!ItemPointerEquals(tupleid, &update_ctid)) + { + TupleTableSlot *epqslot; + + epqslot = EvalPlanQual(estate, + resultRelInfo->ri_RangeTableIndex, + &update_ctid, + update_xmax, + estate->es_snapshot->curcid); + if (!TupIsNull(epqslot)) + { + *tupleid = update_ctid; + slot = ExecFilterJunk(estate->es_junkFilter, epqslot); + tuple = ExecMaterializeSlot(slot); + goto lreplace; + } + } + /* tuple already deleted; nothing to do */ + return; + + default: + elog(ERROR, "unrecognized heap_update status: %u", result); + return; + } + + IncrReplaced(); + (estate->es_processed)++; + + /* + * Note: instead of having to update the old index tuples associated with + * the heap tuple, all we do is form and insert new index tuples. This is + * because UPDATEs are actually DELETEs and INSERTs, and index tuple + * deletion is done later by VACUUM (see notes in ExecDelete). All we do + * here is insert new index tuples. -cim 9/27/89 + */ + + /* + * insert index entries for tuple + * + * Note: heap_update returns the tid (location) of the new tuple in the + * t_self field. + */ + if (resultRelInfo->ri_NumIndices > 0) + ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false); + + /* AFTER ROW UPDATE Triggers */ + ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple); + + /* Process RETURNING if present */ + if (resultRelInfo->ri_projectReturning) + ExecProcessReturning(resultRelInfo->ri_projectReturning, + slot, planSlot, dest); +} ============================================================ --- ChangeLog 76b87bded2668c398a2b7b880a8dce10ee2ac269 +++ ChangeLog 3d7447330bb3e54087255fc337ff8cba0bc43741 @@ -1,3 +1,9 @@ +2007-02-05 Markus Schiltknecht + + * tests/test_a_merge_8/*: + * testsuite.lua: Added a merge test case, distilled from a + failing merge in my PostgreSQL repository. + 2007-02-05 Derek Scherger * file_io.{cc,hh} (walk_tree_recursive, tree_walker::visit_dir, ============================================================ --- testsuite.lua 76181ad2963afcf0b47c431e129a2dd7b7c1ed83 +++ testsuite.lua 3cd1fd512cde3e786e1b98e8f173226c259a2a5f @@ -702,4 +702,4 @@ table.insert(tests, "ls_unknown_in_subdi table.insert(tests, "checkout_clobbers_workspace") table.insert(tests, "update_clobbers_workspace") table.insert(tests, "ls_unknown_in_subdir") +table.insert(tests, "test_a_merge_8") -
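For context on the testsuite.lua hunk above: registering a test is just appending its name to the global `tests` table, and each name presumably corresponds to a directory under tests/ containing a __driver__.lua. The following is a minimal, self-contained Lua sketch of that registration pattern only; the run_all function shown here is a hypothetical stand-in for illustration, not the actual monotone test harness.

-- Hypothetical illustration: a simplified stand-in for the real harness,
-- showing how a registration table like the one patched above is consumed.
local tests = {}

table.insert(tests, "checkout_clobbers_workspace")
table.insert(tests, "update_clobbers_workspace")
table.insert(tests, "ls_unknown_in_subdir")
table.insert(tests, "test_a_merge_8")   -- the entry added by this patch

local function run_all(registered)
  for i, name in ipairs(registered) do
    -- each name is assumed to map to tests/<name>/__driver__.lua
    print(string.format("%3d. tests/%s/__driver__.lua", i, name))
  end
end

run_all(tests)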