/*-------------------------------------------------------------------------
 *
 * plannodes.h
 *	  definitions for query plan nodes
 *
 *
 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/nodes/plannodes.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef PLANNODES_H
#define PLANNODES_H

#include "access/sdir.h"
#include "access/stratnum.h"
#include "lib/stringinfo.h"
#include "nodes/bitmapset.h"
#include "nodes/lockoptions.h"
#include "nodes/primnodes.h"

/* ----------------------------------------------------------------
 *						node definitions
 * ----------------------------------------------------------------
 */

/* ----------------
 *		PlannedStmt node
 *
 * The output of the planner is a Plan tree headed by a PlannedStmt node.
 * PlannedStmt holds the "one time" information needed by the executor.
 *
 * For simplicity in APIs, we also wrap utility statements in PlannedStmt
 * nodes; in such cases, commandType == CMD_UTILITY, the statement itself
 * is in the utilityStmt field, and the rest of the struct is mostly dummy.
 * (We do use canSetTag, stmt_location, stmt_len, and possibly queryId.)
 * ----------------
 */
typedef struct PlannedStmt
{
    NodeTag     type;

    CmdType     commandType;    /* select|insert|update|delete|utility */

    uint64      queryId;        /* query identifier (copied from Query) */

    bool        hasReturning;   /* is it insert|update|delete RETURNING? */

    bool        hasModifyingCTE;    /* has insert|update|delete in WITH? */

    bool        canSetTag;      /* do I set the command result tag? */

    bool        transientPlan;  /* redo plan when TransactionXmin changes? */

    bool        dependsOnRole;  /* is plan specific to current role? */

    bool        parallelModeNeeded; /* parallel mode required to execute? */

    int         jitFlags;       /* which forms of JIT should be performed */

    struct Plan *planTree;      /* tree of Plan nodes */

    List       *rtable;         /* list of RangeTblEntry nodes */

    /* rtable indexes of target relations for INSERT/UPDATE/DELETE */
    List       *resultRelations;    /* integer list of RT indexes, or NIL */

    List       *appendRelations;    /* list of AppendRelInfo nodes */

    List       *subplans;       /* Plan trees for SubPlan expressions; note
                                 * that some could be NULL */

    Bitmapset  *rewindPlanIDs;  /* indices of subplans that require REWIND */

    List       *rowMarks;       /* a list of PlanRowMark's */

    List       *relationOids;   /* OIDs of relations the plan depends on */

    List       *invalItems;     /* other dependencies, as PlanInvalItems */

    List       *paramExecTypes; /* type OIDs for PARAM_EXEC Params */

    Node       *utilityStmt;    /* non-null if this is utility stmt */

    /* statement location in source string (copied from Query) */
    int         stmt_location;  /* start location, or -1 if unknown */
    int         stmt_len;       /* length in bytes; 0 means "rest of string" */
} PlannedStmt;

/* macro for fetching the Plan associated with a SubPlan node */
#define exec_subplan_get_plan(plannedstmt, subplan) \
    ((Plan *) list_nth((plannedstmt)->subplans, (subplan)->plan_id - 1))
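
/*
 * For example, executor code that has a SubPlan expression node and the
 * active PlannedStmt in hand might fetch the corresponding Plan tree
 * roughly like this (a sketch only; "node" and "estate" are hypothetical
 * local variables):
 *
 *		SubPlan    *node = ...;
 *		Plan	   *plan = exec_subplan_get_plan(estate->es_plannedstmt, node);
 *
 * plan_id is 1-based, which is why the macro subtracts 1 before indexing
 * into the subplans list.
 */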

/* ----------------
 *		Plan node
 *
 * All plan nodes "derive" from the Plan structure by having the
 * Plan structure as the first field.  This ensures that everything works
 * when nodes are cast to Plan's.  (node pointers are frequently cast to Plan*
 * when passed around generically in the executor)
 *
 * We never actually instantiate any Plan nodes; this is just the common
 * abstract superclass for all Plan-type nodes.
 * ----------------
 */
typedef struct Plan
{
    NodeTag     type;

    /*
     * estimated execution costs for plan (see costsize.c for more info)
     */
    Cost        startup_cost;   /* cost expended before fetching any tuples */
    Cost        total_cost;     /* total cost (assuming all tuples fetched) */

    /*
     * planner's estimate of result size of this plan step
     */
    Cardinality plan_rows;      /* number of rows plan is expected to emit */
    int         plan_width;     /* average row width in bytes */

    /*
     * information needed for parallel query
     */
    bool        parallel_aware; /* engage parallel-aware logic? */
    bool        parallel_safe;  /* OK to use as part of parallel plan? */

    /*
     * information needed for asynchronous execution
     */
    bool        async_capable;  /* engage asynchronous-capable logic? */

    /*
     * Common structural data for all Plan types.
     */
    int         plan_node_id;   /* unique across entire final plan tree */
    List       *targetlist;     /* target list to be computed at this node */
    List       *qual;           /* implicitly-ANDed qual conditions */
    struct Plan *lefttree;      /* input plan tree(s) */
    struct Plan *righttree;
    List       *initPlan;       /* Init Plan nodes (un-correlated expr
                                 * subselects) */

    /*
     * Information for management of parameter-change-driven rescanning
     *
     * extParam includes the paramIDs of all external PARAM_EXEC params
     * affecting this plan node or its children.  setParam params from the
     * node's initPlans are not included, but their extParams are.
     *
     * allParam includes all the extParam paramIDs, plus the IDs of local
     * params that affect the node (i.e., the setParams of its initplans).
     * These are _all_ the PARAM_EXEC params that affect this node.
     */
    Bitmapset  *extParam;
    Bitmapset  *allParam;
} Plan;

/* ----------------
 *	these are defined to avoid confusion problems with "left"
 *	and "right" and "inner" and "outer".  The convention is that
 *	the "left" plan is the "outer" plan and the "right" plan is
 *	the inner plan, but these make the code more readable.
 * ----------------
 */
#define innerPlan(node)			(((Plan *)(node))->righttree)
#define outerPlan(node)			(((Plan *)(node))->lefttree)
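
/*
 * As a small illustration (a sketch, not actual executor code), generic
 * tree-walking logic can use these macros instead of touching lefttree and
 * righttree directly:
 *
 *		static void
 *		walk_plan(Plan *plan)
 *		{
 *			if (plan == NULL)
 *				return;
 *			walk_plan(outerPlan(plan));
 *			walk_plan(innerPlan(plan));
 *		}
 *
 * Here walk_plan is a hypothetical helper; real traversal code must also
 * recurse into node-type-specific substructure such as Append.appendplans.
 */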

/* ----------------
 *	 Result node -
 *		If no outer plan, evaluate a variable-free targetlist.
 *		If outer plan, return tuples from outer plan (after a level of
 *		projection as shown by targetlist).
 *
 * If resconstantqual isn't NULL, it represents a one-time qualification
 * test (i.e., one that doesn't depend on any variables from the outer plan,
 * so needs to be evaluated only once).
 * ----------------
 */
typedef struct Result
{
    Plan        plan;
    Node       *resconstantqual;
} Result;
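
/*
 * For example (illustrative only), a query with no FROM clause such as
 *
 *		SELECT 2 + 2;
 *
 * is normally planned as a lone Result node with no outer plan, while a
 * query whose WHERE clause reduces to a constant-false or otherwise
 * variable-free condition can use resconstantqual as a one-time filter to
 * skip reading the outer plan entirely.
 */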

/* ----------------
 *	 ProjectSet node -
 *		Apply a projection that includes set-returning functions to the
 *		output tuples of the outer plan.
 * ----------------
 */
typedef struct ProjectSet
{
    Plan        plan;
} ProjectSet;

/* ----------------
 *	 ModifyTable node -
 *		Apply rows produced by outer plan to result table(s),
 *		by inserting, updating, or deleting.
 *
 * If the originally named target table is a partitioned table, both
 * nominalRelation and rootRelation contain the RT index of the partition
 * root, which is not otherwise mentioned in the plan.  Otherwise rootRelation
 * is zero.  However, nominalRelation will always be set, as it's the rel that
 * EXPLAIN should claim is the INSERT/UPDATE/DELETE target.
 *
 * Note that rowMarks and epqParam are presumed to be valid for all the
 * table(s); they can't contain any info that varies across tables.
 * ----------------
 */
typedef struct ModifyTable
{
    Plan        plan;
    CmdType     operation;      /* INSERT, UPDATE, or DELETE */
    bool        canSetTag;      /* do we set the command tag/es_processed? */
    Index       nominalRelation;    /* Parent RT index for use of EXPLAIN */
    Index       rootRelation;   /* Root RT index, if target is partitioned */
    bool        partColsUpdated;    /* some part key in hierarchy updated? */
    List       *resultRelations;    /* integer list of RT indexes */
    List       *updateColnosLists;  /* per-target-table update_colnos lists */
    List       *withCheckOptionLists;   /* per-target-table WCO lists */
    List       *returningLists; /* per-target-table RETURNING tlists */
    List       *fdwPrivLists;   /* per-target-table FDW private data lists */
    Bitmapset  *fdwDirectModifyPlans;   /* indices of FDW DM plans */
    List       *rowMarks;       /* PlanRowMarks (non-locking only) */
    int         epqParam;       /* ID of Param for EvalPlanQual re-eval */
    OnConflictAction onConflictAction;  /* ON CONFLICT action */
    List       *arbiterIndexes; /* List of ON CONFLICT arbiter index OIDs */
    List       *onConflictSet;  /* INSERT ON CONFLICT DO UPDATE targetlist */
    List       *onConflictCols; /* target column numbers for onConflictSet */
    Node       *onConflictWhere;    /* WHERE for ON CONFLICT UPDATE */
    Index       exclRelRTI;     /* RTI of the EXCLUDED pseudo relation */
    List       *exclRelTlist;   /* tlist of the EXCLUDED pseudo relation */
} ModifyTable;
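
/*
 * As a hedged example of how these fields line up: an INSERT such as
 *
 *		INSERT INTO t (id, val) VALUES (1, 'x')
 *		ON CONFLICT (id) DO UPDATE SET val = EXCLUDED.val;
 *
 * would typically produce a ModifyTable with operation = CMD_INSERT,
 * onConflictAction = ONCONFLICT_UPDATE, arbiterIndexes naming the unique
 * index on "id", onConflictSet holding the SET targetlist, and exclRelRTI
 * identifying the EXCLUDED pseudo relation referenced by that targetlist.
 * (Table and column names here are purely illustrative.)
 */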

struct PartitionPruneInfo;      /* forward reference to struct below */

/* ----------------
 *	 Append node -
 *		Generate the concatenation of the results of sub-plans.
 * ----------------
 */
typedef struct Append
{
    Plan        plan;
    Bitmapset  *apprelids;      /* RTIs of appendrel(s) formed by this node */
    List       *appendplans;
    int         nasyncplans;    /* # of asynchronous plans */

    /*
     * All 'appendplans' preceding this index are non-partial plans. All
     * 'appendplans' from this index onwards are partial plans.
     */
    int         first_partial_plan;

    /* Info for run-time subplan pruning; NULL if we're not doing that */
    struct PartitionPruneInfo *part_prune_info;
} Append;
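
/*
 * Illustrative plan shape (not a literal EXPLAIN transcript): scanning a
 * partitioned table, e.g.
 *
 *		SELECT * FROM measurements WHERE logdate >= '2022-01-01';
 *
 * usually yields an Append whose appendplans list holds one scan per
 * unpruned partition.  When the relevant comparison values are not known
 * until execution (for example, Params), part_prune_info lets the executor
 * prune additional partitions at startup or per rescan.  The table name
 * here is hypothetical.
 */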

/* ----------------
 *	 MergeAppend node -
 *		Merge the results of pre-sorted sub-plans to preserve the ordering.
 * ----------------
 */
typedef struct MergeAppend
{
    Plan        plan;
    Bitmapset  *apprelids;      /* RTIs of appendrel(s) formed by this node */
    List       *mergeplans;
    /* these fields are just like the sort-key info in struct Sort: */
    int         numCols;        /* number of sort-key columns */
    AttrNumber *sortColIdx;     /* their indexes in the target list */
    Oid        *sortOperators;  /* OIDs of operators to sort them by */
    Oid        *collations;     /* OIDs of collations */
    bool       *nullsFirst;     /* NULLS FIRST/LAST directions */
    /* Info for run-time subplan pruning; NULL if we're not doing that */
    struct PartitionPruneInfo *part_prune_info;
} MergeAppend;

/* ----------------
 *	RecursiveUnion node -
 *		Generate a recursive union of two subplans.
 *
 * The "outer" subplan is always the non-recursive term, and the "inner"
 * subplan is the recursive term.
 * ----------------
 */
typedef struct RecursiveUnion
{
    Plan        plan;
    int         wtParam;        /* ID of Param representing work table */
    /* Remaining fields are zero/null in UNION ALL case */
    int         numCols;        /* number of columns to check for
                                 * duplicate-ness */
    AttrNumber *dupColIdx;      /* their indexes in the target list */
    Oid        *dupOperators;   /* equality operators to compare with */
    Oid        *dupCollations;
    long        numGroups;      /* estimated number of groups in input */
} RecursiveUnion;

/* ----------------
 *	 BitmapAnd node -
 *		Generate the intersection of the results of sub-plans.
 *
 * The subplans must be of types that yield tuple bitmaps.  The targetlist
 * and qual fields of the plan are unused and are always NIL.
 * ----------------
 */
typedef struct BitmapAnd
{
    Plan        plan;
    List       *bitmapplans;
} BitmapAnd;

/* ----------------
 *	 BitmapOr node -
 *		Generate the union of the results of sub-plans.
 *
 * The subplans must be of types that yield tuple bitmaps.  The targetlist
 * and qual fields of the plan are unused and are always NIL.
 * ----------------
 */
typedef struct BitmapOr
{
    Plan        plan;
    bool        isshared;
    List       *bitmapplans;
} BitmapOr;

/*
 * ==========
 * Scan nodes
 * ==========
 */
typedef struct Scan
{
    Plan        plan;
    Index       scanrelid;      /* relid is index into the range table */
} Scan;

/* ----------------
 *		sequential scan node
 * ----------------
 */
typedef struct SeqScan
{
    Scan        scan;
} SeqScan;

/* ----------------
 *		table sample scan node
 * ----------------
 */
typedef struct SampleScan
{
    Scan        scan;
    /* use struct pointer to avoid including parsenodes.h here */
    struct TableSampleClause *tablesample;
} SampleScan;

/* ----------------
 *		index scan node
 *
 * indexqualorig is an implicitly-ANDed list of index qual expressions, each
 * in the same form it appeared in the query WHERE condition.  Each should
 * be of the form (indexkey OP comparisonval) or (comparisonval OP indexkey).
 * The indexkey is a Var or expression referencing column(s) of the index's
 * base table.  The comparisonval might be any expression, but it won't use
 * any columns of the base table.  The expressions are ordered by index
 * column position (but items referencing the same index column can appear
 * in any order).  indexqualorig is used at runtime only if we have to recheck
 * a lossy indexqual.
 *
 * indexqual has the same form, but the expressions have been commuted if
 * necessary to put the indexkeys on the left, and the indexkeys are replaced
 * by Var nodes identifying the index columns (their varno is INDEX_VAR and
 * their varattno is the index column number).
 *
 * indexorderbyorig is similarly the original form of any ORDER BY expressions
 * that are being implemented by the index, while indexorderby is modified to
 * have index column Vars on the left-hand side.  Here, multiple expressions
 * must appear in exactly the ORDER BY order, and this is not necessarily the
 * index column order.  Only the expressions are provided, not the auxiliary
 * sort-order information from the ORDER BY SortGroupClauses; it's assumed
 * that the sort ordering is fully determinable from the top-level operators.
 * indexorderbyorig is used at runtime to recheck the ordering, if the index
 * cannot calculate an accurate ordering.  It is also needed for EXPLAIN.
 *
 * indexorderbyops is a list of the OIDs of the operators used to sort the
 * ORDER BY expressions.  This is used together with indexorderbyorig to
 * recheck ordering at run time.  (Note that indexorderby, indexorderbyorig,
 * and indexorderbyops are used for amcanorderbyop cases, not amcanorder.)
 *
 * indexorderdir specifies the scan ordering, for indexscans on amcanorder
 * indexes (for other indexes it should be "don't care").
 * ----------------
 */
typedef struct IndexScan
{
    Scan        scan;
    Oid         indexid;        /* OID of index to scan */
    List       *indexqual;      /* list of index quals (usually OpExprs) */
    List       *indexqualorig;  /* the same in original form */
    List       *indexorderby;   /* list of index ORDER BY exprs */
    List       *indexorderbyorig;   /* the same in original form */
    List       *indexorderbyops;    /* OIDs of sort ops for ORDER BY exprs */
    ScanDirection indexorderdir;    /* forward or backward or don't care */
} IndexScan;
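
/*
 * A concrete (but purely illustrative) instance of the commutation rule
 * described above: given an index on t(a), the WHERE clause
 *
 *		WHERE 42 = t.a
 *
 * appears in indexqualorig essentially as written, while in indexqual it
 * is commuted to put the index key on the left, roughly "a_indexcol = 42",
 * with the index key represented as a Var whose varno is INDEX_VAR and
 * whose varattno is 1 (the index's first column).
 */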

/* ----------------
 *		index-only scan node
 *
 * IndexOnlyScan is very similar to IndexScan, but it specifies an
 * index-only scan, in which the data comes from the index not the heap.
 * Because of this, *all* Vars in the plan node's targetlist, qual, and
 * index expressions reference index columns and have varno = INDEX_VAR.
 *
 * We could almost use indexqual directly against the index's output tuple
 * when rechecking lossy index operators, but that won't work for quals on
 * index columns that are not retrievable.  Hence, recheckqual is needed
 * for rechecks: it expresses the same condition as indexqual, but using
 * only index columns that are retrievable.  (We will not generate an
 * index-only scan if this is not possible.  An example is that if an
 * index has table column "x" in a retrievable index column "ind1", plus
 * an expression f(x) in a non-retrievable column "ind2", an indexable
 * query on f(x) will use "ind2" in indexqual and f(ind1) in recheckqual.
 * Without the "ind1" column, an index-only scan would be disallowed.)
 *
 * We don't currently need a recheckable equivalent of indexorderby,
 * because we don't support lossy operators in index ORDER BY.
 *
 * To help EXPLAIN interpret the index Vars for display, we provide
 * indextlist, which represents the contents of the index as a targetlist
 * with one TLE per index column.  Vars appearing in this list reference
 * the base table, and this is the only field in the plan node that may
 * contain such Vars.  Also, for the convenience of setrefs.c, TLEs in
 * indextlist are marked as resjunk if they correspond to columns that
 * the index AM cannot reconstruct.
 * ----------------
 */
typedef struct IndexOnlyScan
{
    Scan        scan;
    Oid         indexid;        /* OID of index to scan */
    List       *indexqual;      /* list of index quals (usually OpExprs) */
    List       *recheckqual;    /* index quals in recheckable form */
    List       *indexorderby;   /* list of index ORDER BY exprs */
    List       *indextlist;     /* TargetEntry list describing index's cols */
    ScanDirection indexorderdir;    /* forward or backward or don't care */
} IndexOnlyScan;

/* ----------------
 *		bitmap index scan node
 *
 * BitmapIndexScan delivers a bitmap of potential tuple locations;
 * it does not access the heap itself.  The bitmap is used by an
 * ancestor BitmapHeapScan node, possibly after passing through
 * intermediate BitmapAnd and/or BitmapOr nodes to combine it with
 * the results of other BitmapIndexScans.
 *
 * The fields have the same meanings as for IndexScan, except we don't
 * store a direction flag because direction is uninteresting.
 *
 * In a BitmapIndexScan plan node, the targetlist and qual fields are
 * not used and are always NIL.  The indexqualorig field is unused at
 * run time too, but is saved for the benefit of EXPLAIN.
 * ----------------
 */
typedef struct BitmapIndexScan
{
    Scan        scan;
    Oid         indexid;        /* OID of index to scan */
    bool        isshared;       /* Create shared bitmap if set */
    List       *indexqual;      /* list of index quals (OpExprs) */
    List       *indexqualorig;  /* the same in original form */
} BitmapIndexScan;

/* ----------------
 *		bitmap sequential scan node
 *
 * This needs a copy of the qual conditions being used by the input index
 * scans because there are various cases where we need to recheck the quals;
 * for example, when the bitmap is lossy about the specific rows on a page
 * that meet the index condition.
 * ----------------
 */
typedef struct BitmapHeapScan
{
    Scan        scan;
    List       *bitmapqualorig; /* index quals, in standard expr form */
} BitmapHeapScan;
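
/*
 * Hedged illustration of how these bitmap nodes combine.  A query such as
 *
 *		SELECT * FROM t WHERE a = 1 OR b = 2;
 *
 * with separate indexes on a and b can produce a tree of roughly this shape:
 *
 *		BitmapHeapScan on t
 *		  -> BitmapOr
 *		       -> BitmapIndexScan on t_a_idx  (indexqual: a = 1)
 *		       -> BitmapIndexScan on t_b_idx  (indexqual: b = 2)
 *
 * The BitmapHeapScan's bitmapqualorig then holds (a = 1 OR b = 2) so the
 * whole condition can be rechecked when the bitmap becomes lossy.
 * (Relation and index names are hypothetical.)
 */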

/* ----------------
 *		tid scan node
 *
 * tidquals is an implicitly OR'ed list of qual expressions of the form
 * "CTID = pseudoconstant", or "CTID = ANY(pseudoconstant_array)",
 * or a CurrentOfExpr for the relation.
 * ----------------
 */
typedef struct TidScan
{
    Scan        scan;
    List       *tidquals;       /* qual(s) involving CTID = something */
} TidScan;

/* ----------------
 *		tid range scan node
 *
 * tidrangequals is an implicitly AND'ed list of qual expressions of the form
 * "CTID relop pseudoconstant", where relop is one of >,>=,<,<=.
 * ----------------
 */
typedef struct TidRangeScan
{
    Scan        scan;
    List       *tidrangequals;  /* qual(s) involving CTID op something */
} TidRangeScan;

/* ----------------
 *		subquery scan node
 *
 * SubqueryScan is for scanning the output of a sub-query in the range table.
 * We often need an extra plan node above the sub-query's plan to perform
 * expression evaluations (which we can't push into the sub-query without
 * risking changing its semantics).  Although we are not scanning a physical
 * relation, we make this a descendant of Scan anyway for code-sharing
 * purposes.
 *
 * Note: we store the sub-plan in the type-specific subplan field, not in
 * the generic lefttree field as you might expect.  This is because we do
 * not want plan-tree-traversal routines to recurse into the subplan without
 * knowing that they are changing Query contexts.
 * ----------------
 */
typedef struct SubqueryScan
{
    Scan        scan;
    Plan       *subplan;
} SubqueryScan;

/* ----------------
 *		FunctionScan node
 * ----------------
 */
typedef struct FunctionScan
{
    Scan        scan;
    List       *functions;      /* list of RangeTblFunction nodes */
    bool        funcordinality; /* WITH ORDINALITY */
} FunctionScan;

/* ----------------
 *		ValuesScan node
 * ----------------
 */
typedef struct ValuesScan
{
    Scan        scan;
    List       *values_lists;   /* list of expression lists */
} ValuesScan;

/* ----------------
 *		TableFunc scan node
 * ----------------
 */
typedef struct TableFuncScan
{
    Scan        scan;
    TableFunc  *tablefunc;      /* table function node */
} TableFuncScan;

/* ----------------
 *		CteScan node
 * ----------------
 */
typedef struct CteScan
{
    Scan        scan;
    int         ctePlanId;      /* ID of init SubPlan for CTE */
    int         cteParam;       /* ID of Param representing CTE output */
} CteScan;

/* ----------------
 *		NamedTuplestoreScan node
 * ----------------
 */
typedef struct NamedTuplestoreScan
{
    Scan        scan;
    char       *enrname;        /* Name given to Ephemeral Named Relation */
} NamedTuplestoreScan;

/* ----------------
 *		WorkTableScan node
 * ----------------
 */
typedef struct WorkTableScan
{
    Scan        scan;
    int         wtParam;        /* ID of Param representing work table */
} WorkTableScan;
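
/*
 * As a hedged sketch of how RecursiveUnion, CteScan and WorkTableScan fit
 * together, a recursive CTE such as
 *
 *		WITH RECURSIVE r(n) AS (
 *			SELECT 1
 *			UNION ALL
 *			SELECT n + 1 FROM r WHERE n < 10
 *		)
 *		SELECT * FROM r;
 *
 * typically plans as a CteScan over a RecursiveUnion whose outer subplan is
 * the non-recursive term and whose inner subplan contains a WorkTableScan.
 * The WorkTableScan's wtParam matches the RecursiveUnion's wtParam, which is
 * how the executor connects the scan to the current iteration's work table.
 */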

/* ----------------
 *		ForeignScan node
 *
 * fdw_exprs and fdw_private are both under the control of the foreign-data
 * wrapper, but fdw_exprs is presumed to contain expression trees and will
 * be post-processed accordingly by the planner; fdw_private won't be.
 * Note that everything in both lists must be copiable by copyObject().
 * One way to store an arbitrary blob of bytes is to represent it as a bytea
 * Const.  Usually, though, you'll be better off choosing a representation
 * that can be dumped usefully by nodeToString().
 *
 * fdw_scan_tlist is a targetlist describing the contents of the scan tuple
 * returned by the FDW; it can be NIL if the scan tuple matches the declared
 * rowtype of the foreign table, which is the normal case for a simple foreign
 * table scan.  (If the plan node represents a foreign join, fdw_scan_tlist
 * is required since there is no rowtype available from the system catalogs.)
 * When fdw_scan_tlist is provided, Vars in the node's tlist and quals must
 * have varno INDEX_VAR, and their varattnos correspond to resnos in the
 * fdw_scan_tlist (which are also column numbers in the actual scan tuple).
 * fdw_scan_tlist is never actually executed; it just holds expression trees
 * describing what is in the scan tuple's columns.
 *
 * fdw_recheck_quals should contain any quals which the core system passed to
 * the FDW but which were not added to scan.plan.qual; that is, it should
 * contain the quals being checked remotely.  This is needed for correct
 * behavior during EvalPlanQual rechecks.
 *
 * When the plan node represents a foreign join, scan.scanrelid is zero and
 * fs_relids must be consulted to identify the join relation.  (fs_relids
 * is valid for simple scans as well, but will always match scan.scanrelid.)
 *
 * If the FDW's PlanDirectModify() callback decides to repurpose a ForeignScan
 * node to perform the UPDATE or DELETE operation directly in the remote
 * server, it sets 'operation' and 'resultRelation' to identify the operation
 * type and target relation.  Note that these fields are only set if the
 * modification is performed *fully* remotely; otherwise, the modification is
 * driven by a local ModifyTable node and 'operation' is left to CMD_SELECT.
 * ----------------
 */
typedef struct ForeignScan
{
    Scan        scan;
    CmdType     operation;      /* SELECT/INSERT/UPDATE/DELETE */
    Index       resultRelation; /* direct modification target's RT index */
    Oid         fs_server;      /* OID of foreign server */
    List       *fdw_exprs;      /* expressions that FDW may evaluate */
    List       *fdw_private;    /* private data for FDW */
    List       *fdw_scan_tlist; /* optional tlist describing scan tuple */
    List       *fdw_recheck_quals;  /* original quals not in scan.plan.qual */
    Bitmapset  *fs_relids;      /* RTIs generated by this scan */
    bool        fsSystemCol;    /* true if any "system column" is needed */
} ForeignScan;

/* ----------------
 *	   CustomScan node
 *
 * The comments for ForeignScan's fdw_exprs, fdw_private, fdw_scan_tlist,
 * and fs_relids fields apply equally to CustomScan's custom_exprs,
 * custom_private, custom_scan_tlist, and custom_relids fields.  The
 * convention of setting scan.scanrelid to zero for joins applies as well.
 *
 * Note that since Plan trees can be copied, custom scan providers *must*
 * fit all plan data they need into those fields; embedding CustomScan in
 * a larger struct will not work.
 * ----------------
 */
struct CustomScanMethods;

typedef struct CustomScan
{
    Scan        scan;
    uint32      flags;          /* mask of CUSTOMPATH_* flags, see
                                 * nodes/extensible.h */
    List       *custom_plans;   /* list of Plan nodes, if any */
    List       *custom_exprs;   /* expressions that custom code may evaluate */
    List       *custom_private; /* private data for custom code */
    List       *custom_scan_tlist;  /* optional tlist describing scan tuple */
    Bitmapset  *custom_relids;  /* RTIs generated by this scan */
    const struct CustomScanMethods *methods;
} CustomScan;

/*
 * ==========
 * Join nodes
 * ==========
 */

/* ----------------
 *		Join node
 *
 * jointype:	rule for joining tuples from left and right subtrees
 * inner_unique		each outer tuple can match to no more than one inner tuple
 * joinqual:	qual conditions that came from JOIN/ON or JOIN/USING
 *				(plan.qual contains conditions that came from WHERE)
 *
 * When jointype is INNER, joinqual and plan.qual are semantically
 * interchangeable.  For OUTER jointypes, the two are *not* interchangeable;
 * only joinqual is used to determine whether a match has been found for
 * the purpose of deciding whether to generate null-extended tuples.
 * (But plan.qual is still applied before actually returning a tuple.)
 * For an outer join, only joinquals are allowed to be used as the merge
 * or hash condition of a merge or hash join.
 *
 * inner_unique is set if the joinquals are such that no more than one inner
 * tuple could match any given outer tuple.  This allows the executor to
 * skip searching for additional matches.  (This must be provable from just
 * the joinquals, ignoring plan.qual, due to where the executor tests it.)
 * ----------------
 */
typedef struct Join
{
    Plan        plan;
    JoinType    jointype;
    bool        inner_unique;
    List       *joinqual;       /* JOIN quals (in addition to plan.qual) */
} Join;
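
/*
 * A short, hedged example of the joinqual vs. plan.qual distinction above:
 *
 *		SELECT *
 *		FROM a LEFT JOIN b ON a.id = b.a_id
 *		WHERE b.flag IS NULL;
 *
 * Here "a.id = b.a_id" becomes the join node's joinqual (it decides whether
 * a match exists, and thus whether a null-extended row is emitted), while
 * "b.flag IS NULL" typically ends up in plan.qual and merely filters the
 * rows the join produces.  Table and column names are illustrative only.
 */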

/* ----------------
 *		nest loop join node
 *
 * The nestParams list identifies any executor Params that must be passed
 * into execution of the inner subplan carrying values from the current row
 * of the outer subplan.  Currently we restrict these values to be simple
 * Vars, but perhaps someday that'd be worth relaxing.  (Note: during plan
 * creation, the paramval can actually be a PlaceHolderVar expression; but it
 * must be a Var with varno OUTER_VAR by the time it gets to the executor.)
 * ----------------
 */
typedef struct NestLoop
{
    Join        join;
    List       *nestParams;     /* list of NestLoopParam nodes */
} NestLoop;

typedef struct NestLoopParam
{
    NodeTag     type;
    int         paramno;        /* number of the PARAM_EXEC Param to set */
    Var        *paramval;       /* outer-relation Var to assign to Param */
} NestLoopParam;

/* ----------------
 *		merge join node
 *
 * The expected ordering of each mergeable column is described by a btree
 * opfamily OID, a collation OID, a direction (BTLessStrategyNumber or
 * BTGreaterStrategyNumber) and a nulls-first flag.  Note that the two sides
 * of each mergeclause may be of different datatypes, but they are ordered the
 * same way according to the common opfamily and collation.  The operator in
 * each mergeclause must be an equality operator of the indicated opfamily.
 * ----------------
 */
typedef struct MergeJoin
{
    Join        join;
    bool        skip_mark_restore;  /* Can we skip mark/restore calls? */
    List       *mergeclauses;   /* mergeclauses as expression trees */
    /* these are arrays, but have the same length as the mergeclauses list: */
    Oid        *mergeFamilies;  /* per-clause OIDs of btree opfamilies */
    Oid        *mergeCollations;    /* per-clause OIDs of collations */
    int        *mergeStrategies;    /* per-clause ordering (ASC or DESC) */
    bool       *mergeNullsFirst;    /* per-clause nulls ordering */
} MergeJoin;

/* ----------------
 *		hash join node
 * ----------------
 */
typedef struct HashJoin
{
    Join        join;
    List       *hashclauses;
    List       *hashoperators;
    List       *hashcollations;

    /*
     * List of expressions to be hashed for tuples from the outer plan, to
     * perform lookups in the hashtable over the inner plan.
     */
    List       *hashkeys;
} HashJoin;
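
/*
 * Hedged plan-shape sketch for the hash join nodes (HashJoin here, Hash
 * further below): an equi-join such as
 *
 *		SELECT * FROM orders o JOIN customers c ON o.cust_id = c.id;
 *
 * commonly produces
 *
 *		HashJoin  (hashclauses: o.cust_id = c.id)
 *		  -> Seq Scan on orders        (outer side; probes the hash table)
 *		  -> Hash                      (inner side; builds the hash table)
 *		       -> Seq Scan on customers
 *
 * HashJoin.hashkeys holds the outer-side expressions (o.cust_id) used for
 * probing, while the inner-side build keys live in the Hash node's own
 * hashkeys field.  Relation names are illustrative only.
 */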

/* ----------------
 *		materialization node
 * ----------------
 */
typedef struct Material
{
    Plan        plan;
} Material;

/* ----------------
 *		memoize node
 * ----------------
 */
typedef struct Memoize
{
    Plan        plan;

    int         numKeys;        /* size of the two arrays below */

    Oid        *hashOperators;  /* hash operators for each key */
    Oid        *collations;     /* collations for each key */
    List       *param_exprs;    /* cache key exprs containing parameters */
    bool        singlerow;      /* true if the cache entry should be marked as
                                 * complete after we store the first tuple in
                                 * it. */
    bool        binary_mode;    /* true when cache key should be compared bit
                                 * by bit, false when using hash equality ops */
    uint32      est_entries;    /* The maximum number of entries that the
                                 * planner expects will fit in the cache, or 0
                                 * if unknown */
    Bitmapset  *keyparamids;    /* paramids from param_exprs */
} Memoize;

/* ----------------
 *		sort node
 * ----------------
 */
typedef struct Sort
{
    Plan        plan;
    int         numCols;        /* number of sort-key columns */
    AttrNumber *sortColIdx;     /* their indexes in the target list */
    Oid        *sortOperators;  /* OIDs of operators to sort them by */
    Oid        *collations;     /* OIDs of collations */
    bool       *nullsFirst;     /* NULLS FIRST/LAST directions */
} Sort;

/* ----------------
 *		incremental sort node
 * ----------------
 */
typedef struct IncrementalSort
{
    Sort        sort;
    int         nPresortedCols; /* number of presorted columns */
} IncrementalSort;

/* ---------------
 *	 group node -
 *		Used for queries with GROUP BY (but no aggregates) specified.
 *		The input must be presorted according to the grouping columns.
 * ---------------
 */
typedef struct Group
{
    Plan        plan;
    int         numCols;        /* number of grouping columns */
    AttrNumber *grpColIdx;      /* their indexes in the target list */
    Oid        *grpOperators;   /* equality operators to compare with */
    Oid        *grpCollations;
} Group;

/* ---------------
 *		aggregate node
 *
 * An Agg node implements plain or grouped aggregation.  For grouped
 * aggregation, we can work with presorted input or unsorted input;
 * the latter strategy uses an internal hashtable.
 *
 * Notice the lack of any direct info about the aggregate functions to be
 * computed.  They are found by scanning the node's tlist and quals during
 * executor startup.  (It is possible that there are no aggregate functions;
 * this could happen if they get optimized away by constant-folding, or if
 * we are using the Agg node to implement hash-based grouping.)
 * ---------------
 */
typedef struct Agg
{
    Plan        plan;
    AggStrategy aggstrategy;    /* basic strategy, see nodes.h */
    AggSplit    aggsplit;       /* agg-splitting mode, see nodes.h */
    int         numCols;        /* number of grouping columns */
    AttrNumber *grpColIdx;      /* their indexes in the target list */
    Oid        *grpOperators;   /* equality operators to compare with */
    Oid        *grpCollations;
    long        numGroups;      /* estimated number of groups in input */
    uint64      transitionSpace;    /* for pass-by-ref transition data */
    Bitmapset  *aggParams;      /* IDs of Params used in Aggref inputs */
    /* Note: planner provides numGroups & aggParams only in HASHED/MIXED case */
    List       *groupingSets;   /* grouping sets to use */
    List       *chain;          /* chained Agg/Sort nodes */
} Agg;

/* ----------------
 *		window aggregate node
 * ----------------
 */
typedef struct WindowAgg
{
    Plan        plan;
    Index       winref;         /* ID referenced by window functions */
    int         partNumCols;    /* number of columns in partition clause */
    AttrNumber *partColIdx;     /* their indexes in the target list */
    Oid        *partOperators;  /* equality operators for partition columns */
    Oid        *partCollations; /* collations for partition columns */
    int         ordNumCols;     /* number of columns in ordering clause */
    AttrNumber *ordColIdx;      /* their indexes in the target list */
    Oid        *ordOperators;   /* equality operators for ordering columns */
    Oid        *ordCollations;  /* collations for ordering columns */
    int         frameOptions;   /* frame_clause options, see WindowDef */
    Node       *startOffset;    /* expression for starting bound, if any */
    Node       *endOffset;      /* expression for ending bound, if any */
    /* these fields are used with RANGE offset PRECEDING/FOLLOWING: */
    Oid         startInRangeFunc;   /* in_range function for startOffset */
    Oid         endInRangeFunc; /* in_range function for endOffset */
    Oid         inRangeColl;    /* collation for in_range tests */
    bool        inRangeAsc;     /* use ASC sort order for in_range tests? */
    bool        inRangeNullsFirst;  /* nulls sort first for in_range tests? */
} WindowAgg;

/* ----------------
 *		unique node
 * ----------------
 */
typedef struct Unique
{
    Plan        plan;
    int         numCols;        /* number of columns to check for uniqueness */
    AttrNumber *uniqColIdx;     /* their indexes in the target list */
    Oid        *uniqOperators;  /* equality operators to compare with */
    Oid        *uniqCollations; /* collations for equality comparisons */
} Unique;

/* ------------
 *		gather node
 *
 * Note: rescan_param is the ID of a PARAM_EXEC parameter slot.  That slot
 * will never actually contain a value, but the Gather node must flag it as
 * having changed whenever it is rescanned.  The child parallel-aware scan
 * nodes are marked as depending on that parameter, so that the rescan
 * machinery is aware that their output is likely to change across rescans.
 * In some cases we don't need a rescan Param, so rescan_param is set to -1.
 * ------------
 */
typedef struct Gather
{
    Plan        plan;
    int         num_workers;    /* planned number of worker processes */
    int         rescan_param;   /* ID of Param that signals a rescan, or -1 */
    bool        single_copy;    /* don't execute plan more than once */
    bool        invisible;      /* suppress EXPLAIN display (for testing)? */
    Bitmapset  *initParam;      /* param IDs of initplans that are referred
                                 * to at the Gather node or one of its child
                                 * nodes */
} Gather;
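
/*
 * Hedged example of a parallel plan shape using Gather: with parallelism
 * enabled, an aggregate over a large table, e.g.
 *
 *		SELECT count(*) FROM big_table;
 *
 * is often planned as
 *
 *		Finalize Aggregate
 *		  -> Gather  (num_workers = 2)
 *		       -> Partial Aggregate
 *		            -> Parallel Seq Scan on big_table
 *
 * where the scan below the Gather has parallel_aware = true and the worker
 * count comes from Gather.num_workers.  The table name and worker count are
 * illustrative only.
 */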

/* ------------
 *		gather merge node
 * ------------
 */
typedef struct GatherMerge
{
    Plan        plan;
    int         num_workers;    /* planned number of worker processes */
    int         rescan_param;   /* ID of Param that signals a rescan, or -1 */
    /* remaining fields are just like the sort-key info in struct Sort */
    int         numCols;        /* number of sort-key columns */
    AttrNumber *sortColIdx;     /* their indexes in the target list */
    Oid        *sortOperators;  /* OIDs of operators to sort them by */
    Oid        *collations;     /* OIDs of collations */
    bool       *nullsFirst;     /* NULLS FIRST/LAST directions */
    Bitmapset  *initParam;      /* param IDs of initplans that are referred
                                 * to at the GatherMerge node or one of its
                                 * child nodes */
} GatherMerge;

/* ----------------
 *		hash build node
 *
 * If the executor is supposed to try to apply skew join optimization, then
 * skewTable/skewColumn/skewInherit identify the outer relation's join key
 * column, from which the relevant MCV statistics can be fetched.
 * ----------------
 */
typedef struct Hash
{
    Plan        plan;

    /*
     * List of expressions to be hashed for tuples from Hash's outer plan,
     * needed to put them into the hashtable.
     */
    List       *hashkeys;       /* hash keys for the hashjoin condition */
    Oid         skewTable;      /* outer join key's table OID, or InvalidOid */
    AttrNumber  skewColumn;     /* outer join key's column #, or zero */
    bool        skewInherit;    /* is outer join rel an inheritance tree? */
    /* all other info is in the parent HashJoin node */
    Cardinality rows_total;     /* estimate total rows if parallel_aware */
} Hash;

/* ----------------
 *		setop node
 * ----------------
 */
typedef struct SetOp
{
    Plan        plan;
    SetOpCmd    cmd;            /* what to do, see nodes.h */
    SetOpStrategy strategy;     /* how to do it, see nodes.h */
    int         numCols;        /* number of columns to check for
                                 * duplicate-ness */
    AttrNumber *dupColIdx;      /* their indexes in the target list */
    Oid        *dupOperators;   /* equality operators to compare with */
    Oid        *dupCollations;
    AttrNumber  flagColIdx;     /* where is the flag column, if any */
    int         firstFlag;      /* flag value for first input relation */
    long        numGroups;      /* estimated number of groups in input */
} SetOp;

/* ----------------
 *		lock-rows node
 *
 * rowMarks identifies the rels to be locked by this node; it should be
 * a subset of the rowMarks listed in the top-level PlannedStmt.
 * epqParam is a Param that all scan nodes below this one must depend on.
 * It is used to force re-evaluation of the plan during EvalPlanQual.
 * ----------------
 */
typedef struct LockRows
{
    Plan        plan;
    List       *rowMarks;       /* a list of PlanRowMark's */
    int         epqParam;       /* ID of Param for EvalPlanQual re-eval */
} LockRows;

/* ----------------
 *		limit node
 *
 * Note: as of Postgres 8.2, the offset and count expressions are expected
 * to yield int8, rather than int4 as before.
 * ----------------
 */
typedef struct Limit
{
    Plan        plan;
    Node       *limitOffset;    /* OFFSET parameter, or NULL if none */
    Node       *limitCount;     /* COUNT parameter, or NULL if none */
    LimitOption limitOption;    /* limit type */
    int         uniqNumCols;    /* number of columns to check for similarity */
    AttrNumber *uniqColIdx;     /* their indexes in the target list */
    Oid        *uniqOperators;  /* equality operators to compare with */
    Oid        *uniqCollations; /* collations for equality comparisons */
} Limit;


/*
 * RowMarkType -
 *	  enums for types of row-marking operations
 *
 * The first four of these values represent different lock strengths that
 * we can take on tuples according to SELECT FOR [KEY] UPDATE/SHARE requests.
 * We support these on regular tables, as well as on foreign tables whose FDWs
 * report support for late locking.  For other foreign tables, any locking
 * that might be done for such requests must happen during the initial row
 * fetch; their FDWs provide no mechanism for going back to lock a row later.
 * This means that the semantics will be a bit different than for a local
 * table; in particular we are likely to lock more rows than would be locked
 * locally, since remote rows will be locked even if they then fail
 * locally-checked restriction or join quals.  However, the prospect of
 * doing a separate remote query to lock each selected row is usually pretty
 * unappealing, so early locking remains a credible design choice for FDWs.
 *
 * When doing UPDATE, DELETE, or SELECT FOR UPDATE/SHARE, we have to uniquely
 * identify all the source rows, not only those from the target relations, so
 * that we can perform EvalPlanQual rechecking at need.  For plain tables we
 * can just fetch the TID, much as for a target relation; this case is
 * represented by ROW_MARK_REFERENCE.  Otherwise (for example for VALUES or
 * FUNCTION scans) we have to copy the whole row value.  ROW_MARK_COPY is
 * pretty inefficient, since most of the time we'll never need the data; but
 * fortunately the overhead is usually not performance-critical in practice.
 * By default we use ROW_MARK_COPY for foreign tables, but if the FDW has
 * a concept of rowid it can request to use ROW_MARK_REFERENCE instead.
 * (Again, this probably doesn't make sense if a physical remote fetch is
 * needed, but for FDWs that map to local storage it might be credible.)
 */
typedef enum RowMarkType
{
    ROW_MARK_EXCLUSIVE,         /* obtain exclusive tuple lock */
    ROW_MARK_NOKEYEXCLUSIVE,    /* obtain no-key exclusive tuple lock */
    ROW_MARK_SHARE,             /* obtain shared tuple lock */
    ROW_MARK_KEYSHARE,          /* obtain keyshare tuple lock */
    ROW_MARK_REFERENCE,         /* just fetch the TID, don't lock it */
    ROW_MARK_COPY               /* physically copy the row value */
} RowMarkType;

#define RowMarkRequiresRowShareLock(marktype)  ((marktype) <= ROW_MARK_KEYSHARE)

/*
 * PlanRowMark -
 *	   plan-time representation of FOR [KEY] UPDATE/SHARE clauses
 *
 * When doing UPDATE, DELETE, or SELECT FOR UPDATE/SHARE, we create a separate
 * PlanRowMark node for each non-target relation in the query.  Relations that
 * are not specified as FOR UPDATE/SHARE are marked ROW_MARK_REFERENCE (if
 * regular tables or supported foreign tables) or ROW_MARK_COPY (if not).
 *
 * Initially all PlanRowMarks have rti == prti and isParent == false.
 * When the planner discovers that a relation is the root of an inheritance
 * tree, it sets isParent true, and adds an additional PlanRowMark to the
 * list for each child relation (including the target rel itself in its role
 * as a child, if it is not a partitioned table).  Any non-leaf partitioned
 * child relations will also have entries with isParent = true.  The child
 * entries have rti == child rel's RT index and prti == top parent's RT index,
 * and can therefore be recognized as children by the fact that prti != rti.
 * The parent's allMarkTypes field gets the OR of (1<<markType) across all
 * its children (this definition allows children to use different markTypes).
 *
 * The planner also adds resjunk output columns to the plan that carry
 * information sufficient to identify the locked or fetched rows.  When
 * markType != ROW_MARK_COPY, these columns are named
 *		tableoid%u			OID of table
 *		ctid%u				TID of row
 * The tableoid column is only present for an inheritance hierarchy.
 * When markType == ROW_MARK_COPY, there is instead a single column named
 *		wholerow%u			whole-row value of relation
 * (An inheritance hierarchy could have all three resjunk output columns,
 * if some children use a different markType than others.)
 * In all three cases, %u represents the rowmark ID number (rowmarkId).
 * This number is unique within a plan tree, except that child relation
 * entries copy their parent's rowmarkId.  (Assigning unique numbers
 * means we needn't renumber rowmarkIds when flattening subqueries, which
 * would require finding and renaming the resjunk columns as well.)
 * Note this means that all tables in an inheritance hierarchy share the
 * same resjunk column names.
 */
typedef struct PlanRowMark
{
    NodeTag     type;
    Index       rti;            /* range table index of markable relation */
    Index       prti;           /* range table index of parent relation */
    Index       rowmarkId;      /* unique identifier for resjunk columns */
    RowMarkType markType;       /* see enum above */
    int         allMarkTypes;   /* OR of (1<<markType) for all children */
    LockClauseStrength strength;    /* LockingClause's strength, or LCS_NONE */
    LockWaitPolicy waitPolicy;  /* NOWAIT and SKIP LOCKED options */
    bool        isParent;       /* true if this is a "dummy" parent entry */
} PlanRowMark;


/*
 * Node types to represent partition pruning information.
 */

/*
 * PartitionPruneInfo - Details required to allow the executor to prune
 * partitions.
 *
 * Here we store mapping details to allow translation of a partitioned table's
 * index as returned by the partition pruning code into subplan indexes for
 * plan types which support arbitrary numbers of subplans, such as Append.
 * We also store various details to tell the executor when it should be
 * performing partition pruning.
 *
 * Each PartitionedRelPruneInfo describes the partitioning rules for a single
 * partitioned table (a/k/a level of partitioning).  Since a partitioning
 * hierarchy could contain multiple levels, we represent it by a List of
 * PartitionedRelPruneInfos, where the first entry represents the topmost
 * partitioned table and additional entries represent non-leaf child
 * partitions, ordered such that parents appear before their children.
 * Then, since an Append-type node could have multiple partitioning
 * hierarchies among its children, we have an unordered List of those Lists.
 *
 * prune_infos			List of Lists containing PartitionedRelPruneInfo nodes,
 *						one sublist per run-time-prunable partition hierarchy
 *						appearing in the parent plan node's subplans.
 * other_subplans		Indexes of any subplans that are not accounted for
 *						by any of the PartitionedRelPruneInfo nodes in
 *						"prune_infos".  These subplans must not be pruned.
 */
typedef struct PartitionPruneInfo
{
    NodeTag     type;
    List       *prune_infos;
    Bitmapset  *other_subplans;
} PartitionPruneInfo;

/*
 * PartitionedRelPruneInfo - Details required to allow the executor to prune
 * partitions for a single partitioned table.
 *
 * subplan_map[] and subpart_map[] are indexed by partition index of the
 * partitioned table referenced by 'rtindex', the partition index being the
 * order that the partitions are defined in the table's PartitionDesc.  For a
 * leaf partition p, subplan_map[p] contains the zero-based index of the
 * partition's subplan in the parent plan's subplan list; it is -1 if the
 * partition is non-leaf or has been pruned.  For a non-leaf partition p,
 * subpart_map[p] contains the zero-based index of that sub-partition's
 * PartitionedRelPruneInfo in the hierarchy's PartitionedRelPruneInfo list;
 * it is -1 if the partition is a leaf or has been pruned.  Note that subplan
 * indexes, as stored in 'subplan_map', are global across the parent plan
 * node, but partition indexes are valid only within a particular hierarchy.
 * relid_map[p] contains the partition's OID, or 0 if the partition was pruned.
 */
typedef struct PartitionedRelPruneInfo
{
    NodeTag     type;
    Index       rtindex;        /* RT index of partition rel for this level */
    Bitmapset  *present_parts;  /* Indexes of all partitions which subplans or
                                 * subparts are present for */
    int         nparts;         /* Length of the following arrays: */
    int        *subplan_map;    /* subplan index by partition index, or -1 */
    int        *subpart_map;    /* subpart index by partition index, or -1 */
    Oid        *relid_map;      /* relation OID by partition index, or 0 */

    /*
     * initial_pruning_steps shows how to prune during executor startup
     * (i.e., without use of any PARAM_EXEC Params); it is NIL if no startup
     * pruning is required.  exec_pruning_steps shows how to prune with
     * PARAM_EXEC Params; it is NIL if no per-scan pruning is required.
     */
    List       *initial_pruning_steps;  /* List of PartitionPruneStep */
    List       *exec_pruning_steps; /* List of PartitionPruneStep */
    Bitmapset  *execparamids;   /* All PARAM_EXEC Param IDs in
                                 * exec_pruning_steps */
} PartitionedRelPruneInfo;
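
/*
 * Small worked example of the maps above (hypothetical numbers): suppose a
 * partitioned table has three partitions in PartitionDesc order, of which
 * the middle one is itself partitioned and the last one was pruned at plan
 * time.  One plausible encoding is
 *
 *		nparts      = 3
 *		subplan_map = { 0, -1, -1 }
 *		subpart_map = { -1, 1, -1 }
 *		relid_map   = { <leaf OID>, <sub-partitioned OID>, 0 }
 *
 * i.e. partition 0 is a leaf whose scan is subplan 0, partition 1 is handled
 * by the PartitionedRelPruneInfo at index 1 of this hierarchy's list, and
 * partition 2 contributes nothing because it was pruned at plan time.
 */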

/*
 * Abstract Node type for partition pruning steps (there are no concrete
 * Nodes of this type).
 *
 * step_id is the global identifier of the step within its pruning context.
 */
typedef struct PartitionPruneStep
{
    NodeTag     type;
    int         step_id;
} PartitionPruneStep;

/*
 * PartitionPruneStepOp - Information to prune using a set of mutually ANDed
 *							OpExpr clauses
 *
 * This contains information extracted from up to partnatts OpExpr clauses,
 * where partnatts is the number of partition key columns.  'opstrategy' is the
 * strategy of the operator in the clause matched to the last partition key.
 * 'exprs' contains expressions which comprise the lookup key to be passed to
 * the partition bound search function.  'cmpfns' contains the OIDs of
 * comparison functions used to compare aforementioned expressions with
 * partition bounds.  Both 'exprs' and 'cmpfns' contain the same number of
 * items, up to partnatts items.
 *
 * Once we find the offset of a partition bound using the lookup key, we
 * determine which partitions to include in the result based on the value of
 * 'opstrategy'.  For example, if it were equality, we'd return just the
 * partition that would contain that key or a set of partitions if the key
 * didn't consist of all partitioning columns.  For non-equality strategies,
 * we'd need to include other partitions as appropriate.
 *
 * 'nullkeys' is the set containing the offset of the partition keys (0 to
 * partnatts - 1) that were matched to an IS NULL clause.  This is only
 * considered for hash partitioning as we need to pass which keys are null
 * to the hash partition bound search function.  It is never possible to
 * have an expression be present in 'exprs' for a given partition key and
 * the corresponding bit set in 'nullkeys'.
 */
typedef struct PartitionPruneStepOp
{
    PartitionPruneStep step;

    StrategyNumber opstrategy;
    List       *exprs;
    List       *cmpfns;
    Bitmapset  *nullkeys;
} PartitionPruneStepOp;

/*
 * PartitionPruneStepCombine - Information to prune using a BoolExpr clause
 *
 * For BoolExpr clauses, we combine the set of partitions determined for each
 * of the argument clauses.
 */
typedef enum PartitionPruneCombineOp
{
    PARTPRUNE_COMBINE_UNION,
    PARTPRUNE_COMBINE_INTERSECT
} PartitionPruneCombineOp;

typedef struct PartitionPruneStepCombine
{
    PartitionPruneStep step;

    PartitionPruneCombineOp combineOp;
    List       *source_stepids;
} PartitionPruneStepCombine;


/*
 * Plan invalidation info
 *
 * We track the objects on which a PlannedStmt depends in two ways:
 * relations are recorded as a simple list of OIDs, and everything else
 * is represented as a list of PlanInvalItems.  A PlanInvalItem is designed
 * to be used with the syscache invalidation mechanism, so it identifies a
 * system catalog entry by cache ID and hash value.
 */
typedef struct PlanInvalItem
{
    NodeTag     type;
    int         cacheId;        /* a syscache ID, see utils/syscache.h */
    uint32      hashValue;      /* hash value of object's cache lookup key */
} PlanInvalItem;

#endif                          /* PLANNODES_H */