PostgreSQL Source Code git master
planner.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * planner.c
4 * The query optimizer external interface.
5 *
6 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/optimizer/plan/planner.c
12 *
13 *-------------------------------------------------------------------------
14 */
15
16#include "postgres.h"
17
18#include <limits.h>
19#include <math.h>
20
21#include "access/genam.h"
22#include "access/parallel.h"
23#include "access/sysattr.h"
24#include "access/table.h"
26#include "catalog/pg_inherits.h"
27#include "catalog/pg_proc.h"
28#include "catalog/pg_type.h"
29#include "executor/executor.h"
30#include "foreign/fdwapi.h"
31#include "jit/jit.h"
32#include "lib/bipartite_match.h"
33#include "lib/knapsack.h"
34#include "miscadmin.h"
35#include "nodes/makefuncs.h"
36#include "nodes/nodeFuncs.h"
37#ifdef OPTIMIZER_DEBUG
38#include "nodes/print.h"
39#endif
40#include "nodes/supportnodes.h"
42#include "optimizer/clauses.h"
43#include "optimizer/cost.h"
44#include "optimizer/optimizer.h"
46#include "optimizer/pathnode.h"
47#include "optimizer/paths.h"
48#include "optimizer/plancat.h"
49#include "optimizer/planmain.h"
50#include "optimizer/planner.h"
51#include "optimizer/prep.h"
52#include "optimizer/subselect.h"
53#include "optimizer/tlist.h"
54#include "parser/analyze.h"
55#include "parser/parse_agg.h"
56#include "parser/parse_clause.h"
58#include "parser/parsetree.h"
62#include "utils/lsyscache.h"
63#include "utils/rel.h"
64#include "utils/selfuncs.h"
65
66/* GUC parameters */
71
72/* Hook for plugins to get control in planner() */
74
75/* Hook for plugins to get control when grouping_planner() plans upper rels */
77
78
79/* Expression kind codes for preprocess_expression */
80#define EXPRKIND_QUAL 0
81#define EXPRKIND_TARGET 1
82#define EXPRKIND_RTFUNC 2
83#define EXPRKIND_RTFUNC_LATERAL 3
84#define EXPRKIND_VALUES 4
85#define EXPRKIND_VALUES_LATERAL 5
86#define EXPRKIND_LIMIT 6
87#define EXPRKIND_APPINFO 7
88#define EXPRKIND_PHV 8
89#define EXPRKIND_TABLESAMPLE 9
90#define EXPRKIND_ARBITER_ELEM 10
91#define EXPRKIND_TABLEFUNC 11
92#define EXPRKIND_TABLEFUNC_LATERAL 12
93#define EXPRKIND_GROUPEXPR 13
94
95/*
96 * Data specific to grouping sets
97 */
98typedef struct
99{
109
110/*
111 * Temporary structure for use during WindowClause reordering in order to be
112 * able to sort WindowClauses on partitioning/ordering prefix.
113 */
114typedef struct
115{
117 List *uniqueOrder; /* A List of unique ordering/partitioning
118 * clauses per Window */
120
121/* Passthrough data for standard_qp_callback */
122typedef struct
123{
124 List *activeWindows; /* active windows, if any */
125 grouping_sets_data *gset_data; /* grouping sets data, if any */
126 SetOperationStmt *setop; /* parent set operation or NULL if not a
127 * subquery belonging to a set operation */
129
130/* Local functions */
131static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
132static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
133static void grouping_planner(PlannerInfo *root, double tuple_fraction,
134 SetOperationStmt *setops);
136static List *remap_to_groupclause_idx(List *groupClause, List *gsets,
137 int *tleref_to_colnum_map);
139static double preprocess_limit(PlannerInfo *root,
140 double tuple_fraction,
141 int64 *offset_est, int64 *count_est);
143static List *extract_rollup_sets(List *groupingSets);
144static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
145static void standard_qp_callback(PlannerInfo *root, void *extra);
147 double path_rows,
149 List *target_list);
151 RelOptInfo *input_rel,
152 PathTarget *target,
153 bool target_parallel_safe,
157 RelOptInfo *input_rel,
158 RelOptInfo *grouped_rel);
160 PathTarget *target, bool target_parallel_safe,
161 Node *havingQual);
163 RelOptInfo *input_rel,
164 RelOptInfo *grouped_rel,
165 const AggClauseCosts *agg_costs,
167 GroupPathExtraData *extra,
168 RelOptInfo **partially_grouped_rel_p);
170 RelOptInfo *grouped_rel,
171 Path *path,
172 bool is_sorted,
173 bool can_hash,
175 const AggClauseCosts *agg_costs,
176 double dNumGroups);
178 RelOptInfo *input_rel,
179 PathTarget *input_target,
180 PathTarget *output_target,
181 bool output_target_parallel_safe,
182 WindowFuncLists *wflists,
183 List *activeWindows);
185 RelOptInfo *window_rel,
186 Path *path,
187 PathTarget *input_target,
188 PathTarget *output_target,
189 WindowFuncLists *wflists,
190 List *activeWindows);
192 RelOptInfo *input_rel,
193 PathTarget *target);
195 RelOptInfo *input_rel,
196 RelOptInfo *final_distinct_rel,
197 PathTarget *target);
199 RelOptInfo *input_rel,
200 RelOptInfo *distinct_rel);
202 List *needed_pathkeys,
203 List *path_pathkeys);
205 RelOptInfo *input_rel,
206 PathTarget *target,
207 bool target_parallel_safe,
208 double limit_tuples);
210 PathTarget *final_target);
212 PathTarget *grouping_target,
213 Node *havingQual);
214static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
216 WindowFuncLists *wflists);
218static void name_active_windows(List *activeWindows);
220 PathTarget *final_target,
221 List *activeWindows);
223 List *tlist);
225 PathTarget *final_target,
226 bool *have_postponed_srfs);
228 List *targets, List *targets_contain_srfs);
230 RelOptInfo *grouped_rel,
231 RelOptInfo *partially_grouped_rel,
232 const AggClauseCosts *agg_costs,
234 double dNumGroups,
235 GroupPathExtraData *extra);
237 RelOptInfo *grouped_rel,
238 RelOptInfo *input_rel,
240 GroupPathExtraData *extra,
241 bool force_rel_creation);
243 RelOptInfo *rel,
244 Path *path,
245 Path *cheapest_path,
246 List *pathkeys,
247 double limit_tuples);
249static bool can_partial_agg(PlannerInfo *root);
251 RelOptInfo *rel,
252 List *scanjoin_targets,
253 List *scanjoin_targets_contain_srfs,
254 bool scanjoin_target_parallel_safe,
255 bool tlist_same_exprs);
257 RelOptInfo *input_rel,
258 RelOptInfo *grouped_rel,
259 RelOptInfo *partially_grouped_rel,
260 const AggClauseCosts *agg_costs,
263 GroupPathExtraData *extra);
264static bool group_by_has_partkey(RelOptInfo *input_rel,
265 List *targetList,
266 List *groupClause);
267static int common_prefix_cmp(const void *a, const void *b);
269 List *targetlist);
270
271
272/*****************************************************************************
273 *
274 * Query optimizer entry point
275 *
276 * To support loadable plugins that monitor or modify planner behavior,
277 * we provide a hook variable that lets a plugin get control before and
278 * after the standard planning process. The plugin would normally call
279 * standard_planner().
280 *
281 * Note to plugin authors: standard_planner() scribbles on its Query input,
282 * so you'd better copy that data structure if you want to plan more than once.
283 *
284 *****************************************************************************/
286planner(Query *parse, const char *query_string, int cursorOptions,
287 ParamListInfo boundParams)
288{
289 PlannedStmt *result;
290
291 if (planner_hook)
292 result = (*planner_hook) (parse, query_string, cursorOptions, boundParams);
293 else
294 result = standard_planner(parse, query_string, cursorOptions, boundParams);
295
296 pgstat_report_plan_id(result->planId, false);
297
298 return result;
299}
300
302standard_planner(Query *parse, const char *query_string, int cursorOptions,
303 ParamListInfo boundParams)
304{
305 PlannedStmt *result;
306 PlannerGlobal *glob;
307 double tuple_fraction;
309 RelOptInfo *final_rel;
310 Path *best_path;
311 Plan *top_plan;
312 ListCell *lp,
313 *lr;
314
315 /*
316 * Set up global state for this planner invocation. This data is needed
317 * across all levels of sub-Query that might exist in the given command,
318 * so we keep it in a separate struct that's linked to by each per-Query
319 * PlannerInfo.
320 */
321 glob = makeNode(PlannerGlobal);
322
323 glob->boundParams = boundParams;
324 glob->subplans = NIL;
325 glob->subpaths = NIL;
326 glob->subroots = NIL;
327 glob->rewindPlanIDs = NULL;
328 glob->finalrtable = NIL;
329 glob->allRelids = NULL;
330 glob->prunableRelids = NULL;
331 glob->finalrteperminfos = NIL;
332 glob->finalrowmarks = NIL;
333 glob->resultRelations = NIL;
334 glob->firstResultRels = NIL;
335 glob->appendRelations = NIL;
336 glob->partPruneInfos = NIL;
337 glob->relationOids = NIL;
338 glob->invalItems = NIL;
339 glob->paramExecTypes = NIL;
340 glob->lastPHId = 0;
341 glob->lastRowMarkId = 0;
342 glob->lastPlanNodeId = 0;
343 glob->transientPlan = false;
344 glob->dependsOnRole = false;
345 glob->partition_directory = NULL;
346
347 /*
348 * Assess whether it's feasible to use parallel mode for this query. We
349 * can't do this in a standalone backend, or if the command will try to
350 * modify any data, or if this is a cursor operation, or if GUCs are set
351 * to values that don't permit parallelism, or if parallel-unsafe
352 * functions are present in the query tree.
353 *
354 * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
355 * MATERIALIZED VIEW to use parallel plans, but this is safe only because
356 * the command is writing into a completely new table which workers won't
357 * be able to see. If the workers could see the table, the fact that
358 * group locking would cause them to ignore the leader's heavyweight GIN
359 * page locks would make this unsafe. We'll have to fix that somehow if
360 * we want to allow parallel inserts in general; updates and deletes have
361 * additional problems especially around combo CIDs.)
362 *
363 * For now, we don't try to use parallel mode if we're running inside a
364 * parallel worker. We might eventually be able to relax this
365 * restriction, but for now it seems best not to have parallel workers
366 * trying to create their own parallel workers.
367 */
368 if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
370 parse->commandType == CMD_SELECT &&
371 !parse->hasModifyingCTE &&
374 {
375 /* all the cheap tests pass, so scan the query tree */
377 glob->parallelModeOK = (glob->maxParallelHazard != PROPARALLEL_UNSAFE);
378 }
379 else
380 {
381 /* skip the query tree scan, just assume it's unsafe */
382 glob->maxParallelHazard = PROPARALLEL_UNSAFE;
383 glob->parallelModeOK = false;
384 }
385
386 /*
387 * glob->parallelModeNeeded is normally set to false here and changed to
388 * true during plan creation if a Gather or Gather Merge plan is actually
389 * created (cf. create_gather_plan, create_gather_merge_plan).
390 *
391 * However, if debug_parallel_query = on or debug_parallel_query =
392 * regress, then we impose parallel mode whenever it's safe to do so, even
393 * if the final plan doesn't use parallelism. It's not safe to do so if
394 * the query contains anything parallel-unsafe; parallelModeOK will be
395 * false in that case. Note that parallelModeOK can't change after this
396 * point. Otherwise, everything in the query is either parallel-safe or
397 * parallel-restricted, and in either case it should be OK to impose
398 * parallel-mode restrictions. If that ends up breaking something, then
399 * either some function the user included in the query is incorrectly
400 * labeled as parallel-safe or parallel-restricted when in reality it's
401 * parallel-unsafe, or else the query planner itself has a bug.
402 */
403 glob->parallelModeNeeded = glob->parallelModeOK &&
405
406 /* Determine what fraction of the plan is likely to be scanned */
407 if (cursorOptions & CURSOR_OPT_FAST_PLAN)
408 {
409 /*
410 * We have no real idea how many tuples the user will ultimately FETCH
411 * from a cursor, but it is often the case that he doesn't want 'em
412 * all, or would prefer a fast-start plan anyway so that he can
413 * process some of the tuples sooner. Use a GUC parameter to decide
414 * what fraction to optimize for.
415 */
416 tuple_fraction = cursor_tuple_fraction;
417
418 /*
419 * We document cursor_tuple_fraction as simply being a fraction, which
420 * means the edge cases 0 and 1 have to be treated specially here. We
421 * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
422 */
423 if (tuple_fraction >= 1.0)
424 tuple_fraction = 0.0;
425 else if (tuple_fraction <= 0.0)
426 tuple_fraction = 1e-10;
427 }
428 else
429 {
430 /* Default assumption is we need all the tuples */
431 tuple_fraction = 0.0;
432 }
433
434 /* primary planning entry point (may recurse for subqueries) */
435 root = subquery_planner(glob, parse, NULL, false, tuple_fraction, NULL);
436
437 /* Select best Path and turn it into a Plan */
438 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
439 best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
440
441 top_plan = create_plan(root, best_path);
442
443 /*
444 * If creating a plan for a scrollable cursor, make sure it can run
445 * backwards on demand. Add a Material node at the top at need.
446 */
447 if (cursorOptions & CURSOR_OPT_SCROLL)
448 {
449 if (!ExecSupportsBackwardScan(top_plan))
450 top_plan = materialize_finished_plan(top_plan);
451 }
452
453 /*
454 * Optionally add a Gather node for testing purposes, provided this is
455 * actually a safe thing to do.
456 *
457 * We can add Gather even when top_plan has parallel-safe initPlans, but
458 * then we have to move the initPlans to the Gather node because of
459 * SS_finalize_plan's limitations. That would cause cosmetic breakage of
460 * regression tests when debug_parallel_query = regress, because initPlans
461 * that would normally appear on the top_plan move to the Gather, causing
462 * them to disappear from EXPLAIN output. That doesn't seem worth kluging
463 * EXPLAIN to hide, so skip it when debug_parallel_query = regress.
464 */
466 top_plan->parallel_safe &&
467 (top_plan->initPlan == NIL ||
469 {
470 Gather *gather = makeNode(Gather);
471 Cost initplan_cost;
472 bool unsafe_initplans;
473
474 gather->plan.targetlist = top_plan->targetlist;
475 gather->plan.qual = NIL;
476 gather->plan.lefttree = top_plan;
477 gather->plan.righttree = NULL;
478 gather->num_workers = 1;
479 gather->single_copy = true;
481
482 /* Transfer any initPlans to the new top node */
483 gather->plan.initPlan = top_plan->initPlan;
484 top_plan->initPlan = NIL;
485
486 /*
487 * Since this Gather has no parallel-aware descendants to signal to,
488 * we don't need a rescan Param.
489 */
490 gather->rescan_param = -1;
491
492 /*
493 * Ideally we'd use cost_gather here, but setting up dummy path data
494 * to satisfy it doesn't seem much cleaner than knowing what it does.
495 */
496 gather->plan.startup_cost = top_plan->startup_cost +
498 gather->plan.total_cost = top_plan->total_cost +
500 gather->plan.plan_rows = top_plan->plan_rows;
501 gather->plan.plan_width = top_plan->plan_width;
502 gather->plan.parallel_aware = false;
503 gather->plan.parallel_safe = false;
504
505 /*
506 * Delete the initplans' cost from top_plan. We needn't add it to the
507 * Gather node, since the above coding already included it there.
508 */
510 &initplan_cost, &unsafe_initplans);
511 top_plan->startup_cost -= initplan_cost;
512 top_plan->total_cost -= initplan_cost;
513
514 /* use parallel mode for parallel plans. */
515 root->glob->parallelModeNeeded = true;
516
517 top_plan = &gather->plan;
518 }
519
520 /*
521 * If any Params were generated, run through the plan tree and compute
522 * each plan node's extParam/allParam sets. Ideally we'd merge this into
523 * set_plan_references' tree traversal, but for now it has to be separate
524 * because we need to visit subplans before not after main plan.
525 */
526 if (glob->paramExecTypes != NIL)
527 {
528 Assert(list_length(glob->subplans) == list_length(glob->subroots));
529 forboth(lp, glob->subplans, lr, glob->subroots)
530 {
531 Plan *subplan = (Plan *) lfirst(lp);
532 PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
533
534 SS_finalize_plan(subroot, subplan);
535 }
536 SS_finalize_plan(root, top_plan);
537 }
538
539 /* final cleanup of the plan */
540 Assert(glob->finalrtable == NIL);
541 Assert(glob->finalrteperminfos == NIL);
542 Assert(glob->finalrowmarks == NIL);
543 Assert(glob->resultRelations == NIL);
544 Assert(glob->appendRelations == NIL);
545 top_plan = set_plan_references(root, top_plan);
546 /* ... and the subplans (both regular subplans and initplans) */
547 Assert(list_length(glob->subplans) == list_length(glob->subroots));
548 forboth(lp, glob->subplans, lr, glob->subroots)
549 {
550 Plan *subplan = (Plan *) lfirst(lp);
551 PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
552
553 lfirst(lp) = set_plan_references(subroot, subplan);
554 }
555
556 /* build the PlannedStmt result */
557 result = makeNode(PlannedStmt);
558
559 result->commandType = parse->commandType;
560 result->queryId = parse->queryId;
561 result->hasReturning = (parse->returningList != NIL);
562 result->hasModifyingCTE = parse->hasModifyingCTE;
563 result->canSetTag = parse->canSetTag;
564 result->transientPlan = glob->transientPlan;
565 result->dependsOnRole = glob->dependsOnRole;
567 result->planTree = top_plan;
568 result->partPruneInfos = glob->partPruneInfos;
569 result->rtable = glob->finalrtable;
571 glob->prunableRelids);
572 result->permInfos = glob->finalrteperminfos;
573 result->resultRelations = glob->resultRelations;
574 result->firstResultRels = glob->firstResultRels;
575 result->appendRelations = glob->appendRelations;
576 result->subplans = glob->subplans;
577 result->rewindPlanIDs = glob->rewindPlanIDs;
578 result->rowMarks = glob->finalrowmarks;
579 result->relationOids = glob->relationOids;
580 result->invalItems = glob->invalItems;
581 result->paramExecTypes = glob->paramExecTypes;
582 /* utilityStmt should be null, but we might as well copy it */
583 result->utilityStmt = parse->utilityStmt;
584 result->stmt_location = parse->stmt_location;
585 result->stmt_len = parse->stmt_len;
586
587 result->jitFlags = PGJIT_NONE;
588 if (jit_enabled && jit_above_cost >= 0 &&
589 top_plan->total_cost > jit_above_cost)
590 {
591 result->jitFlags |= PGJIT_PERFORM;
592
593 /*
594 * Decide how much effort should be put into generating better code.
595 */
596 if (jit_optimize_above_cost >= 0 &&
598 result->jitFlags |= PGJIT_OPT3;
599 if (jit_inline_above_cost >= 0 &&
601 result->jitFlags |= PGJIT_INLINE;
602
603 /*
604 * Decide which operations should be JITed.
605 */
606 if (jit_expressions)
607 result->jitFlags |= PGJIT_EXPR;
609 result->jitFlags |= PGJIT_DEFORM;
610 }
611
612 if (glob->partition_directory != NULL)
613 DestroyPartitionDirectory(glob->partition_directory);
614
615 return result;
616}
617
618
619/*--------------------
620 * subquery_planner
621 * Invokes the planner on a subquery. We recurse to here for each
622 * sub-SELECT found in the query tree.
623 *
624 * glob is the global state for the current planner run.
625 * parse is the querytree produced by the parser & rewriter.
626 * parent_root is the immediate parent Query's info (NULL at the top level).
627 * hasRecursion is true if this is a recursive WITH query.
628 * tuple_fraction is the fraction of tuples we expect will be retrieved.
629 * tuple_fraction is interpreted as explained for grouping_planner, below.
630 * setops is used for set operation subqueries to provide the subquery with
631 * the context in which it's being used so that Paths correctly sorted for the
632 * set operation can be generated. NULL when not planning a set operation
633 * child, or when a child of a set op that isn't interested in sorted input.
634 *
635 * Basically, this routine does the stuff that should only be done once
636 * per Query object. It then calls grouping_planner. At one time,
637 * grouping_planner could be invoked recursively on the same Query object;
638 * that's not currently true, but we keep the separation between the two
639 * routines anyway, in case we need it again someday.
640 *
641 * subquery_planner will be called recursively to handle sub-Query nodes
642 * found within the query's expressions and rangetable.
643 *
644 * Returns the PlannerInfo struct ("root") that contains all data generated
645 * while planning the subquery. In particular, the Path(s) attached to
646 * the (UPPERREL_FINAL, NULL) upperrel represent our conclusions about the
647 * cheapest way(s) to implement the query. The top level will select the
648 * best Path and pass it through createplan.c to produce a finished Plan.
649 *--------------------
650 */
653 bool hasRecursion, double tuple_fraction,
654 SetOperationStmt *setops)
655{
657 List *newWithCheckOptions;
658 List *newHaving;
659 bool hasOuterJoins;
660 bool hasResultRTEs;
661 RelOptInfo *final_rel;
662 ListCell *l;
663
664 /* Create a PlannerInfo data structure for this subquery */
666 root->parse = parse;
667 root->glob = glob;
668 root->query_level = parent_root ? parent_root->query_level + 1 : 1;
669 root->parent_root = parent_root;
670 root->plan_params = NIL;
671 root->outer_params = NULL;
672 root->planner_cxt = CurrentMemoryContext;
673 root->init_plans = NIL;
674 root->cte_plan_ids = NIL;
675 root->multiexpr_params = NIL;
676 root->join_domains = NIL;
677 root->eq_classes = NIL;
678 root->ec_merging_done = false;
679 root->last_rinfo_serial = 0;
680 root->all_result_relids =
681 parse->resultRelation ? bms_make_singleton(parse->resultRelation) : NULL;
682 root->leaf_result_relids = NULL; /* we'll find out leaf-ness later */
683 root->append_rel_list = NIL;
684 root->row_identity_vars = NIL;
685 root->rowMarks = NIL;
686 memset(root->upper_rels, 0, sizeof(root->upper_rels));
687 memset(root->upper_targets, 0, sizeof(root->upper_targets));
688 root->processed_groupClause = NIL;
689 root->processed_distinctClause = NIL;
690 root->processed_tlist = NIL;
691 root->update_colnos = NIL;
692 root->grouping_map = NULL;
693 root->minmax_aggs = NIL;
694 root->qual_security_level = 0;
695 root->hasPseudoConstantQuals = false;
696 root->hasAlternativeSubPlans = false;
697 root->placeholdersFrozen = false;
698 root->hasRecursion = hasRecursion;
699 if (hasRecursion)
700 root->wt_param_id = assign_special_exec_param(root);
701 else
702 root->wt_param_id = -1;
703 root->non_recursive_path = NULL;
704 root->partColsUpdated = false;
705
706 /*
707 * Create the top-level join domain. This won't have valid contents until
708 * deconstruct_jointree fills it in, but the node needs to exist before
709 * that so we can build EquivalenceClasses referencing it.
710 */
711 root->join_domains = list_make1(makeNode(JoinDomain));
712
713 /*
714 * If there is a WITH list, process each WITH query and either convert it
715 * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
716 */
717 if (parse->cteList)
719
720 /*
721 * If it's a MERGE command, transform the joinlist as appropriate.
722 */
724
725 /*
726 * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
727 * that we don't need so many special cases to deal with that situation.
728 */
730
731 /*
732 * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
733 * to transform them into joins. Note that this step does not descend
734 * into subqueries; if we pull up any subqueries below, their SubLinks are
735 * processed just before pulling them up.
736 */
737 if (parse->hasSubLinks)
739
740 /*
741 * Scan the rangetable for function RTEs, do const-simplification on them,
742 * and then inline them if possible (producing subqueries that might get
743 * pulled up next). Recursion issues here are handled in the same way as
744 * for SubLinks.
745 */
747
748 /*
749 * Scan the rangetable for relations with virtual generated columns, and
750 * replace all Var nodes in the query that reference these columns with
751 * the generation expressions. Recursion issues here are handled in the
752 * same way as for SubLinks.
753 */
755
756 /*
757 * Check to see if any subqueries in the jointree can be merged into this
758 * query.
759 */
761
762 /*
763 * If this is a simple UNION ALL query, flatten it into an appendrel. We
764 * do this now because it requires applying pull_up_subqueries to the leaf
765 * queries of the UNION ALL, which weren't touched above because they
766 * weren't referenced by the jointree (they will be after we do this).
767 */
768 if (parse->setOperations)
770
771 /*
772 * Survey the rangetable to see what kinds of entries are present. We can
773 * skip some later processing if relevant SQL features are not used; for
774 * example if there are no JOIN RTEs we can avoid the expense of doing
775 * flatten_join_alias_vars(). This must be done after we have finished
776 * adding rangetable entries, of course. (Note: actually, processing of
777 * inherited or partitioned rels can cause RTEs for their child tables to
778 * get added later; but those must all be RTE_RELATION entries, so they
779 * don't invalidate the conclusions drawn here.)
780 */
781 root->hasJoinRTEs = false;
782 root->hasLateralRTEs = false;
783 root->group_rtindex = 0;
784 hasOuterJoins = false;
785 hasResultRTEs = false;
786 foreach(l, parse->rtable)
787 {
789
790 switch (rte->rtekind)
791 {
792 case RTE_RELATION:
793 if (rte->inh)
794 {
795 /*
796 * Check to see if the relation actually has any children;
797 * if not, clear the inh flag so we can treat it as a
798 * plain base relation.
799 *
800 * Note: this could give a false-positive result, if the
801 * rel once had children but no longer does. We used to
802 * be able to clear rte->inh later on when we discovered
803 * that, but no more; we have to handle such cases as
804 * full-fledged inheritance.
805 */
806 rte->inh = has_subclass(rte->relid);
807 }
808 break;
809 case RTE_JOIN:
810 root->hasJoinRTEs = true;
811 if (IS_OUTER_JOIN(rte->jointype))
812 hasOuterJoins = true;
813 break;
814 case RTE_RESULT:
815 hasResultRTEs = true;
816 break;
817 case RTE_GROUP:
818 Assert(parse->hasGroupRTE);
819 root->group_rtindex = list_cell_number(parse->rtable, l) + 1;
820 break;
821 default:
822 /* No work here for other RTE types */
823 break;
824 }
825
826 if (rte->lateral)
827 root->hasLateralRTEs = true;
828
829 /*
830 * We can also determine the maximum security level required for any
831 * securityQuals now. Addition of inheritance-child RTEs won't affect
832 * this, because child tables don't have their own securityQuals; see
833 * expand_single_inheritance_child().
834 */
835 if (rte->securityQuals)
836 root->qual_security_level = Max(root->qual_security_level,
837 list_length(rte->securityQuals));
838 }
839
840 /*
841 * If we have now verified that the query target relation is
842 * non-inheriting, mark it as a leaf target.
843 */
844 if (parse->resultRelation)
845 {
846 RangeTblEntry *rte = rt_fetch(parse->resultRelation, parse->rtable);
847
848 if (!rte->inh)
849 root->leaf_result_relids =
850 bms_make_singleton(parse->resultRelation);
851 }
852
853 /*
854 * Preprocess RowMark information. We need to do this after subquery
855 * pullup, so that all base relations are present.
856 */
858
859 /*
860 * Set hasHavingQual to remember if HAVING clause is present. Needed
861 * because preprocess_expression will reduce a constant-true condition to
862 * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
863 */
864 root->hasHavingQual = (parse->havingQual != NULL);
865
866 /*
867 * Do expression preprocessing on targetlist and quals, as well as other
868 * random expressions in the querytree. Note that we do not need to
869 * handle sort/group expressions explicitly, because they are actually
870 * part of the targetlist.
871 */
872 parse->targetList = (List *)
873 preprocess_expression(root, (Node *) parse->targetList,
875
876 newWithCheckOptions = NIL;
877 foreach(l, parse->withCheckOptions)
878 {
880
881 wco->qual = preprocess_expression(root, wco->qual,
883 if (wco->qual != NULL)
884 newWithCheckOptions = lappend(newWithCheckOptions, wco);
885 }
886 parse->withCheckOptions = newWithCheckOptions;
887
888 parse->returningList = (List *)
889 preprocess_expression(root, (Node *) parse->returningList,
891
893
894 parse->havingQual = preprocess_expression(root, parse->havingQual,
896
897 foreach(l, parse->windowClause)
898 {
900
901 /* partitionClause/orderClause are sort/group expressions */
906 }
907
908 parse->limitOffset = preprocess_expression(root, parse->limitOffset,
910 parse->limitCount = preprocess_expression(root, parse->limitCount,
912
913 if (parse->onConflict)
914 {
915 parse->onConflict->arbiterElems = (List *)
917 (Node *) parse->onConflict->arbiterElems,
919 parse->onConflict->arbiterWhere =
921 parse->onConflict->arbiterWhere,
923 parse->onConflict->onConflictSet = (List *)
925 (Node *) parse->onConflict->onConflictSet,
927 parse->onConflict->onConflictWhere =
929 parse->onConflict->onConflictWhere,
931 /* exclRelTlist contains only Vars, so no preprocessing needed */
932 }
933
934 foreach(l, parse->mergeActionList)
935 {
937
938 action->targetList = (List *)
940 (Node *) action->targetList,
942 action->qual =
944 (Node *) action->qual,
946 }
947
948 parse->mergeJoinCondition =
949 preprocess_expression(root, parse->mergeJoinCondition, EXPRKIND_QUAL);
950
951 root->append_rel_list = (List *)
952 preprocess_expression(root, (Node *) root->append_rel_list,
954
955 /* Also need to preprocess expressions within RTEs */
956 foreach(l, parse->rtable)
957 {
959 int kind;
960 ListCell *lcsq;
961
962 if (rte->rtekind == RTE_RELATION)
963 {
964 if (rte->tablesample)
967 (Node *) rte->tablesample,
969 }
970 else if (rte->rtekind == RTE_SUBQUERY)
971 {
972 /*
973 * We don't want to do all preprocessing yet on the subquery's
974 * expressions, since that will happen when we plan it. But if it
975 * contains any join aliases of our level, those have to get
976 * expanded now, because planning of the subquery won't do it.
977 * That's only possible if the subquery is LATERAL.
978 */
979 if (rte->lateral && root->hasJoinRTEs)
980 rte->subquery = (Query *)
982 (Node *) rte->subquery);
983 }
984 else if (rte->rtekind == RTE_FUNCTION)
985 {
986 /* Preprocess the function expression(s) fully */
987 kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
988 rte->functions = (List *)
989 preprocess_expression(root, (Node *) rte->functions, kind);
990 }
991 else if (rte->rtekind == RTE_TABLEFUNC)
992 {
993 /* Preprocess the function expression(s) fully */
994 kind = rte->lateral ? EXPRKIND_TABLEFUNC_LATERAL : EXPRKIND_TABLEFUNC;
995 rte->tablefunc = (TableFunc *)
996 preprocess_expression(root, (Node *) rte->tablefunc, kind);
997 }
998 else if (rte->rtekind == RTE_VALUES)
999 {
1000 /* Preprocess the values lists fully */
1001 kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
1002 rte->values_lists = (List *)
1004 }
1005 else if (rte->rtekind == RTE_GROUP)
1006 {
1007 /* Preprocess the groupexprs list fully */
1008 rte->groupexprs = (List *)
1009 preprocess_expression(root, (Node *) rte->groupexprs,
1011 }
1012
1013 /*
1014 * Process each element of the securityQuals list as if it were a
1015 * separate qual expression (as indeed it is). We need to do it this
1016 * way to get proper canonicalization of AND/OR structure. Note that
1017 * this converts each element into an implicit-AND sublist.
1018 */
1019 foreach(lcsq, rte->securityQuals)
1020 {
1022 (Node *) lfirst(lcsq),
1024 }
1025 }
1026
1027 /*
1028 * Now that we are done preprocessing expressions, and in particular done
1029 * flattening join alias variables, get rid of the joinaliasvars lists.
1030 * They no longer match what expressions in the rest of the tree look
1031 * like, because we have not preprocessed expressions in those lists (and
1032 * do not want to; for example, expanding a SubLink there would result in
1033 * a useless unreferenced subplan). Leaving them in place simply creates
1034 * a hazard for later scans of the tree. We could try to prevent that by
1035 * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
1036 * but that doesn't sound very reliable.
1037 */
1038 if (root->hasJoinRTEs)
1039 {
1040 foreach(l, parse->rtable)
1041 {
1043
1044 rte->joinaliasvars = NIL;
1045 }
1046 }
1047
1048 /*
1049 * Replace any Vars in the subquery's targetlist and havingQual that
1050 * reference GROUP outputs with the underlying grouping expressions.
1051 *
1052 * Note that we need to perform this replacement after we've preprocessed
1053 * the grouping expressions. This is to ensure that there is only one
1054 * instance of SubPlan for each SubLink contained within the grouping
1055 * expressions.
1056 */
1057 if (parse->hasGroupRTE)
1058 {
1059 parse->targetList = (List *)
1060 flatten_group_exprs(root, root->parse, (Node *) parse->targetList);
1061 parse->havingQual =
1062 flatten_group_exprs(root, root->parse, parse->havingQual);
1063 }
1064
1065 /* Constant-folding might have removed all set-returning functions */
1066 if (parse->hasTargetSRFs)
1067 parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);
1068
1069 /*
1070 * In some cases we may want to transfer a HAVING clause into WHERE. We
1071 * cannot do so if the HAVING clause contains aggregates (obviously) or
1072 * volatile functions (since a HAVING clause is supposed to be executed
1073 * only once per group). We also can't do this if there are any nonempty
1074 * grouping sets and the clause references any columns that are nullable
1075 * by the grouping sets; moving such a clause into WHERE would potentially
1076 * change the results. (If there are only empty grouping sets, then the
1077 * HAVING clause must be degenerate as discussed below.)
1078 *
1079 * Also, it may be that the clause is so expensive to execute that we're
1080 * better off doing it only once per group, despite the loss of
1081 * selectivity. This is hard to estimate short of doing the entire
1082 * planning process twice, so we use a heuristic: clauses containing
1083 * subplans are left in HAVING. Otherwise, we move or copy the HAVING
1084 * clause into WHERE, in hopes of eliminating tuples before aggregation
1085 * instead of after.
1086 *
1087 * If the query has explicit grouping then we can simply move such a
1088 * clause into WHERE; any group that fails the clause will not be in the
1089 * output because none of its tuples will reach the grouping or
1090 * aggregation stage. Otherwise we must have a degenerate (variable-free)
1091 * HAVING clause, which we put in WHERE so that query_planner() can use it
1092 * in a gating Result node, but also keep in HAVING to ensure that we
1093 * don't emit a bogus aggregated row. (This could be done better, but it
1094 * seems not worth optimizing.)
1095 *
1096 * Note that a HAVING clause may contain expressions that are not fully
1097 * preprocessed. This can happen if these expressions are part of
1098 * grouping items. In such cases, they are replaced with GROUP Vars in
1099 * the parser and then replaced back after we've done with expression
1100 * preprocessing on havingQual. This is not an issue if the clause
1101 * remains in HAVING, because these expressions will be matched to lower
1102 * target items in setrefs.c. However, if the clause is moved or copied
1103 * into WHERE, we need to ensure that these expressions are fully
1104 * preprocessed.
1105 *
1106 * Note that both havingQual and parse->jointree->quals are in
1107 * implicitly-ANDed-list form at this point, even though they are declared
1108 * as Node *.
1109 */
1110 newHaving = NIL;
1111 foreach(l, (List *) parse->havingQual)
1112 {
1113 Node *havingclause = (Node *) lfirst(l);
1114
1115 if (contain_agg_clause(havingclause) ||
1116 contain_volatile_functions(havingclause) ||
1117 contain_subplans(havingclause) ||
1118 (parse->groupClause && parse->groupingSets &&
1119 bms_is_member(root->group_rtindex, pull_varnos(root, havingclause))))
1120 {
1121 /* keep it in HAVING */
1122 newHaving = lappend(newHaving, havingclause);
1123 }
1124 else if (parse->groupClause)
1125 {
1126 Node *whereclause;
1127
1128 /* Preprocess the HAVING clause fully */
1129 whereclause = preprocess_expression(root, havingclause,
1131 /* ... and move it to WHERE */
1132 parse->jointree->quals = (Node *)
1133 list_concat((List *) parse->jointree->quals,
1134 (List *) whereclause);
1135 }
1136 else
1137 {
1138 Node *whereclause;
1139
1140 /* Preprocess the HAVING clause fully */
1141 whereclause = preprocess_expression(root, copyObject(havingclause),
1143 /* ... and put a copy in WHERE */
1144 parse->jointree->quals = (Node *)
1145 list_concat((List *) parse->jointree->quals,
1146 (List *) whereclause);
1147 /* ... and also keep it in HAVING */
1148 newHaving = lappend(newHaving, havingclause);
1149 }
1150 }
1151 parse->havingQual = (Node *) newHaving;
1152
1153 /*
1154 * If we have any outer joins, try to reduce them to plain inner joins.
1155 * This step is most easily done after we've done expression
1156 * preprocessing.
1157 */
1158 if (hasOuterJoins)
1160
1161 /*
1162 * If we have any RTE_RESULT relations, see if they can be deleted from
1163 * the jointree. We also rely on this processing to flatten single-child
1164 * FromExprs underneath outer joins. This step is most effectively done
1165 * after we've done expression preprocessing and outer join reduction.
1166 */
1167 if (hasResultRTEs || hasOuterJoins)
1169
1170 /*
1171 * Do the main planning.
1172 */
1173 grouping_planner(root, tuple_fraction, setops);
1174
1175 /*
1176 * Capture the set of outer-level param IDs we have access to, for use in
1177 * extParam/allParam calculations later.
1178 */
1180
1181 /*
1182 * If any initPlans were created in this query level, adjust the surviving
1183 * Paths' costs and parallel-safety flags to account for them. The
1184 * initPlans won't actually get attached to the plan tree till
1185 * create_plan() runs, but we must include their effects now.
1186 */
1187 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1188 SS_charge_for_initplans(root, final_rel);
1189
1190 /*
1191 * Make sure we've identified the cheapest Path for the final rel. (By
1192 * doing this here not in grouping_planner, we include initPlan costs in
1193 * the decision, though it's unlikely that will change anything.)
1194 */
1195 set_cheapest(final_rel);
1196
1197 return root;
1198}
1199
1200/*
1201 * preprocess_expression
1202 * Do subquery_planner's preprocessing work for an expression,
1203 * which can be a targetlist, a WHERE clause (including JOIN/ON
1204 * conditions), a HAVING clause, or a few other things.
1205 */
1206static Node *
1208{
1209 /*
1210 * Fall out quickly if expression is empty. This occurs often enough to
1211 * be worth checking. Note that null->null is the correct conversion for
1212 * implicit-AND result format, too.
1213 */
1214 if (expr == NULL)
1215 return NULL;
1216
1217 /*
1218 * If the query has any join RTEs, replace join alias variables with
1219 * base-relation variables. We must do this first, since any expressions
1220 * we may extract from the joinaliasvars lists have not been preprocessed.
1221 * For example, if we did this after sublink processing, sublinks expanded
1222 * out from join aliases would not get processed. But we can skip this in
1223 * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
1224 * they can't contain any Vars of the current query level.
1225 */
1226 if (root->hasJoinRTEs &&
1227 !(kind == EXPRKIND_RTFUNC ||
1228 kind == EXPRKIND_VALUES ||
1229 kind == EXPRKIND_TABLESAMPLE ||
1230 kind == EXPRKIND_TABLEFUNC))
1231 expr = flatten_join_alias_vars(root, root->parse, expr);
1232
1233 /*
1234 * Simplify constant expressions. For function RTEs, this was already
1235 * done by preprocess_function_rtes. (But note we must do it again for
1236 * EXPRKIND_RTFUNC_LATERAL, because those might by now contain
1237 * un-simplified subexpressions inserted by flattening of subqueries or
1238 * join alias variables.)
1239 *
1240 * Note: an essential effect of this is to convert named-argument function
1241 * calls to positional notation and insert the current actual values of
1242 * any default arguments for functions. To ensure that happens, we *must*
1243 * process all expressions here. Previous PG versions sometimes skipped
1244 * const-simplification if it didn't seem worth the trouble, but we can't
1245 * do that anymore.
1246 *
1247 * Note: this also flattens nested AND and OR expressions into N-argument
1248 * form. All processing of a qual expression after this point must be
1249 * careful to maintain AND/OR flatness --- that is, do not generate a tree
1250 * with AND directly under AND, nor OR directly under OR.
1251 */
1252 if (kind != EXPRKIND_RTFUNC)
1253 expr = eval_const_expressions(root, expr);
1254
1255 /*
1256 * If it's a qual or havingQual, canonicalize it.
1257 */
1258 if (kind == EXPRKIND_QUAL)
1259 {
1260 expr = (Node *) canonicalize_qual((Expr *) expr, false);
1261
1262#ifdef OPTIMIZER_DEBUG
1263 printf("After canonicalize_qual()\n");
1264 pprint(expr);
1265#endif
1266 }
1267
1268 /*
1269 * Check for ANY ScalarArrayOpExpr with Const arrays and set the
1270 * hashfuncid of any that might execute more quickly by using hash lookups
1271 * instead of a linear search.
1272 */
1273 if (kind == EXPRKIND_QUAL || kind == EXPRKIND_TARGET)
1274 {
1276 }
1277
1278 /* Expand SubLinks to SubPlans */
1279 if (root->parse->hasSubLinks)
1280 expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
1281
1282 /*
1283 * XXX do not insert anything here unless you have grokked the comments in
1284 * SS_replace_correlation_vars ...
1285 */
1286
1287 /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
1288 if (root->query_level > 1)
1289 expr = SS_replace_correlation_vars(root, expr);
1290
1291 /*
1292 * If it's a qual or havingQual, convert it to implicit-AND format. (We
1293 * don't want to do this before eval_const_expressions, since the latter
1294 * would be unable to simplify a top-level AND correctly. Also,
1295 * SS_process_sublinks expects explicit-AND format.)
1296 */
1297 if (kind == EXPRKIND_QUAL)
1298 expr = (Node *) make_ands_implicit((Expr *) expr);
1299
1300 return expr;
1301}
1302
1303/*
1304 * preprocess_qual_conditions
1305 * Recursively scan the query's jointree and do subquery_planner's
1306 * preprocessing work on each qual condition found therein.
1307 */
1308static void
1310{
1311 if (jtnode == NULL)
1312 return;
1313 if (IsA(jtnode, RangeTblRef))
1314 {
1315 /* nothing to do here */
1316 }
1317 else if (IsA(jtnode, FromExpr))
1318 {
1319 FromExpr *f = (FromExpr *) jtnode;
1320 ListCell *l;
1321
1322 foreach(l, f->fromlist)
1324
1326 }
1327 else if (IsA(jtnode, JoinExpr))
1328 {
1329 JoinExpr *j = (JoinExpr *) jtnode;
1330
1333
1334 j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
1335 }
1336 else
1337 elog(ERROR, "unrecognized node type: %d",
1338 (int) nodeTag(jtnode));
1339}
1340
1341/*
1342 * preprocess_phv_expression
1343 * Do preprocessing on a PlaceHolderVar expression that's been pulled up.
1344 *
1345 * If a LATERAL subquery references an output of another subquery, and that
1346 * output must be wrapped in a PlaceHolderVar because of an intermediate outer
1347 * join, then we'll push the PlaceHolderVar expression down into the subquery
1348 * and later pull it back up during find_lateral_references, which runs after
1349 * subquery_planner has preprocessed all the expressions that were in the
1350 * current query level to start with. So we need to preprocess it then.
1351 */
1352Expr *
1354{
1355 return (Expr *) preprocess_expression(root, (Node *) expr, EXPRKIND_PHV);
1356}
1357
1358/*--------------------
1359 * grouping_planner
1360 * Perform planning steps related to grouping, aggregation, etc.
1361 *
1362 * This function adds all required top-level processing to the scan/join
1363 * Path(s) produced by query_planner.
1364 *
1365 * tuple_fraction is the fraction of tuples we expect will be retrieved.
1366 * tuple_fraction is interpreted as follows:
1367 * 0: expect all tuples to be retrieved (normal case)
1368 * 0 < tuple_fraction < 1: expect the given fraction of tuples available
1369 * from the plan to be retrieved
1370 * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
1371 * expected to be retrieved (ie, a LIMIT specification).
1372 * setops is used for set operation subqueries to provide the subquery with
1373 * the context in which it's being used so that Paths correctly sorted for the
1374 * set operation can be generated. NULL when not planning a set operation
1375 * child, or when a child of a set op that isn't interested in sorted input.
1376 *
1377 * Returns nothing; the useful output is in the Paths we attach to the
1378 * (UPPERREL_FINAL, NULL) upperrel in *root. In addition,
1379 * root->processed_tlist contains the final processed targetlist.
1380 *
1381 * Note that we have not done set_cheapest() on the final rel; it's convenient
1382 * to leave this to the caller.
1383 *--------------------
1384 */
1385static void
1386grouping_planner(PlannerInfo *root, double tuple_fraction,
1387 SetOperationStmt *setops)
1388{
1389 Query *parse = root->parse;
1390 int64 offset_est = 0;
1391 int64 count_est = 0;
1392 double limit_tuples = -1.0;
1393 bool have_postponed_srfs = false;
1394 PathTarget *final_target;
1395 List *final_targets;
1396 List *final_targets_contain_srfs;
1397 bool final_target_parallel_safe;
1398 RelOptInfo *current_rel;
1399 RelOptInfo *final_rel;
1400 FinalPathExtraData extra;
1401 ListCell *lc;
1402
1403 /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1404 if (parse->limitCount || parse->limitOffset)
1405 {
1406 tuple_fraction = preprocess_limit(root, tuple_fraction,
1407 &offset_est, &count_est);
1408
1409 /*
1410 * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1411 * estimate the effects of using a bounded sort.
1412 */
1413 if (count_est > 0 && offset_est >= 0)
1414 limit_tuples = (double) count_est + (double) offset_est;
1415 }
1416
1417 /* Make tuple_fraction accessible to lower-level routines */
1418 root->tuple_fraction = tuple_fraction;
1419
1420 if (parse->setOperations)
1421 {
1422 /*
1423 * Construct Paths for set operations. The results will not need any
1424 * work except perhaps a top-level sort and/or LIMIT. Note that any
1425 * special work for recursive unions is the responsibility of
1426 * plan_set_operations.
1427 */
1428 current_rel = plan_set_operations(root);
1429
1430 /*
1431 * We should not need to call preprocess_targetlist, since we must be
1432 * in a SELECT query node. Instead, use the processed_tlist returned
1433 * by plan_set_operations (since this tells whether it returned any
1434 * resjunk columns!), and transfer any sort key information from the
1435 * original tlist.
1436 */
1437 Assert(parse->commandType == CMD_SELECT);
1438
1439 /* for safety, copy processed_tlist instead of modifying in-place */
1440 root->processed_tlist =
1441 postprocess_setop_tlist(copyObject(root->processed_tlist),
1442 parse->targetList);
1443
1444 /* Also extract the PathTarget form of the setop result tlist */
1445 final_target = current_rel->cheapest_total_path->pathtarget;
1446
1447 /* And check whether it's parallel safe */
1448 final_target_parallel_safe =
1449 is_parallel_safe(root, (Node *) final_target->exprs);
1450
1451 /* The setop result tlist couldn't contain any SRFs */
1452 Assert(!parse->hasTargetSRFs);
1453 final_targets = final_targets_contain_srfs = NIL;
1454
1455 /*
1456 * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1457 * checked already, but let's make sure).
1458 */
1459 if (parse->rowMarks)
1460 ereport(ERROR,
1461 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1462 /*------
1463 translator: %s is a SQL row locking clause such as FOR UPDATE */
1464 errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1466 parse->rowMarks)->strength))));
1467
1468 /*
1469 * Calculate pathkeys that represent result ordering requirements
1470 */
1471 Assert(parse->distinctClause == NIL);
1472 root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1473 parse->sortClause,
1474 root->processed_tlist);
1475 }
1476 else
1477 {
1478 /* No set operations, do regular planning */
1479 PathTarget *sort_input_target;
1480 List *sort_input_targets;
1481 List *sort_input_targets_contain_srfs;
1482 bool sort_input_target_parallel_safe;
1483 PathTarget *grouping_target;
1484 List *grouping_targets;
1485 List *grouping_targets_contain_srfs;
1486 bool grouping_target_parallel_safe;
1487 PathTarget *scanjoin_target;
1488 List *scanjoin_targets;
1489 List *scanjoin_targets_contain_srfs;
1490 bool scanjoin_target_parallel_safe;
1491 bool scanjoin_target_same_exprs;
1492 bool have_grouping;
1493 WindowFuncLists *wflists = NULL;
1494 List *activeWindows = NIL;
1495 grouping_sets_data *gset_data = NULL;
1496 standard_qp_extra qp_extra;
1497
1498 /* A recursive query should always have setOperations */
1499 Assert(!root->hasRecursion);
1500
1501 /* Preprocess grouping sets and GROUP BY clause, if any */
1502 if (parse->groupingSets)
1503 {
1504 gset_data = preprocess_grouping_sets(root);
1505 }
1506 else if (parse->groupClause)
1507 {
1508 /* Preprocess regular GROUP BY clause, if any */
1509 root->processed_groupClause = preprocess_groupclause(root, NIL);
1510 }
1511
1512 /*
1513 * Preprocess targetlist. Note that much of the remaining planning
1514 * work will be done with the PathTarget representation of tlists, but
1515 * we must also maintain the full representation of the final tlist so
1516 * that we can transfer its decoration (resnames etc) to the topmost
1517 * tlist of the finished Plan. This is kept in processed_tlist.
1518 */
1520
1521 /*
1522 * Mark all the aggregates with resolved aggtranstypes, and detect
1523 * aggregates that are duplicates or can share transition state. We
1524 * must do this before slicing and dicing the tlist into various
1525 * pathtargets, else some copies of the Aggref nodes might escape
1526 * being marked.
1527 */
1528 if (parse->hasAggs)
1529 {
1530 preprocess_aggrefs(root, (Node *) root->processed_tlist);
1531 preprocess_aggrefs(root, (Node *) parse->havingQual);
1532 }
1533
1534 /*
1535 * Locate any window functions in the tlist. (We don't need to look
1536 * anywhere else, since expressions used in ORDER BY will be in there
1537 * too.) Note that they could all have been eliminated by constant
1538 * folding, in which case we don't need to do any more work.
1539 */
1540 if (parse->hasWindowFuncs)
1541 {
1542 wflists = find_window_functions((Node *) root->processed_tlist,
1543 list_length(parse->windowClause));
1544 if (wflists->numWindowFuncs > 0)
1545 {
1546 /*
1547 * See if any modifications can be made to each WindowClause
1548 * to allow the executor to execute the WindowFuncs more
1549 * quickly.
1550 */
1551 optimize_window_clauses(root, wflists);
1552
1553 /* Extract the list of windows actually in use. */
1554 activeWindows = select_active_windows(root, wflists);
1555
1556 /* Make sure they all have names, for EXPLAIN's use. */
1557 name_active_windows(activeWindows);
1558 }
1559 else
1560 parse->hasWindowFuncs = false;
1561 }
1562
1563 /*
1564 * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1565 * adding logic between here and the query_planner() call. Anything
1566 * that is needed in MIN/MAX-optimizable cases will have to be
1567 * duplicated in planagg.c.
1568 */
1569 if (parse->hasAggs)
1571
1572 /*
1573 * Figure out whether there's a hard limit on the number of rows that
1574 * query_planner's result subplan needs to return. Even if we know a
1575 * hard limit overall, it doesn't apply if the query has any
1576 * grouping/aggregation operations, or SRFs in the tlist.
1577 */
1578 if (parse->groupClause ||
1579 parse->groupingSets ||
1580 parse->distinctClause ||
1581 parse->hasAggs ||
1582 parse->hasWindowFuncs ||
1583 parse->hasTargetSRFs ||
1584 root->hasHavingQual)
1585 root->limit_tuples = -1.0;
1586 else
1587 root->limit_tuples = limit_tuples;
1588
1589 /* Set up data needed by standard_qp_callback */
1590 qp_extra.activeWindows = activeWindows;
1591 qp_extra.gset_data = gset_data;
1592
1593 /*
1594 * If we're a subquery for a set operation, store the SetOperationStmt
1595 * in qp_extra.
1596 */
1597 qp_extra.setop = setops;
1598
1599 /*
1600 * Generate the best unsorted and presorted paths for the scan/join
1601 * portion of this Query, ie the processing represented by the
1602 * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1603 * We also generate (in standard_qp_callback) pathkey representations
1604 * of the query's sort clause, distinct clause, etc.
1605 */
1606 current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1607
1608 /*
1609 * Convert the query's result tlist into PathTarget format.
1610 *
1611 * Note: this cannot be done before query_planner() has performed
1612 * appendrel expansion, because that might add resjunk entries to
1613 * root->processed_tlist. Waiting till afterwards is also helpful
1614 * because the target width estimates can use per-Var width numbers
1615 * that were obtained within query_planner().
1616 */
1617 final_target = create_pathtarget(root, root->processed_tlist);
1618 final_target_parallel_safe =
1619 is_parallel_safe(root, (Node *) final_target->exprs);
1620
1621 /*
1622 * If ORDER BY was given, consider whether we should use a post-sort
1623 * projection, and compute the adjusted target for preceding steps if
1624 * so.
1625 */
1626 if (parse->sortClause)
1627 {
1628 sort_input_target = make_sort_input_target(root,
1629 final_target,
1630 &have_postponed_srfs);
1631 sort_input_target_parallel_safe =
1632 is_parallel_safe(root, (Node *) sort_input_target->exprs);
1633 }
1634 else
1635 {
1636 sort_input_target = final_target;
1637 sort_input_target_parallel_safe = final_target_parallel_safe;
1638 }
1639
1640 /*
1641 * If we have window functions to deal with, the output from any
1642 * grouping step needs to be what the window functions want;
1643 * otherwise, it should be sort_input_target.
1644 */
1645 if (activeWindows)
1646 {
1647 grouping_target = make_window_input_target(root,
1648 final_target,
1649 activeWindows);
1650 grouping_target_parallel_safe =
1651 is_parallel_safe(root, (Node *) grouping_target->exprs);
1652 }
1653 else
1654 {
1655 grouping_target = sort_input_target;
1656 grouping_target_parallel_safe = sort_input_target_parallel_safe;
1657 }
1658
1659 /*
1660 * If we have grouping or aggregation to do, the topmost scan/join
1661 * plan node must emit what the grouping step wants; otherwise, it
1662 * should emit grouping_target.
1663 */
1664 have_grouping = (parse->groupClause || parse->groupingSets ||
1665 parse->hasAggs || root->hasHavingQual);
1666 if (have_grouping)
1667 {
1668 scanjoin_target = make_group_input_target(root, final_target);
1669 scanjoin_target_parallel_safe =
1670 is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1671 }
1672 else
1673 {
1674 scanjoin_target = grouping_target;
1675 scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1676 }
1677
1678 /*
1679 * If there are any SRFs in the targetlist, we must separate each of
1680 * these PathTargets into SRF-computing and SRF-free targets. Replace
1681 * each of the named targets with a SRF-free version, and remember the
1682 * list of additional projection steps we need to add afterwards.
1683 */
1684 if (parse->hasTargetSRFs)
1685 {
1686 /* final_target doesn't recompute any SRFs in sort_input_target */
1687 split_pathtarget_at_srfs(root, final_target, sort_input_target,
1688 &final_targets,
1689 &final_targets_contain_srfs);
1690 final_target = linitial_node(PathTarget, final_targets);
1691 Assert(!linitial_int(final_targets_contain_srfs));
1692 /* likewise for sort_input_target vs. grouping_target */
1693 split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1694 &sort_input_targets,
1695 &sort_input_targets_contain_srfs);
1696 sort_input_target = linitial_node(PathTarget, sort_input_targets);
1697 Assert(!linitial_int(sort_input_targets_contain_srfs));
1698 /* likewise for grouping_target vs. scanjoin_target */
1699 split_pathtarget_at_srfs(root, grouping_target, scanjoin_target,
1700 &grouping_targets,
1701 &grouping_targets_contain_srfs);
1702 grouping_target = linitial_node(PathTarget, grouping_targets);
1703 Assert(!linitial_int(grouping_targets_contain_srfs));
1704 /* scanjoin_target will not have any SRFs precomputed for it */
1705 split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1706 &scanjoin_targets,
1707 &scanjoin_targets_contain_srfs);
1708 scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1709 Assert(!linitial_int(scanjoin_targets_contain_srfs));
1710 }
1711 else
1712 {
1713 /* initialize lists; for most of these, dummy values are OK */
1714 final_targets = final_targets_contain_srfs = NIL;
1715 sort_input_targets = sort_input_targets_contain_srfs = NIL;
1716 grouping_targets = grouping_targets_contain_srfs = NIL;
1717 scanjoin_targets = list_make1(scanjoin_target);
1718 scanjoin_targets_contain_srfs = NIL;
1719 }
1720
1721 /* Apply scan/join target. */
1722 scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1723 && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1724 apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1725 scanjoin_targets_contain_srfs,
1726 scanjoin_target_parallel_safe,
1727 scanjoin_target_same_exprs);
1728
1729 /*
1730 * Save the various upper-rel PathTargets we just computed into
1731 * root->upper_targets[]. The core code doesn't use this, but it
1732 * provides a convenient place for extensions to get at the info. For
1733 * consistency, we save all the intermediate targets, even though some
1734 * of the corresponding upperrels might not be needed for this query.
1735 */
1736 root->upper_targets[UPPERREL_FINAL] = final_target;
1737 root->upper_targets[UPPERREL_ORDERED] = final_target;
1738 root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1739 root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1740 root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1741 root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1742
1743 /*
1744 * If we have grouping and/or aggregation, consider ways to implement
1745 * that. We build a new upperrel representing the output of this
1746 * phase.
1747 */
1748 if (have_grouping)
1749 {
1750 current_rel = create_grouping_paths(root,
1751 current_rel,
1752 grouping_target,
1753 grouping_target_parallel_safe,
1754 gset_data);
1755 /* Fix things up if grouping_target contains SRFs */
1756 if (parse->hasTargetSRFs)
1757 adjust_paths_for_srfs(root, current_rel,
1758 grouping_targets,
1759 grouping_targets_contain_srfs);
1760 }
1761
1762 /*
1763 * If we have window functions, consider ways to implement those. We
1764 * build a new upperrel representing the output of this phase.
1765 */
1766 if (activeWindows)
1767 {
1768 current_rel = create_window_paths(root,
1769 current_rel,
1770 grouping_target,
1771 sort_input_target,
1772 sort_input_target_parallel_safe,
1773 wflists,
1774 activeWindows);
1775 /* Fix things up if sort_input_target contains SRFs */
1776 if (parse->hasTargetSRFs)
1777 adjust_paths_for_srfs(root, current_rel,
1778 sort_input_targets,
1779 sort_input_targets_contain_srfs);
1780 }
1781
1782 /*
1783 * If there is a DISTINCT clause, consider ways to implement that. We
1784 * build a new upperrel representing the output of this phase.
1785 */
1786 if (parse->distinctClause)
1787 {
1788 current_rel = create_distinct_paths(root,
1789 current_rel,
1790 sort_input_target);
1791 }
1792 } /* end of if (setOperations) */
1793
1794 /*
1795 * If ORDER BY was given, consider ways to implement that, and generate a
1796 * new upperrel containing only paths that emit the correct ordering and
1797 * project the correct final_target. We can apply the original
1798 * limit_tuples limit in sort costing here, but only if there are no
1799 * postponed SRFs.
1800 */
1801 if (parse->sortClause)
1802 {
1803 current_rel = create_ordered_paths(root,
1804 current_rel,
1805 final_target,
1806 final_target_parallel_safe,
1807 have_postponed_srfs ? -1.0 :
1808 limit_tuples);
1809 /* Fix things up if final_target contains SRFs */
1810 if (parse->hasTargetSRFs)
1811 adjust_paths_for_srfs(root, current_rel,
1812 final_targets,
1813 final_targets_contain_srfs);
1814 }
1815
1816 /*
1817 * Now we are prepared to build the final-output upperrel.
1818 */
1819 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1820
1821 /*
1822 * If the input rel is marked consider_parallel and there's nothing that's
1823 * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1824 * consider_parallel as well. Note that if the query has rowMarks or is
1825 * not a SELECT, consider_parallel will be false for every relation in the
1826 * query.
1827 */
1828 if (current_rel->consider_parallel &&
1829 is_parallel_safe(root, parse->limitOffset) &&
1830 is_parallel_safe(root, parse->limitCount))
1831 final_rel->consider_parallel = true;
1832
1833 /*
1834 * If the current_rel belongs to a single FDW, so does the final_rel.
1835 */
1836 final_rel->serverid = current_rel->serverid;
1837 final_rel->userid = current_rel->userid;
1838 final_rel->useridiscurrent = current_rel->useridiscurrent;
1839 final_rel->fdwroutine = current_rel->fdwroutine;
1840
1841 /*
1842 * Generate paths for the final_rel. Insert all surviving paths, with
1843 * LockRows, Limit, and/or ModifyTable steps added if needed.
1844 */
1845 foreach(lc, current_rel->pathlist)
1846 {
1847 Path *path = (Path *) lfirst(lc);
1848
1849 /*
1850 * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1851 * (Note: we intentionally test parse->rowMarks not root->rowMarks
1852 * here. If there are only non-locking rowmarks, they should be
1853 * handled by the ModifyTable node instead. However, root->rowMarks
1854 * is what goes into the LockRows node.)
1855 */
1856 if (parse->rowMarks)
1857 {
1858 path = (Path *) create_lockrows_path(root, final_rel, path,
1859 root->rowMarks,
1861 }
1862
1863 /*
1864 * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1865 */
1866 if (limit_needed(parse))
1867 {
1868 path = (Path *) create_limit_path(root, final_rel, path,
1869 parse->limitOffset,
1870 parse->limitCount,
1871 parse->limitOption,
1872 offset_est, count_est);
1873 }
1874
1875 /*
1876 * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
1877 */
1878 if (parse->commandType != CMD_SELECT)
1879 {
1880 Index rootRelation;
1881 List *resultRelations = NIL;
1882 List *updateColnosLists = NIL;
1883 List *withCheckOptionLists = NIL;
1884 List *returningLists = NIL;
1885 List *mergeActionLists = NIL;
1886 List *mergeJoinConditions = NIL;
1887 List *rowMarks;
1888
1889 if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
1890 {
1891 /* Inherited UPDATE/DELETE/MERGE */
1892 RelOptInfo *top_result_rel = find_base_rel(root,
1893 parse->resultRelation);
1894 int resultRelation = -1;
1895
1896 /* Pass the root result rel forward to the executor. */
1897 rootRelation = parse->resultRelation;
1898
1899 /* Add only leaf children to ModifyTable. */
1900 while ((resultRelation = bms_next_member(root->leaf_result_relids,
1901 resultRelation)) >= 0)
1902 {
1903 RelOptInfo *this_result_rel = find_base_rel(root,
1904 resultRelation);
1905
1906 /*
1907 * Also exclude any leaf rels that have turned dummy since
1908 * being added to the list, for example, by being excluded
1909 * by constraint exclusion.
1910 */
1911 if (IS_DUMMY_REL(this_result_rel))
1912 continue;
1913
1914 /* Build per-target-rel lists needed by ModifyTable */
1915 resultRelations = lappend_int(resultRelations,
1916 resultRelation);
1917 if (parse->commandType == CMD_UPDATE)
1918 {
1919 List *update_colnos = root->update_colnos;
1920
1921 if (this_result_rel != top_result_rel)
1922 update_colnos =
1924 update_colnos,
1925 this_result_rel->relid,
1926 top_result_rel->relid);
1927 updateColnosLists = lappend(updateColnosLists,
1928 update_colnos);
1929 }
1930 if (parse->withCheckOptions)
1931 {
1932 List *withCheckOptions = parse->withCheckOptions;
1933
1934 if (this_result_rel != top_result_rel)
1935 withCheckOptions = (List *)
1937 (Node *) withCheckOptions,
1938 this_result_rel,
1939 top_result_rel);
1940 withCheckOptionLists = lappend(withCheckOptionLists,
1941 withCheckOptions);
1942 }
1943 if (parse->returningList)
1944 {
1945 List *returningList = parse->returningList;
1946
1947 if (this_result_rel != top_result_rel)
1948 returningList = (List *)
1950 (Node *) returningList,
1951 this_result_rel,
1952 top_result_rel);
1953 returningLists = lappend(returningLists,
1954 returningList);
1955 }
1956 if (parse->mergeActionList)
1957 {
1958 ListCell *l;
1959 List *mergeActionList = NIL;
1960
1961 /*
1962 * Copy MergeActions and translate stuff that
1963 * references attribute numbers.
1964 */
1965 foreach(l, parse->mergeActionList)
1966 {
1968 *leaf_action = copyObject(action);
1969
1970 leaf_action->qual =
1972 (Node *) action->qual,
1973 this_result_rel,
1974 top_result_rel);
1975 leaf_action->targetList = (List *)
1977 (Node *) action->targetList,
1978 this_result_rel,
1979 top_result_rel);
1980 if (leaf_action->commandType == CMD_UPDATE)
1981 leaf_action->updateColnos =
1983 action->updateColnos,
1984 this_result_rel->relid,
1985 top_result_rel->relid);
1986 mergeActionList = lappend(mergeActionList,
1987 leaf_action);
1988 }
1989
1990 mergeActionLists = lappend(mergeActionLists,
1991 mergeActionList);
1992 }
1993 if (parse->commandType == CMD_MERGE)
1994 {
1995 Node *mergeJoinCondition = parse->mergeJoinCondition;
1996
1997 if (this_result_rel != top_result_rel)
1998 mergeJoinCondition =
2000 mergeJoinCondition,
2001 this_result_rel,
2002 top_result_rel);
2003 mergeJoinConditions = lappend(mergeJoinConditions,
2004 mergeJoinCondition);
2005 }
2006 }
2007
2008 if (resultRelations == NIL)
2009 {
2010 /*
2011 * We managed to exclude every child rel, so generate a
2012 * dummy one-relation plan using info for the top target
2013 * rel (even though that may not be a leaf target).
2014 * Although it's clear that no data will be updated or
2015 * deleted, we still need to have a ModifyTable node so
2016 * that any statement triggers will be executed. (This
2017 * could be cleaner if we fixed nodeModifyTable.c to allow
2018 * zero target relations, but that probably wouldn't be a
2019 * net win.)
2020 */
2021 resultRelations = list_make1_int(parse->resultRelation);
2022 if (parse->commandType == CMD_UPDATE)
2023 updateColnosLists = list_make1(root->update_colnos);
2024 if (parse->withCheckOptions)
2025 withCheckOptionLists = list_make1(parse->withCheckOptions);
2026 if (parse->returningList)
2027 returningLists = list_make1(parse->returningList);
2028 if (parse->mergeActionList)
2029 mergeActionLists = list_make1(parse->mergeActionList);
2030 if (parse->commandType == CMD_MERGE)
2031 mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2032 }
2033 }
2034 else
2035 {
2036 /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
2037 rootRelation = 0; /* there's no separate root rel */
2038 resultRelations = list_make1_int(parse->resultRelation);
2039 if (parse->commandType == CMD_UPDATE)
2040 updateColnosLists = list_make1(root->update_colnos);
2041 if (parse->withCheckOptions)
2042 withCheckOptionLists = list_make1(parse->withCheckOptions);
2043 if (parse->returningList)
2044 returningLists = list_make1(parse->returningList);
2045 if (parse->mergeActionList)
2046 mergeActionLists = list_make1(parse->mergeActionList);
2047 if (parse->commandType == CMD_MERGE)
2048 mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2049 }
2050
2051 /*
2052 * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2053 * will have dealt with fetching non-locked marked rows, else we
2054 * need to have ModifyTable do that.
2055 */
2056 if (parse->rowMarks)
2057 rowMarks = NIL;
2058 else
2059 rowMarks = root->rowMarks;
2060
2061 path = (Path *)
2062 create_modifytable_path(root, final_rel,
2063 path,
2064 parse->commandType,
2065 parse->canSetTag,
2066 parse->resultRelation,
2067 rootRelation,
2068 root->partColsUpdated,
2069 resultRelations,
2070 updateColnosLists,
2071 withCheckOptionLists,
2072 returningLists,
2073 rowMarks,
2074 parse->onConflict,
2075 mergeActionLists,
2076 mergeJoinConditions,
2078 }
2079
2080 /* And shove it into final_rel */
2081 add_path(final_rel, path);
2082 }
2083
2084 /*
2085 * Generate partial paths for final_rel, too, if outer query levels might
2086 * be able to make use of them.
2087 */
2088 if (final_rel->consider_parallel && root->query_level > 1 &&
2090 {
2091 Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2092 foreach(lc, current_rel->partial_pathlist)
2093 {
2094 Path *partial_path = (Path *) lfirst(lc);
2095
2096 add_partial_path(final_rel, partial_path);
2097 }
2098 }
2099
2101 extra.limit_tuples = limit_tuples;
2102 extra.count_est = count_est;
2103 extra.offset_est = offset_est;
2104
2105 /*
2106 * If there is an FDW that's responsible for all baserels of the query,
2107 * let it consider adding ForeignPaths.
2108 */
2109 if (final_rel->fdwroutine &&
2110 final_rel->fdwroutine->GetForeignUpperPaths)
2111 final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2112 current_rel, final_rel,
2113 &extra);
2114
2115 /* Let extensions possibly add some more paths */
2117 (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2118 current_rel, final_rel, &extra);
2119
2120 /* Note: currently, we leave it to callers to do set_cheapest() */
2121}
2122
2123/*
2124 * Do preprocessing for groupingSets clause and related data. This handles the
2125 * preliminary steps of expanding the grouping sets, organizing them into lists
2126 * of rollups, and preparing annotations which will later be filled in with
2127 * size estimates.
2128 */
2129static grouping_sets_data *
2131{
2132 Query *parse = root->parse;
2133 List *sets;
2134 int maxref = 0;
2135 ListCell *lc_set;
2137
2138 parse->groupingSets = expand_grouping_sets(parse->groupingSets, parse->groupDistinct, -1);
2139
2140 gd->any_hashable = false;
2141 gd->unhashable_refs = NULL;
2142 gd->unsortable_refs = NULL;
2143 gd->unsortable_sets = NIL;
2144
2145 /*
2146 * We don't currently make any attempt to optimize the groupClause when
2147 * there are grouping sets, so just duplicate it in processed_groupClause.
2148 */
2149 root->processed_groupClause = parse->groupClause;
2150
2151 if (parse->groupClause)
2152 {
2153 ListCell *lc;
2154
2155 foreach(lc, parse->groupClause)
2156 {
2158 Index ref = gc->tleSortGroupRef;
2159
2160 if (ref > maxref)
2161 maxref = ref;
2162
2163 if (!gc->hashable)
2165
2166 if (!OidIsValid(gc->sortop))
2168 }
2169 }
2170
2171 /* Allocate workspace array for remapping */
2172 gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
2173
2174 /*
2175 * If we have any unsortable sets, we must extract them before trying to
2176 * prepare rollups. Unsortable sets don't go through
2177 * reorder_grouping_sets, so we must apply the GroupingSetData annotation
2178 * here.
2179 */
2180 if (!bms_is_empty(gd->unsortable_refs))
2181 {
2182 List *sortable_sets = NIL;
2183 ListCell *lc;
2184
2185 foreach(lc, parse->groupingSets)
2186 {
2187 List *gset = (List *) lfirst(lc);
2188
2189 if (bms_overlap_list(gd->unsortable_refs, gset))
2190 {
2192
2193 gs->set = gset;
2195
2196 /*
2197 * We must enforce here that an unsortable set is hashable;
2198 * later code assumes this. Parse analysis only checks that
2199 * every individual column is either hashable or sortable.
2200 *
2201 * Note that passing this test doesn't guarantee we can
2202 * generate a plan; there might be other showstoppers.
2203 */
2204 if (bms_overlap_list(gd->unhashable_refs, gset))
2205 ereport(ERROR,
2206 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2207 errmsg("could not implement GROUP BY"),
2208 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2209 }
2210 else
2211 sortable_sets = lappend(sortable_sets, gset);
2212 }
2213
2214 if (sortable_sets)
2215 sets = extract_rollup_sets(sortable_sets);
2216 else
2217 sets = NIL;
2218 }
2219 else
2220 sets = extract_rollup_sets(parse->groupingSets);
2221
2222 foreach(lc_set, sets)
2223 {
2224 List *current_sets = (List *) lfirst(lc_set);
2225 RollupData *rollup = makeNode(RollupData);
2226 GroupingSetData *gs;
2227
2228 /*
2229 * Reorder the current list of grouping sets into correct prefix
2230 * order. If only one aggregation pass is needed, try to make the
2231 * list match the ORDER BY clause; if more than one pass is needed, we
2232 * don't bother with that.
2233 *
2234 * Note that this reorders the sets from smallest-member-first to
2235 * largest-member-first, and applies the GroupingSetData annotations,
2236 * though the data will be filled in later.
2237 */
2238 current_sets = reorder_grouping_sets(current_sets,
2239 (list_length(sets) == 1
2240 ? parse->sortClause
2241 : NIL));
2242
2243 /*
2244 * Get the initial (and therefore largest) grouping set.
2245 */
2246 gs = linitial_node(GroupingSetData, current_sets);
2247
2248 /*
2249 * Order the groupClause appropriately. If the first grouping set is
2250 * empty, then the groupClause must also be empty; otherwise we have
2251 * to force the groupClause to match that grouping set's order.
2252 *
2253 * (The first grouping set can be empty even though parse->groupClause
2254 * is not empty only if all non-empty grouping sets are unsortable.
2255 * The groupClauses for hashed grouping sets are built later on.)
2256 */
2257 if (gs->set)
2259 else
2260 rollup->groupClause = NIL;
2261
2262 /*
2263 * Is it hashable? We pretend empty sets are hashable even though we
2264 * actually force them not to be hashed later. But don't bother if
2265 * there's nothing but empty sets (since in that case we can't hash
2266 * anything).
2267 */
2268 if (gs->set &&
2270 {
2271 rollup->hashable = true;
2272 gd->any_hashable = true;
2273 }
2274
2275 /*
2276 * Now that we've pinned down an order for the groupClause for this
2277 * list of grouping sets, we need to remap the entries in the grouping
2278 * sets from sortgrouprefs to plain indices (0-based) into the
2279 * groupClause for this collection of grouping sets. We keep the
2280 * original form for later use, though.
2281 */
2282 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2283 current_sets,
2285 rollup->gsets_data = current_sets;
2286
2287 gd->rollups = lappend(gd->rollups, rollup);
2288 }
2289
2290 if (gd->unsortable_sets)
2291 {
2292 /*
2293 * We have not yet pinned down a groupclause for this, but we will
2294 * need index-based lists for estimation purposes. Construct
2295 * hash_sets_idx based on the entire original groupclause for now.
2296 */
2297 gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
2298 gd->unsortable_sets,
2300 gd->any_hashable = true;
2301 }
2302
2303 return gd;
2304}
2305
2306/*
2307 * Given a groupclause and a list of GroupingSetData, return equivalent sets
2308 * (without annotation) mapped to indexes into the given groupclause.
2309 */
2310static List *
2312 List *gsets,
2313 int *tleref_to_colnum_map)
2314{
2315 int ref = 0;
2316 List *result = NIL;
2317 ListCell *lc;
2318
2319 foreach(lc, groupClause)
2320 {
2322
2323 tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
2324 }
2325
2326 foreach(lc, gsets)
2327 {
2328 List *set = NIL;
2329 ListCell *lc2;
2331
2332 foreach(lc2, gs->set)
2333 {
2334 set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
2335 }
2336
2337 result = lappend(result, set);
2338 }
2339
2340 return result;
2341}
2342
2343
2344/*
2345 * preprocess_rowmarks - set up PlanRowMarks if needed
2346 */
2347static void
2349{
2350 Query *parse = root->parse;
2351 Bitmapset *rels;
2352 List *prowmarks;
2353 ListCell *l;
2354 int i;
2355
2356 if (parse->rowMarks)
2357 {
2358 /*
2359 * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
2360 * grouping, since grouping renders a reference to individual tuple
2361 * CTIDs invalid. This is also checked at parse time, but that's
2362 * insufficient because of rule substitution, query pullup, etc.
2363 */
2365 parse->rowMarks)->strength);
2366 }
2367 else
2368 {
2369 /*
2370 * We only need rowmarks for UPDATE, DELETE, MERGE, or FOR [KEY]
2371 * UPDATE/SHARE.
2372 */
2373 if (parse->commandType != CMD_UPDATE &&
2374 parse->commandType != CMD_DELETE &&
2375 parse->commandType != CMD_MERGE)
2376 return;
2377 }
2378
2379 /*
2380 * We need to have rowmarks for all base relations except the target. We
2381 * make a bitmapset of all base rels and then remove the items we don't
2382 * need or have FOR [KEY] UPDATE/SHARE marks for.
2383 */
2384 rels = get_relids_in_jointree((Node *) parse->jointree, false, false);
2385 if (parse->resultRelation)
2386 rels = bms_del_member(rels, parse->resultRelation);
2387
2388 /*
2389 * Convert RowMarkClauses to PlanRowMark representation.
2390 */
2391 prowmarks = NIL;
2392 foreach(l, parse->rowMarks)
2393 {
2395 RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
2396 PlanRowMark *newrc;
2397
2398 /*
2399 * Currently, it is syntactically impossible to have FOR UPDATE et al
2400 * applied to an update/delete target rel. If that ever becomes
2401 * possible, we should drop the target from the PlanRowMark list.
2402 */
2403 Assert(rc->rti != parse->resultRelation);
2404
2405 /*
2406 * Ignore RowMarkClauses for subqueries; they aren't real tables and
2407 * can't support true locking. Subqueries that got flattened into the
2408 * main query should be ignored completely. Any that didn't will get
2409 * ROW_MARK_COPY items in the next loop.
2410 */
2411 if (rte->rtekind != RTE_RELATION)
2412 continue;
2413
2414 rels = bms_del_member(rels, rc->rti);
2415
2416 newrc = makeNode(PlanRowMark);
2417 newrc->rti = newrc->prti = rc->rti;
2418 newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2419 newrc->markType = select_rowmark_type(rte, rc->strength);
2420 newrc->allMarkTypes = (1 << newrc->markType);
2421 newrc->strength = rc->strength;
2422 newrc->waitPolicy = rc->waitPolicy;
2423 newrc->isParent = false;
2424
2425 prowmarks = lappend(prowmarks, newrc);
2426 }
2427
2428 /*
2429 * Now, add rowmarks for any non-target, non-locked base relations.
2430 */
2431 i = 0;
2432 foreach(l, parse->rtable)
2433 {
2435 PlanRowMark *newrc;
2436
2437 i++;
2438 if (!bms_is_member(i, rels))
2439 continue;
2440
2441 newrc = makeNode(PlanRowMark);
2442 newrc->rti = newrc->prti = i;
2443 newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2444 newrc->markType = select_rowmark_type(rte, LCS_NONE);
2445 newrc->allMarkTypes = (1 << newrc->markType);
2446 newrc->strength = LCS_NONE;
2447 newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
2448 newrc->isParent = false;
2449
2450 prowmarks = lappend(prowmarks, newrc);
2451 }
2452
2453 root->rowMarks = prowmarks;
2454}
2455
2456/*
2457 * Select RowMarkType to use for a given table
2458 */
2461{
2462 if (rte->rtekind != RTE_RELATION)
2463 {
2464 /* If it's not a table at all, use ROW_MARK_COPY */
2465 return ROW_MARK_COPY;
2466 }
2467 else if (rte->relkind == RELKIND_FOREIGN_TABLE)
2468 {
2469 /* Let the FDW select the rowmark type, if it wants to */
2470 FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
2471
2472 if (fdwroutine->GetForeignRowMarkType != NULL)
2473 return fdwroutine->GetForeignRowMarkType(rte, strength);
2474 /* Otherwise, use ROW_MARK_COPY by default */
2475 return ROW_MARK_COPY;
2476 }
2477 else
2478 {
2479 /* Regular table, apply the appropriate lock type */
2480 switch (strength)
2481 {
2482 case LCS_NONE:
2483
2484 /*
2485 * We don't need a tuple lock, only the ability to re-fetch
2486 * the row.
2487 */
2488 return ROW_MARK_REFERENCE;
2489 break;
2490 case LCS_FORKEYSHARE:
2491 return ROW_MARK_KEYSHARE;
2492 break;
2493 case LCS_FORSHARE:
2494 return ROW_MARK_SHARE;
2495 break;
2496 case LCS_FORNOKEYUPDATE:
2498 break;
2499 case LCS_FORUPDATE:
2500 return ROW_MARK_EXCLUSIVE;
2501 break;
2502 }
2503 elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
2504 return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
2505 }
2506}
2507
2508/*
2509 * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
2510 *
2511 * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
2512 * results back in *count_est and *offset_est. These variables are set to
2513 * 0 if the corresponding clause is not present, and -1 if it's present
2514 * but we couldn't estimate the value for it. (The "0" convention is OK
2515 * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
2516 * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
2517 * usual practice of never estimating less than one row.) These values will
2518 * be passed to create_limit_path, which see if you change this code.
2519 *
2520 * The return value is the suitably adjusted tuple_fraction to use for
2521 * planning the query. This adjustment is not overridable, since it reflects
2522 * plan actions that grouping_planner() will certainly take, not assumptions
2523 * about context.
2524 */
2525static double
2526preprocess_limit(PlannerInfo *root, double tuple_fraction,
2527 int64 *offset_est, int64 *count_est)
2528{
2529 Query *parse = root->parse;
2530 Node *est;
2531 double limit_fraction;
2532
2533 /* Should not be called unless LIMIT or OFFSET */
2534 Assert(parse->limitCount || parse->limitOffset);
2535
2536 /*
2537 * Try to obtain the clause values. We use estimate_expression_value
2538 * primarily because it can sometimes do something useful with Params.
2539 */
2540 if (parse->limitCount)
2541 {
2542 est = estimate_expression_value(root, parse->limitCount);
2543 if (est && IsA(est, Const))
2544 {
2545 if (((Const *) est)->constisnull)
2546 {
2547 /* NULL indicates LIMIT ALL, ie, no limit */
2548 *count_est = 0; /* treat as not present */
2549 }
2550 else
2551 {
2552 *count_est = DatumGetInt64(((Const *) est)->constvalue);
2553 if (*count_est <= 0)
2554 *count_est = 1; /* force to at least 1 */
2555 }
2556 }
2557 else
2558 *count_est = -1; /* can't estimate */
2559 }
2560 else
2561 *count_est = 0; /* not present */
2562
2563 if (parse->limitOffset)
2564 {
2565 est = estimate_expression_value(root, parse->limitOffset);
2566 if (est && IsA(est, Const))
2567 {
2568 if (((Const *) est)->constisnull)
2569 {
2570 /* Treat NULL as no offset; the executor will too */
2571 *offset_est = 0; /* treat as not present */
2572 }
2573 else
2574 {
2575 *offset_est = DatumGetInt64(((Const *) est)->constvalue);
2576 if (*offset_est < 0)
2577 *offset_est = 0; /* treat as not present */
2578 }
2579 }
2580 else
2581 *offset_est = -1; /* can't estimate */
2582 }
2583 else
2584 *offset_est = 0; /* not present */
2585
2586 if (*count_est != 0)
2587 {
2588 /*
2589 * A LIMIT clause limits the absolute number of tuples returned.
2590 * However, if it's not a constant LIMIT then we have to guess; for
2591 * lack of a better idea, assume 10% of the plan's result is wanted.
2592 */
2593 if (*count_est < 0 || *offset_est < 0)
2594 {
2595 /* LIMIT or OFFSET is an expression ... punt ... */
2596 limit_fraction = 0.10;
2597 }
2598 else
2599 {
2600 /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
2601 limit_fraction = (double) *count_est + (double) *offset_est;
2602 }
2603
2604 /*
2605 * If we have absolute limits from both caller and LIMIT, use the
2606 * smaller value; likewise if they are both fractional. If one is
2607 * fractional and the other absolute, we can't easily determine which
2608 * is smaller, but we use the heuristic that the absolute will usually
2609 * be smaller.
2610 */
2611 if (tuple_fraction >= 1.0)
2612 {
2613 if (limit_fraction >= 1.0)
2614 {
2615 /* both absolute */
2616 tuple_fraction = Min(tuple_fraction, limit_fraction);
2617 }
2618 else
2619 {
2620 /* caller absolute, limit fractional; use caller's value */
2621 }
2622 }
2623 else if (tuple_fraction > 0.0)
2624 {
2625 if (limit_fraction >= 1.0)
2626 {
2627 /* caller fractional, limit absolute; use limit */
2628 tuple_fraction = limit_fraction;
2629 }
2630 else
2631 {
2632 /* both fractional */
2633 tuple_fraction = Min(tuple_fraction, limit_fraction);
2634 }
2635 }
2636 else
2637 {
2638 /* no info from caller, just use limit */
2639 tuple_fraction = limit_fraction;
2640 }
2641 }
2642 else if (*offset_est != 0 && tuple_fraction > 0.0)
2643 {
2644 /*
2645 * We have an OFFSET but no LIMIT. This acts entirely differently
2646 * from the LIMIT case: here, we need to increase rather than decrease
2647 * the caller's tuple_fraction, because the OFFSET acts to cause more
2648 * tuples to be fetched instead of fewer. This only matters if we got
2649 * a tuple_fraction > 0, however.
2650 *
2651 * As above, use 10% if OFFSET is present but unestimatable.
2652 */
2653 if (*offset_est < 0)
2654 limit_fraction = 0.10;
2655 else
2656 limit_fraction = (double) *offset_est;
2657
2658 /*
2659 * If we have absolute counts from both caller and OFFSET, add them
2660 * together; likewise if they are both fractional. If one is
2661 * fractional and the other absolute, we want to take the larger, and
2662 * we heuristically assume that's the fractional one.
2663 */
2664 if (tuple_fraction >= 1.0)
2665 {
2666 if (limit_fraction >= 1.0)
2667 {
2668 /* both absolute, so add them together */
2669 tuple_fraction += limit_fraction;
2670 }
2671 else
2672 {
2673 /* caller absolute, limit fractional; use limit */
2674 tuple_fraction = limit_fraction;
2675 }
2676 }
2677 else
2678 {
2679 if (limit_fraction >= 1.0)
2680 {
2681 /* caller fractional, limit absolute; use caller's value */
2682 }
2683 else
2684 {
2685 /* both fractional, so add them together */
2686 tuple_fraction += limit_fraction;
2687 if (tuple_fraction >= 1.0)
2688 tuple_fraction = 0.0; /* assume fetch all */
2689 }
2690 }
2691 }
2692
2693 return tuple_fraction;
2694}
2695
2696/*
2697 * limit_needed - do we actually need a Limit plan node?
2698 *
2699 * If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
2700 * a Limit node. This is worth checking for because "OFFSET 0" is a common
2701 * locution for an optimization fence. (Because other places in the planner
2702 * merely check whether parse->limitOffset isn't NULL, it will still work as
2703 * an optimization fence --- we're just suppressing unnecessary run-time
2704 * overhead.)
2705 *
2706 * This might look like it could be merged into preprocess_limit, but there's
2707 * a key distinction: here we need hard constants in OFFSET/LIMIT, whereas
2708 * in preprocess_limit it's good enough to consider estimated values.
2709 */
2710bool
2712{
2713 Node *node;
2714
2715 node = parse->limitCount;
2716 if (node)
2717 {
2718 if (IsA(node, Const))
2719 {
2720 /* NULL indicates LIMIT ALL, ie, no limit */
2721 if (!((Const *) node)->constisnull)
2722 return true; /* LIMIT with a constant value */
2723 }
2724 else
2725 return true; /* non-constant LIMIT */
2726 }
2727
2728 node = parse->limitOffset;
2729 if (node)
2730 {
2731 if (IsA(node, Const))
2732 {
2733 /* Treat NULL as no offset; the executor would too */
2734 if (!((Const *) node)->constisnull)
2735 {
2736 int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2737
2738 if (offset != 0)
2739 return true; /* OFFSET with a nonzero value */
2740 }
2741 }
2742 else
2743 return true; /* non-constant OFFSET */
2744 }
2745
2746 return false; /* don't need a Limit plan node */
2747}
2748
2749/*
2750 * preprocess_groupclause - do preparatory work on GROUP BY clause
2751 *
2752 * The idea here is to adjust the ordering of the GROUP BY elements
2753 * (which in itself is semantically insignificant) to match ORDER BY,
2754 * thereby allowing a single sort operation to both implement the ORDER BY
2755 * requirement and set up for a Unique step that implements GROUP BY.
2756 * We also consider partial match between GROUP BY and ORDER BY elements,
2757 * which could allow to implement ORDER BY using the incremental sort.
2758 *
2759 * We also consider other orderings of the GROUP BY elements, which could
2760 * match the sort ordering of other possible plans (eg an indexscan) and
2761 * thereby reduce cost. This is implemented during the generation of grouping
2762 * paths. See get_useful_group_keys_orderings() for details.
2763 *
2764 * Note: we need no comparable processing of the distinctClause because
2765 * the parser already enforced that that matches ORDER BY.
2766 *
2767 * Note: we return a fresh List, but its elements are the same
2768 * SortGroupClauses appearing in parse->groupClause. This is important
2769 * because later processing may modify the processed_groupClause list.
2770 *
2771 * For grouping sets, the order of items is instead forced to agree with that
2772 * of the grouping set (and items not in the grouping set are skipped). The
2773 * work of sorting the order of grouping set elements to match the ORDER BY if
2774 * possible is done elsewhere.
2775 */
2776static List *
2778{
2779 Query *parse = root->parse;
2780 List *new_groupclause = NIL;
2781 ListCell *sl;
2782 ListCell *gl;
2783
2784 /* For grouping sets, we need to force the ordering */
2785 if (force)
2786 {
2787 foreach(sl, force)
2788 {
2789 Index ref = lfirst_int(sl);
2790 SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
2791
2792 new_groupclause = lappend(new_groupclause, cl);
2793 }
2794
2795 return new_groupclause;
2796 }
2797
2798 /* If no ORDER BY, nothing useful to do here */
2799 if (parse->sortClause == NIL)
2800 return list_copy(parse->groupClause);
2801
2802 /*
2803 * Scan the ORDER BY clause and construct a list of matching GROUP BY
2804 * items, but only as far as we can make a matching prefix.
2805 *
2806 * This code assumes that the sortClause contains no duplicate items.
2807 */
2808 foreach(sl, parse->sortClause)
2809 {
2811
2812 foreach(gl, parse->groupClause)
2813 {
2815
2816 if (equal(gc, sc))
2817 {
2818 new_groupclause = lappend(new_groupclause, gc);
2819 break;
2820 }
2821 }
2822 if (gl == NULL)
2823 break; /* no match, so stop scanning */
2824 }
2825
2826
2827 /* If no match at all, no point in reordering GROUP BY */
2828 if (new_groupclause == NIL)
2829 return list_copy(parse->groupClause);
2830
2831 /*
2832 * Add any remaining GROUP BY items to the new list. We don't require a
2833 * complete match, because even partial match allows ORDER BY to be
2834 * implemented using incremental sort. Also, give up if there are any
2835 * non-sortable GROUP BY items, since then there's no hope anyway.
2836 */
2837 foreach(gl, parse->groupClause)
2838 {
2840
2841 if (list_member_ptr(new_groupclause, gc))
2842 continue; /* it matched an ORDER BY item */
2843 if (!OidIsValid(gc->sortop)) /* give up, GROUP BY can't be sorted */
2844 return list_copy(parse->groupClause);
2845 new_groupclause = lappend(new_groupclause, gc);
2846 }
2847
2848 /* Success --- install the rearranged GROUP BY list */
2849 Assert(list_length(parse->groupClause) == list_length(new_groupclause));
2850 return new_groupclause;
2851}
2852
2853/*
2854 * Extract lists of grouping sets that can be implemented using a single
2855 * rollup-type aggregate pass each. Returns a list of lists of grouping sets.
2856 *
2857 * Input must be sorted with smallest sets first. Result has each sublist
2858 * sorted with smallest sets first.
2859 *
2860 * We want to produce the absolute minimum possible number of lists here to
2861 * avoid excess sorts. Fortunately, there is an algorithm for this; the problem
2862 * of finding the minimal partition of a partially-ordered set into chains
2863 * (which is what we need, taking the list of grouping sets as a poset ordered
2864 * by set inclusion) can be mapped to the problem of finding the maximum
2865 * cardinality matching on a bipartite graph, which is solvable in polynomial
2866 * time with a worst case of no worse than O(n^2.5) and usually much
2867 * better. Since our N is at most 4096, we don't need to consider fallbacks to
2868 * heuristic or approximate methods. (Planning time for a 12-d cube is under
2869 * half a second on my modest system even with optimization off and assertions
2870 * on.)
2871 */
2872static List *
2874{
2875 int num_sets_raw = list_length(groupingSets);
2876 int num_empty = 0;
2877 int num_sets = 0; /* distinct sets */
2878 int num_chains = 0;
2879 List *result = NIL;
2880 List **results;
2881 List **orig_sets;
2882 Bitmapset **set_masks;
2883 int *chains;
2884 short **adjacency;
2885 short *adjacency_buf;
2887 int i;
2888 int j;
2889 int j_size;
2890 ListCell *lc1 = list_head(groupingSets);
2891 ListCell *lc;
2892
2893 /*
2894 * Start by stripping out empty sets. The algorithm doesn't require this,
2895 * but the planner currently needs all empty sets to be returned in the
2896 * first list, so we strip them here and add them back after.
2897 */
2898 while (lc1 && lfirst(lc1) == NIL)
2899 {
2900 ++num_empty;
2901 lc1 = lnext(groupingSets, lc1);
2902 }
2903
2904 /* bail out now if it turns out that all we had were empty sets. */
2905 if (!lc1)
2906 return list_make1(groupingSets);
2907
2908 /*----------
2909 * We don't strictly need to remove duplicate sets here, but if we don't,
2910 * they tend to become scattered through the result, which is a bit
2911 * confusing (and irritating if we ever decide to optimize them out).
2912 * So we remove them here and add them back after.
2913 *
2914 * For each non-duplicate set, we fill in the following:
2915 *
2916 * orig_sets[i] = list of the original set lists
2917 * set_masks[i] = bitmapset for testing inclusion
2918 * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
2919 *
2920 * chains[i] will be the result group this set is assigned to.
2921 *
2922 * We index all of these from 1 rather than 0 because it is convenient
2923 * to leave 0 free for the NIL node in the graph algorithm.
2924 *----------
2925 */
2926 orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
2927 set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
2928 adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
2929 adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
2930
2931 j_size = 0;
2932 j = 0;
2933 i = 1;
2934
2935 for_each_cell(lc, groupingSets, lc1)
2936 {
2937 List *candidate = (List *) lfirst(lc);
2938 Bitmapset *candidate_set = NULL;
2939 ListCell *lc2;
2940 int dup_of = 0;
2941
2942 foreach(lc2, candidate)
2943 {
2944 candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
2945 }
2946
2947 /* we can only be a dup if we're the same length as a previous set */
2948 if (j_size == list_length(candidate))
2949 {
2950 int k;
2951
2952 for (k = j; k < i; ++k)
2953 {
2954 if (bms_equal(set_masks[k], candidate_set))
2955 {
2956 dup_of = k;
2957 break;
2958 }
2959 }
2960 }
2961 else if (j_size < list_length(candidate))
2962 {
2963 j_size = list_length(candidate);
2964 j = i;
2965 }
2966
2967 if (dup_of > 0)
2968 {
2969 orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
2970 bms_free(candidate_set);
2971 }
2972 else
2973 {
2974 int k;
2975 int n_adj = 0;
2976
2977 orig_sets[i] = list_make1(candidate);
2978 set_masks[i] = candidate_set;
2979
2980 /* fill in adjacency list; no need to compare equal-size sets */
2981
2982 for (k = j - 1; k > 0; --k)
2983 {
2984 if (bms_is_subset(set_masks[k], candidate_set))
2985 adjacency_buf[++n_adj] = k;
2986 }
2987
2988 if (n_adj > 0)
2989 {
2990 adjacency_buf[0] = n_adj;
2991 adjacency[i] = palloc((n_adj + 1) * sizeof(short));
2992 memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
2993 }
2994 else
2995 adjacency[i] = NULL;
2996
2997 ++i;
2998 }
2999 }
3000
3001 num_sets = i - 1;
3002
3003 /*
3004 * Apply the graph matching algorithm to do the work.
3005 */
3006 state = BipartiteMatch(num_sets, num_sets, adjacency);
3007
3008 /*
3009 * Now, the state->pair* fields have the info we need to assign sets to
3010 * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3011 * pair_vu[v] = u (both will be true, but we check both so that we can do
3012 * it in one pass)
3013 */
3014 chains = palloc0((num_sets + 1) * sizeof(int));
3015
3016 for (i = 1; i <= num_sets; ++i)
3017 {
3018 int u = state->pair_vu[i];
3019 int v = state->pair_uv[i];
3020
3021 if (u > 0 && u < i)
3022 chains[i] = chains[u];
3023 else if (v > 0 && v < i)
3024 chains[i] = chains[v];
3025 else
3026 chains[i] = ++num_chains;
3027 }
3028
3029 /* build result lists. */
3030 results = palloc0((num_chains + 1) * sizeof(List *));
3031
3032 for (i = 1; i <= num_sets; ++i)
3033 {
3034 int c = chains[i];
3035
3036 Assert(c > 0);
3037
3038 results[c] = list_concat(results[c], orig_sets[i]);
3039 }
3040
3041 /* push any empty sets back on the first list. */
3042 while (num_empty-- > 0)
3043 results[1] = lcons(NIL, results[1]);
3044
3045 /* make result list */
3046 for (i = 1; i <= num_chains; ++i)
3047 result = lappend(result, results[i]);
3048
3049 /*
3050 * Free all the things.
3051 *
3052 * (This is over-fussy for small sets but for large sets we could have
3053 * tied up a nontrivial amount of memory.)
3054 */
3056 pfree(results);
3057 pfree(chains);
3058 for (i = 1; i <= num_sets; ++i)
3059 if (adjacency[i])
3060 pfree(adjacency[i]);
3061 pfree(adjacency);
3062 pfree(adjacency_buf);
3063 pfree(orig_sets);
3064 for (i = 1; i <= num_sets; ++i)
3065 bms_free(set_masks[i]);
3066 pfree(set_masks);
3067
3068 return result;
3069}
3070
3071/*
3072 * Reorder the elements of a list of grouping sets such that they have correct
3073 * prefix relationships. Also inserts the GroupingSetData annotations.
3074 *
3075 * The input must be ordered with smallest sets first; the result is returned
3076 * with largest sets first. Note that the result shares no list substructure
3077 * with the input, so it's safe for the caller to modify it later.
3078 *
3079 * If we're passed in a sortclause, we follow its order of columns to the
3080 * extent possible, to minimize the chance that we add unnecessary sorts.
3081 * (We're trying here to ensure that GROUPING SETS ((a,b,c),(c)) ORDER BY c,b,a
3082 * gets implemented in one pass.)
3083 */
3084static List *
3085reorder_grouping_sets(List *groupingSets, List *sortclause)
3086{
3087 ListCell *lc;
3088 List *previous = NIL;
3089 List *result = NIL;
3090
3091 foreach(lc, groupingSets)
3092 {
3093 List *candidate = (List *) lfirst(lc);
3094 List *new_elems = list_difference_int(candidate, previous);
3096
3097 while (list_length(sortclause) > list_length(previous) &&
3098 new_elems != NIL)
3099 {
3100 SortGroupClause *sc = list_nth(sortclause, list_length(previous));
3101 int ref = sc->tleSortGroupRef;
3102
3103 if (list_member_int(new_elems, ref))
3104 {
3105 previous = lappend_int(previous, ref);
3106 new_elems = list_delete_int(new_elems, ref);
3107 }
3108 else
3109 {
3110 /* diverged from the sortclause; give up on it */
3111 sortclause = NIL;
3112 break;
3113 }
3114 }
3115
3116 previous = list_concat(previous, new_elems);
3117
3118 gs->set = list_copy(previous);
3119 result = lcons(gs, result);
3120 }
3121
3122 list_free(previous);
3123
3124 return result;
3125}
3126
3127/*
3128 * has_volatile_pathkey
3129 * Returns true if any PathKey in 'keys' has an EquivalenceClass
3130 * containing a volatile function. Otherwise returns false.
3131 */
3132static bool
3134{
3135 ListCell *lc;
3136
3137 foreach(lc, keys)
3138 {
3139 PathKey *pathkey = lfirst_node(PathKey, lc);
3140
3141 if (pathkey->pk_eclass->ec_has_volatile)
3142 return true;
3143 }
3144
3145 return false;
3146}
3147
3148/*
3149 * adjust_group_pathkeys_for_groupagg
3150 * Add pathkeys to root->group_pathkeys to reflect the best set of
3151 * pre-ordered input for ordered aggregates.
3152 *
3153 * We define "best" as the pathkeys that suit the largest number of
3154 * aggregate functions. We find these by looking at the first ORDER BY /
3155 * DISTINCT aggregate and take the pathkeys for that before searching for
3156 * other aggregates that require the same or a more strict variation of the
3157 * same pathkeys. We then repeat that process for any remaining aggregates
3158 * with different pathkeys and if we find another set of pathkeys that suits a
3159 * larger number of aggregates then we select those pathkeys instead.
3160 *
3161 * When the best pathkeys are found we also mark each Aggref that can use
3162 * those pathkeys as aggpresorted = true.
3163 *
3164 * Note: When an aggregate function's ORDER BY / DISTINCT clause contains any
3165 * volatile functions, we never make use of these pathkeys. We want to ensure
3166 * that sorts using volatile functions are done independently in each Aggref
3167 * rather than once at the query level. If we were to allow this then Aggrefs
3168 * with compatible sort orders would all transition their rows in the same
3169 * order if those pathkeys were deemed to be the best pathkeys to sort on.
3170 * Whereas, if some other set of Aggref's pathkeys happened to be deemed
3171 * better pathkeys to sort on, then the volatile function Aggrefs would be
3172 * left to perform their sorts individually. To avoid this inconsistent
3173 * behavior which could make Aggref results depend on what other Aggrefs the
3174 * query contains, we always force Aggrefs with volatile functions to perform
3175 * their own sorts.
3176 */
3177static void
3179{
3180 List *grouppathkeys = root->group_pathkeys;
3181 List *bestpathkeys;
3182 Bitmapset *bestaggs;
3183 Bitmapset *unprocessed_aggs;
3184 ListCell *lc;
3185 int i;
3186
3187 /* Shouldn't be here if there are grouping sets */
3188 Assert(root->parse->groupingSets == NIL);
3189 /* Shouldn't be here unless there are some ordered aggregates */
3190 Assert(root->numOrderedAggs > 0);
3191
3192 /* Do nothing if disabled */
3194 return;
3195
3196 /*
3197 * Make a first pass over all AggInfos to collect a Bitmapset containing
3198 * the indexes of all AggInfos to be processed below.
3199 */
3200 unprocessed_aggs = NULL;
3201 foreach(lc, root->agginfos)
3202 {
3203 AggInfo *agginfo = lfirst_node(AggInfo, lc);
3204 Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3205
3206 if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3207 continue;
3208
3209 /* Skip unless there's a DISTINCT or ORDER BY clause */
3210 if (aggref->aggdistinct == NIL && aggref->aggorder == NIL)
3211 continue;
3212
3213 /* Additional safety checks are needed if there's a FILTER clause */
3214 if (aggref->aggfilter != NULL)
3215 {
3216 ListCell *lc2;
3217 bool allow_presort = true;
3218
3219 /*
3220 * When the Aggref has a FILTER clause, it's possible that the
3221 * filter removes rows that cannot be sorted because the
3222 * expression to sort by results in an error during its
3223 * evaluation. This is a problem for presorting as that happens
3224 * before the FILTER, whereas without presorting, the Aggregate
3225 * node will apply the FILTER *before* sorting. So that we never
3226 * try to sort anything that might error, here we aim to skip over
3227 * any Aggrefs with arguments with expressions which, when
3228 * evaluated, could cause an ERROR. Vars and Consts are ok. There
3229 * may be more cases that should be allowed, but more thought
3230 * needs to be given. Err on the side of caution.
3231 */
3232 foreach(lc2, aggref->args)
3233 {
3234 TargetEntry *tle = (TargetEntry *) lfirst(lc2);
3235 Expr *expr = tle->expr;
3236
3237 while (IsA(expr, RelabelType))
3238 expr = (Expr *) (castNode(RelabelType, expr))->arg;
3239
3240 /* Common case, Vars and Consts are ok */
3241 if (IsA(expr, Var) || IsA(expr, Const))
3242 continue;
3243
3244 /* Unsupported. Don't try to presort for this Aggref */
3245 allow_presort = false;
3246 break;
3247 }
3248
3249 /* Skip unsupported Aggrefs */
3250 if (!allow_presort)
3251 continue;
3252 }
3253
3254 unprocessed_aggs = bms_add_member(unprocessed_aggs,
3256 }
3257
3258 /*
3259 * Now process all the unprocessed_aggs to find the best set of pathkeys
3260 * for the given set of aggregates.
3261 *
3262 * On the first outer loop here 'bestaggs' will be empty. We'll populate
3263 * this during the first loop using the pathkeys for the very first
3264 * AggInfo then taking any stronger pathkeys from any other AggInfos with
3265 * a more strict set of compatible pathkeys. Once the outer loop is
3266 * complete, we mark off all the aggregates with compatible pathkeys then
3267 * remove those from the unprocessed_aggs and repeat the process to try to
3268 * find another set of pathkeys that are suitable for a larger number of
3269 * aggregates. The outer loop will stop when there are not enough
3270 * unprocessed aggregates for it to be possible to find a set of pathkeys
3271 * to suit a larger number of aggregates.
3272 */
3273 bestpathkeys = NIL;
3274 bestaggs = NULL;
3275 while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3276 {
3277 Bitmapset *aggindexes = NULL;
3278 List *currpathkeys = NIL;
3279
3280 i = -1;
3281 while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3282 {
3283 AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3284 Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3285 List *sortlist;
3286 List *pathkeys;
3287
3288 if (aggref->aggdistinct != NIL)
3289 sortlist = aggref->aggdistinct;
3290 else
3291 sortlist = aggref->aggorder;
3292
3293 pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3294 aggref->args);
3295
3296 /*
3297 * Ignore Aggrefs which have volatile functions in their ORDER BY
3298 * or DISTINCT clause.
3299 */
3300 if (has_volatile_pathkey(pathkeys))
3301 {
3302 unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3303 continue;
3304 }
3305
3306 /*
3307 * When not set yet, take the pathkeys from the first unprocessed
3308 * aggregate.
3309 */
3310 if (currpathkeys == NIL)
3311 {
3312 currpathkeys = pathkeys;
3313
3314 /* include the GROUP BY pathkeys, if they exist */
3315 if (grouppathkeys != NIL)
3316 currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3317 currpathkeys);
3318
3319 /* record that we found pathkeys for this aggregate */
3320 aggindexes = bms_add_member(aggindexes, i);
3321 }
3322 else
3323 {
3324 /* now look for a stronger set of matching pathkeys */
3325
3326 /* include the GROUP BY pathkeys, if they exist */
3327 if (grouppathkeys != NIL)
3328 pathkeys = append_pathkeys(list_copy(grouppathkeys),
3329 pathkeys);
3330
3331 /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3332 switch (compare_pathkeys(currpathkeys, pathkeys))
3333 {
3334 case PATHKEYS_BETTER2:
3335 /* 'pathkeys' are stronger, use these ones instead */
3336 currpathkeys = pathkeys;
3337 /* FALLTHROUGH */
3338
3339 case PATHKEYS_BETTER1:
3340 /* 'pathkeys' are less strict */
3341 /* FALLTHROUGH */
3342
3343 case PATHKEYS_EQUAL:
3344 /* mark this aggregate as covered by 'currpathkeys' */
3345 aggindexes = bms_add_member(aggindexes, i);
3346 break;
3347
3348 case PATHKEYS_DIFFERENT:
3349 break;
3350 }
3351 }
3352 }
3353
3354 /* remove the aggregates that we've just processed */
3355 unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3356
3357 /*
3358 * If this pass included more aggregates than the previous best then
3359 * use these ones as the best set.
3360 */
3361 if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3362 {
3363 bestaggs = aggindexes;
3364 bestpathkeys = currpathkeys;
3365 }
3366 }
3367
3368 /*
3369 * If we found any ordered aggregates, update root->group_pathkeys to add
3370 * the best set of aggregate pathkeys. Note that bestpathkeys includes
3371 * the original GROUP BY pathkeys already.
3372 */
3373 if (bestpathkeys != NIL)
3374 root->group_pathkeys = bestpathkeys;
3375
3376 /*
3377 * Now that we've found the best set of aggregates we can set the
3378 * presorted flag to indicate to the executor that it needn't bother
3379 * performing a sort for these Aggrefs. We're able to do this now as
3380 * there's no chance of a Hash Aggregate plan as create_grouping_paths
3381 * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3382 * of ordered aggregates.
3383 */
3384 i = -1;
3385 while ((i = bms_next_member(bestaggs, i)) >= 0)
3386 {
3387 AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3388
3389 foreach(lc, agginfo->aggrefs)
3390 {
3391 Aggref *aggref = lfirst_node(Aggref, lc);
3392
3393 aggref->aggpresorted = true;
3394 }
3395 }
3396}
3397
3398/*
3399 * Compute query_pathkeys and other pathkeys during plan generation
3400 */
3401static void
3403{
3404 Query *parse = root->parse;
3405 standard_qp_extra *qp_extra = (standard_qp_extra *) extra;
3406 List *tlist = root->processed_tlist;
3407 List *activeWindows = qp_extra->activeWindows;
3408
3409 /*
3410 * Calculate pathkeys that represent grouping/ordering and/or ordered
3411 * aggregate requirements.
3412 */
3413 if (qp_extra->gset_data)
3414 {
3415 /*
3416 * With grouping sets, just use the first RollupData's groupClause. We
3417 * don't make any effort to optimize grouping clauses when there are
3418 * grouping sets, nor can we combine aggregate ordering keys with
3419 * grouping.
3420 */
3421 List *rollups = qp_extra->gset_data->rollups;
3422 List *groupClause = (rollups ? linitial_node(RollupData, rollups)->groupClause : NIL);
3423
3424 if (grouping_is_sortable(groupClause))
3425 {
3426 bool sortable;
3427
3428 /*
3429 * The groupClause is logically below the grouping step. So if
3430 * there is an RTE entry for the grouping step, we need to remove
3431 * its RT index from the sort expressions before we make PathKeys
3432 * for them.
3433 */
3434 root->group_pathkeys =
3436 &groupClause,
3437 tlist,
3438 false,
3439 parse->hasGroupRTE,
3440 &sortable,
3441 false);
3442 Assert(sortable);
3443 root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3444 }
3445 else
3446 {
3447 root->group_pathkeys = NIL;
3448 root->num_groupby_pathkeys = 0;
3449 }
3450 }
3451 else if (parse->groupClause || root->numOrderedAggs > 0)
3452 {
3453 /*
3454 * With a plain GROUP BY list, we can remove any grouping items that
3455 * are proven redundant by EquivalenceClass processing. For example,
3456 * we can remove y given "WHERE x = y GROUP BY x, y". These aren't
3457 * especially common cases, but they're nearly free to detect. Note
3458 * that we remove redundant items from processed_groupClause but not
3459 * the original parse->groupClause.
3460 */
3461 bool sortable;
3462
3463 /*
3464 * Convert group clauses into pathkeys. Set the ec_sortref field of
3465 * EquivalenceClass'es if it's not set yet.
3466 */
3467 root->group_pathkeys =
3469 &root->processed_groupClause,
3470 tlist,
3471 true,
3472 false,
3473 &sortable,
3474 true);
3475 if (!sortable)
3476 {
3477 /* Can't sort; no point in considering aggregate ordering either */
3478 root->group_pathkeys = NIL;
3479 root->num_groupby_pathkeys = 0;
3480 }
3481 else
3482 {
3483 root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3484 /* If we have ordered aggs, consider adding onto group_pathkeys */
3485 if (root->numOrderedAggs > 0)
3487 }
3488 }
3489 else
3490 {
3491 root->group_pathkeys = NIL;
3492 root->num_groupby_pathkeys = 0;
3493 }
3494
3495 /* We consider only the first (bottom) window in pathkeys logic */
3496 if (activeWindows != NIL)
3497 {
3498 WindowClause *wc = linitial_node(WindowClause, activeWindows);
3499
3500 root->window_pathkeys = make_pathkeys_for_window(root,
3501 wc,
3502 tlist);
3503 }
3504 else
3505 root->window_pathkeys = NIL;
3506
3507 /*
3508 * As with GROUP BY, we can discard any DISTINCT items that are proven
3509 * redundant by EquivalenceClass processing. The non-redundant list is
3510 * kept in root->processed_distinctClause, leaving the original
3511 * parse->distinctClause alone.
3512 */
3513 if (parse->distinctClause)
3514 {
3515 bool sortable;
3516
3517 /* Make a copy since pathkey processing can modify the list */
3518 root->processed_distinctClause = list_copy(parse->distinctClause);
3519 root->distinct_pathkeys =
3521 &root->processed_distinctClause,
3522 tlist,
3523 true,
3524 false,
3525 &sortable,
3526 false);
3527 if (!sortable)
3528 root->distinct_pathkeys = NIL;
3529 }
3530 else
3531 root->distinct_pathkeys = NIL;
3532
3533 root->sort_pathkeys =
3535 parse->sortClause,
3536 tlist);
3537
3538 /* setting setop_pathkeys might be useful to the union planner */
3539 if (qp_extra->setop != NULL)
3540 {
3541 List *groupClauses;
3542 bool sortable;
3543
3544 groupClauses = generate_setop_child_grouplist(qp_extra->setop, tlist);
3545
3546 root->setop_pathkeys =
3548 &groupClauses,
3549 tlist,
3550 false,
3551 false,
3552 &sortable,
3553 false);
3554 if (!sortable)
3555 root->setop_pathkeys = NIL;
3556 }
3557 else
3558 root->setop_pathkeys = NIL;
3559
3560 /*
3561 * Figure out whether we want a sorted result from query_planner.
3562 *
3563 * If we have a sortable GROUP BY clause, then we want a result sorted
3564 * properly for grouping. Otherwise, if we have window functions to
3565 * evaluate, we try to sort for the first window. Otherwise, if there's a
3566 * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
3567 * we try to produce output that's sufficiently well sorted for the
3568 * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
3569 * by the ORDER BY clause. Otherwise, if we're a subquery being planned
3570 * for a set operation which can benefit from presorted results and have a
3571 * sortable targetlist, we want to sort by the target list.
3572 *
3573 * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
3574 * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
3575 * that might just leave us failing to exploit an available sort order at
3576 * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
3577 * much easier, since we know that the parser ensured that one is a
3578 * superset of the other.
3579 */
3580 if (root->group_pathkeys)
3581 root->query_pathkeys = root->group_pathkeys;
3582 else if (root->window_pathkeys)
3583 root->query_pathkeys = root->window_pathkeys;
3584 else if (list_length(root->distinct_pathkeys) >
3585 list_length(root->sort_pathkeys))
3586 root->query_pathkeys = root->distinct_pathkeys;
3587 else if (root->sort_pathkeys)
3588 root->query_pathkeys = root->sort_pathkeys;
3589 else if (root->setop_pathkeys != NIL)
3590 root->query_pathkeys = root->setop_pathkeys;
3591 else
3592 root->query_pathkeys = NIL;
3593}
3594
3595/*
3596 * Estimate number of groups produced by grouping clauses (1 if not grouping)
3597 *
3598 * path_rows: number of output rows from scan/join step
3599 * gd: grouping sets data including list of grouping sets and their clauses
3600 * target_list: target list containing group clause references
3601 *
3602 * If doing grouping sets, we also annotate the gsets data with the estimates
3603 * for each set and each individual rollup list, with a view to later
3604 * determining whether some combination of them could be hashed instead.
3605 */
3606static double
3608 double path_rows,
3610 List *target_list)
3611{
3612 Query *parse = root->parse;
3613 double dNumGroups;
3614
3615 if (parse->groupClause)
3616 {
3617 List *groupExprs;
3618
3619 if (parse->groupingSets)
3620 {
3621 /* Add up the estimates for each grouping set */
3622 ListCell *lc;
3623
3624 Assert(gd); /* keep Coverity happy */
3625
3626 dNumGroups = 0;
3627
3628 foreach(lc, gd->rollups)
3629 {
3630 RollupData *rollup = lfirst_node(RollupData, lc);
3631 ListCell *lc2;
3632 ListCell *lc3;
3633
3634 groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3635 target_list);
3636
3637 rollup->numGroups = 0.0;
3638
3639 forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3640 {
3641 List *gset = (List *) lfirst(lc2);
3643 double numGroups = estimate_num_groups(root,
3644 groupExprs,
3645 path_rows,
3646 &gset,
3647 NULL);
3648
3649 gs->numGroups = numGroups;
3650 rollup->numGroups += numGroups;
3651 }
3652
3653 dNumGroups += rollup->numGroups;
3654 }
3655
3656 if (gd->hash_sets_idx)
3657 {
3658 ListCell *lc2;
3659
3660 gd->dNumHashGroups = 0;
3661
3662 groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3663 target_list);
3664
3665 forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3666 {
3667 List *gset = (List *) lfirst(lc);
3669 double numGroups = estimate_num_groups(root,
3670 groupExprs,
3671 path_rows,
3672 &gset,
3673 NULL);
3674
3675 gs->numGroups = numGroups;
3676 gd->dNumHashGroups += numGroups;
3677 }
3678
3679 dNumGroups += gd->dNumHashGroups;
3680 }
3681 }
3682 else
3683 {
3684 /* Plain GROUP BY -- estimate based on optimized groupClause */
3685 groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3686 target_list);
3687
3688 dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3689 NULL, NULL);
3690 }
3691 }
3692 else if (parse->groupingSets)
3693 {
3694 /* Empty grouping sets ... one result row for each one */
3695 dNumGroups = list_length(parse->groupingSets);
3696 }
3697 else if (parse->hasAggs || root->hasHavingQual)
3698 {
3699 /* Plain aggregation, one result row */
3700 dNumGroups = 1;
3701 }
3702 else
3703 {
3704 /* Not grouping */
3705 dNumGroups = 1;
3706 }
3707
3708 return dNumGroups;
3709}
3710
3711/*
3712 * create_grouping_paths
3713 *
3714 * Build a new upperrel containing Paths for grouping and/or aggregation.
3715 * Along the way, we also build an upperrel for Paths which are partially
3716 * grouped and/or aggregated. A partially grouped and/or aggregated path
3717 * needs a FinalizeAggregate node to complete the aggregation. Currently,
3718 * the only partially grouped paths we build are also partial paths; that
3719 * is, they need a Gather and then a FinalizeAggregate.
3720 *
3721 * input_rel: contains the source-data Paths
3722 * target: the pathtarget for the result Paths to compute
3723 * gd: grouping sets data including list of grouping sets and their clauses
3724 *
3725 * Note: all Paths in input_rel are expected to return the target computed
3726 * by make_group_input_target.
3727 */
3728static RelOptInfo *
3730 RelOptInfo *input_rel,
3731 PathTarget *target,
3732 bool target_parallel_safe,
3734{
3735 Query *parse = root->parse;
3736 RelOptInfo *grouped_rel;
3737 RelOptInfo *partially_grouped_rel;
3738 AggClauseCosts agg_costs;
3739
3740 MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3742
3743 /*
3744 * Create grouping relation to hold fully aggregated grouping and/or
3745 * aggregation paths.
3746 */
3747 grouped_rel = make_grouping_rel(root, input_rel, target,
3748 target_parallel_safe, parse->havingQual);
3749
3750 /*
3751 * Create either paths for a degenerate grouping or paths for ordinary
3752 * grouping, as appropriate.
3753 */
3755 create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3756 else
3757 {
3758 int flags = 0;
3759 GroupPathExtraData extra;
3760
3761 /*
3762 * Determine whether it's possible to perform sort-based
3763 * implementations of grouping. (Note that if processed_groupClause
3764 * is empty, grouping_is_sortable() is trivially true, and all the
3765 * pathkeys_contained_in() tests will succeed too, so that we'll
3766 * consider every surviving input path.)
3767 *
3768 * If we have grouping sets, we might be able to sort some but not all
3769 * of them; in this case, we need can_sort to be true as long as we
3770 * must consider any sorted-input plan.
3771 */
3772 if ((gd && gd->rollups != NIL)
3773 || grouping_is_sortable(root->processed_groupClause))
3774 flags |= GROUPING_CAN_USE_SORT;
3775
3776 /*
3777 * Determine whether we should consider hash-based implementations of
3778 * grouping.
3779 *
3780 * Hashed aggregation only applies if we're grouping. If we have
3781 * grouping sets, some groups might be hashable but others not; in
3782 * this case we set can_hash true as long as there is nothing globally
3783 * preventing us from hashing (and we should therefore consider plans
3784 * with hashes).
3785 *
3786 * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3787 * BY aggregates. (Doing so would imply storing *all* the input
3788 * values in the hash table, and/or running many sorts in parallel,
3789 * either of which seems like a certain loser.) We similarly don't
3790 * support ordered-set aggregates in hashed aggregation, but that case
3791 * is also included in the numOrderedAggs count.
3792 *
3793 * Note: grouping_is_hashable() is much more expensive to check than
3794 * the other gating conditions, so we want to do it last.
3795 */
3796 if ((parse->groupClause != NIL &&
3797 root->numOrderedAggs == 0 &&
3798 (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3799 flags |= GROUPING_CAN_USE_HASH;
3800
3801 /*
3802 * Determine whether partial aggregation is possible.
3803 */
3804 if (can_partial_agg(root))
3805 flags |= GROUPING_CAN_PARTIAL_AGG;
3806
3807 extra.flags = flags;
3808 extra.target_parallel_safe = target_parallel_safe;
3809 extra.havingQual = parse->havingQual;
3810 extra.targetList = parse->targetList;
3811 extra.partial_costs_set = false;
3812
3813 /*
3814 * Determine whether partitionwise aggregation is in theory possible.
3815 * It can be disabled by the user, and for now, we don't try to
3816 * support grouping sets. create_ordinary_grouping_paths() will check
3817 * additional conditions, such as whether input_rel is partitioned.
3818 */
3819 if (enable_partitionwise_aggregate && !parse->groupingSets)
3821 else
3823
3824 create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3825 &agg_costs, gd, &extra,
3826 &partially_grouped_rel);
3827 }
3828
3829 set_cheapest(grouped_rel);
3830 return grouped_rel;
3831}
3832
3833/*
3834 * make_grouping_rel
3835 *
3836 * Create a new grouping rel and set basic properties.
3837 *
3838 * input_rel represents the underlying scan/join relation.
3839 * target is the output expected from the grouping relation.
3840 */
3841static RelOptInfo *
3843 PathTarget *target, bool target_parallel_safe,
3844 Node *havingQual)
3845{
3846 RelOptInfo *grouped_rel;
3847
3848 if (IS_OTHER_REL(input_rel))
3849 {
3851 input_rel->relids);
3852 grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3853 }
3854 else
3855 {
3856 /*
3857 * By tradition, the relids set for the main grouping relation is
3858 * NULL. (This could be changed, but might require adjustments
3859 * elsewhere.)
3860 */
3861 grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3862 }
3863
3864 /* Set target. */
3865 grouped_rel->reltarget = target;
3866
3867 /*
3868 * If the input relation is not parallel-safe, then the grouped relation
3869 * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3870 * target list and HAVING quals are parallel-safe.
3871 */
3872 if (input_rel->consider_parallel && target_parallel_safe &&
3873 is_parallel_safe(root, (Node *) havingQual))
3874 grouped_rel->consider_parallel = true;
3875
3876 /*
3877 * If the input rel belongs to a single FDW, so does the grouped rel.
3878 */
3879 grouped_rel->serverid = input_rel->serverid;
3880 grouped_rel->userid = input_rel->userid;
3881 grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3882 grouped_rel->fdwroutine = input_rel->fdwroutine;
3883
3884 return grouped_rel;
3885}
3886
3887/*
3888 * is_degenerate_grouping
3889 *
3890 * A degenerate grouping is one in which the query has a HAVING qual and/or
3891 * grouping sets, but no aggregates and no GROUP BY (which implies that the
3892 * grouping sets are all empty).
3893 */
3894static bool
3896{
3897 Query *parse = root->parse;
3898
3899 return (root->hasHavingQual || parse->groupingSets) &&
3900 !parse->hasAggs && parse->groupClause == NIL;
3901}
3902
3903/*
3904 * create_degenerate_grouping_paths
3905 *
3906 * When the grouping is degenerate (see is_degenerate_grouping), we are
3907 * supposed to emit either zero or one row for each grouping set depending on
3908 * whether HAVING succeeds. Furthermore, there cannot be any variables in
3909 * either HAVING or the targetlist, so we actually do not need the FROM table
3910 * at all! We can just throw away the plan-so-far and generate a Result node.
3911 * This is a sufficiently unusual corner case that it's not worth contorting
3912 * the structure of this module to avoid having to generate the earlier paths
3913 * in the first place.
3914 */
3915static void
3917 RelOptInfo *grouped_rel)
3918{
3919 Query *parse = root->parse;
3920 int nrows;
3921 Path *path;
3922
3923 nrows = list_length(parse->groupingSets);
3924 if (nrows > 1)
3925 {
3926 /*
3927 * Doesn't seem worthwhile writing code to cons up a generate_series
3928 * or a values scan to emit multiple rows. Instead just make N clones
3929 * and append them. (With a volatile HAVING clause, this means you
3930 * might get between 0 and N output rows. Offhand I think that's
3931 * desired.)
3932 */
3933 List *paths = NIL;
3934
3935 while (--nrows >= 0)
3936 {
3937 path = (Path *)
3938 create_group_result_path(root, grouped_rel,
3939 grouped_rel->reltarget,
3940 (List *) parse->havingQual);
3941 paths = lappend(paths, path);
3942 }
3943 path = (Path *)
3945 grouped_rel,
3946 paths,
3947 NIL,
3948 NIL,
3949 NULL,
3950 0,
3951 false,
3952 -1);
3953 }
3954 else
3955 {
3956 /* No grouping sets, or just one, so one output row */
3957 path = (Path *)
3958 create_group_result_path(root, grouped_rel,
3959 grouped_rel->reltarget,
3960 (List *) parse->havingQual);
3961 }
3962
3963 add_path(grouped_rel, path);
3964}
3965
3966/*
3967 * create_ordinary_grouping_paths
3968 *
3969 * Create grouping paths for the ordinary (that is, non-degenerate) case.
3970 *
3971 * We need to consider sorted and hashed aggregation in the same function,
3972 * because otherwise (1) it would be harder to throw an appropriate error
3973 * message if neither way works, and (2) we should not allow hashtable size
3974 * considerations to dissuade us from using hashing if sorting is not possible.
3975 *
3976 * *partially_grouped_rel_p will be set to the partially grouped rel which this
3977 * function creates, or to NULL if it doesn't create one.
3978 */
3979static void
3981 RelOptInfo *grouped_rel,
3982 const AggClauseCosts *agg_costs,
3984 GroupPathExtraData *extra,
3985 RelOptInfo **partially_grouped_rel_p)
3986{
3987 Path *cheapest_path = input_rel->cheapest_total_path;
3988 RelOptInfo *partially_grouped_rel = NULL;
3989 double dNumGroups;
3991
3992 /*
3993 * If this is the topmost grouping relation or if the parent relation is
3994 * doing some form of partitionwise aggregation, then we may be able to do
3995 * it at this level also. However, if the input relation is not
3996 * partitioned, partitionwise aggregate is impossible.
3997 */
3998 if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
3999 IS_PARTITIONED_REL(input_rel))
4000 {
4001 /*
4002 * If this is the topmost relation or if the parent relation is doing
4003 * full partitionwise aggregation, then we can do full partitionwise
4004 * aggregation provided that the GROUP BY clause contains all of the
4005 * partitioning columns at this level and the collation used by GROUP
4006 * BY matches the partitioning collation. Otherwise, we can do at
4007 * most partial partitionwise aggregation. But if partial aggregation
4008 * is not supported in general then we can't use it for partitionwise
4009 * aggregation either.
4010 *
4011 * Check parse->groupClause not processed_groupClause, because it's
4012 * okay if some of the partitioning columns were proved redundant.
4013 */
4014 if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
4015 group_by_has_partkey(input_rel, extra->targetList,
4016 root->parse->groupClause))
4018 else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4020 else
4022 }
4023
4024 /*
4025 * Before generating paths for grouped_rel, we first generate any possible
4026 * partially grouped paths; that way, later code can easily consider both
4027 * parallel and non-parallel approaches to grouping.
4028 */
4029 if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4030 {
4031 bool force_rel_creation;
4032
4033 /*
4034 * If we're doing partitionwise aggregation at this level, force
4035 * creation of a partially_grouped_rel so we can add partitionwise
4036 * paths to it.
4037 */
4038 force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
4039
4040 partially_grouped_rel =
4042 grouped_rel,
4043 input_rel,
4044 gd,
4045 extra,
4046 force_rel_creation);
4047 }
4048
4049 /* Set out parameter. */
4050 *partially_grouped_rel_p = partially_grouped_rel;
4051
4052 /* Apply partitionwise aggregation technique, if possible. */
4053 if (patype != PARTITIONWISE_AGGREGATE_NONE)
4054 create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
4055 partially_grouped_rel, agg_costs,
4056 gd, patype, extra);
4057
4058 /* If we are doing partial aggregation only, return. */
4060 {
4061 Assert(partially_grouped_rel);
4062
4063 if (partially_grouped_rel->pathlist)
4064 set_cheapest(partially_grouped_rel);
4065
4066 return;
4067 }
4068
4069 /* Gather any partially grouped partial paths. */
4070 if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4071 {
4072 gather_grouping_paths(root, partially_grouped_rel);
4073 set_cheapest(partially_grouped_rel);
4074 }
4075
4076 /*
4077 * Estimate number of groups.
4078 */
4079 dNumGroups = get_number_of_groups(root,
4080 cheapest_path->rows,
4081 gd,
4082 extra->targetList);
4083
4084 /* Build final grouping paths */
4085 add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4086 partially_grouped_rel, agg_costs, gd,
4087 dNumGroups, extra);
4088
4089 /* Give a helpful error if we failed to find any implementation */
4090 if (grouped_rel->pathlist == NIL)
4091 ereport(ERROR,
4092 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4093 errmsg("could not implement GROUP BY"),
4094 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4095
4096 /*
4097 * If there is an FDW that's responsible for all baserels of the query,
4098 * let it consider adding ForeignPaths.
4099 */
4100 if (grouped_rel->fdwroutine &&
4101 grouped_rel->fdwroutine->GetForeignUpperPaths)
4102 grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4103 input_rel, grouped_rel,
4104 extra);
4105
4106 /* Let extensions possibly add some more paths */
4108 (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4109 input_rel, grouped_rel,
4110 extra);
4111}
4112
4113/*
4114 * For a given input path, consider the possible ways of doing grouping sets on
4115 * it, by combinations of hashing and sorting. This can be called multiple
4116 * times, so it's important that it not scribble on input. No result is
4117 * returned, but any generated paths are added to grouped_rel.
4118 */
4119static void
4121 RelOptInfo *grouped_rel,
4122 Path *path,
4123 bool is_sorted,
4124 bool can_hash,
4126 const AggClauseCosts *agg_costs,
4127 double dNumGroups)
4128{
4129 Query *parse = root->parse;
4130 Size hash_mem_limit = get_hash_memory_limit();
4131
4132 /*
4133 * If we're not being offered sorted input, then only consider plans that
4134 * can be done entirely by hashing.
4135 *
4136 * We can hash everything if it looks like it'll fit in hash_mem. But if
4137 * the input is actually sorted despite not being advertised as such, we
4138 * prefer to make use of that in order to use less memory.
4139 *
4140 * If none of the grouping sets are sortable, then ignore the hash_mem
4141 * limit and generate a path anyway, since otherwise we'll just fail.
4142 */
4143 if (!is_sorted)
4144 {
4145 List *new_rollups = NIL;
4146 RollupData *unhashed_rollup = NULL;
4147 List *sets_data;
4148 List *empty_sets_data = NIL;
4149 List *empty_sets = NIL;
4150 ListCell *lc;
4151 ListCell *l_start = list_head(gd->rollups);
4152 AggStrategy strat = AGG_HASHED;
4153 double hashsize;
4154 double exclude_groups = 0.0;
4155
4156 Assert(can_hash);
4157
4158 /*
4159 * If the input is coincidentally sorted usefully (which can happen
4160 * even if is_sorted is false, since that only means that our caller
4161 * has set up the sorting for us), then save some hashtable space by
4162 * making use of that. But we need to watch out for degenerate cases:
4163 *
4164 * 1) If there are any empty grouping sets, then group_pathkeys might
4165 * be NIL if all non-empty grouping sets are unsortable. In this case,
4166 * there will be a rollup containing only empty groups, and the
4167 * pathkeys_contained_in test is vacuously true; this is ok.
4168 *
4169 * XXX: the above relies on the fact that group_pathkeys is generated
4170 * from the first rollup. If we add the ability to consider multiple
4171 * sort orders for grouping input, this assumption might fail.
4172 *
4173 * 2) If there are no empty sets and only unsortable sets, then the
4174 * rollups list will be empty (and thus l_start == NULL), and
4175 * group_pathkeys will be NIL; we must ensure that the vacuously-true
4176 * pathkeys_contained_in test doesn't cause us to crash.
4177 */
4178 if (l_start != NULL &&
4179 pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4180 {
4181 unhashed_rollup = lfirst_node(RollupData, l_start);
4182 exclude_groups = unhashed_rollup->numGroups;
4183 l_start = lnext(gd->rollups, l_start);
4184 }
4185
4187 path,
4188 agg_costs,
4189 dNumGroups - exclude_groups);
4190
4191 /*
4192 * gd->rollups is empty if we have only unsortable columns to work
4193 * with. Override hash_mem in that case; otherwise, we'll rely on the
4194 * sorted-input case to generate usable mixed paths.
4195 */
4196 if (hashsize > hash_mem_limit && gd->rollups)
4197 return; /* nope, won't fit */
4198
4199 /*
4200 * We need to burst the existing rollups list into individual grouping
4201 * sets and recompute a groupClause for each set.
4202 */
4203 sets_data = list_copy(gd->unsortable_sets);
4204
4205 for_each_cell(lc, gd->rollups, l_start)
4206 {
4207 RollupData *rollup = lfirst_node(RollupData, lc);
4208
4209 /*
4210 * If we find an unhashable rollup that's not been skipped by the
4211 * "actually sorted" check above, we can't cope; we'd need sorted
4212 * input (with a different sort order) but we can't get that here.
4213 * So bail out; we'll get a valid path from the is_sorted case
4214 * instead.
4215 *
4216 * The mere presence of empty grouping sets doesn't make a rollup
4217 * unhashable (see preprocess_grouping_sets), we handle those
4218 * specially below.
4219 */
4220 if (!rollup->hashable)
4221 return;
4222
4223 sets_data = list_concat(sets_data, rollup->gsets_data);
4224 }
4225 foreach(lc, sets_data)
4226 {
4228 List *gset = gs->set;
4229 RollupData *rollup;
4230
4231 if (gset == NIL)
4232 {
4233 /* Empty grouping sets can't be hashed. */
4234 empty_sets_data = lappend(empty_sets_data, gs);
4235 empty_sets = lappend(empty_sets, NIL);
4236 }
4237 else
4238 {
4239 rollup = makeNode(RollupData);
4240
4241 rollup->groupClause = preprocess_groupclause(root, gset);
4242 rollup->gsets_data = list_make1(gs);
4243 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4244 rollup->gsets_data,
4246 rollup->numGroups = gs->numGroups;
4247 rollup->hashable = true;
4248 rollup->is_hashed = true;
4249 new_rollups = lappend(new_rollups, rollup);
4250 }
4251 }
4252
4253 /*
4254 * If we didn't find anything nonempty to hash, then bail. We'll
4255 * generate a path from the is_sorted case.
4256 */
4257 if (new_rollups == NIL)
4258 return;
4259
4260 /*
4261 * If there were empty grouping sets they should have been in the
4262 * first rollup.
4263 */
4264 Assert(!unhashed_rollup || !empty_sets);
4265
4266 if (unhashed_rollup)
4267 {
4268 new_rollups = lappend(new_rollups, unhashed_rollup);
4269 strat = AGG_MIXED;
4270 }
4271 else if (empty_sets)
4272 {
4273 RollupData *rollup = makeNode(RollupData);
4274
4275 rollup->groupClause = NIL;
4276 rollup->gsets_data = empty_sets_data;
4277 rollup->gsets = empty_sets;
4278 rollup->numGroups = list_length(empty_sets);
4279 rollup->hashable = false;
4280 rollup->is_hashed = false;
4281 new_rollups = lappend(new_rollups, rollup);
4282 strat = AGG_MIXED;
4283 }
4284
4285 add_path(grouped_rel, (Path *)
4287 grouped_rel,
4288 path,
4289 (List *) parse->havingQual,
4290 strat,
4291 new_rollups,
4292 agg_costs));
4293 return;
4294 }
4295
4296 /*
4297 * If we have sorted input but nothing we can do with it, bail.
4298 */
4299 if (gd->rollups == NIL)
4300 return;
4301
4302 /*
4303 * Given sorted input, we try and make two paths: one sorted and one mixed
4304 * sort/hash. (We need to try both because hashagg might be disabled, or
4305 * some columns might not be sortable.)
4306 *
4307 * can_hash is passed in as false if some obstacle elsewhere (such as
4308 * ordered aggs) means that we shouldn't consider hashing at all.
4309 */
4310 if (can_hash && gd->any_hashable)
4311 {
4312 List *rollups = NIL;
4313 List *hash_sets = list_copy(gd->unsortable_sets);
4314 double availspace = hash_mem_limit;
4315 ListCell *lc;
4316
4317 /*
4318 * Account first for space needed for groups we can't sort at all.
4319 */
4320 availspace -= estimate_hashagg_tablesize(root,
4321 path,
4322 agg_costs,
4323 gd->dNumHashGroups);
4324
4325 if (availspace > 0 && list_length(gd->rollups) > 1)
4326 {
4327 double scale;
4328 int num_rollups = list_length(gd->rollups);
4329 int k_capacity;
4330 int *k_weights = palloc(num_rollups * sizeof(int));
4331 Bitmapset *hash_items = NULL;
4332 int i;
4333
4334 /*
4335 * We treat this as a knapsack problem: the knapsack capacity
4336 * represents hash_mem, the item weights are the estimated memory
4337 * usage of the hashtables needed to implement a single rollup,
4338 * and we really ought to use the cost saving as the item value;
4339 * however, currently the costs assigned to sort nodes don't
4340 * reflect the comparison costs well, and so we treat all items as
4341 * of equal value (each rollup we hash instead saves us one sort).
4342 *
4343 * To use the discrete knapsack, we need to scale the values to a
4344 * reasonably small bounded range. We choose to allow a 5% error
4345 * margin; we have no more than 4096 rollups in the worst possible
4346 * case, which with a 5% error margin will require a bit over 42MB
4347 * of workspace. (Anyone wanting to plan queries that complex had
4348 * better have the memory for it. In more reasonable cases, with
4349 * no more than a couple of dozen rollups, the memory usage will
4350 * be negligible.)
4351 *
4352 * k_capacity is naturally bounded, but we clamp the values for
4353 * scale and weight (below) to avoid overflows or underflows (or
4354 * uselessly trying to use a scale factor less than 1 byte).
4355 */
4356 scale = Max(availspace / (20.0 * num_rollups), 1.0);
4357 k_capacity = (int) floor(availspace / scale);
4358
4359 /*
4360 * We leave the first rollup out of consideration since it's the
4361 * one that matches the input sort order. We assign indexes "i"
4362 * to only those entries considered for hashing; the second loop,
4363 * below, must use the same condition.
4364 */
4365 i = 0;
4366 for_each_from(lc, gd->rollups, 1)
4367 {
4368 RollupData *rollup = lfirst_node(RollupData, lc);
4369
4370 if (rollup->hashable)
4371 {
4372 double sz = estimate_hashagg_tablesize(root,
4373 path,
4374 agg_costs,
4375 rollup->numGroups);
4376
4377 /*
4378 * If sz is enormous, but hash_mem (and hence scale) is
4379 * small, avoid integer overflow here.
4380 */
4381 k_weights[i] = (int) Min(floor(sz / scale),
4382 k_capacity + 1.0);
4383 ++i;
4384 }
4385 }
4386
4387 /*
4388 * Apply knapsack algorithm; compute the set of items which
4389 * maximizes the value stored (in this case the number of sorts
4390 * saved) while keeping the total size (approximately) within
4391 * capacity.
4392 */
4393 if (i > 0)
4394 hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4395
4396 if (!bms_is_empty(hash_items))
4397 {
4398 rollups = list_make1(linitial(gd->rollups));
4399
4400 i = 0;
4401 for_each_from(lc, gd->rollups, 1)
4402 {
4403 RollupData *rollup = lfirst_node(RollupData, lc);
4404
4405 if (rollup->hashable)
4406 {
4407 if (bms_is_member(i, hash_items))
4408 hash_sets = list_concat(hash_sets,
4409 rollup->gsets_data);
4410 else
4411 rollups = lappend(rollups, rollup);
4412 ++i;
4413 }
4414 else
4415 rollups = lappend(rollups, rollup);
4416 }
4417 }
4418 }
4419
4420 if (!rollups && hash_sets)
4421 rollups = list_copy(gd->rollups);
4422
4423 foreach(lc, hash_sets)
4424 {
4426 RollupData *rollup = makeNode(RollupData);
4427
4428 Assert(gs->set != NIL);
4429
4431 rollup->gsets_data = list_make1(gs);
4432 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4433 rollup->gsets_data,
4435 rollup->numGroups = gs->numGroups;
4436 rollup->hashable = true;
4437 rollup->is_hashed = true;
4438 rollups = lcons(rollup, rollups);
4439 }
4440
4441 if (rollups)
4442 {
4443 add_path(grouped_rel, (Path *)
4445 grouped_rel,
4446 path,
4447 (List *) parse->havingQual,
4448 AGG_MIXED,
4449 rollups,
4450 agg_costs));
4451 }
4452 }
4453
4454 /*
4455 * Now try the simple sorted case.
4456 */
4457 if (!gd->unsortable_sets)
4458 add_path(grouped_rel, (Path *)
4460 grouped_rel,
4461 path,
4462 (List *) parse->havingQual,
4463 AGG_SORTED,
4464 gd->rollups,
4465 agg_costs));
4466}
4467
4468/*
4469 * create_window_paths
4470 *
4471 * Build a new upperrel containing Paths for window-function evaluation.
4472 *
4473 * input_rel: contains the source-data Paths
4474 * input_target: result of make_window_input_target
4475 * output_target: what the topmost WindowAggPath should return
4476 * wflists: result of find_window_functions
4477 * activeWindows: result of select_active_windows
4478 *
4479 * Note: all Paths in input_rel are expected to return input_target.
4480 */
4481static RelOptInfo *
4483 RelOptInfo *input_rel,
4484 PathTarget *input_target,
4485 PathTarget *output_target,
4486 bool output_target_parallel_safe,
4487 WindowFuncLists *wflists,
4488 List *activeWindows)
4489{
4490 RelOptInfo *window_rel;
4491 ListCell *lc;
4492
4493 /* For now, do all work in the (WINDOW, NULL) upperrel */
4494 window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4495
4496 /*
4497 * If the input relation is not parallel-safe, then the window relation
4498 * can't be parallel-safe, either. Otherwise, we need to examine the
4499 * target list and active windows for non-parallel-safe constructs.
4500 */
4501 if (input_rel->consider_parallel && output_target_parallel_safe &&
4502 is_parallel_safe(root, (Node *) activeWindows))
4503 window_rel->consider_parallel = true;
4504
4505 /*
4506 * If the input rel belongs to a single FDW, so does the window rel.
4507 */
4508 window_rel->serverid = input_rel->serverid;
4509 window_rel->userid = input_rel->userid;
4510 window_rel->useridiscurrent = input_rel->useridiscurrent;
4511 window_rel->fdwroutine = input_rel->fdwroutine;
4512
4513 /*
4514 * Consider computing window functions starting from the existing
4515 * cheapest-total path (which will likely require a sort) as well as any
4516 * existing paths that satisfy or partially satisfy root->window_pathkeys.
4517 */
4518 foreach(lc, input_rel->pathlist)
4519 {
4520 Path *path = (Path *) lfirst(lc);
4521 int presorted_keys;
4522
4523 if (path == input_rel->cheapest_total_path ||
4524 pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4525 &presorted_keys) ||
4526 presorted_keys > 0)
4528 window_rel,
4529 path,
4530 input_target,
4531 output_target,
4532 wflists,
4533 activeWindows);
4534 }
4535
4536 /*
4537 * If there is an FDW that's responsible for all baserels of the query,
4538 * let it consider adding ForeignPaths.
4539 */
4540 if (window_rel->fdwroutine &&
4541 window_rel->fdwroutine->GetForeignUpperPaths)
4542 window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4543 input_rel, window_rel,
4544 NULL);
4545
4546 /* Let extensions possibly add some more paths */
4548 (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4549 input_rel, window_rel, NULL);
4550
4551 /* Now choose the best path(s) */
4552 set_cheapest(window_rel);
4553
4554 return window_rel;
4555}
4556
4557/*
4558 * Stack window-function implementation steps atop the given Path, and
4559 * add the result to window_rel.
4560 *
4561 * window_rel: upperrel to contain result
4562 * path: input Path to use (must return input_target)
4563 * input_target: result of make_window_input_target
4564 * output_target: what the topmost WindowAggPath should return
4565 * wflists: result of find_window_functions
4566 * activeWindows: result of select_active_windows
4567 */
4568static void
4570 RelOptInfo *window_rel,
4571 Path *path,
4572 PathTarget *input_target,
4573 PathTarget *output_target,
4574 WindowFuncLists *wflists,
4575 List *activeWindows)
4576{
4577 PathTarget *window_target;
4578 ListCell *l;
4579 List *topqual = NIL;
4580
4581 /*
4582 * Since each window clause could require a different sort order, we stack
4583 * up a WindowAgg node for each clause, with sort steps between them as
4584 * needed. (We assume that select_active_windows chose a good order for
4585 * executing the clauses in.)
4586 *
4587 * input_target should contain all Vars and Aggs needed for the result.
4588 * (In some cases we wouldn't need to propagate all of these all the way
4589 * to the top, since they might only be needed as inputs to WindowFuncs.
4590 * It's probably not worth trying to optimize that though.) It must also
4591 * contain all window partitioning and sorting expressions, to ensure
4592 * they're computed only once at the bottom of the stack (that's critical
4593 * for volatile functions). As we climb up the stack, we'll add outputs
4594 * for the WindowFuncs computed at each level.
4595 */
4596 window_target = input_target;
4597
4598 foreach(l, activeWindows)
4599 {
4601 List *window_pathkeys;
4602 List *runcondition = NIL;
4603 int presorted_keys;
4604 bool is_sorted;
4605 bool topwindow;
4606 ListCell *lc2;
4607
4608 window_pathkeys = make_pathkeys_for_window(root,
4609 wc,
4610 root->processed_tlist);
4611
4612 is_sorted = pathkeys_count_contained_in(window_pathkeys,
4613 path->pathkeys,
4614 &presorted_keys);
4615
4616 /* Sort if necessary */
4617 if (!is_sorted)
4618 {
4619 /*
4620 * No presorted keys or incremental sort disabled, just perform a
4621 * complete sort.
4622 */
4623 if (presorted_keys == 0 || !enable_incremental_sort)
4624 path = (Path *) create_sort_path(root, window_rel,
4625 path,
4626 window_pathkeys,
4627 -1.0);
4628 else
4629 {
4630 /*
4631 * Since we have presorted keys and incremental sort is
4632 * enabled, just use incremental sort.
4633 */
4635 window_rel,
4636 path,
4637 window_pathkeys,
4638 presorted_keys,
4639 -1.0);
4640 }
4641 }
4642
4643 if (lnext(activeWindows, l))
4644 {
4645 /*
4646 * Add the current WindowFuncs to the output target for this
4647 * intermediate WindowAggPath. We must copy window_target to
4648 * avoid changing the previous path's target.
4649 *
4650 * Note: a WindowFunc adds nothing to the target's eval costs; but
4651 * we do need to account for the increase in tlist width.
4652 */
4653 int64 tuple_width = window_target->width;
4654
4655 window_target = copy_pathtarget(window_target);
4656 foreach(lc2, wflists->windowFuncs[wc->winref])
4657 {
4658 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4659
4660 add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4661 tuple_width += get_typavgwidth(wfunc->wintype, -1);
4662 }
4663 window_target->width = clamp_width_est(tuple_width);
4664 }
4665 else
4666 {
4667 /* Install the goal target in the topmost WindowAgg */
4668 window_target = output_target;
4669 }
4670
4671 /* mark the final item in the list as the top-level window */
4672 topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4673
4674 /*
4675 * Collect the WindowFuncRunConditions from each WindowFunc and
4676 * convert them into OpExprs
4677 */
4678 foreach(lc2, wflists->windowFuncs[wc->winref])
4679 {
4680 ListCell *lc3;
4681 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4682
4683 foreach(lc3, wfunc->runCondition)
4684 {
4685 WindowFuncRunCondition *wfuncrc =
4687 Expr *opexpr;
4688 Expr *leftop;
4689 Expr *rightop;
4690
4691 if (wfuncrc->wfunc_left)
4692 {
4693 leftop = (Expr *) copyObject(wfunc);
4694 rightop = copyObject(wfuncrc->arg);
4695 }
4696 else
4697 {
4698 leftop = copyObject(wfuncrc->arg);
4699 rightop = (Expr *) copyObject(wfunc);
4700 }
4701
4702 opexpr = make_opclause(wfuncrc->opno,
4703 BOOLOID,
4704 false,
4705 leftop,
4706 rightop,
4707 InvalidOid,
4708 wfuncrc->inputcollid);
4709
4710 runcondition = lappend(runcondition, opexpr);
4711
4712 if (!topwindow)
4713 topqual = lappend(topqual, opexpr);
4714 }
4715 }
4716
4717 path = (Path *)
4718 create_windowagg_path(root, window_rel, path, window_target,
4719 wflists->windowFuncs[wc->winref],
4720 runcondition, wc,
4721 topwindow ? topqual : NIL, topwindow);
4722 }
4723
4724 add_path(window_rel, path);
4725}
4726
4727/*
4728 * create_distinct_paths
4729 *
4730 * Build a new upperrel containing Paths for SELECT DISTINCT evaluation.
4731 *
4732 * input_rel: contains the source-data Paths
4733 * target: the pathtarget for the result Paths to compute
4734 *
4735 * Note: input paths should already compute the desired pathtarget, since
4736 * Sort/Unique won't project anything.
4737 */
4738static RelOptInfo *
4740 PathTarget *target)
4741{
4742 RelOptInfo *distinct_rel;
4743
4744 /* For now, do all work in the (DISTINCT, NULL) upperrel */
4745 distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4746
4747 /*
4748 * We don't compute anything at this level, so distinct_rel will be
4749 * parallel-safe if the input rel is parallel-safe. In particular, if
4750 * there is a DISTINCT ON (...) clause, any path for the input_rel will
4751 * output those expressions, and will not be parallel-safe unless those
4752 * expressions are parallel-safe.
4753 */
4754 distinct_rel->consider_parallel = input_rel->consider_parallel;
4755
4756 /*
4757 * If the input rel belongs to a single FDW, so does the distinct_rel.
4758 */
4759 distinct_rel->serverid = input_rel->serverid;
4760 distinct_rel->userid = input_rel->userid;
4761 distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4762 distinct_rel->fdwroutine = input_rel->fdwroutine;
4763
4764 /* build distinct paths based on input_rel's pathlist */
4765 create_final_distinct_paths(root, input_rel, distinct_rel);
4766
4767 /* now build distinct paths based on input_rel's partial_pathlist */
4768 create_partial_distinct_paths(root, input_rel, distinct_rel, target);
4769
4770 /* Give a helpful error if we failed to create any paths */
4771 if (distinct_rel->pathlist == NIL)
4772 ereport(ERROR,
4773 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4774 errmsg("could not implement DISTINCT"),
4775 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4776
4777 /*
4778 * If there is an FDW that's responsible for all baserels of the query,
4779 * let it consider adding ForeignPaths.
4780 */
4781 if (distinct_rel->fdwroutine &&
4782 distinct_rel->fdwroutine->GetForeignUpperPaths)
4783 distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4785 input_rel,
4786 distinct_rel,
4787 NULL);
4788
4789 /* Let extensions possibly add some more paths */
4791 (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4792 distinct_rel, NULL);
4793
4794 /* Now choose the best path(s) */
4795 set_cheapest(distinct_rel);
4796
4797 return distinct_rel;
4798}
4799
4800/*
4801 * create_partial_distinct_paths
4802 *
4803 * Process 'input_rel' partial paths and add unique/aggregate paths to the
4804 * UPPERREL_PARTIAL_DISTINCT rel. For paths created, add Gather/GatherMerge
4805 * paths on top and add a final unique/aggregate path to remove any duplicate
4806 * produced from combining rows from parallel workers.
4807 */
4808static void
4810 RelOptInfo *final_distinct_rel,
4811 PathTarget *target)
4812{
4813 RelOptInfo *partial_distinct_rel;
4814 Query *parse;
4815 List *distinctExprs;
4816 double numDistinctRows;
4817 Path *cheapest_partial_path;
4818 ListCell *lc;
4819
4820 /* nothing to do when there are no partial paths in the input rel */
4821 if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4822 return;
4823
4824 parse = root->parse;
4825
4826 /* can't do parallel DISTINCT ON */
4827 if (parse->hasDistinctOn)
4828 return;
4829
4830 partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4831 NULL);
4832 partial_distinct_rel->reltarget = target;
4833 partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4834
4835 /*
4836 * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4837 */
4838 partial_distinct_rel->serverid = input_rel->serverid;
4839 partial_distinct_rel->userid = input_rel->userid;
4840 partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4841 partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4842
4843 cheapest_partial_path = linitial(input_rel->partial_pathlist);
4844
4845 distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4846 parse->targetList);
4847
4848 /* estimate how many distinct rows we'll get from each worker */
4849 numDistinctRows = estimate_num_groups(root, distinctExprs,
4850 cheapest_partial_path->rows,
4851 NULL, NULL);
4852
4853 /*
4854 * Try sorting the cheapest path and incrementally sorting any paths with
4855 * presorted keys and put a unique paths atop of those. We'll also
4856 * attempt to reorder the required pathkeys to match the input path's
4857 * pathkeys as much as possible, in hopes of avoiding a possible need to
4858 * re-sort.
4859 */
4860 if (grouping_is_sortable(root->processed_distinctClause))
4861 {
4862 foreach(lc, input_rel->partial_pathlist)
4863 {
4864 Path *input_path = (Path *) lfirst(lc);
4865 Path *sorted_path;
4866 List *useful_pathkeys_list = NIL;
4867
4868 useful_pathkeys_list =
4870 root->distinct_pathkeys,
4871 input_path->pathkeys);
4872 Assert(list_length(useful_pathkeys_list) > 0);
4873
4874 foreach_node(List, useful_pathkeys, useful_pathkeys_list)
4875 {
4876 sorted_path = make_ordered_path(root,
4877 partial_distinct_rel,
4878 input_path,
4879 cheapest_partial_path,
4880 useful_pathkeys,
4881 -1.0);
4882
4883 if (sorted_path == NULL)
4884 continue;
4885
4886 /*
4887 * An empty distinct_pathkeys means all tuples have the same
4888 * value for the DISTINCT clause. See
4889 * create_final_distinct_paths()
4890 */
4891 if (root->distinct_pathkeys == NIL)
4892 {
4893 Node *limitCount;
4894
4895 limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
4896 sizeof(int64),
4897 Int64GetDatum(1), false,
4899
4900 /*
4901 * Apply a LimitPath onto the partial path to restrict the
4902 * tuples from each worker to 1.
4903 * create_final_distinct_paths will need to apply an
4904 * additional LimitPath to restrict this to a single row
4905 * after the Gather node. If the query already has a
4906 * LIMIT clause, then we could end up with three Limit
4907 * nodes in the final plan. Consolidating the top two of
4908 * these could be done, but does not seem worth troubling
4909 * over.
4910 */
4911 add_partial_path(partial_distinct_rel, (Path *)
4912 create_limit_path(root, partial_distinct_rel,
4913 sorted_path,
4914 NULL,
4915 limitCount,
4917 0, 1));
4918 }
4919 else
4920 {
4921 add_partial_path(partial_distinct_rel, (Path *)
4922 create_upper_unique_path(root, partial_distinct_rel,
4923 sorted_path,
4924 list_length(root->distinct_pathkeys),
4925 numDistinctRows));
4926 }
4927 }
4928 }
4929 }
4930
4931 /*
4932 * Now try hash aggregate paths, if enabled and hashing is possible. Since
4933 * we're not on the hook to ensure we do our best to create at least one
4934 * path here, we treat enable_hashagg as a hard off-switch rather than the
4935 * slightly softer variant in create_final_distinct_paths.
4936 */
4937 if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
4938 {
4939 add_partial_path(partial_distinct_rel, (Path *)
4941 partial_distinct_rel,
4942 cheapest_partial_path,
4943 cheapest_partial_path->pathtarget,
4944 AGG_HASHED,
4946 root->processed_distinctClause,
4947 NIL,
4948 NULL,
4949 numDistinctRows));
4950 }
4951
4952 /*
4953 * If there is an FDW that's responsible for all baserels of the query,
4954 * let it consider adding ForeignPaths.
4955 */
4956 if (partial_distinct_rel->fdwroutine &&
4957 partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
4958 partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4960 input_rel,
4961 partial_distinct_rel,
4962 NULL);
4963
4964 /* Let extensions possibly add some more partial paths */
4966 (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
4967 input_rel, partial_distinct_rel, NULL);
4968
4969 if (partial_distinct_rel->partial_pathlist != NIL)
4970 {
4971 generate_useful_gather_paths(root, partial_distinct_rel, true);
4972 set_cheapest(partial_distinct_rel);
4973
4974 /*
4975 * Finally, create paths to distinctify the final result. This step
4976 * is needed to remove any duplicates due to combining rows from
4977 * parallel workers.
4978 */
4979 create_final_distinct_paths(root, partial_distinct_rel,
4980 final_distinct_rel);
4981 }
4982}
4983
4984/*
4985 * create_final_distinct_paths
4986 * Create distinct paths in 'distinct_rel' based on 'input_rel' pathlist
4987 *
4988 * input_rel: contains the source-data paths
4989 * distinct_rel: destination relation for storing created paths
4990 */
4991static RelOptInfo *
4993 RelOptInfo *distinct_rel)
4994{
4995 Query *parse = root->parse;
4996 Path *cheapest_input_path = input_rel->cheapest_total_path;
4997 double numDistinctRows;
4998 bool allow_hash;
4999
5000 /* Estimate number of distinct rows there will be */
5001 if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
5002 root->hasHavingQual)
5003 {
5004 /*
5005 * If there was grouping or aggregation, use the number of input rows
5006 * as the estimated number of DISTINCT rows (ie, assume the input is
5007 * already mostly unique).
5008 */
5009 numDistinctRows = cheapest_input_path->rows;
5010 }
5011 else
5012 {
5013 /*
5014 * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
5015 */
5016 List *distinctExprs;
5017
5018 distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
5019 parse->targetList);
5020 numDistinctRows = estimate_num_groups(root, distinctExprs,
5021 cheapest_input_path->rows,
5022 NULL, NULL);
5023 }
5024
5025 /*
5026 * Consider sort-based implementations of DISTINCT, if possible.
5027 */
5028 if (grouping_is_sortable(root->processed_distinctClause))
5029 {
5030 /*
5031 * Firstly, if we have any adequately-presorted paths, just stick a
5032 * Unique node on those. We also, consider doing an explicit sort of
5033 * the cheapest input path and Unique'ing that. If any paths have
5034 * presorted keys then we'll create an incremental sort atop of those
5035 * before adding a unique node on the top. We'll also attempt to
5036 * reorder the required pathkeys to match the input path's pathkeys as
5037 * much as possible, in hopes of avoiding a possible need to re-sort.
5038 *
5039 * When we have DISTINCT ON, we must sort by the more rigorous of
5040 * DISTINCT and ORDER BY, else it won't have the desired behavior.
5041 * Also, if we do have to do an explicit sort, we might as well use
5042 * the more rigorous ordering to avoid a second sort later. (Note
5043 * that the parser will have ensured that one clause is a prefix of
5044 * the other.)
5045 */
5046 List *needed_pathkeys;
5047 ListCell *lc;
5048 double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
5049
5050 if (parse->hasDistinctOn &&
5051 list_length(root->distinct_pathkeys) <
5052 list_length(root->sort_pathkeys))
5053 needed_pathkeys = root->sort_pathkeys;
5054 else
5055 needed_pathkeys = root->distinct_pathkeys;
5056
5057 foreach(lc, input_rel->pathlist)
5058 {
5059 Path *input_path = (Path *) lfirst(lc);
5060 Path *sorted_path;
5061 List *useful_pathkeys_list = NIL;
5062
5063 useful_pathkeys_list =
5065 needed_pathkeys,
5066 input_path->pathkeys);
5067 Assert(list_length(useful_pathkeys_list) > 0);
5068
5069 foreach_node(List, useful_pathkeys, useful_pathkeys_list)
5070 {
5071 sorted_path = make_ordered_path(root,
5072 distinct_rel,
5073 input_path,
5074 cheapest_input_path,
5075 useful_pathkeys,
5076 limittuples);
5077
5078 if (sorted_path == NULL)
5079 continue;
5080
5081 /*
5082 * distinct_pathkeys may have become empty if all of the
5083 * pathkeys were determined to be redundant. If all of the
5084 * pathkeys are redundant then each DISTINCT target must only
5085 * allow a single value, therefore all resulting tuples must
5086 * be identical (or at least indistinguishable by an equality
5087 * check). We can uniquify these tuples simply by just taking
5088 * the first tuple. All we do here is add a path to do "LIMIT
5089 * 1" atop of 'sorted_path'. When doing a DISTINCT ON we may
5090 * still have a non-NIL sort_pathkeys list, so we must still
5091 * only do this with paths which are correctly sorted by
5092 * sort_pathkeys.
5093 */
5094 if (root->distinct_pathkeys == NIL)
5095 {
5096 Node *limitCount;
5097
5098 limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5099 sizeof(int64),
5100 Int64GetDatum(1), false,
5102
5103 /*
5104 * If the query already has a LIMIT clause, then we could
5105 * end up with a duplicate LimitPath in the final plan.
5106 * That does not seem worth troubling over too much.
5107 */
5108 add_path(distinct_rel, (Path *)
5109 create_limit_path(root, distinct_rel, sorted_path,
5110 NULL, limitCount,
5111 LIMIT_OPTION_COUNT, 0, 1));
5112 }
5113 else
5114 {
5115 add_path(distinct_rel, (Path *)
5116 create_upper_unique_path(root, distinct_rel,
5117 sorted_path,
5118 list_length(root->distinct_pathkeys),
5119 numDistinctRows));
5120 }
5121 }
5122 }
5123 }
5124
5125 /*
5126 * Consider hash-based implementations of DISTINCT, if possible.
5127 *
5128 * If we were not able to make any other types of path, we *must* hash or
5129 * die trying. If we do have other choices, there are two things that
5130 * should prevent selection of hashing: if the query uses DISTINCT ON
5131 * (because it won't really have the expected behavior if we hash), or if
5132 * enable_hashagg is off.
5133 *
5134 * Note: grouping_is_hashable() is much more expensive to check than the
5135 * other gating conditions, so we want to do it last.
5136 */
5137 if (distinct_rel->pathlist == NIL)
5138 allow_hash = true; /* we have no alternatives */
5139 else if (parse->hasDistinctOn || !enable_hashagg)
5140 allow_hash = false; /* policy-based decision not to hash */
5141 else
5142 allow_hash = true; /* default */
5143
5144 if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5145 {
5146 /* Generate hashed aggregate path --- no sort needed */
5147 add_path(distinct_rel, (Path *)
5149 distinct_rel,
5150 cheapest_input_path,
5151 cheapest_input_path->pathtarget,
5152 AGG_HASHED,
5154 root->processed_distinctClause,
5155 NIL,
5156 NULL,
5157 numDistinctRows));
5158 }
5159
5160 return distinct_rel;
5161}
5162
5163/*
5164 * get_useful_pathkeys_for_distinct
5165 * Get useful orderings of pathkeys for distinctClause by reordering
5166 * 'needed_pathkeys' to match the given 'path_pathkeys' as much as possible.
5167 *
5168 * This returns a list of pathkeys that can be useful for DISTINCT or DISTINCT
5169 * ON clause. For convenience, it always includes the given 'needed_pathkeys'.
5170 */
5171static List *
5173 List *path_pathkeys)
5174{
5175 List *useful_pathkeys_list = NIL;
5176 List *useful_pathkeys = NIL;
5177
5178 /* always include the given 'needed_pathkeys' */
5179 useful_pathkeys_list = lappend(useful_pathkeys_list,
5180 needed_pathkeys);
5181
5183 return useful_pathkeys_list;
5184
5185 /*
5186 * Scan the given 'path_pathkeys' and construct a list of PathKey nodes
5187 * that match 'needed_pathkeys', but only up to the longest matching
5188 * prefix.
5189 *
5190 * When we have DISTINCT ON, we must ensure that the resulting pathkey
5191 * list matches initial distinctClause pathkeys; otherwise, it won't have
5192 * the desired behavior.
5193 */
5194 foreach_node(PathKey, pathkey, path_pathkeys)
5195 {
5196 /*
5197 * The PathKey nodes are canonical, so they can be checked for
5198 * equality by simple pointer comparison.
5199 */
5200 if (!list_member_ptr(needed_pathkeys, pathkey))
5201 break;
5202 if (root->parse->hasDistinctOn &&
5203 !list_member_ptr(root->distinct_pathkeys, pathkey))
5204 break;
5205
5206 useful_pathkeys = lappend(useful_pathkeys, pathkey);
5207 }
5208
5209 /* If no match at all, no point in reordering needed_pathkeys */
5210 if (useful_pathkeys == NIL)
5211 return useful_pathkeys_list;
5212
5213 /*
5214 * If not full match, the resulting pathkey list is not useful without
5215 * incremental sort.
5216 */
5217 if (list_length(useful_pathkeys) < list_length(needed_pathkeys) &&
5219 return useful_pathkeys_list;
5220
5221 /* Append the remaining PathKey nodes in needed_pathkeys */
5222 useful_pathkeys = list_concat_unique_ptr(useful_pathkeys,
5223 needed_pathkeys);
5224
5225 /*
5226 * If the resulting pathkey list is the same as the 'needed_pathkeys',
5227 * just drop it.
5228 */
5229 if (compare_pathkeys(needed_pathkeys,
5230 useful_pathkeys) == PATHKEYS_EQUAL)
5231 return useful_pathkeys_list;
5232
5233 useful_pathkeys_list = lappend(useful_pathkeys_list,
5234 useful_pathkeys);
5235
5236 return useful_pathkeys_list;
5237}
5238
5239/*
5240 * create_ordered_paths
5241 *
5242 * Build a new upperrel containing Paths for ORDER BY evaluation.
5243 *
5244 * All paths in the result must satisfy the ORDER BY ordering.
5245 * The only new paths we need consider are an explicit full sort
5246 * and incremental sort on the cheapest-total existing path.
5247 *
5248 * input_rel: contains the source-data Paths
5249 * target: the output tlist the result Paths must emit
5250 * limit_tuples: estimated bound on the number of output tuples,
5251 * or -1 if no LIMIT or couldn't estimate
5252 *
5253 * XXX This only looks at sort_pathkeys. I wonder if it needs to look at the
5254 * other pathkeys (grouping, ...) like generate_useful_gather_paths.
5255 */
5256static RelOptInfo *
5258 RelOptInfo *input_rel,
5259 PathTarget *target,
5260 bool target_parallel_safe,
5261 double limit_tuples)
5262{
5263 Path *cheapest_input_path = input_rel->cheapest_total_path;
5264 RelOptInfo *ordered_rel;
5265 ListCell *lc;
5266
5267 /* For now, do all work in the (ORDERED, NULL) upperrel */
5268 ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5269
5270 /*
5271 * If the input relation is not parallel-safe, then the ordered relation
5272 * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5273 * target list is parallel-safe.
5274 */
5275 if (input_rel->consider_parallel && target_parallel_safe)
5276 ordered_rel->consider_parallel = true;
5277
5278 /*
5279 * If the input rel belongs to a single FDW, so does the ordered_rel.
5280 */
5281 ordered_rel->serverid = input_rel->serverid;
5282 ordered_rel->userid = input_rel->userid;
5283 ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5284 ordered_rel->fdwroutine = input_rel->fdwroutine;
5285
5286 foreach(lc, input_rel->pathlist)
5287 {
5288 Path *input_path = (Path *) lfirst(lc);
5289 Path *sorted_path;
5290 bool is_sorted;
5291 int presorted_keys;
5292
5293 is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5294 input_path->pathkeys, &presorted_keys);
5295
5296 if (is_sorted)
5297 sorted_path = input_path;
5298 else
5299 {
5300 /*
5301 * Try at least sorting the cheapest path and also try
5302 * incrementally sorting any path which is partially sorted
5303 * already (no need to deal with paths which have presorted keys
5304 * when incremental sort is disabled unless it's the cheapest
5305 * input path).
5306 */
5307 if (input_path != cheapest_input_path &&
5308 (presorted_keys == 0 || !enable_incremental_sort))
5309 continue;
5310
5311 /*
5312 * We've no need to consider both a sort and incremental sort.
5313 * We'll just do a sort if there are no presorted keys and an
5314 * incremental sort when there are presorted keys.
5315 */
5316 if (presorted_keys == 0 || !enable_incremental_sort)
5317 sorted_path = (Path *) create_sort_path(root,
5318 ordered_rel,
5319 input_path,
5320 root->sort_pathkeys,
5321 limit_tuples);
5322 else
5323 sorted_path = (Path *) create_incremental_sort_path(root,
5324 ordered_rel,
5325 input_path,
5326 root->sort_pathkeys,
5327 presorted_keys,
5328 limit_tuples);
5329 }
5330
5331 /*
5332 * If the pathtarget of the result path has different expressions from
5333 * the target to be applied, a projection step is needed.
5334 */
5335 if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5336 sorted_path = apply_projection_to_path(root, ordered_rel,
5337 sorted_path, target);
5338
5339 add_path(ordered_rel, sorted_path);
5340 }
5341
5342 /*
5343 * generate_gather_paths() will have already generated a simple Gather
5344 * path for the best parallel path, if any, and the loop above will have
5345 * considered sorting it. Similarly, generate_gather_paths() will also
5346 * have generated order-preserving Gather Merge plans which can be used
5347 * without sorting if they happen to match the sort_pathkeys, and the loop
5348 * above will have handled those as well. However, there's one more
5349 * possibility: it may make sense to sort the cheapest partial path or
5350 * incrementally sort any partial path that is partially sorted according
5351 * to the required output order and then use Gather Merge.
5352 */
5353 if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5354 input_rel->partial_pathlist != NIL)
5355 {
5356 Path *cheapest_partial_path;
5357
5358 cheapest_partial_path = linitial(input_rel->partial_pathlist);
5359
5360 foreach(lc, input_rel->partial_pathlist)
5361 {
5362 Path *input_path = (Path *) lfirst(lc);
5363 Path *sorted_path;
5364 bool is_sorted;
5365 int presorted_keys;
5366 double total_groups;
5367
5368 is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5369 input_path->pathkeys,
5370 &presorted_keys);
5371
5372 if (is_sorted)
5373 continue;
5374
5375 /*
5376 * Try at least sorting the cheapest path and also try
5377 * incrementally sorting any path which is partially sorted
5378 * already (no need to deal with paths which have presorted keys
5379 * when incremental sort is disabled unless it's the cheapest
5380 * partial path).
5381 */
5382 if (input_path != cheapest_partial_path &&
5383 (presorted_keys == 0 || !enable_incremental_sort))
5384 continue;
5385
5386 /*
5387 * We've no need to consider both a sort and incremental sort.
5388 * We'll just do a sort if there are no presorted keys and an
5389 * incremental sort when there are presorted keys.
5390 */
5391 if (presorted_keys == 0 || !enable_incremental_sort)
5392 sorted_path = (Path *) create_sort_path(root,
5393 ordered_rel,
5394 input_path,
5395 root->sort_pathkeys,
5396 limit_tuples);
5397 else
5398 sorted_path = (Path *) create_incremental_sort_path(root,
5399 ordered_rel,
5400 input_path,
5401 root->sort_pathkeys,
5402 presorted_keys,
5403 limit_tuples);
5404 total_groups = compute_gather_rows(sorted_path);
5405 sorted_path = (Path *)
5406 create_gather_merge_path(root, ordered_rel,
5407 sorted_path,
5408 sorted_path->pathtarget,
5409 root->sort_pathkeys, NULL,
5410 &total_groups);
5411
5412 /*
5413 * If the pathtarget of the result path has different expressions
5414 * from the target to be applied, a projection step is needed.
5415 */
5416 if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5417 sorted_path = apply_projection_to_path(root, ordered_rel,
5418 sorted_path, target);
5419
5420 add_path(ordered_rel, sorted_path);
5421 }
5422 }
5423
5424 /*
5425 * If there is an FDW that's responsible for all baserels of the query,
5426 * let it consider adding ForeignPaths.
5427 */
5428 if (ordered_rel->fdwroutine &&
5429 ordered_rel->fdwroutine->GetForeignUpperPaths)
5430 ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5431 input_rel, ordered_rel,
5432 NULL);
5433
5434 /* Let extensions possibly add some more paths */
5436 (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5437 input_rel, ordered_rel, NULL);
5438
5439 /*
5440 * No need to bother with set_cheapest here; grouping_planner does not
5441 * need us to do it.
5442 */
5443 Assert(ordered_rel->pathlist != NIL);
5444
5445 return ordered_rel;
5446}
5447
5448
5449/*
5450 * make_group_input_target
5451 * Generate appropriate PathTarget for initial input to grouping nodes.
5452 *
5453 * If there is grouping or aggregation, the scan/join subplan cannot emit
5454 * the query's final targetlist; for example, it certainly can't emit any
5455 * aggregate function calls. This routine generates the correct target
5456 * for the scan/join subplan.
5457 *
5458 * The query target list passed from the parser already contains entries
5459 * for all ORDER BY and GROUP BY expressions, but it will not have entries
5460 * for variables used only in HAVING clauses; so we need to add those
5461 * variables to the subplan target list. Also, we flatten all expressions
5462 * except GROUP BY items into their component variables; other expressions
5463 * will be computed by the upper plan nodes rather than by the subplan.
5464 * For example, given a query like
5465 * SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
5466 * we want to pass this targetlist to the subplan:
5467 * a+b,c,d
5468 * where the a+b target will be used by the Sort/Group steps, and the
5469 * other targets will be used for computing the final results.
5470 *
5471 * 'final_target' is the query's final target list (in PathTarget form)
5472 *
5473 * The result is the PathTarget to be computed by the Paths returned from
5474 * query_planner().
5475 */
5476static PathTarget *
5478{
5479 Query *parse = root->parse;
5480 PathTarget *input_target;
5481 List *non_group_cols;
5482 List *non_group_vars;
5483 int i;
5484 ListCell *lc;
5485
5486 /*
5487 * We must build a target containing all grouping columns, plus any other
5488 * Vars mentioned in the query's targetlist and HAVING qual.
5489 */
5490 input_target = create_empty_pathtarget();
5491 non_group_cols = NIL;
5492
5493 i = 0;
5494 foreach(lc, final_target->exprs)
5495 {
5496 Expr *expr = (Expr *) lfirst(lc);
5497 Index sgref = get_pathtarget_sortgroupref(final_target, i);
5498
5499 if (sgref && root->processed_groupClause &&
5501 root->processed_groupClause) != NULL)
5502 {
5503 /*
5504 * It's a grouping column, so add it to the input target as-is.
5505 *
5506 * Note that the target is logically below the grouping step. So
5507 * with grouping sets we need to remove the RT index of the
5508 * grouping step if there is any from the target expression.
5509 */
5510 if (parse->hasGroupRTE && parse->groupingSets != NIL)
5511 {
5512 Assert(root->group_rtindex > 0);
5513 expr = (Expr *)
5514 remove_nulling_relids((Node *) expr,
5515 bms_make_singleton(root->group_rtindex),
5516 NULL);
5517 }
5518 add_column_to_pathtarget(input_target, expr, sgref);
5519 }
5520 else
5521 {
5522 /*
5523 * Non-grouping column, so just remember the expression for later
5524 * call to pull_var_clause.
5525 */
5526 non_group_cols = lappend(non_group_cols, expr);
5527 }
5528
5529 i++;
5530 }
5531
5532 /*
5533 * If there's a HAVING clause, we'll need the Vars it uses, too.
5534 */
5535 if (parse->havingQual)
5536 non_group_cols = lappend(non_group_cols, parse->havingQual);
5537
5538 /*
5539 * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5540 * add them to the input target if not already present. (A Var used
5541 * directly as a GROUP BY item will be present already.) Note this
5542 * includes Vars used in resjunk items, so we are covering the needs of
5543 * ORDER BY and window specifications. Vars used within Aggrefs and
5544 * WindowFuncs will be pulled out here, too.
5545 *
5546 * Note that the target is logically below the grouping step. So with
5547 * grouping sets we need to remove the RT index of the grouping step if
5548 * there is any from the non-group Vars.
5549 */
5550 non_group_vars = pull_var_clause((Node *) non_group_cols,
5554 if (parse->hasGroupRTE && parse->groupingSets != NIL)
5555 {
5556 Assert(root->group_rtindex > 0);
5557 non_group_vars = (List *)
5558 remove_nulling_relids((Node *) non_group_vars,
5559 bms_make_singleton(root->group_rtindex),
5560 NULL);
5561 }
5562 add_new_columns_to_pathtarget(input_target, non_group_vars);
5563
5564 /* clean up cruft */
5565 list_free(non_group_vars);
5566 list_free(non_group_cols);
5567
5568 /* XXX this causes some redundant cost calculation ... */
5569 return set_pathtarget_cost_width(root, input_target);
5570}
5571
5572/*
5573 * make_partial_grouping_target
5574 * Generate appropriate PathTarget for output of partial aggregate
5575 * (or partial grouping, if there are no aggregates) nodes.
5576 *
5577 * A partial aggregation node needs to emit all the same aggregates that
5578 * a regular aggregation node would, plus any aggregates used in HAVING;
5579 * except that the Aggref nodes should be marked as partial aggregates.
5580 *
5581 * In addition, we'd better emit any Vars and PlaceHolderVars that are
5582 * used outside of Aggrefs in the aggregation tlist and HAVING. (Presumably,
5583 * these would be Vars that are grouped by or used in grouping expressions.)
5584 *
5585 * grouping_target is the tlist to be emitted by the topmost aggregation step.
5586 * havingQual represents the HAVING clause.
5587 */
5588static PathTarget *
5590 PathTarget *grouping_target,
5591 Node *havingQual)
5592{
5593 PathTarget *partial_target;
5594 List *non_group_cols;
5595 List *non_group_exprs;
5596 int i;
5597 ListCell *lc;
5598
5599 partial_target = create_empty_pathtarget();
5600 non_group_cols = NIL;
5601
5602 i = 0;
5603 foreach(lc, grouping_target->exprs)
5604 {
5605 Expr *expr = (Expr *) lfirst(lc);
5606 Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5607
5608 if (sgref && root->processed_groupClause &&
5610 root->processed_groupClause) != NULL)
5611 {
5612 /*
5613 * It's a grouping column, so add it to the partial_target as-is.
5614 * (This allows the upper agg step to repeat the grouping calcs.)
5615 */
5616 add_column_to_pathtarget(partial_target, expr, sgref);
5617 }
5618 else
5619 {
5620 /*
5621 * Non-grouping column, so just remember the expression for later
5622 * call to pull_var_clause.
5623 */
5624 non_group_cols = lappend(non_group_cols, expr);
5625 }
5626
5627 i++;
5628 }
5629
5630 /*
5631 * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5632 */
5633 if (havingQual)
5634 non_group_cols = lappend(non_group_cols, havingQual);
5635
5636 /*
5637 * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5638 * non-group cols (plus HAVING), and add them to the partial_target if not
5639 * already present. (An expression used directly as a GROUP BY item will
5640 * be present already.) Note this includes Vars used in resjunk items, so
5641 * we are covering the needs of ORDER BY and window specifications.
5642 */
5643 non_group_exprs = pull_var_clause((Node *) non_group_cols,
5647
5648 add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5649
5650 /*
5651 * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5652 * are at the top level of the target list, so we can just scan the list
5653 * rather than recursing through the expression trees.
5654 */
5655 foreach(lc, partial_target->exprs)
5656 {
5657 Aggref *aggref = (Aggref *) lfirst(lc);
5658
5659 if (IsA(aggref, Aggref))
5660 {
5661 Aggref *newaggref;
5662
5663 /*
5664 * We shouldn't need to copy the substructure of the Aggref node,
5665 * but flat-copy the node itself to avoid damaging other trees.
5666 */
5667 newaggref = makeNode(Aggref);
5668 memcpy(newaggref, aggref, sizeof(Aggref));
5669
5670 /* For now, assume serialization is required */
5672
5673 lfirst(lc) = newaggref;
5674 }
5675 }
5676
5677 /* clean up cruft */
5678 list_free(non_group_exprs);
5679 list_free(non_group_cols);
5680
5681 /* XXX this causes some redundant cost calculation ... */
5682 return set_pathtarget_cost_width(root, partial_target);
5683}
5684
5685/*
5686 * mark_partial_aggref
5687 * Adjust an Aggref to make it represent a partial-aggregation step.
5688 *
5689 * The Aggref node is modified in-place; caller must do any copying required.
5690 */
5691void
5693{
5694 /* aggtranstype should be computed by this point */
5695 Assert(OidIsValid(agg->aggtranstype));
5696 /* ... but aggsplit should still be as the parser left it */
5697 Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5698
5699 /* Mark the Aggref with the intended partial-aggregation mode */
5700 agg->aggsplit = aggsplit;
5701
5702 /*
5703 * Adjust result type if needed. Normally, a partial aggregate returns
5704 * the aggregate's transition type; but if that's INTERNAL and we're
5705 * serializing, it returns BYTEA instead.
5706 */
5707 if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5708 {
5709 if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5710 agg->aggtype = BYTEAOID;
5711 else
5712 agg->aggtype = agg->aggtranstype;
5713 }
5714}
5715
5716/*
5717 * postprocess_setop_tlist
5718 * Fix up targetlist returned by plan_set_operations().
5719 *
5720 * We need to transpose sort key info from the orig_tlist into new_tlist.
5721 * NOTE: this would not be good enough if we supported resjunk sort keys
5722 * for results of set operations --- then, we'd need to project a whole
5723 * new tlist to evaluate the resjunk columns. For now, just ereport if we
5724 * find any resjunk columns in orig_tlist.
5725 */
5726static List *
5727postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
5728{
5729 ListCell *l;
5730 ListCell *orig_tlist_item = list_head(orig_tlist);
5731
5732 foreach(l, new_tlist)
5733 {
5734 TargetEntry *new_tle = lfirst_node(TargetEntry, l);
5735 TargetEntry *orig_tle;
5736
5737 /* ignore resjunk columns in setop result */
5738 if (new_tle->resjunk)
5739 continue;
5740
5741 Assert(orig_tlist_item != NULL);
5742 orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
5743 orig_tlist_item = lnext(orig_tlist, orig_tlist_item);
5744 if (orig_tle->resjunk) /* should not happen */
5745 elog(ERROR, "resjunk output columns are not implemented");
5746 Assert(new_tle->resno == orig_tle->resno);
5747 new_tle->ressortgroupref = orig_tle->ressortgroupref;
5748 }
5749 if (orig_tlist_item != NULL)
5750 elog(ERROR, "resjunk output columns are not implemented");
5751 return new_tlist;
5752}
5753
5754/*
5755 * optimize_window_clauses
5756 * Call each WindowFunc's prosupport function to see if we're able to
5757 * make any adjustments to any of the WindowClause's so that the executor
5758 * can execute the window functions in a more optimal way.
5759 *
5760 * Currently we only allow adjustments to the WindowClause's frameOptions. We
5761 * may allow more things to be done here in the future.
5762 */
5763static void
5765{
5766 List *windowClause = root->parse->windowClause;
5767 ListCell *lc;
5768
5769 foreach(lc, windowClause)
5770 {
5772 ListCell *lc2;
5773 int optimizedFrameOptions = 0;
5774
5775 Assert(wc->winref <= wflists->maxWinRef);
5776
5777 /* skip any WindowClauses that have no WindowFuncs */
5778 if (wflists->windowFuncs[wc->winref] == NIL)
5779 continue;
5780
5781 foreach(lc2, wflists->windowFuncs[wc->winref])
5782 {
5785 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
5786 Oid prosupport;
5787
5788 prosupport = get_func_support(wfunc->winfnoid);
5789
5790 /* Check if there's a support function for 'wfunc' */
5791 if (!OidIsValid(prosupport))
5792 break; /* can't optimize this WindowClause */
5793
5794 req.type = T_SupportRequestOptimizeWindowClause;
5795 req.window_clause = wc;
5796 req.window_func = wfunc;
5797 req.frameOptions = wc->frameOptions;
5798
5799 /* call the support function */
5802 PointerGetDatum(&req)));
5803
5804 /*
5805 * Skip to next WindowClause if the support function does not
5806 * support this request type.
5807 */
5808 if (res == NULL)
5809 break;
5810
5811 /*
5812 * Save these frameOptions for the first WindowFunc for this
5813 * WindowClause.
5814 */
5815 if (foreach_current_index(lc2) == 0)
5816 optimizedFrameOptions = res->frameOptions;
5817
5818 /*
5819 * On subsequent WindowFuncs, if the frameOptions are not the same
5820 * then we're unable to optimize the frameOptions for this
5821 * WindowClause.
5822 */
5823 else if (optimizedFrameOptions != res->frameOptions)
5824 break; /* skip to the next WindowClause, if any */
5825 }
5826
5827 /* adjust the frameOptions if all WindowFunc's agree that it's ok */
5828 if (lc2 == NULL && wc->frameOptions != optimizedFrameOptions)
5829 {
5830 ListCell *lc3;
5831
5832 /* apply the new frame options */
5833 wc->frameOptions = optimizedFrameOptions;
5834
5835 /*
5836 * We now check to see if changing the frameOptions has caused
5837 * this WindowClause to be a duplicate of some other WindowClause.
5838 * This can only happen if we have multiple WindowClauses, so
5839 * don't bother if there's only 1.
5840 */
5841 if (list_length(windowClause) == 1)
5842 continue;
5843
5844 /*
5845 * Do the duplicate check and reuse the existing WindowClause if
5846 * we find a duplicate.
5847 */
5848 foreach(lc3, windowClause)
5849 {
5850 WindowClause *existing_wc = lfirst_node(WindowClause, lc3);
5851
5852 /* skip over the WindowClause we're currently editing */
5853 if (existing_wc == wc)
5854 continue;
5855
5856 /*
5857 * Perform the same duplicate check that is done in
5858 * transformWindowFuncCall.
5859 */
5860 if (equal(wc->partitionClause, existing_wc->partitionClause) &&
5861 equal(wc->orderClause, existing_wc->orderClause) &&
5862 wc->frameOptions == existing_wc->frameOptions &&
5863 equal(wc->startOffset, existing_wc->startOffset) &&
5864 equal(wc->endOffset, existing_wc->endOffset))
5865 {
5866 ListCell *lc4;
5867
5868 /*
5869 * Now move each WindowFunc in 'wc' into 'existing_wc'.
5870 * This required adjusting each WindowFunc's winref and
5871 * moving the WindowFuncs in 'wc' to the list of
5872 * WindowFuncs in 'existing_wc'.
5873 */
5874 foreach(lc4, wflists->windowFuncs[wc->winref])
5875 {
5876 WindowFunc *wfunc = lfirst_node(WindowFunc, lc4);
5877
5878 wfunc->winref = existing_wc->winref;
5879 }
5880
5881 /* move list items */
5882 wflists->windowFuncs[existing_wc->winref] = list_concat(wflists->windowFuncs[existing_wc->winref],
5883 wflists->windowFuncs[wc->winref]);
5884 wflists->windowFuncs[wc->winref] = NIL;
5885
5886 /*
5887 * transformWindowFuncCall() should have made sure there
5888 * are no other duplicates, so we needn't bother looking
5889 * any further.
5890 */
5891 break;
5892 }
5893 }
5894 }
5895 }
5896}
5897
5898/*
5899 * select_active_windows
5900 * Create a list of the "active" window clauses (ie, those referenced
5901 * by non-deleted WindowFuncs) in the order they are to be executed.
5902 */
5903static List *
5905{
5906 List *windowClause = root->parse->windowClause;
5907 List *result = NIL;
5908 ListCell *lc;
5909 int nActive = 0;
5911 * list_length(windowClause));
5912
5913 /* First, construct an array of the active windows */
5914 foreach(lc, windowClause)
5915 {
5917
5918 /* It's only active if wflists shows some related WindowFuncs */
5919 Assert(wc->winref <= wflists->maxWinRef);
5920 if (wflists->windowFuncs[wc->winref] == NIL)
5921 continue;
5922
5923 actives[nActive].wc = wc; /* original clause */
5924
5925 /*
5926 * For sorting, we want the list of partition keys followed by the
5927 * list of sort keys. But pathkeys construction will remove duplicates
5928 * between the two, so we can as well (even though we can't detect all
5929 * of the duplicates, since some may come from ECs - that might mean
5930 * we miss optimization chances here). We must, however, ensure that
5931 * the order of entries is preserved with respect to the ones we do
5932 * keep.
5933 *
5934 * partitionClause and orderClause had their own duplicates removed in
5935 * parse analysis, so we're only concerned here with removing
5936 * orderClause entries that also appear in partitionClause.
5937 */
5938 actives[nActive].uniqueOrder =
5940 wc->orderClause);
5941 nActive++;
5942 }
5943
5944 /*
5945 * Sort active windows by their partitioning/ordering clauses, ignoring
5946 * any framing clauses, so that the windows that need the same sorting are
5947 * adjacent in the list. When we come to generate paths, this will avoid
5948 * inserting additional Sort nodes.
5949 *
5950 * This is how we implement a specific requirement from the SQL standard,
5951 * which says that when two or more windows are order-equivalent (i.e.
5952 * have matching partition and order clauses, even if their names or
5953 * framing clauses differ), then all peer rows must be presented in the
5954 * same order in all of them. If we allowed multiple sort nodes for such
5955 * cases, we'd risk having the peer rows end up in different orders in
5956 * equivalent windows due to sort instability. (See General Rule 4 of
5957 * <window clause> in SQL2008 - SQL2016.)
5958 *
5959 * Additionally, if the entire list of clauses of one window is a prefix
5960 * of another, put first the window with stronger sorting requirements.
5961 * This way we will first sort for stronger window, and won't have to sort
5962 * again for the weaker one.
5963 */
5964 qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);
5965
5966 /* build ordered list of the original WindowClause nodes */
5967 for (int i = 0; i < nActive; i++)
5968 result = lappend(result, actives[i].wc);
5969
5970 pfree(actives);
5971
5972 return result;
5973}
5974
5975/*
5976 * name_active_windows
5977 * Ensure all active windows have unique names.
5978 *
5979 * The parser will have checked that user-assigned window names are unique
5980 * within the Query. Here we assign made-up names to any unnamed
5981 * WindowClauses for the benefit of EXPLAIN. (We don't want to do this
5982 * at parse time, because it'd mess up decompilation of views.)
5983 *
5984 * activeWindows: result of select_active_windows
5985 */
5986static void
5988{
5989 int next_n = 1;
5990 char newname[16];
5991 ListCell *lc;
5992
5993 foreach(lc, activeWindows)
5994 {
5996
5997 /* Nothing to do if it has a name already. */
5998 if (wc->name)
5999 continue;
6000
6001 /* Select a name not currently present in the list. */
6002 for (;;)
6003 {
6004 ListCell *lc2;
6005
6006 snprintf(newname, sizeof(newname), "w%d", next_n++);
6007 foreach(lc2, activeWindows)
6008 {
6010
6011 if (wc2->name && strcmp(wc2->name, newname) == 0)
6012 break; /* matched */
6013 }
6014 if (lc2 == NULL)
6015 break; /* reached the end with no match */
6016 }
6017 wc->name = pstrdup(newname);
6018 }
6019}
6020
6021/*
6022 * common_prefix_cmp
6023 * QSort comparison function for WindowClauseSortData
6024 *
6025 * Sort the windows by the required sorting clauses. First, compare the sort
6026 * clauses themselves. Second, if one window's clauses are a prefix of another
6027 * one's clauses, put the window with more sort clauses first.
6028 *
6029 * We purposefully sort by the highest tleSortGroupRef first. Since
6030 * tleSortGroupRefs are assigned for the query's DISTINCT and ORDER BY first
6031 * and because here we sort the lowest tleSortGroupRefs last, if a
6032 * WindowClause is sharing a tleSortGroupRef with the query's DISTINCT or
6033 * ORDER BY clause, this makes it more likely that the final WindowAgg will
6034 * provide presorted input for the query's DISTINCT or ORDER BY clause, thus
6035 * reducing the total number of sorts required for the query.
6036 */
6037static int
6038common_prefix_cmp(const void *a, const void *b)
6039{
6040 const WindowClauseSortData *wcsa = a;
6041 const WindowClauseSortData *wcsb = b;
6042 ListCell *item_a;
6043 ListCell *item_b;
6044
6045 forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
6046 {
6049
6050 if (sca->tleSortGroupRef > scb->tleSortGroupRef)
6051 return -1;
6052 else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
6053 return 1;
6054 else if (sca->sortop > scb->sortop)
6055 return -1;
6056 else if (sca->sortop < scb->sortop)
6057 return 1;
6058 else if (sca->nulls_first && !scb->nulls_first)
6059 return -1;
6060 else if (!sca->nulls_first && scb->nulls_first)
6061 return 1;
6062 /* no need to compare eqop, since it is fully determined by sortop */
6063 }
6064
6065 if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
6066 return -1;
6067 else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
6068 return 1;
6069
6070 return 0;
6071}
6072
6073/*
6074 * make_window_input_target
6075 * Generate appropriate PathTarget for initial input to WindowAgg nodes.
6076 *
6077 * When the query has window functions, this function computes the desired
6078 * target to be computed by the node just below the first WindowAgg.
6079 * This tlist must contain all values needed to evaluate the window functions,
6080 * compute the final target list, and perform any required final sort step.
6081 * If multiple WindowAggs are needed, each intermediate one adds its window
6082 * function results onto this base tlist; only the topmost WindowAgg computes
6083 * the actual desired target list.
6084 *
6085 * This function is much like make_group_input_target, though not quite enough
6086 * like it to share code. As in that function, we flatten most expressions
6087 * into their component variables. But we do not want to flatten window
6088 * PARTITION BY/ORDER BY clauses, since that might result in multiple
6089 * evaluations of them, which would be bad (possibly even resulting in
6090 * inconsistent answers, if they contain volatile functions).
6091 * Also, we must not flatten GROUP BY clauses that were left unflattened by
6092 * make_group_input_target, because we may no longer have access to the
6093 * individual Vars in them.
6094 *
6095 * Another key difference from make_group_input_target is that we don't
6096 * flatten Aggref expressions, since those are to be computed below the
6097 * window functions and just referenced like Vars above that.
6098 *
6099 * 'final_target' is the query's final target list (in PathTarget form)
6100 * 'activeWindows' is the list of active windows previously identified by
6101 * select_active_windows.
6102 *
6103 * The result is the PathTarget to be computed by the plan node immediately
6104 * below the first WindowAgg node.
6105 */
6106static PathTarget *
6108 PathTarget *final_target,
6109 List *activeWindows)
6110{
6111 PathTarget *input_target;
6112 Bitmapset *sgrefs;
6113 List *flattenable_cols;
6114 List *flattenable_vars;
6115 int i;
6116 ListCell *lc;
6117
6118 Assert(root->parse->hasWindowFuncs);
6119
6120 /*
6121 * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
6122 * into a bitmapset for convenient reference below.
6123 */
6124 sgrefs = NULL;
6125 foreach(lc, activeWindows)
6126 {
6128 ListCell *lc2;
6129
6130 foreach(lc2, wc->partitionClause)
6131 {
6133
6134 sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6135 }
6136 foreach(lc2, wc->orderClause)
6137 {
6139
6140 sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6141 }
6142 }
6143
6144 /* Add in sortgroupref numbers of GROUP BY clauses, too */
6145 foreach(lc, root->processed_groupClause)
6146 {
6148
6149 sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
6150 }
6151
6152 /*
6153 * Construct a target containing all the non-flattenable targetlist items,
6154 * and save aside the others for a moment.
6155 */
6156 input_target = create_empty_pathtarget();
6157 flattenable_cols = NIL;
6158
6159 i = 0;
6160 foreach(lc, final_target->exprs)
6161 {
6162 Expr *expr = (Expr *) lfirst(lc);
6163 Index sgref = get_pathtarget_sortgroupref(final_target, i);
6164
6165 /*
6166 * Don't want to deconstruct window clauses or GROUP BY items. (Note
6167 * that such items can't contain window functions, so it's okay to
6168 * compute them below the WindowAgg nodes.)
6169 */
6170 if (sgref != 0 && bms_is_member(sgref, sgrefs))
6171 {
6172 /*
6173 * Don't want to deconstruct this value, so add it to the input
6174 * target as-is.
6175 */
6176 add_column_to_pathtarget(input_target, expr, sgref);
6177 }
6178 else
6179 {
6180 /*
6181 * Column is to be flattened, so just remember the expression for
6182 * later call to pull_var_clause.
6183 */
6184 flattenable_cols = lappend(flattenable_cols, expr);
6185 }
6186
6187 i++;
6188 }
6189
6190 /*
6191 * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
6192 * add them to the input target if not already present. (Some might be
6193 * there already because they're used directly as window/group clauses.)
6194 *
6195 * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
6196 * Aggrefs are placed in the Agg node's tlist and not left to be computed
6197 * at higher levels. On the other hand, we should recurse into
6198 * WindowFuncs to make sure their input expressions are available.
6199 */
6200 flattenable_vars = pull_var_clause((Node *) flattenable_cols,
6204 add_new_columns_to_pathtarget(input_target, flattenable_vars);
6205
6206 /* clean up cruft */
6207 list_free(flattenable_vars);
6208 list_free(flattenable_cols);
6209
6210 /* XXX this causes some redundant cost calculation ... */
6211 return set_pathtarget_cost_width(root, input_target);
6212}
6213
6214/*
6215 * make_pathkeys_for_window
6216 * Create a pathkeys list describing the required input ordering
6217 * for the given WindowClause.
6218 *
6219 * Modifies wc's partitionClause to remove any clauses which are deemed
6220 * redundant by the pathkey logic.
6221 *
6222 * The required ordering is first the PARTITION keys, then the ORDER keys.
6223 * In the future we might try to implement windowing using hashing, in which
6224 * case the ordering could be relaxed, but for now we always sort.
6225 */
6226static List *
6228 List *tlist)
6229{
6230 List *window_pathkeys = NIL;
6231
6232 /* Throw error if can't sort */
6234 ereport(ERROR,
6235 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6236 errmsg("could not implement window PARTITION BY"),
6237 errdetail("Window partitioning columns must be of sortable datatypes.")));
6239 ereport(ERROR,
6240 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6241 errmsg("could not implement window ORDER BY"),
6242 errdetail("Window ordering columns must be of sortable datatypes.")));
6243
6244 /*
6245 * First fetch the pathkeys for the PARTITION BY clause. We can safely
6246 * remove any clauses from the wc->partitionClause for redundant pathkeys.
6247 */
6248 if (wc->partitionClause != NIL)
6249 {
6250 bool sortable;
6251
6253 &wc->partitionClause,
6254 tlist,
6255 true,
6256 false,
6257 &sortable,
6258 false);
6259
6260 Assert(sortable);
6261 }
6262
6263 /*
6264 * In principle, we could also consider removing redundant ORDER BY items
6265 * too as doing so does not alter the result of peer row checks done by
6266 * the executor. However, we must *not* remove the ordering column for
6267 * RANGE OFFSET cases, as the executor needs that for in_range tests even
6268 * if it's known to be equal to some partitioning column.
6269 */
6270 if (wc->orderClause != NIL)
6271 {
6272 List *orderby_pathkeys;
6273
6274 orderby_pathkeys = make_pathkeys_for_sortclauses(root,
6275 wc->orderClause,
6276 tlist);
6277
6278 /* Okay, make the combined pathkeys */
6279 if (window_pathkeys != NIL)
6280 window_pathkeys = append_pathkeys(window_pathkeys, orderby_pathkeys);
6281 else
6282 window_pathkeys = orderby_pathkeys;
6283 }
6284
6285 return window_pathkeys;
6286}
6287
6288/*
6289 * make_sort_input_target
6290 * Generate appropriate PathTarget for initial input to Sort step.
6291 *
6292 * If the query has ORDER BY, this function chooses the target to be computed
6293 * by the node just below the Sort (and DISTINCT, if any, since Unique can't
6294 * project) steps. This might or might not be identical to the query's final
6295 * output target.
6296 *
6297 * The main argument for keeping the sort-input tlist the same as the final
6298 * is that we avoid a separate projection node (which will be needed if
6299 * they're different, because Sort can't project). However, there are also
6300 * advantages to postponing tlist evaluation till after the Sort: it ensures
6301 * a consistent order of evaluation for any volatile functions in the tlist,
6302 * and if there's also a LIMIT, we can stop the query without ever computing
6303 * tlist functions for later rows, which is beneficial for both volatile and
6304 * expensive functions.
6305 *
6306 * Our current policy is to postpone volatile expressions till after the sort
6307 * unconditionally (assuming that that's possible, ie they are in plain tlist
6308 * columns and not ORDER BY/GROUP BY/DISTINCT columns). We also prefer to
6309 * postpone set-returning expressions, because running them beforehand would
6310 * bloat the sort dataset, and because it might cause unexpected output order
6311 * if the sort isn't stable. However there's a constraint on that: all SRFs
6312 * in the tlist should be evaluated at the same plan step, so that they can
6313 * run in sync in nodeProjectSet. So if any SRFs are in sort columns, we
6314 * mustn't postpone any SRFs. (Note that in principle that policy should
6315 * probably get applied to the group/window input targetlists too, but we
6316 * have not done that historically.) Lastly, expensive expressions are
6317 * postponed if there is a LIMIT, or if root->tuple_fraction shows that
6318 * partial evaluation of the query is possible (if neither is true, we expect
6319 * to have to evaluate the expressions for every row anyway), or if there are
6320 * any volatile or set-returning expressions (since once we've put in a
6321 * projection at all, it won't cost any more to postpone more stuff).
6322 *
6323 * Another issue that could potentially be considered here is that
6324 * evaluating tlist expressions could result in data that's either wider
6325 * or narrower than the input Vars, thus changing the volume of data that
6326 * has to go through the Sort. However, we usually have only a very bad
6327 * idea of the output width of any expression more complex than a Var,
6328 * so for now it seems too risky to try to optimize on that basis.
6329 *
6330 * Note that if we do produce a modified sort-input target, and then the
6331 * query ends up not using an explicit Sort, no particular harm is done:
6332 * we'll initially use the modified target for the preceding path nodes,
6333 * but then change them to the final target with apply_projection_to_path.
6334 * Moreover, in such a case the guarantees about evaluation order of
6335 * volatile functions still hold, since the rows are sorted already.
6336 *
6337 * This function has some things in common with make_group_input_target and
6338 * make_window_input_target, though the detailed rules for what to do are
6339 * different. We never flatten/postpone any grouping or ordering columns;
6340 * those are needed before the sort. If we do flatten a particular
6341 * expression, we leave Aggref and WindowFunc nodes alone, since those were
6342 * computed earlier.
6343 *
6344 * 'final_target' is the query's final target list (in PathTarget form)
6345 * 'have_postponed_srfs' is an output argument, see below
6346 *
6347 * The result is the PathTarget to be computed by the plan node immediately
6348 * below the Sort step (and the Distinct step, if any). This will be
6349 * exactly final_target if we decide a projection step wouldn't be helpful.
6350 *
6351 * In addition, *have_postponed_srfs is set to true if we choose to postpone
6352 * any set-returning functions to after the Sort.
6353 */
6354static PathTarget *
6356 PathTarget *final_target,
6357 bool *have_postponed_srfs)
6358{
6359 Query *parse = root->parse;
6360 PathTarget *input_target;
6361 int ncols;
6362 bool *col_is_srf;
6363 bool *postpone_col;
6364 bool have_srf;
6365 bool have_volatile;
6366 bool have_expensive;
6367 bool have_srf_sortcols;
6368 bool postpone_srfs;
6369 List *postponable_cols;
6370 List *postponable_vars;
6371 int i;
6372 ListCell *lc;
6373
6374 /* Shouldn't get here unless query has ORDER BY */
6375 Assert(parse->sortClause);
6376
6377 *have_postponed_srfs = false; /* default result */
6378
6379 /* Inspect tlist and collect per-column information */
6380 ncols = list_length(final_target->exprs);
6381 col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
6382 postpone_col = (bool *) palloc0(ncols * sizeof(bool));
6383 have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
6384
6385 i = 0;
6386 foreach(lc, final_target->exprs)
6387 {
6388 Expr *expr = (Expr *) lfirst(lc);
6389
6390 /*
6391 * If the column has a sortgroupref, assume it has to be evaluated
6392 * before sorting. Generally such columns would be ORDER BY, GROUP
6393 * BY, etc targets. One exception is columns that were removed from
6394 * GROUP BY by remove_useless_groupby_columns() ... but those would
6395 * only be Vars anyway. There don't seem to be any cases where it
6396 * would be worth the trouble to double-check.
6397 */
6398 if (get_pathtarget_sortgroupref(final_target, i) == 0)
6399 {
6400 /*
6401 * Check for SRF or volatile functions. Check the SRF case first
6402 * because we must know whether we have any postponed SRFs.
6403 */
6404 if (parse->hasTargetSRFs &&
6405 expression_returns_set((Node *) expr))
6406 {
6407 /* We'll decide below whether these are postponable */
6408 col_is_srf[i] = true;
6409 have_srf = true;
6410 }
6411 else if (contain_volatile_functions((Node *) expr))
6412 {
6413 /* Unconditionally postpone */
6414 postpone_col[i] = true;
6415 have_volatile = true;
6416 }
6417 else
6418 {
6419 /*
6420 * Else check the cost. XXX it's annoying to have to do this
6421 * when set_pathtarget_cost_width() just did it. Refactor to
6422 * allow sharing the work?
6423 */
6424 QualCost cost;
6425
6426 cost_qual_eval_node(&cost, (Node *) expr, root);
6427
6428 /*
6429 * We arbitrarily define "expensive" as "more than 10X
6430 * cpu_operator_cost". Note this will take in any PL function
6431 * with default cost.
6432 */
6433 if (cost.per_tuple > 10 * cpu_operator_cost)
6434 {
6435 postpone_col[i] = true;
6436 have_expensive = true;
6437 }
6438 }
6439 }
6440 else
6441 {
6442 /* For sortgroupref cols, just check if any contain SRFs */
6443 if (!have_srf_sortcols &&
6444 parse->hasTargetSRFs &&
6445 expression_returns_set((Node *) expr))
6446 have_srf_sortcols = true;
6447 }
6448
6449 i++;
6450 }
6451
6452 /*
6453 * We can postpone SRFs if we have some but none are in sortgroupref cols.
6454 */
6455 postpone_srfs = (have_srf && !have_srf_sortcols);
6456
6457 /*
6458 * If we don't need a post-sort projection, just return final_target.
6459 */
6460 if (!(postpone_srfs || have_volatile ||
6461 (have_expensive &&
6462 (parse->limitCount || root->tuple_fraction > 0))))
6463 return final_target;
6464
6465 /*
6466 * Report whether the post-sort projection will contain set-returning
6467 * functions. This is important because it affects whether the Sort can
6468 * rely on the query's LIMIT (if any) to bound the number of rows it needs
6469 * to return.
6470 */
6471 *have_postponed_srfs = postpone_srfs;
6472
6473 /*
6474 * Construct the sort-input target, taking all non-postponable columns and
6475 * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
6476 * the postponable ones.
6477 */
6478 input_target = create_empty_pathtarget();
6479 postponable_cols = NIL;
6480
6481 i = 0;
6482 foreach(lc, final_target->exprs)
6483 {
6484 Expr *expr = (Expr *) lfirst(lc);
6485
6486 if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
6487 postponable_cols = lappend(postponable_cols, expr);
6488 else
6489 add_column_to_pathtarget(input_target, expr,
6490 get_pathtarget_sortgroupref(final_target, i));
6491
6492 i++;
6493 }
6494
6495 /*
6496 * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
6497 * postponable columns, and add them to the sort-input target if not
6498 * already present. (Some might be there already.) We mustn't
6499 * deconstruct Aggrefs or WindowFuncs here, since the projection node
6500 * would be unable to recompute them.
6501 */
6502 postponable_vars = pull_var_clause((Node *) postponable_cols,
6506 add_new_columns_to_pathtarget(input_target, postponable_vars);
6507
6508 /* clean up cruft */
6509 list_free(postponable_vars);
6510 list_free(postponable_cols);
6511
6512 /* XXX this represents even more redundant cost calculation ... */
6513 return set_pathtarget_cost_width(root, input_target);
6514}
6515
6516/*
6517 * get_cheapest_fractional_path
6518 * Find the cheapest path for retrieving a specified fraction of all
6519 * the tuples expected to be returned by the given relation.
6520 *
6521 * Do not consider parameterized paths. If the caller needs a path for upper
6522 * rel, it can't have parameterized paths. If the caller needs an append
6523 * subpath, it could become limited by the treatment of similar
6524 * parameterization of all the subpaths.
6525 *
6526 * We interpret tuple_fraction the same way as grouping_planner.
6527 *
6528 * We assume set_cheapest() has been run on the given rel.
6529 */
6530Path *
6531get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
6532{
6533 Path *best_path = rel->cheapest_total_path;
6534 ListCell *l;
6535
6536 /* If all tuples will be retrieved, just return the cheapest-total path */
6537 if (tuple_fraction <= 0.0)
6538 return best_path;
6539
6540 /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6541 if (tuple_fraction >= 1.0 && best_path->rows > 0)
6542 tuple_fraction /= best_path->rows;
6543
6544 foreach(l, rel->pathlist)
6545 {
6546 Path *path = (Path *) lfirst(l);
6547
6548 if (path->param_info)
6549 continue;
6550
6551 if (path == rel->cheapest_total_path ||
6552 compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6553 continue;
6554
6555 best_path = path;
6556 }
6557
6558 return best_path;
6559}
6560
6561/*
6562 * adjust_paths_for_srfs
6563 * Fix up the Paths of the given upperrel to handle tSRFs properly.
6564 *
6565 * The executor can only handle set-returning functions that appear at the
6566 * top level of the targetlist of a ProjectSet plan node. If we have any SRFs
6567 * that are not at top level, we need to split up the evaluation into multiple
6568 * plan levels in which each level satisfies this constraint. This function
6569 * modifies each Path of an upperrel that (might) compute any SRFs in its
6570 * output tlist to insert appropriate projection steps.
6571 *
6572 * The given targets and targets_contain_srfs lists are from
6573 * split_pathtarget_at_srfs(). We assume the existing Paths emit the first
6574 * target in targets.
6575 */
6576static void
6578 List *targets, List *targets_contain_srfs)
6579{
6580 ListCell *lc;
6581
6582 Assert(list_length(targets) == list_length(targets_contain_srfs));
6583 Assert(!linitial_int(targets_contain_srfs));
6584
6585 /* If no SRFs appear at this plan level, nothing to do */
6586 if (list_length(targets) == 1)
6587 return;
6588
6589 /*
6590 * Stack SRF-evaluation nodes atop each path for the rel.
6591 *
6592 * In principle we should re-run set_cheapest() here to identify the
6593 * cheapest path, but it seems unlikely that adding the same tlist eval
6594 * costs to all the paths would change that, so we don't bother. Instead,
6595 * just assume that the cheapest-startup and cheapest-total paths remain
6596 * so. (There should be no parameterized paths anymore, so we needn't
6597 * worry about updating cheapest_parameterized_paths.)
6598 */
6599 foreach(lc, rel->pathlist)
6600 {
6601 Path *subpath = (Path *) lfirst(lc);
6602 Path *newpath = subpath;
6603 ListCell *lc1,
6604 *lc2;
6605
6606 Assert(subpath->param_info == NULL);
6607 forboth(lc1, targets, lc2, targets_contain_srfs)
6608 {
6609 PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6610 bool contains_srfs = (bool) lfirst_int(lc2);
6611
6612 /* If this level doesn't contain SRFs, do regular projection */
6613 if (contains_srfs)
6614 newpath = (Path *) create_set_projection_path(root,
6615 rel,
6616 newpath,
6617 thistarget);
6618 else
6619 newpath = (Path *) apply_projection_to_path(root,
6620 rel,
6621 newpath,
6622 thistarget);
6623 }
6624 lfirst(lc) = newpath;
6625 if (subpath == rel->cheapest_startup_path)
6626 rel->cheapest_startup_path = newpath;
6627 if (subpath == rel->cheapest_total_path)
6628 rel->cheapest_total_path = newpath;
6629 }
6630
6631 /* Likewise for partial paths, if any */
6632 foreach(lc, rel->partial_pathlist)
6633 {
6634 Path *subpath = (Path *) lfirst(lc);
6635 Path *newpath = subpath;
6636 ListCell *lc1,
6637 *lc2;
6638
6639 Assert(subpath->param_info == NULL);
6640 forboth(lc1, targets, lc2, targets_contain_srfs)
6641 {
6642 PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6643 bool contains_srfs = (bool) lfirst_int(lc2);
6644
6645 /* If this level doesn't contain SRFs, do regular projection */
6646 if (contains_srfs)
6647 newpath = (Path *) create_set_projection_path(root,
6648 rel,
6649 newpath,
6650 thistarget);
6651 else
6652 {
6653 /* avoid apply_projection_to_path, in case of multiple refs */
6654 newpath = (Path *) create_projection_path(root,
6655 rel,
6656 newpath,
6657 thistarget);
6658 }
6659 }
6660 lfirst(lc) = newpath;
6661 }
6662}
6663
6664/*
6665 * expression_planner
6666 * Perform planner's transformations on a standalone expression.
6667 *
6668 * Various utility commands need to evaluate expressions that are not part
6669 * of a plannable query. They can do so using the executor's regular
6670 * expression-execution machinery, but first the expression has to be fed
6671 * through here to transform it from parser output to something executable.
6672 *
6673 * Currently, we disallow sublinks in standalone expressions, so there's no
6674 * real "planning" involved here. (That might not always be true though.)
6675 * What we must do is run eval_const_expressions to ensure that any function
6676 * calls are converted to positional notation and function default arguments
6677 * get inserted. The fact that constant subexpressions get simplified is a
6678 * side-effect that is useful when the expression will get evaluated more than
6679 * once. Also, we must fix operator function IDs.
6680 *
6681 * This does not return any information about dependencies of the expression.
6682 * Hence callers should use the results only for the duration of the current
6683 * query. Callers that would like to cache the results for longer should use
6684 * expression_planner_with_deps, probably via the plancache.
6685 *
6686 * Note: this must not make any damaging changes to the passed-in expression
6687 * tree. (It would actually be okay to apply fix_opfuncids to it, but since
6688 * we first do an expression_tree_mutator-based walk, what is returned will
6689 * be a new node tree.) The result is constructed in the current memory
6690 * context; beware that this can leak a lot of additional stuff there, too.
6691 */
6692Expr *
6694{
6695 Node *result;
6696
6697 /*
6698 * Convert named-argument function calls, insert default arguments and
6699 * simplify constant subexprs
6700 */
6701 result = eval_const_expressions(NULL, (Node *) expr);
6702
6703 /* Fill in opfuncid values if missing */
6704 fix_opfuncids(result);
6705
6706 return (Expr *) result;
6707}
6708
6709/*
6710 * expression_planner_with_deps
6711 * Perform planner's transformations on a standalone expression,
6712 * returning expression dependency information along with the result.
6713 *
6714 * This is identical to expression_planner() except that it also returns
6715 * information about possible dependencies of the expression, ie identities of
6716 * objects whose definitions affect the result. As in a PlannedStmt, these
6717 * are expressed as a list of relation Oids and a list of PlanInvalItems.
6718 */
6719Expr *
6721 List **relationOids,
6722 List **invalItems)
6723{
6724 Node *result;
6725 PlannerGlobal glob;
6727
6728 /* Make up dummy planner state so we can use setrefs machinery */
6729 MemSet(&glob, 0, sizeof(glob));
6730 glob.type = T_PlannerGlobal;
6731 glob.relationOids = NIL;
6732 glob.invalItems = NIL;
6733
6734 MemSet(&root, 0, sizeof(root));
6735 root.type = T_PlannerInfo;
6736 root.glob = &glob;
6737
6738 /*
6739 * Convert named-argument function calls, insert default arguments and
6740 * simplify constant subexprs. Collect identities of inlined functions
6741 * and elided domains, too.
6742 */
6743 result = eval_const_expressions(&root, (Node *) expr);
6744
6745 /* Fill in opfuncid values if missing */
6746 fix_opfuncids(result);
6747
6748 /*
6749 * Now walk the finished expression to find anything else we ought to
6750 * record as an expression dependency.
6751 */
6752 (void) extract_query_dependencies_walker(result, &root);
6753
6754 *relationOids = glob.relationOids;
6755 *invalItems = glob.invalItems;
6756
6757 return (Expr *) result;
6758}
6759
6760
6761/*
6762 * plan_cluster_use_sort
6763 * Use the planner to decide how CLUSTER should implement sorting
6764 *
6765 * tableOid is the OID of a table to be clustered on its index indexOid
6766 * (which is already known to be a btree index). Decide whether it's
6767 * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER.
6768 * Return true to use sorting, false to use an indexscan.
6769 *
6770 * Note: caller had better already hold some type of lock on the table.
6771 */
6772bool
6773plan_cluster_use_sort(Oid tableOid, Oid indexOid)
6774{
6776 Query *query;
6777 PlannerGlobal *glob;
6778 RangeTblEntry *rte;
6779 RelOptInfo *rel;
6780 IndexOptInfo *indexInfo;
6781 QualCost indexExprCost;
6782 Cost comparisonCost;
6783 Path *seqScanPath;
6784 Path seqScanAndSortPath;
6785 IndexPath *indexScanPath;
6786 ListCell *lc;
6787
6788 /* We can short-circuit the cost comparison if indexscans are disabled */
6789 if (!enable_indexscan)
6790 return true; /* use sort */
6791
6792 /* Set up mostly-dummy planner state */
6793 query = makeNode(Query);
6794 query->commandType = CMD_SELECT;
6795
6796 glob = makeNode(PlannerGlobal);
6797
6799 root->parse = query;
6800 root->glob = glob;
6801 root->query_level = 1;
6802 root->planner_cxt = CurrentMemoryContext;
6803 root->wt_param_id = -1;
6804 root->join_domains = list_make1(makeNode(JoinDomain));
6805
6806 /* Build a minimal RTE for the rel */
6807 rte = makeNode(RangeTblEntry);
6808 rte->rtekind = RTE_RELATION;
6809 rte->relid = tableOid;
6810 rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6811 rte->rellockmode = AccessShareLock;
6812 rte->lateral = false;
6813 rte->inh = false;
6814 rte->inFromCl = true;
6815 query->rtable = list_make1(rte);
6816 addRTEPermissionInfo(&query->rteperminfos, rte);
6817
6818 /* Set up RTE/RelOptInfo arrays */
6820
6821 /* Build RelOptInfo */
6822 rel = build_simple_rel(root, 1, NULL);
6823
6824 /* Locate IndexOptInfo for the target index */
6825 indexInfo = NULL;
6826 foreach(lc, rel->indexlist)
6827 {
6828 indexInfo = lfirst_node(IndexOptInfo, lc);
6829 if (indexInfo->indexoid == indexOid)
6830 break;
6831 }
6832
6833 /*
6834 * It's possible that get_relation_info did not generate an IndexOptInfo
6835 * for the desired index; this could happen if it's not yet reached its
6836 * indcheckxmin usability horizon, or if it's a system index and we're
6837 * ignoring system indexes. In such cases we should tell CLUSTER to not
6838 * trust the index contents but use seqscan-and-sort.
6839 */
6840 if (lc == NULL) /* not in the list? */
6841 return true; /* use sort */
6842
6843 /*
6844 * Rather than doing all the pushups that would be needed to use
6845 * set_baserel_size_estimates, just do a quick hack for rows and width.
6846 */
6847 rel->rows = rel->tuples;
6848 rel->reltarget->width = get_relation_data_width(tableOid, NULL);
6849
6850 root->total_table_pages = rel->pages;
6851
6852 /*
6853 * Determine eval cost of the index expressions, if any. We need to
6854 * charge twice that amount for each tuple comparison that happens during
6855 * the sort, since tuplesort.c will have to re-evaluate the index
6856 * expressions each time. (XXX that's pretty inefficient...)
6857 */
6858 cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
6859 comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
6860
6861 /* Estimate the cost of seq scan + sort */
6862 seqScanPath = create_seqscan_path(root, rel, NULL, 0);
6863 cost_sort(&seqScanAndSortPath, root, NIL,
6864 seqScanPath->disabled_nodes,
6865 seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
6866 comparisonCost, maintenance_work_mem, -1.0);
6867
6868 /* Estimate the cost of index scan */
6869 indexScanPath = create_index_path(root, indexInfo,
6870 NIL, NIL, NIL, NIL,
6871 ForwardScanDirection, false,
6872 NULL, 1.0, false);
6873
6874 return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
6875}
6876
6877/*
6878 * plan_create_index_workers
6879 * Use the planner to decide how many parallel worker processes
6880 * CREATE INDEX should request for use
6881 *
6882 * tableOid is the table on which the index is to be built. indexOid is the
6883 * OID of an index to be created or reindexed (which must be an index with
6884 * support for parallel builds - currently btree or BRIN).
6885 *
6886 * Return value is the number of parallel worker processes to request. It
6887 * may be unsafe to proceed if this is 0. Note that this does not include the
6888 * leader participating as a worker (value is always a number of parallel
6889 * worker processes).
6890 *
6891 * Note: caller had better already hold some type of lock on the table and
6892 * index.
6893 */
6894int
6896{
6898 Query *query;
6899 PlannerGlobal *glob;
6900 RangeTblEntry *rte;
6901 Relation heap;
6903 RelOptInfo *rel;
6904 int parallel_workers;
6905 BlockNumber heap_blocks;
6906 double reltuples;
6907 double allvisfrac;
6908
6909 /*
6910 * We don't allow performing parallel operation in standalone backend or
6911 * when parallelism is disabled.
6912 */
6914 return 0;
6915
6916 /* Set up largely-dummy planner state */
6917 query = makeNode(Query);
6918 query->commandType = CMD_SELECT;
6919
6920 glob = makeNode(PlannerGlobal);
6921
6923 root->parse = query;
6924 root->glob = glob;
6925 root->query_level = 1;
6926 root->planner_cxt = CurrentMemoryContext;
6927 root->wt_param_id = -1;
6928 root->join_domains = list_make1(makeNode(JoinDomain));
6929
6930 /*
6931 * Build a minimal RTE.
6932 *
6933 * Mark the RTE with inh = true. This is a kludge to prevent
6934 * get_relation_info() from fetching index info, which is necessary
6935 * because it does not expect that any IndexOptInfo is currently
6936 * undergoing REINDEX.
6937 */
6938 rte = makeNode(RangeTblEntry);
6939 rte->rtekind = RTE_RELATION;
6940 rte->relid = tableOid;
6941 rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6942 rte->rellockmode = AccessShareLock;
6943 rte->lateral = false;
6944 rte->inh = true;
6945 rte->inFromCl = true;
6946 query->rtable = list_make1(rte);
6947 addRTEPermissionInfo(&query->rteperminfos, rte);
6948
6949 /* Set up RTE/RelOptInfo arrays */
6951
6952 /* Build RelOptInfo */
6953 rel = build_simple_rel(root, 1, NULL);
6954
6955 /* Rels are assumed already locked by the caller */
6956 heap = table_open(tableOid, NoLock);
6957 index = index_open(indexOid, NoLock);
6958
6959 /*
6960 * Determine if it's safe to proceed.
6961 *
6962 * Currently, parallel workers can't access the leader's temporary tables.
6963 * Furthermore, any index predicate or index expressions must be parallel
6964 * safe.
6965 */
6966 if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
6969 {
6970 parallel_workers = 0;
6971 goto done;
6972 }
6973
6974 /*
6975 * If parallel_workers storage parameter is set for the table, accept that
6976 * as the number of parallel worker processes to launch (though still cap
6977 * at max_parallel_maintenance_workers). Note that we deliberately do not
6978 * consider any other factor when parallel_workers is set. (e.g., memory
6979 * use by workers.)
6980 */
6981 if (rel->rel_parallel_workers != -1)
6982 {
6983 parallel_workers = Min(rel->rel_parallel_workers,
6985 goto done;
6986 }
6987
6988 /*
6989 * Estimate heap relation size ourselves, since rel->pages cannot be
6990 * trusted (heap RTE was marked as inheritance parent)
6991 */
6992 estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
6993
6994 /*
6995 * Determine number of workers to scan the heap relation using generic
6996 * model
6997 */
6998 parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
7000
7001 /*
7002 * Cap workers based on available maintenance_work_mem as needed.
7003 *
7004 * Note that each tuplesort participant receives an even share of the
7005 * total maintenance_work_mem budget. Aim to leave participants
7006 * (including the leader as a participant) with no less than 32MB of
7007 * memory. This leaves cases where maintenance_work_mem is set to 64MB
7008 * immediately past the threshold of being capable of launching a single
7009 * parallel worker to sort.
7010 */
7011 while (parallel_workers > 0 &&
7012 maintenance_work_mem / (parallel_workers + 1) < 32 * 1024)
7013 parallel_workers--;
7014
7015done:
7017 table_close(heap, NoLock);
7018
7019 return parallel_workers;
7020}
7021
7022/*
7023 * add_paths_to_grouping_rel
7024 *
7025 * Add non-partial paths to grouping relation.
7026 */
7027static void
7029 RelOptInfo *grouped_rel,
7030 RelOptInfo *partially_grouped_rel,
7031 const AggClauseCosts *agg_costs,
7032 grouping_sets_data *gd, double dNumGroups,
7033 GroupPathExtraData *extra)
7034{
7035 Query *parse = root->parse;
7036 Path *cheapest_path = input_rel->cheapest_total_path;
7037 ListCell *lc;
7038 bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7039 bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7040 List *havingQual = (List *) extra->havingQual;
7041 AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7042
7043 if (can_sort)
7044 {
7045 /*
7046 * Use any available suitably-sorted path as input, and also consider
7047 * sorting the cheapest-total path and incremental sort on any paths
7048 * with presorted keys.
7049 */
7050 foreach(lc, input_rel->pathlist)
7051 {
7052 ListCell *lc2;
7053 Path *path = (Path *) lfirst(lc);
7054 Path *path_save = path;
7055 List *pathkey_orderings = NIL;
7056
7057 /* generate alternative group orderings that might be useful */
7058 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7059
7060 Assert(list_length(pathkey_orderings) > 0);
7061
7062 foreach(lc2, pathkey_orderings)
7063 {
7064 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7065
7066 /* restore the path (we replace it in the loop) */
7067 path = path_save;
7068
7069 path = make_ordered_path(root,
7070 grouped_rel,
7071 path,
7072 cheapest_path,
7073 info->pathkeys,
7074 -1.0);
7075 if (path == NULL)
7076 continue;
7077
7078 /* Now decide what to stick atop it */
7079 if (parse->groupingSets)
7080 {
7081 consider_groupingsets_paths(root, grouped_rel,
7082 path, true, can_hash,
7083 gd, agg_costs, dNumGroups);
7084 }
7085 else if (parse->hasAggs)
7086 {
7087 /*
7088 * We have aggregation, possibly with plain GROUP BY. Make
7089 * an AggPath.
7090 */
7091 add_path(grouped_rel, (Path *)
7093 grouped_rel,
7094 path,
7095 grouped_rel->reltarget,
7096 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7098 info->clauses,
7099 havingQual,
7100 agg_costs,
7101 dNumGroups));
7102 }
7103 else if (parse->groupClause)
7104 {
7105 /*
7106 * We have GROUP BY without aggregation or grouping sets.
7107 * Make a GroupPath.
7108 */
7109 add_path(grouped_rel, (Path *)
7111 grouped_rel,
7112 path,
7113 info->clauses,
7114 havingQual,
7115 dNumGroups));
7116 }
7117 else
7118 {
7119 /* Other cases should have been handled above */
7120 Assert(false);
7121 }
7122 }
7123 }
7124
7125 /*
7126 * Instead of operating directly on the input relation, we can
7127 * consider finalizing a partially aggregated path.
7128 */
7129 if (partially_grouped_rel != NULL)
7130 {
7131 foreach(lc, partially_grouped_rel->pathlist)
7132 {
7133 ListCell *lc2;
7134 Path *path = (Path *) lfirst(lc);
7135 Path *path_save = path;
7136 List *pathkey_orderings = NIL;
7137
7138 /* generate alternative group orderings that might be useful */
7139 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7140
7141 Assert(list_length(pathkey_orderings) > 0);
7142
7143 /* process all potentially interesting grouping reorderings */
7144 foreach(lc2, pathkey_orderings)
7145 {
7146 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7147
7148 /* restore the path (we replace it in the loop) */
7149 path = path_save;
7150
7151 path = make_ordered_path(root,
7152 grouped_rel,
7153 path,
7154 partially_grouped_rel->cheapest_total_path,
7155 info->pathkeys,
7156 -1.0);
7157
7158 if (path == NULL)
7159 continue;
7160
7161 if (parse->hasAggs)
7162 add_path(grouped_rel, (Path *)
7164 grouped_rel,
7165 path,
7166 grouped_rel->reltarget,
7167 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7169 info->clauses,
7170 havingQual,
7171 agg_final_costs,
7172 dNumGroups));
7173 else
7174 add_path(grouped_rel, (Path *)
7176 grouped_rel,
7177 path,
7178 info->clauses,
7179 havingQual,
7180 dNumGroups));
7181
7182 }
7183 }
7184 }
7185 }
7186
7187 if (can_hash)
7188 {
7189 if (parse->groupingSets)
7190 {
7191 /*
7192 * Try for a hash-only groupingsets path over unsorted input.
7193 */
7194 consider_groupingsets_paths(root, grouped_rel,
7195 cheapest_path, false, true,
7196 gd, agg_costs, dNumGroups);
7197 }
7198 else
7199 {
7200 /*
7201 * Generate a HashAgg Path. We just need an Agg over the
7202 * cheapest-total input path, since input order won't matter.
7203 */
7204 add_path(grouped_rel, (Path *)
7205 create_agg_path(root, grouped_rel,
7206 cheapest_path,
7207 grouped_rel->reltarget,
7208 AGG_HASHED,
7210 root->processed_groupClause,
7211 havingQual,
7212 agg_costs,
7213 dNumGroups));
7214 }
7215
7216 /*
7217 * Generate a Finalize HashAgg Path atop of the cheapest partially
7218 * grouped path, assuming there is one
7219 */
7220 if (partially_grouped_rel && partially_grouped_rel->pathlist)
7221 {
7222 Path *path = partially_grouped_rel->cheapest_total_path;
7223
7224 add_path(grouped_rel, (Path *)
7226 grouped_rel,
7227 path,
7228 grouped_rel->reltarget,
7229 AGG_HASHED,
7231 root->processed_groupClause,
7232 havingQual,
7233 agg_final_costs,
7234 dNumGroups));
7235 }
7236 }
7237
7238 /*
7239 * When partitionwise aggregate is used, we might have fully aggregated
7240 * paths in the partial pathlist, because add_paths_to_append_rel() will
7241 * consider a path for grouped_rel consisting of a Parallel Append of
7242 * non-partial paths from each child.
7243 */
7244 if (grouped_rel->partial_pathlist != NIL)
7245 gather_grouping_paths(root, grouped_rel);
7246}
7247
7248/*
7249 * create_partial_grouping_paths
7250 *
7251 * Create a new upper relation representing the result of partial aggregation
7252 * and populate it with appropriate paths. Note that we don't finalize the
7253 * lists of paths here, so the caller can add additional partial or non-partial
7254 * paths and must afterward call gather_grouping_paths and set_cheapest on
7255 * the returned upper relation.
7256 *
7257 * All paths for this new upper relation -- both partial and non-partial --
7258 * have been partially aggregated but require a subsequent FinalizeAggregate
7259 * step.
7260 *
7261 * NB: This function is allowed to return NULL if it determines that there is
7262 * no real need to create a new RelOptInfo.
7263 */
7264static RelOptInfo *
7266 RelOptInfo *grouped_rel,
7267 RelOptInfo *input_rel,
7269 GroupPathExtraData *extra,
7270 bool force_rel_creation)
7271{
7272 Query *parse = root->parse;
7273 RelOptInfo *partially_grouped_rel;
7274 AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7275 AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7276 Path *cheapest_partial_path = NULL;
7277 Path *cheapest_total_path = NULL;
7278 double dNumPartialGroups = 0;
7279 double dNumPartialPartialGroups = 0;
7280 ListCell *lc;
7281 bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7282 bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7283
7284 /*
7285 * Consider whether we should generate partially aggregated non-partial
7286 * paths. We can only do this if we have a non-partial path, and only if
7287 * the parent of the input rel is performing partial partitionwise
7288 * aggregation. (Note that extra->patype is the type of partitionwise
7289 * aggregation being used at the parent level, not this level.)
7290 */
7291 if (input_rel->pathlist != NIL &&
7293 cheapest_total_path = input_rel->cheapest_total_path;
7294
7295 /*
7296 * If parallelism is possible for grouped_rel, then we should consider
7297 * generating partially-grouped partial paths. However, if the input rel
7298 * has no partial paths, then we can't.
7299 */
7300 if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7301 cheapest_partial_path = linitial(input_rel->partial_pathlist);
7302
7303 /*
7304 * If we can't partially aggregate partial paths, and we can't partially
7305 * aggregate non-partial paths, then don't bother creating the new
7306 * RelOptInfo at all, unless the caller specified force_rel_creation.
7307 */
7308 if (cheapest_total_path == NULL &&
7309 cheapest_partial_path == NULL &&
7310 !force_rel_creation)
7311 return NULL;
7312
7313 /*
7314 * Build a new upper relation to represent the result of partially
7315 * aggregating the rows from the input relation.
7316 */
7317 partially_grouped_rel = fetch_upper_rel(root,
7319 grouped_rel->relids);
7320 partially_grouped_rel->consider_parallel =
7321 grouped_rel->consider_parallel;
7322 partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7323 partially_grouped_rel->serverid = grouped_rel->serverid;
7324 partially_grouped_rel->userid = grouped_rel->userid;
7325 partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7326 partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7327
7328 /*
7329 * Build target list for partial aggregate paths. These paths cannot just
7330 * emit the same tlist as regular aggregate paths, because (1) we must
7331 * include Vars and Aggrefs needed in HAVING, which might not appear in
7332 * the result tlist, and (2) the Aggrefs must be set in partial mode.
7333 */
7334 partially_grouped_rel->reltarget =
7336 extra->havingQual);
7337
7338 if (!extra->partial_costs_set)
7339 {
7340 /*
7341 * Collect statistics about aggregates for estimating costs of
7342 * performing aggregation in parallel.
7343 */
7344 MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7345 MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7346 if (parse->hasAggs)
7347 {
7348 /* partial phase */
7350 agg_partial_costs);
7351
7352 /* final phase */
7354 agg_final_costs);
7355 }
7356
7357 extra->partial_costs_set = true;
7358 }
7359
7360 /* Estimate number of partial groups. */
7361 if (cheapest_total_path != NULL)
7362 dNumPartialGroups =
7364 cheapest_total_path->rows,
7365 gd,
7366 extra->targetList);
7367 if (cheapest_partial_path != NULL)
7368 dNumPartialPartialGroups =
7370 cheapest_partial_path->rows,
7371 gd,
7372 extra->targetList);
7373
7374 if (can_sort && cheapest_total_path != NULL)
7375 {
7376 /* This should have been checked previously */
7377 Assert(parse->hasAggs || parse->groupClause);
7378
7379 /*
7380 * Use any available suitably-sorted path as input, and also consider
7381 * sorting the cheapest partial path.
7382 */
7383 foreach(lc, input_rel->pathlist)
7384 {
7385 ListCell *lc2;
7386 Path *path = (Path *) lfirst(lc);
7387 Path *path_save = path;
7388 List *pathkey_orderings = NIL;
7389
7390 /* generate alternative group orderings that might be useful */
7391 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7392
7393 Assert(list_length(pathkey_orderings) > 0);
7394
7395 /* process all potentially interesting grouping reorderings */
7396 foreach(lc2, pathkey_orderings)
7397 {
7398 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7399
7400 /* restore the path (we replace it in the loop) */
7401 path = path_save;
7402
7403 path = make_ordered_path(root,
7404 partially_grouped_rel,
7405 path,
7406 cheapest_total_path,
7407 info->pathkeys,
7408 -1.0);
7409
7410 if (path == NULL)
7411 continue;
7412
7413 if (parse->hasAggs)
7414 add_path(partially_grouped_rel, (Path *)
7416 partially_grouped_rel,
7417 path,
7418 partially_grouped_rel->reltarget,
7419 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7421 info->clauses,
7422 NIL,
7423 agg_partial_costs,
7424 dNumPartialGroups));
7425 else
7426 add_path(partially_grouped_rel, (Path *)
7428 partially_grouped_rel,
7429 path,
7430 info->clauses,
7431 NIL,
7432 dNumPartialGroups));
7433 }
7434 }
7435 }
7436
7437 if (can_sort && cheapest_partial_path != NULL)
7438 {
7439 /* Similar to above logic, but for partial paths. */
7440 foreach(lc, input_rel->partial_pathlist)
7441 {
7442 ListCell *lc2;
7443 Path *path = (Path *) lfirst(lc);
7444 Path *path_save = path;
7445 List *pathkey_orderings = NIL;
7446
7447 /* generate alternative group orderings that might be useful */
7448 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7449
7450 Assert(list_length(pathkey_orderings) > 0);
7451
7452 /* process all potentially interesting grouping reorderings */
7453 foreach(lc2, pathkey_orderings)
7454 {
7455 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7456
7457
7458 /* restore the path (we replace it in the loop) */
7459 path = path_save;
7460
7461 path = make_ordered_path(root,
7462 partially_grouped_rel,
7463 path,
7464 cheapest_partial_path,
7465 info->pathkeys,
7466 -1.0);
7467
7468 if (path == NULL)
7469 continue;
7470
7471 if (parse->hasAggs)
7472 add_partial_path(partially_grouped_rel, (Path *)
7474 partially_grouped_rel,
7475 path,
7476 partially_grouped_rel->reltarget,
7477 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7479 info->clauses,
7480 NIL,
7481 agg_partial_costs,
7482 dNumPartialPartialGroups));
7483 else
7484 add_partial_path(partially_grouped_rel, (Path *)
7486 partially_grouped_rel,
7487 path,
7488 info->clauses,
7489 NIL,
7490 dNumPartialPartialGroups));
7491 }
7492 }
7493 }
7494
7495 /*
7496 * Add a partially-grouped HashAgg Path where possible
7497 */
7498 if (can_hash && cheapest_total_path != NULL)
7499 {
7500 /* Checked above */
7501 Assert(parse->hasAggs || parse->groupClause);
7502
7503 add_path(partially_grouped_rel, (Path *)
7505 partially_grouped_rel,
7506 cheapest_total_path,
7507 partially_grouped_rel->reltarget,
7508 AGG_HASHED,
7510 root->processed_groupClause,
7511 NIL,
7512 agg_partial_costs,
7513 dNumPartialGroups));
7514 }
7515
7516 /*
7517 * Now add a partially-grouped HashAgg partial Path where possible
7518 */
7519 if (can_hash && cheapest_partial_path != NULL)
7520 {
7521 add_partial_path(partially_grouped_rel, (Path *)
7523 partially_grouped_rel,
7524 cheapest_partial_path,
7525 partially_grouped_rel->reltarget,
7526 AGG_HASHED,
7528 root->processed_groupClause,
7529 NIL,
7530 agg_partial_costs,
7531 dNumPartialPartialGroups));
7532 }
7533
7534 /*
7535 * If there is an FDW that's responsible for all baserels of the query,
7536 * let it consider adding partially grouped ForeignPaths.
7537 */
7538 if (partially_grouped_rel->fdwroutine &&
7539 partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7540 {
7541 FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7542
7543 fdwroutine->GetForeignUpperPaths(root,
7545 input_rel, partially_grouped_rel,
7546 extra);
7547 }
7548
7549 return partially_grouped_rel;
7550}
7551
7552/*
7553 * make_ordered_path
7554 * Return a path ordered by 'pathkeys' based on the given 'path'. May
7555 * return NULL if it doesn't make sense to generate an ordered path in
7556 * this case.
7557 */
7558static Path *
7560 Path *cheapest_path, List *pathkeys, double limit_tuples)
7561{
7562 bool is_sorted;
7563 int presorted_keys;
7564
7565 is_sorted = pathkeys_count_contained_in(pathkeys,
7566 path->pathkeys,
7567 &presorted_keys);
7568
7569 if (!is_sorted)
7570 {
7571 /*
7572 * Try at least sorting the cheapest path and also try incrementally
7573 * sorting any path which is partially sorted already (no need to deal
7574 * with paths which have presorted keys when incremental sort is
7575 * disabled unless it's the cheapest input path).
7576 */
7577 if (path != cheapest_path &&
7578 (presorted_keys == 0 || !enable_incremental_sort))
7579 return NULL;
7580
7581 /*
7582 * We've no need to consider both a sort and incremental sort. We'll
7583 * just do a sort if there are no presorted keys and an incremental
7584 * sort when there are presorted keys.
7585 */
7586 if (presorted_keys == 0 || !enable_incremental_sort)
7587 path = (Path *) create_sort_path(root,
7588 rel,
7589 path,
7590 pathkeys,
7591 limit_tuples);
7592 else
7594 rel,
7595 path,
7596 pathkeys,
7597 presorted_keys,
7598 limit_tuples);
7599 }
7600
7601 return path;
7602}
7603
7604/*
7605 * Generate Gather and Gather Merge paths for a grouping relation or partial
7606 * grouping relation.
7607 *
7608 * generate_useful_gather_paths does most of the work, but we also consider a
7609 * special case: we could try sorting the data by the group_pathkeys and then
7610 * applying Gather Merge.
7611 *
7612 * NB: This function shouldn't be used for anything other than a grouped or
7613 * partially grouped relation not only because of the fact that it explicitly
7614 * references group_pathkeys but we pass "true" as the third argument to
7615 * generate_useful_gather_paths().
7616 */
7617static void
7619{
7620 ListCell *lc;
7621 Path *cheapest_partial_path;
7622 List *groupby_pathkeys;
7623
7624 /*
7625 * This occurs after any partial aggregation has taken place, so trim off
7626 * any pathkeys added for ORDER BY / DISTINCT aggregates.
7627 */
7628 if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7629 groupby_pathkeys = list_copy_head(root->group_pathkeys,
7630 root->num_groupby_pathkeys);
7631 else
7632 groupby_pathkeys = root->group_pathkeys;
7633
7634 /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7636
7637 cheapest_partial_path = linitial(rel->partial_pathlist);
7638
7639 /* XXX Shouldn't this also consider the group-key-reordering? */
7640 foreach(lc, rel->partial_pathlist)
7641 {
7642 Path *path = (Path *) lfirst(lc);
7643 bool is_sorted;
7644 int presorted_keys;
7645 double total_groups;
7646
7647 is_sorted = pathkeys_count_contained_in(groupby_pathkeys,
7648 path->pathkeys,
7649 &presorted_keys);
7650
7651 if (is_sorted)
7652 continue;
7653
7654 /*
7655 * Try at least sorting the cheapest path and also try incrementally
7656 * sorting any path which is partially sorted already (no need to deal
7657 * with paths which have presorted keys when incremental sort is
7658 * disabled unless it's the cheapest input path).
7659 */
7660 if (path != cheapest_partial_path &&
7661 (presorted_keys == 0 || !enable_incremental_sort))
7662 continue;
7663
7664 /*
7665 * We've no need to consider both a sort and incremental sort. We'll
7666 * just do a sort if there are no presorted keys and an incremental
7667 * sort when there are presorted keys.
7668 */
7669 if (presorted_keys == 0 || !enable_incremental_sort)
7670 path = (Path *) create_sort_path(root, rel, path,
7671 groupby_pathkeys,
7672 -1.0);
7673 else
7675 rel,
7676 path,
7677 groupby_pathkeys,
7678 presorted_keys,
7679 -1.0);
7680 total_groups = compute_gather_rows(path);
7681 path = (Path *)
7683 rel,
7684 path,
7685 rel->reltarget,
7686 groupby_pathkeys,
7687 NULL,
7688 &total_groups);
7689
7690 add_path(rel, path);
7691 }
7692}
7693
7694/*
7695 * can_partial_agg
7696 *
7697 * Determines whether or not partial grouping and/or aggregation is possible.
7698 * Returns true when possible, false otherwise.
7699 */
7700static bool
7702{
7703 Query *parse = root->parse;
7704
7705 if (!parse->hasAggs && parse->groupClause == NIL)
7706 {
7707 /*
7708 * We don't know how to do parallel aggregation unless we have either
7709 * some aggregates or a grouping clause.
7710 */
7711 return false;
7712 }
7713 else if (parse->groupingSets)
7714 {
7715 /* We don't know how to do grouping sets in parallel. */
7716 return false;
7717 }
7718 else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7719 {
7720 /* Insufficient support for partial mode. */
7721 return false;
7722 }
7723
7724 /* Everything looks good. */
7725 return true;
7726}
7727
7728/*
7729 * apply_scanjoin_target_to_paths
7730 *
7731 * Adjust the final scan/join relation, and recursively all of its children,
7732 * to generate the final scan/join target. It would be more correct to model
7733 * this as a separate planning step with a new RelOptInfo at the toplevel and
7734 * for each child relation, but doing it this way is noticeably cheaper.
7735 * Maybe that problem can be solved at some point, but for now we do this.
7736 *
7737 * If tlist_same_exprs is true, then the scan/join target to be applied has
7738 * the same expressions as the existing reltarget, so we need only insert the
7739 * appropriate sortgroupref information. By avoiding the creation of
7740 * projection paths we save effort both immediately and at plan creation time.
7741 */
7742static void
7744 RelOptInfo *rel,
7745 List *scanjoin_targets,
7746 List *scanjoin_targets_contain_srfs,
7747 bool scanjoin_target_parallel_safe,
7748 bool tlist_same_exprs)
7749{
7750 bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7751 PathTarget *scanjoin_target;
7752 ListCell *lc;
7753
7754 /* This recurses, so be paranoid. */
7756
7757 /*
7758 * If the rel is partitioned, we want to drop its existing paths and
7759 * generate new ones. This function would still be correct if we kept the
7760 * existing paths: we'd modify them to generate the correct target above
7761 * the partitioning Append, and then they'd compete on cost with paths
7762 * generating the target below the Append. However, in our current cost
7763 * model the latter way is always the same or cheaper cost, so modifying
7764 * the existing paths would just be useless work. Moreover, when the cost
7765 * is the same, varying roundoff errors might sometimes allow an existing
7766 * path to be picked, resulting in undesirable cross-platform plan
7767 * variations. So we drop old paths and thereby force the work to be done
7768 * below the Append, except in the case of a non-parallel-safe target.
7769 *
7770 * Some care is needed, because we have to allow
7771 * generate_useful_gather_paths to see the old partial paths in the next
7772 * stanza. Hence, zap the main pathlist here, then allow
7773 * generate_useful_gather_paths to add path(s) to the main list, and
7774 * finally zap the partial pathlist.
7775 */
7776 if (rel_is_partitioned)
7777 rel->pathlist = NIL;
7778
7779 /*
7780 * If the scan/join target is not parallel-safe, partial paths cannot
7781 * generate it.
7782 */
7783 if (!scanjoin_target_parallel_safe)
7784 {
7785 /*
7786 * Since we can't generate the final scan/join target in parallel
7787 * workers, this is our last opportunity to use any partial paths that
7788 * exist; so build Gather path(s) that use them and emit whatever the
7789 * current reltarget is. We don't do this in the case where the
7790 * target is parallel-safe, since we will be able to generate superior
7791 * paths by doing it after the final scan/join target has been
7792 * applied.
7793 */
7795
7796 /* Can't use parallel query above this level. */
7797 rel->partial_pathlist = NIL;
7798 rel->consider_parallel = false;
7799 }
7800
7801 /* Finish dropping old paths for a partitioned rel, per comment above */
7802 if (rel_is_partitioned)
7803 rel->partial_pathlist = NIL;
7804
7805 /* Extract SRF-free scan/join target. */
7806 scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
7807
7808 /*
7809 * Apply the SRF-free scan/join target to each existing path.
7810 *
7811 * If the tlist exprs are the same, we can just inject the sortgroupref
7812 * information into the existing pathtargets. Otherwise, replace each
7813 * path with a projection path that generates the SRF-free scan/join
7814 * target. This can't change the ordering of paths within rel->pathlist,
7815 * so we just modify the list in place.
7816 */
7817 foreach(lc, rel->pathlist)
7818 {
7819 Path *subpath = (Path *) lfirst(lc);
7820
7821 /* Shouldn't have any parameterized paths anymore */
7822 Assert(subpath->param_info == NULL);
7823
7824 if (tlist_same_exprs)
7825 subpath->pathtarget->sortgrouprefs =
7826 scanjoin_target->sortgrouprefs;
7827 else
7828 {
7829 Path *newpath;
7830
7831 newpath = (Path *) create_projection_path(root, rel, subpath,
7832 scanjoin_target);
7833 lfirst(lc) = newpath;
7834 }
7835 }
7836
7837 /* Likewise adjust the targets for any partial paths. */
7838 foreach(lc, rel->partial_pathlist)
7839 {
7840 Path *subpath = (Path *) lfirst(lc);
7841
7842 /* Shouldn't have any parameterized paths anymore */
7843 Assert(subpath->param_info == NULL);
7844
7845 if (tlist_same_exprs)
7846 subpath->pathtarget->sortgrouprefs =
7847 scanjoin_target->sortgrouprefs;
7848 else
7849 {
7850 Path *newpath;
7851
7852 newpath = (Path *) create_projection_path(root, rel, subpath,
7853 scanjoin_target);
7854 lfirst(lc) = newpath;
7855 }
7856 }
7857
7858 /*
7859 * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
7860 * atop each existing path. (Note that this function doesn't look at the
7861 * cheapest-path fields, which is a good thing because they're bogus right
7862 * now.)
7863 */
7864 if (root->parse->hasTargetSRFs)
7866 scanjoin_targets,
7867 scanjoin_targets_contain_srfs);
7868
7869 /*
7870 * Update the rel's target to be the final (with SRFs) scan/join target.
7871 * This now matches the actual output of all the paths, and we might get
7872 * confused in createplan.c if they don't agree. We must do this now so
7873 * that any append paths made in the next part will use the correct
7874 * pathtarget (cf. create_append_path).
7875 *
7876 * Note that this is also necessary if GetForeignUpperPaths() gets called
7877 * on the final scan/join relation or on any of its children, since the
7878 * FDW might look at the rel's target to create ForeignPaths.
7879 */
7880 rel->reltarget = llast_node(PathTarget, scanjoin_targets);
7881
7882 /*
7883 * If the relation is partitioned, recursively apply the scan/join target
7884 * to all partitions, and generate brand-new Append paths in which the
7885 * scan/join target is computed below the Append rather than above it.
7886 * Since Append is not projection-capable, that might save a separate
7887 * Result node, and it also is important for partitionwise aggregate.
7888 */
7889 if (rel_is_partitioned)
7890 {
7891 List *live_children = NIL;
7892 int i;
7893
7894 /* Adjust each partition. */
7895 i = -1;
7896 while ((i = bms_next_member(rel->live_parts, i)) >= 0)
7897 {
7898 RelOptInfo *child_rel = rel->part_rels[i];
7899 AppendRelInfo **appinfos;
7900 int nappinfos;
7901 List *child_scanjoin_targets = NIL;
7902
7903 Assert(child_rel != NULL);
7904
7905 /* Dummy children can be ignored. */
7906 if (IS_DUMMY_REL(child_rel))
7907 continue;
7908
7909 /* Translate scan/join targets for this child. */
7910 appinfos = find_appinfos_by_relids(root, child_rel->relids,
7911 &nappinfos);
7912 foreach(lc, scanjoin_targets)
7913 {
7914 PathTarget *target = lfirst_node(PathTarget, lc);
7915
7916 target = copy_pathtarget(target);
7917 target->exprs = (List *)
7919 (Node *) target->exprs,
7920 nappinfos, appinfos);
7921 child_scanjoin_targets = lappend(child_scanjoin_targets,
7922 target);
7923 }
7924 pfree(appinfos);
7925
7926 /* Recursion does the real work. */
7928 child_scanjoin_targets,
7929 scanjoin_targets_contain_srfs,
7930 scanjoin_target_parallel_safe,
7932
7933 /* Save non-dummy children for Append paths. */
7934 if (!IS_DUMMY_REL(child_rel))
7935 live_children = lappend(live_children, child_rel);
7936 }
7937
7938 /* Build new paths for this relation by appending child paths. */
7939 add_paths_to_append_rel(root, rel, live_children);
7940 }
7941
7942 /*
7943 * Consider generating Gather or Gather Merge paths. We must only do this
7944 * if the relation is parallel safe, and we don't do it for child rels to
7945 * avoid creating multiple Gather nodes within the same plan. We must do
7946 * this after all paths have been generated and before set_cheapest, since
7947 * one of the generated paths may turn out to be the cheapest one.
7948 */
7949 if (rel->consider_parallel && !IS_OTHER_REL(rel))
7951
7952 /*
7953 * Reassess which paths are the cheapest, now that we've potentially added
7954 * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
7955 * this relation.
7956 */
7957 set_cheapest(rel);
7958}
7959
7960/*
7961 * create_partitionwise_grouping_paths
7962 *
7963 * If the partition keys of input relation are part of the GROUP BY clause, all
7964 * the rows belonging to a given group come from a single partition. This
7965 * allows aggregation/grouping over a partitioned relation to be broken down
7966 * into aggregation/grouping on each partition. This should be no worse, and
7967 * often better, than the normal approach.
7968 *
7969 * However, if the GROUP BY clause does not contain all the partition keys,
7970 * rows from a given group may be spread across multiple partitions. In that
7971 * case, we perform partial aggregation for each group, append the results,
7972 * and then finalize aggregation. This is less certain to win than the
7973 * previous case. It may win if the PartialAggregate stage greatly reduces
7974 * the number of groups, because fewer rows will pass through the Append node.
7975 * It may lose if we have lots of small groups.
7976 */
7977static void
7979 RelOptInfo *input_rel,
7980 RelOptInfo *grouped_rel,
7981 RelOptInfo *partially_grouped_rel,
7982 const AggClauseCosts *agg_costs,
7985 GroupPathExtraData *extra)
7986{
7987 List *grouped_live_children = NIL;
7988 List *partially_grouped_live_children = NIL;
7989 PathTarget *target = grouped_rel->reltarget;
7990 bool partial_grouping_valid = true;
7991 int i;
7992
7995 partially_grouped_rel != NULL);
7996
7997 /* Add paths for partitionwise aggregation/grouping. */
7998 i = -1;
7999 while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
8000 {
8001 RelOptInfo *child_input_rel = input_rel->part_rels[i];
8002 PathTarget *child_target;
8003 AppendRelInfo **appinfos;
8004 int nappinfos;
8005 GroupPathExtraData child_extra;
8006 RelOptInfo *child_grouped_rel;
8007 RelOptInfo *child_partially_grouped_rel;
8008
8009 Assert(child_input_rel != NULL);
8010
8011 /* Dummy children can be ignored. */
8012 if (IS_DUMMY_REL(child_input_rel))
8013 continue;
8014
8015 child_target = copy_pathtarget(target);
8016
8017 /*
8018 * Copy the given "extra" structure as is and then override the
8019 * members specific to this child.
8020 */
8021 memcpy(&child_extra, extra, sizeof(child_extra));
8022
8023 appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
8024 &nappinfos);
8025
8026 child_target->exprs = (List *)
8028 (Node *) target->exprs,
8029 nappinfos, appinfos);
8030
8031 /* Translate havingQual and targetList. */
8032 child_extra.havingQual = (Node *)
8034 extra->havingQual,
8035 nappinfos, appinfos);
8036 child_extra.targetList = (List *)
8038 (Node *) extra->targetList,
8039 nappinfos, appinfos);
8040
8041 /*
8042 * extra->patype was the value computed for our parent rel; patype is
8043 * the value for this relation. For the child, our value is its
8044 * parent rel's value.
8045 */
8046 child_extra.patype = patype;
8047
8048 /*
8049 * Create grouping relation to hold fully aggregated grouping and/or
8050 * aggregation paths for the child.
8051 */
8052 child_grouped_rel = make_grouping_rel(root, child_input_rel,
8053 child_target,
8054 extra->target_parallel_safe,
8055 child_extra.havingQual);
8056
8057 /* Create grouping paths for this child relation. */
8058 create_ordinary_grouping_paths(root, child_input_rel,
8059 child_grouped_rel,
8060 agg_costs, gd, &child_extra,
8061 &child_partially_grouped_rel);
8062
8063 if (child_partially_grouped_rel)
8064 {
8065 partially_grouped_live_children =
8066 lappend(partially_grouped_live_children,
8067 child_partially_grouped_rel);
8068 }
8069 else
8070 partial_grouping_valid = false;
8071
8072 if (patype == PARTITIONWISE_AGGREGATE_FULL)
8073 {
8074 set_cheapest(child_grouped_rel);
8075 grouped_live_children = lappend(grouped_live_children,
8076 child_grouped_rel);
8077 }
8078
8079 pfree(appinfos);
8080 }
8081
8082 /*
8083 * Try to create append paths for partially grouped children. For full
8084 * partitionwise aggregation, we might have paths in the partial_pathlist
8085 * if parallel aggregation is possible. For partial partitionwise
8086 * aggregation, we may have paths in both pathlist and partial_pathlist.
8087 *
8088 * NB: We must have a partially grouped path for every child in order to
8089 * generate a partially grouped path for this relation.
8090 */
8091 if (partially_grouped_rel && partial_grouping_valid)
8092 {
8093 Assert(partially_grouped_live_children != NIL);
8094
8095 add_paths_to_append_rel(root, partially_grouped_rel,
8096 partially_grouped_live_children);
8097
8098 /*
8099 * We need call set_cheapest, since the finalization step will use the
8100 * cheapest path from the rel.
8101 */
8102 if (partially_grouped_rel->pathlist)
8103 set_cheapest(partially_grouped_rel);
8104 }
8105
8106 /* If possible, create append paths for fully grouped children. */
8107 if (patype == PARTITIONWISE_AGGREGATE_FULL)
8108 {
8109 Assert(grouped_live_children != NIL);
8110
8111 add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
8112 }
8113}
8114
8115/*
8116 * group_by_has_partkey
8117 *
8118 * Returns true if all the partition keys of the given relation are part of
8119 * the GROUP BY clauses, including having matching collation, false otherwise.
8120 */
8121static bool
8123 List *targetList,
8124 List *groupClause)
8125{
8126 List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
8127 int cnt = 0;
8128 int partnatts;
8129
8130 /* Input relation should be partitioned. */
8131 Assert(input_rel->part_scheme);
8132
8133 /* Rule out early, if there are no partition keys present. */
8134 if (!input_rel->partexprs)
8135 return false;
8136
8137 partnatts = input_rel->part_scheme->partnatts;
8138
8139 for (cnt = 0; cnt < partnatts; cnt++)
8140 {
8141 List *partexprs = input_rel->partexprs[cnt];
8142 ListCell *lc;
8143 bool found = false;
8144
8145 foreach(lc, partexprs)
8146 {
8147 ListCell *lg;
8148 Expr *partexpr = lfirst(lc);
8149 Oid partcoll = input_rel->part_scheme->partcollation[cnt];
8150
8151 foreach(lg, groupexprs)
8152 {
8153 Expr *groupexpr = lfirst(lg);
8154 Oid groupcoll = exprCollation((Node *) groupexpr);
8155
8156 /*
8157 * Note: we can assume there is at most one RelabelType node;
8158 * eval_const_expressions() will have simplified if more than
8159 * one.
8160 */
8161 if (IsA(groupexpr, RelabelType))
8162 groupexpr = ((RelabelType *) groupexpr)->arg;
8163
8164 if (equal(groupexpr, partexpr))
8165 {
8166 /*
8167 * Reject a match if the grouping collation does not match
8168 * the partitioning collation.
8169 */
8170 if (OidIsValid(partcoll) && OidIsValid(groupcoll) &&
8171 partcoll != groupcoll)
8172 return false;
8173
8174 found = true;
8175 break;
8176 }
8177 }
8178
8179 if (found)
8180 break;
8181 }
8182
8183 /*
8184 * If none of the partition key expressions match with any of the
8185 * GROUP BY expression, return false.
8186 */
8187 if (!found)
8188 return false;
8189 }
8190
8191 return true;
8192}
8193
8194/*
8195 * generate_setop_child_grouplist
8196 * Build a SortGroupClause list defining the sort/grouping properties
8197 * of the child of a set operation.
8198 *
8199 * This is similar to generate_setop_grouplist() but differs as the setop
8200 * child query's targetlist entries may already have a tleSortGroupRef
8201 * assigned for other purposes, such as GROUP BYs. Here we keep the
8202 * SortGroupClause list in the same order as 'op' groupClauses and just adjust
8203 * the tleSortGroupRef to reference the TargetEntry's 'ressortgroupref'. If
8204 * any of the columns in the targetlist don't match to the setop's colTypes
8205 * then we return an empty list. This may leave some TLEs with unreferenced
8206 * ressortgroupref markings, but that's harmless.
8207 */
8208static List *
8210{
8211 List *grouplist = copyObject(op->groupClauses);
8212 ListCell *lg;
8213 ListCell *lt;
8214 ListCell *ct;
8215
8216 lg = list_head(grouplist);
8217 ct = list_head(op->colTypes);
8218 foreach(lt, targetlist)
8219 {
8220 TargetEntry *tle = (TargetEntry *) lfirst(lt);
8221 SortGroupClause *sgc;
8222 Oid coltype;
8223
8224 /* resjunk columns could have sortgrouprefs. Leave these alone */
8225 if (tle->resjunk)
8226 continue;
8227
8228 /*
8229 * We expect every non-resjunk target to have a SortGroupClause and
8230 * colTypes.
8231 */
8232 Assert(lg != NULL);
8233 Assert(ct != NULL);
8234 sgc = (SortGroupClause *) lfirst(lg);
8235 coltype = lfirst_oid(ct);
8236
8237 /* reject if target type isn't the same as the setop target type */
8238 if (coltype != exprType((Node *) tle->expr))
8239 return NIL;
8240
8241 lg = lnext(grouplist, lg);
8242 ct = lnext(op->colTypes, ct);
8243
8244 /* assign a tleSortGroupRef, or reuse the existing one */
8245 sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
8246 }
8247
8248 Assert(lg == NULL);
8249 Assert(ct == NULL);
8250
8251 return grouplist;
8252}
int compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages, int max_workers)
Definition: allpaths.c:4234
void generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition: allpaths.c:3220
void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, List *live_childrels)
Definition: allpaths.c:1321
AppendRelInfo ** find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos)
Definition: appendinfo.c:753
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:200
List * adjust_inherited_attnums_multilevel(PlannerInfo *root, List *attnums, Index child_relid, Index top_parent_relid)
Definition: appendinfo.c:682
Node * adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, RelOptInfo *childrel, RelOptInfo *parentrel)
Definition: appendinfo.c:541
void pprint(const void *obj)
Definition: print.c:54
void pgstat_report_plan_id(uint64 plan_id, bool force)
BipartiteMatchState * BipartiteMatch(int u_size, int v_size, short **adjacency)
void BipartiteMatchFree(BipartiteMatchState *state)
Bitmapset * bms_difference(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:346
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:216
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:142
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1306
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1161
Bitmapset * bms_del_member(Bitmapset *a, int x)
Definition: bitmapset.c:868
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:412
void bms_free(Bitmapset *a)
Definition: bitmapset.c:239
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:751
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
BMS_Membership bms_membership(const Bitmapset *a)
Definition: bitmapset.c:781
bool bms_overlap_list(const Bitmapset *a, const List *b)
Definition: bitmapset.c:608
#define bms_is_empty(a)
Definition: bitmapset.h:118
@ BMS_MULTIPLE
Definition: bitmapset.h:73
uint32 BlockNumber
Definition: block.h:31
#define Min(x, y)
Definition: c.h:975
#define Max(x, y)
Definition: c.h:969
int64_t int64
Definition: c.h:499
#define FLOAT8PASSBYVAL
Definition: c.h:606
unsigned int Index
Definition: c.h:585
#define MemSet(start, val, len)
Definition: c.h:991
#define OidIsValid(objectId)
Definition: c.h:746
size_t Size
Definition: c.h:576
bool contain_agg_clause(Node *clause)
Definition: clauses.c:179
Node * estimate_expression_value(PlannerInfo *root, Node *node)
Definition: clauses.c:2397
WindowFuncLists * find_window_functions(Node *clause, Index maxWinRef)
Definition: clauses.c:229
Node * eval_const_expressions(PlannerInfo *root, Node *node)
Definition: clauses.c:2256
void convert_saop_to_hashed_saop(Node *node)
Definition: clauses.c:2289
char max_parallel_hazard(Query *parse)
Definition: clauses.c:735
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:754
bool contain_subplans(Node *clause)
Definition: clauses.c:331
bool contain_volatile_functions(Node *clause)
Definition: clauses.c:539
double cpu_operator_cost
Definition: costsize.c:134
bool enable_partitionwise_aggregate
Definition: costsize.c:160
int max_parallel_workers_per_gather
Definition: costsize.c:143
double parallel_setup_cost
Definition: costsize.c:136
double parallel_tuple_cost
Definition: costsize.c:135
void cost_sort(Path *path, PlannerInfo *root, List *pathkeys, int input_disabled_nodes, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition: costsize.c:2144
double compute_gather_rows(Path *path)
Definition: costsize.c:6611
void cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
Definition: costsize.c:4768
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition: costsize.c:6353
void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
Definition: costsize.c:4742
bool enable_presorted_aggregate
Definition: costsize.c:164
bool enable_hashagg
Definition: costsize.c:152
int32 clamp_width_est(int64 tuple_width)
Definition: costsize.c:242
bool enable_indexscan
Definition: costsize.c:146
bool enable_incremental_sort
Definition: costsize.c:151
Plan * materialize_finished_plan(Plan *subplan)
Definition: createplan.c:6597
Plan * create_plan(PlannerInfo *root, Path *best_path)
Definition: createplan.c:337
int errdetail(const char *fmt,...)
Definition: elog.c:1204
int errcode(int sqlerrcode)
Definition: elog.c:854
int errmsg(const char *fmt,...)
Definition: elog.c:1071
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:225
#define ereport(elevel,...)
Definition: elog.h:149
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:223
bool ExecSupportsBackwardScan(Plan *node)
Definition: execAmi.c:511
Datum Int64GetDatum(int64 X)
Definition: fmgr.c:1807
#define OidFunctionCall1(functionId, arg1)
Definition: fmgr.h:720
FdwRoutine * GetFdwRoutineByRelId(Oid relid)
Definition: foreign.c:420
int max_parallel_maintenance_workers
Definition: globals.c:135
bool IsUnderPostmaster
Definition: globals.c:121
int maintenance_work_mem
Definition: globals.c:134
Assert(PointerIsAligned(start, uint64))
#define IsParallelWorker()
Definition: parallel.h:60
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:177
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:133
int b
Definition: isn.c:74
int a
Definition: isn.c:73
int j
Definition: isn.c:78
int i
Definition: isn.c:77
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:81
double jit_optimize_above_cost
Definition: jit.c:41
bool jit_enabled
Definition: jit.c:32
bool jit_expressions
Definition: jit.c:36
bool jit_tuple_deforming
Definition: jit.c:38
double jit_above_cost
Definition: jit.c:39
double jit_inline_above_cost
Definition: jit.c:40
#define PGJIT_OPT3
Definition: jit.h:21
#define PGJIT_NONE
Definition: jit.h:19
#define PGJIT_EXPR
Definition: jit.h:23
#define PGJIT_DEFORM
Definition: jit.h:24
#define PGJIT_INLINE
Definition: jit.h:22
#define PGJIT_PERFORM
Definition: jit.h:20
Bitmapset * DiscreteKnapsack(int max_weight, int num_items, int *item_weights, double *item_values)
Definition: knapsack.c:52
List * lappend(List *list, void *datum)
Definition: list.c:339
List * list_difference_int(const List *list1, const List *list2)
Definition: list.c:1288
List * list_concat_unique_ptr(List *list1, const List *list2)
Definition: list.c:1427
List * list_concat(List *list1, const List *list2)
Definition: list.c:561
List * list_copy(const List *oldlist)
Definition: list.c:1573
List * lappend_int(List *list, int datum)
Definition: list.c:357
List * lcons(void *datum, List *list)
Definition: list.c:495
List * list_delete_int(List *list, int datum)
Definition: list.c:891
bool list_member_ptr(const List *list, const void *datum)
Definition: list.c:682
void list_free(List *list)
Definition: list.c:1546
bool list_member_int(const List *list, int datum)
Definition: list.c:702
List * list_copy_head(const List *oldlist, int len)
Definition: list.c:1593
List * list_concat_unique(List *list1, const List *list2)
Definition: list.c:1405
#define NoLock
Definition: lockdefs.h:34
#define AccessShareLock
Definition: lockdefs.h:36
@ LockWaitBlock
Definition: lockoptions.h:39
LockClauseStrength
Definition: lockoptions.h:22
@ LCS_FORUPDATE
Definition: lockoptions.h:27
@ LCS_NONE
Definition: lockoptions.h:23
@ LCS_FORSHARE
Definition: lockoptions.h:25
@ LCS_FORKEYSHARE
Definition: lockoptions.h:24
@ LCS_FORNOKEYUPDATE
Definition: lockoptions.h:26
RegProcedure get_func_support(Oid funcid)
Definition: lsyscache.c:1998
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition: lsyscache.c:2718
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:311
Expr * make_opclause(Oid opno, Oid opresulttype, bool opretset, Expr *leftop, Expr *rightop, Oid opcollid, Oid inputcollid)
Definition: makefuncs.c:701
Const * makeConst(Oid consttype, int32 consttypmod, Oid constcollid, int constlen, Datum constvalue, bool constisnull, bool constbyval)
Definition: makefuncs.c:350
List * make_ands_implicit(Expr *clause)
Definition: makefuncs.c:810
char * pstrdup(const char *in)
Definition: mcxt.c:2327
void pfree(void *pointer)
Definition: mcxt.c:2152
void * palloc0(Size size)
Definition: mcxt.c:1975
void * palloc(Size size)
Definition: mcxt.c:1945
MemoryContext CurrentMemoryContext
Definition: mcxt.c:159
Oid exprType(const Node *expr)
Definition: nodeFuncs.c:42
Oid exprCollation(const Node *expr)
Definition: nodeFuncs.c:821
bool expression_returns_set(Node *clause)
Definition: nodeFuncs.c:763
void fix_opfuncids(Node *node)
Definition: nodeFuncs.c:1841
size_t get_hash_memory_limit(void)
Definition: nodeHash.c:3616
#define DO_AGGSPLIT_SKIPFINAL(as)
Definition: nodes.h:392
#define IsA(nodeptr, _type_)
Definition: nodes.h:164
#define copyObject(obj)
Definition: nodes.h:230
double Cost
Definition: nodes.h:257
#define nodeTag(nodeptr)
Definition: nodes.h:139
#define IS_OUTER_JOIN(jointype)
Definition: nodes.h:344
@ CMD_MERGE
Definition: nodes.h:275
@ CMD_DELETE
Definition: nodes.h:274
@ CMD_UPDATE
Definition: nodes.h:272
@ CMD_SELECT
Definition: nodes.h:271
AggStrategy
Definition: nodes.h:359
@ AGG_SORTED
Definition: nodes.h:361
@ AGG_HASHED
Definition: nodes.h:362
@ AGG_MIXED
Definition: nodes.h:363
@ AGG_PLAIN
Definition: nodes.h:360
#define DO_AGGSPLIT_SERIALIZE(as)
Definition: nodes.h:393
AggSplit
Definition: nodes.h:381
@ AGGSPLIT_FINAL_DESERIAL
Definition: nodes.h:387
@ AGGSPLIT_SIMPLE
Definition: nodes.h:383
@ AGGSPLIT_INITIAL_SERIAL
Definition: nodes.h:385
@ LIMIT_OPTION_COUNT
Definition: nodes.h:437
#define makeNode(_type_)
Definition: nodes.h:161
#define castNode(_type_, nodeptr)
Definition: nodes.h:182
#define PVC_RECURSE_AGGREGATES
Definition: optimizer.h:193
#define PVC_RECURSE_WINDOWFUNCS
Definition: optimizer.h:195
@ DEBUG_PARALLEL_REGRESS
Definition: optimizer.h:108
@ DEBUG_PARALLEL_OFF
Definition: optimizer.h:106
#define PVC_INCLUDE_WINDOWFUNCS
Definition: optimizer.h:194
#define PVC_INCLUDE_PLACEHOLDERS
Definition: optimizer.h:196
#define PVC_INCLUDE_AGGREGATES
Definition: optimizer.h:192
int assign_special_exec_param(PlannerInfo *root)
Definition: paramassign.c:711
List * expand_grouping_sets(List *groupingSets, bool groupDistinct, int limit)
Definition: parse_agg.c:1894
Index assignSortGroupRef(TargetEntry *tle, List *tlist)
RTEPermissionInfo * addRTEPermissionInfo(List **rteperminfos, RangeTblEntry *rte)
#define CURSOR_OPT_SCROLL
Definition: parsenodes.h:3376
#define CURSOR_OPT_FAST_PLAN
Definition: parsenodes.h:3382
@ RTE_JOIN
Definition: parsenodes.h:1028
@ RTE_VALUES
Definition: parsenodes.h:1031
@ RTE_SUBQUERY
Definition: parsenodes.h:1027
@ RTE_RESULT
Definition: parsenodes.h:1034
@ RTE_FUNCTION
Definition: parsenodes.h:1029
@ RTE_TABLEFUNC
Definition: parsenodes.h:1030
@ RTE_GROUP
Definition: parsenodes.h:1037
@ RTE_RELATION
Definition: parsenodes.h:1026
#define CURSOR_OPT_PARALLEL_OK
Definition: parsenodes.h:3385
void CheckSelectLocking(Query *qry, LockClauseStrength strength)
Definition: analyze.c:3471
const char * LCS_asString(LockClauseStrength strength)
Definition: analyze.c:3446
#define rt_fetch(rangetable_index, rangetable)
Definition: parsetree.h:31
void DestroyPartitionDirectory(PartitionDirectory pdir)
Definition: partdesc.c:484
List * append_pathkeys(List *target, List *source)
Definition: pathkeys.c:107
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition: pathkeys.c:558
List * make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist)
Definition: pathkeys.c:1336
List * make_pathkeys_for_sortclauses_extended(PlannerInfo *root, List **sortclauses, List *tlist, bool remove_redundant, bool remove_group_rtindex, bool *sortable, bool set_ec_sortref)
Definition: pathkeys.c:1381
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:343
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition: pathkeys.c:304
List * get_useful_group_keys_orderings(PlannerInfo *root, Path *path)
Definition: pathkeys.c:467
IndexPath * create_index_path(PlannerInfo *root, IndexOptInfo *index, List *indexclauses, List *indexorderbys, List *indexorderbycols, List *pathkeys, ScanDirection indexscandir, bool indexonly, Relids required_outer, double loop_count, bool partial_path)
Definition: pathnode.c:1049
ProjectSetPath * create_set_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2965
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2766
WindowAggPath * create_windowagg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, List *runCondition, WindowClause *winclause, List *qual, bool topwindow)
Definition: pathnode.c:3580
LockRowsPath * create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)
Definition: pathnode.c:3816
Path * apply_projection_to_path(PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
Definition: pathnode.c:2876
Path * create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
Definition: pathnode.c:983
GatherMergePath * create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
Definition: pathnode.c:1962
void set_cheapest(RelOptInfo *parent_rel)
Definition: pathnode.c:269
void add_partial_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:795
LimitPath * create_limit_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
Definition: pathnode.c:3982
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition: pathnode.c:1300
int compare_fractional_path_costs(Path *path1, Path *path2, double fraction)
Definition: pathnode.c:124
IncrementalSortPath * create_incremental_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
Definition: pathnode.c:3035
GroupingSetsPath * create_groupingsets_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs)
Definition: pathnode.c:3326
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition: pathnode.c:3085
GroupPath * create_group_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)
Definition: pathnode.c:3130
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:461
UpperUniquePath * create_upper_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)
Definition: pathnode.c:3190
AggPath * create_agg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
Definition: pathnode.c:3243
ModifyTablePath * create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, bool partColsUpdated, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, List *mergeJoinConditions, int epqParam)
Definition: pathnode.c:3880
GroupResultPath * create_group_result_path(PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)
Definition: pathnode.c:1586
PartitionwiseAggregateType
Definition: pathnodes.h:3413
@ PARTITIONWISE_AGGREGATE_PARTIAL
Definition: pathnodes.h:3416
@ PARTITIONWISE_AGGREGATE_FULL
Definition: pathnodes.h:3415
@ PARTITIONWISE_AGGREGATE_NONE
Definition: pathnodes.h:3414
#define IS_DUMMY_REL(r)
Definition: pathnodes.h:2083
#define GROUPING_CAN_USE_HASH
Definition: pathnodes.h:3398
#define get_pathtarget_sortgroupref(target, colno)
Definition: pathnodes.h:1685
#define IS_PARTITIONED_REL(rel)
Definition: pathnodes.h:1089
#define GROUPING_CAN_USE_SORT
Definition: pathnodes.h:3397
#define GROUPING_CAN_PARTIAL_AGG
Definition: pathnodes.h:3399
@ UPPERREL_GROUP_AGG
Definition: pathnodes.h:74
@ UPPERREL_FINAL
Definition: pathnodes.h:79
@ UPPERREL_DISTINCT
Definition: pathnodes.h:77
@ UPPERREL_PARTIAL_GROUP_AGG
Definition: pathnodes.h:72
@ UPPERREL_ORDERED
Definition: pathnodes.h:78
@ UPPERREL_WINDOW
Definition: pathnodes.h:75
@ UPPERREL_PARTIAL_DISTINCT
Definition: pathnodes.h:76
@ RELOPT_OTHER_UPPER_REL
Definition: pathnodes.h:859
#define IS_OTHER_REL(rel)
Definition: pathnodes.h:881
@ PATHKEYS_BETTER2
Definition: paths.h:215
@ PATHKEYS_BETTER1
Definition: paths.h:214
@ PATHKEYS_DIFFERENT
Definition: paths.h:216
@ PATHKEYS_EQUAL
Definition: paths.h:213
void * arg
bool has_subclass(Oid relationId)
Definition: pg_inherits.c:355
#define lfirst(lc)
Definition: pg_list.h:172
#define lfirst_node(type, lc)
Definition: pg_list.h:176
static int list_length(const List *l)
Definition: pg_list.h:152
#define linitial_node(type, l)
Definition: pg_list.h:181
#define NIL
Definition: pg_list.h:68
#define forboth(cell1, list1, cell2, list2)
Definition: pg_list.h:518
#define foreach_current_index(var_or_cell)
Definition: pg_list.h:403
#define lfirst_int(lc)
Definition: pg_list.h:173
#define list_make1(x1)
Definition: pg_list.h:212
#define linitial_int(l)
Definition: pg_list.h:179
#define for_each_cell(cell, lst, initcell)
Definition: pg_list.h:438
#define for_each_from(cell, lst, N)
Definition: pg_list.h:414
static void * list_nth(const List *list, int n)
Definition: pg_list.h:299
#define linitial(l)
Definition: pg_list.h:178
#define foreach_node(type, var, lst)
Definition: pg_list.h:496
static ListCell * list_head(const List *l)
Definition: pg_list.h:128
#define list_nth_node(type, list, n)
Definition: pg_list.h:327
static ListCell * lnext(const List *l, const ListCell *c)
Definition: pg_list.h:343
#define list_make1_int(x1)
Definition: pg_list.h:227
#define lfirst_oid(lc)
Definition: pg_list.h:174
static int list_cell_number(const List *l, const ListCell *c)
Definition: pg_list.h:333
#define llast_node(type, l)
Definition: pg_list.h:202
static int scale
Definition: pgbench.c:182
void preprocess_minmax_aggregates(PlannerInfo *root)
Definition: planagg.c:73
void estimate_rel_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac)
Definition: plancat.c:1069
int32 get_relation_data_width(Oid relid, int32 *attr_widths)
Definition: plancat.c:1236
RelOptInfo * query_planner(PlannerInfo *root, query_pathkeys_callback qp_callback, void *qp_extra)
Definition: planmain.c:54
#define DEFAULT_CURSOR_TUPLE_FRACTION
Definition: planmain.h:21
#define EXPRKIND_TABLEFUNC_LATERAL
Definition: planner.c:92
static RelOptInfo * create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
Definition: planner.c:4992
static List * postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Definition: planner.c:5727
static PathTarget * make_partial_grouping_target(PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
Definition: planner.c:5589
Expr * expression_planner_with_deps(Expr *expr, List **relationOids, List **invalItems)
Definition: planner.c:6720
#define EXPRKIND_TARGET
Definition: planner.c:81
#define EXPRKIND_APPINFO
Definition: planner.c:87
static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
Definition: planner.c:7618
static void preprocess_rowmarks(PlannerInfo *root)
Definition: planner.c:2348
#define EXPRKIND_TABLESAMPLE
Definition: planner.c:89
PlannedStmt * planner(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
Definition: planner.c:286
static void create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
Definition: planner.c:3916
#define EXPRKIND_GROUPEXPR
Definition: planner.c:93
planner_hook_type planner_hook
Definition: planner.c:73
double cursor_tuple_fraction
Definition: planner.c:67
static bool is_degenerate_grouping(PlannerInfo *root)
Definition: planner.c:3895
bool plan_cluster_use_sort(Oid tableOid, Oid indexOid)
Definition: planner.c:6773
static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
Definition: planner.c:1309
int plan_create_index_workers(Oid tableOid, Oid indexOid)
Definition: planner.c:6895
#define EXPRKIND_PHV
Definition: planner.c:88
#define EXPRKIND_RTFUNC_LATERAL
Definition: planner.c:83
#define EXPRKIND_VALUES_LATERAL
Definition: planner.c:85
static void create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
Definition: planner.c:3980
#define EXPRKIND_LIMIT
Definition: planner.c:86
#define EXPRKIND_VALUES
Definition: planner.c:84
static bool can_partial_agg(PlannerInfo *root)
Definition: planner.c:7701
static double preprocess_limit(PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
Definition: planner.c:2526
Path * get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
Definition: planner.c:6531
Expr * preprocess_phv_expression(PlannerInfo *root, Expr *expr)
Definition: planner.c:1353
static List * get_useful_pathkeys_for_distinct(PlannerInfo *root, List *needed_pathkeys, List *path_pathkeys)
Definition: planner.c:5172
bool parallel_leader_participation
Definition: planner.c:69
static PathTarget * make_window_input_target(PlannerInfo *root, PathTarget *final_target, List *activeWindows)
Definition: planner.c:6107
static void apply_scanjoin_target_to_paths(PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
Definition: planner.c:7743
static RelOptInfo * create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target)
Definition: planner.c:4739
static void optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5764
RowMarkType select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
Definition: planner.c:2460
PlannerInfo * subquery_planner(PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction, SetOperationStmt *setops)
Definition: planner.c:652
static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
Definition: planner.c:6577
static void create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel, PathTarget *target)
Definition: planner.c:4809
#define EXPRKIND_QUAL
Definition: planner.c:80
static List * preprocess_groupclause(PlannerInfo *root, List *force)
Definition: planner.c:2777
static Node * preprocess_expression(PlannerInfo *root, Node *expr, int kind)
Definition: planner.c:1207
static Path * make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path, Path *cheapest_path, List *pathkeys, double limit_tuples)
Definition: planner.c:7559
static bool has_volatile_pathkey(List *keys)
Definition: planner.c:3133
static RelOptInfo * create_partial_grouping_paths(PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
Definition: planner.c:7265
static void name_active_windows(List *activeWindows)
Definition: planner.c:5987
static PathTarget * make_sort_input_target(PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
Definition: planner.c:6355
static void create_one_window_path(PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4569
bool enable_distinct_reordering
Definition: planner.c:70
void mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
Definition: planner.c:5692
static grouping_sets_data * preprocess_grouping_sets(PlannerInfo *root)
Definition: planner.c:2130
int debug_parallel_query
Definition: planner.c:68
static List * remap_to_groupclause_idx(List *groupClause, List *gsets, int *tleref_to_colnum_map)
Definition: planner.c:2311
static void adjust_group_pathkeys_for_groupagg(PlannerInfo *root)
Definition: planner.c:3178
static PathTarget * make_group_input_target(PlannerInfo *root, PathTarget *final_target)
Definition: planner.c:5477
static List * reorder_grouping_sets(List *groupingSets, List *sortclause)
Definition: planner.c:3085
static int common_prefix_cmp(const void *a, const void *b)
Definition: planner.c:6038
static void grouping_planner(PlannerInfo *root, double tuple_fraction, SetOperationStmt *setops)
Definition: planner.c:1386
static RelOptInfo * make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
Definition: planner.c:3842
static List * generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist)
Definition: planner.c:8209
static List * select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5904
Expr * expression_planner(Expr *expr)
Definition: planner.c:6693
bool limit_needed(Query *parse)
Definition: planner.c:2711
create_upper_paths_hook_type create_upper_paths_hook
Definition: planner.c:76
#define EXPRKIND_TABLEFUNC
Definition: planner.c:91
static void consider_groupingsets_paths(PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: planner.c:4120
static List * make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, List *tlist)
Definition: planner.c:6227
static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
Definition: planner.c:7028
static RelOptInfo * create_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
Definition: planner.c:5257
#define EXPRKIND_RTFUNC
Definition: planner.c:82
static double get_number_of_groups(PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
Definition: planner.c:3607
static List * extract_rollup_sets(List *groupingSets)
Definition: planner.c:2873
static RelOptInfo * create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
Definition: planner.c:3729
static void create_partitionwise_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
Definition: planner.c:7978
#define EXPRKIND_ARBITER_ELEM
Definition: planner.c:90
static bool group_by_has_partkey(RelOptInfo *input_rel, List *targetList, List *groupClause)
Definition: planner.c:8122
PlannedStmt * standard_planner(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
Definition: planner.c:302
static void standard_qp_callback(PlannerInfo *root, void *extra)
Definition: planner.c:3402
static RelOptInfo * create_window_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4482
PlannedStmt *(* planner_hook_type)(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
Definition: planner.h:26
void(* create_upper_paths_hook_type)(PlannerInfo *root, UpperRelationKind stage, RelOptInfo *input_rel, RelOptInfo *output_rel, void *extra)
Definition: planner.h:33
RowMarkType
Definition: plannodes.h:1485
@ ROW_MARK_COPY
Definition: plannodes.h:1491
@ ROW_MARK_REFERENCE
Definition: plannodes.h:1490
@ ROW_MARK_SHARE
Definition: plannodes.h:1488
@ ROW_MARK_EXCLUSIVE
Definition: plannodes.h:1486
@ ROW_MARK_NOKEYEXCLUSIVE
Definition: plannodes.h:1487
@ ROW_MARK_KEYSHARE
Definition: plannodes.h:1489
#define snprintf
Definition: port.h:239
#define qsort(a, b, c, d)
Definition: port.h:479
#define printf(...)
Definition: port.h:245
static int64 DatumGetInt64(Datum X)
Definition: postgres.h:390
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:327
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:317
#define InvalidOid
Definition: postgres_ext.h:35
unsigned int Oid
Definition: postgres_ext.h:30
void get_agg_clause_costs(PlannerInfo *root, AggSplit aggsplit, AggClauseCosts *costs)
Definition: prepagg.c:559
void preprocess_aggrefs(PlannerInfo *root, Node *clause)
Definition: prepagg.c:110
void preprocess_function_rtes(PlannerInfo *root)
Definition: prepjointree.c:914
void flatten_simple_union_all(PlannerInfo *root)
void transform_MERGE_to_join(Query *parse)
Definition: prepjointree.c:183
void remove_useless_result_rtes(PlannerInfo *root)
Query * expand_virtual_generated_columns(PlannerInfo *root)
Definition: prepjointree.c:969
void pull_up_sublinks(PlannerInfo *root)
Definition: prepjointree.c:468
void replace_empty_jointree(Query *parse)
Definition: prepjointree.c:410
void pull_up_subqueries(PlannerInfo *root)
Relids get_relids_in_jointree(Node *jtnode, bool include_outer_joins, bool include_inner_joins)
void reduce_outer_joins(PlannerInfo *root)
Expr * canonicalize_qual(Expr *qual, bool is_check)
Definition: prepqual.c:293
char * c
e
Definition: preproc-init.c:82
void preprocess_targetlist(PlannerInfo *root)
Definition: preptlist.c:64
RelOptInfo * plan_set_operations(PlannerInfo *root)
Definition: prepunion.c:93
tree ctl root
Definition: radixtree.h:1857
static struct subre * parse(struct vars *v, int stopper, int type, struct state *init, struct state *final)
Definition: regcomp.c:717
List * RelationGetIndexPredicate(Relation relation)
Definition: relcache.c:5210
List * RelationGetIndexExpressions(Relation relation)
Definition: relcache.c:5097
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:414
void setup_simple_rel_arrays(PlannerInfo *root)
Definition: relnode.c:94
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:1458
RelOptInfo * build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent)
Definition: relnode.c:192
Node * remove_nulling_relids(Node *node, const Bitmapset *removable_relids, const Bitmapset *except_relids)
@ ForwardScanDirection
Definition: sdir.h:28
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition: selfuncs.c:3446
double estimate_hashagg_tablesize(PlannerInfo *root, Path *path, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: selfuncs.c:4176
Plan * set_plan_references(PlannerInfo *root, Plan *plan)
Definition: setrefs.c:288
bool extract_query_dependencies_walker(Node *node, PlannerInfo *context)
Definition: setrefs.c:3669
void check_stack_depth(void)
Definition: stack_depth.c:95
List * aggrefs
Definition: pathnodes.h:3520
List * aggdistinct
Definition: primnodes.h:491
List * args
Definition: primnodes.h:485
Expr * aggfilter
Definition: primnodes.h:494
List * aggorder
Definition: primnodes.h:488
GetForeignRowMarkType_function GetForeignRowMarkType
Definition: fdwapi.h:247
GetForeignUpperPaths_function GetForeignUpperPaths
Definition: fdwapi.h:226
Cardinality limit_tuples
Definition: pathnodes.h:3460
Node * quals
Definition: primnodes.h:2338
List * fromlist
Definition: primnodes.h:2337
int num_workers
Definition: plannodes.h:1289
bool invisible
Definition: plannodes.h:1295
bool single_copy
Definition: plannodes.h:1293
Plan plan
Definition: plannodes.h:1287
int rescan_param
Definition: plannodes.h:1291
PartitionwiseAggregateType patype
Definition: pathnodes.h:3444
AggClauseCosts agg_final_costs
Definition: pathnodes.h:3438
AggClauseCosts agg_partial_costs
Definition: pathnodes.h:3437
Cardinality numGroups
Definition: pathnodes.h:2418
Path path
Definition: pathnodes.h:1846
Definition: pg_list.h:54
Definition: nodes.h:135
List * exprs
Definition: pathnodes.h:1669
List * pathkeys
Definition: pathnodes.h:1802
Cardinality rows
Definition: pathnodes.h:1796
int disabled_nodes
Definition: pathnodes.h:1797
Cost total_cost
Definition: pathnodes.h:1799
LockClauseStrength strength
Definition: plannodes.h:1550
Index prti
Definition: plannodes.h:1542
RowMarkType markType
Definition: plannodes.h:1546
LockWaitPolicy waitPolicy
Definition: plannodes.h:1552
bool isParent
Definition: plannodes.h:1554
Index rowmarkId
Definition: plannodes.h:1544
int allMarkTypes
Definition: plannodes.h:1548
struct Plan * lefttree
Definition: plannodes.h:213
Cost total_cost
Definition: plannodes.h:179
struct Plan * righttree
Definition: plannodes.h:214
bool parallel_aware
Definition: plannodes.h:193
Cost startup_cost
Definition: plannodes.h:177
List * qual
Definition: plannodes.h:211
int plan_width
Definition: plannodes.h:187
bool parallel_safe
Definition: plannodes.h:195
Cardinality plan_rows
Definition: plannodes.h:185
List * targetlist
Definition: plannodes.h:209
List * initPlan
Definition: plannodes.h:216
struct Plan * planTree
Definition: plannodes.h:83
List * firstResultRels
Definition: plannodes.h:113
bool hasModifyingCTE
Definition: plannodes.h:65
List * appendRelations
Definition: plannodes.h:116
uint64 planId
Definition: plannodes.h:59
List * permInfos
Definition: plannodes.h:102
bool canSetTag
Definition: plannodes.h:68
List * rowMarks
Definition: plannodes.h:127
int jitFlags
Definition: plannodes.h:80
Bitmapset * rewindPlanIDs
Definition: plannodes.h:124
ParseLoc stmt_len
Definition: plannodes.h:145
bool hasReturning
Definition: plannodes.h:62
ParseLoc stmt_location
Definition: plannodes.h:143
List * invalItems
Definition: plannodes.h:133
bool transientPlan
Definition: plannodes.h:71
List * resultRelations
Definition: plannodes.h:106
List * subplans
Definition: plannodes.h:121
List * relationOids
Definition: plannodes.h:130
bool dependsOnRole
Definition: plannodes.h:74
Bitmapset * unprunableRelids
Definition: plannodes.h:97
CmdType commandType
Definition: plannodes.h:53
Node * utilityStmt
Definition: plannodes.h:139
List * rtable
Definition: plannodes.h:91
List * partPruneInfos
Definition: plannodes.h:88
List * paramExecTypes
Definition: plannodes.h:136
bool parallelModeNeeded
Definition: plannodes.h:77
uint64 queryId
Definition: plannodes.h:56
Bitmapset * prunableRelids
Definition: pathnodes.h:130
int lastPlanNodeId
Definition: pathnodes.h:166
char maxParallelHazard
Definition: pathnodes.h:181
List * subplans
Definition: pathnodes.h:105
bool dependsOnRole
Definition: pathnodes.h:172
Bitmapset * allRelids
Definition: pathnodes.h:123
List * appendRelations
Definition: pathnodes.h:145
List * finalrowmarks
Definition: pathnodes.h:136
List * invalItems
Definition: pathnodes.h:154
List * relationOids
Definition: pathnodes.h:151
List * paramExecTypes
Definition: pathnodes.h:157
bool parallelModeOK
Definition: pathnodes.h:175
bool transientPlan
Definition: pathnodes.h:169
Bitmapset * rewindPlanIDs
Definition: pathnodes.h:114
List * finalrteperminfos
Definition: pathnodes.h:133
List * subpaths
Definition: pathnodes.h:108
Index lastPHId
Definition: pathnodes.h:160
Index lastRowMarkId
Definition: pathnodes.h:163
List * resultRelations
Definition: pathnodes.h:139
List * partPruneInfos
Definition: pathnodes.h:148
List * finalrtable
Definition: pathnodes.h:117
List * firstResultRels
Definition: pathnodes.h:142
bool parallelModeNeeded
Definition: pathnodes.h:178
Index query_level
Definition: pathnodes.h:232
Cost per_tuple
Definition: pathnodes.h:48
Cost startup
Definition: pathnodes.h:47
List * rtable
Definition: parsenodes.h:170
CmdType commandType
Definition: parsenodes.h:121
TableFunc * tablefunc
Definition: parsenodes.h:1198
struct TableSampleClause * tablesample
Definition: parsenodes.h:1112
Query * subquery
Definition: parsenodes.h:1118
List * values_lists
Definition: parsenodes.h:1204
JoinType jointype
Definition: parsenodes.h:1165
List * functions
Definition: parsenodes.h:1191
RTEKind rtekind
Definition: parsenodes.h:1061
bool useridiscurrent
Definition: pathnodes.h:995
Relids relids
Definition: pathnodes.h:898
struct PathTarget * reltarget
Definition: pathnodes.h:920
Index relid
Definition: pathnodes.h:945
Cardinality tuples
Definition: pathnodes.h:976
bool consider_parallel
Definition: pathnodes.h:914
BlockNumber pages
Definition: pathnodes.h:975
List * pathlist
Definition: pathnodes.h:925
RelOptKind reloptkind
Definition: pathnodes.h:892
List * indexlist
Definition: pathnodes.h:971
struct Path * cheapest_startup_path
Definition: pathnodes.h:928
struct Path * cheapest_total_path
Definition: pathnodes.h:929
Oid userid
Definition: pathnodes.h:993
Oid serverid
Definition: pathnodes.h:991
Bitmapset * live_parts
Definition: pathnodes.h:1066
int rel_parallel_workers
Definition: pathnodes.h:983
List * partial_pathlist
Definition: pathnodes.h:927
Cardinality rows
Definition: pathnodes.h:904
Form_pg_class rd_rel
Definition: rel.h:111
Cardinality numGroups
Definition: pathnodes.h:2429
List * groupClause
Definition: pathnodes.h:2426
List * gsets_data
Definition: pathnodes.h:2428
bool hashable
Definition: pathnodes.h:2430
List * gsets
Definition: pathnodes.h:2427
bool is_hashed
Definition: pathnodes.h:2431
LockClauseStrength strength
Definition: parsenodes.h:1594
LockWaitPolicy waitPolicy
Definition: parsenodes.h:1595
Index tleSortGroupRef
Definition: parsenodes.h:1452
struct WindowClause * window_clause
Definition: supportnodes.h:339
Expr * expr
Definition: primnodes.h:2219
AttrNumber resno
Definition: primnodes.h:2221
Index ressortgroupref
Definition: primnodes.h:2225
Definition: primnodes.h:262
WindowClause * wc
Definition: planner.c:116
Node * startOffset
Definition: parsenodes.h:1561
List * partitionClause
Definition: parsenodes.h:1557
Node * endOffset
Definition: parsenodes.h:1562
List * orderClause
Definition: parsenodes.h:1559
List ** windowFuncs
Definition: clauses.h:23
Index maxWinRef
Definition: clauses.h:22
int numWindowFuncs
Definition: clauses.h:21
Index winref
Definition: primnodes.h:598
Oid winfnoid
Definition: primnodes.h:584
int * tleref_to_colnum_map
Definition: planner.c:107
Bitmapset * unhashable_refs
Definition: planner.c:105
List * unsortable_sets
Definition: planner.c:106
List * hash_sets_idx
Definition: planner.c:101
double dNumHashGroups
Definition: planner.c:102
Bitmapset * unsortable_refs
Definition: planner.c:104
Definition: type.h:96
List * activeWindows
Definition: planner.c:124
grouping_sets_data * gset_data
Definition: planner.c:125
SetOperationStmt * setop
Definition: planner.c:126
Definition: regguts.h:323
Node * SS_process_sublinks(PlannerInfo *root, Node *expr, bool isQual)
Definition: subselect.c:2026
void SS_process_ctes(PlannerInfo *root)
Definition: subselect.c:880
void SS_identify_outer_params(PlannerInfo *root)
Definition: subselect.c:2184
Node * SS_replace_correlation_vars(PlannerInfo *root, Node *expr)
Definition: subselect.c:1971
void SS_finalize_plan(PlannerInfo *root, Plan *plan)
Definition: subselect.c:2368
void SS_compute_initplan_cost(List *init_plans, Cost *initplan_cost_p, bool *unsafe_initplans_p)
Definition: subselect.c:2312
void SS_charge_for_initplans(PlannerInfo *root, RelOptInfo *final_rel)
Definition: subselect.c:2248
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:40
bool tlist_same_exprs(List *tlist1, List *tlist2)
Definition: tlist.c:218
SortGroupClause * get_sortgroupref_clause_noerr(Index sortref, List *clauses)
Definition: tlist.c:443
SortGroupClause * get_sortgroupref_clause(Index sortref, List *clauses)
Definition: tlist.c:422
bool grouping_is_sortable(List *groupClause)
Definition: tlist.c:540
PathTarget * copy_pathtarget(PathTarget *src)
Definition: tlist.c:657
void add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
Definition: tlist.c:752
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:681
List * get_sortgrouplist_exprs(List *sgClauses, List *targetList)
Definition: tlist.c:392
void split_pathtarget_at_srfs(PlannerInfo *root, PathTarget *target, PathTarget *input_target, List **targets, List **targets_contain_srfs)
Definition: tlist.c:881
bool grouping_is_hashable(List *groupClause)
Definition: tlist.c:560
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition: tlist.c:695
#define create_pathtarget(root, tlist)
Definition: tlist.h:53
Node * flatten_group_exprs(PlannerInfo *root, Query *query, Node *node)
Definition: var.c:968
Relids pull_varnos(PlannerInfo *root, Node *node)
Definition: var.c:114
List * pull_var_clause(Node *node, int flags)
Definition: var.c:653
Node * flatten_join_alias_vars(PlannerInfo *root, Query *query, Node *node)
Definition: var.c:789