
Commit 5327ca9

resolve conflicts caused by the merge of 'master_parallel_nodes'
2 parents: 430c03a + bda6e5b

File tree

5 files changed: +496 -0 lines changed


src/hooks.c

Lines changed: 11 additions & 0 deletions
@@ -339,10 +339,21 @@ pathman_rel_pathlist_hook(PlannerInfo *root,
 		list_free_deep(rel->pathlist);
 		rel->pathlist = NIL;
 
+#if PG_VERSION_NUM >= 90600
+		/* Clear old partial path list */
+		list_free(rel->partial_pathlist);
+		rel->partial_pathlist = NIL;
+#endif
+
 		/* Generate new paths using the rels we've just added */
 		set_append_rel_pathlist(root, rel, rti, pathkeyAsc, pathkeyDesc);
 		set_append_rel_size_compat(root, rel, rti);
 
+#if PG_VERSION_NUM >= 90600
+		/* consider gathering partial paths for the parent appendrel */
+		generate_gather_paths(root, rel);
+#endif
+
 		/* No need to go further (both nodes are disabled), return */
 		if (!(pg_pathman_enable_runtimeappend ||
 			  pg_pathman_enable_runtime_merge_append))
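
Both additions mirror what the core planner does for an appendrel on PostgreSQL 9.6: the stale partial_pathlist is cleared along with the ordinary pathlist, and once pg_pathman has rebuilt the child paths, generate_gather_paths() lets the planner place a Gather over the cheapest partial path of the parent. The sketch below shows roughly what that call achieves; it is an illustration, not the PostgreSQL source, and it assumes the 9.6 signature of create_gather_path().

#if PG_VERSION_NUM >= 90600
/*
 * Sketch only: roughly what generate_gather_paths(root, rel) achieves for
 * the parent appendrel after its partial paths have been rebuilt.
 */
static void
gather_cheapest_partial_path_sketch(PlannerInfo *root, RelOptInfo *rel)
{
	Path	   *cheapest_partial;

	if (rel->partial_pathlist == NIL)
		return;					/* no child produced a partial path */

	/* partial_pathlist is kept sorted with the cheapest path first */
	cheapest_partial = (Path *) linitial(rel->partial_pathlist);

	/* put a Gather on top so the leader can collect the workers' rows */
	add_path(rel, (Path *)
			 create_gather_path(root, rel, cheapest_partial,
								rel->reltarget, NULL, NULL));
}
#endif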

src/pg_compat.c

Lines changed: 213 additions & 0 deletions
@@ -10,10 +10,14 @@
 
 #include "pg_compat.h"
 
+#include "catalog/pg_proc.h"
+#include "foreign/fdwapi.h"
+#include "optimizer/clauses.h"
 #include "optimizer/pathnode.h"
 #include "optimizer/prep.h"
 #include "port.h"
 #include "utils.h"
+#include "utils/lsyscache.h"
 
 #include <math.h>
 
@@ -101,4 +105,213 @@ make_result(List *tlist,
 
 	return node;
 }
+
+/*
+ * If this relation could possibly be scanned from within a worker, then set
+ * its consider_parallel flag.
+ */
+void
+set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
+						  RangeTblEntry *rte)
+{
+	/*
+	 * The flag has previously been initialized to false, so we can just
+	 * return if it becomes clear that we can't safely set it.
+	 */
+	Assert(!rel->consider_parallel);
+
+	/* Don't call this if parallelism is disallowed for the entire query. */
+	Assert(root->glob->parallelModeOK);
+
+	/* This should only be called for baserels and appendrel children. */
+	Assert(rel->reloptkind == RELOPT_BASEREL ||
+		   rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
+
+	/* Assorted checks based on rtekind. */
+	switch (rte->rtekind)
+	{
+		case RTE_RELATION:
+
+			/*
+			 * Currently, parallel workers can't access the leader's temporary
+			 * tables.  We could possibly relax this if we wrote all of its
+			 * local buffers at the start of the query and made no changes
+			 * thereafter (maybe we could allow hint bit changes), and if we
+			 * taught the workers to read them.  Writing a large number of
+			 * temporary buffers could be expensive, though, and we don't have
+			 * the rest of the necessary infrastructure right now anyway.  So
+			 * for now, bail out if we see a temporary table.
+			 */
+			if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP)
+				return;
+
+			/*
+			 * Table sampling can be pushed down to workers if the sample
+			 * function and its arguments are safe.
+			 */
+			if (rte->tablesample != NULL)
+			{
+				Oid			proparallel = func_parallel(rte->tablesample->tsmhandler);
+
+				if (proparallel != PROPARALLEL_SAFE)
+					return;
+				if (has_parallel_hazard((Node *) rte->tablesample->args,
+										false))
+					return;
+			}
+
+			/*
+			 * Ask FDWs whether they can support performing a ForeignScan
+			 * within a worker.  Most often, the answer will be no.  For
+			 * example, if the nature of the FDW is such that it opens a TCP
+			 * connection with a remote server, each parallel worker would end
+			 * up with a separate connection, and these connections might not
+			 * be appropriately coordinated between workers and the leader.
+			 */
+			if (rte->relkind == RELKIND_FOREIGN_TABLE)
+			{
+				Assert(rel->fdwroutine);
+				if (!rel->fdwroutine->IsForeignScanParallelSafe)
+					return;
+				if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte))
+					return;
+			}
+
+			/*
+			 * There are additional considerations for appendrels, which we'll
+			 * deal with in set_append_rel_size and set_append_rel_pathlist.
+			 * For now, just set consider_parallel based on the rel's own
+			 * quals and targetlist.
+			 */
+			break;
+
+		case RTE_SUBQUERY:
+
+			/*
+			 * There's no intrinsic problem with scanning a subquery-in-FROM
+			 * (as distinct from a SubPlan or InitPlan) in a parallel worker.
+			 * If the subquery doesn't happen to have any parallel-safe paths,
+			 * then flagging it as consider_parallel won't change anything,
+			 * but that's true for plain tables, too.  We must set
+			 * consider_parallel based on the rel's own quals and targetlist,
+			 * so that if a subquery path is parallel-safe but the quals and
+			 * projection we're sticking onto it are not, we correctly mark
+			 * the SubqueryScanPath as not parallel-safe.  (Note that
+			 * set_subquery_pathlist() might push some of these quals down
+			 * into the subquery itself, but that doesn't change anything.)
+			 */
+			break;
+
+		case RTE_JOIN:
+			/* Shouldn't happen; we're only considering baserels here. */
+			Assert(false);
+			return;
+
+		case RTE_FUNCTION:
+			/* Check for parallel-restricted functions. */
+			if (has_parallel_hazard((Node *) rte->functions, false))
+				return;
+			break;
+
+		case RTE_VALUES:
+			/* Check for parallel-restricted functions. */
+			if (has_parallel_hazard((Node *) rte->values_lists, false))
+				return;
+			break;
+
+		case RTE_CTE:
+
+			/*
+			 * CTE tuplestores aren't shared among parallel workers, so we
+			 * force all CTE scans to happen in the leader.  Also, populating
+			 * the CTE would require executing a subplan that's not available
+			 * in the worker, might be parallel-restricted, and must get
+			 * executed only once.
+			 */
+			return;
+	}
+
+	/*
+	 * If there's anything in baserestrictinfo that's parallel-restricted, we
+	 * give up on parallelizing access to this relation.  We could consider
+	 * instead postponing application of the restricted quals until we're
+	 * above all the parallelism in the plan tree, but it's not clear that
+	 * that would be a win in very many cases, and it might be tricky to make
+	 * outer join clauses work correctly.  It would likely break equivalence
+	 * classes, too.
+	 */
+	if (has_parallel_hazard((Node *) rel->baserestrictinfo, false))
+		return;
+
+	/*
+	 * Likewise, if the relation's outputs are not parallel-safe, give up.
+	 * (Usually, they're just Vars, but sometimes they're not.)
+	 */
+	if (has_parallel_hazard((Node *) rel->reltarget->exprs, false))
+		return;
+
+	/* We have a winner. */
+	rel->consider_parallel = true;
+}
+
+/*
+ * create_plain_partial_paths
+ *	  Build partial access paths for parallel scan of a plain relation
+ */
+void
+create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
+{
+	int			parallel_workers;
+
+	/*
+	 * If the user has set the parallel_workers reloption, use that; otherwise
+	 * select a default number of workers.
+	 */
+	if (rel->rel_parallel_workers != -1)
+		parallel_workers = rel->rel_parallel_workers;
+	else
+	{
+		int			parallel_threshold;
+
+		/*
+		 * If this relation is too small to be worth a parallel scan, just
+		 * return without doing anything ... unless it's an inheritance child.
+		 * In that case, we want to generate a parallel path here anyway.  It
+		 * might not be worthwhile just for this relation, but when combined
+		 * with all of its inheritance siblings it may well pay off.
+		 */
+		if (rel->pages < (BlockNumber) min_parallel_relation_size &&
+			rel->reloptkind == RELOPT_BASEREL)
+			return;
+
+		/*
+		 * Select the number of workers based on the log of the size of the
+		 * relation.  This probably needs to be a good deal more
+		 * sophisticated, but we need something here for now.  Note that the
+		 * upper limit of the min_parallel_relation_size GUC is chosen to
+		 * prevent overflow here.
+		 */
+		parallel_workers = 1;
+		parallel_threshold = Max(min_parallel_relation_size, 1);
+		while (rel->pages >= (BlockNumber) (parallel_threshold * 3))
+		{
+			parallel_workers++;
+			parallel_threshold *= 3;
+			if (parallel_threshold > INT_MAX / 3)
+				break;			/* avoid overflow */
+		}
+	}
+
+	/*
+	 * In no case use more than max_parallel_workers_per_gather workers.
+	 */
+	parallel_workers = Min(parallel_workers, max_parallel_workers_per_gather);
+
+	/* If any limit was set to zero, the user doesn't want a parallel scan. */
+	if (parallel_workers <= 0)
+		return;
+
+	/* Add an unordered partial path based on a parallel sequential scan. */
+	add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
+}
 #endif
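
The heuristic in create_plain_partial_paths() adds one worker each time the relation grows past three times the previous threshold, starting from min_parallel_relation_size, and then caps the result at max_parallel_workers_per_gather. The standalone program below re-states that loop so the growth pattern is easy to see; the 1024-page threshold, the cap of 4 and the table sizes are hypothetical inputs, not values from this commit, and the inheritance-child exception of the real function is left out.

#include <limits.h>
#include <stdio.h>

/* Illustration only: same shape as the worker-count heuristic above. */
static int
workers_for(unsigned long pages, int min_parallel_relation_size,
			int max_parallel_workers_per_gather)
{
	int			parallel_workers;
	int			parallel_threshold;

	if (pages < (unsigned long) min_parallel_relation_size)
		return 0;				/* too small: no partial path */

	parallel_workers = 1;
	parallel_threshold = min_parallel_relation_size > 1 ? min_parallel_relation_size : 1;
	while (pages >= (unsigned long) (parallel_threshold * 3))
	{
		parallel_workers++;
		parallel_threshold *= 3;
		if (parallel_threshold > INT_MAX / 3)
			break;				/* avoid overflow */
	}

	if (parallel_workers > max_parallel_workers_per_gather)
		parallel_workers = max_parallel_workers_per_gather;
	return parallel_workers;
}

int
main(void)
{
	unsigned long sizes[] = {512, 2048, 10000, 100000, 1000000};
	int			i;

	/* with these assumed inputs: 0, 1, 3, 4, 4 workers respectively */
	for (i = 0; i < 5; i++)
		printf("%lu pages -> %d workers\n",
			   sizes[i], workers_for(sizes[i], 1024, 4));
	return 0;
}

Because the threshold triples on every step, a relation needs roughly 3^n times the starting threshold in pages to justify n+1 workers, so even a million-page table only asks for a handful of workers before the cap applies.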

src/pg_compat.h

Lines changed: 13 additions & 0 deletions
@@ -41,6 +41,15 @@ void adjust_targetlist_compat(PlannerInfo *root, RelOptInfo *dest,
 #define pull_var_clause_compat(node, aggbehavior, phbehavior) \
 		pull_var_clause(node, aggbehavior | phbehavior)
 
+extern void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
+									  RangeTblEntry *rte);
+#define set_rel_consider_parallel_compat(root, rel, rte) \
+		set_rel_consider_parallel(root, rel, rte)
+
+extern void create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel);
+#define create_plain_partial_paths_compat(root, rel) \
+		create_plain_partial_paths(root, rel)
+
 extern Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan);
 #define make_result_compat(root, tlist, resconstantqual, subplan) \
 		make_result(tlist, resconstantqual, subplan)
@@ -68,6 +77,10 @@ extern Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan);
 #define make_result_compat(root, tlist, resconstantqual, subplan) \
 		make_result(root, tlist, resconstantqual, subplan)
 
+#define set_rel_consider_parallel_compat(root, rel, rte) ((void) true)
+
+#define create_plain_partial_paths_compat(root, rel) ((void) true)
+
 #endif
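
The two halves of pg_compat.h give every call site a single spelling that compiles on both supported branches: on 9.6 and later the _compat macro forwards to the real function, while on older servers it collapses to a no-op expression. Condensed to one wrapper (the surrounding #if/#else layout is paraphrased here, not copied from the header):

#if PG_VERSION_NUM >= 90600
extern void create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel);
#define create_plain_partial_paths_compat(root, rel) \
		create_plain_partial_paths(root, rel)
#else
/* Pre-9.6 servers have no partial paths, so the call simply disappears. */
#define create_plain_partial_paths_compat(root, rel) ((void) true)
#endif

Expanding to ((void) true) rather than to nothing keeps the macro valid wherever an expression is expected, for instance as the body of an unbraced if.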
src/pg_pathman.c

Lines changed: 74 additions & 0 deletions
@@ -1628,6 +1628,12 @@ set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
 #endif
 	add_path(rel, path);
 
+#if PG_VERSION_NUM >= 90600
+	/* If appropriate, consider parallel sequential scan */
+	if (rel->consider_parallel && required_outer == NULL)
+		create_plain_partial_paths_compat(root, rel);
+#endif
+
 	/* Consider index scans */
 	create_index_paths(root, rel);
 
@@ -1675,6 +1681,10 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti,
 	List	   *live_childrels = NIL;
 	List	   *subpaths = NIL;
 	bool		subpaths_valid = true;
+#if PG_VERSION_NUM >= 90600
+	List	   *partial_subpaths = NIL;
+	bool		partial_subpaths_valid = true;
+#endif
 	List	   *all_child_pathkeys = NIL;
 	List	   *all_child_outers = NIL;
 	ListCell   *l;
@@ -1702,6 +1712,18 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti,
 		childRTE = root->simple_rte_array[childRTindex];
 		childrel = root->simple_rel_array[childRTindex];
 
+#if PG_VERSION_NUM >= 90600
+		/*
+		 * If parallelism is allowable for this query in general and for the
+		 * parent appendrel, see whether it's allowable for this childrel in
+		 * particular.
+		 *
+		 * For consistency, do this before calling set_rel_size() for the child.
+		 */
+		if (root->glob->parallelModeOK && rel->consider_parallel)
+			set_rel_consider_parallel_compat(root, childrel, childRTE);
+#endif
+
 		/*
 		 * Compute the child's access paths.
 		 */
@@ -1728,6 +1750,18 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti,
 		 */
 		live_childrels = lappend(live_childrels, childrel);
 
+#if PG_VERSION_NUM >= 90600
+		/*
+		 * If any live child is not parallel-safe, treat the whole appendrel
+		 * as not parallel-safe.  In future we might be able to generate plans
+		 * in which some children are farmed out to workers while others are
+		 * not; but we don't have that today, so it's a waste to consider
+		 * partial paths anywhere in the appendrel unless it's all safe.
+		 */
+		if (!childrel->consider_parallel)
+			rel->consider_parallel = false;
+#endif
+
 		/*
 		 * If child has an unparameterized cheapest-total path, add that to
 		 * the unparameterized Append path we are constructing for the parent.
@@ -1739,6 +1773,15 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti,
 		else
 			subpaths_valid = false;
 
+#if PG_VERSION_NUM >= 90600
+		/* Same idea, but for a partial plan. */
+		if (childrel->partial_pathlist != NIL)
+			partial_subpaths = accumulate_append_subpath(partial_subpaths,
+									linitial(childrel->partial_pathlist));
+		else
+			partial_subpaths_valid = false;
+#endif
+
 		/*
 		 * Collect lists of all the available path orderings and
 		 * parameterizations for all the children.  We use these as a
@@ -1813,6 +1856,37 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti,
 		add_path(rel,
 				 (Path *) create_append_path_compat(rel, subpaths, NULL, 0));
 
+#if PG_VERSION_NUM >= 90600
+	/*
+	 * Consider an append of unordered, unparameterized partial paths.
+	 */
+	if (partial_subpaths_valid)
+	{
+		AppendPath *appendpath;
+		ListCell   *lc;
+		int			parallel_workers = 0;
+
+		/*
+		 * Decide on the number of workers to request for this append path.
+		 * For now, we just use the maximum value from among the members.  It
+		 * might be useful to use a higher number if the Append node were
+		 * smart enough to spread out the workers, but it currently isn't.
+		 */
+		foreach(lc, partial_subpaths)
+		{
+			Path	   *path = lfirst(lc);
+
+			parallel_workers = Max(parallel_workers, path->parallel_workers);
+		}
+		Assert(parallel_workers > 0);
+
+		/* Generate a partial append path. */
+		appendpath = create_append_path(rel, partial_subpaths, NULL,
+										parallel_workers);
+		add_partial_path(rel, (Path *) appendpath);
+	}
+#endif
+
 	/*
 	 * Also build unparameterized MergeAppend paths based on the collected
 	 * list of child pathkeys.
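
partial_subpaths_valid stays true only if every live child supplied a partial path, and each such path requests at least one worker, which is why the Assert on parallel_workers holds. The toy program below walks through the same reduction with hypothetical per-child worker counts; the numbers are made up for illustration.

#include <stdio.h>

int
main(void)
{
	/* hypothetical worker requests from three child partitions */
	int			child_workers[] = {1, 3, 2};
	int			parallel_workers = 0;
	int			i;

	/* same reduction as the foreach loop above: keep the maximum */
	for (i = 0; i < 3; i++)
		if (child_workers[i] > parallel_workers)
			parallel_workers = child_workers[i];

	/* the partial Append therefore asks for 3 workers */
	printf("parallel_workers = %d\n", parallel_workers);
	return 0;
}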
