Skip to content

Commit 378802e

Browse files
committed
Update the names of Parallel Hash Join phases.
Commit 3048898 dropped -ING from some wait event names that correspond to barrier phases. Update the phases' names to match. While we're here making cosmetic changes, also rename "DONE" to "FREE". That pairs better with "ALLOCATE", and describes the activity that actually happens in that phase (as we do for the other phases) rather than describing a state. The distinction is clearer after bugfix commit 3b8981b split the phase into two. As for the growth barriers, rename their "ALLOCATE" phase to "REALLOCATE", which is probably a better description of what happens then. Also improve the comments about the phases a bit. Discussion: https://postgr.es/m/CA%2BhUKG%2BMDpwF2Eo2LAvzd%3DpOh81wUTsrwU1uAwR-v6OGBB6%2B7g%40mail.gmail.com
1 parent 3b8981b commit 378802e

File tree

5 files changed

+104
-101
lines changed

5 files changed

+104
-101
lines changed

src/backend/executor/nodeHash.c

+36-36
Original file line number | Diff line number | Diff line change
@@ -246,10 +246,10 @@ MultiExecParallelHash(HashState *node)
246246
*/
247247
pstate = hashtable->parallel_state;
248248
build_barrier = &pstate->build_barrier;
249-
Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATING);
249+
Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATE);
250250
switch (BarrierPhase(build_barrier))
251251
{
252-
case PHJ_BUILD_ALLOCATING:
252+
case PHJ_BUILD_ALLOCATE:
253253

254254
/*
255255
* Either I just allocated the initial hash table in
@@ -259,7 +259,7 @@ MultiExecParallelHash(HashState *node)
259259
BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE);
260260
/* Fall through. */
261261

262-
case PHJ_BUILD_HASHING_INNER:
262+
case PHJ_BUILD_HASH_INNER:
263263

264264
/*
265265
* It's time to begin hashing, or if we just arrived here then
@@ -271,10 +271,10 @@ MultiExecParallelHash(HashState *node)
271271
* below.
272272
*/
273273
if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) !=
274-
PHJ_GROW_BATCHES_ELECTING)
274+
PHJ_GROW_BATCHES_ELECT)
275275
ExecParallelHashIncreaseNumBatches(hashtable);
276276
if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) !=
277-
PHJ_GROW_BUCKETS_ELECTING)
277+
PHJ_GROW_BUCKETS_ELECT)
278278
ExecParallelHashIncreaseNumBuckets(hashtable);
279279
ExecParallelHashEnsureBatchAccessors(hashtable);
280280
ExecParallelHashTableSetCurrentBatch(hashtable, 0);
@@ -338,17 +338,17 @@ MultiExecParallelHash(HashState *node)
338338
* Unless we're completely done and the batch state has been freed, make
339339
* sure we have accessors.
340340
*/
341-
if (BarrierPhase(build_barrier) < PHJ_BUILD_DONE)
341+
if (BarrierPhase(build_barrier) < PHJ_BUILD_FREE)
342342
ExecParallelHashEnsureBatchAccessors(hashtable);
343343

344344
/*
345345
* The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE
346-
* case, which will bring the build phase to PHJ_BUILD_RUNNING (if it isn't
346+
* case, which will bring the build phase to PHJ_BUILD_RUN (if it isn't
347347
* there already).
348348
*/
349-
Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASHING_OUTER ||
350-
BarrierPhase(build_barrier) == PHJ_BUILD_RUNNING ||
351-
BarrierPhase(build_barrier) == PHJ_BUILD_DONE);
349+
Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASH_OUTER ||
350+
BarrierPhase(build_barrier) == PHJ_BUILD_RUN ||
351+
BarrierPhase(build_barrier) == PHJ_BUILD_FREE);
352352
}
353353

354354
/* ----------------------------------------------------------------
@@ -596,8 +596,8 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations,
596596
* Attach to the build barrier. The corresponding detach operation is
597597
* in ExecHashTableDetach. Note that we won't attach to the
598598
* batch_barrier for batch 0 yet. We'll attach later and start it out
599-
* in PHJ_BATCH_PROBING phase, because batch 0 is allocated up front
600-
* and then loaded while hashing (the standard hybrid hash join
599+
* in PHJ_BATCH_PROBE phase, because batch 0 is allocated up front and
600+
* then loaded while hashing (the standard hybrid hash join
601601
* algorithm), and we'll coordinate that using build_barrier.
602602
*/
603603
build_barrier = &pstate->build_barrier;
@@ -610,7 +610,7 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations,
610610
* SharedHashJoinBatch objects and the hash table for batch 0. One
611611
* backend will be elected to do that now if necessary.
612612
*/
613-
if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECTING &&
613+
if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECT &&
614614
BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT))
615615
{
616616
pstate->nbatch = nbatch;
@@ -631,7 +631,7 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations,
631631
/*
632632
* The next Parallel Hash synchronization point is in
633633
* MultiExecParallelHash(), which will progress it all the way to
634-
* PHJ_BUILD_RUNNING. The caller must not return control from this
634+
* PHJ_BUILD_RUN. The caller must not return control from this
635635
* executor node between now and then.
636636
*/
637637
}
@@ -1067,7 +1067,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
10671067
ParallelHashJoinState *pstate = hashtable->parallel_state;
10681068
int i;
10691069

1070-
Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER);
1070+
Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
10711071

10721072
/*
10731073
* It's unlikely, but we need to be prepared for new participants to show
@@ -1076,7 +1076,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
10761076
*/
10771077
switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier)))
10781078
{
1079-
case PHJ_GROW_BATCHES_ELECTING:
1079+
case PHJ_GROW_BATCHES_ELECT:
10801080

10811081
/*
10821082
* Elect one participant to prepare to grow the number of batches.
@@ -1194,13 +1194,13 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
11941194
}
11951195
/* Fall through. */
11961196

1197-
case PHJ_GROW_BATCHES_ALLOCATING:
1197+
case PHJ_GROW_BATCHES_REALLOCATE:
11981198
/* Wait for the above to be finished. */
11991199
BarrierArriveAndWait(&pstate->grow_batches_barrier,
1200-
WAIT_EVENT_HASH_GROW_BATCHES_ALLOCATE);
1200+
WAIT_EVENT_HASH_GROW_BATCHES_REALLOCATE);
12011201
/* Fall through. */
12021202

1203-
case PHJ_GROW_BATCHES_REPARTITIONING:
1203+
case PHJ_GROW_BATCHES_REPARTITION:
12041204
/* Make sure that we have the current dimensions and buckets. */
12051205
ExecParallelHashEnsureBatchAccessors(hashtable);
12061206
ExecParallelHashTableSetCurrentBatch(hashtable, 0);
@@ -1213,7 +1213,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
12131213
WAIT_EVENT_HASH_GROW_BATCHES_REPARTITION);
12141214
/* Fall through. */
12151215

1216-
case PHJ_GROW_BATCHES_DECIDING:
1216+
case PHJ_GROW_BATCHES_DECIDE:
12171217

12181218
/*
12191219
* Elect one participant to clean up and decide whether further
@@ -1268,7 +1268,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
12681268
}
12691269
/* Fall through. */
12701270

1271-
case PHJ_GROW_BATCHES_FINISHING:
1271+
case PHJ_GROW_BATCHES_FINISH:
12721272
/* Wait for the above to complete. */
12731273
BarrierArriveAndWait(&pstate->grow_batches_barrier,
12741274
WAIT_EVENT_HASH_GROW_BATCHES_FINISH);
@@ -1508,7 +1508,7 @@ ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
15081508
HashMemoryChunk chunk;
15091509
dsa_pointer chunk_s;
15101510

1511-
Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER);
1511+
Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
15121512

15131513
/*
15141514
* It's unlikely, but we need to be prepared for new participants to show
@@ -1517,7 +1517,7 @@ ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
15171517
*/
15181518
switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier)))
15191519
{
1520-
case PHJ_GROW_BUCKETS_ELECTING:
1520+
case PHJ_GROW_BUCKETS_ELECT:
15211521
/* Elect one participant to prepare to increase nbuckets. */
15221522
if (BarrierArriveAndWait(&pstate->grow_buckets_barrier,
15231523
WAIT_EVENT_HASH_GROW_BUCKETS_ELECT))
@@ -1546,13 +1546,13 @@ ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
15461546
}
15471547
/* Fall through. */
15481548

1549-
case PHJ_GROW_BUCKETS_ALLOCATING:
1549+
case PHJ_GROW_BUCKETS_REALLOCATE:
15501550
/* Wait for the above to complete. */
15511551
BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1552-
WAIT_EVENT_HASH_GROW_BUCKETS_ALLOCATE);
1552+
WAIT_EVENT_HASH_GROW_BUCKETS_REALLOCATE);
15531553
/* Fall through. */
15541554

1555-
case PHJ_GROW_BUCKETS_REINSERTING:
1555+
case PHJ_GROW_BUCKETS_REINSERT:
15561556
/* Reinsert all tuples into the hash table. */
15571557
ExecParallelHashEnsureBatchAccessors(hashtable);
15581558
ExecParallelHashTableSetCurrentBatch(hashtable, 0);
@@ -1708,7 +1708,7 @@ ExecParallelHashTableInsert(HashJoinTable hashtable,
17081708

17091709
/* Try to load it into memory. */
17101710
Assert(BarrierPhase(&hashtable->parallel_state->build_barrier) ==
1711-
PHJ_BUILD_HASHING_INNER);
1711+
PHJ_BUILD_HASH_INNER);
17121712
hashTuple = ExecParallelHashTupleAlloc(hashtable,
17131713
HJTUPLE_OVERHEAD + tuple->t_len,
17141714
&shared);
@@ -2862,7 +2862,7 @@ ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
28622862
if (pstate->growth != PHJ_GROWTH_DISABLED)
28632863
{
28642864
Assert(curbatch == 0);
2865-
Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER);
2865+
Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
28662866

28672867
/*
28682868
* Check if our space limit would be exceeded. To avoid choking on
@@ -2982,7 +2982,7 @@ ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
29822982
{
29832983
/* Batch 0 doesn't need to be loaded. */
29842984
BarrierAttach(&shared->batch_barrier);
2985-
while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBING)
2985+
while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBE)
29862986
BarrierArriveAndWait(&shared->batch_barrier, 0);
29872987
BarrierDetach(&shared->batch_barrier);
29882988
}
@@ -3056,8 +3056,8 @@ ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
30563056

30573057
/*
30583058
* We should never see a state where the batch-tracking array is freed,
3059-
* because we should have given up sooner if we join when the build barrier
3060-
* has reached the PHJ_BUILD_DONE phase.
3059+
* because we should have given up sooner if we join when the build
3060+
* barrier has reached the PHJ_BUILD_FREE phase.
30613061
*/
30623062
Assert(DsaPointerIsValid(pstate->batches));
30633063

@@ -3140,7 +3140,7 @@ ExecHashTableDetachBatch(HashJoinTable hashtable)
31403140
* longer attached, but since there is no way it's moving after
31413141
* this point it seems safe to make the following assertion.
31423142
*/
3143-
Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_DONE);
3143+
Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_FREE);
31443144

31453145
/* Free shared chunks and buckets. */
31463146
while (DsaPointerIsValid(batch->chunks))
@@ -3183,12 +3183,12 @@ ExecHashTableDetach(HashJoinTable hashtable)
31833183

31843184
/*
31853185
* If we're involved in a parallel query, we must either have got all the
3186-
* way to PHJ_BUILD_RUNNING, or joined too late and be in PHJ_BUILD_DONE.
3186+
* way to PHJ_BUILD_RUN, or joined too late and be in PHJ_BUILD_FREE.
31873187
*/
31883188
Assert(!pstate ||
3189-
BarrierPhase(&pstate->build_barrier) >= PHJ_BUILD_RUNNING);
3189+
BarrierPhase(&pstate->build_barrier) >= PHJ_BUILD_RUN);
31903190

3191-
if (pstate && BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_RUNNING)
3191+
if (pstate && BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_RUN)
31923192
{
31933193
int i;
31943194

@@ -3211,7 +3211,7 @@ ExecHashTableDetach(HashJoinTable hashtable)
32113211
* Late joining processes will see this state and give up
32123212
* immediately.
32133213
*/
3214-
Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_DONE);
3214+
Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_FREE);
32153215

32163216
if (DsaPointerIsValid(pstate->batches))
32173217
{

Commit comments (0)