
Commit 06bd458

Use mul_size when multiplying by the number of parallel workers.
That way, if the result overflows size_t, you'll get an error instead of undefined behavior, which seems like a plus. This also has the effect of casting the number of workers from int to Size, which is better because it's harder to overflow size_t than int.

Dilip Kumar reported this issue and provided a patch upon which this patch is based, but his version did not use mul_size.
1 parent a89505f commit 06bd458
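
For readers unfamiliar with the helper: mul_size() is PostgreSQL's overflow-checked multiply for shared-memory sizing, and the checked-multiply pattern it relies on is easy to see in isolation. The sketch below is a simplified, self-contained illustration of that pattern, not PostgreSQL's actual implementation; checked_mul_size is a hypothetical stand-in that prints and exits where the real mul_size reports an error through the backend's error machinery.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

typedef size_t Size;            /* PostgreSQL's Size is size_t */

/*
 * Simplified sketch of an overflow-checked multiply in the spirit of
 * mul_size(): detect size_t overflow and report it instead of letting
 * the result silently wrap around.
 */
static Size
checked_mul_size(Size s1, Size s2)
{
    Size    result;

    if (s1 == 0 || s2 == 0)
        return 0;
    result = s1 * s2;
    /* Size is unsigned, so overflow wraps; dividing back detects it. */
    if (result / s2 != s1)
    {
        fprintf(stderr, "requested size overflows size_t\n");
        exit(EXIT_FAILURE);
    }
    return result;
}

int
main(void)
{
    int     nworkers = 4;
    Size    queue_size = 65536;

    /* The multiplication is done in Size, with an explicit overflow check. */
    printf("total = %zu bytes\n", checked_mul_size(queue_size, (Size) nworkers));
    return 0;
}

Going through such a helper also forces the int worker count through a conversion to Size, which is the casting effect the commit message mentions.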

2 files changed (+15, -9 lines)


src/backend/access/transam/parallel.c

Lines changed: 4 additions & 2 deletions
@@ -241,7 +241,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
                      PARALLEL_ERROR_QUEUE_SIZE,
                      "parallel error queue size not buffer-aligned");
     shm_toc_estimate_chunk(&pcxt->estimator,
-                           PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
+                           mul_size(PARALLEL_ERROR_QUEUE_SIZE,
+                                    pcxt->nworkers));
     shm_toc_estimate_keys(&pcxt->estimator, 1);
 
     /* Estimate how much we'll need for extension entrypoint info. */
@@ -347,7 +348,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
      */
     error_queue_space =
         shm_toc_allocate(pcxt->toc,
-                         PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
+                         mul_size(PARALLEL_ERROR_QUEUE_SIZE,
+                                  pcxt->nworkers));
     for (i = 0; i < pcxt->nworkers; ++i)
     {
         char       *start;

src/backend/executor/execParallel.c

Lines changed: 11 additions & 7 deletions
@@ -287,7 +288,8 @@ ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize)
     if (!reinitialize)
         tqueuespace =
             shm_toc_allocate(pcxt->toc,
-                             PARALLEL_TUPLE_QUEUE_SIZE * pcxt->nworkers);
+                             mul_size(PARALLEL_TUPLE_QUEUE_SIZE,
+                                      pcxt->nworkers));
     else
         tqueuespace = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE);
 
@@ -296,7 +297,8 @@ ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize)
     {
         shm_mq     *mq;
 
-        mq = shm_mq_create(tqueuespace + i * PARALLEL_TUPLE_QUEUE_SIZE,
+        mq = shm_mq_create(tqueuespace +
+                           ((Size) i) * PARALLEL_TUPLE_QUEUE_SIZE,
                            (Size) PARALLEL_TUPLE_QUEUE_SIZE);
 
         shm_mq_set_receiver(mq, MyProc);
@@ -380,12 +382,12 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
      * looking at pgBufferUsage, so do it unconditionally.
      */
     shm_toc_estimate_chunk(&pcxt->estimator,
-                           sizeof(BufferUsage) * pcxt->nworkers);
+                           mul_size(sizeof(BufferUsage), pcxt->nworkers));
     shm_toc_estimate_keys(&pcxt->estimator, 1);
 
     /* Estimate space for tuple queues. */
     shm_toc_estimate_chunk(&pcxt->estimator,
-                           PARALLEL_TUPLE_QUEUE_SIZE * pcxt->nworkers);
+                           mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
     shm_toc_estimate_keys(&pcxt->estimator, 1);
 
     /*
@@ -404,7 +406,9 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
             sizeof(int) * e.nnodes;
         instrumentation_len = MAXALIGN(instrumentation_len);
         instrument_offset = instrumentation_len;
-        instrumentation_len += sizeof(Instrumentation) * e.nnodes * nworkers;
+        instrumentation_len +=
+            mul_size(sizeof(Instrumentation),
+                     mul_size(e.nnodes, nworkers));
         shm_toc_estimate_chunk(&pcxt->estimator, instrumentation_len);
         shm_toc_estimate_keys(&pcxt->estimator, 1);
     }
@@ -432,7 +436,7 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
 
     /* Allocate space for each worker's BufferUsage; no need to initialize. */
     bufusage_space = shm_toc_allocate(pcxt->toc,
-                                      sizeof(BufferUsage) * pcxt->nworkers);
+                                      mul_size(sizeof(BufferUsage), pcxt->nworkers));
     shm_toc_insert(pcxt->toc, PARALLEL_KEY_BUFFER_USAGE, bufusage_space);
     pei->buffer_usage = bufusage_space;
 
@@ -511,7 +515,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
     InstrAggNode(planstate->instrument, &instrument[n]);
 
     /* Also store the per-worker detail. */
-    ibytes = instrumentation->num_workers * sizeof(Instrumentation);
+    ibytes = mul_size(instrumentation->num_workers, sizeof(Instrumentation));
     planstate->worker_instrument =
         palloc(ibytes + offsetof(WorkerInstrumentation, instrument));
     planstate->worker_instrument->num_workers = instrumentation->num_workers;
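
One hunk in this file takes a slightly different route: inside the per-worker loop the queue offset becomes ((Size) i) * PARALLEL_TUPLE_QUEUE_SIZE rather than a mul_size() call, presumably because the total allocation was already overflow-checked and only the int index needs widening before the multiply. The fragment below is a minimal, self-contained sketch of why that widening matters; QUEUE_SIZE and the index value are illustrative, not PostgreSQL's.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for PARALLEL_TUPLE_QUEUE_SIZE: a plain int literal. */
#define QUEUE_SIZE 65536

int
main(void)
{
    int     i = 40000;          /* chosen only to make the arithmetic exceed INT_MAX */

    /*
     * "i * QUEUE_SIZE" would be evaluated as int * int; 40000 * 65536 is
     * about 2.6 billion, which does not fit in a 32-bit int, so that form
     * overflows (undefined behavior for signed int) before the result is
     * ever used.  Widening the index first keeps the whole multiplication
     * in size_t, which is what the ((Size) i) cast in the hunk above does.
     */
    size_t  offset = (size_t) i * QUEUE_SIZE;

    printf("offset for index %d = %zu bytes\n", i, offset);
    return 0;
}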
