Skip to content

Commit f0fc1d4

Browse files
committed
Add defenses against integer overflow in dynahash numbuckets calculations.
The dynahash code requires the number of buckets in a hash table to fit in an int; but since we calculate the desired hash table size dynamically, there are various scenarios where we might calculate too large a value. The resulting overflow can lead to infinite loops, division-by-zero crashes, etc. I (tgl) had previously installed some defenses against that in commit 299d171, but that covered only one call path. Moreover it worked by limiting the request size to work_mem, but on a 64-bit machine it's possible to set work_mem high enough that the problem appears anyway. So let's fix the problem at the root by installing limits in the dynahash.c functions themselves. Trouble report and patch by Jeff Davis.
1 parent 97a60fa commit f0fc1d4

File tree

2 files changed

+41
-12
lines changed

2 files changed

+41
-12
lines changed

src/backend/executor/nodeHash.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -501,7 +501,9 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
501501
* Both nbuckets and nbatch must be powers of 2 to make
502502
* ExecHashGetBucketAndBatch fast. We already fixed nbatch; now inflate
503503
* nbuckets to the next larger power of 2. We also force nbuckets to not
504-
* be real small, by starting the search at 2^10.
504+
* be real small, by starting the search at 2^10. (Note: above we made
505+
* sure that nbuckets is not more than INT_MAX / 2, so this loop cannot
506+
* overflow, nor can the final shift to recalculate nbuckets.)
505507
*/
506508
i = 10;
507509
while ((1 << i) < nbuckets)

src/backend/utils/hash/dynahash.c

Lines changed: 38 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -63,6 +63,8 @@
6363

6464
#include "postgres.h"
6565

66+
#include <limits.h>
67+
6668
#include "access/xact.h"
6769
#include "storage/shmem.h"
6870
#include "storage/spin.h"
@@ -200,6 +202,8 @@ static void hdefault(HTAB *hashp);
200202
static int choose_nelem_alloc(Size entrysize);
201203
static bool init_htab(HTAB *hashp, long nelem);
202204
static void hash_corrupted(HTAB *hashp);
205+
static long next_pow2_long(long num);
206+
static int next_pow2_int(long num);
203207
static void register_seq_scan(HTAB *hashp);
204208
static void deregister_seq_scan(HTAB *hashp);
205209
static bool has_seq_scans(HTAB *hashp);
@@ -374,8 +378,13 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
374378
{
375379
/* Doesn't make sense to partition a local hash table */
376380
Assert(flags & HASH_SHARED_MEM);
377-
/* # of partitions had better be a power of 2 */
378-
Assert(info->num_partitions == (1L << my_log2(info->num_partitions)));
381+
382+
/*
383+
* The number of partitions had better be a power of 2. Also, it must
384+
* be less than INT_MAX (see init_htab()), so call the int version of
385+
* next_pow2.
386+
*/
387+
Assert(info->num_partitions == next_pow2_int(info->num_partitions));
379388

380389
hctl->num_partitions = info->num_partitions;
381390
}
@@ -518,7 +527,6 @@ init_htab(HTAB *hashp, long nelem)
518527
{
519528
HASHHDR *hctl = hashp->hctl;
520529
HASHSEGMENT *segp;
521-
long lnbuckets;
522530
int nbuckets;
523531
int nsegs;
524532

@@ -533,9 +541,7 @@ init_htab(HTAB *hashp, long nelem)
533541
* number of buckets. Allocate space for the next greater power of two
534542
* number of buckets
535543
*/
536-
lnbuckets = (nelem - 1) / hctl->ffactor + 1;
537-
538-
nbuckets = 1 << my_log2(lnbuckets);
544+
nbuckets = next_pow2_int((nelem - 1) / hctl->ffactor + 1);
539545

540546
/*
541547
* In a partitioned table, nbuckets must be at least equal to
@@ -553,7 +559,7 @@ init_htab(HTAB *hashp, long nelem)
553559
* Figure number of directory segments needed, round up to a power of 2
554560
*/
555561
nsegs = (nbuckets - 1) / hctl->ssize + 1;
556-
nsegs = 1 << my_log2(nsegs);
562+
nsegs = next_pow2_int(nsegs);
557563

558564
/*
559565
* Make sure directory is big enough. If pre-allocated directory is too
@@ -623,9 +629,9 @@ hash_estimate_size(long num_entries, Size entrysize)
623629
elementAllocCnt;
624630

625631
/* estimate number of buckets wanted */
626-
nBuckets = 1L << my_log2((num_entries - 1) / DEF_FFACTOR + 1);
632+
nBuckets = next_pow2_long((num_entries - 1) / DEF_FFACTOR + 1);
627633
/* # of segments needed for nBuckets */
628-
nSegments = 1L << my_log2((nBuckets - 1) / DEF_SEGSIZE + 1);
634+
nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
629635
/* directory entries */
630636
nDirEntries = DEF_DIRSIZE;
631637
while (nDirEntries < nSegments)
@@ -666,9 +672,9 @@ hash_select_dirsize(long num_entries)
666672
nDirEntries;
667673

668674
/* estimate number of buckets wanted */
669-
nBuckets = 1L << my_log2((num_entries - 1) / DEF_FFACTOR + 1);
675+
nBuckets = next_pow2_long((num_entries - 1) / DEF_FFACTOR + 1);
670676
/* # of segments needed for nBuckets */
671-
nSegments = 1L << my_log2((nBuckets - 1) / DEF_SEGSIZE + 1);
677+
nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
672678
/* directory entries */
673679
nDirEntries = DEF_DIRSIZE;
674680
while (nDirEntries < nSegments)
@@ -1403,11 +1409,32 @@ my_log2(long num)
14031409
int i;
14041410
long limit;
14051411

1412+
/* guard against too-large input, which would put us into infinite loop */
1413+
if (num > LONG_MAX / 2)
1414+
num = LONG_MAX / 2;
1415+
14061416
for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
14071417
;
14081418
return i;
14091419
}
14101420

1421+
/* calculate first power of 2 >= num, bounded to what will fit in a long */
1422+
static long
1423+
next_pow2_long(long num)
1424+
{
1425+
/* my_log2's internal range check is sufficient */
1426+
return 1L << my_log2(num);
1427+
}
1428+
1429+
/* calculate first power of 2 >= num, bounded to what will fit in an int */
1430+
static int
1431+
next_pow2_int(long num)
1432+
{
1433+
if (num > INT_MAX / 2)
1434+
num = INT_MAX / 2;
1435+
return 1 << my_log2(num);
1436+
}
1437+
14111438

14121439
/************************* SEQ SCAN TRACKING ************************/
14131440

0 commit comments

Comments
 (0)