diff --git a/doc/src/sgml/func/func-datetime.sgml b/doc/src/sgml/func/func-datetime.sgml
index 98dd60aa9a7ec..8cd7150b0d313 100644
--- a/doc/src/sgml/func/func-datetime.sgml
+++ b/doc/src/sgml/func/func-datetime.sgml
@@ -935,28 +935,34 @@
random ( min date, max date )
date
-
-
- random ( min timestamp, max timestamp )
- timestamp
-
-
- random ( min timestamptz, max timestamptz )
- timestamptz
-
-
- Returns a random value in the range
- min <= x <= max.
-
-
- random('1979-02-08'::date,'2025-07-03'::date)
- 1983-04-21
-
-
- random('2000-01-01'::timestamptz, now())
- 2015-09-27 09:11:33.732707+00
-
-
+
+
+ random ( min timestamp, max timestamp )
+ timestamp
+
+
+ random ( min timestamptz, max timestamptz )
+ timestamptz
+
+
+ Returns a random value in the range
+ min <= x <= max.
+
+
+ Note that these functions use the same pseudo-random number generator
+ as the functions listed in ,
+ and respond in the same way to calling
+ setseed().
+
+
+ random('1979-02-08'::date,'2025-07-03'::date)
+ 1983-04-21
+
+
+ random('2000-01-01'::timestamptz, now())
+ 2015-09-27 09:11:33.732707+00
+
+
diff --git a/doc/src/sgml/func/func-math.sgml b/doc/src/sgml/func/func-math.sgml
index fd821c0e70677..9dcf97e7c9e06 100644
--- a/doc/src/sgml/func/func-math.sgml
+++ b/doc/src/sgml/func/func-math.sgml
@@ -1130,7 +1130,7 @@
-
+
setseed
setseed ( double precision )
diff --git a/doc/src/sgml/xfunc.sgml b/doc/src/sgml/xfunc.sgml
index da21ef5689184..04bf919b34384 100644
--- a/doc/src/sgml/xfunc.sgml
+++ b/doc/src/sgml/xfunc.sgml
@@ -3668,11 +3668,14 @@ LWLockRelease(AddinShmemInitLock);
shmem_startup_hook provides a convenient place for the
initialization code, but it is not strictly required that all such code
- be placed in this hook. Each backend will execute the registered
- shmem_startup_hook shortly after it attaches to shared
- memory. Note that add-ins should still acquire
+ be placed in this hook. On Windows (and anywhere else where
+ EXEC_BACKEND is defined), each backend executes the
+ registered shmem_startup_hook shortly after it
+ attaches to shared memory, so add-ins should still acquire
AddinShmemInitLock within this hook, as shown in the
- example above.
+ example above. On other platforms, only the postmaster process executes
+ the shmem_startup_hook, and each backend automatically
+ inherits the pointers to shared memory.
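
The pattern described above is the one used throughout contrib; a minimal sketch of such a hook, assuming a hypothetical MyExtSharedState struct and extension name (AddinShmemInitLock, ShmemInitStruct, and shmem_startup_hook_type are the real APIs):

    #include "postgres.h"

    #include "storage/ipc.h"
    #include "storage/lwlock.h"
    #include "storage/shmem.h"

    typedef struct MyExtSharedState
    {
        int64       counter;            /* hypothetical payload */
    } MyExtSharedState;

    static MyExtSharedState *myext_state = NULL;
    static shmem_startup_hook_type prev_shmem_startup_hook = NULL;

    static void
    myext_shmem_startup(void)
    {
        bool        found;

        if (prev_shmem_startup_hook)
            prev_shmem_startup_hook();

        /* Serialize add-in initialization, as recommended above. */
        LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);

        /* Attach to (or, on first call, create) the shared struct. */
        myext_state = ShmemInitStruct("myext shared state",
                                      sizeof(MyExtSharedState), &found);
        if (!found)
            memset(myext_state, 0, sizeof(MyExtSharedState));

        LWLockRelease(AddinShmemInitLock);
    }

In _PG_init() the extension would save shmem_startup_hook into prev_shmem_startup_hook and install myext_shmem_startup in its place; on EXEC_BACKEND platforms every backend then runs the hook, while elsewhere only the postmaster runs it and backends inherit myext_state through fork().
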
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 7ffb217915190..0baf0ac6160af 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -8385,6 +8385,14 @@ xlog_redo(XLogReaderState *record)
checkPoint.ThisTimeLineID, replayTLI)));
RecoveryRestartPoint(&checkPoint, record);
+
+ /*
+ * After replaying a checkpoint record, free all smgr objects.
+ * Otherwise we would never do so for dropped relations, as the
+	 * startup process does not process shared invalidation messages
+	 * or call AtEOXact_SMgr().
+ */
+ smgrdestroyall();
}
else if (info == XLOG_CHECKPOINT_ONLINE)
{
@@ -8438,6 +8446,14 @@ xlog_redo(XLogReaderState *record)
checkPoint.ThisTimeLineID, replayTLI)));
RecoveryRestartPoint(&checkPoint, record);
+
+ /*
+ * After replaying a checkpoint record, free all smgr objects.
+ * Otherwise we would never do so for dropped relations, as the
+	 * startup process does not process shared invalidation messages
+	 * or call AtEOXact_SMgr().
+ */
+ smgrdestroyall();
}
else if (info == XLOG_OVERWRITE_CONTRECORD)
{
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index b540074935386..75087204f0c69 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -156,6 +156,12 @@ execTuplesHashPrepare(int numCols,
*
* Note that the keyColIdx, hashfunctions, and collations arrays must be
* allocated in storage that will live as long as the hashtable does.
+ *
+ * LookupTupleHashEntry, FindTupleHashEntry, and related functions may leak
+ * memory in the tempcxt. It is the caller's responsibility to reset that context
+ * reasonably often, typically once per tuple. (We do it that way, rather
+ * than managing an extra context within the hashtable, because in many cases
+ * the caller can specify a tempcxt that it needs to reset per-tuple anyway.)
*/
TupleHashTable
BuildTupleHashTable(PlanState *parent,
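
A sketch of what that contract looks like from a caller's side, assuming a hypothetical fetch_next_slot() input source (LookupTupleHashEntry, MemoryContextReset, and the hashtable's tempcxt field are the real ones):

    for (;;)
    {
        TupleTableSlot *slot = fetch_next_slot();   /* hypothetical input */
        bool            isnew;

        if (TupIsNull(slot))
            break;

        /* may leak into hashtable->tempcxt */
        (void) LookupTupleHashEntry(hashtable, slot, &isnew, NULL);

        /* ... per-tuple work ... */

        /* caller's responsibility: reset the temp context once per tuple */
        MemoryContextReset(hashtable->tempcxt);
    }

nodeSubplan.c below takes the second route mentioned in the parenthetical: it passes the inner ExprContext's per-tuple memory as the tempcxt, so the existing ResetExprContext() call covers the hashtable's transient allocations as well.
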
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 377e016d73225..a4f3d30f307cc 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -267,7 +267,6 @@
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/datum.h"
-#include "utils/dynahash.h"
#include "utils/expandeddatum.h"
#include "utils/injection_point.h"
#include "utils/logtape.h"
@@ -2115,7 +2114,7 @@ hash_choose_num_partitions(double input_groups, double hashentrysize,
npartitions = (int) dpartitions;
/* ceil(log2(npartitions)) */
- partition_bits = my_log2(npartitions);
+ partition_bits = pg_ceil_log2_32(npartitions);
/* make sure that we don't exhaust the hash bits */
if (partition_bits + used_bits >= 32)
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 8d2201ab67fa5..a3415db4e20f5 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -36,7 +36,6 @@
#include "executor/nodeHashjoin.h"
#include "miscadmin.h"
#include "port/pg_bitutils.h"
-#include "utils/dynahash.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/syscache.h"
@@ -340,7 +339,7 @@ MultiExecParallelHash(HashState *node)
*/
hashtable->curbatch = -1;
hashtable->nbuckets = pstate->nbuckets;
- hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
+ hashtable->log2_nbuckets = pg_ceil_log2_32(hashtable->nbuckets);
hashtable->totalTuples = pstate->total_tuples;
/*
@@ -480,7 +479,7 @@ ExecHashTableCreate(HashState *state)
&nbuckets, &nbatch, &num_skew_mcvs);
/* nbuckets must be a power of 2 */
- log2_nbuckets = my_log2(nbuckets);
+ log2_nbuckets = pg_ceil_log2_32(nbuckets);
Assert(nbuckets == (1 << log2_nbuckets));
/*
@@ -3499,7 +3498,7 @@ ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
dsa_get_address(hashtable->area,
hashtable->batches[batchno].shared->buckets);
hashtable->nbuckets = hashtable->parallel_state->nbuckets;
- hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
+ hashtable->log2_nbuckets = pg_ceil_log2_32(hashtable->nbuckets);
hashtable->current_chunk = NULL;
hashtable->current_chunk_shared = InvalidDsaPointer;
hashtable->batches[batchno].at_least_one_chunk = false;
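
pg_ceil_log2_32() from port/pg_bitutils.h computes ceil(log2(x)), the same contract the removed my_log2() had, so the power-of-two bucket-count assertions still hold. A few illustrative values (this is a sketch, not code from the patch):

    #include "port/pg_bitutils.h"

    Assert(pg_ceil_log2_32(1) == 0);
    Assert(pg_ceil_log2_32(1000) == 10);    /* rounded up */
    Assert(pg_ceil_log2_32(1024) == 10);    /* exact for powers of two */
    Assert(pg_ceil_log2_32(1025) == 11);

    /* hence, for a power-of-two nbuckets: */
    log2_nbuckets = pg_ceil_log2_32(nbuckets);
    Assert(nbuckets == (1 << log2_nbuckets));
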
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index f7f6fc2da0b95..53fb56f7388e8 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -102,6 +102,7 @@ ExecHashSubPlan(SubPlanState *node,
ExprContext *econtext,
bool *isNull)
{
+ bool result = false;
SubPlan *subplan = node->subplan;
PlanState *planstate = node->planstate;
TupleTableSlot *slot;
@@ -132,14 +133,6 @@ ExecHashSubPlan(SubPlanState *node,
node->projLeft->pi_exprContext = econtext;
slot = ExecProject(node->projLeft);
- /*
- * Note: because we are typically called in a per-tuple context, we have
- * to explicitly clear the projected tuple before returning. Otherwise,
- * we'll have a double-free situation: the per-tuple context will probably
- * be reset before we're called again, and then the tuple slot will think
- * it still needs to free the tuple.
- */
-
/*
* If the LHS is all non-null, probe for an exact match in the main hash
* table. If we find one, the result is TRUE. Otherwise, scan the
@@ -161,19 +154,10 @@ ExecHashSubPlan(SubPlanState *node,
slot,
node->cur_eq_comp,
node->lhs_hash_expr) != NULL)
- {
- ExecClearTuple(slot);
- return BoolGetDatum(true);
- }
- if (node->havenullrows &&
- findPartialMatch(node->hashnulls, slot, node->cur_eq_funcs))
- {
- ExecClearTuple(slot);
+ result = true;
+ else if (node->havenullrows &&
+ findPartialMatch(node->hashnulls, slot, node->cur_eq_funcs))
*isNull = true;
- return BoolGetDatum(false);
- }
- ExecClearTuple(slot);
- return BoolGetDatum(false);
}
/*
@@ -186,34 +170,31 @@ ExecHashSubPlan(SubPlanState *node,
* aren't provably unequal to the LHS; if so, the result is UNKNOWN.
* Otherwise, the result is FALSE.
*/
- if (node->hashnulls == NULL)
- {
- ExecClearTuple(slot);
- return BoolGetDatum(false);
- }
- if (slotAllNulls(slot))
- {
- ExecClearTuple(slot);
+ else if (node->hashnulls == NULL)
+ /* just return FALSE */ ;
+ else if (slotAllNulls(slot))
*isNull = true;
- return BoolGetDatum(false);
- }
/* Scan partly-null table first, since more likely to get a match */
- if (node->havenullrows &&
- findPartialMatch(node->hashnulls, slot, node->cur_eq_funcs))
- {
- ExecClearTuple(slot);
+ else if (node->havenullrows &&
+ findPartialMatch(node->hashnulls, slot, node->cur_eq_funcs))
*isNull = true;
- return BoolGetDatum(false);
- }
- if (node->havehashrows &&
- findPartialMatch(node->hashtable, slot, node->cur_eq_funcs))
- {
- ExecClearTuple(slot);
+ else if (node->havehashrows &&
+ findPartialMatch(node->hashtable, slot, node->cur_eq_funcs))
*isNull = true;
- return BoolGetDatum(false);
- }
+
+ /*
+ * Note: because we are typically called in a per-tuple context, we have
+ * to explicitly clear the projected tuple before returning. Otherwise,
+ * we'll have a double-free situation: the per-tuple context will probably
+ * be reset before we're called again, and then the tuple slot will think
+ * it still needs to free the tuple.
+ */
ExecClearTuple(slot);
- return BoolGetDatum(false);
+
+ /* Also must reset the innerecontext after each hashtable lookup. */
+ ResetExprContext(node->innerecontext);
+
+ return BoolGetDatum(result);
}
/*
@@ -548,7 +529,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
0,
node->planstate->state->es_query_cxt,
node->hashtablecxt,
- node->hashtempcxt,
+ innerecontext->ecxt_per_tuple_memory,
false);
if (!subplan->unknownEqFalse)
@@ -577,7 +558,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
0,
node->planstate->state->es_query_cxt,
node->hashtablecxt,
- node->hashtempcxt,
+ innerecontext->ecxt_per_tuple_memory,
false);
}
else
@@ -639,7 +620,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
/*
* Reset innerecontext after each inner tuple to free any memory used
- * during ExecProject.
+ * during ExecProject and hashtable lookup.
*/
ResetExprContext(innerecontext);
}
@@ -858,7 +839,6 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent)
sstate->hashtable = NULL;
sstate->hashnulls = NULL;
sstate->hashtablecxt = NULL;
- sstate->hashtempcxt = NULL;
sstate->innerecontext = NULL;
sstate->keyColIdx = NULL;
sstate->tab_eq_funcoids = NULL;
@@ -914,11 +894,6 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent)
AllocSetContextCreate(CurrentMemoryContext,
"Subplan HashTable Context",
ALLOCSET_DEFAULT_SIZES);
- /* and a small one for the hash tables to use as temp storage */
- sstate->hashtempcxt =
- AllocSetContextCreate(CurrentMemoryContext,
- "Subplan HashTable Temp Context",
- ALLOCSET_SMALL_SIZES);
/* and a short-lived exprcontext for function evaluation */
sstate->innerecontext = CreateExprContext(estate);
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index afcf54169c3b3..e96b38a59d503 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -1461,7 +1461,6 @@ expandTableLikeClause(RangeVar *heapRel, TableLikeClause *table_like_clause)
char *ccname = constr->check[ccnum].ccname;
char *ccbin = constr->check[ccnum].ccbin;
bool ccenforced = constr->check[ccnum].ccenforced;
- bool ccvalid = constr->check[ccnum].ccvalid;
bool ccnoinherit = constr->check[ccnum].ccnoinherit;
Node *ccbin_node;
bool found_whole_row;
@@ -1492,7 +1491,7 @@ expandTableLikeClause(RangeVar *heapRel, TableLikeClause *table_like_clause)
n->conname = pstrdup(ccname);
n->location = -1;
n->is_enforced = ccenforced;
- n->initially_valid = ccvalid;
+ n->initially_valid = ccenforced; /* sic */
n->is_no_inherit = ccnoinherit;
n->raw_expr = NULL;
n->cooked_expr = nodeToString(ccbin_node);
diff --git a/src/backend/replication/logical/slotsync.c b/src/backend/replication/logical/slotsync.c
index 9d0072a49ed6d..8c061d55bdb51 100644
--- a/src/backend/replication/logical/slotsync.c
+++ b/src/backend/replication/logical/slotsync.c
@@ -1337,7 +1337,7 @@ reset_syncing_flag()
SpinLockRelease(&SlotSyncCtx->mutex);
syncing_slots = false;
-};
+}
/*
* The main loop of our worker process.
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index b3cac1023731a..ee6ac22329fdc 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -276,7 +276,6 @@
#include "storage/procarray.h"
#include "tcop/tcopprot.h"
#include "utils/acl.h"
-#include "utils/dynahash.h"
#include "utils/guc.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
@@ -5115,7 +5114,7 @@ subxact_info_read(Oid subid, TransactionId xid)
len = sizeof(SubXactInfo) * subxact_data.nsubxacts;
/* we keep the maximum as a power of 2 */
- subxact_data.nsubxacts_max = 1 << my_log2(subxact_data.nsubxacts);
+ subxact_data.nsubxacts_max = 1 << pg_ceil_log2_32(subxact_data.nsubxacts);
/*
* Allocate subxact information in the logical streaming context. We need
diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c
index af0b99243c614..3c3d2d315c6f4 100644
--- a/src/backend/statistics/extended_stats.c
+++ b/src/backend/statistics/extended_stats.c
@@ -986,10 +986,9 @@ build_sorted_items(StatsBuildData *data, int *nitems,
{
int i,
j,
- len,
nrows;
int nvalues = data->numrows * numattrs;
-
+ Size len;
SortItem *items;
Datum *values;
bool *isnull;
@@ -997,14 +996,16 @@ build_sorted_items(StatsBuildData *data, int *nitems,
int *typlen;
/* Compute the total amount of memory we need (both items and values). */
- len = data->numrows * sizeof(SortItem) + nvalues * (sizeof(Datum) + sizeof(bool));
+ len = MAXALIGN(data->numrows * sizeof(SortItem)) +
+ nvalues * (sizeof(Datum) + sizeof(bool));
/* Allocate the memory and split it into the pieces. */
ptr = palloc0(len);
/* items to sort */
items = (SortItem *) ptr;
- ptr += data->numrows * sizeof(SortItem);
+ /* MAXALIGN ensures that the following Datums are suitably aligned */
+ ptr += MAXALIGN(data->numrows * sizeof(SortItem));
/* values and null flags */
values = (Datum *) ptr;
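
Condensing the hunk above: the allocation is one palloc0() carved into three arrays, with the SortItem block padded to a MAXALIGN boundary so that the Datum array following it is suitably aligned (the row and attribute counts here are hypothetical):

    int         numrows = 100;                  /* hypothetical */
    int         nvalues = numrows * 4;          /* hypothetical: 4 attrs */
    Size        len;
    char       *ptr;
    SortItem   *items;
    Datum      *values;
    bool       *isnull;

    len = MAXALIGN(numrows * sizeof(SortItem)) +
          nvalues * (sizeof(Datum) + sizeof(bool));
    ptr = palloc0(len);

    items = (SortItem *) ptr;
    ptr += MAXALIGN(numrows * sizeof(SortItem));    /* pad for Datum alignment */

    values = (Datum *) ptr;
    ptr += nvalues * sizeof(Datum);

    isnull = (bool *) ptr;                          /* bool needs no alignment */
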
diff --git a/src/backend/utils/adt/meson.build b/src/backend/utils/adt/meson.build
index dac372c3bea3b..12fa0c209127c 100644
--- a/src/backend/utils/adt/meson.build
+++ b/src/backend/utils/adt/meson.build
@@ -1,5 +1,15 @@
# Copyright (c) 2022-2025, PostgreSQL Global Development Group
+# Some code in numeric.c benefits from auto-vectorization
+numeric_backend_lib = static_library('numeric_backend_lib',
+ 'numeric.c',
+ dependencies: backend_build_deps,
+ kwargs: internal_lib_args,
+ c_args: vectorize_cflags,
+)
+
+backend_link_with += numeric_backend_lib
+
backend_sources += files(
'acl.c',
'amutils.c',
@@ -61,7 +71,6 @@ backend_sources += files(
'network_gist.c',
'network_selfuncs.c',
'network_spgist.c',
- 'numeric.c',
'numutils.c',
'oid.c',
'oracle_compat.c',
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 1aeee5be42acd..ac94b9e93c6e3 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -102,7 +102,6 @@
#include "port/pg_bitutils.h"
#include "storage/shmem.h"
#include "storage/spin.h"
-#include "utils/dynahash.h"
#include "utils/memutils.h"
@@ -281,6 +280,7 @@ static bool init_htab(HTAB *hashp, int64 nelem);
pg_noreturn static void hash_corrupted(HTAB *hashp);
static uint32 hash_initial_lookup(HTAB *hashp, uint32 hashvalue,
HASHBUCKET **bucketptr);
+static int my_log2(int64 num);
static int64 next_pow2_int64(int64 num);
static int next_pow2_int(int64 num);
static void register_seq_scan(HTAB *hashp);
@@ -1813,7 +1813,7 @@ hash_corrupted(HTAB *hashp)
}
/* calculate ceil(log base 2) of num */
-int
+static int
my_log2(int64 num)
{
/*
diff --git a/src/include/access/hash_xlog.h b/src/include/access/hash_xlog.h
index 6fe97de4d66f1..5d4671dc4c128 100644
--- a/src/include/access/hash_xlog.h
+++ b/src/include/access/hash_xlog.h
@@ -129,7 +129,7 @@ typedef struct xl_hash_split_complete
*
* This data record is used for XLOG_HASH_MOVE_PAGE_CONTENTS
*
- * Backup Blk 0: bucket page
+ * Backup Blk 0: primary bucket page
* Backup Blk 1: page containing moved tuples
* Backup Blk 2: page from which tuples will be removed
*/
@@ -149,12 +149,13 @@ typedef struct xl_hash_move_page_contents
*
* This data record is used for XLOG_HASH_SQUEEZE_PAGE
*
- * Backup Blk 0: page containing tuples moved from freed overflow page
- * Backup Blk 1: freed overflow page
- * Backup Blk 2: page previous to the freed overflow page
- * Backup Blk 3: page next to the freed overflow page
- * Backup Blk 4: bitmap page containing info of freed overflow page
- * Backup Blk 5: meta page
+ * Backup Blk 0: primary bucket page
+ * Backup Blk 1: page containing tuples moved from freed overflow page
+ * Backup Blk 2: freed overflow page
+ * Backup Blk 3: page previous to the freed overflow page
+ * Backup Blk 4: page next to the freed overflow page
+ * Backup Blk 5: bitmap page containing info of freed overflow page
+ * Backup Blk 6: meta page
*/
typedef struct xl_hash_squeeze_page
{
@@ -245,7 +246,7 @@ typedef struct xl_hash_init_bitmap_page
*
* This data record is used for XLOG_HASH_VACUUM_ONE_PAGE
*
- * Backup Blk 0: bucket page
+ * Backup Blk 0: primary bucket page
* Backup Blk 1: meta page
*/
typedef struct xl_hash_vacuum_one_page
diff --git a/src/include/c.h b/src/include/c.h
index 39022f8a9dd75..b580cfa7d3178 100644
--- a/src/include/c.h
+++ b/src/include/c.h
@@ -259,8 +259,8 @@
* choose not to. But, if possible, don't force inlining in unoptimized
* debug builds.
*/
-#if (defined(__GNUC__) && __GNUC__ > 3 && defined(__OPTIMIZE__)) || defined(__SUNPRO_C)
-/* GCC > 3 and Sunpro support always_inline via __attribute__ */
+#if (defined(__GNUC__) && defined(__OPTIMIZE__)) || defined(__SUNPRO_C)
+/* GCC and Sunpro support always_inline via __attribute__ */
#define pg_attribute_always_inline __attribute__((always_inline)) inline
#elif defined(_MSC_VER)
/* MSVC has a special keyword for this */
@@ -277,7 +277,7 @@
* above, this should be placed before the function's return type and name.
*/
/* GCC and Sunpro support noinline via __attribute__ */
-#if (defined(__GNUC__) && __GNUC__ > 2) || defined(__SUNPRO_C)
+#if defined(__GNUC__) || defined(__SUNPRO_C)
#define pg_noinline __attribute__((noinline))
/* msvc via declspec */
#elif defined(_MSC_VER)
@@ -369,7 +369,7 @@
* These should only be used sparingly, in very hot code paths. It's very easy
* to mis-estimate likelihoods.
*/
-#if __GNUC__ >= 3
+#ifdef __GNUC__
#define likely(x) __builtin_expect((x) != 0, 1)
#define unlikely(x) __builtin_expect((x) != 0, 0)
#else
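
As the surrounding comment says, likely()/unlikely() belong only in very hot paths where the branch outcome is overwhelmingly one-sided; a typical (hypothetical) use is annotating a rarely-taken error branch:

    if (unlikely(buf == NULL))
        elog(ERROR, "buffer allocation failed");    /* hypothetical check */

    /* common case falls through with better branch layout */
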
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index de782014b2d41..71857feae4823 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -1020,7 +1020,6 @@ typedef struct SubPlanState
bool havehashrows; /* true if hashtable is not empty */
bool havenullrows; /* true if hashnulls is not empty */
MemoryContext hashtablecxt; /* memory context containing hash tables */
- MemoryContext hashtempcxt; /* temp memory context for hash tables */
ExprContext *innerecontext; /* econtext for computing inner tuples */
int numCols; /* number of columns being hashed */
/* each of the remaining fields is an array of length numCols: */
diff --git a/src/include/port/atomics/generic-gcc.h b/src/include/port/atomics/generic-gcc.h
index d8f04c89ccac2..e7dfad4f0d5eb 100644
--- a/src/include/port/atomics/generic-gcc.h
+++ b/src/include/port/atomics/generic-gcc.h
@@ -30,14 +30,14 @@
#define pg_compiler_barrier_impl() __asm__ __volatile__("" ::: "memory")
/*
- * If we're on GCC 4.1.0 or higher, we should be able to get a memory barrier
+ * If we're on GCC, we should be able to get a memory barrier
* out of this compiler built-in. But we prefer to rely on platform specific
* definitions where possible, and use this only as a fallback.
*/
#if !defined(pg_memory_barrier_impl)
# if defined(HAVE_GCC__ATOMIC_INT32_CAS)
# define pg_memory_barrier_impl() __atomic_thread_fence(__ATOMIC_SEQ_CST)
-# elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
+# elif defined(__GNUC__)
# define pg_memory_barrier_impl() __sync_synchronize()
# endif
#endif /* !defined(pg_memory_barrier_impl) */
diff --git a/src/include/utils/dynahash.h b/src/include/utils/dynahash.h
deleted file mode 100644
index a4362d3f65e59..0000000000000
--- a/src/include/utils/dynahash.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * dynahash.h
- * POSTGRES dynahash.h file definitions
- *
- *
- * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- * IDENTIFICATION
- * src/include/utils/dynahash.h
- *
- *-------------------------------------------------------------------------
- */
-#ifndef DYNAHASH_H
-#define DYNAHASH_H
-
-extern int my_log2(int64 num);
-
-#endif /* DYNAHASH_H */
diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h
index 756e80a2c2fcc..f21ec37da8933 100644
--- a/src/include/utils/guc.h
+++ b/src/include/utils/guc.h
@@ -106,7 +106,7 @@ typedef enum
* will show as "default" in pg_settings. If there is a specific reason not
* to want that, use source == PGC_S_OVERRIDE.
*
- * NB: see GucSource_Names in guc.c if you change this.
+ * NB: see GucSource_Names in guc_tables.c if you change this.
*/
typedef enum
{
diff --git a/src/test/modules/test_slru/test_slru.c b/src/test/modules/test_slru/test_slru.c
index 8c0367eeee424..e963466aef1cd 100644
--- a/src/test/modules/test_slru/test_slru.c
+++ b/src/test/modules/test_slru/test_slru.c
@@ -219,8 +219,8 @@ test_slru_shmem_startup(void)
*/
const bool long_segment_names = true;
const char slru_dir_name[] = "pg_test_slru";
- int test_tranche_id;
- int test_buffer_tranche_id;
+ int test_tranche_id = -1;
+ int test_buffer_tranche_id = -1;
if (prev_shmem_startup_hook)
prev_shmem_startup_hook();
@@ -231,10 +231,18 @@ test_slru_shmem_startup(void)
*/
(void) MakePGDirectory(slru_dir_name);
- /* initialize the SLRU facility */
- test_tranche_id = LWLockNewTrancheId("test_slru_tranche");
-
- test_buffer_tranche_id = LWLockNewTrancheId("test_buffer_tranche");
+ /*
+ * Initialize the SLRU facility. In EXEC_BACKEND builds, the
+ * shmem_startup_hook is called in the postmaster and in each backend, but
+ * we only need to generate the LWLock tranches once. Note that these
+ * tranche ID variables are not used by SimpleLruInit() when
+ * IsUnderPostmaster is true.
+ */
+ if (!IsUnderPostmaster)
+ {
+ test_tranche_id = LWLockNewTrancheId("test_slru_tranche");
+ test_buffer_tranche_id = LWLockNewTrancheId("test_buffer_tranche");
+ }
TestSlruCtl->PagePrecedes = test_slru_page_precedes_logically;
SimpleLruInit(TestSlruCtl, "TestSLRU",
diff --git a/src/test/regress/expected/create_table_like.out b/src/test/regress/expected/create_table_like.out
index 29a779c2e9072..d3c35c148475d 100644
--- a/src/test/regress/expected/create_table_like.out
+++ b/src/test/regress/expected/create_table_like.out
@@ -320,6 +320,7 @@ DROP TABLE inhz;
-- including storage and comments
CREATE TABLE ctlt1 (a text CHECK (length(a) > 2) ENFORCED PRIMARY KEY,
b text CHECK (length(b) > 100) NOT ENFORCED);
+ALTER TABLE ctlt1 ADD CONSTRAINT cc CHECK (length(b) > 100) NOT VALID;
CREATE INDEX ctlt1_b_key ON ctlt1 (b);
CREATE INDEX ctlt1_fnidx ON ctlt1 ((a || b));
CREATE STATISTICS ctlt1_a_b_stat ON a,b FROM ctlt1;
@@ -378,6 +379,7 @@ SELECT conname, description FROM pg_description, pg_constraint c WHERE classoid
CREATE TABLE ctlt1_inh (LIKE ctlt1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (ctlt1);
NOTICE: merging column "a" with inherited definition
NOTICE: merging column "b" with inherited definition
+NOTICE: merging constraint "cc" with inherited definition
NOTICE: merging constraint "ctlt1_a_check" with inherited definition
NOTICE: merging constraint "ctlt1_b_check" with inherited definition
\d+ ctlt1_inh
@@ -387,6 +389,7 @@ NOTICE: merging constraint "ctlt1_b_check" with inherited definition
a | text | | not null | | main | | A
b | text | | | | extended | | B
Check constraints:
+ "cc" CHECK (length(b) > 100)
"ctlt1_a_check" CHECK (length(a) > 2)
"ctlt1_b_check" CHECK (length(b) > 100) NOT ENFORCED
Not-null constraints:
@@ -409,6 +412,7 @@ NOTICE: merging multiple inherited definitions of column "a"
b | text | | | | extended | |
c | text | | | | external | |
Check constraints:
+ "cc" CHECK (length(b) > 100)
"ctlt1_a_check" CHECK (length(a) > 2)
"ctlt1_b_check" CHECK (length(b) > 100) NOT ENFORCED
"ctlt3_a_check" CHECK (length(a) < 5)
@@ -430,6 +434,7 @@ NOTICE: merging column "a" with inherited definition
Indexes:
"ctlt13_like_expr_idx" btree ((a || c))
Check constraints:
+ "cc" CHECK (length(b) > 100)
"ctlt1_a_check" CHECK (length(a) > 2)
"ctlt1_b_check" CHECK (length(b) > 100) NOT ENFORCED
"ctlt3_a_check" CHECK (length(a) < 5)
@@ -456,6 +461,7 @@ Indexes:
"ctlt_all_b_idx" btree (b)
"ctlt_all_expr_idx" btree ((a || b))
Check constraints:
+ "cc" CHECK (length(b) > 100)
"ctlt1_a_check" CHECK (length(a) > 2)
"ctlt1_b_check" CHECK (length(b) > 100) NOT ENFORCED
Statistics objects:
@@ -499,6 +505,7 @@ Indexes:
"pg_attrdef_b_idx" btree (b)
"pg_attrdef_expr_idx" btree ((a || b))
Check constraints:
+ "cc" CHECK (length(b) > 100)
"ctlt1_a_check" CHECK (length(a) > 2)
"ctlt1_b_check" CHECK (length(b) > 100) NOT ENFORCED
Statistics objects:
@@ -524,6 +531,7 @@ Indexes:
"ctlt1_b_idx" btree (b)
"ctlt1_expr_idx" btree ((a || b))
Check constraints:
+ "cc" CHECK (length(b) > 100)
"ctlt1_a_check" CHECK (length(a) > 2)
"ctlt1_b_check" CHECK (length(b) > 100) NOT ENFORCED
Statistics objects:
diff --git a/src/test/regress/sql/create_table_like.sql b/src/test/regress/sql/create_table_like.sql
index bf8702116a74b..93389b57dbf95 100644
--- a/src/test/regress/sql/create_table_like.sql
+++ b/src/test/regress/sql/create_table_like.sql
@@ -130,6 +130,7 @@ DROP TABLE inhz;
-- including storage and comments
CREATE TABLE ctlt1 (a text CHECK (length(a) > 2) ENFORCED PRIMARY KEY,
b text CHECK (length(b) > 100) NOT ENFORCED);
+ALTER TABLE ctlt1 ADD CONSTRAINT cc CHECK (length(b) > 100) NOT VALID;
CREATE INDEX ctlt1_b_key ON ctlt1 (b);
CREATE INDEX ctlt1_fnidx ON ctlt1 ((a || b));
CREATE STATISTICS ctlt1_a_b_stat ON a,b FROM ctlt1;
diff --git a/src/test/subscription/t/035_conflicts.pl b/src/test/subscription/t/035_conflicts.pl
index db0d5b464e825..880551fc69d74 100644
--- a/src/test/subscription/t/035_conflicts.pl
+++ b/src/test/subscription/t/035_conflicts.pl
@@ -475,6 +475,9 @@
}
);
+ # Wait until the backend enters the injection point
+ $node_B->wait_for_event('client backend', 'commit-after-delay-checkpoint');
+
# Confirm the update is suspended
$result =
$node_B->safe_psql('postgres', 'SELECT * FROM tab WHERE a = 1');