diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index eae70911df8d0..8048afd1a80fa 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -14,6 +14,9 @@
 #
 # $ git log --pretty=format:"%H # %cd%n# %s" $PGINDENTGITHASH -1 --date=iso
+b27644bade0348d0dafd3036c47880a349fe9332 # 2025-06-15 13:04:24 -0400
+# Sync typedefs.list with the buildfarm.
+
 4672b6223910687b2aab075bcd2dd54ce90d5171 # 2025-06-01 14:55:24 -0400
 # Run pgindent on the previous commit.
 
diff --git a/contrib/amcheck/expected/check_gin.out b/contrib/amcheck/expected/check_gin.out
index b4f0b110747c3..8dd01ced8d15f 100644
--- a/contrib/amcheck/expected/check_gin.out
+++ b/contrib/amcheck/expected/check_gin.out
@@ -76,3 +76,15 @@ SELECT gin_index_check('gin_check_jsonb_idx');
 
 -- cleanup
 DROP TABLE gin_check_jsonb;
+-- Test GIN multicolumn index
+CREATE TABLE "gin_check_multicolumn"(a text[], b text[]);
+INSERT INTO gin_check_multicolumn (a,b) values ('{a,c,e}','{b,d,f}');
+CREATE INDEX "gin_check_multicolumn_idx" on gin_check_multicolumn USING GIN(a,b);
+SELECT gin_index_check('gin_check_multicolumn_idx');
+ gin_index_check 
+-----------------
+ 
+(1 row)
+
+-- cleanup
+DROP TABLE gin_check_multicolumn;
diff --git a/contrib/amcheck/meson.build b/contrib/amcheck/meson.build
index b33e8c9b062fe..1f0c347ed5413 100644
--- a/contrib/amcheck/meson.build
+++ b/contrib/amcheck/meson.build
@@ -49,6 +49,7 @@ tests += {
       't/003_cic_2pc.pl',
       't/004_verify_nbtree_unique.pl',
       't/005_pitr.pl',
+      't/006_verify_gin.pl',
     ],
   },
 }
diff --git a/contrib/amcheck/sql/check_gin.sql b/contrib/amcheck/sql/check_gin.sql
index 66f42c34311db..11caed3d6a81b 100644
--- a/contrib/amcheck/sql/check_gin.sql
+++ b/contrib/amcheck/sql/check_gin.sql
@@ -50,3 +50,13 @@ SELECT gin_index_check('gin_check_jsonb_idx');
 
 -- cleanup
 DROP TABLE gin_check_jsonb;
+
+-- Test GIN multicolumn index
+CREATE TABLE "gin_check_multicolumn"(a text[], b text[]);
+INSERT INTO gin_check_multicolumn (a,b) values ('{a,c,e}','{b,d,f}');
+CREATE INDEX "gin_check_multicolumn_idx" on gin_check_multicolumn USING GIN(a,b);
+
+SELECT gin_index_check('gin_check_multicolumn_idx');
+
+-- cleanup
+DROP TABLE gin_check_multicolumn;
diff --git a/contrib/amcheck/t/006_verify_gin.pl b/contrib/amcheck/t/006_verify_gin.pl
new file mode 100644
index 0000000000000..e540cd6606adf
--- /dev/null
+++ b/contrib/amcheck/t/006_verify_gin.pl
@@ -0,0 +1,316 @@
+
+# Copyright (c) 2021-2025, PostgreSQL Global Development Group
+
+use strict;
+use warnings FATAL => 'all';
+
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+
+use Test::More;
+
+my $node;
+my $blksize;
+
+# to get the split fast, we want tuples to be as large as possible, but at the
+# same time we don't want them to be toasted.
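+# (1900 bytes of filler plus tuple overhead stays below the ~2kB per-tuple
+# TOAST threshold of the default 8kB page, while only a few such tuples fit
+# on each page.)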
+my $filler_size = 1900;
+
+#
+# Test set-up
+#
+$node = PostgreSQL::Test::Cluster->new('test');
+$node->init(no_data_checksums => 1);
+$node->append_conf('postgresql.conf', 'autovacuum=off');
+$node->start;
+$blksize = int($node->safe_psql('postgres', 'SHOW block_size;'));
+$node->safe_psql('postgres', q(CREATE EXTENSION amcheck));
+$node->safe_psql(
+	'postgres', q(
+	CREATE OR REPLACE FUNCTION random_string( INT ) RETURNS text AS $$
+	SELECT string_agg(substring('0123456789abcdefghijklmnopqrstuvwxyz', ceil(random() * 36)::integer, 1), '') from generate_series(1, $1);
+	$$ LANGUAGE SQL;));
+
+# Tests
+invalid_entry_order_leaf_page_test();
+invalid_entry_order_inner_page_test();
+invalid_entry_columns_order_test();
+inconsistent_with_parent_key__parent_key_corrupted_test();
+inconsistent_with_parent_key__child_key_corrupted_test();
+inconsistent_with_parent_key__parent_key_corrupted_posting_tree_test();
+
+sub invalid_entry_order_leaf_page_test
+{
+	my $relname = "test";
+	my $indexname = "test_gin_idx";
+
+	$node->safe_psql(
+		'postgres', qq(
+		DROP TABLE IF EXISTS $relname;
+		CREATE TABLE $relname (a text[]);
+		INSERT INTO $relname (a) VALUES ('{aaaaa,bbbbb}');
+		CREATE INDEX $indexname ON $relname USING gin (a);
+	));
+	my $relpath = relation_filepath($indexname);
+
+	$node->stop;
+
+	my $blkno = 1;		# root
+
+	# produce a wrong order by replacing aaaaa with ccccc
+	string_replace_block(
+		$relpath,
+		'aaaaa',
+		'ccccc',
+		$blkno
+	);
+
+	$node->start;
+
+	my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+	my $expected = "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295";
+	like($stderr, qr/$expected/);
+}
+
+sub invalid_entry_order_inner_page_test
+{
+	my $relname = "test";
+	my $indexname = "test_gin_idx";
+
+	# To break the order in the inner page we need at least 3 items
+	# (the rightmost key on the inner level is not checked for order),
+	# so fill the table until we have 2 splits.
+	$node->safe_psql(
+		'postgres', qq(
+		DROP TABLE IF EXISTS $relname;
+		CREATE TABLE $relname (a text[]);
+		INSERT INTO $relname (a) VALUES (('{' || 'pppppppppp' || random_string($filler_size) ||'}')::text[]);
+		INSERT INTO $relname (a) VALUES (('{' || 'qqqqqqqqqq' || random_string($filler_size) ||'}')::text[]);
+		INSERT INTO $relname (a) VALUES (('{' || 'rrrrrrrrrr' || random_string($filler_size) ||'}')::text[]);
+		INSERT INTO $relname (a) VALUES (('{' || 'ssssssssss' || random_string($filler_size) ||'}')::text[]);
+		INSERT INTO $relname (a) VALUES (('{' || 'tttttttttt' || random_string($filler_size) ||'}')::text[]);
+		INSERT INTO $relname (a) VALUES (('{' || 'uuuuuuuuuu' || random_string($filler_size) ||'}')::text[]);
+		INSERT INTO $relname (a) VALUES (('{' || 'vvvvvvvvvv' || random_string($filler_size) ||'}')::text[]);
+		INSERT INTO $relname (a) VALUES (('{' || 'wwwwwwwwww' || random_string($filler_size) ||'}')::text[]);
+		CREATE INDEX $indexname ON $relname USING gin (a);
+	));
+	my $relpath = relation_filepath($indexname);
+
+	$node->stop;
+
+	my $blkno = 1;		# root
+
+	# we have rrrrrrrrr... and tttttttttt... as keys in the root, so produce
+	# a wrong order by replacing rrrrrrrrrr....
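+	# (zzzzzzzzzz... sorts after tttttttttt..., breaking the expected
+	# ascending key order on the inner page)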
+	string_replace_block(
+		$relpath,
+		'rrrrrrrrrr',
+		'zzzzzzzzzz',
+		$blkno
+	);
+
+	$node->start;
+
+	my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+	my $expected = "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295";
+	like($stderr, qr/$expected/);
+}
+
+sub invalid_entry_columns_order_test
+{
+	my $relname = "test";
+	my $indexname = "test_gin_idx";
+
+	$node->safe_psql(
+		'postgres', qq(
+		DROP TABLE IF EXISTS $relname;
+		CREATE TABLE $relname (a text[],b text[]);
+		INSERT INTO $relname (a,b) VALUES ('{aaa}','{bbb}');
+		CREATE INDEX $indexname ON $relname USING gin (a,b);
+	));
+	my $relpath = relation_filepath($indexname);
+
+	$node->stop;
+
+	my $blkno = 1;		# root
+
+	# mess up the column numbers
+	# root items order before: (1,aaa), (2,bbb)
+	# root items order after: (2,aaa), (1,bbb)
+	my $attrno_1 = pack('s', 1);
+	my $attrno_2 = pack('s', 2);
+
+	my $find = qr/($attrno_1)(.)(aaa)/s;
+	my $replace = $attrno_2 . '$2$3';
+	string_replace_block(
+		$relpath,
+		$find,
+		$replace,
+		$blkno
+	);
+
+	$find = qr/($attrno_2)(.)(bbb)/s;
+	$replace = $attrno_1 . '$2$3';
+	string_replace_block(
+		$relpath,
+		$find,
+		$replace,
+		$blkno
+	);
+
+	$node->start;
+
+	my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+	my $expected = "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295";
+	like($stderr, qr/$expected/);
+}
+
+sub inconsistent_with_parent_key__parent_key_corrupted_test
+{
+	my $relname = "test";
+	my $indexname = "test_gin_idx";
+
+	# fill the table until we have a split
+	$node->safe_psql(
+		'postgres', qq(
+		DROP TABLE IF EXISTS $relname;
+		CREATE TABLE $relname (a text[]);
+		INSERT INTO $relname (a) VALUES (('{' || 'llllllllll' || random_string($filler_size) ||'}')::text[]);
+		INSERT INTO $relname (a) VALUES (('{' || 'mmmmmmmmmm' || random_string($filler_size) ||'}')::text[]);
+		INSERT INTO $relname (a) VALUES (('{' || 'nnnnnnnnnn' || random_string($filler_size) ||'}')::text[]);
+		INSERT INTO $relname (a) VALUES (('{' || 'xxxxxxxxxx' || random_string($filler_size) ||'}')::text[]);
+		INSERT INTO $relname (a) VALUES (('{' || 'yyyyyyyyyy' || random_string($filler_size) ||'}')::text[]);
+		CREATE INDEX $indexname ON $relname USING gin (a);
+	));
+	my $relpath = relation_filepath($indexname);
+
+	$node->stop;
+
+	my $blkno = 1;		# root
+
+	# we have nnnnnnnnnn... as the parent key in the root, so replace it
+	# with something smaller than the child's keys
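+	# ('aaaaaaaaaa' sorts before every key on the child page, so the
+	# downlink key no longer bounds its child)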
+	string_replace_block(
+		$relpath,
+		'nnnnnnnnnn',
+		'aaaaaaaaaa',
+		$blkno
+	);
+
+	$node->start;
+
+	my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+	my $expected = "index \"$indexname\" has inconsistent records on page 3 offset 3";
+	like($stderr, qr/$expected/);
+}
+
+sub inconsistent_with_parent_key__child_key_corrupted_test
+{
+	my $relname = "test";
+	my $indexname = "test_gin_idx";
+
+	# fill the table until we have a split
+	$node->safe_psql(
+		'postgres', qq(
+		DROP TABLE IF EXISTS $relname;
+		CREATE TABLE $relname (a text[]);
+		INSERT INTO $relname (a) VALUES (('{' || 'llllllllll' || random_string($filler_size) ||'}')::text[]);
+		INSERT INTO $relname (a) VALUES (('{' || 'mmmmmmmmmm' || random_string($filler_size) ||'}')::text[]);
+		INSERT INTO $relname (a) VALUES (('{' || 'nnnnnnnnnn' || random_string($filler_size) ||'}')::text[]);
+		INSERT INTO $relname (a) VALUES (('{' || 'xxxxxxxxxx' || random_string($filler_size) ||'}')::text[]);
+		INSERT INTO $relname (a) VALUES (('{' || 'yyyyyyyyyy' || random_string($filler_size) ||'}')::text[]);
+		CREATE INDEX $indexname ON $relname USING gin (a);
+	));
+	my $relpath = relation_filepath($indexname);
+
+	$node->stop;
+
+	my $blkno = 3;		# leaf
+
+	# we have nnnnnnnnnn... as the parent key in the root, so replace the
+	# child key with something bigger
+	string_replace_block(
+		$relpath,
+		'nnnnnnnnnn',
+		'pppppppppp',
+		$blkno
+	);
+
+	$node->start;
+
+	my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+	my $expected = "index \"$indexname\" has inconsistent records on page 3 offset 3";
+	like($stderr, qr/$expected/);
+}
+
+sub inconsistent_with_parent_key__parent_key_corrupted_posting_tree_test
+{
+	my $relname = "test";
+	my $indexname = "test_gin_idx";
+
+	$node->safe_psql(
+		'postgres', qq(
+		DROP TABLE IF EXISTS $relname;
+		CREATE TABLE $relname (a text[]);
+		INSERT INTO $relname (a) select ('{aaaaa}') from generate_series(1,10000);
+		CREATE INDEX $indexname ON $relname USING gin (a);
+	));
+	my $relpath = relation_filepath($indexname);
+
+	$node->stop;
+
+	my $blkno = 2;		# posting tree root
+
+	# We have a posting tree for the 'aaaaa' key with the root at block 2
+	# and two leaf pages 3 and 4. Replace the 4th page's high key with (1,1)
+	# so that there are TIDs in the leaf page that are larger than the new
+	# high key.
+	my $find = pack('S*', 0, 4, 0) . '....';
+	my $replace = pack('S*', 0, 4, 0, 1, 1);
+	string_replace_block(
+		$relpath,
+		$find,
+		$replace,
+		$blkno
+	);
+
+	$node->start;
+
+	my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+	my $expected = "index \"$indexname\": tid exceeds parent's high key in postingTree leaf on block 4";
+	like($stderr, qr/$expected/);
+}
+
+
+# Returns the filesystem path for the named relation.
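+# Dies if the relation cannot be found; must be called while the node is
+# running, since it asks the server for the path.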
+sub relation_filepath
+{
+	my ($relname) = @_;
+
+	my $pgdata = $node->data_dir;
+	my $rel = $node->safe_psql('postgres',
+		qq(SELECT pg_relation_filepath('$relname')));
+	die "path not found for relation $relname" unless defined $rel;
+	return "$pgdata/$rel";
+}
+
+# Substitute the pattern 'find' with 'replace' within block number 'blkno'
+# of the file 'filename'.
+sub string_replace_block
+{
+	my ($filename, $find, $replace, $blkno) = @_;
+
+	my $fh;
+	open($fh, '+<', $filename) or BAIL_OUT("open failed: $!");
+	binmode $fh;
+
+	my $offset = $blkno * $blksize;
+	my $buffer;
+
+	sysseek($fh, $offset, 0) or BAIL_OUT("seek failed: $!");
+	sysread($fh, $buffer, $blksize) or BAIL_OUT("read failed: $!");
+
+	$buffer =~ s/$find/'"' . $replace . '"'/gee;
+
+	sysseek($fh, $offset, 0) or BAIL_OUT("seek failed: $!");
+	syswrite($fh, $buffer) or BAIL_OUT("write failed: $!");
+
+	close($fh) or BAIL_OUT("close failed: $!");
+
+	return;
+}
+
+done_testing();
diff --git a/contrib/amcheck/verify_gin.c b/contrib/amcheck/verify_gin.c
index b5f363562e32a..c615d950736f6 100644
--- a/contrib/amcheck/verify_gin.c
+++ b/contrib/amcheck/verify_gin.c
@@ -38,7 +38,6 @@ typedef struct GinScanItem
 	int			depth;
 	IndexTuple	parenttup;
 	BlockNumber parentblk;
-	XLogRecPtr	parentlsn;
 	BlockNumber blkno;
 	struct GinScanItem *next;
 } GinScanItem;
@@ -346,7 +345,7 @@ gin_check_posting_tree_parent_keys_consistency(Relation rel, BlockNumber posting
 			 * Check if this tuple is consistent with the downlink in the
 			 * parent.
 			 */
-			if (stack->parentblk != InvalidBlockNumber && i == maxoff &&
+			if (i == maxoff && ItemPointerIsValid(&stack->parentkey) &&
 				ItemPointerCompare(&stack->parentkey, &posting_item->key) < 0)
 				ereport(ERROR,
 						(errcode(ERRCODE_INDEX_CORRUPTED),
@@ -359,14 +358,10 @@ gin_check_posting_tree_parent_keys_consistency(Relation rel, BlockNumber posting
 			ptr->depth = stack->depth + 1;
 
 			/*
-			 * Set rightmost parent key to invalid item pointer. Its value
-			 * is 'Infinity' and not explicitly stored.
+			 * The rightmost parent key is always an invalid item pointer.
+			 * Its value is 'Infinity' and not explicitly stored.
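+			 * (That is also why the parent-key check above can simply test
+			 * ItemPointerIsValid() on the copied key instead of
+			 * special-casing the rightmost page.)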
*/ - if (rightlink == InvalidBlockNumber) - ItemPointerSetInvalid(&ptr->parentkey); - else - ptr->parentkey = posting_item->key; - + ptr->parentkey = posting_item->key; ptr->parentblk = stack->blkno; ptr->blkno = BlockIdGetBlockNumber(&posting_item->child_blkno); ptr->next = stack->next; @@ -421,7 +416,6 @@ gin_check_parent_keys_consistency(Relation rel, stack->depth = 0; stack->parenttup = NULL; stack->parentblk = InvalidBlockNumber; - stack->parentlsn = InvalidXLogRecPtr; stack->blkno = GIN_ROOT_BLKNO; while (stack) @@ -432,7 +426,6 @@ gin_check_parent_keys_consistency(Relation rel, OffsetNumber i, maxoff, prev_attnum; - XLogRecPtr lsn; IndexTuple prev_tuple; BlockNumber rightlink; @@ -442,7 +435,6 @@ gin_check_parent_keys_consistency(Relation rel, RBM_NORMAL, strategy); LockBuffer(buffer, GIN_SHARE); page = (Page) BufferGetPage(buffer); - lsn = BufferGetLSNAtomic(buffer); maxoff = PageGetMaxOffsetNumber(page); rightlink = GinPageGetOpaque(page)->rightlink; @@ -463,17 +455,18 @@ gin_check_parent_keys_consistency(Relation rel, Datum parent_key = gintuple_get_key(&state, stack->parenttup, &parent_key_category); + OffsetNumber parent_key_attnum = gintuple_get_attrnum(&state, stack->parenttup); ItemId iid = PageGetItemIdCareful(rel, stack->blkno, page, maxoff); IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid); - OffsetNumber attnum = gintuple_get_attrnum(&state, idxtuple); + OffsetNumber page_max_key_attnum = gintuple_get_attrnum(&state, idxtuple); GinNullCategory page_max_key_category; Datum page_max_key = gintuple_get_key(&state, idxtuple, &page_max_key_category); if (rightlink != InvalidBlockNumber && - ginCompareEntries(&state, attnum, page_max_key, - page_max_key_category, parent_key, - parent_key_category) > 0) + ginCompareAttEntries(&state, page_max_key_attnum, page_max_key, + page_max_key_category, parent_key_attnum, + parent_key, parent_key_category) < 0) { /* split page detected, install right link to the stack */ GinScanItem *ptr; @@ -484,7 +477,6 @@ gin_check_parent_keys_consistency(Relation rel, ptr->depth = stack->depth; ptr->parenttup = CopyIndexTuple(stack->parenttup); ptr->parentblk = stack->parentblk; - ptr->parentlsn = stack->parentlsn; ptr->blkno = rightlink; ptr->next = stack->next; stack->next = ptr; @@ -513,9 +505,7 @@ gin_check_parent_keys_consistency(Relation rel, { ItemId iid = PageGetItemIdCareful(rel, stack->blkno, page, i); IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid); - OffsetNumber attnum = gintuple_get_attrnum(&state, idxtuple); - GinNullCategory prev_key_category; - Datum prev_key; + OffsetNumber current_attnum = gintuple_get_attrnum(&state, idxtuple); GinNullCategory current_key_category; Datum current_key; @@ -528,20 +518,24 @@ gin_check_parent_keys_consistency(Relation rel, current_key = gintuple_get_key(&state, idxtuple, ¤t_key_category); /* - * First block is metadata, skip order check. Also, never check - * for high key on rightmost page, as this key is not really - * stored explicitly. + * Compare the entry to the preceding one. + * + * Don't check for high key on the rightmost inner page, as this + * key is not really stored explicitly. * - * Also make sure to not compare entries for different attnums, - * which may be stored on the same page. + * The entries may be for different attributes, so make sure to + * use ginCompareAttEntries for comparison. 
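+			 * (ginCompareAttEntries sorts by attribute number first and only
+			 * compares the key values when the attribute numbers are equal.)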
*/ - if (i != FirstOffsetNumber && attnum == prev_attnum && stack->blkno != GIN_ROOT_BLKNO && - !(i == maxoff && rightlink == InvalidBlockNumber)) + if ((i != FirstOffsetNumber) && + !(i == maxoff && rightlink == InvalidBlockNumber && !GinPageIsLeaf(page))) { + Datum prev_key; + GinNullCategory prev_key_category; + prev_key = gintuple_get_key(&state, prev_tuple, &prev_key_category); - if (ginCompareEntries(&state, attnum, prev_key, - prev_key_category, current_key, - current_key_category) >= 0) + if (ginCompareAttEntries(&state, prev_attnum, prev_key, + prev_key_category, current_attnum, + current_key, current_key_category) >= 0) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("index \"%s\" has wrong tuple order on entry tree page, block %u, offset %u, rightlink %u", @@ -556,13 +550,14 @@ gin_check_parent_keys_consistency(Relation rel, i == maxoff) { GinNullCategory parent_key_category; + OffsetNumber parent_key_attnum = gintuple_get_attrnum(&state, stack->parenttup); Datum parent_key = gintuple_get_key(&state, stack->parenttup, &parent_key_category); - if (ginCompareEntries(&state, attnum, current_key, - current_key_category, parent_key, - parent_key_category) > 0) + if (ginCompareAttEntries(&state, current_attnum, current_key, + current_key_category, parent_key_attnum, + parent_key, parent_key_category) > 0) { /* * There was a discrepancy between parent and child @@ -581,6 +576,7 @@ gin_check_parent_keys_consistency(Relation rel, stack->blkno, stack->parentblk); else { + parent_key_attnum = gintuple_get_attrnum(&state, stack->parenttup); parent_key = gintuple_get_key(&state, stack->parenttup, &parent_key_category); @@ -589,9 +585,9 @@ gin_check_parent_keys_consistency(Relation rel, * Check if it is properly adjusted. If succeed, * proceed to the next key. */ - if (ginCompareEntries(&state, attnum, current_key, - current_key_category, parent_key, - parent_key_category) > 0) + if (ginCompareAttEntries(&state, current_attnum, current_key, + current_key_category, parent_key_attnum, + parent_key, parent_key_category) > 0) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("index \"%s\" has inconsistent records on page %u offset %u", @@ -608,13 +604,12 @@ gin_check_parent_keys_consistency(Relation rel, ptr = (GinScanItem *) palloc(sizeof(GinScanItem)); ptr->depth = stack->depth + 1; /* last tuple in layer has no high key */ - if (i != maxoff && !GinPageGetOpaque(page)->rightlink) - ptr->parenttup = CopyIndexTuple(idxtuple); - else + if (i == maxoff && rightlink == InvalidBlockNumber) ptr->parenttup = NULL; + else + ptr->parenttup = CopyIndexTuple(idxtuple); ptr->parentblk = stack->blkno; ptr->blkno = GinGetDownlink(idxtuple); - ptr->parentlsn = lsn; ptr->next = stack->next; stack->next = ptr; } @@ -644,7 +639,7 @@ gin_check_parent_keys_consistency(Relation rel, } prev_tuple = CopyIndexTuple(idxtuple); - prev_attnum = attnum; + prev_attnum = current_attnum; } LockBuffer(buffer, GIN_UNLOCK); @@ -749,7 +744,7 @@ gin_refind_parent(Relation rel, BlockNumber parentblkno, ItemId p_iid = PageGetItemIdCareful(rel, parentblkno, parentpage, o); IndexTuple itup = (IndexTuple) PageGetItem(parentpage, p_iid); - if (ItemPointerGetBlockNumber(&(itup->t_tid)) == childblkno) + if (GinGetDownlink(itup) == childblkno) { /* Found it! 
Make copy and return it */ result = CopyIndexTuple(itup); diff --git a/contrib/test_decoding/expected/invalidation_distribution.out b/contrib/test_decoding/expected/invalidation_distribution.out index ad0a944cbf303..ae53b1e61de3e 100644 --- a/contrib/test_decoding/expected/invalidation_distribution.out +++ b/contrib/test_decoding/expected/invalidation_distribution.out @@ -1,4 +1,4 @@ -Parsed test spec with 2 sessions +Parsed test spec with 3 sessions starting permutation: s1_insert_tbl1 s1_begin s1_insert_tbl1 s2_alter_pub_add_tbl s1_commit s1_insert_tbl1 s2_get_binary_changes step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); @@ -18,3 +18,24 @@ count stop (1 row) + +starting permutation: s1_begin s1_insert_tbl1 s3_begin s3_insert_tbl1 s2_alter_pub_add_tbl s1_insert_tbl1 s1_commit s3_commit s2_get_binary_changes +step s1_begin: BEGIN; +step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); +step s3_begin: BEGIN; +step s3_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (2, 2); +step s2_alter_pub_add_tbl: ALTER PUBLICATION pub ADD TABLE tbl1; +step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); +step s1_commit: COMMIT; +step s3_commit: COMMIT; +step s2_get_binary_changes: SELECT count(data) FROM pg_logical_slot_get_binary_changes('isolation_slot', NULL, NULL, 'proto_version', '4', 'publication_names', 'pub') WHERE get_byte(data, 0) = 73; +count +----- + 1 +(1 row) + +?column? +-------- +stop +(1 row) + diff --git a/contrib/test_decoding/specs/invalidation_distribution.spec b/contrib/test_decoding/specs/invalidation_distribution.spec index decbed627e327..67d41969ac1d6 100644 --- a/contrib/test_decoding/specs/invalidation_distribution.spec +++ b/contrib/test_decoding/specs/invalidation_distribution.spec @@ -28,5 +28,16 @@ setup { SET synchronous_commit=on; } step "s2_alter_pub_add_tbl" { ALTER PUBLICATION pub ADD TABLE tbl1; } step "s2_get_binary_changes" { SELECT count(data) FROM pg_logical_slot_get_binary_changes('isolation_slot', NULL, NULL, 'proto_version', '4', 'publication_names', 'pub') WHERE get_byte(data, 0) = 73; } +session "s3" +setup { SET synchronous_commit=on; } +step "s3_begin" { BEGIN; } +step "s3_insert_tbl1" { INSERT INTO tbl1 (val1, val2) VALUES (2, 2); } +step "s3_commit" { COMMIT; } + # Expect to get one insert change. LOGICAL_REP_MSG_INSERT = 'I' permutation "s1_insert_tbl1" "s1_begin" "s1_insert_tbl1" "s2_alter_pub_add_tbl" "s1_commit" "s1_insert_tbl1" "s2_get_binary_changes" + +# Expect to get one insert change with LOGICAL_REP_MSG_INSERT = 'I' from +# the second "s1_insert_tbl1" executed after adding the table tbl1 to the +# publication in "s2_alter_pub_add_tbl". +permutation "s1_begin" "s1_insert_tbl1" "s3_begin" "s3_insert_tbl1" "s2_alter_pub_add_tbl" "s1_insert_tbl1" "s1_commit" "s3_commit" "s2_get_binary_changes" diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index 021153b2a5f27..b265cc89c9d46 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -2363,7 +2363,7 @@ include_dir 'conf.d' - + file_copy_method (enum) file_copy_method configuration parameter @@ -2779,6 +2779,7 @@ include_dir 'conf.d' + The default is worker. This parameter can only be set at server start. @@ -2893,7 +2894,8 @@ include_dir 'conf.d' Sets the maximum number of parallel workers that can be started by a single utility command. 
Currently, the parallel utility commands that support the use of
        parallel workers are
-       CREATE INDEX when building a B-tree or BRIN index,
+       CREATE INDEX when building a B-tree,
+       GIN, or BRIN index,
        and VACUUM without FULL
        option. Parallel workers are taken from the pool of processes
        established by , limited
@@ -5764,7 +5766,7 @@ ANY num_sync ( 
-
+
      enable_self_join_elimination (boolean)
      
       enable_self_join_elimination configuration parameter
      
diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml
index c67688cbf5f98..a6d79765c1a73 100644
--- a/doc/src/sgml/func.sgml
+++ b/doc/src/sgml/func.sgml
@@ -1824,13 +1824,23 @@ SELECT NOT(ROW(table.*) IS NOT NULL) FROM TABLE; -- detect at least one null in
        which operand falls in a histogram having
        count equal-width buckets spanning the
        range low to high.
-       Returns 0
+       The buckets have inclusive lower bounds and exclusive upper bounds.
+       Returns 0 for an input less
+       than low,
        or count+1 for an input
-       outside that range.
+       greater than or equal to high.
+       If low > high,
+       the behavior is mirror-reversed, with bucket 1
+       now being the one just below low, and the
+       inclusive bounds now being on the upper side.
       
        width_bucket(5.35, 0.024, 10.06, 5)
        3
+
+      
+       width_bucket(9, 10, 0, 10)
+       2
       
@@ -1842,8 +1852,8 @@ SELECT NOT(ROW(table.*) IS NOT NULL) FROM TABLE; -- detect at least one null in
        Returns the number of the bucket in which
        operand falls given an array listing the
-       lower bounds of the buckets. Returns 0 for an
-       input less than the first lower
+       inclusive lower bounds of the buckets.
+       Returns 0 for an input less than the first lower
        bound. operand and the array elements can be
        of any type having standard comparison operators. The
        thresholds array must be
@@ -29698,7 +29708,7 @@ postgres=# SELECT '0/0'::pg_lsn + pd.segment_number * ps.setting::int + :offset
-
+
        pg_logical_slot_get_binary_changes
@@ -29970,7 +29980,9 @@ postgres=# SELECT '0/0'::pg_lsn + pd.segment_number * ps.setting::int + :offset
        standby server. Temporary synced slots, if any, cannot be used for
        logical decoding and must be dropped after promotion. See
         for details.
-       Note that this function cannot be executed if
+       Note that this function is primarily intended for testing and
+       debugging purposes and should be used with caution. Additionally,
+       this function cannot be executed if
        sync_replication_slots is enabled and the
        slotsync worker is already running to perform the synchronization
        of slots.
diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml
index 695fe958c3ed3..298c4b38ef90a 100644
--- a/doc/src/sgml/libpq.sgml
+++ b/doc/src/sgml/libpq.sgml
@@ -1,6 +1,6 @@
-
+
 <application>libpq</application> — C Library
@@ -2168,6 +2168,24 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
+
+     ssl_max_protocol_version
+     
+      
+       This parameter specifies the maximum SSL/TLS protocol version to allow
+       for the connection.
Valid values are TLSv1, - TLSv1.1, TLSv1.2 and - TLSv1.3. The supported protocols depend on the - version of OpenSSL used, older versions - not supporting the most modern protocol versions. If not set, this - parameter is ignored and the connection will use the maximum bound - defined by the backend, if set. Setting the maximum protocol version - is mainly useful for testing or if some component has issues working - with a newer protocol. - - - - krbsrvname diff --git a/doc/src/sgml/logical-replication.sgml b/doc/src/sgml/logical-replication.sgml index 686dd441d0223..c32e6bc000d4d 100644 --- a/doc/src/sgml/logical-replication.sgml +++ b/doc/src/sgml/logical-replication.sgml @@ -2413,7 +2413,7 @@ CONTEXT: processing remote data for replication origin "pg_16395" during "INSER - Prepare for publisher upgrades + Prepare for Publisher Upgrades pg_upgrade attempts to migrate logical @@ -2485,7 +2485,7 @@ CONTEXT: processing remote data for replication origin "pg_16395" during "INSER - Prepare for subscriber upgrades + Prepare for Subscriber Upgrades Setup the @@ -2535,7 +2535,7 @@ CONTEXT: processing remote data for replication origin "pg_16395" during "INSER - Upgrading logical replication clusters + Upgrading Logical Replication Clusters While upgrading a subscriber, write operations can be performed in the @@ -2599,7 +2599,7 @@ CONTEXT: processing remote data for replication origin "pg_16395" during "INSER - Steps to upgrade a two-node logical replication cluster + Steps to Upgrade a Two-node Logical Replication Cluster Let's say publisher is in node1 and subscriber is in node2. The subscriber node2 has @@ -2743,7 +2743,7 @@ pg_ctl -D /opt/PostgreSQL/data2_upgraded start -l logfile - Steps to upgrade a cascaded logical replication cluster + Steps to Upgrade a Cascaded Logical Replication Cluster Let's say we have a cascaded logical replication setup node1->node2->node3. @@ -2972,7 +2972,7 @@ pg_ctl -D /opt/PostgreSQL/data3_upgraded start -l logfile - Steps to upgrade a two-node circular logical replication cluster + Steps to Upgrade a Two-node Circular Logical Replication Cluster Let's say we have a circular logical replication setup node1->node2 and diff --git a/doc/src/sgml/logicaldecoding.sgml b/doc/src/sgml/logicaldecoding.sgml index dd9e83b08eaf1..5c5957e0d37a1 100644 --- a/doc/src/sgml/logicaldecoding.sgml +++ b/doc/src/sgml/logicaldecoding.sgml @@ -370,10 +370,10 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU pg_create_logical_replication_slot, or by using the failover option of - CREATE SUBSCRIPTION during slot creation, and then calling - - pg_sync_replication_slots - on the standby. By setting + CREATE SUBSCRIPTION during slot creation. + Additionally, enabling + sync_replication_slots on the standby + is required. By enabling sync_replication_slots on the standby, the failover slots can be synchronized periodically in the slotsync worker. For the synchronization to work, it is mandatory to @@ -398,6 +398,52 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU receiving the WAL up to the latest flushed position on the primary server. + + + While enabling + sync_replication_slots allows for automatic + periodic synchronization of failover slots, they can also be manually + synchronized using the + pg_sync_replication_slots function on the standby. + However, this function is primarily intended for testing and debugging and + should be used with caution. 
Unlike automatic synchronization, it does not
+    include cyclic retries, making it more prone to synchronization failures,
+    particularly during initial sync scenarios where the required WAL files
+    or catalog rows for the slot may have already been removed or are at risk
+    of being removed on the standby. In contrast, automatic synchronization
+    via sync_replication_slots provides continuous slot
+    updates, enabling seamless failover and supporting high availability.
+    Therefore, it is the recommended method for synchronizing slots.
+   
+
+   
+    
+     When slot synchronization is configured as recommended,
+     and the initial synchronization is performed either automatically or
+     manually via pg_sync_replication_slots, the standby can persist the
+     synchronized slot only if the following condition is met: The logical
+     replication slot on the primary must retain WALs and system catalog
+     rows that are still available on the standby. This ensures data
+     integrity and allows logical replication to continue smoothly after
+     promotion.
+     If the required WALs or catalog rows have already been purged from the
+     standby, the slot will not be persisted to avoid data loss. In such
+     cases, the following log message may appear:
+
+      LOG: could not synchronize replication slot "failover_slot"
+      DETAIL: Synchronization could lead to data loss as the remote slot needs WAL at LSN 0/3003F28 and catalog xmin 754, but the standby has LSN 0/3003F28 and catalog xmin 756
+
+     If the logical replication slot is actively used by a consumer, no
+     manual intervention is needed; the slot will advance automatically,
+     and synchronization will resume in the next cycle. However, if no
+     consumer is configured, it is advisable to manually advance the slot
+     on the primary using
+     pg_logical_slot_get_changes or
+     
+     pg_logical_slot_get_binary_changes,
+     allowing synchronization to proceed.
+    
+   
+
    
     The ability to resume logical replication after failover depends upon the
     pg_replication_slots.synced
diff --git a/doc/src/sgml/plpython.sgml b/doc/src/sgml/plpython.sgml
index bee817ea822a2..cb065bf5f88db 100644
--- a/doc/src/sgml/plpython.sgml
+++ b/doc/src/sgml/plpython.sgml
@@ -1,6 +1,6 @@
-
+
 PL/Python — Python Procedural Language
 
  PL/Python
diff --git a/doc/src/sgml/protocol.sgml b/doc/src/sgml/protocol.sgml
index c4d3853cbf2c2..137ffc8d0b7eb 100644
--- a/doc/src/sgml/protocol.sgml
+++ b/doc/src/sgml/protocol.sgml
@@ -189,7 +189,7 @@
 
-  Protocol versions
+  Protocol Versions
 
   
    The current, latest version of the protocol is version 3.2. However, for
@@ -226,7 +226,7 @@
 
-    Protocol versions
+    Protocol Versions
 
     
diff --git a/doc/src/sgml/ref/alter_database.sgml b/doc/src/sgml/ref/alter_database.sgml
index 9da8920e12eff..1fc051e11a311 100644
--- a/doc/src/sgml/ref/alter_database.sgml
+++ b/doc/src/sgml/ref/alter_database.sgml
@@ -83,7 +83,7 @@ ALTER DATABASE name RESET ALL
    must be empty for this database, and no one can be connected to
    the database. Tables and indexes in non-default tablespaces are
    unaffected. The method used to copy files to the new tablespace
-   is affected by the setting.
+   is affected by the setting.
diff --git a/doc/src/sgml/ref/create_database.sgml b/doc/src/sgml/ref/create_database.sgml
index 640c0425faec5..4da8aeebb50a2 100644
--- a/doc/src/sgml/ref/create_database.sgml
+++ b/doc/src/sgml/ref/create_database.sgml
@@ -140,7 +140,7 @@ CREATE DATABASE name
    after the creation of the new database. In some situations, this may have
    a noticeable negative impact on overall system performance.
The FILE_COPY strategy is affected by the setting.
+   linkend="guc-file-copy-method"/> setting.
diff --git a/doc/src/sgml/ref/create_foreign_table.sgml b/doc/src/sgml/ref/create_foreign_table.sgml
index d08834ac9d291..009fa46532bbe 100644
--- a/doc/src/sgml/ref/create_foreign_table.sgml
+++ b/doc/src/sgml/ref/create_foreign_table.sgml
@@ -232,7 +232,7 @@ WITH ( MODULUS numeric_literal, REM
      INCLUDING COMMENTS
      
-      Comments for the copied columns, constraints, and indexes will be
+      Comments for the copied columns and constraints will be
       copied. The default behavior is to exclude comments, resulting in the
       copied columns and constraints in the new table having no comments.
diff --git a/doc/src/sgml/ref/create_index.sgml b/doc/src/sgml/ref/create_index.sgml
index 147a8f7587c71..b9c679c41e8db 100644
--- a/doc/src/sgml/ref/create_index.sgml
+++ b/doc/src/sgml/ref/create_index.sgml
@@ -814,7 +814,7 @@ Indexes:
     leveraging multiple CPUs in order to process the table rows faster.
     This feature is known as parallel index
     build. For index methods that support building indexes
-    in parallel (currently, B-tree and BRIN),
+    in parallel (currently, B-tree, GIN, and BRIN),
     maintenance_work_mem specifies the maximum
     amount of memory that can be used by each index build operation as
     a whole, regardless of how many worker processes were started.
diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml
index 4a41b2f553007..a581691818278 100644
--- a/doc/src/sgml/ref/create_table.sgml
+++ b/doc/src/sgml/ref/create_table.sgml
@@ -447,11 +447,6 @@ WITH ( MODULUS numeric_literal, REM
      the values in the new row, an error will be reported.
     
 
-    
-     Partitioned tables do not support EXCLUDE constraints;
-     however, you can define these constraints on individual partitions.
-    
-
     
      See  for more discussion on table
      partitioning.
@@ -1162,6 +1157,18 @@ WITH ( MODULUS numeric_literal, REM
      exclusion constraint on a subset of the table; internally this creates a
      partial index. Note that parentheses are required around the predicate.
     
+
+    
+     When establishing an exclusion constraint for a multi-level partition
+     hierarchy, all the columns in the partition key of the target
+     partitioned table, as well as those of all its descendant partitioned
+     tables, must be included in the constraint definition. Additionally,
+     those columns must be compared using the equality operator. These
+     restrictions ensure that potentially-conflicting rows will exist in the
+     same partition. The constraint may also refer to other columns which
+     are not a part of any partition key, which can be compared using any
+     appropriate operator.
+    
diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml
index d7595a7e5468d..0d9270116549a 100644
--- a/doc/src/sgml/ref/pg_dump.sgml
+++ b/doc/src/sgml/ref/pg_dump.sgml
@@ -18,7 +18,7 @@ PostgreSQL documentation
 
  pg_dump
- extract a PostgreSQL database into a script file or other archive file
+ export a PostgreSQL database as an SQL script or to other formats
 
@@ -1277,8 +1277,8 @@ PostgreSQL documentation
        The data section contains actual table data, large-object
-       contents, statistics for tables and materialized views and
-       sequence values.
+       contents, sequence values, and statistics for tables,
+       materialized views, and foreign tables.
        Post-data items include definitions of indexes, triggers, rules,
        statistics for indexes, and constraints other than validated check
        constraints.
@@ -1359,7 +1359,8 @@ PostgreSQL documentation Dump only the statistics, not the schema (data definitions) or data. - Statistics for tables, materialized views, and indexes are dumped. + Statistics for tables, materialized views, foreign tables, + and indexes are dumped. diff --git a/doc/src/sgml/ref/pg_dumpall.sgml b/doc/src/sgml/ref/pg_dumpall.sgml index 723a466cfaad6..8ca68da5a5560 100644 --- a/doc/src/sgml/ref/pg_dumpall.sgml +++ b/doc/src/sgml/ref/pg_dumpall.sgml @@ -16,7 +16,10 @@ PostgreSQL documentation pg_dumpall - extract a PostgreSQL database cluster using a specified dump format + + + export a PostgreSQL database cluster as an SQL script or to other formats + @@ -33,7 +36,7 @@ PostgreSQL documentation pg_dumpall is a utility for writing out (dumping) all PostgreSQL databases - of a cluster into an archive. The archive contains + of a cluster into an SQL script file or an archive. The output contains SQL commands that can be used as input to to restore the databases. It does this by calling for each database in the cluster. @@ -690,7 +693,8 @@ exclude database PATTERN Dump only the statistics, not the schema (data definitions) or data. - Statistics for tables, materialized views, and indexes are dumped. + Statistics for tables, materialized views, foreign tables, + and indexes are dumped. diff --git a/doc/src/sgml/ref/pg_restore.sgml b/doc/src/sgml/ref/pg_restore.sgml index 8c88b07dcc865..b649bd3a5ae0f 100644 --- a/doc/src/sgml/ref/pg_restore.sgml +++ b/doc/src/sgml/ref/pg_restore.sgml @@ -18,8 +18,8 @@ PostgreSQL documentation pg_restore - restore a PostgreSQL database or cluster - from an archive created by pg_dump or + restore PostgreSQL databases from archives + created by pg_dump or pg_dumpall diff --git a/doc/src/sgml/ref/psql-ref.sgml b/doc/src/sgml/ref/psql-ref.sgml index 15e3f3a849226..570ef21d1fce3 100644 --- a/doc/src/sgml/ref/psql-ref.sgml +++ b/doc/src/sgml/ref/psql-ref.sgml @@ -1101,7 +1101,7 @@ SELECT $1 \parse stmt1 Outputs information about the current database connection, - including TLS-related information if TLS is in use. + including SSL-related information if SSL is in use. Note that the Client User field shows diff --git a/doc/src/sgml/release-18.sgml b/doc/src/sgml/release-18.sgml index c5e60f88fdf15..662c7d8890f9b 100644 --- a/doc/src/sgml/release-18.sgml +++ b/doc/src/sgml/release-18.sgml @@ -6,7 +6,7 @@ Release date: - 2025-??-??, CURRENT AS OF 2025-05-23 + 2025-??-??, CURRENT AS OF 2025-06-20 @@ -58,68 +58,80 @@ Author: Peter Eisentraut 2024-10-16 [04bec894a04] initdb: Change default to using data checksums. --> - - -Change initdb default to enable data checksums -§ - + + + Change default to enable data checksums + (Greg Sabino Mullane) + § + - -Checksums can be disabled with the new initdb option --no-data-checksums. -pg_upgrade requires matching cluster checksum settings, so this new -option can be useful to upgrade non-checksum old clusters. - - + + Checksums can be disabled with the + new initdb option + . + requires matching cluster checksum settings, so this new option can + be useful to upgrade non-checksum old clusters. + + - - -Change time zone abbreviation handling (Tom Lane) -§ - + + + Change time zone abbreviation handling (Tom Lane) + § + - -The system will now favor the current session's time zone abbreviations before checking the server variable timezone_abbreviations. Previously timezone_abbreviations was -checked first. 
- - + + The system will now favor the current session's time + zone abbreviations before checking the server variable + . Previously + timezone_abbreviations was checked first. + + - - -Deprecate MD5 password authentication (Nathan Bossart) -§ - + + + Deprecate MD5 password + authentication (Nathan Bossart) + § + - -Support for MD5 passwords will be removed in a future major version release. CREATE ROLE and ALTER ROLE now emit deprecation warnings when setting MD5 passwords. -These warnings can be disabled by setting the md5_password_warnings parameter to "off". - - + + Support for MD5 passwords will be removed in a future major + version release. and now emit deprecation warnings when + setting MD5 passwords. These warnings can be disabled by setting + the parameter to + off. + + - - -Change VACUUM and ANALYZE to process the inheritance children of a parent (Michael Harris) -§ - + + + Change and + to process the inheritance children of a parent (Michael Harris) + § + - -The previous behavior can be performed by using the new ONLY option. - - + + The previous behavior can be performed by using the new + ONLY option. + + - - -Prevent COPY FROM from treating \. as an end-of-file marker when reading CSV files (Daniel Vérité, Tom Lane) -§ -§ - + + + Prevent COPY FROM + from treating \. as an end-of-file marker when + reading CSV files (Daniel Vérité, Tom Lane) + § + § + - -psql will still treat \. as an end-of-file marker when reading CSV files from STDIN. Older psql clients connecting to Postgres 18 servers might experience \copy problems. This -release also enforces that \. must appear alone on a line. - - + + will still treat + \. as an end-of-file marker when reading + CSV files from STDIN. + Older psql clients connecting to + PostgreSQL 18 servers might experience \copy + problems. This release also enforces that \. + must appear alone on a line. + + - - -Disallow unlogged partitioned tables (Michael Paquier) -§ - + + + Disallow unlogged partitioned tables (Michael Paquier) + § + - -Previously ALTER TABLE SET [UN]LOGGED did nothing, and the creation of an unlogged partitioned table did not cause its children to be unlogged. - - + + Previously ALTER TABLE SET + [UN]LOGGED did nothing, and the creation of an + unlogged partitioned table did not cause its children to be unlogged. + + - - -Execute AFTER triggers as the role that was active when trigger events were queued (Laurenz Albe) - + + + Execute AFTER triggers as the role that was active when + trigger events were queued (Laurenz Albe) + § + - -Previously such triggers were run as the role that was active at trigger execution time (e.g., at COMMIT). This is significant for cases where the role is changed between queue time and -transaction commit. - - + + Previously such triggers were run as the role that was active at + trigger execution time (e.g., at ). + This is significant for cases where the role is changed between queue + time and transaction commit. + + - - -Remove non-functional support for RULE privileges in GRANT/REVOKE (Fujii Masao) -§ - + + + Remove non-functional support for rule privileges in / (Fujii Masao) + § + - -These have been non-functional since Postgres 8.2. - - + + These have been non-functional since + PostgreSQL 8.2. + + - - -Remove column pg_backend_memory_contexts.parent (Melih Mutlu) -§ - + + + Remove column pg_backend_memory_contexts.parent + (Melih Mutlu) + § + - -This is now longer needed since pg_backend_memory_contexts.path was added. 
- - + + This is no longer needed since + pg_backend_memory_contexts.path + was added. + + - - -Change pg_backend_memory_contexts.level and pg_log_backend_memory_contexts() to be one-based (Melih Mutlu, Atsushi Torikoshi, David Rowley, Fujii Masao) -§ -§ -§ - + + + Change + pg_backend_memory_contexts.level + and pg_log_backend_memory_contexts() + to be one-based (Melih Mutlu, Atsushi Torikoshi, David Rowley, + Fujii Masao) + § + § + § + - -These were previously zero-based. - - + + These were previously zero-based. + + @@ -254,40 +292,48 @@ Author: Alexander Korotkov 2025-02-17 [fc069a3a6] Implement Self-Join Elimination --> - - -Remove some unnecessary table self-joins (Andrey Lepikhov, Alexander Kuzmenkov, Alexander Korotkov, Alena Rybakina) -§ - + + + Automatically remove some unnecessary table self-joins (Andrey + Lepikhov, Alexander Kuzmenkov, Alexander Korotkov, Alena Rybakina) + § + - -This optimization can be disabled using server variable enable_self_join_elimination. - - + + This optimization can be disabled using server variable . + + - - -Convert some 'IN (VALUES ...)' to 'x = ANY ...' for better optimizer statistics (Alena Rybakina, Andrei Lepikhov) -§ - - + + + Convert some IN (VALUES + ...) to x = ANY ... for better + optimizer statistics (Alena Rybakina, Andrei Lepikhov) + § + + - - -Allow transforming OR-clauses to arrays for faster index processing (Alexander Korotkov, Andrey Lepikhov) -§ - - + + + Allow transforming OR-clauses + to arrays for faster index processing (Alexander Korotkov, Andrey + Lepikhov) + § + + - - -Speed up the processing of INTERSECT, EXCEPT, window aggregates, and view column aliases (Tom Lane, David Rowley) -§ -§ -§ -§ - - + + + Speed up the processing of INTERSECT, + EXCEPT, window aggregates, and view column aliases (Tom Lane, + David Rowley) + § + § + § + § + + - - -Allow the keys of SELECT DISTINCT to be internally reordered to avoid sorting (Richard Guo) -§ - + + + Allow the keys of SELECT + DISTINCT to be internally reordered to avoid sorting + (Richard Guo) + § + - -This optimization can be disabled using enable_distinct_reordering. - - + + This optimization can be disabled using . + + - - -Ignore GROUP BY columns that are functionally dependent on other columns (Zhang Mingli, Jian He, David Rowley) -§ - + + + Ignore GROUP BY + columns that are functionally dependent on other columns (Zhang + Mingli, Jian He, David Rowley) + § + - -If a GROUP BY clause includes all columns of a unique index, as well as other columns of the same table, those other columns are redundant and can be dropped -from the grouping. This was already true for non-deferred primary keys. - - + + If a GROUP BY clause includes all columns of + a unique index, as well as other columns of the same table, those + other columns are redundant and can be dropped from the grouping. + This was already true for non-deferred primary keys. + + - - -Allow some HAVING clauses on GROUPING SETS to be pushed to WHERE clauses (Richard Guo) -§ -§ -§ -§ - - - -This allows earlier row filtering. This release also fixes some GROUPING SETS queries that used to return incorrect results. - - + + + Allow some HAVING clauses + on GROUPING + SETS to be pushed to WHERE clauses + (Richard Guo) + § + § + § + § + + + + This allows earlier row filtering. This release also fixes some + GROUPING SETS queries that used to return + incorrect results. 
+ + - - -Improve row estimates for generate_series() using numeric and timestamp values (David Rowley, Song Jinzhou) -§ -§ - - + + + Improve row estimates for generate_series() + using numeric + and timestamp + values (David Rowley, Song Jinzhou) + § + § + + - - -Allow the optimizer to use "Right Semi Join" plans (Richard Guo) -§ - + + + Allow the optimizer to use Right Semi Join plans + (Richard Guo) + § + - -Semi-joins are used when needing to find if there is at least one match. - - + + Semi-joins are used when needing to find if there is at least + one match. + + - - -Allow merge joins to use incremental sorts (Richard Guo) -§ - - + + + Allow merge joins to use incremental sorts + (Richard Guo) + § + + - - -Improve the efficiency of planning queries accessing many partitions (Ashutosh Bapat, Yuya Watari, David Rowley) -§ -§ - - + + + Improve the efficiency of planning queries accessing many partitions + (Ashutosh Bapat, Yuya Watari, David Rowley) + § + § + + - - -Allow partitionwise joins in more cases, and reduce its memory usage (Richard Guo, Tom Lane, Ashutosh Bapat) -§ -§ - - + + + Allow partitionwise + joins in more cases, and reduce its memory usage (Richard Guo, + Tom Lane, Ashutosh Bapat) + § + § + + - - -Improve cost estimates of partition queries (Nikita Malakhov, Andrei Lepikhov) -§ - - + + + Improve cost estimates of partition queries (Nikita Malakhov, + Andrei Lepikhov) + § + + - - -Improve SQL-language function plan caching (Alexander Pyhalov, Tom Lane) -§ -§ - - + + + Improve SQL-language + function plan caching (Alexander Pyhalov, Tom Lane) + § + § + + - - -Improve handling of disabled optimizer features (Robert Haas) -§ - - + + + Improve handling of disabled optimizer features (Robert Haas) + § + + @@ -496,18 +574,19 @@ Author: Peter Geoghegan 2025-04-04 [8a510275d] Further optimize nbtree search scan key comparisons. --> - - -Allow skip scans of btree indexes (Peter Geoghegan) -§ -§ - + + + Allow skip scans of btree indexes + (Peter Geoghegan) + § + § + - -This allows multi-column btree indexes to be used by queries that only -equality-reference the second or later indexed columns. - - + + This allows multi-column btree indexes to be used by queries that + only equality-reference the second or later indexed columns. + + - - -Allow non-btree unique indexes to be used as partition keys and in materialized views (Mark Dilger) -§ -§ - + + + Allow non-btree unique indexes to be used as partition keys and in + materialized views (Mark Dilger) + § + § + - -The index type must still support equality. - - + + The index type must still support equality. + + - - -Allow GIN indexes to be created in parallel (Tomas Vondra, Matthias van de Meent) -§ - - + + + Allow GIN indexes to + be created in parallel (Tomas Vondra, Matthias van de Meent) + § + + - - -Allow values to be sorted to speed rangetype GiST and btree index builds (Bernd Helmle) -§ - - + + + Allow values to be sorted to speed range-type GiST and btree + index builds (Bernd Helmle) + § + + @@ -586,41 +669,51 @@ Author: Andres Freund 2025-03-30 [2a5e709e7] Enable IO concurrency on all systems --> - - -Add an asynchronous I/O subsystem (Andres Freund, Thomas Munro, Nazir Bilal Yavuz, Melanie Plageman) -§ -§ -§ -§ -§ -§ -§ -§ -§ -§ -§ - - - -This feature allows backends to queue multiple read requests, which allows for more efficient sequential scans, bitmap heap scans, vacuums, etc. -This is enabled by server variable io_method, with server variables io_combine_limit and io_max_combine_limit added to control it. 
This also enables -effective_io_concurrency and maintenance_io_concurrency values greater than zero for systems without fadvise() support. The new system view pg_aios shows the file handles being used -for asynchronous I/O. - - + + + Add an asynchronous I/O subsystem (Andres Freund, Thomas Munro, + Nazir Bilal Yavuz, Melanie Plageman) + § + § + § + § + § + § + § + § + § + § + § + + + + This feature allows backends to queue multiple read requests, + which allows for more efficient sequential scans, bitmap + heap scans, vacuums, etc. This is enabled by server + variable , with server + variables and added to control it. + This also enables + and + values greater than zero for systems without + fadvise() support. The new system view pg_aios + shows the file handles being used for asynchronous I/O. + + - - -Improve the locking performance of queries that access many relations (Tomas Vondra) -§ - - + + + Improve the locking performance of queries that access many relations + (Tomas Vondra) + § + + - - -Improve the performance and reduce memory usage of hash joins and GROUP BY (David Rowley, Jeff Davis) -§ -§ -§ -§ -§ - + + + Improve the performance and reduce memory usage of hash joins and + GROUP BY + (David Rowley, Jeff Davis) + § + § + § + § + § + - -This also improves hash set operations used by EXCEPT, and hash lookups of subplan values. - - + + This also improves hash set operations used by EXCEPT, and hash + lookups of subplan values. + + - - -Allow normal vacuums to freeze some pages, even though they are all-visible (Melanie Plageman) -§ -§ - + + + Allow normal vacuums to freeze some pages, even though they are + all-visible (Melanie Plageman) + § + § + - -This reduces the overhead of later full-relation freezing. The aggressiveness of this can be controlled by server variable and per-table setting vacuum_max_eager_freeze_failure_rate. -Previously vacuum never processed all-visible pages until freezing was required. - - + + This reduces the overhead of later full-relation + freezing. The aggressiveness of this can be + controlled by server variable and per-table setting . Previously + vacuum never processed all-visible pages until freezing was required. + + - - -Add server variable vacuum_truncate to control file truncation during VACUUM (Nathan Bossart, Gurjeet Singh) -§ - + + + Add server variable to control + file truncation during (Nathan Bossart, + Gurjeet Singh) + § + - -A storage-level parameter with the same name and behavior already existed. - - + + A storage-level parameter with the same name and behavior already + existed. + + - - -Increase server variables effective_io_concurrency's and maintenance_io_concurrency's default values to 16 (Melanie Plageman) -§ -§ - + + + Increase server variables 's and 's default values to 16 + (Melanie Plageman) + § + § + - -This more accurately reflects modern hardware. - - + + This more accurately reflects modern hardware. + + @@ -719,72 +826,90 @@ Author: Melanie Plageman 2025-03-12 [9219093ca] Modularize log_connections output --> - - -Increase the logging granularity of server variable log_connections (Melanie Plageman) -§ - + + + Increase the logging granularity of server variable (Melanie Plageman) + § + - -This server variable was previously only boolean; these options are still supported. - - + + This server variable was previously only boolean, which is still + supported. 
+ + - - -Add log_connections option to report the duration of connection stages (Melanie Plageman) -§ - - + + + Add log_connections option to report the duration + of connection stages (Melanie Plageman) + § + + - - -Add log_line_prefix escape "%L" to output the client IP address (Greg Sabino Mullane) -§ - - + + + Add escape + %L to output the client IP + address (Greg Sabino Mullane) + § + + - - -Add server variable log_lock_failures to log lock acquisition failures (Yuki Seino) -§ - + + + Add server variable to log + lock acquisition failures (Yuki Seino, Fujii Masao) + § + § + - -Specifically it reports SELECT ... NOWAIT lock failures. - - + + Specifically it reports SELECT + ... NOWAIT lock failures. + + - - -Modify pg_stat_all_tables and its variants to report the time spent in vacuum, analyze, and their automatic variants (Sami Imseih) -§ - + + + Modify pg_stat_all_tables + and its variants to report the time spent in , , and their + automatic variants (Sami Imseih) + § + - -The new columns are total_vacuum_time, total_autovacuum_time, total_analyze_time, and total_autoanalyze_time. - - + + The new columns are total_vacuum_time, + total_autovacuum_time, + total_analyze_time, and + total_autoanalyze_time. + + - - -Add delay time reporting to VACUUM and ANALYZE (Bertrand Drouvot, Nathan Bossart) -§ -§ - + + + Add delay time reporting to and (Bertrand Drouvot, Nathan Bossart) + § + § + - -This information appears in the autovacuum logs, the system views pg_stat_progress_vacuum and pg_stat_progress_analyze, and the output of VACUUM and ANALYZE when in VERBOSE -mode; tracking must be enabled with the server variable track_cost_delay_timing. - - + + This information appears in the server log, the system views pg_stat_progress_vacuum + and pg_stat_progress_analyze, + and the output of and when in VERBOSE + mode; tracking must be enabled with the server variable . + + - - -Add WAL, CPU, and average read statistics output to ANALYZE VERBOSE (Anthonin Bonnefoy) -§ -§ - - + + + Add WAL, CPU, and average + read statistics output to ANALYZE VERBOSE + (Anthonin Bonnefoy) + § + § + + - - -Add full WAL buffer count to VACUUM/ANALYZE (VERBOSE) and autovacuum log output (Bertrand Drouvot) -§ - - + + + Add full WAL buffer count to + VACUUM/ANALYZE (VERBOSE) + and autovacuum log output (Bertrand Drouvot) + § + + - - -Add per-backend I/O statistics reporting (Bertrand Drouvot) -§ -§ - + + + Add per-backend I/O statistics reporting (Bertrand Drouvot) + § + § + - -The statistics are accessed via pg_stat_get_backend_io(). Per-backend I/O statistics can be cleared via pg_stat_reset_backend_stats(). - - + + The statistics are accessed via pg_stat_get_backend_io(). + Per-backend I/O statistics can be cleared via pg_stat_reset_backend_stats(). + + - - -Add pg_stat_io columns to report I/O activity in bytes (Nazir Bilal Yavuz) -§ - + + + Add pg_stat_io + columns to report I/O activity in bytes (Nazir Bilal Yavuz) + § + - -The new columns are read_bytes, write_bytes, and extend_bytes. The op_bytes column, which always equaled BLCKSZ, has been removed. - - + + The new columns are read_bytes, + write_bytes, and + extend_bytes. The + op_bytes column, which always equaled + BLCKSZ, + has been removed. + + - - -Add WAL I/O activity rows to pg_stat_io (Nazir Bilal Yavuz, Bertrand Drouvot, Michael Paquier) -§ -§ -§ - + + + Add WAL I/O activity rows to + pg_stat_io (Nazir Bilal Yavuz, Bertrand + Drouvot, Michael Paquier) + § + § + § + - -This includes WAL receiver activity and a wait event for such writes. 
- + + This includes WAL receiver activity and a wait + event for such writes. + - + - - -Change server variable track_wal_io_timing to control tracking WAL timing in pg_stat_io instead of pg_stat_wal (Bertrand Drouvot) -§ - - + + + Change server variable + to control tracking WAL timing + in pg_stat_io instead of pg_stat_wal + (Bertrand Drouvot) + § + + - - -Remove read/sync columns from pg_stat_wal (Bertrand Drouvot) -§ -§ - + + + Remove read/sync columns from pg_stat_wal + (Bertrand Drouvot) + § + § + - -This removes columns wal_write, wal_sync, wal_write_time, and wal_sync_time. - - + + This removes columns wal_write, + wal_sync, + wal_write_time, and + wal_sync_time. + + - - -Add function pg_stat_get_backend_wal() to return per-backend WAL statistics (Bertrand Drouvot) -§ - + + + Add function pg_stat_get_backend_wal() + to return per-backend WAL statistics (Bertrand + Drouvot) + § + - -Per-backend WAL statistics can be cleared via pg_stat_reset_backend_stats(). - - + + Per-backend WAL + statistics can be cleared via pg_stat_reset_backend_stats(). + + - - -Add function pg_ls_summariesdir() to specifically list the contents of PGDATA/pg_wal/summaries (Yushi Ogiwara) -§ - - + + + Add function pg_ls_summariesdir() + to specifically list the contents of PGDATA/pg_wal/summaries + (Yushi Ogiwara) + § + + - - -Add column pg_stat_checkpointer.num_done to report the number of completed checkpoints (Anton A. Melnikov) -§ - + + + Add column pg_stat_checkpointer.num_done + to report the number of completed checkpoints (Anton A. Melnikov) + § + - -Columns num_timed and num_requested count both completed and skipped checkpoints. - - + + Columns num_timed and + num_requested count both completed and + skipped checkpoints. + + - - -Add column pg_stat_checkpointer.slru_written to report SLRU buffers written (Nitin Jadhav) -§ - + + + Add column + pg_stat_checkpointer.slru_written + to report SLRU buffers written (Nitin Jadhav) + § + - -Also, modify the checkpoint server log message to report separate shared buffer and SLRU buffer values. - - + + Also, modify the checkpoint server log message to report separate + shared buffer and SLRU buffer values. + + - - -Add columns to pg_stat_database to report parallel workers activity (Benoit Lobréau) -§ - + + + Add columns to pg_stat_database + to report parallel worker activity (Benoit Lobréau) + § + - -The new columns are parallel_workers_to_launch and parallel_workers_launched. - - + + The new columns are + parallel_workers_to_launch and + parallel_workers_launched. + + - - -Have query jumbling of arrays consider only the first and last array elements (Dmitry Dolgov, Sami Imseih) -§ -§ - + + + Have query id computation + of arrays consider only the first and last array elements (Dmitry + Dolgov, Sami Imseih) + § + § + - -Jumbling is used by pg_stat_statements. - - + + Jumbling is used by . + + - - -Adjust query jumbling to group together queries using the same relation name (Michael Paquier, Sami Imseih) -§ - + + + Adjust query id computations to group together queries using the + same relation name (Michael Paquier, Sami Imseih) + § + - -This is true even if the tables in different schemas have different column names. - - + + This is true even if the tables in different schemas have different + column names. 
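
A short sketch of the new statistics surfaces described above; the argument to pg_stat_get_backend_wal() is assumed to be a backend PID, matching the convention of the existing pg_stat_get_backend_io():

    -- Per-backend WAL statistics for the current session.
    SELECT * FROM pg_stat_get_backend_wal(pg_backend_pid());

    -- Completed vs. requested checkpoints, and SLRU buffers written.
    SELECT num_timed, num_requested, num_done, slru_written
    FROM pg_stat_checkpointer;
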
+ + - - -Add column pg_backend_memory_contexts.type to report the type of memory context (David Rowley) -§ - - + + + Add column pg_backend_memory_contexts.type + to report the type of memory context (David Rowley) + § + + - - -Add column pg_backend_memory_contexts.path to show memory context parents (Melih Mutlu) -§ - - + + + Add column + pg_backend_memory_contexts.path + to show memory context parents (Melih Mutlu) + § + + @@ -1073,53 +1259,61 @@ Author: Michael Paquier 2024-07-10 [d898665bf] Extend pg_get_acl() to handle sub-object IDs --> - - -Add function pg_get_acl() to retrieve database access control details (Joel Jacobson) -§ -§ - - + + + Add function pg_get_acl() + to retrieve database access control details (Joel Jacobson) + § + § + + - - -Add function has_largeobject_privilege() to check large object privileges (Yugo Nagata) -§ - - + + + Add function has_largeobject_privilege() + to check large object privileges (Yugo Nagata) + § + + - - -Allow ALTER DEFAULT PRIVILEGES to define large object default privileges (Takatsuka Haruka, Yugo Nagata, Laurenz Albe) -§ - - + + + Allow to define + large object default privileges (Takatsuka Haruka, Yugo Nagata, + Laurenz Albe) + § + + - - -Add predefined role pg_signal_autovacuum_worker (Kirill Reshke) -§ - + + + Add predefined role pg_signal_autovacuum_worker + (Kirill Reshke) + § + - -This allows sending signals to autovacuum workers. - - + + This allows sending signals to autovacuum workers. + + @@ -1135,68 +1329,69 @@ Author: Daniel Gustafsson 2025-02-20 [b3f0be788] Add support for OAUTHBEARER SASL mechanism --> - - -Add support for the OAuth authentication method (Jacob Champion, Daniel Gustafsson, Thomas Munro) -§ - + + + Add support for the OAuth authentication + method (Jacob Champion, Daniel Gustafsson, Thomas Munro) + § + - -This adds an "oauth" authentication method to pg_hba.conf, libpq OAuth options, a server variable oauth_validator_libraries to load token validation libraries, and -a configure flag --with-libcurl to add the required compile-time libraries. - - + + This adds an oauth authentication method to pg_hba.conf, + libpq OAuth options, a server variable to load + token validation libraries, and a configure flag + to add the required compile-time libraries. + + - - -Add server variable ssl_tls13_ciphers to allow specification of multiple colon-separated TLSv1.3 cipher suites (Erica Zhang, Daniel Gustafsson) -§ - - + + + Add server variable to allow + specification of multiple colon-separated TLSv1.3 cipher suites + (Erica Zhang, Daniel Gustafsson) + § + + - - -Change server variable ssl_groups's default to include elliptic curve X25519 (Daniel Gustafsson, Jacob Champion) -§ - - + + + Change server variable 's default + to include elliptic curve X25519 (Daniel Gustafsson, Jacob Champion) + § + + - - -Rename server variable ssl_ecdh_curve to ssl_groups and allow multiple colon-separated ECDH curves to be specified (Erica Zhang, Daniel Gustafsson) -§ - - -The previous name still works. - - - - + + + Rename server variable ssl_ecdh_curve to and allow multiple colon-separated + ECDH curves to be specified (Erica Zhang, + Daniel Gustafsson) + § + - - -Add function pg_check_fipsmode() to report the server's FIPS mode (Daniel Gustafsson) -§ - - + + The previous name still works. 
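
The new ACL introspection additions in this group are plain SQL calls. In this sketch, mytable and the large-object OID 16400 are hypothetical, and has_largeobject_privilege() is assumed to accept the privilege strings valid for large objects (SELECT, UPDATE):

    -- Look up an ACL by catalog, object OID, and sub-object id.
    SELECT pg_get_acl('pg_class'::regclass, 'mytable'::regclass, 0);

    -- Check a large object's privileges, and set a default for future ones.
    SELECT has_largeobject_privilege(16400, 'SELECT');
    ALTER DEFAULT PRIVILEGES GRANT SELECT ON LARGE OBJECTS TO PUBLIC;
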
+ + - - -Make cancel request keys 256 bits (Heikki Linnakangas, Jelte Fennema-Nio) -§ -§ - + + + Make cancel request + keys 256 bits (Heikki Linnakangas, Jelte Fennema-Nio) + § + § + - -This is only possible when the server and client support wire protocol version 3.2, introduced in this release. - - + + This is only possible when the server and client support wire + protocol version 3.2, introduced in this release. + + - - -Add server variable autovacuum_worker_slots to specify the maximum number of background workers (Nathan Bossart) -§ - + + + Add server variable + to specify the maximum number of background workers (Nathan Bossart) + § + - -With this variable set, autovacuum_max_workers can be adjusted at runtime up to this maximum without a server restart. - - + + With this variable set, + can be adjusted at runtime up to this maximum without a server + restart. + + - - -Allow specification of the fixed number of dead tuples that will trigger an autovacuum (Nathan Bossart, Frédéric Yhuel) -§ - + + + Allow specification of the fixed number of dead tuples that will + trigger an autovacuum (Nathan + Bossart, Frédéric Yhuel) + § + - -The server variable is autovacuum_vacuum_max_threshold. Percentages are still used for triggering. - - + + The server variable is . Percentages are + still used for triggering. + + - - -Change server variable max_files_per_process to limit only files opened by a backend (Andres Freund) -§ - + + + Change server variable + to limit only files opened by a backend (Andres Freund) + § + - -Previously files opened by the postmaster were also counted toward this limit. - - + + Previously files opened by the postmaster were also counted toward + this limit. + + - - -Add server variable num_os_semaphores to report the required number of semaphores (Nathan Bossart) -§ - + + + Add server variable to + report the required number of semaphores (Nathan Bossart) + § + - -This is useful for operating system configuration. - - + + This is useful for operating system configuration. + + - - -Add server variable extension_control_path to specify the location of extension control files (Peter Eisentraut, Matheus Alcantara) -§ -§ - - + + + Add server variable to + specify the location of extension control files (Peter Eisentraut, + Matheus Alcantara) + § + § + + - + @@ -1310,28 +1519,34 @@ Author: Amit Kapila 2025-02-19 [ac0e33136] Invalidate inactive replication slots. --> - - -Allow inactive replication slots to be automatically invalided using server variable idle_replication_slot_timeout (Nisha Moond, Bharath Rupireddy) -§ - - + + + Allow inactive replication slots to be automatically invalided using + server variable + (Nisha Moond, Bharath Rupireddy) + § + + - - -Add server variable max_active_replication_origins to control the maximum active replication origins (Euler Taveira) -§ - + + + Add server variable to control the + maximum active replication origins (Euler Taveira) + § + - -This was previously controlled by max_replication_slots, but this new setting allows a higher origin count in cases where fewer slots are required. - - + + This was previously controlled by , but this new setting allows + a higher origin count in cases where fewer slots are required. + + @@ -1353,33 +1568,44 @@ Author: Amit Kapila 2025-01-30 [6252b1eaf] Doc: Generated column replication. 
--> - - -Allow the values of generated columns to be logically replicated (Shubham Khanna, Vignesh C, Zhijie Hou, Shlok Kyal, Peter Smith) -§ -§ -§ -§ - - - -If the publication specifies a column list, all specified columns, generated and non-generated, are published. Without a specified column list, publication option publish_generated_columns -controls whether generated columns are published. Previously generated columns were not replicated and the subscriber had to compute the values if possible; this is particularly -useful for non-Postgres subscribers which lack such a capability. - - + + + Allow the values of generated + columns to be logically replicated (Shubham Khanna, Vignesh C, + Zhijie Hou, Shlok Kyal, Peter Smith) + § + § + § + § + + + + If the publication specifies a column list, all specified + columns, generated and non-generated, are published. + Without a specified column list, publication option + publish_generated_columns controls whether + generated columns are published. Previously generated columns + were not replicated and the subscriber had to compute + the values if possible; this is particularly useful for + non-PostgreSQL subscribers which lack + such a capability. + + - - -Change the default CREATE SUBSCRIPTION streaming option from "off" to "parallel" (Vignesh C) -§ - - + + + Change the default streaming + option from off to parallel + (Vignesh C) + § + + - - -Allow ALTER SUBSCRIPTION to change the replication slot's two-phase commit behavior (Hayato Kuroda, Ajin Cherian, Amit Kapila, Zhijie Hou) -§ -§ - - + + + Allow to change the + replication slot's two-phase commit behavior (Hayato Kuroda, Ajin + Cherian, Amit Kapila, Zhijie Hou) + § + § + + - - -Log conflicts while applying logical replication changes (Zhijie Hou, Nisha Moond) -§ -§ -§ -§ -§ - + + + Log conflicts while + applying logical replication changes (Zhijie Hou, Nisha Moond) + § + § + § + § + § + - -Also report in new columns of pg_stat_subscription_stats. - - + + Also report in new columns of pg_stat_subscription_stats. + + @@ -1444,112 +1674,136 @@ Author: Richard Guo 2025-02-25 [1e4351af3] Expand virtual generated columns in the planner --> - - -Allow generated columns to be virtual, and make them the default (Peter Eisentraut, Jian He, Richard Guo, Dean Rasheed) -§ -§ -§ - + + + Allow generated + columns to be virtual, and make them the default (Peter + Eisentraut, Jian He, Richard Guo, Dean Rasheed) + § + § + § + - -Virtual generated columns generate their values when the columns are read, not written. The write behavior can still be specified via the STORED option. - - + + Virtual generated columns generate their values when the columns + are read, not written. The write behavior can still be specified + via the STORED option. + + - - -Add OLD/NEW support to RETURNING in DML queries (Dean Rasheed) -§ - + + + Add OLD/NEW support to RETURNING in + DML queries (Dean Rasheed) + § + - -Previously RETURNING only returned new values for INSERT and UPDATE, and old values for DELETE; MERGE would return the appropriate value for the internal query executed. This new syntax -allows the RETURNING list of INSERT/UPDATE/DELETE/MERGE to explicitly return old and new values by using the special aliases "old" and "new". These aliases can be renamed to -avoid identifier conflicts. - - + + Previously RETURNING only returned new values for + and , and old + values for ; + would return the appropriate value for the internal query executed. 
+ This new syntax allows the RETURNING list of + INSERT/UPDATE/DELETE/MERGE + to explicitly return old and new values by using the special aliases + old and new. These aliases + can be renamed to avoid identifier conflicts. + + - - -Allow foreign tables to be created like existing local tables (Zhang Mingli) -§ - + + + Allow foreign tables to be created like existing local tables + (Zhang Mingli) + § + - -The syntax is CREATE FOREIGN TABLE ... LIKE. - - + + The syntax is CREATE + FOREIGN TABLE ... LIKE. + + - - -Allow LIKE with nondeterministic collations (Peter Eisentraut) -§ - - + + + Allow LIKE + with nondeterministic + collations (Peter Eisentraut) + § + + - - -Allow text position search functions with nondeterministic collations (Peter Eisentraut) -§ - + + + Allow text position search functions with nondeterministic collations + (Peter Eisentraut) + § + - -These used to generate an error. - - + + These used to generate an error. + + - - -Add builtin collation provider PG_UNICODE_FAST (Jeff Davis) -§ - + + + Add builtin collation provider PG_UNICODE_FAST + (Jeff Davis) + § + - -This locale supports case mapping, but sorts in code point order, not natural language order. - - + + This locale supports case mapping, but sorts in code point order, + not natural language order. + + - - -Allow VACUUM and ANALYZE to process partitioned tables without processing their children (Michael Harris) -§ - + + + Allow and + to process partitioned tables without processing their children + (Michael Harris) + § + - -This is enabled with the new ONLY option. This is useful since autovacuum does not process partitioned tables, just its children. - - + + This is enabled with the new ONLY option. This is + useful since autovacuum does not process partitioned tables, just + its children. + + - - -Add functions to modify per-relation and per-column optimizer statistics (Corey Huinker) -§ -§ -§ - + + + Add functions to modify per-relation and per-column optimizer + statistics (Corey Huinker) + § + § + § + - -The functions are pg_restore_relation_stats(), pg_restore_attribute_stats(), pg_clear_relation_stats(), and pg_clear_attribute_stats. - - + + The functions are pg_restore_relation_stats(), + pg_restore_attribute_stats(), + pg_clear_relation_stats(), and + pg_clear_attribute_stats(). + + - - -Add server variable file_copy_method to control the file copying method (Nazir Bilal Yavuz) -§ - + + + Add server variable to control + the file copying method (Nazir Bilal Yavuz) + § + - -This controls whether CREATE DATABASE ... STRATEGY=FILE_COPY and ALTER DATABASE ... SET TABLESPACE uses file copy or clone. - - + + This controls whether CREATE DATABASE + ... STRATEGY=FILE_COPY and ALTER DATABASE ... SET + TABLESPACE uses file copy or clone. + + <link linkend="ddl-constraints">Constraints</link> - + - - -Allow the specification of non-overlapping PRIMARY KEY and UNIQUE constraints (Paul A. Jungwirth) -§ - + + + Allow the specification of non-overlapping PRIMARY + KEY and UNIQUE + constraints (Paul A. Jungwirth) + § + - -This is specified by WITHOUT OVERLAPS on the last column. - - + + This is specified by WITHOUT OVERLAPS on the + last specified column. + + - - -Allow CHECK and foreign key constraints to be specified as NOT ENFORCED (Amul Sul) -§ -§ - + + + Allow CHECK + and foreign + key constraints to be specified as NOT + ENFORCED (Amul Sul) + § + § + - -This also adds column pg_constraint.conenforced. - - + + This also adds column pg_constraint.conenforced. 
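
A sketch tying together several of the items above: the old/new RETURNING aliases, a temporal primary key, and an unenforced constraint. The table names are hypothetical, and WITHOUT OVERLAPS is assumed to need the btree_gist extension for the scalar part of the key:

    -- Old and new values from a single UPDATE.
    UPDATE prices SET price = price * 1.10
    RETURNING old.price AS old_price, new.price AS new_price;

    -- Temporal primary key: equal rooms must not have overlapping ranges.
    CREATE EXTENSION IF NOT EXISTS btree_gist;
    CREATE TABLE reservation (
        room   int,
        during tstzrange,
        PRIMARY KEY (room, during WITHOUT OVERLAPS)
    );

    -- Constraint recorded in the catalog but not checked on writes.
    ALTER TABLE reservation
        ADD CONSTRAINT room_positive CHECK (room > 0) NOT ENFORCED;
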
+ + - - -Require primary/foreign key relationships to use either deterministic collations or the the same nondeterministic collations (Peter Eisentraut) -§ - + + + Require primary/foreign key + relationships to use either deterministic collations or the the + same nondeterministic collations (Peter Eisentraut) + § + - -The restore of a pg_dump, also used by pg_upgrade, will fail if these requirements are not met; schema changes must be made for these upgrade methods to succeed. - - + + The restore of a , also used by , will fail if these requirements are not met; + schema changes must be made for these upgrade methods to succeed. + + - - -Store column NOT NULL specifications in pg_constraint (Álvaro Herrera, Bernd Helmle) -§ - + + + Store column NOT + NULL specifications in pg_constraint + (Álvaro Herrera, Bernd Helmle) + § + - -This allows names to be specified for NOT NULL constraint. This also adds NOT NULL constraints to foreign tables and NOT NULL inheritance control to local tables. - - + + This allows names to be specified for NOT NULL + constraint. This also adds NOT NULL constraints + to foreign tables and NOT NULL inheritance + control to local tables. + + - - -Allow ALTER TABLE to set the NOT VALID attribute of NOT NULL constraints (Rushabh Lathia, Jian He) -§ - - + + + Allow to set the NOT + VALID attribute of NOT NULL constraints + (Rushabh Lathia, Jian He) + § + + - - -Allow modification of the inheritability of NOT NULL constraints (Suraj Kharage, Álvaro Herrera) -§ -§ - + + + Allow modification of the inheritability of NOT + NULL constraints (Suraj Kharage, Álvaro Herrera) + § + § + - -The syntax is ALTER TABLE ... ALTER CONSTRAINT ... [NO] INHERIT. - - + + The syntax is ALTER TABLE + ... ALTER CONSTRAINT ... [NO] INHERIT. + + - - -Allow NOT VALID foreign key constraints on partitioned tables (Amul Sul) -§ - - + + + Allow NOT VALID foreign key constraints on + partitioned tables (Amul Sul) + § + + - - -Allow dropping of constraints ONLY on partitioned tables (Álvaro Herrera) -§ - + + + Allow dropping + of constraints ONLY on partitioned tables + (Álvaro Herrera) + § + - -This was previously erroneously prohibited. - - + + This was previously erroneously prohibited. + + - + - <link linkend="sql-copy"><command>COPY</command></link> + <xref linkend="sql-copy"/> - + - - -Add REJECT_LIMIT to control the number of invalid rows COPY FROM can ignore (Atsushi Torikoshi) -§ - + + + Add REJECT_LIMIT to control the number of invalid + rows COPY FROM can ignore (Atsushi Torikoshi) + § + - -This is available when ON_ERROR = 'ignore'. - - + + This is available when ON_ERROR = 'ignore'. + + - - -Allow COPY TO to copy rows from populated materialized view (Jian He) -§ - - + + + Allow COPY TO to copy rows from populated + materialized views (Jian He) + § + + - - -Add COPY LOG_VERBOSITY level "silent" to suppress log output of ignored rows (Atsushi Torikoshi) -§ - + + + Add COPY LOG_VERBOSITY level + silent to suppress log output of ignored rows + (Atsushi Torikoshi) + § + - -This new level suppresses output for discarded input rows when on_error = 'ignore'. - - + + This new level suppresses output for discarded input rows when + on_error = 'ignore'. + + - - -Disallow COPY FREEZE on foreign tables (Nathan Bossart) -§ - + + + Disallow COPY FREEZE on foreign tables (Nathan + Bossart) + § + - -Previously, the COPY worked but the FREEZE was ignored, so disallow this command. - - + + Previously, the COPY worked but the + FREEZE was ignored, so disallow this command. 
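
The COPY changes above combine naturally. A minimal sketch, with t and /tmp/data.csv as hypothetical names:

    -- Ignore up to 10 malformed rows, and keep the log quiet about them.
    COPY t FROM '/tmp/data.csv'
        WITH (FORMAT csv, ON_ERROR ignore, REJECT_LIMIT 10, LOG_VERBOSITY silent);
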
+ + - <link linkend="sql-explain"><command>EXPLAIN</command></link> + <xref linkend="sql-explain"/> @@ -1804,36 +2105,39 @@ Author: David Rowley 2024-12-11 [c2a4078eb] Enable BUFFERS with EXPLAIN ANALYZE by default --> - - -Automatically include BUFFERS output in EXPLAIN ANALYZE (Guillaume Lelarge, David Rowley) -§ - - + + + Automatically include BUFFERS output in + EXPLAIN ANALYZE (Guillaume Lelarge, David Rowley) + § + + - - -Add full WAL buffer count to EXPLAIN (WAL) output (Bertrand Drouvot) -§ - - + + + Add full WAL buffer count to EXPLAIN + (WAL) output (Bertrand Drouvot) + § + + - - -In EXPLAIN ANALYZE, report the number of index lookups used per index scan node (Peter Geoghegan) -§ - - + + + In EXPLAIN ANALYZE, report the number of index + lookups used per index scan node (Peter Geoghegan) + § + + - - -Modify EXPLAIN to output fractional row counts (Ibrar Ahmed, Ilia Evdokimov, Robert Haas) -§ -§ - - + + + Modify EXPLAIN to output fractional row counts + (Ibrar Ahmed, Ilia Evdokimov, Robert Haas) + § + § + + - - -Add memory and disk usage details to Material, Window Aggregate, and common table expression nodes in EXPLAIN (David Rowley, Tatsuo Ishii) -§ -§ -§ -§ - - + + + Add memory and disk usage details to Material, + Window Aggregate, and common table expression + nodes to EXPLAIN output (David Rowley, Tatsuo + Ishii) + § + § + § + § + + - - -Add details about window function arguments to EXPLAIN output (Tom Lane) -§ - - + + + Add details about window function arguments to + EXPLAIN output (Tom Lane) + § + + - - -Add "Parallel Bitmap Heap Scan" worker cache statistics to EXPLAIN ANALYZE (David Geier, Heikki Linnakangas, Donghang Lin, Alena Rybakina, David Rowley) -§ - - + + + Add Parallel Bitmap Heap Scan worker cache + statistics to EXPLAIN ANALYZE (David Geier, + Heikki Linnakangas, Donghang Lin, Alena Rybakina, David Rowley) + § + + - - -Indicate disabled nodes in EXPLAIN ANALYZE output (Robert Haas, David Rowley, Laurenz Albe) -§ -§ -§ - - + + + Indicate disabled nodes in EXPLAIN ANALYZE output + (Robert Haas, David Rowley, Laurenz Albe) + § + § + § + + @@ -1932,137 +2244,159 @@ Author: Jeff Davis 2025-01-17 [286a365b9] Support Unicode full case mapping and conversion. --> - - -Improve Unicode full case mapping and conversion (Jeff Davis) -§ -§ - + + + Improve Unicode + full case mapping and conversion (Jeff Davis) + § + § + - -This adds the ability to do conditional and title case mapping, and case map single characters to multiple characters. - - + + This adds the ability to do conditional and title case mapping, + and case map single characters to multiple characters. + + - - -Allow jsonb "null" values to be cast to scalar types as NULL (Tom Lane) -§ - + + + Allow jsonb + null values to be cast to scalar types as + NULL (Tom Lane) + § + - -Previously such casts generated an error. - - + + Previously such casts generated an error. 
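
Two of the items above are easy to see from psql; the table t is hypothetical:

    -- BUFFERS output now appears without asking for it.
    EXPLAIN (ANALYZE) SELECT count(*) FROM t;

    -- A jsonb null now becomes a SQL NULL instead of raising an error.
    SELECT ('null'::jsonb)::int;    -- returns NULL
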
+ + - - -Add optional parameter to json{b}_strip_nulls to allow removal of null array elements (Florents Tselai) -§ - - + + + Add optional parameter to json{b}_strip_nulls + to allow removal of null array elements (Florents Tselai) + § + + - - -Add function array_sort() which sorts an array's first dimension (Junwang Zhao, Jian He) -§ - - + + + Add function array_sort() + which sorts an array's first dimension (Junwang Zhao, Jian He) + § + + - - -Add function array_reverse() which reverses an array's first dimension (Aleksander Alekseev) -§ - - + + + Add function array_reverse() + which reverses an array's first dimension (Aleksander Alekseev) + § + + - - -Add function reverse() to reverse bytea bytes (Aleksander Alekseev) -§ - - + + + Add function reverse() + to reverse bytea bytes (Aleksander Alekseev) + § + + - - -Allow casting between integer types and bytea (Aleksander Alekseev) -§ - + + + Allow casting between integer types and bytea (Aleksander + Alekseev) + § + - -The integer values are stored as bytea two's complement values. - - + + The integer values are stored as bytea two's complement + values. + + - - -Update Unicode data to Unicode 16.0.0 (Peter Eisentraut) -§ - - + + + Update Unicode data to Unicode 16.0.0 (Peter + Eisentraut) + § + + - - -Add full text search stemming for Estonian (Tom Lane) -§ - - + + + Add full text search stemming for Estonian + (Tom Lane) + § + + - - -Improve the XML error codes to more closely match the SQL standard (Tom Lane) -§ - + + + Improve the XML + error codes to more closely match the SQL standard + (Tom Lane) + § + - -These errors are reported via SQLSTATE. - - + + These errors are reported via SQLSTATE. + + @@ -2078,16 +2412,20 @@ Author: Jeff Davis 2025-01-24 [bfc599206] Add SQL function CASEFOLD(). --> - - -Add function CASEFOLD() to allow for more sophisticated case-insensitive matching (Jeff Davis) -§ - + + + Add function casefold() + to allow for more sophisticated case-insensitive matching (Jeff Davis) + § + - -Allows more accurate comparison, i.e., a character can have multiple upper or lower case equivalents, or upper or lower case conversion changes the number of characters. - - + + This allows more accurate comparisons, i.e., a character can have + multiple upper or lower case equivalents, or upper or lower case + conversion changes the number of characters. + + - - -Allow MIN()/MAX() aggregates on arrays and composite types (Aleksander Alekseev, Marat Buharov) -§ -§ - - + + + Allow MIN()/MAX() + aggregates on arrays and composite types (Aleksander Alekseev, + Marat Buharov) + § + § + + - - -Add a WEEK option to EXTRACT() (Tom Lane) -§ - - + + + Add a WEEK option to EXTRACT() + (Tom Lane) + § + + - - -Improve the output EXTRACT(QUARTER ...) for negative values (Tom Lane) -§ - - + + + Improve the output EXTRACT(QUARTER ...) for + negative values (Tom Lane) + § + + - - -Add roman numeral support to to_number() (Hunaid Sohail) -§ - + + + Add roman numeral support to to_number() + (Hunaid Sohail) + § + - -This is accessed via the "RN" pattern. - - + + This is accessed via the RN pattern. + + - - -Add UUID version 7 generation function uuidv7() (Andrey Borodin) -§ - + + + Add UUID + version 7 generation function uuidv7() + (Andrey Borodin) + § + - -This UUID value is temporally sortable. Function alias uuidv4() has been added to explicitly generate version 4 UUIDs. - - + + This UUID value is + temporally sortable. Function alias uuidv4() + has been added to explicitly generate version 4 UUIDs. 
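
A few of the new built-ins above, runnable as-is; the casefold() result shown assumes a database whose default collation supports full case folding, such as one using the new builtin PG_UNICODE_FAST locale:

    SELECT array_sort(ARRAY[3,1,2]);         -- {1,2,3}
    SELECT array_reverse(ARRAY[1,2,3]);      -- {3,2,1}
    SELECT reverse('\xdeadbeef'::bytea);     -- \xefbeadde
    SELECT 255::bytea;                       -- \x000000ff, two's complement
    SELECT casefold('Straße');               -- strasse: one char may fold to several
    SELECT uuidv7(), uuidv4();               -- temporally sortable v7, classic v4
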
+ + - - -Add functions crc32() and crc32c() to compute CRC values (Aleksander Alekseev) -§ - - + + + Add functions crc32() + and crc32c() + to compute CRC values (Aleksander Alekseev) + § + + - - -Add math functions gamma() and lgamma() (Dean Rasheed) -§ - - + + + Add math functions gamma() + and lgamma() + (Dean Rasheed) + § + + - - -Allow "=>" syntax for named cursor arguments in plpgsql (Pavel Stehule) -§ - + + + Allow => syntax for named cursor arguments in + PL/pgSQL (Pavel Stehule) + § + - -We previously only accepted ":=". - - + + We previously only accepted :=. + + - - -Allow regexp_match[es]/regexp_like/regexp_replace/regexp_count/regexp_instr/regexp_substr/regexp_split_to_table/regexp_split_to_array() to use named arguments (Jian He) -§ - - + + + Allow regexp_match[es]()/regexp_like()/regexp_replace()/regexp_count()/regexp_instr()/regexp_substr()/regexp_split_to_table()/regexp_split_to_array() + to use named arguments (Jian He) + § + + - <link linkend="libpq">libpq</link> + <xref linkend="libpq"/> @@ -2226,12 +2597,15 @@ Author: Robert Haas 2024-09-09 [cdb6b0fdb] Add PQfullProtocolVersion() to surface the precise proto --> - - -Add function PQfullProtocolVersion() to report the full, including minor, protocol version number (Jacob Champion, Jelte Fennema-Nio) -§ - - + + + Add function PQfullProtocolVersion() + to report the full, including minor, protocol version number (Jacob + Champion, Jelte Fennema-Nio) + § + + - - -Add libpq connection parameters and environment variables to specify the minimum and maximum acceptable protocol version for connections (Jelte Fennema-Nio) -§ -§ - - + + + Add libpq connection parameters + and environment variables to + specify the minimum and maximum acceptable protocol version for + connections (Jelte Fennema-Nio) + § + § + + - - -Add libpq function PQservice() to return the connection service name (Michael Banck) -§ - - + + + Add libpq function PQservice() + to return the connection service name (Michael Banck) + § + + - - -Report search_path changes to the client (Alexander Kukushkin, Jelte Fennema-Nio, Tomas Vondra) -§ -§ - - + + + Report changes to the client + (Alexander Kukushkin, Jelte Fennema-Nio, Tomas Vondra) + § + § + + - - -Add PQtrace() output for all message types, including authentication (Jelte Fennema-Nio) -§ -§ -§ -§ -§ - - + + + Add PQtrace() output + for all message types, including authentication (Jelte Fennema-Nio) + § + § + § + § + § + + - - -Add libpq connection parameter sslkeylogfile which dumps out SSL key material (Abhishek Chanda, Daniel Gustafsson) -§ - + + + Add libpq connection parameter sslkeylogfile + which dumps out SSL key material (Abhishek Chanda, + Daniel Gustafsson) + § + - -This is useful for debugging. - - + + This is useful for debugging. + + - - -Modify some libpq function signatures to use int64_t (Thomas Munro) -§ - + + + Modify some libpq function signatures to use + int64_t (Thomas Munro) + § + - -These previously used pg_int64, which is now deprecated. - - + + These previously used pg_int64, which is now + deprecated. + + - <xref linkend="app-psql"/> + <xref linkend="app-psql"/> - + - - -Allow psql to parse, bind, and close named prepared statements (Anthonin Bonnefoy, Michael Paquier) -§ - + + + Allow psql to parse, bind, and close + named prepared statements (Anthonin Bonnefoy, Michael Paquier) + § + - -This is accomplished with new commands \parse, \bind_named, and \close. - - + + This is accomplished with new commands \parse, + \bind_named, + and \close. 
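
The new psql meta-commands at the end of this group drive the extended query protocol directly. A psql sketch following the pattern suggested by the notes (the statement name s1 is arbitrary), with two of the new math/CRC functions included:

    SELECT gamma(5), lgamma(5);              -- 24 and ln(24)
    SELECT crc32('PostgreSQL'::bytea);

    -- Parse, bind by name, execute, and close a named prepared statement.
    SELECT $1::int + $2::int \parse s1
    \bind_named s1 40 2
    \g
    \close s1
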
+ + - - -Add psql backslash commands to allowing issuance of pipeline queries (Anthonin Bonnefoy) -§ -§ -§ - + + + Add psql backslash commands to allowing + issuance of pipeline queries (Anthonin Bonnefoy) + § + § + § + - -The new commands are \startpipeline, \syncpipeline, \sendpipeline, \endpipeline, \flushrequest, \flush, and \getresults. - - + + The new commands are \startpipeline, + \syncpipeline, \sendpipeline, + \endpipeline, \flushrequest, + \flush, and \getresults. + + - - -Allow adding pipeline status to the psql prompt and add related state variables (Anthonin Bonnefoy) -§ - + + + Allow adding pipeline status to the psql + prompt and add related state variables (Anthonin Bonnefoy) + § + - -The new prompt character is "%P" and the new psql variables are PIPELINE_SYNC_COUNT, PIPELINE_COMMAND_COUNT, and PIPELINE_RESULT_COUNT. - - + + The new prompt character is %P and + the new psql variables are PIPELINE_SYNC_COUNT, + PIPELINE_COMMAND_COUNT, + and PIPELINE_RESULT_COUNT. + + - - -Allow adding the connection service name to the psql prompt or access it via psql variable (Michael Banck) -§ - - + + + Allow adding the connection service name to the + psql prompt or access it via + psql variable (Michael Banck) + § + + - - -Add psql option to use expanded mode on all list commands (Dean Rasheed) -§ - + + + Add psql option to use expanded mode on + all list commands (Dean Rasheed) + § + - -Adding 'x' enables this. - - + + Adding backslash suffix x enables this. + + - - -Change psql's \conninfo to use tabular format and include more information (Álvaro Herrera, Maiquel Grassi, Hunaid Sohail) -§ - - + + + Change psql's to use tabular format + and include more information (Álvaro Herrera, Maiquel Grassi, + Hunaid Sohail) + § + + - - -Add function's leakproof indicator to psql's \df+, \do+, \dAo+, and \dC+ outputs (Yugo Nagata) -§ - - + + + Add function's leakproof indicator + to psql's \df+, + \do+, \dAo+, and + \dC+ outputs (Yugo Nagata) + § + + - - -Add access method details for partitioned relations in \dP+ (Justin Pryzby) -§ - - + + + Add access method details for partitioned relations in \dP+ + (Justin Pryzby) + § + + - - -Add "default_version" to the psql \dx extension output (Magnus Hagander) -§ - - + + + Add default_version + to the psql \dx + extension output (Magnus Hagander) + § + + - - -Add psql variable WATCH_INTERVAL to set the default \watch wait time (Daniel Gustafsson) -§ - - + + + Add psql variable to set the default \watch + wait time (Daniel Gustafsson) + § + + - + - + - - Server Applications + + Server Applications - + - - -Change initdb to default to enabling checksums (Greg Sabino Mullane) -§ -§ - + + + Change to default to enabling checksums + (Greg Sabino Mullane) + § + § + - -The new initdb option --no-data-checksums disables checksums. - - + + The new initdb option + disables checksums. + + - - -Add initdb option --no-sync-data-files to avoid syncing heap/index files (Nathan Bossart) -§ - + + + Add initdb option + to avoid syncing heap/index + files (Nathan Bossart) + § + - -initdb --no-sync is still available to avoid syncing any files. - - + + initdb option + is still available to avoid syncing any files. + + - - -Add vacuumdb option --missing-stats-only to compute only missing optimizer statistics (Corey Huinker, Nathan Bossart) -§ -§ - + + + Add option + to compute only missing + optimizer statistics (Corey Huinker, Nathan Bossart) + § + § + - -This option can only be used by --analyze-only and --analyze-in-stages. 
- - + + This option can only be used by + and . + + - - -Add pg_combinebackup option -k/--link to enable hard linking (Israel Barth Rubio, Robert Haas) -§ - + + + Add option + / to enable hard linking + (Israel Barth Rubio, Robert Haas) + § + - -Only some files can be hard linked. This should not be used if the backups will be used independently. - - + + Only some files can be hard linked. This should not be used if the + backups will be used independently. + + - - -Allow pg_verifybackup to verify tar-format backups (Amul Sul) -§ - - + + + Allow to verify tar-format + backups (Amul Sul) + § + + - - -If pg_rewind's --source-server specifies a database name, use it in --write-recovery-conf output (Masahiko Sawada) -§ - - + + + If 's + specifies a database name, use it in + output (Masahiko Sawada) + § + + - - -Add pg_resetwal option --char-signedness to change the default char signedness (Masahiko Sawada) -§ - - + + + Add option + to change the default + char signedness (Masahiko Sawada) + § + + - + @@ -2613,28 +3053,34 @@ Author: Andrew Dunstan 2025-04-04 [1495eff7b] Non text modes for pg_dumpall, correspondingly change pg --> - - -Allow pg_dumpall to dump in the same output formats as pg_dump supports (Mahendra Singh Thalor, Andrew Dunstan) -§ - + + + Allow to dump in the same output + formats as pg_dump supports (Mahendra + Singh Thalor, Andrew Dunstan) + § + - -Also modify pg_restore to handle such dumps. Previously pg_dumpall only supported text format. - - + + Also modify to handle such dumps. + Previously pg_dumpall only supported + text format. + + - - -Add pg_dump options --with-schema, --with-data, and --with-statistics (Jeff Davis) -§ - - + + + Add options + , , + and (Jeff Davis) + § + + - - -Add pg_dump and pg_dumpall option --sequence-data to dump sequence data that would normally be excluded (Nathan Bossart) -§ -§ - - + + + Add pg_dump and option to + dump sequence data that would normally be excluded (Nathan Bossart) + § + § + + - - -Add pg_dump, pg_dumpall, and pg_restore options --statistics-only, --no-statistics, --no-data, and --no-schema (Corey Huinker, Jeff Davis) -§ - - + + + Add , , + and options + , , + , and + (Corey Huinker, Jeff Davis) + § + + - - -Add option --no-policies to disable row level security policy processing in pg_dump, pg_dumpall, pg_restore (Nikolay Samokhvalov) -§ - + + + Add option to disable row level + security policy processing in , + , + (Nikolay Samokhvalov) + § + - -This is useful for migrating to systems with different policies. - - + + This is useful for migrating to systems with different policies. + + - <link linkend="pgupgrade"><application>pg_upgrade</application></link> + <xref linkend="pgupgrade"/> @@ -2699,19 +3154,22 @@ Author: Jeff Davis 2025-02-20 [1fd1bd871] Transfer statistics during pg_upgrade. --> - - -Allow pg_upgrade to preserve optimizer statistics (Corey Huinker, Jeff Davis, Nathan Bossart) -§ -§ -§ -§ - + + + Allow pg_upgrade to preserve optimizer + statistics (Corey Huinker, Jeff Davis, Nathan Bossart) + § + § + § + § + - -Extended statistics are not preserved. Also add pg_upgrade option --no-statistics to disable statistics preservation. - - + + Extended statistics are not preserved. Also add + pg_upgrade option + to disable statistics preservation. + + - - -Allow pg_upgrade to process database checks in parallel (Nathan Bossart) -§ -§ -§ -§ -§ -§ -§ -§ -§ -§ -§ - - - -This is controlled by the existing --jobs option. 
- - + + + Allow pg_upgrade to process database + checks in parallel (Nathan Bossart) + § + § + § + § + § + § + § + § + § + § + § + + + + This is controlled by the existing option. + + - - -Add pg_upgrade option --swap to swap directories rather than copy, clone, or link files (Nathan Bossart) -§ - + + + Add pg_upgrade option + to swap directories rather than copy, clone, + or link files (Nathan Bossart) + § + - -This mode is potentially the fastest. - - + + This mode is potentially the fastest. + + - - -Add pg_upgrade option --set-char-signedness to set the default char signedness of new cluster (Masahiko Sawada) -§ -§ - + + + Add pg_upgrade option + to set the default + char signedness of new cluster (Masahiko Sawada) + § + § + - -This is to handle cases where a pre-Postgres 18 cluster's default CPU signedness does not match the new cluster. - - + + This is to handle cases where a + pre-PostgreSQL 18 cluster's default + CPU signedness does not match the new cluster. + + @@ -2808,60 +3273,68 @@ Author: Amit Kapila 2025-03-28 [fb2ea12f4] pg_createsubscriber: Add '- -all' option. --> - - -Add pg_createsubscriber option --all to create logical replicas for all databases (Shubham Khanna) -§ - - + + + Add option + to create logical replicas for all databases + (Shubham Khanna) + § + + - - -Add pg_createsubscriber option --remove to remove publications (Shubham Khanna) -§ - - + + + Add pg_createsubscriber option + to remove publications (Shubham Khanna) + § + + - - -Add pg_createsubscriber option --enable-two-phase to enable prepared transactions (Shubham Khanna) -§ - - + + + Add pg_createsubscriber option + to enable prepared transactions + (Shubham Khanna) + § + + - - -Add pg_recvlogical option --failover to specify failover slots (Hayato Kuroda) -§ - - + + + Add option + to specify failover slots (Hayato Kuroda) + § + + - - -Allow pg_recvlogical --drop-slot to work without --dbname (Hayato Kuroda) -§ - - + + + Allow pg_recvlogical + to work without + (Hayato Kuroda) + § + + @@ -2881,65 +3354,76 @@ Author: Michael Paquier 2024-07-18 [a0a5869a8] Add INJECTION_POINT_CACHED() to run injection points dir --> - - -Separate the loading and running of injection points (Michael Paquier, Heikki Linnakangas) -§ -§ - + + + Separate the loading and running of injection points + (Michael Paquier, Heikki Linnakangas) + § + § + - -Injection points can now be created, but not run, via INJECTION_POINT_LOAD(), and such injection points can be run via INJECTION_POINT_CACHED(). - - + + Injection points can now be created, but not run, via INJECTION_POINT_LOAD(), + and such injection points can be run via INJECTION_POINT_CACHED(). 
+ + - - -Support runtime arguments in injection points (Michael Paquier) -§ - - + + + Support runtime arguments in injection points (Michael Paquier) + § + + - - -Allow inline injection point test code with IS_INJECTION_POINT_ATTACHED() (Heikki Linnakangas) -§ - - + + + Allow inline injection point test code with IS_INJECTION_POINT_ATTACHED() + (Heikki Linnakangas) + § + + - - -Improve the performance of processing long JSON strings using SIMD instructions (David Rowley) -§ - - + + + Improve the performance of processing long JSON strings using + SIMD (Single Instruction Multiple Data) (David + Rowley) + § + + - - -Speed up CRC32C calculations using x86 AVX-512 instructions (Raghuveer Devulapalli, Paul Amonson) -§ - - + + + Speed up CRC32C calculations using x86 AVX-512 + instructions (Raghuveer Devulapalli, Paul Amonson) + § + + - - -Add ARM Neon and SVE CPU intrinsics for popcount (integer bit counting) (Chiranmoy Bhattacharya, Devanga Susmitha, Rama Malladi) -§ -§ - - + + + Add ARM Neon and SVE CPU + intrinsics for popcount (integer bit counting) (Chiranmoy + Bhattacharya, Devanga Susmitha, Rama Malladi) + § + § + + - - -Improve the speed of numeric multiplication and division (Joel Jacobson, Dean Rasheed) -§ -§ -§ -§ - - + + + Improve the speed of numeric multiplication and division (Joel + Jacobson, Dean Rasheed) + § + § + § + § + + - - -Add configure option --with-libnuma to enable NUMA awareness (Jakub Wartak, Bertrand Drouvot) -§ -§ -§ - + + + Add configure option + to enable NUMA awareness (Jakub Wartak, Bertrand + Drouvot) + § + § + § + - -The function pg_numa_available() reports on NUMA awareness, and system views pg_shmem_allocations_numa and pg_buffercache_numa which report on shared memory distribution across -NUMA nodes. - - + + The function pg_numa_available() + reports on NUMA awareness, and system views pg_shmem_allocations_numa + and pg_buffercache_numa + which report on shared memory distribution across + NUMA nodes. + + - - -Add TOAST table to pg_index to allow for very large index expression indexes (Nathan Bossart) -§ - - + + + Add TOAST table to pg_index + to allow for very large expression indexes (Nathan Bossart) + § + + - - -Remove column pg_attribute.attcacheoff (David Rowley) -§ - - + + + Remove column pg_attribute.attcacheoff + (David Rowley) + § + + - - -Add column pg_class.relallfrozen (Melanie Plageman) -§ - - + + + Add column pg_class.relallfrozen + (Melanie Plageman) + § + + - - -Add amgettreeheight, amconsistentequality, and amconsistentordering to the index access method API (Mark Dilger) -§ -§ - - + + + Add amgettreeheight, + amconsistentequality, and + amconsistentordering to the index access method + API (Mark Dilger) + § + § + + - - -Add GiST support function stratnum (Paul A. Jungwirth) -§ - - + + + Add GiST support function stratnum() + (Paul A. Jungwirth) + § + + - - -Record the default CPU signedness of "char" in pg_controldata (Masahiko Sawada) -§ - - + + + Record the default CPU signedness of + char in + (Masahiko Sawada) + § + + - - -Add support for Python "Limited API" in PL/Python (Peter Eisentraut) -§ -§ - + + + Add support for Python "Limited API" in (Peter Eisentraut) + § + § + - -This helps prevent problems caused by Python 3.x version mismatches. - - + + This helps prevent problems caused by + Python 3.x version mismatches. 
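
A sketch of the new NUMA introspection above, assuming a server built with --with-libnuma; pg_buffercache_numa additionally needs the pg_buffercache extension, and its numa_node column name is taken from the view's description rather than guaranteed here:

    SELECT pg_numa_available();

    -- Shared-memory and buffer-cache placement across NUMA nodes.
    SELECT * FROM pg_shmem_allocations_numa LIMIT 5;
    CREATE EXTENSION IF NOT EXISTS pg_buffercache;
    SELECT numa_node, count(*) FROM pg_buffercache_numa GROUP BY numa_node;
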
+ + - - -Change the minimum supported Python version to 3.6.8 (Jacob Champion) -§ - - + + + Change the minimum supported Python + version to 3.6.8 (Jacob Champion) + § + + - - -Remove support for OpenSSL versions older than 1.1.1 (Daniel Gustafsson) -§ -§ - - + + + Remove support for OpenSSL versions older + than 1.1.1 (Daniel Gustafsson) + § + § + + - - -If LLVM is enabled, require version 14 or later (Thomas Munro) -§ - - + + + If LLVM is enabled, require version 14 + or later (Thomas Munro) + § + + - - -Add macro PG_MODULE_MAGIC_EXT to allow extensions to report their name and version (Andrei Lepikhov) -§ - + + + Add macro PG_MODULE_MAGIC_EXT + to allow extensions to report their name and version (Andrei Lepikhov) + § + - -This information can be access via the new function pg_get_loaded_modules(). - - + + This information can be access via the new function pg_get_loaded_modules(). + + - - -Document that SPI_connect/SPI_connect_ext() always returns success (SPI_OK_CONNECT) (Stepan Neretin) -§ - + + + Document that SPI_connect()/SPI_connect_ext() + always returns success (SPI_OK_CONNECT) (Stepan + Neretin) + § + - -Errors are always reported via ereport(). - - + + Errors are always reported via ereport(). + + - - -Remove the experimental designation of Meson builds on Windows (Aleksander Alekseev) -§ - - + + + Add documentation + section about API and ABI + compatibility (David Wheeler, Peter Eisentraut) + § + + - - -Add documentation section about API and ABI compatibility (David Wheeler, Peter Eisentraut) -§ - - + + + Remove the experimental designation of + Meson builds on Windows (Aleksander Alekseev) + § + + - - -Remove configure options --disable-spinlocks and --disable-atomics (Thomas Munro) -§ -§ - + + + Remove configure options and + (Thomas Munro) + § + § + - -Thirty-two bit atomic operations are now required. - - + + Thirty-two-bit atomic operations are now required. + + - - -Remove support for the HPPA/PA-RISC architecture (Tom Lane) -§ - - + + + Remove support for the + HPPA/PA-RISC architecture + (Tom Lane) + § + + @@ -3234,24 +3764,27 @@ Author: Masahiko Sawada 2024-10-14 [7cdfeee32] Add contrib/pg_logicalinspect. --> - - -Add extension pg_logicalinspect to inspect logical snapshots (Bertrand Drouvot) -§ - - + + + Add extension to inspect logical + snapshots (Bertrand Drouvot) + § + + - - -Add extension pg_overexplain which adds debug details to EXPLAIN output (Robert Haas) -§ - - + + + Add extension which adds debug details + to EXPLAIN + output (Robert Haas) + § + + - - -Add output columns to postgres_fdw_get_connections() (Hayato Kuroda, Sagar Dilip Shedge) -§ -§ -§ -§ - + + + Add output columns to postgres_fdw_get_connections() + (Hayato Kuroda, Sagar Dilip Shedge) + § + § + § + § + - -New output column "used_in_xact" indicates if the foreign data wrapper is being used by a current transaction, "closed" indicates if it is closed, "user_name" indicates the -user name, and "remote_backend_pid" indicates the remote backend process identifier. - - + + New output column used_in_xact indicates + if the foreign data wrapper is being used by a current transaction, + closed indicates if it is closed, + user_name indicates the user name, and + remote_backend_pid indicates the remote + backend process identifier. 
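
Both introspection additions above are plain SQL calls; the postgres_fdw column list below follows the note's description, and assumes the extension is installed with at least one open foreign connection:

    -- Name and version of modules loaded into this backend.
    SELECT * FROM pg_get_loaded_modules();

    -- Expanded connection details from postgres_fdw.
    SELECT server_name, used_in_xact, closed, user_name, remote_backend_pid
    FROM postgres_fdw_get_connections();
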
+ + - - -Allow SCRAM authentication from the client to be passed to postgres_fdw servers (Matheus Alcantara, Peter Eisentraut) -§ - + + + Allow SCRAM + authentication from the client to be passed to servers (Matheus Alcantara, Peter Eisentraut) + § + - -This avoids storing postgres_fdw authentication information in the database, and is enabled with the postgres_fdw "use_scram_passthrough" connection option. libpq uses new connection -parameters scram_client_key and scram_server_key. - - + + This avoids storing postgres_fdw + authentication information in the database, and is + enabled with the postgres_fdw use_scram_passthrough + connection option. libpq uses new connection parameters + and . + + - - -Allow SCRAM authentication from the client to be passed to dblink servers (Matheus Alcantara) -§ - - + + + Allow SCRAM authentication from the client to be + passed to servers (Matheus Alcantara) + § + + - - -Add on_error and log_verbosity options to file_fdw (Atsushi Torikoshi) -§ - + + + Add on_error and log_verbosity + options to (Atsushi Torikoshi) + § + - -These control how file_fdw handles and reports invalid file rows. - - + + These control how file_fdw handles and + reports invalid file rows. + + - - -Add "reject_limit" to control the number of invalid rows file_fdw can ignore (Atsushi Torikoshi) -§ - + + + Add reject_limit to control the number of + invalid rows file_fdw can ignore (Atsushi + Torikoshi) + § + - -This is active when ON_ERROR = 'ignore'. - - + + This is active when ON_ERROR = 'ignore'. + + - - -Add configurable variable min_password_length to passwordcheck (Emanuele Musella, Maurizio Boriani) -§ - + + + Add configurable variable min_password_length to + (Emanuele Musella, Maurizio Boriani) + § + - -This controls the minimum password length. - - + + This controls the minimum password length. + + - - -Have pgbench report the number of failed, retried, or skipped transactions in per-script reports (Yugo Nagata) -§ - - + + + Have report the number of failed, retried, + or skipped transactions in per-script reports (Yugo Nagata) + § + + - - -Add isn server variable "weak" to control invalid check digit acceptance (Viktor Holmberg) -§ - + + + Add server variable weak + to control invalid check digit acceptance (Viktor Holmberg) + § + - -This was previously only controlled by function isn_weak(). - - + + This was previously only controlled by function isn_weak(). + + - - -Allow values to be sorted to speed btree_gist index builds (Bernd Helmle, Andrey Borodin) -§ - - + + + Allow values to be sorted to speed + index builds (Bernd Helmle, Andrey Borodin) + § + + - - -Add amcheck function gin_index_check() to verify GIN indexes (Grigory Kryachko, Heikki Linnakangas, Andrey Borodin) -§ - - + + + Add check function gin_index_check() + to verify GIN indexes (Grigory Kryachko, Heikki + Linnakangas, Andrey Borodin) + § + + - - -Add functions pg_buffercache_evict_relation() and pg_buffercache_evict_all() to evict unpinned shared buffers (Nazir Bilal Yavuz) -§ - + + + Add functions pg_buffercache_evict_relation() + and pg_buffercache_evict_all() + to evict unpinned shared buffers (Nazir Bilal Yavuz) + § + - -The existing function pg_buffercache_evict() now returns the buffer flush status. - - + + The existing function pg_buffercache_evict() + now returns the buffer flush status. 
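
A sketch combining the amcheck and pg_buffercache additions above, mirroring the new regression test earlier in this patch; the table and index names are hypothetical, and the result columns of pg_buffercache_evict_relation() are assumed from its description:

    CREATE EXTENSION IF NOT EXISTS amcheck;
    CREATE TABLE docs (tags text[]);
    CREATE INDEX docs_gin ON docs USING gin (tags);
    SELECT gin_index_check('docs_gin');      -- reports nothing for a sound index

    CREATE EXTENSION IF NOT EXISTS pg_buffercache;
    SELECT * FROM pg_buffercache_evict_relation('docs');
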
+ + - - -Allow extensions to install custom EXPLAIN options (Robert Haas, Sami Imseih) -§ -§ -§ - - + + + Allow extensions to install custom + options (Robert Haas, Sami Imseih) + § + § + § + + - - -Allow extensions to use the server's cumulative statistics API (Michael Paquier) -§ -§ - - + + + Allow extensions to use the server's cumulative statistics + API (Michael Paquier) + § + § + + - <link linkend="pgstatstatements"><application>pg_stat_statements</application></link> + <xref linkend="pgstatstatements"/> @@ -3469,67 +4036,78 @@ Author: Michael Paquier 2024-10-28 [6b652e6ce] Set query ID for inner queries of CREATE TABLE AS and DE --> - - -Allow the queries of CREATE TABLE AS and DECLARE to be tracked by pg_stat_statements (Anthonin Bonnefoy) -§ - + + + Allow the queries of + and to be tracked by + pg_stat_statements (Anthonin Bonnefoy) + § + - -They are also now assigned query ids. - - + + They are also now assigned query ids. + + - - -Allow the parameterization of SET values in pg_stat_statements (Greg Sabino Mullane, Michael Paquier) -§ - + + + Allow the parameterization of values in + pg_stat_statements (Greg Sabino Mullane, + Michael Paquier) + § + - -This reduces the bloat caused by SET statements with differing constants. - - + + This reduces the bloat caused by SET statements + with differing constants. + + - - -Add pg_stat_statements columns to report parallel activity (Guillaume Lelarge) -§ - + + + Add pg_stat_statements + columns to report parallel activity (Guillaume Lelarge) + § + - -The new columns are parallel_workers_to_launch and parallel_workers_launched. - - + + The new columns are + parallel_workers_to_launch and + parallel_workers_launched. + + - - -Add pg_stat_statements.wal_buffers_full to report full WAL buffers (Bertrand Drouvot) -§ - - + + + Add + pg_stat_statements.wal_buffers_full + to report full WAL buffers (Bertrand Drouvot) + § + + - <link linkend="pgcrypto"><application>pgcrypto</application></link> + <xref linkend="pgcrypto"/> @@ -3538,40 +4116,65 @@ Author: Álvaro Herrera 2025-04-05 [749a9e20c] Add modern SHA-2 based password hashes to pgcrypto. --> - - -Add pgcrypto functions sha256crypt() and sha512crypt() (Bernd Helmle) -§ - - + + + Add pgcrypto algorithms sha256crypt + and sha512crypt + (Bernd Helmle) + § + + - - -Add CFB mode to pgcrypto encryption and decryption (Umar Hayat) -§ - - + + + Add CFB mode + to pgcrypto encryption and decryption + (Umar Hayat) + § + + + + + + + + Add function fips_mode() + to report the server's FIPS mode (Daniel + Gustafsson) + § + + - - -Add pgcrypto server variable builtin_crypto_enabled to allow disabling builtin non-FIPS mode cryptographic functions (Daniel Gustafsson, Joe Conway) -§ - + + + Add pgcrypto server variable builtin_crypto_enabled + to allow disabling builtin non-FIPS mode + cryptographic functions (Daniel Gustafsson, Joe Conway) + § + - -This is useful for guaranteeing FIPS mode behavior. - - + + This is useful for guaranteeing FIPS mode behavior. + + diff --git a/doc/src/sgml/textsearch.sgml b/doc/src/sgml/textsearch.sgml index 908857a54af5f..89928ed182913 100644 --- a/doc/src/sgml/textsearch.sgml +++ b/doc/src/sgml/textsearch.sgml @@ -1355,7 +1355,7 @@ ts_headline( config - Warning: Cross-site scripting (XSS) safety + Warning: Cross-site Scripting (XSS) Safety The output from ts_headline is not guaranteed to be safe for direct inclusion in web pages. 
When diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c index ffd0c78f905a5..020d00cd01ce7 100644 --- a/src/backend/access/common/tupdesc.c +++ b/src/backend/access/common/tupdesc.c @@ -142,10 +142,17 @@ void verify_compact_attribute(TupleDesc tupdesc, int attnum) { #ifdef USE_ASSERT_CHECKING - CompactAttribute *cattr = &tupdesc->compact_attrs[attnum]; + CompactAttribute cattr; Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum); CompactAttribute tmp; + /* + * Make a temp copy of the TupleDesc's CompactAttribute. This may be a + * shared TupleDesc and the attcacheoff might get changed by another + * backend. + */ + memcpy(&cattr, &tupdesc->compact_attrs[attnum], sizeof(CompactAttribute)); + /* * Populate the temporary CompactAttribute from the corresponding * Form_pg_attribute @@ -156,11 +163,11 @@ verify_compact_attribute(TupleDesc tupdesc, int attnum) * Make the attcacheoff match since it's been reset to -1 by * populate_compact_attribute_internal. Same with attnullability. */ - tmp.attcacheoff = cattr->attcacheoff; - tmp.attnullability = cattr->attnullability; + tmp.attcacheoff = cattr.attcacheoff; + tmp.attnullability = cattr.attnullability; /* Check the freshly populated CompactAttribute matches the TupleDesc's */ - Assert(memcmp(&tmp, cattr, sizeof(CompactAttribute)) == 0); + Assert(memcmp(&tmp, &cattr, sizeof(CompactAttribute)) == 0); #endif } diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index fbaed5359ad7c..10f43c51c5af0 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -1100,6 +1100,7 @@ AddNewRelationType(const char *typeName, * if false, relacl is always set NULL * allow_system_table_mods: true to allow creation in system namespaces * is_internal: is this a system-generated catalog? + * relrewrite: link to original relation during a table rewrite * * Output parameters: * typaddress: if not null, gets the object address of the new pg_type entry diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index 739a92bdcc1ca..aa216683b74fe 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -3020,7 +3020,7 @@ index_build(Relation heapRelation, /* * Determine worker process details for parallel CREATE INDEX. Currently, - * only btree and BRIN have support for parallel builds. + * only btree, GIN, and BRIN have support for parallel builds. * * Note that planner considers parallel safety for us. */ diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index c3ec2076a52ef..f2898fee5fcd5 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -2592,7 +2592,9 @@ makeObjectName(const char *name1, const char *name2, const char *label) * constraint names.) * * Note: it is theoretically possible to get a collision anyway, if someone - * else chooses the same name concurrently. This is fairly unlikely to be + * else chooses the same name concurrently. We shorten the race condition + * window by checking for conflicting relations using SnapshotDirty, but + * that doesn't close the window entirely. This is fairly unlikely to be * a problem in practice, especially if one is holding an exclusive lock on * the relation identified by name1. 
However, if choosing multiple names * within a single command, you'd better create the new object and do @@ -2608,15 +2610,45 @@ ChooseRelationName(const char *name1, const char *name2, int pass = 0; char *relname = NULL; char modlabel[NAMEDATALEN]; + SnapshotData SnapshotDirty; + Relation pgclassrel; + + /* prepare to search pg_class with a dirty snapshot */ + InitDirtySnapshot(SnapshotDirty); + pgclassrel = table_open(RelationRelationId, AccessShareLock); /* try the unmodified label first */ strlcpy(modlabel, label, sizeof(modlabel)); for (;;) { + ScanKeyData key[2]; + SysScanDesc scan; + bool collides; + relname = makeObjectName(name1, name2, modlabel); - if (!OidIsValid(get_relname_relid(relname, namespaceid))) + /* is there any conflicting relation name? */ + ScanKeyInit(&key[0], + Anum_pg_class_relname, + BTEqualStrategyNumber, F_NAMEEQ, + CStringGetDatum(relname)); + ScanKeyInit(&key[1], + Anum_pg_class_relnamespace, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(namespaceid)); + + scan = systable_beginscan(pgclassrel, ClassNameNspIndexId, + true /* indexOK */ , + &SnapshotDirty, + 2, key); + + collides = HeapTupleIsValid(systable_getnext(scan)); + + systable_endscan(scan); + + /* break out of loop if no conflict */ + if (!collides) { if (!isconstraint || !ConstraintNameExists(relname, namespaceid)) @@ -2628,6 +2660,8 @@ ChooseRelationName(const char *name1, const char *name2, snprintf(modlabel, sizeof(modlabel), "%s%d", label, ++pass); } + table_close(pgclassrel, AccessShareLock); + return relname; } diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c index 26f0336f1e409..7aa8f5d799cac 100644 --- a/src/backend/optimizer/path/joinpath.c +++ b/src/backend/optimizer/path/joinpath.c @@ -876,16 +876,13 @@ try_nestloop_path(PlannerInfo *root, /* * Check to see if proposed path is still parameterized, and reject if the * parameterization wouldn't be sensible --- unless allow_star_schema_join - * says to allow it anyway. Also, we must reject if have_dangerous_phv - * doesn't like the look of it, which could only happen if the nestloop is - * still parameterized. + * says to allow it anyway. */ required_outer = calc_nestloop_required_outer(outerrelids, outer_paramrels, innerrelids, inner_paramrels); if (required_outer && - ((!bms_overlap(required_outer, extra->param_source_rels) && - !allow_star_schema_join(root, outerrelids, inner_paramrels)) || - have_dangerous_phv(root, outerrelids, inner_paramrels))) + !bms_overlap(required_outer, extra->param_source_rels) && + !allow_star_schema_join(root, outerrelids, inner_paramrels)) { /* Waste no memory when we reject a path here */ bms_free(required_outer); diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c index 60d65762b5d5e..aad41b940091d 100644 --- a/src/backend/optimizer/path/joinrels.c +++ b/src/backend/optimizer/path/joinrels.c @@ -565,9 +565,6 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, * Also, if the lateral reference is only indirect, we should reject * the join; whatever rel(s) the reference chain goes through must be * joined to first. - * - * Another case that might keep us from building a valid plan is the - * implementation restriction described by have_dangerous_phv(). 
*/ lateral_fwd = bms_overlap(rel1->relids, rel2->lateral_relids); lateral_rev = bms_overlap(rel2->relids, rel1->lateral_relids); @@ -584,9 +581,6 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, /* check there is a direct reference from rel2 to rel1 */ if (!bms_overlap(rel1->relids, rel2->direct_lateral_relids)) return false; /* only indirect refs, so reject */ - /* check we won't have a dangerous PHV */ - if (have_dangerous_phv(root, rel1->relids, rel2->lateral_relids)) - return false; /* might be unable to handle required PHV */ } else if (lateral_rev) { @@ -599,9 +593,6 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, /* check there is a direct reference from rel1 to rel2 */ if (!bms_overlap(rel2->relids, rel1->direct_lateral_relids)) return false; /* only indirect refs, so reject */ - /* check we won't have a dangerous PHV */ - if (have_dangerous_phv(root, rel2->relids, rel1->lateral_relids)) - return false; /* might be unable to handle required PHV */ } /* @@ -1278,57 +1269,6 @@ has_legal_joinclause(PlannerInfo *root, RelOptInfo *rel) } -/* - * There's a pitfall for creating parameterized nestloops: suppose the inner - * rel (call it A) has a parameter that is a PlaceHolderVar, and that PHV's - * minimum eval_at set includes the outer rel (B) and some third rel (C). - * We might think we could create a B/A nestloop join that's parameterized by - * C. But we would end up with a plan in which the PHV's expression has to be - * evaluated as a nestloop parameter at the B/A join; and the executor is only - * set up to handle simple Vars as NestLoopParams. Rather than add complexity - * and overhead to the executor for such corner cases, it seems better to - * forbid the join. (Note that we can still make use of A's parameterized - * path with pre-joined B+C as the outer rel. have_join_order_restriction() - * ensures that we will consider making such a join even if there are not - * other reasons to do so.) - * - * So we check whether any PHVs used in the query could pose such a hazard. - * We don't have any simple way of checking whether a risky PHV would actually - * be used in the inner plan, and the case is so unusual that it doesn't seem - * worth working very hard on it. - * - * This needs to be checked in two places. If the inner rel's minimum - * parameterization would trigger the restriction, then join_is_legal() should - * reject the join altogether, because there will be no workable paths for it. - * But joinpath.c has to check again for every proposed nestloop path, because - * the inner path might have more than the minimum parameterization, causing - * some PHV to be dangerous for it that otherwise wouldn't be. - */ -bool -have_dangerous_phv(PlannerInfo *root, - Relids outer_relids, Relids inner_params) -{ - ListCell *lc; - - foreach(lc, root->placeholder_list) - { - PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(lc); - - if (!bms_is_subset(phinfo->ph_eval_at, inner_params)) - continue; /* ignore, could not be a nestloop param */ - if (!bms_overlap(phinfo->ph_eval_at, outer_relids)) - continue; /* ignore, not relevant to this join */ - if (bms_is_subset(phinfo->ph_eval_at, outer_relids)) - continue; /* safe, it can be eval'd within outerrel */ - /* Otherwise, it's potentially unsafe, so reject the join */ - return true; - } - - /* OK to perform the join */ - return false; -} - - /* * is_dummy_rel --- has relation been proven empty? 
*/ diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index 4ad30b7627e6e..8baf36ba4b791 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -4348,9 +4348,11 @@ create_nestloop_plan(PlannerInfo *root, List *joinrestrictclauses = best_path->jpath.joinrestrictinfo; List *joinclauses; List *otherclauses; - Relids outerrelids; List *nestParams; + List *outer_tlist; + bool outer_parallel_safe; Relids saveOuterRels = root->curOuterRels; + ListCell *lc; /* * If the inner path is parameterized by the topmost parent of the outer @@ -4412,9 +4414,47 @@ create_nestloop_plan(PlannerInfo *root, * Identify any nestloop parameters that should be supplied by this join * node, and remove them from root->curOuterParams. */ - outerrelids = best_path->jpath.outerjoinpath->parent->relids; - nestParams = identify_current_nestloop_params(root, outerrelids); + nestParams = identify_current_nestloop_params(root, + best_path->jpath.outerjoinpath); + + /* + * While nestloop parameters that are Vars had better be available from + * the outer_plan already, there are edge cases where nestloop parameters + * that are PHVs won't be. In such cases we must add them to the + * outer_plan's tlist, since the executor's NestLoopParam machinery + * requires the params to be simple outer-Var references to that tlist. + */ + outer_tlist = outer_plan->targetlist; + outer_parallel_safe = outer_plan->parallel_safe; + foreach(lc, nestParams) + { + NestLoopParam *nlp = (NestLoopParam *) lfirst(lc); + TargetEntry *tle; + + if (IsA(nlp->paramval, Var)) + continue; /* nothing to do for simple Vars */ + if (tlist_member((Expr *) nlp->paramval, outer_tlist)) + continue; /* already available */ + + /* Make a shallow copy of outer_tlist, if we didn't already */ + if (outer_tlist == outer_plan->targetlist) + outer_tlist = list_copy(outer_tlist); + /* ... and add the needed expression */ + tle = makeTargetEntry((Expr *) copyObject(nlp->paramval), + list_length(outer_tlist) + 1, + NULL, + true); + outer_tlist = lappend(outer_tlist, tle); + /* ... and track whether tlist is (still) parallel-safe */ + if (outer_parallel_safe) + outer_parallel_safe = is_parallel_safe(root, + (Node *) nlp->paramval); + } + if (outer_tlist != outer_plan->targetlist) + outer_plan = change_plan_targetlist(outer_plan, outer_tlist, + outer_parallel_safe); + /* And finally, we can build the join plan node */ join_plan = make_nestloop(tlist, joinclauses, otherclauses, diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index ff65867eebee7..549aedcfa991a 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -6879,7 +6879,7 @@ plan_cluster_use_sort(Oid tableOid, Oid indexOid) * * tableOid is the table on which the index is to be built. indexOid is the * OID of an index to be created or reindexed (which must be an index with - * support for parallel builds - currently btree or BRIN). + * support for parallel builds - currently btree, GIN, or BRIN). * * Return value is the number of parallel worker processes to request. It * may be unsafe to proceed if this is 0. 
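The create_nestloop_plan() hunk above reduces to a check-then-append pass over the outer plan's target list that also demotes the list's parallel-safety flag when an unsafe expression is added. A toy sketch of that pass; TList and ensure_member() are invented, with strings standing in for TargetEntry expressions and error handling omitted:

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct TList
    {
        char      **items;          /* expression "nodes" */
        int         n;
        bool        parallel_safe;
    } TList;

    static void
    ensure_member(TList *tl, const char *expr, bool expr_parallel_safe)
    {
        for (int i = 0; i < tl->n; i++)
        {
            if (strcmp(tl->items[i], expr) == 0)
                return;             /* already available */
        }
        /* ... add the needed expression ... */
        tl->items = realloc(tl->items, (tl->n + 1) * sizeof(char *));
        tl->items[tl->n++] = strdup(expr);
        /* ... and track whether the tlist is (still) parallel-safe */
        tl->parallel_safe = tl->parallel_safe && expr_parallel_safe;
    }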
Note that this does not include the diff --git a/src/backend/optimizer/util/paramassign.c b/src/backend/optimizer/util/paramassign.c index 3bd3ce37c8fce..9836abf947995 100644 --- a/src/backend/optimizer/util/paramassign.c +++ b/src/backend/optimizer/util/paramassign.c @@ -600,7 +600,7 @@ process_subquery_nestloop_params(PlannerInfo *root, List *subplan_params) /* * Identify any NestLoopParams that should be supplied by a NestLoop plan - * node with the specified lefthand rels. Remove them from the active + * node with the specified lefthand input path. Remove them from the active * root->curOuterParams list and return them as the result list. * * XXX Here we also hack up the returned Vars and PHVs so that they do not @@ -626,11 +626,26 @@ process_subquery_nestloop_params(PlannerInfo *root, List *subplan_params) * subquery, which'd be unduly expensive. */ List * -identify_current_nestloop_params(PlannerInfo *root, Relids leftrelids) +identify_current_nestloop_params(PlannerInfo *root, Path *leftpath) { List *result; + Relids leftrelids = leftpath->parent->relids; + Relids outerrelids = PATH_REQ_OUTER(leftpath); + Relids allleftrelids; ListCell *cell; + /* + * We'll be able to evaluate a PHV in the lefthand path if it uses the + * lefthand rels plus any available required-outer rels. But don't do so + * if it uses *only* required-outer rels; in that case it should be + * evaluated higher in the tree. For Vars, no such hair-splitting is + * necessary since they depend on only one relid. + */ + if (outerrelids) + allleftrelids = bms_union(leftrelids, outerrelids); + else + allleftrelids = leftrelids; + result = NIL; foreach(cell, root->curOuterParams) { @@ -653,18 +668,20 @@ identify_current_nestloop_params(PlannerInfo *root, Relids leftrelids) leftrelids); result = lappend(result, nlp); } - else if (IsA(nlp->paramval, PlaceHolderVar) && - bms_is_subset(find_placeholder_info(root, - (PlaceHolderVar *) nlp->paramval)->ph_eval_at, - leftrelids)) + else if (IsA(nlp->paramval, PlaceHolderVar)) { PlaceHolderVar *phv = (PlaceHolderVar *) nlp->paramval; + Relids eval_at = find_placeholder_info(root, phv)->ph_eval_at; - root->curOuterParams = foreach_delete_current(root->curOuterParams, - cell); - phv->phnullingrels = bms_intersect(phv->phnullingrels, - leftrelids); - result = lappend(result, nlp); + if (bms_is_subset(eval_at, allleftrelids) && + bms_overlap(eval_at, leftrelids)) + { + root->curOuterParams = foreach_delete_current(root->curOuterParams, + cell); + phv->phnullingrels = bms_intersect(phv->phnullingrels, + leftrelids); + result = lappend(result, nlp); + } } } return result; diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index 676551118753d..c4299c76fb16b 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -109,10 +109,22 @@ #include "storage/procarray.h" #include "storage/sinval.h" #include "utils/builtins.h" +#include "utils/inval.h" #include "utils/memutils.h" #include "utils/rel.h" #include "utils/relfilenumbermap.h" +/* + * Each transaction has an 8MB limit for invalidation messages distributed from + * other transactions. This limit is set considering scenarios with many + * concurrent logical decoding operations. 
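The new acceptance rule in identify_current_nestloop_params() comes down to two bitmapset tests: the PHV's eval_at set must fit within the lefthand rels plus any required-outer rels, and it must actually overlap the lefthand rels. A compilable miniature of those tests, with uint64 masks standing in for the real variable-length Relids sets:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t Relids;        /* toy bitmapset: bit i represents rel i */

    static bool is_subset(Relids a, Relids b) { return (a & ~b) == 0; }
    static bool overlaps(Relids a, Relids b)  { return (a & b) != 0; }

    int
    main(void)
    {
        Relids      leftrelids = 0x3;       /* rels {1,2}: the lefthand path */
        Relids      outerrelids = 0x4;      /* rel {3}: required-outer input */
        Relids      allleftrelids = leftrelids | outerrelids;
        Relids      eval_at;

        /* PHV evaluable at {2,3}: within allleftrelids and touching the
         * lefthand rels, so this join may supply it as a nestloop param. */
        eval_at = 0x6;
        assert(is_subset(eval_at, allleftrelids) &&
               overlaps(eval_at, leftrelids));

        /* PHV evaluable at {3} only: it uses *only* required-outer rels,
         * so it must be evaluated higher in the plan tree instead. */
        eval_at = 0x4;
        assert(!(is_subset(eval_at, allleftrelids) &&
                 overlaps(eval_at, leftrelids)));
        return 0;
    }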
When the distributed invalidation + * messages reach this threshold, the transaction is marked as + * RBTXN_DISTR_INVAL_OVERFLOWED to invalidate the complete cache as we have lost + * some inval messages and hence don't know what needs to be invalidated. + */ +#define MAX_DISTR_INVAL_MSG_PER_TXN \ + ((8 * 1024 * 1024) / sizeof(SharedInvalidationMessage)) + /* entry for a hash table we use to map from xid to our transaction state */ typedef struct ReorderBufferTXNByIdEnt { @@ -472,6 +484,12 @@ ReorderBufferFreeTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) txn->invalidations = NULL; } + if (txn->invalidations_distributed) + { + pfree(txn->invalidations_distributed); + txn->invalidations_distributed = NULL; + } + /* Reset the toast hash */ ReorderBufferToastReset(rb, txn); @@ -2661,7 +2679,17 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn, AbortCurrentTransaction(); /* make sure there's no cache pollution */ - ReorderBufferExecuteInvalidations(txn->ninvalidations, txn->invalidations); + if (rbtxn_distr_inval_overflowed(txn)) + { + Assert(txn->ninvalidations_distributed == 0); + InvalidateSystemCaches(); + } + else + { + ReorderBufferExecuteInvalidations(txn->ninvalidations, txn->invalidations); + ReorderBufferExecuteInvalidations(txn->ninvalidations_distributed, + txn->invalidations_distributed); + } if (using_subtxn) RollbackAndReleaseCurrentSubTransaction(); @@ -2710,8 +2738,17 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn, AbortCurrentTransaction(); /* make sure there's no cache pollution */ - ReorderBufferExecuteInvalidations(txn->ninvalidations, - txn->invalidations); + if (rbtxn_distr_inval_overflowed(txn)) + { + Assert(txn->ninvalidations_distributed == 0); + InvalidateSystemCaches(); + } + else + { + ReorderBufferExecuteInvalidations(txn->ninvalidations, txn->invalidations); + ReorderBufferExecuteInvalidations(txn->ninvalidations_distributed, + txn->invalidations_distributed); + } if (using_subtxn) RollbackAndReleaseCurrentSubTransaction(); @@ -3060,7 +3097,8 @@ ReorderBufferAbort(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn, * We might have decoded changes for this transaction that could load * the cache as per the current transaction's view (consider DDL's * happened in this transaction). We don't want the decoding of future - * transactions to use those cache entries so execute invalidations. + * transactions to use those cache entries so execute only the inval + * messages in this transaction. */ if (txn->ninvalidations > 0) ReorderBufferImmediateInvalidation(rb, txn->ninvalidations, @@ -3147,9 +3185,10 @@ ReorderBufferForget(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn) txn->final_lsn = lsn; /* - * Process cache invalidation messages if there are any. Even if we're not - * interested in the transaction's contents, it could have manipulated the - * catalog and we need to update the caches according to that. + * Process only cache invalidation messages in this transaction if there + * are any. Even if we're not interested in the transaction's contents, it + * could have manipulated the catalog and we need to update the caches + * according to that. */ if (txn->base_snapshot != NULL && txn->ninvalidations > 0) ReorderBufferImmediateInvalidation(rb, txn->ninvalidations, @@ -3421,6 +3460,57 @@ ReorderBufferAddNewTupleCids(ReorderBuffer *rb, TransactionId xid, txn->ntuplecids++; } +/* + * Add new invalidation messages to the reorder buffer queue. 
+ */ +static void +ReorderBufferQueueInvalidations(ReorderBuffer *rb, TransactionId xid, + XLogRecPtr lsn, Size nmsgs, + SharedInvalidationMessage *msgs) +{ + ReorderBufferChange *change; + + change = ReorderBufferAllocChange(rb); + change->action = REORDER_BUFFER_CHANGE_INVALIDATION; + change->data.inval.ninvalidations = nmsgs; + change->data.inval.invalidations = (SharedInvalidationMessage *) + palloc(sizeof(SharedInvalidationMessage) * nmsgs); + memcpy(change->data.inval.invalidations, msgs, + sizeof(SharedInvalidationMessage) * nmsgs); + + ReorderBufferQueueChange(rb, xid, lsn, change, false); +} + +/* + * A helper function for ReorderBufferAddInvalidations() and + * ReorderBufferAddDistributedInvalidations() to accumulate the invalidation + * messages into *invals_out. + */ +static void +ReorderBufferAccumulateInvalidations(SharedInvalidationMessage **invals_out, + uint32 *ninvals_out, + SharedInvalidationMessage *msgs_new, + Size nmsgs_new) +{ + if (*ninvals_out == 0) + { + *ninvals_out = nmsgs_new; + *invals_out = (SharedInvalidationMessage *) + palloc(sizeof(SharedInvalidationMessage) * nmsgs_new); + memcpy(*invals_out, msgs_new, sizeof(SharedInvalidationMessage) * nmsgs_new); + } + else + { + /* Enlarge the array of inval messages */ + *invals_out = (SharedInvalidationMessage *) + repalloc(*invals_out, sizeof(SharedInvalidationMessage) * + (*ninvals_out + nmsgs_new)); + memcpy(*invals_out + *ninvals_out, msgs_new, + nmsgs_new * sizeof(SharedInvalidationMessage)); + *ninvals_out += nmsgs_new; + } +} + /* * Accumulate the invalidations for executing them later. * @@ -3441,7 +3531,6 @@ ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid, { ReorderBufferTXN *txn; MemoryContext oldcontext; - ReorderBufferChange *change; txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true); @@ -3456,35 +3545,76 @@ ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid, Assert(nmsgs > 0); - /* Accumulate invalidations. */ - if (txn->ninvalidations == 0) - { - txn->ninvalidations = nmsgs; - txn->invalidations = (SharedInvalidationMessage *) - palloc(sizeof(SharedInvalidationMessage) * nmsgs); - memcpy(txn->invalidations, msgs, - sizeof(SharedInvalidationMessage) * nmsgs); - } - else + ReorderBufferAccumulateInvalidations(&txn->invalidations, + &txn->ninvalidations, + msgs, nmsgs); + + ReorderBufferQueueInvalidations(rb, xid, lsn, nmsgs, msgs); + + MemoryContextSwitchTo(oldcontext); +} + +/* + * Accumulate the invalidations distributed by other committed transactions + * for executing them later. + * + * This function is similar to ReorderBufferAddInvalidations() but stores + * the given inval messages in txn->invalidations_distributed, with an + * overflow check. + * + * This needs to be called by committed transactions to distribute their + * inval messages to in-progress transactions. + */ +void +ReorderBufferAddDistributedInvalidations(ReorderBuffer *rb, TransactionId xid, + XLogRecPtr lsn, Size nmsgs, + SharedInvalidationMessage *msgs) +{ + ReorderBufferTXN *txn; + MemoryContext oldcontext; + + txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true); + + oldcontext = MemoryContextSwitchTo(rb->context); + + /* + * Collect all the invalidations under the top transaction, if available, + * so that we can execute them all together. See comments atop + * ReorderBufferAddInvalidations().
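Taken together, the policy for distributed invalidations is: append until the per-transaction cap is reached, then free everything and record only the fact of overflow, which later forces a full InvalidateSystemCaches() instead of replaying individual messages. A self-contained sketch of that accumulate-or-overflow logic, with plain ints standing in for SharedInvalidationMessage and a deliberately tiny cap:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAX_MSGS_PER_TXN 8      /* stands in for MAX_DISTR_INVAL_MSG_PER_TXN */

    typedef struct Txn
    {
        uint32_t    nmsgs;
        int        *msgs;           /* accumulated inval messages */
        bool        overflowed;     /* stands in for RBTXN_DISTR_INVAL_OVERFLOWED */
    } Txn;

    static void
    add_distributed(Txn *txn, const int *msgs, uint32_t nmsgs)
    {
        if (txn->overflowed)
            return;                 /* already lost track; accumulate nothing */

        if (txn->nmsgs + nmsgs >= MAX_MSGS_PER_TXN)
        {
            /* Over the cap: drop what we have and remember only the loss,
             * forcing a full cache reset when the transaction is decoded. */
            free(txn->msgs);
            txn->msgs = NULL;
            txn->nmsgs = 0;
            txn->overflowed = true;
            return;
        }

        txn->msgs = realloc(txn->msgs, (txn->nmsgs + nmsgs) * sizeof(int));
        memcpy(txn->msgs + txn->nmsgs, msgs, nmsgs * sizeof(int));
        txn->nmsgs += nmsgs;
    }

The trade is deliberate: past the cap it is enough to remember that something was lost, since over-invalidating is always safe while under-invalidating is not.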
+ */ + txn = rbtxn_get_toptxn(txn); + + Assert(nmsgs > 0); + + if (!rbtxn_distr_inval_overflowed(txn)) { - txn->invalidations = (SharedInvalidationMessage *) - repalloc(txn->invalidations, sizeof(SharedInvalidationMessage) * - (txn->ninvalidations + nmsgs)); + /* + * Check the transaction has enough space for storing distributed + * invalidation messages. + */ + if (txn->ninvalidations_distributed + nmsgs >= MAX_DISTR_INVAL_MSG_PER_TXN) + { + /* + * Mark the invalidation message as overflowed and free up the + * messages accumulated so far. + */ + txn->txn_flags |= RBTXN_DISTR_INVAL_OVERFLOWED; - memcpy(txn->invalidations + txn->ninvalidations, msgs, - nmsgs * sizeof(SharedInvalidationMessage)); - txn->ninvalidations += nmsgs; + if (txn->invalidations_distributed) + { + pfree(txn->invalidations_distributed); + txn->invalidations_distributed = NULL; + txn->ninvalidations_distributed = 0; + } + } + else + ReorderBufferAccumulateInvalidations(&txn->invalidations_distributed, + &txn->ninvalidations_distributed, + msgs, nmsgs); } - change = ReorderBufferAllocChange(rb); - change->action = REORDER_BUFFER_CHANGE_INVALIDATION; - change->data.inval.ninvalidations = nmsgs; - change->data.inval.invalidations = (SharedInvalidationMessage *) - palloc(sizeof(SharedInvalidationMessage) * nmsgs); - memcpy(change->data.inval.invalidations, msgs, - sizeof(SharedInvalidationMessage) * nmsgs); - - ReorderBufferQueueChange(rb, xid, lsn, change, false); + /* Queue the invalidation messages into the transaction */ + ReorderBufferQueueInvalidations(rb, xid, lsn, nmsgs, msgs); MemoryContextSwitchTo(oldcontext); } diff --git a/src/backend/replication/logical/slotsync.c b/src/backend/replication/logical/slotsync.c index 656e66e0ae0a1..f1dcbebfa1ae7 100644 --- a/src/backend/replication/logical/slotsync.c +++ b/src/backend/replication/logical/slotsync.c @@ -211,9 +211,9 @@ update_local_synced_slot(RemoteSlot *remote_slot, Oid remote_dbid, * impact the users, so we used DEBUG1 level to log the message. */ ereport(slot->data.persistency == RS_TEMPORARY ? 
LOG : DEBUG1, - errmsg("could not synchronize replication slot \"%s\" because remote slot precedes local slot", + errmsg("could not synchronize replication slot \"%s\"", remote_slot->name), - errdetail("The remote slot has LSN %X/%X and catalog xmin %u, but the local slot has LSN %X/%X and catalog xmin %u.", + errdetail("Synchronization could lead to data loss as the remote slot needs WAL at LSN %X/%X and catalog xmin %u, but the standby has LSN %X/%X and catalog xmin %u.", LSN_FORMAT_ARGS(remote_slot->restart_lsn), remote_slot->catalog_xmin, LSN_FORMAT_ARGS(slot->data.restart_lsn), @@ -593,7 +593,7 @@ update_and_persist_local_synced_slot(RemoteSlot *remote_slot, Oid remote_dbid) { ereport(LOG, errmsg("could not synchronize replication slot \"%s\"", remote_slot->name), - errdetail("Logical decoding could not find consistent point from local slot's LSN %X/%X.", + errdetail("Synchronization could lead to data loss as standby could not build a consistent snapshot to decode WALs at LSN %X/%X.", LSN_FORMAT_ARGS(slot->data.restart_lsn))); return false; diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c index 0d7bddbe4ed4e..adf18c397db43 100644 --- a/src/backend/replication/logical/snapbuild.c +++ b/src/backend/replication/logical/snapbuild.c @@ -794,6 +794,13 @@ SnapBuildDistributeSnapshotAndInval(SnapBuild *builder, XLogRecPtr lsn, Transact * contents built by the current transaction even after its decoding, * which should have been invalidated due to concurrent catalog * changing transaction. + * + * Distribute only the invalidation messages generated by the current + * committed transaction. Invalidation messages received from other + * transactions would have already been propagated to the relevant + * in-progress transactions. This transaction would have processed + * those invalidations, ensuring that subsequent transactions observe + * a consistent cache state. */ if (txn->xid != xid) { @@ -807,8 +814,9 @@ SnapBuildDistributeSnapshotAndInval(SnapBuild *builder, XLogRecPtr lsn, Transact { Assert(msgs != NULL); - ReorderBufferAddInvalidations(builder->reorder, txn->xid, lsn, - ninvalidations, msgs); + ReorderBufferAddDistributedInvalidations(builder->reorder, + txn->xid, lsn, + ninvalidations, msgs); } } } diff --git a/src/backend/storage/aio/aio.c b/src/backend/storage/aio/aio.c index 6c6c0a908e21f..3643f27ad6e1b 100644 --- a/src/backend/storage/aio/aio.c +++ b/src/backend/storage/aio/aio.c @@ -556,6 +556,13 @@ bool pgaio_io_was_recycled(PgAioHandle *ioh, uint64 ref_generation, PgAioHandleState *state) { *state = ioh->state; + + /* + * Ensure that we don't see an earlier state of the handle than ioh->state + * due to compiler or CPU reordering. This protects both ->generation as + * directly used here, and other fields in the handle accessed in the + * caller if the handle was not reused. + */ pg_read_barrier(); return ioh->generation != ref_generation; @@ -773,7 +780,12 @@ pgaio_io_wait_for_free(void) * Note that no interrupts are processed between the state check * and the call to reclaim - that's important as otherwise an * interrupt could have already reclaimed the handle. + * + * Need to ensure that there's no reordering, in the more common + * paths, where we wait for IO, that's done by + * pgaio_io_was_recycled(). 
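The barrier comments above describe a load-load ordering requirement: the handle's state must be read before its generation, or a handle recycled in between could be mistaken for the original. Roughly the same check in portable C11 atomics, as a sketch; the Handle type is invented, and the acquire fence plays the role of pg_read_barrier():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct Handle
    {
        _Atomic int      state;
        _Atomic uint64_t generation;    /* bumped each time the handle is reused */
    } Handle;

    /* Returns true if the handle was recycled since ref_generation was read. */
    static bool
    was_recycled(Handle *h, uint64_t ref_generation, int *state)
    {
        *state = atomic_load_explicit(&h->state, memory_order_relaxed);
        /* Keep the state load ordered before the generation load. */
        atomic_thread_fence(memory_order_acquire);
        return atomic_load_explicit(&h->generation, memory_order_relaxed)
            != ref_generation;
    }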
*/ + pg_read_barrier(); pgaio_io_reclaim(ioh); reclaimed++; } @@ -852,7 +864,12 @@ pgaio_io_wait_for_free(void) * check and the call to reclaim - that's important as * otherwise an interrupt could have already reclaimed the * handle. + * + * Need to ensure that there's no reordering, in the more + * common paths, where we wait for IO, that's done by + * pgaio_io_was_recycled(). */ + pg_read_barrier(); pgaio_io_reclaim(ioh); break; } } diff --git a/src/backend/storage/aio/aio_callback.c b/src/backend/storage/aio/aio_callback.c index 0ad9795bb7e0c..03c9bba080267 100644 --- a/src/backend/storage/aio/aio_callback.c +++ b/src/backend/storage/aio/aio_callback.c @@ -256,6 +256,9 @@ pgaio_io_call_complete_shared(PgAioHandle *ioh) pgaio_result_status_string(result.status), result.id, result.error_data, result.result); result = ce->cb->complete_shared(ioh, result, cb_data); + + /* the callback should never transition to unknown */ + Assert(result.status != PGAIO_RS_UNKNOWN); } ioh->distilled_result = result; @@ -290,6 +293,7 @@ pgaio_io_call_complete_local(PgAioHandle *ioh) /* start with distilled result from shared callback */ result = ioh->distilled_result; + Assert(result.status != PGAIO_RS_UNKNOWN); for (int i = ioh->num_callbacks; i > 0; i--) { @@ -306,6 +310,9 @@ pgaio_io_call_complete_local(PgAioHandle *ioh) pgaio_result_status_string(result.status), result.id, result.error_data, result.result); result = ce->cb->complete_local(ioh, result, cb_data); + + /* the callback should never transition to unknown */ + Assert(result.status != PGAIO_RS_UNKNOWN); } /* diff --git a/src/backend/storage/aio/method_worker.c b/src/backend/storage/aio/method_worker.c index 743cccc2acd18..36be179678d7a 100644 --- a/src/backend/storage/aio/method_worker.c +++ b/src/backend/storage/aio/method_worker.c @@ -461,7 +461,12 @@ IoWorkerMain(const void *startup_data, size_t startup_data_len) int nwakeups = 0; int worker; - /* Try to get a job to do. */ + /* + * Try to get a job to do. + * + * The lwlock acquisition also provides the necessary memory barrier + * to ensure that we don't see outdated data in the handle. + */ LWLockAcquire(AioWorkerSubmissionQueueLock, LW_EXCLUSIVE); if ((io_index = pgaio_worker_submission_queue_consume()) == UINT32_MAX) { diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c index 6d20ae07ae7b0..ba66a9c4ce63a 100644 --- a/src/backend/utils/adt/float.c +++ b/src/backend/utils/adt/float.c @@ -4065,8 +4065,8 @@ float84ge(PG_FUNCTION_ARGS) * in the histogram. width_bucket() returns an integer indicating the * bucket number that 'operand' belongs to in an equiwidth histogram * with the specified characteristics. An operand smaller than the - * lower bound is assigned to bucket 0. An operand greater than the - * upper bound is assigned to an additional bucket (with number + * lower bound is assigned to bucket 0. An operand greater than or equal + * to the upper bound is assigned to an additional bucket (with number * count+1). We don't allow "NaN" for any of the float8 inputs, and we * don't allow either of the histogram bounds to be +/- infinity.
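The corrected width_bucket() comment (mirrored for the numeric variant below) pins down both boundary cases. As executable documentation of the contract, a sketch of the spec rather than the server's implementation:

    #include <assert.h>

    /* Operand below the lower bound -> bucket 0; at or above the upper
     * bound -> overflow bucket count+1; otherwise an equi-width bucket
     * in 1..count. */
    static int
    width_bucket(double op, double low, double high, int count)
    {
        if (op < low)
            return 0;
        if (op >= high)
            return count + 1;
        return (int) (count * (op - low) / (high - low)) + 1;
    }

    int
    main(void)
    {
        assert(width_bucket(5.0, 0.0, 10.0, 5) == 3);
        assert(width_bucket(-1.0, 0.0, 10.0, 5) == 0);  /* below lower bound */
        assert(width_bucket(10.0, 0.0, 10.0, 5) == 6);  /* equals upper bound */
        return 0;
    }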
*/ diff --git a/src/backend/utils/adt/mcxtfuncs.c b/src/backend/utils/adt/mcxtfuncs.c index 396c2f223b4e1..fe6dce9cba3ec 100644 --- a/src/backend/utils/adt/mcxtfuncs.c +++ b/src/backend/utils/adt/mcxtfuncs.c @@ -38,7 +38,7 @@ typedef struct MemoryContextId { MemoryContext context; int context_id; -} MemoryContextId; +} MemoryContextId; /* * int_list_to_array diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index 40dcbc7b6710b..58ad1a65ef7b1 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -1958,9 +1958,10 @@ generate_series_numeric_support(PG_FUNCTION_ARGS) * in the histogram. width_bucket() returns an integer indicating the * bucket number that 'operand' belongs to in an equiwidth histogram * with the specified characteristics. An operand smaller than the - * lower bound is assigned to bucket 0. An operand greater than the - * upper bound is assigned to an additional bucket (with number - * count+1). We don't allow "NaN" for any of the numeric arguments. + * lower bound is assigned to bucket 0. An operand greater than or equal + * to the upper bound is assigned to an additional bucket (with number + * count+1). We don't allow "NaN" for any of the numeric inputs, and we + * don't allow either of the histogram bounds to be +/- infinity. */ Datum width_bucket_numeric(PG_FUNCTION_ARGS) diff --git a/src/bin/pg_basebackup/pg_createsubscriber.c b/src/bin/pg_basebackup/pg_createsubscriber.c index f65acc7cb1141..c43c0cbbba5a6 100644 --- a/src/bin/pg_basebackup/pg_createsubscriber.c +++ b/src/bin/pg_basebackup/pg_createsubscriber.c @@ -247,14 +247,14 @@ usage(void) printf(_(" %s [OPTION]...\n"), progname); printf(_("\nOptions:\n")); printf(_(" -a, --all create subscriptions for all databases except template\n" - " databases or databases that don't allow connections\n")); + " databases and databases that don't allow connections\n")); printf(_(" -d, --database=DBNAME database in which to create a subscription\n")); printf(_(" -D, --pgdata=DATADIR location for the subscriber data directory\n")); printf(_(" -n, --dry-run dry run, just show what would be done\n")); printf(_(" -p, --subscriber-port=PORT subscriber port number (default %s)\n"), DEFAULT_SUB_PORT); printf(_(" -P, --publisher-server=CONNSTR publisher connection string\n")); printf(_(" -R, --remove=OBJECTTYPE remove all objects of the specified type from specified\n" - " databases on the subscriber; accepts: publications\n")); + " databases on the subscriber; accepts: \"%s\"\n"), "publications"); printf(_(" -s, --socketdir=DIR socket directory to use (default current dir.)\n")); printf(_(" -t, --recovery-timeout=SECS seconds to wait for recovery to end\n")); printf(_(" -T, --enable-two-phase enable two-phase commit for all subscriptions\n")); @@ -973,7 +973,7 @@ check_publisher(const struct LogicalRepInfo *dbinfo) pg_log_warning("two_phase option will not be enabled for replication slots"); pg_log_warning_detail("Subscriptions will be created with the two_phase option disabled. 
" "Prepared transactions will be replicated at COMMIT PREPARED."); - pg_log_warning_hint("You can use --enable-two-phase switch to enable two_phase."); + pg_log_warning_hint("You can use the command-line option --enable-two-phase to enable two_phase."); } /* @@ -2143,7 +2143,7 @@ main(int argc, char **argv) if (!simple_string_list_member(&opt.objecttypes_to_remove, optarg)) simple_string_list_append(&opt.objecttypes_to_remove, optarg); else - pg_fatal("object type \"%s\" is specified more than once for -R/--remove", optarg); + pg_fatal("object type \"%s\" specified more than once for -R/--remove", optarg); break; case 's': opt.socket_dir = pg_strdup(optarg); @@ -2214,7 +2214,7 @@ main(int argc, char **argv) if (bad_switch) { - pg_log_error("%s cannot be used with -a/--all", bad_switch); + pg_log_error("options %s and -a/--all cannot be used together", bad_switch); pg_log_error_hint("Try \"%s --help\" for more information.", progname); exit(1); } @@ -2341,7 +2341,7 @@ main(int argc, char **argv) else { pg_log_error("invalid object type \"%s\" specified for -R/--remove", cell->val); - pg_log_error_hint("The valid option is: \"publications\""); + pg_log_error_hint("The valid value is: \"%s\"", "publications"); exit(1); } } diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c index e6810efe5f0d7..4b4b545917d7d 100644 --- a/src/bin/pg_basebackup/pg_recvlogical.c +++ b/src/bin/pg_basebackup/pg_recvlogical.c @@ -91,7 +91,7 @@ usage(void) printf(_("\nOptions:\n")); printf(_(" -E, --endpos=LSN exit after receiving the specified LSN\n")); printf(_(" --failover enable replication slot synchronization to standby servers when\n" - " creating a slot\n")); + " creating a replication slot\n")); printf(_(" -f, --file=FILE receive log into this file, - for stdout\n")); printf(_(" -F --fsync-interval=SECS\n" " time between fsyncs to the output file (default: %d)\n"), (fsync_interval / 1000)); diff --git a/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl b/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl index 2d532fee567dd..df4924023fdf2 100644 --- a/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl +++ b/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl @@ -399,7 +399,7 @@ sub generate_db '--database' => $db1, '--all', ], - qr/--database cannot be used with -a\/--all/, + qr/options --database and -a\/--all cannot be used together/, 'fail if --database is used with --all'); # run pg_createsubscriber with '--publication' and '--all' and verify @@ -416,7 +416,7 @@ sub generate_db '--all', '--publication' => 'pub1', ], - qr/--publication cannot be used with -a\/--all/, + qr/options --publication and -a\/--all cannot be used together/, 'fail if --publication is used with --all'); # run pg_createsubscriber with '--all' option diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 37432e66efd7c..db944ec223071 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -1235,7 +1235,7 @@ main(int argc, char **argv) static void help(const char *progname) { - printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname); + printf(_("%s exports a PostgreSQL database as an SQL script or to other formats.\n\n"), progname); printf(_("Usage:\n")); printf(_(" %s [OPTION]... 
[DBNAME]\n"), progname); @@ -6890,7 +6890,8 @@ getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages, (relkind == RELKIND_PARTITIONED_TABLE) || (relkind == RELKIND_INDEX) || (relkind == RELKIND_PARTITIONED_INDEX) || - (relkind == RELKIND_MATVIEW)) + (relkind == RELKIND_MATVIEW || + relkind == RELKIND_FOREIGN_TABLE)) { RelStatsInfo *info = pg_malloc0(sizeof(RelStatsInfo)); DumpableObject *dobj = &info->dobj; @@ -6929,6 +6930,7 @@ getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages, case RELKIND_RELATION: case RELKIND_PARTITIONED_TABLE: case RELKIND_MATVIEW: + case RELKIND_FOREIGN_TABLE: info->section = SECTION_DATA; break; case RELKIND_INDEX: @@ -6936,7 +6938,7 @@ getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages, info->section = SECTION_POST_DATA; break; default: - pg_fatal("cannot dump statistics for relation kind '%c'", + pg_fatal("cannot dump statistics for relation kind \"%c\"", info->relkind); } @@ -9461,7 +9463,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables) int i_consrc; int i_conislocal; - pg_log_info("finding invalid not null constraints"); + pg_log_info("finding invalid not-null constraints"); resetPQExpBuffer(q); appendPQExpBuffer(q, @@ -10855,7 +10857,7 @@ dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te) expected_te = expected_te->next; if (te != expected_te) - pg_fatal("stats dumped out of order (current: %d %s %s) (expected: %d %s %s)", + pg_fatal("statistics dumped out of order (current: %d %s %s, expected: %d %s %s)", te->dumpId, te->desc, te->tag, expected_te->dumpId, expected_te->desc, expected_te->tag); @@ -10996,7 +10998,7 @@ dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te) appendStringLiteralAH(out, rsinfo->dobj.name, fout); if (PQgetisnull(res, rownum, i_attname)) - pg_fatal("attname cannot be NULL"); + pg_fatal("unexpected null attname"); attname = PQgetvalue(res, rownum, i_attname); /* diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c index 7f9c302b719ec..3cbcad65c5fb5 100644 --- a/src/bin/pg_dump/pg_dumpall.c +++ b/src/bin/pg_dump/pg_dumpall.c @@ -525,7 +525,7 @@ main(int argc, char *argv[]) OPF = fopen(global_path, PG_BINARY_W); if (!OPF) - pg_fatal("could not open \"%s\": %m", global_path); + pg_fatal("could not open file \"%s\": %m", global_path); } else if (filename) { @@ -699,7 +699,7 @@ main(int argc, char *argv[]) static void help(void) { - printf(_("%s extracts a PostgreSQL database cluster based on specified dump format.\n\n"), progname); + printf(_("%s exports a PostgreSQL database cluster as an SQL script or to other formats.\n\n"), progname); printf(_("Usage:\n")); printf(_(" %s [OPTION]...\n"), progname); @@ -1659,14 +1659,14 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat) /* Create a subdirectory with 'databases' name under main directory. 
*/ if (mkdir(db_subdir, pg_dir_create_mode) != 0) - pg_fatal("could not create subdirectory \"%s\": %m", db_subdir); + pg_fatal("could not create directory \"%s\": %m", db_subdir); snprintf(map_file_path, MAXPGPATH, "%s/map.dat", filename); /* Create a map file (to store dboid and dbname) */ map_file = fopen(map_file_path, PG_BINARY_W); if (!map_file) - pg_fatal("could not open map file: %s", strerror(errno)); + pg_fatal("could not open file \"%s\": %m", map_file_path); } for (i = 0; i < PQntuples(res); i++) @@ -1976,7 +1976,7 @@ parseDumpFormat(const char *format) else if (pg_strcasecmp(format, "tar") == 0) archDumpFormat = archTar; else - pg_fatal("unrecognized archive format \"%s\"; please specify \"c\", \"d\", \"p\", or \"t\"", + pg_fatal("unrecognized output format \"%s\"; please specify \"c\", \"d\", \"p\", or \"t\"", format); return archDumpFormat; diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c index c4b6214d618cd..6ef789cb06d63 100644 --- a/src/bin/pg_dump/pg_restore.c +++ b/src/bin/pg_dump/pg_restore.c @@ -523,7 +523,7 @@ main(int argc, char **argv) */ if (!globals_only && opts->createDB != 1) { - pg_log_error("-C/--create option should be specified when restoring an archive created by pg_dumpall"); + pg_log_error("option -C/--create must be specified when restoring an archive created by pg_dumpall"); pg_log_error_hint("Try \"%s --help\" for more information.", progname); pg_log_error_hint("Individual databases can be restored using their specific archives."); exit_nicely(1); @@ -557,7 +557,7 @@ main(int argc, char **argv) if (conn) PQfinish(conn); - pg_log_info("database restoring skipped as -g/--globals-only option was specified"); + pg_log_info("database restoring skipped because option -g/--globals-only was specified"); } else { @@ -725,8 +725,8 @@ usage(const char *progname) printf(_(" --role=ROLENAME do SET ROLE before restore\n")); printf(_("\n" - "The options -I, -n, -N, -P, -t, -T, --section, and --exclude-database can be combined\n" - "and specified multiple times to select multiple objects.\n")); + "The options -I, -n, -N, -P, -t, -T, --section, and --exclude-database can be\n" + "combined and specified multiple times to select multiple objects.\n")); printf(_("\nIf no input file name is supplied, then standard input is used.\n\n")); printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT); printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL); @@ -946,7 +946,7 @@ get_dbnames_list_to_restore(PGconn *conn, query = createPQExpBuffer(); if (!conn) - pg_log_info("considering PATTERN as NAME for --exclude-database option as no db connection while doing pg_restore."); + pg_log_info("considering PATTERN as NAME for --exclude-database option as no database connection while doing pg_restore"); /* * Process one by one all dbnames and if specified to skip restoring, then @@ -992,7 +992,7 @@ get_dbnames_list_to_restore(PGconn *conn, if ((PQresultStatus(res) == PGRES_TUPLES_OK) && PQntuples(res)) { skip_db_restore = true; - pg_log_info("database \"%s\" matches exclude pattern: \"%s\"", dbidname->str, pat_cell->val); + pg_log_info("database name \"%s\" matches exclude pattern \"%s\"", dbidname->str, pat_cell->val); } PQclear(res); @@ -1048,7 +1048,7 @@ get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oi */ if (!file_exists_in_directory(dumpdirpath, "map.dat")) { - pg_log_info("database restoring is skipped as \"map.dat\" is not present in \"%s\"", dumpdirpath); + pg_log_info("database restoring is skipped because file \"%s\" 
does not exist in directory \"%s\"", "map.dat", dumpdirpath); return 0; } @@ -1058,7 +1058,7 @@ get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oi pfile = fopen(map_file_path, PG_BINARY_R); if (pfile == NULL) - pg_fatal("could not open \"%s\": %m", map_file_path); + pg_fatal("could not open file \"%s\": %m", map_file_path); initStringInfo(&linebuf); @@ -1086,10 +1086,10 @@ get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oi /* Report error and exit if the file has any corrupted data. */ if (!OidIsValid(db_oid) || namelen <= 1) - pg_fatal("invalid entry in \"%s\" at line: %d", map_file_path, + pg_fatal("invalid entry in file \"%s\" on line %d", map_file_path, count + 1); - pg_log_info("found database \"%s\" (OID: %u) in \"%s\"", + pg_log_info("found database \"%s\" (OID: %u) in file \"%s\"", dbname, db_oid, map_file_path); dbidname = pg_malloc(offsetof(DbOidName, str) + namelen + 1); @@ -1142,11 +1142,14 @@ restore_all_databases(PGconn *conn, const char *dumpdirpath, if (dbname_oid_list.head == NULL) return process_global_sql_commands(conn, dumpdirpath, opts->filename); - pg_log_info("found %d database names in \"map.dat\"", num_total_db); + pg_log_info(ngettext("found %d database name in \"%s\"", + "found %d database names in \"%s\"", + num_total_db), + num_total_db, "map.dat"); if (!conn) { - pg_log_info("trying to connect database \"postgres\""); + pg_log_info("trying to connect to database \"%s\"", "postgres"); conn = ConnectDatabase("postgres", NULL, opts->cparams.pghost, opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT, @@ -1155,7 +1158,7 @@ restore_all_databases(PGconn *conn, const char *dumpdirpath, /* Try with template1. */ if (!conn) { - pg_log_info("trying to connect database \"template1\""); + pg_log_info("trying to connect to database \"%s\"", "template1"); conn = ConnectDatabase("template1", NULL, opts->cparams.pghost, opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT, @@ -1179,7 +1182,9 @@ restore_all_databases(PGconn *conn, const char *dumpdirpath, /* Exit if no db needs to be restored. */ if (dbname_oid_list.head == NULL || num_db_restore == 0) { - pg_log_info("no database needs to restore out of %d databases", num_total_db); + pg_log_info(ngettext("no database needs restoring out of %d database", + "no database needs restoring out of %d databases", num_total_db), + num_total_db); return n_errors_total; } @@ -1314,7 +1319,7 @@ process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *o pfile = fopen(global_file_path, PG_BINARY_R); if (pfile == NULL) - pg_fatal("could not open \"%s\": %m", global_file_path); + pg_fatal("could not open file \"%s\": %m", global_file_path); /* * If outfile is given, then just copy all global.dat file data into @@ -1354,15 +1359,17 @@ process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *o break; default: n_errors++; - pg_log_error("could not execute query: \"%s\" \nCommand was: \"%s\"", PQerrorMessage(conn), sqlstatement.data); + pg_log_error("could not execute query: %s", PQerrorMessage(conn)); + pg_log_error_detail("Command was: %s", sqlstatement.data); } PQclear(result); } /* Print a summary of ignored errors during global.dat. 
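The pg_restore changes above also route count-dependent messages through ngettext(), so translators can supply distinct singular and plural forms; the count is passed twice, once to select the form and once to fill the %d. The shape of the idiom as a standalone sketch:

    #include <libintl.h>
    #include <stdio.h>

    static void
    report_found(int num_total_db)
    {
        /* ngettext() picks the singular or plural msgid based on the count;
         * without a loaded catalog it simply returns the English form. */
        printf(ngettext("found %d database name in \"%s\"\n",
                        "found %d database names in \"%s\"\n",
                        num_total_db),
               num_total_db, "map.dat");
    }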
*/ if (n_errors) - pg_log_warning("ignored %d errors in \"%s\"", n_errors, global_file_path); - + pg_log_warning(ngettext("ignored %d error in file \"%s\"", + "ignored %d errors in file \"%s\"", n_errors), + n_errors, global_file_path); fclose(pfile); return n_errors; diff --git a/src/bin/pg_dump/t/001_basic.pl b/src/bin/pg_dump/t/001_basic.pl index 84ca25e17d636..0be9f6dd538fd 100644 --- a/src/bin/pg_dump/t/001_basic.pl +++ b/src/bin/pg_dump/t/001_basic.pl @@ -261,6 +261,6 @@ command_fails_like( [ 'pg_dumpall', '--format', 'x' ], - qr/\Qpg_dumpall: error: unrecognized archive format "x";\E/, - 'pg_dumpall: unrecognized archive format'); + qr/\Qpg_dumpall: error: unrecognized output format "x";\E/, + 'pg_dumpall: unrecognized output format'); done_testing(); diff --git a/src/bin/pg_dump/t/006_pg_dumpall.pl b/src/bin/pg_dump/t/006_pg_dumpall.pl index 5acd49f1559d2..0ea02a3a4a940 100644 --- a/src/bin/pg_dump/t/006_pg_dumpall.pl +++ b/src/bin/pg_dump/t/006_pg_dumpall.pl @@ -365,7 +365,7 @@ "$tempdir/format_custom", '--format' => 'custom', '--file' => "$tempdir/error_test.sql", ], - qr/\Qpg_restore: error: -C\/--create option should be specified when restoring an archive created by pg_dumpall\E/, + qr/\Qpg_restore: error: option -C\/--create must be specified when restoring an archive created by pg_dumpall\E/, 'When -C is not used in pg_restore with dump of pg_dumpall'); # test case 2: When --list option is used with dump of pg_dumpall diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c index 940fc77fc2e8c..81865cd3e4859 100644 --- a/src/bin/pg_upgrade/check.c +++ b/src/bin/pg_upgrade/check.c @@ -885,7 +885,7 @@ check_cluster_versions(void) */ if (GET_MAJOR_VERSION(old_cluster.major_version) >= 1800 && user_opts.char_signedness != -1) - pg_fatal("%s option cannot be used to upgrade from PostgreSQL %s and later.", + pg_fatal("The option %s cannot be used for upgrades from PostgreSQL %s and later.", "--set-char-signedness", "18"); check_ok(); @@ -1934,7 +1934,7 @@ check_for_unicode_update(ClusterInfo *cluster) { fclose(report.file); report_status(PG_WARNING, "warning"); - pg_log(PG_WARNING, "Your installation contains relations that may be affected by a new version of Unicode.\n" + pg_log(PG_WARNING, "Your installation contains relations that might be affected by a new version of Unicode.\n" "A list of potentially-affected relations is in the file:\n" " %s", report.path); } diff --git a/src/bin/pg_upgrade/relfilenumber.c b/src/bin/pg_upgrade/relfilenumber.c index 2959c07f0b8d1..8d8e816a01fa4 100644 --- a/src/bin/pg_upgrade/relfilenumber.c +++ b/src/bin/pg_upgrade/relfilenumber.c @@ -290,19 +290,19 @@ prepare_for_swap(const char *old_tablespace, Oid db_oid, /* Create directory for stuff that is moved aside. */ if (pg_mkdir_p(moved_tblspc, pg_dir_create_mode) != 0 && errno != EEXIST) - pg_fatal("could not create directory \"%s\"", moved_tblspc); + pg_fatal("could not create directory \"%s\": %m", moved_tblspc); /* Create directory for old catalog files. */ if (pg_mkdir_p(old_catalog_dir, pg_dir_create_mode) != 0) - pg_fatal("could not create directory \"%s\"", old_catalog_dir); + pg_fatal("could not create directory \"%s\": %m", old_catalog_dir); /* Move the new cluster's database directory aside. */ if (rename(new_db_dir, moved_db_dir) != 0) - pg_fatal("could not rename \"%s\" to \"%s\"", new_db_dir, moved_db_dir); + pg_fatal("could not rename directory \"%s\" to \"%s\": %m", new_db_dir, moved_db_dir); /* Move the old cluster's database directory into place. 
*/ if (rename(old_db_dir, new_db_dir) != 0) - pg_fatal("could not rename \"%s\" to \"%s\"", old_db_dir, new_db_dir); + pg_fatal("could not rename directory \"%s\" to \"%s\": %m", old_db_dir, new_db_dir); return true; } @@ -390,7 +390,7 @@ swap_catalog_files(FileNameMap *maps, int size, const char *old_catalog_dir, snprintf(dest, sizeof(dest), "%s/%s", old_catalog_dir, de->d_name); if (rename(path, dest) != 0) - pg_fatal("could not rename \"%s\" to \"%s\": %m", path, dest); + pg_fatal("could not rename file \"%s\" to \"%s\": %m", path, dest); } if (errno) pg_fatal("could not read directory \"%s\": %m", new_db_dir); @@ -417,7 +417,7 @@ swap_catalog_files(FileNameMap *maps, int size, const char *old_catalog_dir, snprintf(dest, sizeof(dest), "%s/%s", new_db_dir, de->d_name); if (rename(path, dest) != 0) - pg_fatal("could not rename \"%s\" to \"%s\": %m", path, dest); + pg_fatal("could not rename file \"%s\" to \"%s\": %m", path, dest); /* * We don't fsync() the database files in the file synchronization diff --git a/src/bin/pg_upgrade/t/005_char_signedness.pl b/src/bin/pg_upgrade/t/005_char_signedness.pl index 17fa0d48b15c1..cd8cff6f5132d 100644 --- a/src/bin/pg_upgrade/t/005_char_signedness.pl +++ b/src/bin/pg_upgrade/t/005_char_signedness.pl @@ -65,7 +65,7 @@ $mode ], 1, - [qr/--set-char-signedness option cannot be used/], + [qr/option --set-char-signedness cannot be used/], [], '--set-char-signedness option cannot be used for upgrading from v18 or later' ); diff --git a/src/bin/pg_upgrade/task.c b/src/bin/pg_upgrade/task.c index a48d56913908d..ee0e245715215 100644 --- a/src/bin/pg_upgrade/task.c +++ b/src/bin/pg_upgrade/task.c @@ -192,8 +192,7 @@ start_conn(const ClusterInfo *cluster, UpgradeTaskSlot *slot) slot->conn = PQconnectStart(conn_opts.data); if (!slot->conn) - pg_fatal("failed to create connection with connection string: \"%s\"", - conn_opts.data); + pg_fatal("out of memory"); termPQExpBuffer(&conn_opts); } @@ -402,7 +401,7 @@ wait_on_slots(UpgradeTaskSlot *slots, int numslots) * If we found socket(s) to wait on, wait. */ if (select_loop(maxFd, &input, &output) == -1) - pg_fatal("select() failed: %m"); + pg_fatal("%s() failed: %m", "select"); /* * Mark which sockets appear to be ready. diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c index 1f7635d0c235c..83e84a778411a 100644 --- a/src/bin/psql/command.c +++ b/src/bin/psql/command.c @@ -877,11 +877,11 @@ exec_command_conninfo(PsqlScanState scan_state, bool active_branch) printTableAddCell(&cont, _("Backend PID"), false, false); printTableAddCell(&cont, backend_pid, false, false); - /* TLS Connection */ - printTableAddCell(&cont, _("TLS Connection"), false, false); + /* SSL Connection */ + printTableAddCell(&cont, _("SSL Connection"), false, false); printTableAddCell(&cont, ssl_in_use ? _("true") : _("false"), false, false); - /* TLS Information */ + /* SSL Information */ if (ssl_in_use) { char *library, @@ -898,19 +898,19 @@ exec_command_conninfo(PsqlScanState scan_state, bool active_branch) compression = (char *) PQsslAttribute(pset.db, "compression"); alpn = (char *) PQsslAttribute(pset.db, "alpn"); - printTableAddCell(&cont, _("TLS Library"), false, false); + printTableAddCell(&cont, _("SSL Library"), false, false); printTableAddCell(&cont, library ? library : _("unknown"), false, false); - printTableAddCell(&cont, _("TLS Protocol"), false, false); + printTableAddCell(&cont, _("SSL Protocol"), false, false); printTableAddCell(&cont, protocol ? 
protocol : _("unknown"), false, false); - printTableAddCell(&cont, _("TLS Key Bits"), false, false); + printTableAddCell(&cont, _("SSL Key Bits"), false, false); printTableAddCell(&cont, key_bits ? key_bits : _("unknown"), false, false); - printTableAddCell(&cont, _("TLS Cipher"), false, false); + printTableAddCell(&cont, _("SSL Cipher"), false, false); printTableAddCell(&cont, cipher ? cipher : _("unknown"), false, false); - printTableAddCell(&cont, _("TLS Compression"), false, false); + printTableAddCell(&cont, _("SSL Compression"), false, false); printTableAddCell(&cont, (compression && strcmp(compression, "off") != 0) ? _("true") : _("false"), false, false); @@ -1949,7 +1949,7 @@ exec_command_gexec(PsqlScanState scan_state, bool active_branch) { if (PQpipelineStatus(pset.db) != PQ_PIPELINE_OFF) { - pg_log_error("\\gexec not allowed in pipeline mode"); + pg_log_error("\\%s not allowed in pipeline mode", "gexec"); clean_extended_state(); return PSQL_CMD_ERROR; } @@ -1975,7 +1975,7 @@ exec_command_gset(PsqlScanState scan_state, bool active_branch) if (PQpipelineStatus(pset.db) != PQ_PIPELINE_OFF) { - pg_log_error("\\gset not allowed in pipeline mode"); + pg_log_error("\\%s not allowed in pipeline mode", "gset"); clean_extended_state(); return PSQL_CMD_ERROR; } @@ -3287,7 +3287,7 @@ exec_command_watch(PsqlScanState scan_state, bool active_branch, if (PQpipelineStatus(pset.db) != PQ_PIPELINE_OFF) { - pg_log_error("\\watch not allowed in pipeline mode"); + pg_log_error("\\%s not allowed in pipeline mode", "watch"); clean_extended_state(); success = false; } diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c index 24e0100c9f0a8..dd25d2fe7b8a7 100644 --- a/src/bin/psql/describe.c +++ b/src/bin/psql/describe.c @@ -296,6 +296,7 @@ describeFunctions(const char *functypes, const char *func_pattern, char **arg_patterns, int num_arg_patterns, bool verbose, bool showSystem) { + const char *df_options = "anptwSx+"; bool showAggregate = strchr(functypes, 'a') != NULL; bool showNormal = strchr(functypes, 'n') != NULL; bool showProcedure = strchr(functypes, 'p') != NULL; @@ -310,9 +311,9 @@ describeFunctions(const char *functypes, const char *func_pattern, /* No "Parallel" column before 9.6 */ static const bool translate_columns_pre_96[] = {false, false, false, false, true, true, false, true, true, false, false, false, false}; - if (strlen(functypes) != strspn(functypes, "anptwSx+")) + if (strlen(functypes) != strspn(functypes, df_options)) { - pg_log_error("\\df only takes [anptwSx+] as options"); + pg_log_error("\\df only takes [%s] as options", df_options); return true; } diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c index ce05b3a513255..db6adec8b692b 100644 --- a/src/bin/psql/help.c +++ b/src/bin/psql/help.c @@ -252,7 +252,8 @@ slashUsage(unsigned short int pager) HELP0(" \\dO[Sx+] [PATTERN] list collations\n"); HELP0(" \\dp[Sx] [PATTERN] list table, view, and sequence access privileges\n"); HELP0(" \\dP[itnx+] [PATTERN] list [only index/table] partitioned relations [n=nested]\n"); - HELP0(" \\drds[x] [ROLEPTRN [DBPTRN]] list per-database role settings\n"); + HELP0(" \\drds[x] [ROLEPTRN [DBPTRN]]\n" + " list per-database role settings\n"); HELP0(" \\drg[Sx] [PATTERN] list role grants\n"); HELP0(" \\dRp[x+] [PATTERN] list replication publications\n"); HELP0(" \\dRs[x+] [PATTERN] list replication subscriptions\n"); @@ -334,8 +335,7 @@ slashUsage(unsigned short int pager) HELP0(" \\endpipeline exit pipeline mode\n"); HELP0(" \\flush flush output data to the server\n"); HELP0(" 
\\flushrequest send request to the server to flush its output buffer\n"); - HELP0(" \\getresults [NUM_RES] read NUM_RES pending results. All pending results are\n" - " read if no argument is provided\n"); + HELP0(" \\getresults [NUM_RES] read NUM_RES pending results, or all if no argument\n"); HELP0(" \\parse STMT_NAME create a prepared statement\n"); HELP0(" \\sendpipeline send an extended query to an ongoing pipeline\n"); HELP0(" \\startpipeline enter pipeline mode\n"); diff --git a/src/bin/psql/variables.c b/src/bin/psql/variables.c index ae2d0e5ed3f47..6b64302ebca86 100644 --- a/src/bin/psql/variables.c +++ b/src/bin/psql/variables.c @@ -204,7 +204,7 @@ ParseVariableDouble(const char *value, const char *name, double *result, double if ((value == NULL) || (*value == '\0')) { if (name) - pg_log_error("invalid input syntax for \"%s\"", name); + pg_log_error("invalid input syntax for variable \"%s\"", name); return false; } @@ -215,14 +215,14 @@ ParseVariableDouble(const char *value, const char *name, double *result, double if (dblval < min) { if (name) - pg_log_error("invalid value \"%s\" for \"%s\": must be greater than %.2f", + pg_log_error("invalid value \"%s\" for variable \"%s\": must be greater than %.2f", value, name, min); return false; } else if (dblval > max) { if (name) - pg_log_error("invalid value \"%s\" for \"%s\": must be less than %.2f", + pg_log_error("invalid value \"%s\" for variable \"%s\": must be less than %.2f", value, name, max); } *result = dblval; @@ -238,13 +238,13 @@ ParseVariableDouble(const char *value, const char *name, double *result, double (dblval == 0.0 || dblval >= HUGE_VAL || dblval <= -HUGE_VAL)) { if (name) - pg_log_error("\"%s\" is out of range for \"%s\"", value, name); + pg_log_error("value \"%s\" is out of range for variable \"%s\"", value, name); return false; } else { if (name) - pg_log_error("invalid value \"%s\" for \"%s\"", value, name); + pg_log_error("invalid value \"%s\" for variable \"%s\"", value, name); return false; } } diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index e48fe434cd393..3a9424c19c9ae 100644 --- a/src/include/access/heapam.h +++ b/src/include/access/heapam.h @@ -96,7 +96,7 @@ typedef struct HeapScanDescData uint32 rs_cindex; /* current tuple's index in vistuples */ uint32 rs_ntuples; /* number of visible tuples on page */ OffsetNumber rs_vistuples[MaxHeapTuplesPerPage]; /* their offsets */ -} HeapScanDescData; +} HeapScanDescData; typedef struct HeapScanDescData *HeapScanDesc; typedef struct BitmapHeapScanDescData diff --git a/src/include/executor/nodeAgg.h b/src/include/executor/nodeAgg.h index 34b82d0f5d17d..6c4891bbaeb49 100644 --- a/src/include/executor/nodeAgg.h +++ b/src/include/executor/nodeAgg.h @@ -264,7 +264,7 @@ typedef struct AggStatePerGroupData * NULL and not auto-replace it with a later input value. Only the first * non-NULL input will be auto-substituted. 
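The variables.c messages above distinguish three failure classes when parsing a double: bad syntax, a value out of range for the type, and a value outside the caller's bounds. A condensed sketch of the underlying strtod() idiom; parse_double() is hypothetical and folds all reporting into a boolean result:

    #include <errno.h>
    #include <math.h>
    #include <stdbool.h>
    #include <stdlib.h>

    static bool
    parse_double(const char *value, double min, double max, double *result)
    {
        char       *end;
        double      d;

        if (value == NULL || *value == '\0')
            return false;           /* invalid input syntax */

        errno = 0;
        d = strtod(value, &end);

        if (errno == ERANGE &&
            (d == 0.0 || d >= HUGE_VAL || d <= -HUGE_VAL))
            return false;           /* out of range for a double */
        if (*end != '\0')
            return false;           /* trailing garbage: syntax error */
        if (d < min || d > max)
            return false;           /* violates the caller's bounds */

        *result = d;
        return true;
    }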
*/ -} AggStatePerGroupData; +} AggStatePerGroupData; /* * AggStatePerPhaseData - per-grouping-set-phase state diff --git a/src/include/optimizer/paramassign.h b/src/include/optimizer/paramassign.h index 59dcb1ff05399..d30d20de29922 100644 --- a/src/include/optimizer/paramassign.h +++ b/src/include/optimizer/paramassign.h @@ -30,7 +30,7 @@ extern Param *replace_nestloop_param_placeholdervar(PlannerInfo *root, extern void process_subquery_nestloop_params(PlannerInfo *root, List *subplan_params); extern List *identify_current_nestloop_params(PlannerInfo *root, - Relids leftrelids); + Path *leftpath); extern Param *generate_new_exec_param(PlannerInfo *root, Oid paramtype, int32 paramtypmod, Oid paramcollation); extern int assign_special_exec_param(PlannerInfo *root); diff --git a/src/include/optimizer/paths.h b/src/include/optimizer/paths.h index a48c972179712..8410531f2d640 100644 --- a/src/include/optimizer/paths.h +++ b/src/include/optimizer/paths.h @@ -109,8 +109,6 @@ extern Relids add_outer_joins_to_relids(PlannerInfo *root, Relids input_relids, List **pushed_down_joins); extern bool have_join_order_restriction(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2); -extern bool have_dangerous_phv(PlannerInfo *root, - Relids outer_relids, Relids inner_params); extern void mark_dummy_rel(RelOptInfo *rel); extern void init_dummy_sjinfo(SpecialJoinInfo *sjinfo, Relids left_relids, Relids right_relids); diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h index 24e88c409ba7e..fa0745552f86c 100644 --- a/src/include/replication/reorderbuffer.h +++ b/src/include/replication/reorderbuffer.h @@ -176,6 +176,7 @@ typedef struct ReorderBufferChange #define RBTXN_SENT_PREPARE 0x0200 #define RBTXN_IS_COMMITTED 0x0400 #define RBTXN_IS_ABORTED 0x0800 +#define RBTXN_DISTR_INVAL_OVERFLOWED 0x1000 #define RBTXN_PREPARE_STATUS_MASK (RBTXN_IS_PREPARED | RBTXN_SKIPPED_PREPARE | RBTXN_SENT_PREPARE) @@ -265,6 +266,12 @@ typedef struct ReorderBufferChange ((txn)->txn_flags & RBTXN_SKIPPED_PREPARE) != 0 \ ) +/* Is the array of distributed inval messages overflowed? */ +#define rbtxn_distr_inval_overflowed(txn) \ +( \ + ((txn)->txn_flags & RBTXN_DISTR_INVAL_OVERFLOWED) != 0 \ +) + /* Is this a top-level transaction? */ #define rbtxn_is_toptxn(txn) \ ( \ @@ -422,6 +429,12 @@ typedef struct ReorderBufferTXN uint32 ninvalidations; SharedInvalidationMessage *invalidations; + /* + * Stores cache invalidation messages distributed by other transactions. 
+ */ + uint32 ninvalidations_distributed; + SharedInvalidationMessage *invalidations_distributed; + /* --- * Position in one of two lists: * * list of subtransactions if we are *known* to be subxact @@ -738,6 +751,9 @@ extern void ReorderBufferAddNewTupleCids(ReorderBuffer *rb, TransactionId xid, CommandId cmin, CommandId cmax, CommandId combocid); extern void ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn, Size nmsgs, SharedInvalidationMessage *msgs); +extern void ReorderBufferAddDistributedInvalidations(ReorderBuffer *rb, TransactionId xid, + XLogRecPtr lsn, Size nmsgs, + SharedInvalidationMessage *msgs); extern void ReorderBufferImmediateInvalidation(ReorderBuffer *rb, uint32 ninvalidations, SharedInvalidationMessage *invalidations); extern void ReorderBufferProcessXid(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn); diff --git a/src/include/storage/aio.h b/src/include/storage/aio.h index f3726bc3dc511..e7a0a234b6cf2 100644 --- a/src/include/storage/aio.h +++ b/src/include/storage/aio.h @@ -36,7 +36,7 @@ typedef enum IoMethod #ifdef IOMETHOD_IO_URING_ENABLED IOMETHOD_IO_URING, #endif -} IoMethod; +} IoMethod; /* We'll default to worker based execution. */ #define DEFAULT_IO_METHOD IOMETHOD_WORKER diff --git a/src/include/storage/copydir.h b/src/include/storage/copydir.h index 940d74462d129..f1d7beeed1a3d 100644 --- a/src/include/storage/copydir.h +++ b/src/include/storage/copydir.h @@ -17,7 +17,7 @@ typedef enum FileCopyMethod { FILE_COPY_METHOD_COPY, FILE_COPY_METHOD_CLONE, -} FileCopyMethod; +} FileCopyMethod; /* GUC parameters */ extern PGDLLIMPORT int file_copy_method; diff --git a/src/include/storage/sinval.h b/src/include/storage/sinval.h index 5dc5aafe5c9ff..845a5851b574e 100644 --- a/src/include/storage/sinval.h +++ b/src/include/storage/sinval.h @@ -119,7 +119,7 @@ typedef struct Oid dbId; /* database ID */ Oid relid; /* relation ID, or 0 if whole * RelationSyncCache */ -} SharedInvalRelSyncMsg; +} SharedInvalRelSyncMsg; typedef union { diff --git a/src/include/tcop/backend_startup.h b/src/include/tcop/backend_startup.h index dcb9d056643f2..e8639688c00bc 100644 --- a/src/include/tcop/backend_startup.h +++ b/src/include/tcop/backend_startup.h @@ -86,7 +86,7 @@ typedef enum LogConnectionOption LOG_CONNECTION_AUTHENTICATION | LOG_CONNECTION_AUTHORIZATION | LOG_CONNECTION_SETUP_DURATIONS, -} LogConnectionOption; +} LogConnectionOption; /* * A collection of timings of various stages of connection establishment and diff --git a/src/include/utils/elog.h b/src/include/utils/elog.h index 5eac0e16970c3..675f4f5f4694d 100644 --- a/src/include/utils/elog.h +++ b/src/include/utils/elog.h @@ -485,7 +485,7 @@ typedef enum PGERROR_TERSE, /* single-line error messages */ PGERROR_DEFAULT, /* recommended style */ PGERROR_VERBOSE, /* all the facts, ma'am */ -} PGErrorVerbosity; +} PGErrorVerbosity; extern PGDLLIMPORT int Log_error_verbosity; extern PGDLLIMPORT char *Log_line_prefix; diff --git a/src/include/utils/skipsupport.h b/src/include/utils/skipsupport.h index bc51847cf617a..c42be001fb546 100644 --- a/src/include/utils/skipsupport.h +++ b/src/include/utils/skipsupport.h @@ -90,7 +90,7 @@ typedef struct SkipSupportData */ SkipSupportIncDec decrement; SkipSupportIncDec increment; -} SkipSupportData; +} SkipSupportData; extern SkipSupport PrepareSkipSupportFromOpclass(Oid opfamily, Oid opcintype, bool reverse); diff --git a/src/interfaces/libpq-oauth/.gitignore b/src/interfaces/libpq-oauth/.gitignore new file mode 100644 index 
0000000000000..a4afe7c1c6858 --- /dev/null +++ b/src/interfaces/libpq-oauth/.gitignore @@ -0,0 +1 @@ +/exports.list diff --git a/src/interfaces/libpq/fe-cancel.c b/src/interfaces/libpq/fe-cancel.c index 8c7c198a53071..cd3102346bfa7 100644 --- a/src/interfaces/libpq/fe-cancel.c +++ b/src/interfaces/libpq/fe-cancel.c @@ -114,7 +114,7 @@ PQcancelCreate(PGconn *conn) if (conn->be_cancel_key != NULL) { cancelConn->be_cancel_key = malloc(conn->be_cancel_key_len); - if (!conn->be_cancel_key) + if (cancelConn->be_cancel_key == NULL) goto oom_error; memcpy(cancelConn->be_cancel_key, conn->be_cancel_key, conn->be_cancel_key_len); } diff --git a/src/makefiles/pgxs.mk b/src/makefiles/pgxs.mk index 0de3737e789b4..039cee3dfe5d9 100644 --- a/src/makefiles/pgxs.mk +++ b/src/makefiles/pgxs.mk @@ -376,10 +376,7 @@ endif ifdef REGRESS # things created by various check targets rm -rf $(pg_regress_clean_files) -ifeq ($(PORTNAME), win) - rm -f regress.def endif -endif # REGRESS ifdef TAP_TESTS rm -rf tmp_check/ endif diff --git a/src/pl/plpython/plpy_cursorobject.c b/src/pl/plpython/plpy_cursorobject.c index 37d7efca77ce5..cc74c4df6ba67 100644 --- a/src/pl/plpython/plpy_cursorobject.c +++ b/src/pl/plpython/plpy_cursorobject.c @@ -58,9 +58,9 @@ static PyType_Slot PLyCursor_slots[] = static PyType_Spec PLyCursor_spec = { .name = "PLyCursor", - .basicsize = sizeof(PLyCursorObject), - .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, - .slots = PLyCursor_slots, + .basicsize = sizeof(PLyCursorObject), + .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + .slots = PLyCursor_slots, }; static PyTypeObject *PLy_CursorType; diff --git a/src/pl/plpython/plpy_planobject.c b/src/pl/plpython/plpy_planobject.c index 6044893afdd13..edfb76c877020 100644 --- a/src/pl/plpython/plpy_planobject.c +++ b/src/pl/plpython/plpy_planobject.c @@ -45,9 +45,9 @@ static PyType_Slot PLyPlan_slots[] = static PyType_Spec PLyPlan_spec = { .name = "PLyPlan", - .basicsize = sizeof(PLyPlanObject), - .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, - .slots = PLyPlan_slots, + .basicsize = sizeof(PLyPlanObject), + .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + .slots = PLyPlan_slots, }; static PyTypeObject *PLy_PlanType; diff --git a/src/pl/plpython/plpy_resultobject.c b/src/pl/plpython/plpy_resultobject.c index 0d9997cbaa32c..d433929b36039 100644 --- a/src/pl/plpython/plpy_resultobject.c +++ b/src/pl/plpython/plpy_resultobject.c @@ -70,9 +70,9 @@ static PyType_Slot PLyResult_slots[] = static PyType_Spec PLyResult_spec = { .name = "PLyResult", - .basicsize = sizeof(PLyResultObject), - .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, - .slots = PLyResult_slots, + .basicsize = sizeof(PLyResultObject), + .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + .slots = PLyResult_slots, }; static PyTypeObject *PLy_ResultType; diff --git a/src/pl/plpython/plpy_subxactobject.c b/src/pl/plpython/plpy_subxactobject.c index c2484a99b4ae3..c225b652ab4a5 100644 --- a/src/pl/plpython/plpy_subxactobject.c +++ b/src/pl/plpython/plpy_subxactobject.c @@ -46,9 +46,9 @@ static PyType_Slot PLySubtransaction_slots[] = static PyType_Spec PLySubtransaction_spec = { .name = "PLySubtransaction", - .basicsize = sizeof(PLySubtransactionObject), - .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, - .slots = PLySubtransaction_slots, + .basicsize = sizeof(PLySubtransactionObject), + .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + .slots = PLySubtransaction_slots, }; static PyTypeObject *PLy_SubtransactionType; diff --git a/src/port/pg_crc32c_sse42_choose.c 
b/src/port/pg_crc32c_sse42_choose.c index 74d2421ba2be9..802e47788c10c 100644 --- a/src/port/pg_crc32c_sse42_choose.c +++ b/src/port/pg_crc32c_sse42_choose.c @@ -95,7 +95,9 @@ pg_comp_crc32c_choose(pg_crc32c crc, const void *data, size_t len) __cpuidex(exx, 7, 0); #endif -#ifdef USE_AVX512_CRC32C_WITH_RUNTIME_CHECK +#if defined(__clang__) && !defined(__OPTIMIZE__) + /* Some versions of clang are broken at -O0 */ +#elif defined(USE_AVX512_CRC32C_WITH_RUNTIME_CHECK) if (exx[2] & (1 << 10) && /* VPCLMULQDQ */ exx[1] & (1 << 31)) /* AVX512-VL */ pg_comp_crc32c = pg_comp_crc32c_avx512; diff --git a/src/test/modules/test_aio/test_aio.c b/src/test/modules/test_aio/test_aio.c index 5cdfb89210b28..c55cf6c0aac05 100644 --- a/src/test/modules/test_aio/test_aio.c +++ b/src/test/modules/test_aio/test_aio.c @@ -42,9 +42,9 @@ typedef struct InjIoErrorState bool short_read_result_set; int short_read_result; -} InjIoErrorState; +} InjIoErrorState; -static InjIoErrorState * inj_io_error_state; +static InjIoErrorState *inj_io_error_state; /* Shared memory init callbacks */ static shmem_request_hook_type prev_shmem_request_hook = NULL; diff --git a/src/test/recovery/t/046_checkpoint_logical_slot.pl b/src/test/recovery/t/046_checkpoint_logical_slot.pl index b4265c4a6a53f..d67c5108d7800 100644 --- a/src/test/recovery/t/046_checkpoint_logical_slot.pl +++ b/src/test/recovery/t/046_checkpoint_logical_slot.pl @@ -21,15 +21,18 @@ $node = PostgreSQL::Test::Cluster->new('mike'); $node->init; -$node->append_conf('postgresql.conf', - "shared_preload_libraries = 'injection_points'"); $node->append_conf('postgresql.conf', "wal_level = 'logical'"); $node->start; -$node->safe_psql('postgres', q(CREATE EXTENSION injection_points)); -# Create a simple table to generate data into. -$node->safe_psql('postgres', - q{create table t (id serial primary key, b text)}); +# Check if the extension injection_points is available, as it may be +# possible that this script is run with installcheck, where the module +# would not be installed by default. +if (!$node->check_extension('injection_points')) +{ + plan skip_all => 'Extension injection_points not installed'; +} + +$node->safe_psql('postgres', q(CREATE EXTENSION injection_points)); # Create the two slots we'll need. $node->safe_psql('postgres', @@ -58,23 +61,17 @@ \q )); -# Insert 2M rows; that's about 260MB (~20 segments) worth of WAL. -$node->safe_psql('postgres', - q{insert into t (b) select md5(i::text) from generate_series(1,1000000) s(i)} -); +$node->advance_wal(20); # Run another checkpoint to set a new restore LSN. $node->safe_psql('postgres', q{checkpoint}); -# Another 2M rows; that's about 260MB (~20 segments) worth of WAL. -$node->safe_psql('postgres', - q{insert into t (b) select md5(i::text) from generate_series(1,1000000) s(i)} -); +$node->advance_wal(20); # Run another checkpoint, this time in the background, and make it wait # on the injection point) so that the checkpoint stops right before # removing old WAL segments. -note('starting checkpoint\n'); +note('starting checkpoint'); my $checkpoint = $node->background_psql('postgres'); $checkpoint->query_safe( @@ -88,7 +85,7 @@ )); # Wait until the checkpoint stops right before removing WAL segments. -note('waiting for injection_point\n'); +note('waiting for injection_point'); $node->wait_for_event('checkpointer', 'checkpoint-before-old-wal-removal'); note('injection_point is reached'); @@ -107,7 +104,7 @@ )); # Wait until the slot's restart_lsn points to the next WAL segment. 
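The guard added to this recovery test (and to 047 below) is a reusable pattern: probe for the optional extension first and skip the whole script when it is absent, instead of preloading it unconditionally. A minimal self-contained sketch, assuming only the stock PostgreSQL::Test modules; the node name and messages are illustrative, not from the patch:

    use strict;
    use warnings FATAL => 'all';

    use PostgreSQL::Test::Cluster;
    use PostgreSQL::Test::Utils;
    use Test::More;

    my $node = PostgreSQL::Test::Cluster->new('guard_demo');
    $node->init;
    $node->start;

    # Skip everything when the optional module is absent, as happens under
    # installcheck where src/test/modules extensions are not installed.
    if (!$node->check_extension('injection_points'))
    {
        plan skip_all => 'Extension injection_points not installed';
    }

    $node->safe_psql('postgres', q(CREATE EXTENSION injection_points));
    ok(1, 'injection_points extension is available');
    done_testing();
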
-note('waiting for injection_point\n'); +note('waiting for injection_point'); $node->wait_for_event('client backend', 'logical-replication-slot-advance-segment'); note('injection_point is reached'); diff --git a/src/test/recovery/t/047_checkpoint_physical_slot.pl b/src/test/recovery/t/047_checkpoint_physical_slot.pl index 454e56b9bd2da..a1332b5d44cbe 100644 --- a/src/test/recovery/t/047_checkpoint_physical_slot.pl +++ b/src/test/recovery/t/047_checkpoint_physical_slot.pl @@ -21,15 +21,18 @@ $node = PostgreSQL::Test::Cluster->new('mike'); $node->init; -$node->append_conf('postgresql.conf', - "shared_preload_libraries = 'injection_points'"); $node->append_conf('postgresql.conf', "wal_level = 'replica'"); $node->start; -$node->safe_psql('postgres', q(CREATE EXTENSION injection_points)); -# Create a simple table to generate data into. -$node->safe_psql('postgres', - q{create table t (id serial primary key, b text)}); +# Check if the extension injection_points is available, as it may be +# possible that this script is run with installcheck, where the module +# would not be installed by default. +if (!$node->check_extension('injection_points')) +{ + plan skip_all => 'Extension injection_points not installed'; +} + +$node->safe_psql('postgres', q(CREATE EXTENSION injection_points)); # Create a physical replication slot. $node->safe_psql('postgres', @@ -44,9 +47,7 @@ $node->safe_psql('postgres', q{checkpoint}); # Insert 2M rows; that's about 260MB (~20 segments) worth of WAL. -$node->safe_psql('postgres', - q{insert into t (b) select md5(i::text) from generate_series(1,100000) s(i)} -); +$node->advance_wal(20); # Advance slot to the current position, just to have everything "valid". $node->safe_psql('postgres', @@ -57,9 +58,7 @@ $node->safe_psql('postgres', q{checkpoint}); # Another 2M rows; that's about 260MB (~20 segments) worth of WAL. 
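The advance_wal() calls that replace the multi-megabyte INSERTs here switch through the requested number of WAL segments without creating any table data. A hedged sketch of the same call in isolation; the LSN probes around it are illustrative additions, not part of the patch:

    use strict;
    use warnings FATAL => 'all';

    use PostgreSQL::Test::Cluster;
    use Test::More;

    my $node = PostgreSQL::Test::Cluster->new('wal_demo');
    $node->init;
    $node->start;

    my $lsn_before = $node->safe_psql('postgres', 'SELECT pg_current_wal_lsn()');
    $node->advance_wal(20);    # switch through roughly 20 WAL segments
    my $lsn_after = $node->safe_psql('postgres', 'SELECT pg_current_wal_lsn()');
    note("WAL advanced from $lsn_before to $lsn_after");

    # Compare LSNs server-side; pg_lsn ordering is not plain string order.
    my $moved = $node->safe_psql('postgres',
        qq{SELECT '$lsn_after'::pg_lsn > '$lsn_before'::pg_lsn});
    ok($moved eq 't', 'WAL position moved forward');
    done_testing();
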
-$node->safe_psql('postgres', - q{insert into t (b) select md5(i::text) from generate_series(1,1000000) s(i)} -); +$node->advance_wal(20); my $restart_lsn_init = $node->safe_psql('postgres', q{select restart_lsn from pg_replication_slots where slot_name = 'slot_physical'} diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out index f35a0b18c37cb..c292f04fdbaab 100644 --- a/src/test/regress/expected/join.out +++ b/src/test/regress/expected/join.out @@ -3946,6 +3946,59 @@ where t1.unique2 < 42 and t1.stringu1 > t2.stringu2; (1 row) -- variant that isn't quite a star-schema case +explain (verbose, costs off) +select ss1.d1 from + tenk1 as t1 + inner join tenk1 as t2 + on t1.tenthous = t2.ten + inner join + int8_tbl as i8 + left join int4_tbl as i4 + inner join (select 64::information_schema.cardinal_number as d1 + from tenk1 t3, + lateral (select abs(t3.unique1) + random()) ss0(x) + where t3.fivethous < 0) as ss1 + on i4.f1 = ss1.d1 + on i8.q1 = i4.f1 + on t1.tenthous = ss1.d1 +where t1.unique1 < i4.f1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop + Output: (64)::information_schema.cardinal_number + Join Filter: (t1.tenthous = ((64)::information_schema.cardinal_number)::integer) + -> Seq Scan on public.tenk1 t3 + Output: t3.unique1, t3.unique2, t3.two, t3.four, t3.ten, t3.twenty, t3.hundred, t3.thousand, t3.twothousand, t3.fivethous, t3.tenthous, t3.odd, t3.even, t3.stringu1, t3.stringu2, t3.string4 + Filter: (t3.fivethous < 0) + -> Nested Loop + Output: t1.tenthous, t2.ten + -> Nested Loop + Output: t1.tenthous, t2.ten, i4.f1 + Join Filter: (t1.unique1 < i4.f1) + -> Hash Join + Output: t1.tenthous, t1.unique1, t2.ten + Hash Cond: (t2.ten = t1.tenthous) + -> Seq Scan on public.tenk1 t2 + Output: t2.unique1, t2.unique2, t2.two, t2.four, t2.ten, t2.twenty, t2.hundred, t2.thousand, t2.twothousand, t2.fivethous, t2.tenthous, t2.odd, t2.even, t2.stringu1, t2.stringu2, t2.string4 + -> Hash + Output: t1.tenthous, t1.unique1 + -> Nested Loop + Output: t1.tenthous, t1.unique1 + -> Subquery Scan on ss0 + Output: ss0.x, (64)::information_schema.cardinal_number + -> Result + Output: ((abs(t3.unique1))::double precision + random()) + -> Index Scan using tenk1_thous_tenthous on public.tenk1 t1 + Output: t1.unique1, t1.unique2, t1.two, t1.four, t1.ten, t1.twenty, t1.hundred, t1.thousand, t1.twothousand, t1.fivethous, t1.tenthous, t1.odd, t1.even, t1.stringu1, t1.stringu2, t1.string4 + Index Cond: (t1.tenthous = (((64)::information_schema.cardinal_number))::integer) + -> Seq Scan on public.int4_tbl i4 + Output: i4.f1 + Filter: (i4.f1 = ((64)::information_schema.cardinal_number)::integer) + -> Seq Scan on public.int8_tbl i8 + Output: i8.q1, i8.q2 + Filter: (i8.q1 = ((64)::information_schema.cardinal_number)::integer) +(33 rows) + select ss1.d1 from tenk1 as t1 inner join tenk1 as t2 @@ -4035,6 +4088,37 @@ select * from 1 | 2 | 2 (1 row) +-- This example demonstrates the folly of our old "have_dangerous_phv" logic +begin; +set local from_collapse_limit to 2; +explain (verbose, costs off) +select * from int8_tbl t1 + left join + (select coalesce(t2.q1 + x, 0) from int8_tbl t2, + lateral (select t3.q1 as x from int8_tbl t3, + lateral (select t2.q1, t3.q1 offset 0) s)) + on true; + QUERY PLAN +------------------------------------------------------------------ + Nested Loop 
Left Join + Output: t1.q1, t1.q2, (COALESCE((t2.q1 + t3.q1), '0'::bigint)) + -> Seq Scan on public.int8_tbl t1 + Output: t1.q1, t1.q2 + -> Materialize + Output: (COALESCE((t2.q1 + t3.q1), '0'::bigint)) + -> Nested Loop + Output: COALESCE((t2.q1 + t3.q1), '0'::bigint) + -> Seq Scan on public.int8_tbl t2 + Output: t2.q1, t2.q2 + -> Nested Loop + Output: t3.q1 + -> Seq Scan on public.int8_tbl t3 + Output: t3.q1, t3.q2 + -> Result + Output: NULL::bigint, NULL::bigint +(16 rows) + +rollback; -- Test proper handling of appendrel PHVs during useless-RTE removal explain (costs off) select * from diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql index cc5128add4df0..88d2204e4471d 100644 --- a/src/test/regress/sql/join.sql +++ b/src/test/regress/sql/join.sql @@ -1277,6 +1277,23 @@ where t1.unique2 < 42 and t1.stringu1 > t2.stringu2; -- variant that isn't quite a star-schema case +explain (verbose, costs off) +select ss1.d1 from + tenk1 as t1 + inner join tenk1 as t2 + on t1.tenthous = t2.ten + inner join + int8_tbl as i8 + left join int4_tbl as i4 + inner join (select 64::information_schema.cardinal_number as d1 + from tenk1 t3, + lateral (select abs(t3.unique1) + random()) ss0(x) + where t3.fivethous < 0) as ss1 + on i4.f1 = ss1.d1 + on i8.q1 = i4.f1 + on t1.tenthous = ss1.d1 +where t1.unique1 < i4.f1; + select ss1.d1 from tenk1 as t1 inner join tenk1 as t2 @@ -1332,6 +1349,18 @@ select * from (select 1 as x) ss1 left join (select 2 as y) ss2 on (true), lateral (select ss2.y as z limit 1) ss3; +-- This example demonstrates the folly of our old "have_dangerous_phv" logic +begin; +set local from_collapse_limit to 2; +explain (verbose, costs off) +select * from int8_tbl t1 + left join + (select coalesce(t2.q1 + x, 0) from int8_tbl t2, + lateral (select t3.q1 as x from int8_tbl t3, + lateral (select t2.q1, t3.q1 offset 0) s)) + on true; +rollback; + -- Test proper handling of appendrel PHVs during useless-RTE removal explain (costs off) select * from diff --git a/src/tools/pgindent/pgindent b/src/tools/pgindent/pgindent index 54e138b598dfe..b7d718089248e 100755 --- a/src/tools/pgindent/pgindent +++ b/src/tools/pgindent/pgindent @@ -73,11 +73,14 @@ if ($sourcedir) # might make them so. For the moment we just hardwire a list of names # to add and a list of names to exclude; eventually this may need to be # easier to configure. Note that the typedefs need trailing newlines. 
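The pgindent hunk that follows replaces a hand-written list with a map-over-qw idiom; the trailing newline it appends is what lets entries compare equal to raw lines of typedefs.list. A small standalone sketch of the idiom, with example names only:

    use strict;
    use warnings;

    # qw() yields bare names; map appends the trailing newline so entries
    # match whole lines as read from typedefs.list.
    my @additional = map { "$_\n" } qw(bool regex_t regmatch_t regoff_t);
    my %excluded = map { +"$_\n" => 1 } qw(FD_SET boolean);

    # Membership tests must therefore use the newline-terminated form:
    print "boolean is excluded\n" if $excluded{"boolean\n"};
    print "kept: $_" for grep { !$excluded{$_} } @additional;
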
-my @additional = ("bool\n"); +my @additional = map { "$_\n" } qw( + bool regex_t regmatch_t regoff_t +); my %excluded = map { +"$_\n" => 1 } qw( - ANY FD_SET U abs allocfunc boolean date digit ilist interval iterator other - pointer printfunc reference string timestamp type wrap + FD_SET LookupSet boolean date duration + element_type inquiry iterator other + pointer reference rep string timestamp type wrap ); # globals diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index a8346cda633ac..32d6e718adca0 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -6,6 +6,7 @@ ASN1_INTEGER ASN1_OBJECT ASN1_OCTET_STRING ASN1_STRING +ATAlterConstraint AV A_ArrayExpr A_Const @@ -47,7 +48,6 @@ AggSplit AggState AggStatePerAgg AggStatePerGroup -AggStatePerGroupData AggStatePerHash AggStatePerPhase AggStatePerTrans @@ -161,7 +161,6 @@ ArrayType AsyncQueueControl AsyncQueueEntry AsyncRequest -ATAlterConstraint AttInMetadata AttStatsSlot AttoptCacheEntry @@ -174,8 +173,8 @@ AttrNumber AttributeOpts AuthRequest AuthToken -AutoPrewarmSharedState AutoPrewarmReadStreamData +AutoPrewarmSharedState AutoVacOpts AutoVacuumShmemStruct AutoVacuumWorkItem @@ -222,7 +221,6 @@ BTScanInsertData BTScanKeyPreproc BTScanOpaque BTScanOpaqueData -BTScanPos BTScanPosData BTScanPosItem BTShared @@ -270,8 +268,8 @@ BitmapAndPath BitmapAndState BitmapHeapPath BitmapHeapScan -BitmapHeapScanInstrumentation BitmapHeapScanDesc +BitmapHeapScanInstrumentation BitmapHeapScanState BitmapIndexScan BitmapIndexScanState @@ -341,8 +339,8 @@ BufFile Buffer BufferAccessStrategy BufferAccessStrategyType -BufferCacheNumaRec BufferCacheNumaContext +BufferCacheNumaRec BufferCachePagesContext BufferCachePagesRec BufferDesc @@ -382,6 +380,9 @@ CTEMaterialize CTESearchClause CURL CURLM +CURLMcode +CURLMsg +CURLcode CURLoption CV CachedExpression @@ -628,6 +629,7 @@ DefElem DefElemAction DefaultACLInfo DefineStmt +DefnDumperPtr DeleteStmt DependencyGenerator DependencyGeneratorData @@ -677,9 +679,8 @@ DumpableObjectType DumpableObjectWithAcl DynamicFileList DynamicZoneAbbrev -EC_KEY -ECDerivesKey ECDerivesEntry +ECDerivesKey EDGE ENGINE EOM_flatten_into_method @@ -761,10 +762,12 @@ ExpandedRange ExpandedRecordFieldInfo ExpandedRecordHeader ExplainDirectModify_function +ExplainExtensionOption ExplainForeignModify_function ExplainForeignScan_function ExplainFormat ExplainOneQuery_hook_type +ExplainOptionHandler ExplainSerializeOption ExplainState ExplainStmt @@ -792,6 +795,7 @@ FDWCollateState FD_SET FILE FILETIME +FPI FSMAddress FSMPage FSMPageData @@ -806,7 +810,6 @@ FieldSelect FieldStore File FileBackupMethod -FileCopyMethod FileFdwExecutionState FileFdwPlanState FileNameMap @@ -1190,6 +1193,7 @@ HeapCheckContext HeapCheckReadStreamData HeapPageFreeze HeapScanDesc +HeapScanDescData HeapTuple HeapTupleData HeapTupleFields @@ -1249,6 +1253,7 @@ IndexClause IndexClauseSet IndexDeleteCounts IndexDeletePrefetchState +IndexDoCheckCallback IndexElem IndexFetchHeapData IndexFetchTableData @@ -1279,13 +1284,14 @@ InheritableSocket InitSampleScan_function InitializeDSMForeignScan_function InitializeWorkerForeignScan_function +InjIoErrorState InjectionPointCacheEntry InjectionPointCallback InjectionPointCondition InjectionPointConditionType InjectionPointEntry -InjectionPointsCtl InjectionPointSharedState +InjectionPointsCtl InlineCodeBlock InsertStmt Instrumentation @@ -1302,7 +1308,6 @@ IntoClause InvalMessageArray InvalidationInfo InvalidationMsgsGroup -IoMethod IoMethodOps IpcMemoryId
IpcMemoryKey @@ -1492,8 +1497,7 @@ LLVMOrcResourceTrackerRef LLVMOrcSymbolStringPoolRef LLVMOrcThreadSafeContextRef LLVMOrcThreadSafeModuleRef -LLVMPassManagerBuilderRef -LLVMPassManagerRef +LLVMPassBuilderOptionsRef LLVMTargetMachineRef LLVMTargetRef LLVMTypeRef @@ -1563,6 +1567,7 @@ LoadStmt LocalBufferLookupEnt LocalPgBackendStatus LocalTransactionId +Location LocationIndex LocationLen LockAcquireResult @@ -1582,7 +1587,6 @@ LockTupleMode LockViewRecurse_context LockWaitPolicy LockingClause -LogConnectionOption LogOpts LogStmtLevel LogicalDecodeBeginCB @@ -1633,6 +1637,7 @@ LogicalSlotInfo LogicalSlotInfoArr LogicalTape LogicalTapeSet +LookupSet LsnReadQueue LsnReadQueueNextFun LsnReadQueueNextStatus @@ -1657,8 +1662,8 @@ ManyTestResourceKind Material MaterialPath MaterialState -MdfdVec MdPathStr +MdfdVec Memoize MemoizeEntry MemoizeInstrumentation @@ -1672,6 +1677,7 @@ MemoryContextCallback MemoryContextCallbackFunction MemoryContextCounters MemoryContextData +MemoryContextId MemoryContextMethodID MemoryContextMethods MemoryStatsPrintFunc @@ -1765,6 +1771,7 @@ NumericSortSupport NumericSumAccum NumericVar OAuthValidatorCallbacks +OAuthValidatorModuleInit OM_uint32 OP OSAPerGroupState @@ -1834,7 +1841,6 @@ PGCALL2 PGCRYPTO_SHA_t PGChecksummablePage PGContextVisibility -PGErrorVerbosity PGEvent PGEventConnDestroy PGEventConnReset @@ -1904,7 +1910,6 @@ PLpgSQL_exception PLpgSQL_exception_block PLpgSQL_execstate PLpgSQL_expr -PLpgSQL_func_hashkey PLpgSQL_function PLpgSQL_getdiag_kind PLpgSQL_if_elsif @@ -2155,10 +2160,10 @@ PermutationStepBlockerType PgAioBackend PgAioCtl PgAioHandle -PgAioHandleCallbackID -PgAioHandleCallbackStage PgAioHandleCallbackComplete +PgAioHandleCallbackID PgAioHandleCallbackReport +PgAioHandleCallbackStage PgAioHandleCallbacks PgAioHandleCallbacksEntry PgAioHandleFlags @@ -2203,9 +2208,9 @@ PgStatShared_Common PgStatShared_Database PgStatShared_Function PgStatShared_HashEntry +PgStatShared_IO PgStatShared_InjectionPoint PgStatShared_InjectionPointFixed -PgStatShared_IO PgStatShared_Relation PgStatShared_ReplSlot PgStatShared_SLRU @@ -2226,7 +2231,6 @@ PgStat_FunctionCallUsage PgStat_FunctionCounts PgStat_HashKey PgStat_IO -PgStat_Kind PgStat_KindInfo PgStat_LocalState PgStat_PendingDroppedStatsItem @@ -2354,12 +2358,12 @@ PushFilter PushFilterOps PushFunction PyCFunction -PyMappingMethods PyMethodDef PyModuleDef PyObject -PySequenceMethods PyTypeObject +PyType_Slot +PyType_Spec Py_ssize_t QPRS_STATE QTN2QTState @@ -2473,6 +2477,7 @@ RelOptInfo RelOptKind RelPathStr RelStatsInfo +RelSyncCallbackFunction RelToCheck RelToCluster RelabelType @@ -2625,7 +2630,6 @@ SQLDropObject SQLFunctionCache SQLFunctionCachePtr SQLFunctionHashEntry -SQLFunctionLink SQLFunctionParseInfo SQLFunctionParseInfoPtr SQLValueFunction @@ -2637,6 +2641,7 @@ STARTUPINFO STRLEN SV SYNCHRONIZATION_BARRIER +SYSTEM_INFO SampleScan SampleScanGetSampleSize_function SampleScanState @@ -2724,6 +2729,7 @@ SharedIncrementalSortInfo SharedIndexScanInstrumentation SharedInvalCatalogMsg SharedInvalCatcacheMsg +SharedInvalRelSyncMsg SharedInvalRelcacheMsg SharedInvalRelmapMsg SharedInvalSmgrMsg @@ -2763,7 +2769,7 @@ SingleBoundSortItem Size SkipPages SkipSupport -SkipSupportData +SkipSupportIncDec SlabBlock SlabContext SlabSlot @@ -2989,6 +2995,7 @@ TarMethodData TarMethodFile TargetEntry TclExceptionNameMap +Tcl_CmdInfo Tcl_DString Tcl_FileProc Tcl_HashEntry @@ -2996,6 +3003,7 @@ Tcl_HashTable Tcl_Interp Tcl_NotifierProcs Tcl_Obj +Tcl_Size Tcl_Time TempNamespaceStatus TestDSMRegistryStruct @@ -3141,6 
+3149,7 @@ UnicodeNormalizationQC Unique UniquePath UniquePathMethod +UniqueRelInfo UniqueState UnlistenStmt UnresolvedTup @@ -3171,8 +3180,11 @@ VacuumRelation VacuumStmt ValidIOData ValidateIndexState -ValidatorModuleState ValidatorModuleResult +ValidatorModuleState +ValidatorShutdownCB +ValidatorStartupCB +ValidatorValidateCB ValuesScan ValuesScanState Var @@ -3377,10 +3389,9 @@ _resultmap _stringlist access_vector_t acquireLocksOnSubLinks_context -add_nulling_relids_context addFkConstraintSides +add_nulling_relids_context adjust_appendrel_attrs_context -allocfunc amadjustmembers_function ambeginscan_function ambuild_function @@ -3392,6 +3403,7 @@ amcostestimate_function amendscan_function amestimateparallelscan_function amgetbitmap_function +amgettreeheight_function amgettuple_function aminitparallelscan_function aminsert_function @@ -3402,13 +3414,27 @@ amparallelrescan_function amproperty_function amrescan_function amrestrpos_function -amtranslate_strategy_function amtranslatestrategy; -amtranslate_cmptype_function amtranslatecmptype; +amtranslate_cmptype_function +amtranslate_strategy_function amvacuumcleanup_function amvalidate_function array_iter array_unnest_fctx assign_collations_context +astreamer +astreamer_archive_context +astreamer_extractor +astreamer_gzip_decompressor +astreamer_gzip_writer +astreamer_lz4_frame +astreamer_member +astreamer_ops +astreamer_plain_writer +astreamer_recovery_injector +astreamer_tar_archiver +astreamer_tar_parser +astreamer_verify +astreamer_zstd_frame auth_password_hook_typ autovac_table av_relation @@ -3435,20 +3461,6 @@ bbsink_shell bbsink_state bbsink_throttle bbsink_zstd -astreamer -astreamer_archive_context -astreamer_extractor -astreamer_gzip_decompressor -astreamer_gzip_writer -astreamer_lz4_frame -astreamer_member -astreamer_ops -astreamer_plain_writer -astreamer_recovery_injector -astreamer_tar_archiver -astreamer_tar_parser -astreamer_verify -astreamer_zstd_frame bgworker_main_type bh_node_type binaryheap @@ -3488,6 +3500,13 @@ colormaprange compare_context config_handle config_var_value +conn_errorMessage_func +conn_oauth_client_id_func +conn_oauth_client_secret_func +conn_oauth_discovery_uri_func +conn_oauth_issuer_id_func +conn_oauth_scope_func +conn_sasl_state_func contain_aggs_of_level_context contain_placeholder_references_context convert_testexpr_context @@ -3504,6 +3523,9 @@ create_upper_paths_hook_type createdb_failure_params crosstab_HashEnt crosstab_cat_desc +curl_infotype +curl_socket_t +curl_version_info_data datapagemap_iterator_t datapagemap_t dateKEY @@ -3515,9 +3537,8 @@ deparse_columns deparse_context deparse_expr_cxt deparse_namespace -destructor +derives_hash dev_t -digit disassembledLeaf dlist_head dlist_iter @@ -3555,18 +3576,23 @@ dsm_handle dsm_op dsm_segment dsm_segment_detach_callback +duration eLogType ean13 eary ec_matches_callback_type ec_member_foreign_arg ec_member_matches_arg +element_type emit_log_hook_type eval_const_expressions_context exec_thread_arg execution_state exit_function explain_get_index_name_hook_type +explain_per_node_hook_type +explain_per_plan_hook_type +explain_validate_options_hook_type f_smgr fasthash_state fd_set @@ -3649,7 +3675,6 @@ gss_key_value_set_desc gss_name_t gtrgm_consistent_cache gzFile -hashfunc hbaPort heap_page_items_state help_handler @@ -3671,17 +3696,21 @@ init_function inline_cte_walker_context inline_error_callback_arg ino_t +inquiry instr_time int128 int16 int16KEY +int16_t int2vector int32 int32KEY int32_t int64 int64KEY +int64_t int8 +int8_t int8x16_t 
internalPQconninfoOption intptr_t @@ -3713,6 +3742,7 @@ lclContext lclTocEntry leafSegmentInfo leaf_item +libpq_gettext_func libpq_source line_t lineno_t @@ -3769,6 +3799,7 @@ mxact mxtruncinfo needs_fmgr_hook_type network_sortsupport_state +nl_item nodeitem normal_rand_fctx nsphash_hash @@ -3786,6 +3817,7 @@ openssl_tls_init_hook_typ ossl_EVP_cipher_func other output_type +overexplain_options pagetable_hash pagetable_iterator pairingheap @@ -3805,7 +3837,6 @@ pg_atomic_flag pg_atomic_uint32 pg_atomic_uint64 pg_be_sasl_mech -pg_case_map pg_category_range pg_checksum_context pg_checksum_raw_context @@ -3829,7 +3860,6 @@ pg_funcptr_t pg_gssinfo pg_hmac_ctx pg_hmac_errno -pg_int64 pg_local_to_utf_combined pg_locale_t pg_mb_radix_tree @@ -3898,7 +3928,8 @@ plperl_query_entry plpgsql_CastExprHashEntry plpgsql_CastHashEntry plpgsql_CastHashKey -plpgsql_HashEnt +plpgsql_expr_walker_callback +plpgsql_stmt_walker_callback pltcl_call_state pltcl_interp_desc pltcl_proc_desc @@ -3921,7 +3952,6 @@ printTextLineFormat printTextLineWrap printTextRule printXheaderWidthType -printfunc priv_map process_file_callback_t process_sublinks_context @@ -3961,12 +3991,9 @@ reduce_outer_joins_pass1_state reduce_outer_joins_pass2_state reference regex_arc_t -regex_t regexp regexp_matches_ctx registered_buffer -regmatch_t -regoff_t regproc relopt_bool relopt_enum @@ -3985,6 +4012,7 @@ remoteConnHashEnt remoteDep remove_nulling_relids_context rendezvousHashEntry +rep replace_rte_variables_callback replace_rte_variables_context report_error_fn @@ -4003,6 +4031,7 @@ rt_node_class_test_elem rt_radix_tree saophash_hash save_buffer +save_locale_t scram_state scram_state_enum script_error_callback_arg @@ -4010,6 +4039,8 @@ security_class_t sem_t sepgsql_context_info_t sequence_magic +set_conn_altsock_func +set_conn_oauth_token_func set_join_pathlist_hook_type set_rel_pathlist_hook_type shared_ts_iter @@ -4130,6 +4161,7 @@ uint32_t uint32x4_t uint64 uint64_t +uint64x2_t uint8 uint8_t uint8x16_t @@ -4139,7 +4171,6 @@ unicodeStyleColumnFormat unicodeStyleFormat unicodeStyleRowFormat unicode_linestyle -UniqueRelInfo unit_conversion unlogged_relation_entry utf_local_conversion_func @@ -4282,6 +4313,7 @@ xmlGenericErrorFunc xmlNodePtr xmlNodeSetPtr xmlParserCtxtPtr +xmlParserErrors xmlParserInputPtr xmlSaveCtxt xmlSaveCtxtPtr @@ -4302,6 +4334,3 @@ yyscan_t z_stream z_streamp zic_t -ExplainExtensionOption -ExplainOptionHandler -overexplain_options
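Most of the typedefs.list churn above is a re-sort into plain byte-wise order plus the buildfarm sync. A hedged sketch of a checker for that invariant; the file path, uniqueness, and C-locale sort assumptions are inferred from the diff rather than stated by the project:

    use strict;
    use warnings;

    # Assert typedefs.list is byte-wise sorted and duplicate-free, matching
    # how the buildfarm-synced entries appear to be ordered in this commit.
    my $file = 'src/tools/pgindent/typedefs.list';
    open my $fh, '<', $file or die "cannot open $file: $!";
    my @lines = <$fh>;
    close $fh;

    my %seen;
    my @expected = grep { !$seen{$_}++ } sort @lines;
    die "$file is not byte-wise sorted and unique\n"
      unless join('', @lines) eq join('', @expected);
    print "$file looks sorted and unique\n";
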