Commit cf0612a

Fix BRIN summarization concurrent with extension
If a process is extending a table concurrently with some BRIN summarization process, it is possible for the latter to miss pages added by the former because the number of pages is computed ahead of time. Fix by determining a fresh relation size after inserting the placeholder tuple: any process that further extends the table concurrently will update the placeholder tuple, while previous pages will be processed by the heap scan.

Reported-by: Tomas Vondra
Reviewed-by: Tom Lane
Author: Álvaro Herrera
Discussion: https://postgr.es/m/083d996a-4a8a-0e13-800a-851dd09ad8cc@2ndquadrant.com
Backpatch-to: 9.5
1 parent 90d61bd commit cf0612a
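
The sketch below is a minimal, self-contained illustration of the clamping idea described in the commit message; it is not the committed patch. BlockNumber, Min, fresh_relation_size() and the block counts are stand-ins chosen for illustration, not PostgreSQL APIs.

/*
 * Standalone sketch (not the committed code): once the placeholder tuple is
 * in place, take a fresh relation size and clamp the number of blocks to
 * scan for the final, possibly partial, range.
 */
#include <stdio.h>

typedef unsigned int BlockNumber;

#define Min(a, b)	((a) < (b) ? (a) : (b))

/* stand-in for the relation-size lookup: pretend the table grew to 135 */
static BlockNumber
fresh_relation_size(void)
{
	return 135;
}

static BlockNumber
scan_blocks_for_range(BlockNumber heapBlk, BlockNumber pagesPerRange,
					  BlockNumber staleNumBlks)
{
	if (heapBlk + pagesPerRange > staleNumBlks)
	{
		/* possibly-partial final range: re-check the size, then clamp */
		return Min(fresh_relation_size() - heapBlk, pagesPerRange);
	}
	/* interior range: known to be complete */
	return pagesPerRange;
}

int
main(void)
{
	/* the stale size said 130 blocks, so the range at block 128 looked partial */
	printf("blocks to scan: %u\n", scan_blocks_for_range(128, 16, 130));
	return 0;
}

Pages appended after this fresh size is taken are not lost: a process that extends the table concurrently updates the placeholder tuple, so those pages need not be scanned here.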

File tree

1 file changed: +49 -8 lines changed

src/backend/access/brin/brin.c

Lines changed: 49 additions & 8 deletions
@@ -957,7 +957,8 @@ terminate_brin_buildstate(BrinBuildState *state)
 }
 
 /*
- * Summarize the given page range of the given index.
+ * On the given BRIN index, summarize the heap page range that corresponds
+ * to the heap block number given.
  *
  * This routine can run in parallel with insertions into the heap.  To avoid
  * missing those values from the summary tuple, we first insert a placeholder
@@ -967,6 +968,12 @@ terminate_brin_buildstate(BrinBuildState *state)
  * update of the index value happens in a loop, so that if somebody updates
  * the placeholder tuple after we read it, we detect the case and try again.
  * This ensures that the concurrently inserted tuples are not lost.
+ *
+ * A further corner case is this routine being asked to summarize the partial
+ * range at the end of the table.  heapNumBlocks is the (possibly outdated)
+ * table size; if we notice that the requested range lies beyond that size,
+ * we re-compute the table size after inserting the placeholder tuple, to
+ * avoid missing pages that were appended recently.
  */
 static void
 summarize_range(IndexInfo *indexInfo, BrinBuildState *state, Relation heapRel,
@@ -987,6 +994,33 @@ summarize_range(IndexInfo *indexInfo, BrinBuildState *state, Relation heapRel,
 									   state->bs_rmAccess, &phbuf,
 									   heapBlk, phtup, phsz);
 
+	/*
+	 * Compute range end.  We hold ShareUpdateExclusive lock on table, so it
+	 * cannot shrink concurrently (but it can grow).
+	 */
+	Assert(heapBlk % state->bs_pagesPerRange == 0);
+	if (heapBlk + state->bs_pagesPerRange > heapNumBlks)
+	{
+		/*
+		 * If we're asked to scan what we believe to be the final range on the
+		 * table (i.e. a range that might be partial) we need to recompute our
+		 * idea of what the latest page is after inserting the placeholder
+		 * tuple.  Anyone that grows the table later will update the
+		 * placeholder tuple, so it doesn't matter that we won't scan these
+		 * pages ourselves.  Careful: the table might have been extended
+		 * beyond the current range, so clamp our result.
+		 *
+		 * Fortunately, this should occur infrequently.
+		 */
+		scanNumBlks = Min(RelationGetNumberOfBlocks(heapRel) - heapBlk,
+						  state->bs_pagesPerRange);
+	}
+	else
+	{
+		/* Easy case: range is known to be complete */
+		scanNumBlks = state->bs_pagesPerRange;
+	}
+
 	/*
 	 * Execute the partial heap scan covering the heap blocks in the specified
 	 * page range, summarizing the heap tuples in it.  This scan stops just
@@ -997,8 +1031,6 @@ summarize_range(IndexInfo *indexInfo, BrinBuildState *state, Relation heapRel,
 	 * by transactions that are still in progress, among other corner cases.
 	 */
 	state->bs_currRangeStart = heapBlk;
-	scanNumBlks = heapBlk + state->bs_pagesPerRange <= heapNumBlks ?
-		state->bs_pagesPerRange : heapNumBlks - heapBlk;
 	IndexBuildHeapRangeScan(heapRel, state->bs_irel, indexInfo, false, true,
 							heapBlk, scanNumBlks,
 							brinbuildCallback, (void *) state);
@@ -1074,25 +1106,34 @@ brinsummarize(Relation index, Relation heapRel, double *numSummarized,
 	BrinBuildState *state = NULL;
 	IndexInfo  *indexInfo = NULL;
 	BlockNumber heapNumBlocks;
-	BlockNumber heapBlk;
 	BlockNumber pagesPerRange;
 	Buffer		buf;
+	BlockNumber startBlk;
 
 	revmap = brinRevmapInitialize(index, &pagesPerRange);
 
+	/* determine range of pages to process: always start from the beginning */
+	heapNumBlocks = RelationGetNumberOfBlocks(heapRel);
+	startBlk = 0;
+
 	/*
	 * Scan the revmap to find unsummarized items.
	 */
 	buf = InvalidBuffer;
-	heapNumBlocks = RelationGetNumberOfBlocks(heapRel);
-	for (heapBlk = 0; heapBlk < heapNumBlocks; heapBlk += pagesPerRange)
+	for (; startBlk < heapNumBlocks; startBlk += pagesPerRange)
 	{
 		BrinTuple  *tup;
 		OffsetNumber off;
 
+		/*
+		 * Go away now if we think the next range is partial.
+		 */
+		if (startBlk + pagesPerRange > heapNumBlocks)
+			break;
+
 		CHECK_FOR_INTERRUPTS();
 
-		tup = brinGetTupleForHeapBlock(revmap, heapBlk, &buf, &off, NULL,
+		tup = brinGetTupleForHeapBlock(revmap, startBlk, &buf, &off, NULL,
 									   BUFFER_LOCK_SHARE);
 		if (tup == NULL)
 		{
@@ -1105,7 +1146,7 @@ brinsummarize(Relation index, Relation heapRel, double *numSummarized,
 											   pagesPerRange);
 			indexInfo = BuildIndexInfo(index);
 		}
-		summarize_range(indexInfo, state, heapRel, heapBlk, heapNumBlocks);
+		summarize_range(indexInfo, state, heapRel, startBlk, heapNumBlocks);
 
 		/* and re-initialize state for the next range */
 		brin_memtuple_initialize(state->bs_dtuple, state->bs_bdesc);
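
For context on the brinsummarize() side of the change, here is a toy, runnable walk of the reworked loop shape: start blocks advance in pagesPerRange steps, and the loop stops before a range that would extend past the size snapshot, leaving the final partial range unsummarized for now. The table size and range width below are made-up values, not anything taken from PostgreSQL.

/*
 * Toy illustration (not PostgreSQL code) of the loop shape after the patch.
 */
#include <stdio.h>

typedef unsigned int BlockNumber;

int
main(void)
{
	BlockNumber heapNumBlocks = 70;		/* size snapshot taken up front */
	BlockNumber pagesPerRange = 16;
	BlockNumber startBlk;

	for (startBlk = 0; startBlk < heapNumBlocks; startBlk += pagesPerRange)
	{
		/* go away now if we think the next range is partial */
		if (startBlk + pagesPerRange > heapNumBlocks)
			break;
		printf("would summarize the range starting at block %u\n", startBlk);
	}
	return 0;
}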
