@@ -951,7 +951,8 @@ terminate_brin_buildstate(BrinBuildState *state)
951
951
}
952
952
953
953
/*
954
- * Summarize the given page range of the given index.
954
+ * On the given BRIN index, summarize the heap page range that corresponds
955
+ * to the heap block number given.
955
956
*
956
957
* This routine can run in parallel with insertions into the heap. To avoid
957
958
* missing those values from the summary tuple, we first insert a placeholder
@@ -961,6 +962,12 @@ terminate_brin_buildstate(BrinBuildState *state)
961
962
* update of the index value happens in a loop, so that if somebody updates
962
963
* the placeholder tuple after we read it, we detect the case and try again.
963
964
* This ensures that the concurrently inserted tuples are not lost.
965
+ *
966
+ * A further corner case is this routine being asked to summarize the partial
967
+ * range at the end of the table. heapNumBlks is the (possibly outdated)
968
+ * table size; if we notice that the requested range lies beyond that size,
969
+ * we re-compute the table size after inserting the placeholder tuple, to
970
+ * avoid missing pages that were appended recently.
964
971
*/
965
972
static void
966
973
summarize_range (IndexInfo * indexInfo , BrinBuildState * state , Relation heapRel ,
@@ -981,6 +988,33 @@ summarize_range(IndexInfo *indexInfo, BrinBuildState *state, Relation heapRel,
981
988
state -> bs_rmAccess , & phbuf ,
982
989
heapBlk , phtup , phsz );
983
990
991
+ /*
992
+ * Compute range end. We hold ShareUpdateExclusive lock on table, so it
993
+ * cannot shrink concurrently (but it can grow).
994
+ */
995
+ Assert (heapBlk % state -> bs_pagesPerRange == 0 );
996
+ if (heapBlk + state -> bs_pagesPerRange > heapNumBlks )
997
+ {
998
+ /*
999
+ * If we're asked to scan what we believe to be the final range on the
1000
+ * table (i.e. a range that might be partial) we need to recompute our
1001
+ * idea of what the latest page is after inserting the placeholder
1002
+ * tuple. Anyone that grows the table later will update the
1003
+ * placeholder tuple, so it doesn't matter that we won't scan these
1004
+ * pages ourselves. Careful: the table might have been extended
1005
+ * beyond the current range, so clamp our result.
1006
+ *
1007
+ * Fortunately, this should occur infrequently.
1008
+ */
1009
+ scanNumBlks = Min (RelationGetNumberOfBlocks (heapRel ) - heapBlk ,
1010
+ state -> bs_pagesPerRange );
1011
+ }
1012
+ else
1013
+ {
1014
+ /* Easy case: range is known to be complete */
1015
+ scanNumBlks = state -> bs_pagesPerRange ;
1016
+ }
1017
+
984
1018
/*
985
1019
* Execute the partial heap scan covering the heap blocks in the specified
986
1020
* page range, summarizing the heap tuples in it. This scan stops just
@@ -991,8 +1025,6 @@ summarize_range(IndexInfo *indexInfo, BrinBuildState *state, Relation heapRel,
991
1025
* by transactions that are still in progress, among other corner cases.
992
1026
*/
993
1027
state -> bs_currRangeStart = heapBlk ;
994
- scanNumBlks = heapBlk + state -> bs_pagesPerRange <= heapNumBlks ?
995
- state -> bs_pagesPerRange : heapNumBlks - heapBlk ;
996
1028
IndexBuildHeapRangeScan (heapRel , state -> bs_irel , indexInfo , false, true,
997
1029
heapBlk , scanNumBlks ,
998
1030
brinbuildCallback , (void * ) state );
@@ -1069,25 +1101,34 @@ brinsummarize(Relation index, Relation heapRel, double *numSummarized,
1069
1101
BrinBuildState * state = NULL ;
1070
1102
IndexInfo * indexInfo = NULL ;
1071
1103
BlockNumber heapNumBlocks ;
1072
- BlockNumber heapBlk ;
1073
1104
BlockNumber pagesPerRange ;
1074
1105
Buffer buf ;
1106
+ BlockNumber startBlk ;
1075
1107
1076
1108
revmap = brinRevmapInitialize (index , & pagesPerRange , NULL );
1077
1109
1110
+ /* determine range of pages to process: always start from the beginning */
1111
+ heapNumBlocks = RelationGetNumberOfBlocks (heapRel );
1112
+ startBlk = 0 ;
1113
+
1078
1114
/*
1079
1115
* Scan the revmap to find unsummarized items.
1080
1116
*/
1081
1117
buf = InvalidBuffer ;
1082
- heapNumBlocks = RelationGetNumberOfBlocks (heapRel );
1083
- for (heapBlk = 0 ; heapBlk < heapNumBlocks ; heapBlk += pagesPerRange )
1118
+ for (; startBlk < heapNumBlocks ; startBlk += pagesPerRange )
1084
1119
{
1085
1120
BrinTuple * tup ;
1086
1121
OffsetNumber off ;
1087
1122
1123
+ /*
1124
+ * Go away now if we think the next range is partial.
1125
+ */
1126
+ if (startBlk + pagesPerRange > heapNumBlocks )
1127
+ break ;
1128
+
1088
1129
CHECK_FOR_INTERRUPTS ();
1089
1130
1090
- tup = brinGetTupleForHeapBlock (revmap , heapBlk , & buf , & off , NULL ,
1131
+ tup = brinGetTupleForHeapBlock (revmap , startBlk , & buf , & off , NULL ,
1091
1132
BUFFER_LOCK_SHARE , NULL );
1092
1133
if (tup == NULL )
1093
1134
{
@@ -1100,7 +1141,7 @@ brinsummarize(Relation index, Relation heapRel, double *numSummarized,
1100
1141
pagesPerRange );
1101
1142
indexInfo = BuildIndexInfo (index );
1102
1143
}
1103
- summarize_range (indexInfo , state , heapRel , heapBlk , heapNumBlocks );
1144
+ summarize_range (indexInfo , state , heapRel , startBlk , heapNumBlocks );
1104
1145
1105
1146
/* and re-initialize state for the next range */
1106
1147
brin_memtuple_initialize (state -> bs_dtuple , state -> bs_bdesc );
0 commit comments