@@ -838,6 +838,7 @@ lazy_scan_heap(LVRelState *vacrel)
         Page        page;
         bool        all_visible_according_to_vm;
         bool        has_lpdead_items;
+        bool        got_cleanup_lock = false;
 
         if (blkno == next_unskippable_block)
         {
@@ -931,109 +932,86 @@ lazy_scan_heap(LVRelState *vacrel)
          */
         visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
 
+        buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
+                                 vacrel->bstrategy);
+        page = BufferGetPage(buf);
+
         /*
          * We need a buffer cleanup lock to prune HOT chains and defragment
          * the page in lazy_scan_prune. But when it's not possible to acquire
          * a cleanup lock right away, we may be able to settle for reduced
          * processing using lazy_scan_noprune.
          */
-        buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
-                                 vacrel->bstrategy);
-        page = BufferGetPage(buf);
-        if (!ConditionalLockBufferForCleanup(buf))
-        {
-            LockBuffer(buf, BUFFER_LOCK_SHARE);
+        got_cleanup_lock = ConditionalLockBufferForCleanup(buf);
 
-            /* Check for new or empty pages before lazy_scan_noprune call */
-            if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, true,
-                                       vmbuffer))
-            {
-                /* Processed as new/empty page (lock and pin released) */
-                continue;
-            }
-
-            /*
-             * Collect LP_DEAD items in dead_items array, count tuples,
-             * determine if rel truncation is safe
-             */
-            if (lazy_scan_noprune(vacrel, buf, blkno, page, &has_lpdead_items))
-            {
-                Size        freespace = 0;
-                bool        recordfreespace;
+        if (!got_cleanup_lock)
+            LockBuffer(buf, BUFFER_LOCK_SHARE);
 
-                /*
-                 * We processed the page successfully (without a cleanup
-                 * lock).
-                 *
-                 * Update the FSM, just as we would in the case where
-                 * lazy_scan_prune() is called. Our goal is to update the
-                 * freespace map the last time we touch the page. If the
-                 * relation has no indexes, or if index vacuuming is disabled,
-                 * there will be no second heap pass; if this particular page
-                 * has no dead items, the second heap pass will not touch this
-                 * page. So, in those cases, update the FSM now.
-                 *
-                 * After a call to lazy_scan_prune(), we would also try to
-                 * adjust the page-level all-visible bit and the visibility
-                 * map, but we skip that step in this path.
-                 */
-                recordfreespace = vacrel->nindexes == 0
-                    || !vacrel->do_index_vacuuming
-                    || !has_lpdead_items;
-                if (recordfreespace)
-                    freespace = PageGetHeapFreeSpace(page);
-                UnlockReleaseBuffer(buf);
-                if (recordfreespace)
-                    RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
-                continue;
-            }
+        /* Check for new or empty pages before lazy_scan_[no]prune call */
+        if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, !got_cleanup_lock,
+                                   vmbuffer))
+        {
+            /* Processed as new/empty page (lock and pin released) */
+            continue;
+        }
 
+        /*
+         * If we didn't get the cleanup lock, we can still collect LP_DEAD
+         * items in the dead_items array for later vacuuming, count live and
+         * recently dead tuples for vacuum logging, and determine if this
+         * block could later be truncated. If we encounter any xid/mxids that
+         * require advancing the relfrozenxid/relminxid, we'll have to wait
+         * for a cleanup lock and call lazy_scan_prune().
+         */
+        if (!got_cleanup_lock &&
+            !lazy_scan_noprune(vacrel, buf, blkno, page, &has_lpdead_items))
+        {
             /*
              * lazy_scan_noprune could not do all required processing. Wait
              * for a cleanup lock, and call lazy_scan_prune in the usual way.
              */
             Assert(vacrel->aggressive);
             LockBuffer(buf, BUFFER_LOCK_UNLOCK);
             LockBufferForCleanup(buf);
-        }
-
-        /* Check for new or empty pages before lazy_scan_prune call */
-        if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, false, vmbuffer))
-        {
-            /* Processed as new/empty page (lock and pin released) */
-            continue;
+            got_cleanup_lock = true;
         }
 
         /*
-         * Prune, freeze, and count tuples.
+         * If we have a cleanup lock, we must now prune, freeze, and count
+         * tuples. We may have acquired the cleanup lock originally, or we may
+         * have gone back and acquired it after lazy_scan_noprune() returned
+         * false. Either way, the page hasn't been processed yet.
          *
-         * Accumulates details of remaining LP_DEAD line pointers on page in
-         * dead_items array. This includes LP_DEAD line pointers that we
-         * pruned ourselves, as well as existing LP_DEAD line pointers that
-         * were pruned some time earlier. Also considers freezing XIDs in the
-         * tuple headers of remaining items with storage. It also determines
-         * if truncating this block is safe.
+         * Like lazy_scan_noprune(), lazy_scan_prune() will count
+         * recently_dead_tuples and live tuples for vacuum logging, determine
+         * if the block can later be truncated, and accumulate the details of
+         * remaining LP_DEAD line pointers on the page in the dead_items
+         * array. These dead items include those pruned by lazy_scan_prune()
+         * as well as line pointers previously marked LP_DEAD.
          */
-        lazy_scan_prune(vacrel, buf, blkno, page,
-                        vmbuffer, all_visible_according_to_vm,
-                        &has_lpdead_items);
+        if (got_cleanup_lock)
+            lazy_scan_prune(vacrel, buf, blkno, page,
+                            vmbuffer, all_visible_according_to_vm,
+                            &has_lpdead_items);
 
         /*
-         * Final steps for block: drop cleanup lock, record free space in the
-         * FSM.
+         * Now drop the buffer lock and, potentially, update the FSM.
          *
-         * If we will likely do index vacuuming, wait until
-         * lazy_vacuum_heap_rel() to save free space. This doesn't just save
-         * us some cycles; it also allows us to record any additional free
-         * space that lazy_vacuum_heap_page() will make available in cases
-         * where it's possible to truncate the page's line pointer array.
+         * Our goal is to update the freespace map the last time we touch the
+         * page. If we'll process a block in the second pass, we may free up
+         * additional space on the page, so it is better to update the FSM
+         * after the second pass. If the relation has no indexes, or if index
+         * vacuuming is disabled, there will be no second heap pass; if this
+         * particular page has no dead items, the second heap pass will not
+         * touch this page. So, in those cases, update the FSM now.
          *
-         * Note: It's not in fact 100% certain that we really will call
-         * lazy_vacuum_heap_rel() -- lazy_vacuum() might yet opt to skip index
-         * vacuuming (and so must skip heap vacuuming). This is deemed okay
-         * because it only happens in emergencies, or when there is very
-         * little free space anyway. (Besides, we start recording free space
-         * in the FSM once index vacuuming has been abandoned.)
+         * Note: In corner cases, it's possible to miss updating the FSM
+         * entirely. If index vacuuming is currently enabled, we'll skip the
+         * FSM update now. But if failsafe mode is later activated, or there
+         * are so few dead tuples that index vacuuming is bypassed, there will
+         * also be no opportunity to update the FSM later, because we'll never
+         * revisit this page. Since updating the FSM is desirable but not
+         * absolutely required, that's OK.
          */
         if (vacrel->nindexes == 0
             || !vacrel->do_index_vacuuming
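
To make the restructured control flow easier to follow, here is a minimal standalone sketch of the per-block locking logic after this change. This is not the PostgreSQL code: the buffer-manager calls and scan helpers are stubs with simplified signatures and illustrative return values; only the got_cleanup_lock bookkeeping mirrors the diff above.

#include <stdbool.h>
#include <stdio.h>

typedef int Buffer;

/* Stubs standing in for the real buffer-manager calls (simplified). */
static bool ConditionalLockBufferForCleanup(Buffer buf) { (void) buf; return false; }
static void LockBuffer(Buffer buf, const char *mode) { (void) buf; printf("lock: %s\n", mode); }
static void LockBufferForCleanup(Buffer buf) { (void) buf; printf("waited for cleanup lock\n"); }

/* Stubs for the scan helpers; return values are only illustrative. */
static bool lazy_scan_new_or_empty(Buffer buf, bool sharelock) { (void) buf; (void) sharelock; return false; }
static bool lazy_scan_noprune(Buffer buf, bool *has_lpdead_items) { (void) buf; *has_lpdead_items = false; return false; }
static void lazy_scan_prune(Buffer buf, bool *has_lpdead_items) { (void) buf; *has_lpdead_items = true; }

static void
process_block(Buffer buf)
{
    bool got_cleanup_lock = ConditionalLockBufferForCleanup(buf);
    bool has_lpdead_items = false;

    /* Fall back to a share lock when the cleanup lock is unavailable. */
    if (!got_cleanup_lock)
        LockBuffer(buf, "BUFFER_LOCK_SHARE");

    /* New/empty pages are handled the same way under either lock level. */
    if (lazy_scan_new_or_empty(buf, !got_cleanup_lock))
        return;

    /*
     * Without a cleanup lock, try the reduced processing first.  If it
     * cannot complete (e.g. tuples must be frozen), trade the share lock
     * for a full cleanup lock and fall through to pruning.
     */
    if (!got_cleanup_lock && !lazy_scan_noprune(buf, &has_lpdead_items))
    {
        LockBuffer(buf, "BUFFER_LOCK_UNLOCK");
        LockBufferForCleanup(buf);
        got_cleanup_lock = true;
    }

    /* Prune and freeze only when some path above obtained the cleanup lock. */
    if (got_cleanup_lock)
        lazy_scan_prune(buf, &has_lpdead_items);
}

int
main(void)
{
    process_block(1);
    return 0;
}

The invariant this preserves is the one the new comments state: after the fallback branch, exactly one of lazy_scan_noprune() or lazy_scan_prune() has fully processed the page, and got_cleanup_lock records which path ran.
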
@@ -1047,9 +1025,10 @@ lazy_scan_heap(LVRelState *vacrel)
         /*
          * Periodically perform FSM vacuuming to make newly-freed space
          * visible on upper FSM pages. This is done after vacuuming if the
-         * table has indexes.
+         * table has indexes. There will only be newly-freed space if we
+         * held the cleanup lock and lazy_scan_prune() was called.
          */
-        if (vacrel->nindexes == 0 && has_lpdead_items &&
+        if (got_cleanup_lock && vacrel->nindexes == 0 && has_lpdead_items &&
             blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
         {
             FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
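
The FSM handling in this function now reduces to two predicates, restated below as a small self-contained sketch. The helper names update_fsm_now and vacuum_fsm_range_now are hypothetical, and VACUUM_FSM_EVERY_PAGES is given an arbitrary illustrative value rather than the real constant derived from BLCKSZ.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int BlockNumber;

/* Illustrative placeholder; the real constant is derived from BLCKSZ. */
#define VACUUM_FSM_EVERY_PAGES 1024

/*
 * Record a page's free space immediately only when no second heap pass
 * will revisit it: the relation has no indexes, index vacuuming is
 * disabled, or this particular page has no dead items.
 */
static bool
update_fsm_now(int nindexes, bool do_index_vacuuming, bool has_lpdead_items)
{
    return nindexes == 0 || !do_index_vacuuming || !has_lpdead_items;
}

/*
 * Vacuum upper FSM pages periodically, and only when lazy_scan_prune()
 * actually ran (the cleanup lock was held) and could have freed space.
 */
static bool
vacuum_fsm_range_now(bool got_cleanup_lock, int nindexes, bool has_lpdead_items,
                     BlockNumber blkno, BlockNumber next_fsm_block_to_vacuum)
{
    return got_cleanup_lock && nindexes == 0 && has_lpdead_items &&
        blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES;
}

int
main(void)
{
    printf("%d %d\n",
           update_fsm_now(0, true, true),
           vacuum_fsm_range_now(true, 0, true, 2048, 0));
    return 0;
}
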