@@ -117,6 +117,7 @@ static BufferAccessStrategy vac_strategy;
 static void lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 			   Relation *Irel, int nindexes, bool scan_all);
 static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
+static bool lazy_check_needs_freeze(Buffer buf);
 static void lazy_vacuum_index(Relation indrel,
 			   IndexBulkDeleteResult **stats,
 			   LVRelStats *vacrelstats);
@@ -453,8 +454,6 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 
 		vacuum_delay_point();
 
-		vacrelstats->scanned_pages++;
-
 		/*
 		 * If we are close to overrunning the available space for dead-tuple
 		 * TIDs, pause and do a cycle of vacuuming before we tackle this page.
@@ -486,7 +485,41 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 								 RBM_NORMAL, vac_strategy);
 
 		/* We need buffer cleanup lock so that we can prune HOT chains. */
-		LockBufferForCleanup(buf);
+		if (!ConditionalLockBufferForCleanup(buf))
+		{
+			/*
+			 * It's OK to skip vacuuming a page, as long as it has no data
+			 * that needs to be cleaned for wraparound avoidance.
+			 */
+			if (!scan_all)
+			{
+				ReleaseBuffer(buf);
+				continue;
+			}
+
+			/*
+			 * If this is a wraparound-checking vacuum, then we read the page
+			 * with share lock to see if any xids need to be frozen. If the
+			 * page doesn't need attention we just skip and continue. If it
+			 * does, we wait for cleanup lock.
+			 *
+			 * We could defer the lock request further by remembering the page
+			 * and coming back to it later, or we could even register
+			 * ourselves for multiple buffers and then service whichever one
+			 * is received first.  For now, this seems good enough.
+			 */
+			LockBuffer(buf, BUFFER_LOCK_SHARE);
+			if (!lazy_check_needs_freeze(buf))
+			{
+				UnlockReleaseBuffer(buf);
+				continue;
+			}
+			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+			LockBufferForCleanup(buf);
+			/* drop through to normal processing */
+		}
+
+		vacrelstats->scanned_pages++;
 
 		page = BufferGetPage(buf);
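The control flow in this hunk is the heart of the change: try the cleanup lock without blocking; if it is unavailable, skip the page entirely unless this is a wraparound (scan_all) vacuum, in which case peek under share lock and wait for the cleanup lock only when something on the page actually needs freezing. Note also that scanned_pages++ moves below the skip paths, so skipped pages are no longer counted as scanned. The following is a minimal standalone model of that pattern, not backend code: a pthread rwlock stands in for the buffer pin and content locks, pthread_rwlock_trywrlock() approximates ConditionalLockBufferForCleanup(), and the ModelPage type is invented here.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct
{
	pthread_rwlock_t lock;		/* stands in for the buffer locks */
	bool		needs_freeze;	/* stands in for old xids on the page */
} ModelPage;

static void
process_page(ModelPage *page, bool scan_all)
{
	bool		needs;

	if (pthread_rwlock_trywrlock(&page->lock) != 0)
	{
		/* "Cleanup lock" unavailable: skip unless we must freeze. */
		if (!scan_all)
			return;

		/* Peek under share lock, as lazy_check_needs_freeze() does. */
		pthread_rwlock_rdlock(&page->lock);
		needs = page->needs_freeze;
		pthread_rwlock_unlock(&page->lock);
		if (!needs)
			return;

		/* Something needs freezing: now it is worth waiting. */
		pthread_rwlock_wrlock(&page->lock);
	}

	printf("vacuuming page\n");	/* normal processing path */
	pthread_rwlock_unlock(&page->lock);
}

int
main(void)
{
	ModelPage	page = {PTHREAD_RWLOCK_INITIALIZER, false};

	process_page(&page, true);
	return 0;
}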
@@ -932,7 +965,13 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 		tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
 		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
 								 vac_strategy);
-		LockBufferForCleanup(buf);
+		if (!ConditionalLockBufferForCleanup(buf))
+		{
+			/* Don't leak the pin, and don't retry this TID forever. */
+			ReleaseBuffer(buf);
+			++tupindex;
+			continue;
+		}
 		tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats);
 
 		/* Now that we've compacted the page, record its available space */
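One subtlety here: tupindex is normally advanced by lazy_vacuum_page(), so the skip path must release the buffer and step past one dead-tuple TID itself, or the loop would hold the pin and retry the same block forever. A standalone sketch of that loop invariant, with hypothetical try_lock()/vacuum_page() stubs in place of the backend calls:

#include <stdbool.h>
#include <stdio.h>

/* Stubs: a lock that fails on even blocks, one TID vacuumed per call. */
static bool
try_lock(int blkno)
{
	return (blkno % 2) != 0;
}

static int
vacuum_page(int blkno, int tupindex)
{
	printf("vacuumed block %d\n", blkno);
	return tupindex + 1;		/* returns the next dead-tuple index */
}

int
main(void)
{
	const int	dead_blknos[] = {1, 2, 2, 3};
	int			ndead = 4;
	int			tupindex = 0;

	while (tupindex < ndead)
	{
		int			blkno = dead_blknos[tupindex];

		if (!try_lock(blkno))
		{
			++tupindex;			/* skip one TID; never spin in place */
			continue;
		}
		tupindex = vacuum_page(blkno, tupindex);
	}
	return 0;
}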
@@ -1009,6 +1043,50 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
 	return tupindex;
 }
 
+/*
+ *	lazy_check_needs_freeze() -- scan page to see if any tuples
+ *					 need to be cleaned to avoid wraparound
+ *
+ * Returns true if the page needs to be vacuumed using cleanup lock.
+ */
+static bool
+lazy_check_needs_freeze(Buffer buf)
+{
+	Page		page;
+	OffsetNumber offnum,
+				maxoff;
+	HeapTupleHeader tupleheader;
+
+	page = BufferGetPage(buf);
+
+	if (PageIsNew(page) || PageIsEmpty(page))
+	{
+		/* PageIsNew probably shouldn't happen... */
+		return false;
+	}
+
+	maxoff = PageGetMaxOffsetNumber(page);
+	for (offnum = FirstOffsetNumber;
+		 offnum <= maxoff;
+		 offnum = OffsetNumberNext(offnum))
+	{
+		ItemId		itemid;
+
+		itemid = PageGetItemId(page, offnum);
+
+		if (!ItemIdIsNormal(itemid))
+			continue;
+
+		tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
+
+		if (heap_tuple_needs_freeze(tupleheader, FreezeLimit, buf))
+			return true;
+	}							/* scan along page */
+
+	return false;
+}
+
+
 /*
  *	lazy_vacuum_index() -- vacuum one index relation.
  *
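lazy_check_needs_freeze() walks the page's line pointers, ignores anything but normal items, and asks heap_tuple_needs_freeze() whether any tuple carries an xid older than FreezeLimit. Below is a standalone model of that scan, for illustration only: the ItemState/ModelItem types are invented here, only an xmin field is tested (the real heap_tuple_needs_freeze() also examines xmax and related fields), and the plain uint32 comparison ignores PostgreSQL's wraparound-aware TransactionId arithmetic.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef enum
{
	LP_UNUSED, LP_NORMAL, LP_REDIRECT, LP_DEAD
} ItemState;

typedef struct
{
	ItemState	state;
	uint32_t	xmin;			/* inserting xid, if LP_NORMAL */
} ModelItem;

static bool
page_needs_freeze(const ModelItem *items, int nitems, uint32_t freeze_limit)
{
	for (int i = 0; i < nitems; i++)
	{
		if (items[i].state != LP_NORMAL)
			continue;			/* nothing to freeze in this slot */
		if (items[i].xmin < freeze_limit)
			return true;		/* one old tuple is enough */
	}
	return false;
}

int
main(void)
{
	ModelItem	items[] = {{LP_DEAD, 0}, {LP_NORMAL, 90}, {LP_NORMAL, 300}};

	printf("%s\n", page_needs_freeze(items, 3, 100) ? "freeze" : "skip");
	return 0;
}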