@@ -260,7 +260,7 @@ static void lazy_vacuum(LVRelState *vacrel);
 static bool lazy_vacuum_all_indexes(LVRelState *vacrel);
 static void lazy_vacuum_heap_rel(LVRelState *vacrel);
 static int  lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno,
-                                  Buffer buffer, int index, Buffer *vmbuffer);
+                                  Buffer buffer, int index, Buffer vmbuffer);
 static bool lazy_check_wraparound_failsafe(LVRelState *vacrel);
 static void lazy_cleanup_all_indexes(LVRelState *vacrel);
 static IndexBulkDeleteResult *lazy_vacuum_one_index(Relation indrel,
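Note: the prototype change above shifts responsibility for the visibility map buffer to the caller, which now pins the VM page itself and passes the pinned Buffer by value. A minimal caller-side sketch of that contract, assuming the usual backend context (bufmgr.h, visibilitymap.h) and illustrative local names:

    Buffer      vmbuffer = InvalidBuffer;

    /* caller takes (or reuses) the pin on blkno's VM page up front */
    visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);

    /* callee uses the already-pinned page; it never needs to (re)pin it */
    index = lazy_vacuum_heap_page(vacrel, blkno, buf, index, vmbuffer);

    /* the pin stays with the caller, to be released when the scan moves on */
    if (BufferIsValid(vmbuffer))
        ReleaseBuffer(vmbuffer);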
@@ -945,17 +945,15 @@ lazy_scan_heap(LVRelState *vacrel)
          */
         visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
 
-        /* Finished preparatory checks.  Actually scan the page. */
-        buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno,
-                                 RBM_NORMAL, vacrel->bstrategy);
-        page = BufferGetPage(buf);
-
         /*
          * We need a buffer cleanup lock to prune HOT chains and defragment
          * the page in lazy_scan_prune. But when it's not possible to acquire
          * a cleanup lock right away, we may be able to settle for reduced
          * processing using lazy_scan_noprune.
          */
+        buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
+                                 vacrel->bstrategy);
+        page = BufferGetPage(buf);
         if (!ConditionalLockBufferForCleanup(buf))
         {
             bool        hastup,
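Note: the buffer read now sits directly above the conditional cleanup-lock attempt that the comment describes. A compressed sketch of that fallback pattern, assuming the backend context; apart from the bufmgr calls, the names here are illustrative:

    buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);

    if (ConditionalLockBufferForCleanup(buf))
    {
        /* cleanup lock acquired: pruning and page defragmentation are safe */
    }
    else
    {
        /* another backend holds a pin; settle for an ordinary shared lock */
        LockBuffer(buf, BUFFER_LOCK_SHARE);
        /* reduced processing only: no line pointers may be moved */
    }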
@@ -1040,7 +1038,7 @@ lazy_scan_heap(LVRelState *vacrel)
         {
             Size        freespace;
 
-            lazy_vacuum_heap_page(vacrel, blkno, buf, 0, &vmbuffer);
+            lazy_vacuum_heap_page(vacrel, blkno, buf, 0, vmbuffer);
 
             /* Forget the LP_DEAD items that we just vacuumed */
             dead_items->num_items = 0;
@@ -1092,7 +1090,10 @@ lazy_scan_heap(LVRelState *vacrel)
             uint8       flags = VISIBILITYMAP_ALL_VISIBLE;
 
             if (prunestate.all_frozen)
+            {
+                Assert(!TransactionIdIsValid(prunestate.visibility_cutoff_xid));
                 flags |= VISIBILITYMAP_ALL_FROZEN;
+            }
 
             /*
              * It should never be the case that the visibility map page is set
@@ -1120,8 +1121,8 @@ lazy_scan_heap(LVRelState *vacrel)
          * got cleared after lazy_scan_skip() was called, so we must recheck
          * with buffer lock before concluding that the VM is corrupt.
          */
-        else if (all_visible_according_to_vm && !PageIsAllVisible(page)
-                 && VM_ALL_VISIBLE(vacrel->rel, blkno, &vmbuffer))
+        else if (all_visible_according_to_vm && !PageIsAllVisible(page) &&
+                 visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0)
         {
             elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
                  vacrel->relname, blkno);
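Note: the recheck now inspects the raw VM status byte rather than the VM_ALL_VISIBLE() macro, so a stray all-frozen bit on a page whose PD_ALL_VISIBLE flag is clear is also reported. A small sketch of the distinction, assuming the backend context; the warning text is illustrative, not the patch's:

    uint8       status = visibilitymap_get_status(rel, blkno, &vmbuffer);

    /* VM_ALL_VISIBLE() would test only the VISIBILITYMAP_ALL_VISIBLE bit */
    if (!PageIsAllVisible(page) && status != 0)
        elog(WARNING, "VM status %u set but PD_ALL_VISIBLE clear for block %u",
             status, blkno);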
@@ -1164,12 +1165,27 @@ lazy_scan_heap(LVRelState *vacrel)
                  !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
         {
             /*
-             * We can pass InvalidTransactionId as the cutoff XID here,
-             * because setting the all-frozen bit doesn't cause recovery
-             * conflicts.
+             * Avoid relying on all_visible_according_to_vm as a proxy for the
+             * page-level PD_ALL_VISIBLE bit being set, since it might have
+             * become stale -- even when all_visible is set in prunestate
+             */
+            if (!PageIsAllVisible(page))
+            {
+                PageSetAllVisible(page);
+                MarkBufferDirty(buf);
+            }
+
+            /*
+             * Set the page all-frozen (and all-visible) in the VM.
+             *
+             * We can pass InvalidTransactionId as our visibility_cutoff_xid,
+             * since a snapshotConflictHorizon sufficient to make everything
+             * safe for REDO was logged when the page's tuples were frozen.
              */
+            Assert(!TransactionIdIsValid(prunestate.visibility_cutoff_xid));
             visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
                               vmbuffer, InvalidTransactionId,
+                              VISIBILITYMAP_ALL_VISIBLE |
                               VISIBILITYMAP_ALL_FROZEN);
         }
 
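Note: for contrast with the all-frozen case above, a page that becomes merely all-visible still needs a real cutoff XID, since recovery may have to cancel standby queries whose snapshots could still see recently dead tuples on the page. A hedged sketch of that counterpart call (not part of this hunk):

    /* newly all-visible but not all-frozen: pass the page's cutoff XID */
    visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr, vmbuffer,
                      prunestate.visibility_cutoff_xid,
                      VISIBILITYMAP_ALL_VISIBLE);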
@@ -1311,7 +1327,11 @@ lazy_scan_skip(LVRelState *vacrel, Buffer *vmbuffer, BlockNumber next_block,
 
         /* DISABLE_PAGE_SKIPPING makes all skipping unsafe */
         if (!vacrel->skipwithvm)
+        {
+            /* Caller shouldn't rely on all_visible_according_to_vm */
+            *next_unskippable_allvis = false;
             break;
+        }
 
         /*
          * Aggressive VACUUM caller can't skip pages just because they are
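Note: the new out-parameter write matters because the caller seeds its per-page all_visible_according_to_vm flag from next_unskippable_allvis. A hedged caller-side sketch; the exact lazy_scan_skip() signature and the surrounding code in lazy_scan_heap may differ in detail:

    bool        next_unskippable_allvis;
    bool        skipping_current_range;

    next_unskippable_block = lazy_scan_skip(vacrel, &vmbuffer, blkno,
                                            &next_unskippable_allvis,
                                            &skipping_current_range);
    /* with DISABLE_PAGE_SKIPPING this is now forced to false */
    all_visible_according_to_vm = next_unskippable_allvis;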
@@ -1807,8 +1827,6 @@ lazy_scan_prune(LVRelState *vacrel,
     {
         TransactionId snapshotConflictHorizon;
 
-        Assert(prunestate->hastup);
-
         vacrel->frozen_pages++;
 
         /*
@@ -1818,7 +1836,11 @@ lazy_scan_prune(LVRelState *vacrel,
          * cutoff by stepping back from OldestXmin.
          */
         if (prunestate->all_visible && prunestate->all_frozen)
+        {
+            /* Using same cutoff when setting VM is now unnecessary */
             snapshotConflictHorizon = prunestate->visibility_cutoff_xid;
+            prunestate->visibility_cutoff_xid = InvalidTransactionId;
+        }
         else
         {
             /* Avoids false conflicts when hot_standby_feedback in use */
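Note: the else branch this hunk ends on derives a conservative conflict horizon by stepping back from OldestXmin, as the comment above the if statement explains. A hedged sketch of that idea; oldest_xmin stands in for whichever LVRelState field holds the OldestXmin cutoff:

    else
    {
        /* Avoids false conflicts when hot_standby_feedback in use */
        snapshotConflictHorizon = oldest_xmin;
        TransactionIdRetreat(snapshotConflictHorizon);
    }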
@@ -2417,10 +2439,19 @@ lazy_vacuum_heap_rel(LVRelState *vacrel)
 
         blkno = ItemPointerGetBlockNumber(&vacrel->dead_items->items[index]);
         vacrel->blkno = blkno;
+
+        /*
+         * Pin the visibility map page in case we need to mark the page
+         * all-visible. In most cases this will be very cheap, because we'll
+         * already have the correct page pinned anyway.
+         */
+        visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
+
+        /* We need a non-cleanup exclusive lock to mark dead_items unused */
         buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
                                  vacrel->bstrategy);
         LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
-        index = lazy_vacuum_heap_page(vacrel, blkno, buf, index, &vmbuffer);
+        index = lazy_vacuum_heap_page(vacrel, blkno, buf, index, vmbuffer);
 
         /* Now that we've vacuumed the page, record its available space */
         page = BufferGetPage(buf);
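Note: pinning the VM page before ReadBufferExtended()/LockBuffer() is the safe ordering, because visibilitymap_pin() may itself have to read the VM page in from disk, and that I/O should not happen while the heap buffer is held exclusively locked. A minimal sketch, assuming the backend context and illustrative names:

    /* 1. take the VM pin first; this step may perform I/O */
    visibilitymap_pin(rel, blkno, &vmbuffer);

    /* 2. only then read and exclusively lock the heap page */
    buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
    LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

    /* 3. the VM bit can later be set without giving up the buffer lock */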
@@ -2457,15 +2488,16 @@ lazy_vacuum_heap_rel(LVRelState *vacrel)
  * vacrel->dead_items array.
  *
  * Caller must have an exclusive buffer lock on the buffer (though a full
- * cleanup lock is also acceptable).
+ * cleanup lock is also acceptable). vmbuffer must be valid and already have
+ * a pin on blkno's visibility map page.
  *
  * index is an offset into the vacrel->dead_items array for the first listed
  * LP_DEAD item on the page. The return value is the first index immediately
  * after all LP_DEAD items for the same page in the array.
  */
 static int
 lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
-                      int index, Buffer *vmbuffer)
+                      int index, Buffer vmbuffer)
 {
     VacDeadItems *dead_items = vacrel->dead_items;
     Page        page = BufferGetPage(buffer);
@@ -2546,31 +2578,21 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
      * dirty, exclusively locked, and, if needed, a full page image has been
      * emitted.
      */
+    Assert(!PageIsAllVisible(page));
     if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid,
                                  &all_frozen))
-        PageSetAllVisible(page);
-
-    /*
-     * All the changes to the heap page have been done. If the all-visible
-     * flag is now set, also set the VM all-visible bit (and, if possible, the
-     * all-frozen bit) unless this has already been done previously.
-     */
-    if (PageIsAllVisible(page))
     {
-        uint8       flags = 0;
-        uint8       vm_status = visibilitymap_get_status(vacrel->rel,
-                                                         blkno, vmbuffer);
-
-        /* Set the VM all-frozen bit to flag, if needed */
-        if ((vm_status & VISIBILITYMAP_ALL_VISIBLE) == 0)
-            flags |= VISIBILITYMAP_ALL_VISIBLE;
-        if ((vm_status & VISIBILITYMAP_ALL_FROZEN) == 0 && all_frozen)
+        uint8       flags = VISIBILITYMAP_ALL_VISIBLE;
+
+        if (all_frozen)
+        {
+            Assert(!TransactionIdIsValid(visibility_cutoff_xid));
             flags |= VISIBILITYMAP_ALL_FROZEN;
+        }
 
-        Assert(BufferIsValid(*vmbuffer));
-        if (flags != 0)
-            visibilitymap_set(vacrel->rel, blkno, buffer, InvalidXLogRecPtr,
-                              *vmbuffer, visibility_cutoff_xid, flags);
+        PageSetAllVisible(page);
+        visibilitymap_set(vacrel->rel, blkno, buffer, InvalidXLogRecPtr,
+                          vmbuffer, visibility_cutoff_xid, flags);
     }
 
     /* Revert to the previous phase information for error traceback */