
Commit 7309243

Ensure we MarkBufferDirty before visibilitymap_set()
visibilitymap_set() logs the heap page and sets its LSN, so the buffer must be marked dirty first; otherwise a checkpoint could occur between those actions and leave us in an inconsistent state.

Jeff Davis
1 parent fdea253 commit 7309243
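For context, here is a minimal sketch of the ordering rule this commit enforces. It is not the committed code: the function framing is invented for illustration, and only the calls mirror the diff below.

    /*
     * Illustrative sketch, not PostgreSQL source: shows the required order
     * when setting PD_ALL_VISIBLE and the visibility-map bit together.
     */
    static void
    mark_all_visible_sketch(Relation onerel, BlockNumber blkno, Buffer buf,
                            Buffer vmbuffer,
                            TransactionId visibility_cutoff_xid)
    {
        Page    page = BufferGetPage(buf);

        PageSetAllVisible(page);

        /*
         * Dirty the heap buffer BEFORE visibilitymap_set().  With checksums
         * enabled, visibilitymap_set() may WAL-log the heap page and stamp
         * its LSN.  If the buffer still looked clean at that moment, a
         * concurrent checkpoint could skip writing it while advancing the
         * redo pointer past the WAL record, so after a crash neither the
         * on-disk page nor replay would reflect the change.
         */
        MarkBufferDirty(buf);

        visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
                          vmbuffer, visibility_cutoff_xid);
    }

The same pattern appears in both functions touched below: the buffer is dirtied before any WAL record that may reference the page is emitted.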

File tree: 1 file changed, +26 −21 lines

src/backend/commands/vacuumlazy.c

@@ -894,26 +894,25 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 		freespace = PageGetHeapFreeSpace(page);
 
 		/* mark page all-visible, if appropriate */
-		if (all_visible)
+		if (all_visible && !all_visible_according_to_vm)
 		{
-			if (!PageIsAllVisible(page))
-			{
-				PageSetAllVisible(page);
-				MarkBufferDirty(buf);
-				visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
-								  vmbuffer, visibility_cutoff_xid);
-			}
-			else if (!all_visible_according_to_vm)
-			{
-				/*
-				 * It should never be the case that the visibility map page is
-				 * set while the page-level bit is clear, but the reverse is
-				 * allowed.  Set the visibility map bit as well so that we get
-				 * back in sync.
-				 */
-				visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
-								  vmbuffer, visibility_cutoff_xid);
-			}
+			/*
+			 * It should never be the case that the visibility map page is set
+			 * while the page-level bit is clear, but the reverse is allowed
+			 * (if checksums are not enabled).  Regardless, set both bits so
+			 * that we get back in sync.
+			 *
+			 * NB: If the heap page is all-visible but the VM bit is not set,
+			 * we don't need to dirty the heap page.  However, if checksums
+			 * are enabled, we do need to make sure that the heap page is
+			 * dirtied before passing it to visibilitymap_set(), because it
+			 * may be logged.  Given that this situation should only happen
+			 * in rare cases after a crash, it is not worth optimizing.
+			 */
+			PageSetAllVisible(page);
+			MarkBufferDirty(buf);
+			visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
+							  vmbuffer, visibility_cutoff_xid);
 		}
 
 		/*
@@ -1138,6 +1137,14 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
 
 	PageRepairFragmentation(page);
 
+	/*
+	 * Mark buffer dirty before we write WAL.
+	 *
+	 * If checksums are enabled, visibilitymap_set() may log the heap page,
+	 * so we must mark heap buffer dirty before calling visibilitymap_set().
+	 */
+	MarkBufferDirty(buffer);
+
 	/*
 	 * Now that we have removed the dead tuples from the page, once again check
 	 * if the page has become all-visible.
@@ -1151,8 +1158,6 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
 						  visibility_cutoff_xid);
 	}
 
-	MarkBufferDirty(buffer);
-
 	/* XLOG stuff */
 	if (RelationNeedsWAL(onerel))
 	{
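To see why the lazy_vacuum_page() hunks matter, here is a hedged sketch of the hazard window the old ordering left open; the timeline comments are our gloss, not source text:

    /*
     * Old ordering (simplified):
     *
     *     PageRepairFragmentation(page);   -- page modified, buffer still
     *                                         marked clean
     *     visibilitymap_set(...);          -- may WAL-log the heap page and
     *                                         set its LSN
     *     <a checkpoint here sees a "clean" buffer, skips it, and moves the
     *      redo pointer past the WAL record -- the change can be lost>
     *     MarkBufferDirty(buffer);         -- too late
     *
     * New ordering: MarkBufferDirty(buffer) runs immediately after
     * PageRepairFragmentation(), before any WAL can reference the page.
     */

Moving the MarkBufferDirty() call ahead of visibilitymap_set() closes that window; the buffer was going to be dirtied in any case, so nothing extra is written.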
