@@ -8827,12 +8827,6 @@ heap_xlog_visible(XLogReaderState *record)
 	 * full-page writes. This exposes us to torn page hazards, but since
 	 * we're not inspecting the existing page contents in any way, we
 	 * don't care.
-	 *
-	 * However, all operations that clear the visibility map bit *do* bump
-	 * the LSN, and those operations will only be replayed if the XLOG LSN
-	 * follows the page LSN. Thus, if the page LSN has advanced past our
-	 * XLOG record's LSN, we mustn't mark the page all-visible, because
-	 * the subsequent update won't be replayed to clear the flag.
 	 */
 	page = BufferGetPage(buffer);
 
@@ -8901,20 +8895,8 @@ heap_xlog_visible(XLogReaderState *record)
 		reln = CreateFakeRelcacheEntry(rlocator);
 		visibilitymap_pin(reln, blkno, &vmbuffer);
 
-		/*
-		 * Don't set the bit if replay has already passed this point.
-		 *
-		 * It might be safe to do this unconditionally; if replay has passed
-		 * this point, we'll replay at least as far this time as we did
-		 * before, and if this bit needs to be cleared, the record responsible
-		 * for doing so should be again replayed, and clear it. For right
-		 * now, out of an abundance of conservatism, we use the same test here
-		 * we did for the heap page. If this results in a dropped bit, no
-		 * real harm is done; and the next VACUUM will fix it.
-		 */
-		if (lsn > PageGetLSN(vmpage))
-			visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
-							  xlrec->cutoff_xid, xlrec->flags);
+		visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
+						  xlrec->cutoff_xid, xlrec->flags);
 
 		ReleaseBuffer(vmbuffer);
 		FreeFakeRelcacheEntry(reln);
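
The comment deleted in the second hunk carries the reasoning behind the change: if recovery restarts and re-applies a SET of the visibility-map bit it has already applied, it will also re-apply every later record that clears the bit, so setting it unconditionally converges to the same final state as the old "if (lsn > PageGetLSN(vmpage))" guard. A minimal standalone C model of that idempotence argument follows; the record types and replay loop here are illustrative assumptions for the sketch, not PostgreSQL's actual WAL format or replay machinery.

	/*
	 * Toy model (assumption, not PostgreSQL code): a "WAL" of records that
	 * set or clear one visibility-map bit, replayed unconditionally.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	typedef enum { REC_SET, REC_CLEAR } RecType;

	typedef struct { RecType type; } Record;

	/* Replay every record against the bit, with no LSN guard. */
	static bool
	replay(bool bit, const Record *recs, int nrecs)
	{
		for (int i = 0; i < nrecs; i++)
			bit = (recs[i].type == REC_SET);
		return bit;
	}

	int
	main(void)
	{
		Record	wal[] = {{REC_SET}, {REC_CLEAR}, {REC_SET}};
		int		nrecs = (int) (sizeof(wal) / sizeof(wal[0]));

		/* First pass of recovery: replay the whole log. */
		bool	bit = replay(false, wal, nrecs);

		/*
		 * Crash and restart: recovery replays from the beginning again.
		 * Re-applying the already-applied SET at index 0 is harmless,
		 * because the CLEAR at index 1 is re-applied too; the final
		 * state is unchanged.
		 */
		bool	bit_after_restart = replay(bit, wal, nrecs);

		printf("first pass: %d, after restart: %d\n", bit, bit_after_restart);
		return (bit == bit_after_restart) ? 0 : 1;
	}

In this model the unconditional replay is safe for exactly the reason the removed comment gives: replay always proceeds at least as far as it did before the crash, so any clear that must follow a set is itself replayed.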