@@ -35,6 +35,8 @@ typedef struct
35
35
36
36
/* tuple visibility test, initialized for the relation */
37
37
GlobalVisState * vistest ;
38
+ /* whether or not dead items can be set LP_UNUSED during pruning */
39
+ bool mark_unused_now ;
38
40
39
41
TransactionId new_prune_xid ; /* new prune hint value for page */
40
42
TransactionId snapshotConflictHorizon ; /* latest xid removed */
@@ -67,6 +69,7 @@ static void heap_prune_record_prunable(PruneState *prstate, TransactionId xid);
67
69
static void heap_prune_record_redirect (PruneState * prstate ,
68
70
OffsetNumber offnum , OffsetNumber rdoffnum );
69
71
static void heap_prune_record_dead (PruneState * prstate , OffsetNumber offnum );
72
+ static void heap_prune_record_dead_or_unused (PruneState * prstate , OffsetNumber offnum );
70
73
static void heap_prune_record_unused (PruneState * prstate , OffsetNumber offnum );
71
74
static void page_verify_redirects (Page page );
72
75
@@ -148,7 +151,13 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
148
151
{
149
152
PruneResult presult ;
150
153
151
- heap_page_prune (relation , buffer , vistest , & presult , NULL );
154
+ /*
155
+ * For now, pass mark_unused_now as false regardless of whether or
156
+ * not the relation has indexes, since we cannot safely determine
157
+ * that during on-access pruning with the current implementation.
158
+ */
159
+ heap_page_prune (relation , buffer , vistest , false,
160
+ & presult , NULL );
152
161
153
162
/*
154
163
* Report the number of tuples reclaimed to pgstats. This is
@@ -193,6 +202,9 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
193
202
* (see heap_prune_satisfies_vacuum and
194
203
* HeapTupleSatisfiesVacuum).
195
204
*
205
+ * mark_unused_now indicates whether or not dead items can be set LP_UNUSED during
206
+ * pruning.
207
+ *
196
208
* off_loc is the offset location required by the caller to use in error
197
209
* callback.
198
210
*
@@ -203,6 +215,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
203
215
void
204
216
heap_page_prune (Relation relation , Buffer buffer ,
205
217
GlobalVisState * vistest ,
218
+ bool mark_unused_now ,
206
219
PruneResult * presult ,
207
220
OffsetNumber * off_loc )
208
221
{
@@ -227,6 +240,7 @@ heap_page_prune(Relation relation, Buffer buffer,
227
240
prstate .new_prune_xid = InvalidTransactionId ;
228
241
prstate .rel = relation ;
229
242
prstate .vistest = vistest ;
243
+ prstate .mark_unused_now = mark_unused_now ;
230
244
prstate .snapshotConflictHorizon = InvalidTransactionId ;
231
245
prstate .nredirected = prstate .ndead = prstate .nunused = 0 ;
232
246
memset (prstate .marked , 0 , sizeof (prstate .marked ));
@@ -306,9 +320,9 @@ heap_page_prune(Relation relation, Buffer buffer,
306
320
if (off_loc )
307
321
* off_loc = offnum ;
308
322
309
- /* Nothing to do if slot is empty or already dead */
323
+ /* Nothing to do if slot is empty */
310
324
itemid = PageGetItemId (page , offnum );
311
- if (!ItemIdIsUsed (itemid ) || ItemIdIsDead ( itemid ) )
325
+ if (!ItemIdIsUsed (itemid ))
312
326
continue ;
313
327
314
328
/* Process this item or chain of items */
@@ -581,7 +595,17 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum,
581
595
* function.)
582
596
*/
583
597
if (ItemIdIsDead (lp ))
598
+ {
599
+ /*
600
+ * If the caller set mark_unused_now true, we can set dead line
601
+ * pointers LP_UNUSED now. We don't increment ndeleted here since
602
+ * the LP was already marked dead.
603
+ */
604
+ if (unlikely (prstate -> mark_unused_now ))
605
+ heap_prune_record_unused (prstate , offnum );
606
+
584
607
break ;
608
+ }
585
609
586
610
Assert (ItemIdIsNormal (lp ));
587
611
htup = (HeapTupleHeader ) PageGetItem (dp , lp );
@@ -715,7 +739,7 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum,
715
739
* redirect the root to the correct chain member.
716
740
*/
717
741
if (i >= nchain )
718
- heap_prune_record_dead (prstate , rootoffnum );
742
+ heap_prune_record_dead_or_unused (prstate , rootoffnum );
719
743
else
720
744
heap_prune_record_redirect (prstate , rootoffnum , chainitems [i ]);
721
745
}
@@ -726,9 +750,9 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum,
726
750
* item. This can happen if the loop in heap_page_prune caused us to
727
751
* visit the dead successor of a redirect item before visiting the
728
752
* redirect item. We can clean up by setting the redirect item to
729
- * DEAD state.
753
+ * DEAD state or LP_UNUSED if the caller indicated .
730
754
*/
731
- heap_prune_record_dead (prstate , rootoffnum );
755
+ heap_prune_record_dead_or_unused (prstate , rootoffnum );
732
756
}
733
757
734
758
return ndeleted ;
@@ -774,6 +798,27 @@ heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum)
774
798
prstate -> marked [offnum ] = true;
775
799
}
776
800
801
/*
 * Depending on whether or not the caller set mark_unused_now to true, record
 * that a line pointer should be marked LP_DEAD or LP_UNUSED.  There are other
 * cases in which we will mark line pointers LP_UNUSED, but we will not mark
 * line pointers LP_DEAD if mark_unused_now is true.
 *
 * prstate carries the per-page pruning state (including the mark_unused_now
 * flag recorded from the heap_page_prune() caller); offnum is the line
 * pointer's offset on the page being pruned.
 */
static void
heap_prune_record_dead_or_unused(PruneState *prstate, OffsetNumber offnum)
{
	/*
	 * If the caller set mark_unused_now to true, we can remove dead tuples
	 * during pruning instead of marking their line pointers dead.  Set this
	 * tuple's line pointer LP_UNUSED.  We hint that this option is less
	 * likely.
	 */
	if (unlikely(prstate->mark_unused_now))
		heap_prune_record_unused(prstate, offnum);
	else
		heap_prune_record_dead(prstate, offnum);
}
821
+
777
822
/* Record line pointer to be marked unused */
778
823
static void
779
824
heap_prune_record_unused (PruneState * prstate , OffsetNumber offnum )
@@ -903,13 +948,24 @@ heap_page_prune_execute(Buffer buffer,
903
948
#ifdef USE_ASSERT_CHECKING
904
949
905
950
/*
906
- * Only heap-only tuples can become LP_UNUSED during pruning. They
907
- * don't need to be left in place as LP_DEAD items until VACUUM gets
908
- * around to doing index vacuuming.
951
+ * When heap_page_prune() was called, mark_unused_now may have been
952
+ * passed as true, which allows would-be LP_DEAD items to be made
953
+ * LP_UNUSED instead. This is only possible if the relation has no
954
+ * indexes. If there are any dead items, then mark_unused_now was not
955
+ * true and every item being marked LP_UNUSED must refer to a
956
+ * heap-only tuple.
909
957
*/
910
- Assert (ItemIdHasStorage (lp ) && ItemIdIsNormal (lp ));
911
- htup = (HeapTupleHeader ) PageGetItem (page , lp );
912
- Assert (HeapTupleHeaderIsHeapOnly (htup ));
958
+ if (ndead > 0 )
959
+ {
960
+ Assert (ItemIdHasStorage (lp ) && ItemIdIsNormal (lp ));
961
+ htup = (HeapTupleHeader ) PageGetItem (page , lp );
962
+ Assert (HeapTupleHeaderIsHeapOnly (htup ));
963
+ }
964
+ else
965
+ {
966
+ Assert (ItemIdIsUsed (lp ));
967
+ }
968
+
913
969
#endif
914
970
915
971
ItemIdSetUnused (lp );
0 commit comments