@@ -167,9 +167,10 @@ typedef struct LVRelState
 	MultiXactId relminmxid;
 	double		old_live_tuples;	/* previous value of pg_class.reltuples */
 
-	/* VACUUM operation's cutoff for pruning */
+	/* VACUUM operation's cutoffs for freezing and pruning */
 	TransactionId OldestXmin;
-	/* VACUUM operation's cutoff for freezing XIDs and MultiXactIds */
+	GlobalVisState *vistest;
+	/* VACUUM operation's target cutoffs for freezing XIDs and MultiXactIds */
 	TransactionId FreezeLimit;
 	MultiXactId MultiXactCutoff;
 	/* Are FreezeLimit/MultiXactCutoff still valid? */
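The new `vistest` field caches a `GlobalVisState` for the whole VACUUM operation. For orientation, a minimal sketch (not part of the patch) of how such a cached test answers removability questions; it assumes the existing `GlobalVisTestFor()`/`GlobalVisTestIsRemovableXid()` API from `utils/snapmgr.h` that `heap_page_prune` relies on internally, and the helper name here is hypothetical:

```c
#include "postgres.h"
#include "utils/snapmgr.h"		/* GlobalVisState, GlobalVisTestIsRemovableXid() */

/*
 * Hypothetical helper (illustration only): ask the vistest cached in
 * LVRelState whether a deleted tuple's xmax is no longer visible to any
 * running transaction, and is therefore safe to prune away.
 */
static bool
deleted_tuple_is_prunable(LVRelState *vacrel, TransactionId xmax)
{
	/* vacrel->vistest is set once per VACUUM, via GlobalVisTestFor(rel) */
	return GlobalVisTestIsRemovableXid(vacrel->vistest, xmax);
}
```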
@@ -185,8 +186,6 @@ typedef struct LVRelState
 	bool		verbose;		/* VACUUM VERBOSE? */
 
 	/*
-	 * State managed by lazy_scan_heap() follows.
-	 *
 	 * dead_items stores TIDs whose index tuples are deleted by index
 	 * vacuuming.  Each TID points to an LP_DEAD line pointer from a heap page
 	 * that has been processed by lazy_scan_prune.  Also needed by
@@ -252,7 +251,6 @@ static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf,
 								   bool sharelock, Buffer vmbuffer);
 static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
 							BlockNumber blkno, Page page,
-							GlobalVisState *vistest,
 							LVPagePruneState *prunestate);
 static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf,
 							  BlockNumber blkno, Page page,
@@ -281,7 +279,7 @@ static void dead_items_alloc(LVRelState *vacrel, int nworkers);
 static void dead_items_cleanup(LVRelState *vacrel);
 static bool heap_page_is_all_visible(LVRelState *vacrel, Buffer buf,
 									 TransactionId *visibility_cutoff_xid, bool *all_frozen);
-static void update_index_statistics(LVRelState *vacrel);
+static void update_relstats_all_indexes(LVRelState *vacrel);
 static void vacuum_error_callback(void *arg);
 static void update_vacuum_error_info(LVRelState *vacrel,
 									 LVSavedErrInfo *saved_vacrel,
@@ -296,7 +294,8 @@ static void restore_vacuum_error_info(LVRelState *vacrel,
  *
  *		This routine sets things up for and then calls lazy_scan_heap, where
  *		almost all work actually takes place.  Finalizes everything after call
- *		returns by managing rel truncation and updating pg_class statistics.
+ *		returns by managing relation truncation and updating rel's pg_class
+ *		entry.  (Also updates pg_class entries for any indexes that need it.)
  *
  *		At entry, we have already established a transaction and opened
  *		and locked the relation.
@@ -468,9 +467,51 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 	vacrel->relminmxid = rel->rd_rel->relminmxid;
 	vacrel->old_live_tuples = rel->rd_rel->reltuples;
 
-	/* Set cutoffs for entire VACUUM */
+	/* Initialize page counters explicitly (be tidy) */
+	vacrel->scanned_pages = 0;
+	vacrel->frozenskipped_pages = 0;
+	vacrel->removed_pages = 0;
+	vacrel->lpdead_item_pages = 0;
+	vacrel->missed_dead_pages = 0;
+	vacrel->nonempty_pages = 0;
+	/* dead_items_alloc allocates vacrel->dead_items later on */
+
+	/* Allocate/initialize output statistics state */
+	vacrel->new_rel_tuples = 0;
+	vacrel->new_live_tuples = 0;
+	vacrel->indstats = (IndexBulkDeleteResult **)
+		palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
+
+	/* Initialize remaining counters (be tidy) */
+	vacrel->num_index_scans = 0;
+	vacrel->tuples_deleted = 0;
+	vacrel->lpdead_items = 0;
+	vacrel->live_tuples = 0;
+	vacrel->recently_dead_tuples = 0;
+	vacrel->missed_dead_tuples = 0;
+
+	/*
+	 * Determine the extent of the blocks that we'll scan in lazy_scan_heap,
+	 * and finalize cutoffs used for freezing and pruning in lazy_scan_prune.
+	 *
+	 * We expect vistest will always make heap_page_prune remove any deleted
+	 * tuple whose xmax is < OldestXmin.  lazy_scan_prune must never become
+	 * confused about whether a tuple should be frozen or removed.  (In the
+	 * future we might want to teach lazy_scan_prune to recompute vistest from
+	 * time to time, to increase the number of dead tuples it can prune away.)
+	 *
+	 * We must determine rel_pages _after_ OldestXmin has been established.
+	 * lazy_scan_heap's physical heap scan (scan of pages < rel_pages) is
+	 * thereby guaranteed to not miss any tuples with XIDs < OldestXmin.  These
+	 * XIDs must at least be considered for freezing (though not necessarily
+	 * frozen) during its scan.
+	 */
+	vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
 	vacrel->OldestXmin = OldestXmin;
+	vacrel->vistest = GlobalVisTestFor(rel);
+	/* FreezeLimit controls XID freezing (always <= OldestXmin) */
 	vacrel->FreezeLimit = FreezeLimit;
+	/* MultiXactCutoff controls MXID freezing */
 	vacrel->MultiXactCutoff = MultiXactCutoff;
 	/* Track if cutoffs became invalid (possible in !aggressive case only) */
 	vacrel->freeze_cutoffs_valid = true;
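The long comment in this hunk compresses to a simple ordering argument; the following C comment restates it as a reading aid (names from this file; the mention of `vacuum_set_xid_limits()` as the place OldestXmin is established earlier in `heap_vacuum_rel` is an assumption about this era of the code):

```c
/*
 * Ordering rule, restated (reading aid only, not code from the patch):
 *
 * 1. OldestXmin is established first, earlier in heap_vacuum_rel
 *    (via vacuum_set_xid_limits() in this era of the code).
 * 2. Only then does RelationGetNumberOfBlocks() fix rel_pages.
 *
 * A concurrent inserter may extend the heap after step 2, but such pages lie
 * at blkno >= rel_pages (outside this VACUUM's scan), and any XIDs on them
 * were allocated after step 1, so they are >= OldestXmin and cannot hold
 * back the relfrozenxid value that is eventually set.
 */
```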
@@ -481,21 +522,21 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 	 */
 	lazy_scan_heap(vacrel, params->nworkers);
 
-	/* Done with indexes */
-	vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
-
 	/*
-	 * Optionally truncate the relation.  But remember the relation size used
-	 * by lazy_scan_heap for later first.
+	 * Update pg_class entries for each of rel's indexes where appropriate.
+	 *
+	 * Unlike the later update to rel's pg_class entry, this is not critical.
+	 * Maintains relpages/reltuples statistics used by the planner only.
 	 */
-	orig_rel_pages = vacrel->rel_pages;
+	if (vacrel->do_index_cleanup)
+		update_relstats_all_indexes(vacrel);
+
+	/* Done with rel's indexes */
+	vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
+
+	/* Optionally truncate rel */
 	if (should_attempt_truncation(vacrel))
-	{
-		update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_TRUNCATE,
-								 vacrel->nonempty_pages,
-								 InvalidOffsetNumber);
 		lazy_truncate_heap(vacrel);
-	}
 
 	/* Pop the error context stack */
 	error_context_stack = errcallback.previous;
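Taken together with the surrounding hunks, the tail of `heap_vacuum_rel` now runs in this order (condensed sketch for orientation only, using names from this patch):

```c
lazy_scan_heap(vacrel, params->nworkers);		/* scan/prune/vacuum the heap */

if (vacrel->do_index_cleanup)
	update_relstats_all_indexes(vacrel);		/* non-critical planner stats */

vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);

if (should_attempt_truncation(vacrel))
	lazy_truncate_heap(vacrel);					/* sets its own error phase now */

/* ... then rel's own pg_class entry is updated (relfrozenxid etc.) ... */
```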
@@ -505,7 +546,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 										 PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
 
 	/*
-	 * Update statistics in pg_class.
+	 * Prepare to update rel's pg_class entry.
 	 *
 	 * In principle new_live_tuples could be -1 indicating that we (still)
 	 * don't know the tuple count.  In practice that probably can't happen,
@@ -517,22 +558,19 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 	 */
 	new_rel_pages = vacrel->rel_pages;	/* After possible rel truncation */
 	new_live_tuples = vacrel->new_live_tuples;
-
 	visibilitymap_count(rel, &new_rel_allvisible, NULL);
 	if (new_rel_allvisible > new_rel_pages)
 		new_rel_allvisible = new_rel_pages;
 
 	/*
+	 * Now actually update rel's pg_class entry.
+	 *
 	 * Aggressive VACUUM must reliably advance relfrozenxid (and relminmxid).
 	 * We are able to advance relfrozenxid in a non-aggressive VACUUM too,
 	 * provided we didn't skip any all-visible (not all-frozen) pages using
 	 * the visibility map, and assuming that we didn't fail to get a cleanup
 	 * lock that made it unsafe with respect to FreezeLimit (or perhaps our
 	 * MultiXactCutoff) established for VACUUM operation.
-	 *
-	 * NB: We must use orig_rel_pages, not vacrel->rel_pages, since we want
-	 * the rel_pages used by lazy_scan_heap, which won't match when we
-	 * happened to truncate the relation afterwards.
 	 */
 	if (vacrel->scanned_pages + vacrel->frozenskipped_pages < orig_rel_pages ||
 		!vacrel->freeze_cutoffs_valid)
@@ -787,7 +825,7 @@ static void
 lazy_scan_heap(LVRelState *vacrel, int nworkers)
 {
 	VacDeadItems *dead_items;
-	BlockNumber nblocks,
+	BlockNumber nblocks = vacrel->rel_pages,
 				blkno,
 				next_unskippable_block,
 				next_failsafe_block,
@@ -800,29 +838,6 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
 		PROGRESS_VACUUM_MAX_DEAD_TUPLES
 	};
 	int64		initprog_val[3];
-	GlobalVisState *vistest;
-
-	nblocks = RelationGetNumberOfBlocks(vacrel->rel);
-	vacrel->rel_pages = nblocks;
-	vacrel->scanned_pages = 0;
-	vacrel->frozenskipped_pages = 0;
-	vacrel->removed_pages = 0;
-	vacrel->lpdead_item_pages = 0;
-	vacrel->missed_dead_pages = 0;
-	vacrel->nonempty_pages = 0;
-
-	/* Initialize instrumentation counters */
-	vacrel->num_index_scans = 0;
-	vacrel->tuples_deleted = 0;
-	vacrel->lpdead_items = 0;
-	vacrel->live_tuples = 0;
-	vacrel->recently_dead_tuples = 0;
-	vacrel->missed_dead_tuples = 0;
-
-	vistest = GlobalVisTestFor(vacrel->rel);
-
-	vacrel->indstats = (IndexBulkDeleteResult **)
-		palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
 
 	/*
 	 * Do failsafe precheck before calling dead_items_alloc.  This ensures
@@ -880,9 +895,9 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
 	 * might leave some dead tuples lying around, but the next vacuum will
 	 * find them.  But even when aggressive *is* set, it's still OK if we miss
 	 * a page whose all-frozen marking has just been cleared.  Any new XIDs
-	 * just added to that page are necessarily newer than the GlobalXmin we
-	 * computed, so they'll have no effect on the value to which we can safely
-	 * set relfrozenxid.  A similar argument applies for MXIDs and relminmxid.
+	 * just added to that page are necessarily >= vacrel->OldestXmin, and so
+	 * they'll have no effect on the value to which we can safely set
+	 * relfrozenxid.  A similar argument applies for MXIDs and relminmxid.
 	 */
 	next_unskippable_block = 0;
 	if (vacrel->skipwithvm)
@@ -1153,7 +1168,7 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
 		 * were pruned some time earlier.  Also considers freezing XIDs in the
 		 * tuple headers of remaining items with storage.
 		 */
-		lazy_scan_prune(vacrel, buf, blkno, page, vistest, &prunestate);
+		lazy_scan_prune(vacrel, buf, blkno, page, &prunestate);
 
 		Assert(!prunestate.all_visible || !prunestate.has_lpdead_items);
 
@@ -1392,15 +1407,11 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
 		lazy_cleanup_all_indexes(vacrel);
 
 	/*
-	 * Free resources managed by dead_items_alloc.  This will end parallel
-	 * mode when needed (it must end before updating index statistics as we
-	 * can't write in parallel mode).
+	 * Free resources managed by dead_items_alloc.  This ends parallel mode in
+	 * passing when necessary.
 	 */
 	dead_items_cleanup(vacrel);
-
-	/* Update index statistics */
-	if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
-		update_index_statistics(vacrel);
+	Assert(!IsInParallelMode());
 }
 
 /*
@@ -1559,7 +1570,6 @@ lazy_scan_prune(LVRelState *vacrel,
 				Buffer buf,
 				BlockNumber blkno,
 				Page page,
-				GlobalVisState *vistest,
 				LVPagePruneState *prunestate)
 {
 	Relation	rel = vacrel->rel;
@@ -1598,7 +1608,7 @@ lazy_scan_prune(LVRelState *vacrel,
 	 * lpdead_items's final value can be thought of as the number of tuples
 	 * that were deleted from indexes.
 	 */
-	tuples_deleted = heap_page_prune(rel, buf, vistest,
+	tuples_deleted = heap_page_prune(rel, buf, vacrel->vistest,
 									 InvalidTransactionId, 0, &nnewlpdead,
 									 &vacrel->offnum);
 
@@ -2292,8 +2302,6 @@ lazy_vacuum_all_indexes(LVRelState *vacrel)
 	Assert(vacrel->nindexes > 0);
 	Assert(vacrel->do_index_vacuuming);
 	Assert(vacrel->do_index_cleanup);
-	Assert(TransactionIdIsNormal(vacrel->relfrozenxid));
-	Assert(MultiXactIdIsValid(vacrel->relminmxid));
 
 	/* Precheck for XID wraparound emergencies */
 	if (lazy_check_wraparound_failsafe(vacrel))
@@ -2604,6 +2612,9 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
 static bool
 lazy_check_wraparound_failsafe(LVRelState *vacrel)
 {
+	Assert(TransactionIdIsNormal(vacrel->relfrozenxid));
+	Assert(MultiXactIdIsValid(vacrel->relminmxid));
+
 	/* Don't warn more than once per VACUUM */
 	if (vacrel->failsafe_active)
 		return true;
@@ -2644,6 +2655,10 @@ lazy_check_wraparound_failsafe(LVRelState *vacrel)
 static void
 lazy_cleanup_all_indexes(LVRelState *vacrel)
 {
+	double		reltuples = vacrel->new_rel_tuples;
+	bool		estimated_count = vacrel->scanned_pages < vacrel->rel_pages;
+
+	Assert(vacrel->do_index_cleanup);
 	Assert(vacrel->nindexes > 0);
 
 	/* Report that we are now cleaning up indexes */
@@ -2652,10 +2667,6 @@ lazy_cleanup_all_indexes(LVRelState *vacrel)
 
 	if (!ParallelVacuumIsActive(vacrel))
 	{
-		double		reltuples = vacrel->new_rel_tuples;
-		bool		estimated_count =
-		vacrel->scanned_pages < vacrel->rel_pages;
-
 		for (int idx = 0; idx < vacrel->nindexes; idx++)
 		{
 			Relation	indrel = vacrel->indrels[idx];
@@ -2669,9 +2680,9 @@ lazy_cleanup_all_indexes(LVRelState *vacrel)
 	else
 	{
 		/* Outsource everything to parallel variant */
-		parallel_vacuum_cleanup_all_indexes(vacrel->pvs, vacrel->new_rel_tuples,
+		parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples,
 											vacrel->num_index_scans,
-											(vacrel->scanned_pages < vacrel->rel_pages));
+											estimated_count);
 	}
 }
 
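Hoisting `reltuples` and `estimated_count` into function-level locals guarantees the serial and parallel paths pass identical values to index cleanup. A standalone model of the `estimated_count` rule (illustration only; plain C, not PostgreSQL code):

```c
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int BlockNumber;

/*
 * reltuples handed to amvacuumcleanup is only exact when every heap page was
 * actually scanned; skipping any page via the visibility map makes it an
 * estimate, which index AMs may weigh differently.
 */
static bool
reltuples_is_estimate(BlockNumber scanned_pages, BlockNumber rel_pages)
{
	return scanned_pages < rel_pages;
}

int
main(void)
{
	printf("%d\n", reltuples_is_estimate(90, 100));		/* 1: pages skipped */
	printf("%d\n", reltuples_is_estimate(100, 100));	/* 0: full scan, exact */
	return 0;
}
```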
@@ -2797,27 +2808,23 @@ lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat,
 * Also don't attempt it if we are doing early pruning/vacuuming, because a
 * scan which cannot find a truncated heap page cannot determine that the
 * snapshot is too old to read that page.
- *
- * This is split out so that we can test whether truncation is going to be
- * called for before we actually do it.  If you change the logic here, be
- * careful to depend only on fields that lazy_scan_heap updates on-the-fly.
 */
static bool
should_attempt_truncation(LVRelState *vacrel)
{
	BlockNumber possibly_freeable;

-	if (!vacrel->do_rel_truncate || vacrel->failsafe_active)
+	if (!vacrel->do_rel_truncate || vacrel->failsafe_active ||
+		old_snapshot_threshold >= 0)
		return false;

	possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
	if (possibly_freeable > 0 &&
		(possibly_freeable >= REL_TRUNCATE_MINIMUM ||
-		 possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION) &&
-		old_snapshot_threshold < 0)
+		 possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION))
		return true;
-	else
-		return false;
+
+	return false;
}

/*
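The rewritten size test is easier to see with numbers. A standalone model (plain C, illustration only; the `do_rel_truncate`/`failsafe_active`/`old_snapshot_threshold` gates are elided, and the thresholds are copied from vacuumlazy.c):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t BlockNumber;

/* Thresholds as defined in vacuumlazy.c */
#define REL_TRUNCATE_MINIMUM	1000
#define REL_TRUNCATE_FRACTION	16

/* Model of the size test in should_attempt_truncation() */
static bool
truncation_worthwhile(BlockNumber rel_pages, BlockNumber nonempty_pages)
{
	BlockNumber possibly_freeable = rel_pages - nonempty_pages;

	return possibly_freeable > 0 &&
		(possibly_freeable >= REL_TRUNCATE_MINIMUM ||
		 possibly_freeable >= rel_pages / REL_TRUNCATE_FRACTION);
}

int
main(void)
{
	/* 10,000-page table, 512 trailing empty pages: 512 < 1000 and < 625 */
	printf("%d\n", truncation_worthwhile(10000, 10000 - 512));		/* 0 */
	/* Same table, 1,200 trailing empty pages: 1200 >= 1000 */
	printf("%d\n", truncation_worthwhile(10000, 10000 - 1200));	/* 1 */
	return 0;
}
```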
@@ -2835,6 +2842,10 @@ lazy_truncate_heap(LVRelState *vacrel)
 	pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
 								 PROGRESS_VACUUM_PHASE_TRUNCATE);
 
+	/* Update error traceback information one last time */
+	update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_TRUNCATE,
+							 vacrel->nonempty_pages, InvalidOffsetNumber);
+
 	/*
 	 * Loop until no more truncating can be done.
 	 */
@@ -3328,13 +3339,13 @@ heap_page_is_all_visible(LVRelState *vacrel, Buffer buf,
 * Update index statistics in pg_class if the statistics are accurate.
 */
static void
-update_index_statistics(LVRelState *vacrel)
+update_relstats_all_indexes(LVRelState *vacrel)
{
	Relation   *indrels = vacrel->indrels;
	int			nindexes = vacrel->nindexes;
	IndexBulkDeleteResult **indstats = vacrel->indstats;

-	Assert(!IsInParallelMode());
+	Assert(vacrel->do_index_cleanup);

	for (int idx = 0; idx < nindexes; idx++)
	{