@@ -246,7 +246,7 @@ typedef struct LVSavedErrInfo
 } LVSavedErrInfo;
 
 /* non-export function prototypes */
-static void lazy_scan_heap(LVRelState *vacrel, int nworkers);
+static void lazy_scan_heap(LVRelState *vacrel);
 static BlockNumber lazy_scan_skip(LVRelState *vacrel, Buffer *vmbuffer,
                                   BlockNumber next_block,
                                   bool *next_unskippable_allvis,
@@ -514,11 +514,28 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
     vacrel->NewRelminMxid = OldestMxact;
     vacrel->skippedallvis = false;
 
+    /*
+     * Allocate dead_items array memory using dead_items_alloc. This handles
+     * parallel VACUUM initialization as part of allocating shared memory
+     * space used for dead_items. (But do a failsafe precheck first, to
+     * ensure that parallel VACUUM won't be attempted at all when relfrozenxid
+     * is already dangerously old.)
+     */
+    lazy_check_wraparound_failsafe(vacrel);
+    dead_items_alloc(vacrel, params->nworkers);
+
     /*
      * Call lazy_scan_heap to perform all required heap pruning, index
      * vacuuming, and heap vacuuming (plus related processing)
      */
-    lazy_scan_heap(vacrel, params->nworkers);
+    lazy_scan_heap(vacrel);
+
+    /*
+     * Free resources managed by dead_items_alloc. This ends parallel mode in
+     * passing when necessary.
+     */
+    dead_items_cleanup(vacrel);
+    Assert(!IsInParallelMode());
 
     /*
      * Update pg_class entries for each of rel's indexes where appropriate.
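Taken together, the additions above move the wraparound-failsafe precheck and the dead_items allocation/cleanup out of lazy_scan_heap() and into its caller. A minimal sketch of the resulting order of operations in heap_vacuum_rel(), abridged from the hunk above (surrounding code omitted):

    /* heap_vacuum_rel(), after this change -- abridged sketch */
    lazy_check_wraparound_failsafe(vacrel); /* precheck: no parallel VACUUM
                                             * when relfrozenxid is already
                                             * dangerously old */
    dead_items_alloc(vacrel, params->nworkers);     /* may enter parallel mode */

    lazy_scan_heap(vacrel);                 /* scan; nworkers no longer passed */

    dead_items_cleanup(vacrel);             /* ends parallel mode when needed */
    Assert(!IsInParallelMode());

This keeps entering and exiting parallel mode visibly paired within one function, instead of hiding both inside the scan routine.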
@@ -825,14 +842,14 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
  *      supply.
  */
 static void
-lazy_scan_heap(LVRelState *vacrel, int nworkers)
+lazy_scan_heap(LVRelState *vacrel)
 {
-    VacDeadItems *dead_items;
     BlockNumber rel_pages = vacrel->rel_pages,
                 blkno,
                 next_unskippable_block,
-                next_failsafe_block,
-                next_fsm_block_to_vacuum;
+                next_failsafe_block = 0,
+                next_fsm_block_to_vacuum = 0;
+    VacDeadItems *dead_items = vacrel->dead_items;
     Buffer      vmbuffer = InvalidBuffer;
     bool        next_unskippable_allvis,
                 skipping_current_range;
@@ -843,23 +860,6 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
     };
     int64       initprog_val[3];
 
-    /*
-     * Do failsafe precheck before calling dead_items_alloc. This ensures
-     * that parallel VACUUM won't be attempted when relfrozenxid is already
-     * dangerously old.
-     */
-    lazy_check_wraparound_failsafe(vacrel);
-    next_failsafe_block = 0;
-
-    /*
-     * Allocate the space for dead_items. Note that this handles parallel
-     * VACUUM initialization as part of allocating shared memory space used
-     * for dead_items.
-     */
-    dead_items_alloc(vacrel, nworkers);
-    dead_items = vacrel->dead_items;
-    next_fsm_block_to_vacuum = 0;
-
     /* Report that we're scanning the heap, advertising total # of blocks */
     initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
     initprog_val[1] = rel_pages;
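With the allocation hoisted into the caller, lazy_scan_heap() loses its entire setup block: it simply reads the array that heap_vacuum_rel() has already allocated, and the two block counters are initialized at their declarations. Assembled from the two hunks above, the function now opens roughly as follows (abridged; body omitted):

    static void
    lazy_scan_heap(LVRelState *vacrel)
    {
        BlockNumber rel_pages = vacrel->rel_pages,
                    blkno,
                    next_unskippable_block,
                    next_failsafe_block = 0,
                    next_fsm_block_to_vacuum = 0;
        VacDeadItems *dead_items = vacrel->dead_items;  /* allocated by caller */

        /* ... scan loop proceeds as before ... */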
@@ -1236,12 +1236,13 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
         }
     }
 
+    vacrel->blkno = InvalidBlockNumber;
+    if (BufferIsValid(vmbuffer))
+        ReleaseBuffer(vmbuffer);
+
     /* report that everything is now scanned */
     pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
 
-    /* Clear the block number information */
-    vacrel->blkno = InvalidBlockNumber;
-
     /* now we can compute the new value for pg_class.reltuples */
     vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, rel_pages,
                                                      vacrel->scanned_pages,
@@ -1256,15 +1257,9 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
         vacrel->missed_dead_tuples;
 
     /*
-     * Release any remaining pin on visibility map page.
+     * Do index vacuuming (call each index's ambulkdelete routine), then do
+     * related heap vacuuming
      */
-    if (BufferIsValid(vmbuffer))
-    {
-        ReleaseBuffer(vmbuffer);
-        vmbuffer = InvalidBuffer;
-    }
-
-    /* Perform a final round of index and heap vacuuming */
     if (dead_items->num_items > 0)
         lazy_vacuum(vacrel);
 
@@ -1278,16 +1273,9 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
     /* report all blocks vacuumed */
     pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
 
-    /* Do post-vacuum cleanup */
+    /* Do final index cleanup (call each index's amvacuumcleanup routine) */
     if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
         lazy_cleanup_all_indexes(vacrel);
-
-    /*
-     * Free resources managed by dead_items_alloc. This ends parallel mode in
-     * passing when necessary.
-     */
-    dead_items_cleanup(vacrel);
-    Assert(!IsInParallelMode());
 }
 
 /*
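The last three hunks reorder the end of lazy_scan_heap(): the visibility map pin is now dropped (and vacrel->blkno reset) immediately after the scan loop rather than just before the final vacuuming pass, and the function now ends at index cleanup, leaving dead_items teardown to heap_vacuum_rel(). Abridged from the hunks above, the tail of the function now reads:

    vacrel->blkno = InvalidBlockNumber;
    if (BufferIsValid(vmbuffer))
        ReleaseBuffer(vmbuffer);        /* drop vm pin right after the scan */

    /* ... reltuples estimate and progress reporting ... */

    if (dead_items->num_items > 0)
        lazy_vacuum(vacrel);            /* final index + heap vacuuming pass */

    /* ... FSM vacuuming and "blocks vacuumed" reporting ... */

    if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
        lazy_cleanup_all_indexes(vacrel);   /* amvacuumcleanup for each index */
}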