@@ -315,6 +315,10 @@ typedef struct LVRelStats
 	TransactionId latestRemovedXid;
 	bool		lock_waiter_detected;
 
+	/* Statistics about indexes */
+	IndexBulkDeleteResult **indstats;
+	int			nindexes;
+
 	/* Used for error callback */
 	char	   *indname;
 	BlockNumber blkno;			/* used only for heap operations */
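The two fields added to LVRelStats above let the rest of the file reach every index's IndexBulkDeleteResult through vacrelstats instead of a separately threaded stats array. A minimal sketch (not part of the patch) of how code holding a populated LVRelStats would walk these fields; the totalling variable is hypothetical:

```c
/* Sketch only: assumes indstats/nindexes were filled in by heap_vacuum_rel()
 * as in the later hunks; entries stay NULL for indexes the AM reported
 * nothing for. */
BlockNumber pages_deleted_total = 0;

for (int i = 0; i < vacrelstats->nindexes; i++)
{
	IndexBulkDeleteResult *istat = vacrelstats->indstats[i];

	if (istat == NULL)
		continue;
	pages_deleted_total += istat->pages_deleted;
}
```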
@@ -348,7 +352,6 @@ static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
 static bool lazy_check_needs_freeze(Buffer buf, bool *hastup,
 									LVRelStats *vacrelstats);
 static void lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
-									IndexBulkDeleteResult **stats,
 									LVRelStats *vacrelstats, LVParallelState *lps,
 									int nindexes);
 static void lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
@@ -371,21 +374,18 @@ static int vac_cmp_itemptr(const void *left, const void *right);
 static bool heap_page_is_all_visible(Relation rel, Buffer buf,
 									 LVRelStats *vacrelstats,
 									 TransactionId *visibility_cutoff_xid, bool *all_frozen);
-static void lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
-										 LVRelStats *vacrelstats, LVParallelState *lps,
-										 int nindexes);
-static void parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
-								  LVShared *lvshared, LVDeadTuples *dead_tuples,
-								  int nindexes, LVRelStats *vacrelstats);
-static void vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
-								  LVRelStats *vacrelstats, LVParallelState *lps,
-								  int nindexes);
+static void lazy_parallel_vacuum_indexes(Relation *Irel, LVRelStats *vacrelstats,
+										 LVParallelState *lps, int nindexes);
+static void parallel_vacuum_index(Relation *Irel, LVShared *lvshared,
+								  LVDeadTuples *dead_tuples, int nindexes,
+								  LVRelStats *vacrelstats);
+static void vacuum_indexes_leader(Relation *Irel, LVRelStats *vacrelstats,
+								  LVParallelState *lps, int nindexes);
 static void vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
 							 LVShared *lvshared, LVSharedIndStats *shared_indstats,
 							 LVDeadTuples *dead_tuples, LVRelStats *vacrelstats);
-static void lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
-									 LVRelStats *vacrelstats, LVParallelState *lps,
-									 int nindexes);
+static void lazy_cleanup_all_indexes(Relation *Irel, LVRelStats *vacrelstats,
+									 LVParallelState *lps, int nindexes);
 static long compute_max_dead_tuples(BlockNumber relblocks, bool hasindex);
 static int	compute_parallel_vacuum_workers(Relation *Irel, int nindexes, int nrequested,
 											bool *can_parallel_vacuum);
@@ -433,6 +433,7 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 				write_rate;
 	bool		aggressive;		/* should we scan all unfrozen pages? */
 	bool		scanned_all_unfrozen;	/* actually scanned all such pages? */
+	char	  **indnames = NULL;
 	TransactionId xidFullScanLimit;
 	MultiXactId mxactFullScanLimit;
 	BlockNumber new_rel_pages;
@@ -512,6 +513,20 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 	vacrelstats->useindex = (nindexes > 0 &&
 							 params->index_cleanup == VACOPT_TERNARY_ENABLED);
 
+	vacrelstats->indstats = (IndexBulkDeleteResult **)
+		palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
+	vacrelstats->nindexes = nindexes;
+
+	/* Save index names iff autovacuum logging requires it */
+	if (IsAutoVacuumWorkerProcess() &&
+		params->log_min_duration >= 0 &&
+		vacrelstats->nindexes > 0)
+	{
+		indnames = palloc(sizeof(char *) * vacrelstats->nindexes);
+		for (int i = 0; i < vacrelstats->nindexes; i++)
+			indnames[i] = pstrdup(RelationGetRelationName(Irel[i]));
+	}
+
 	/*
 	 * Setup error traceback support for ereport().  The idea is to set up an
 	 * error context callback to display additional information on any error
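The names are copied with pstrdup() here, presumably because the Relation handles in Irel[] are closed again before the final autovacuum log message is assembled, at which point RelationGetRelationName() would no longer be usable. A hedged sketch of that save-then-free lifecycle in isolation (variable names are illustrative only):

```c
/* Sketch, not the patch itself: copy the names while Irel[] is still open,
 * use the copies after the relations are closed, then free them. */
char	  **names = NULL;

if (nindexes > 0)
{
	names = palloc(sizeof(char *) * nindexes);
	for (int i = 0; i < nindexes; i++)
		names[i] = pstrdup(RelationGetRelationName(Irel[i]));
}

/* ... indexes are closed, vacuuming finishes, the log report is built ... */

if (names != NULL)
{
	for (int i = 0; i < nindexes; i++)
		pfree(names[i]);
	pfree(names);
}
```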
@@ -680,6 +695,21 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 							 (long long) VacuumPageHit,
 							 (long long) VacuumPageMiss,
 							 (long long) VacuumPageDirty);
+			for (int i = 0; i < vacrelstats->nindexes; i++)
+			{
+				IndexBulkDeleteResult *stats = vacrelstats->indstats[i];
+
+				if (!stats)
+					continue;
+
+				appendStringInfo(&buf,
+								 _("index \"%s\": pages: %u remain, %u newly deleted, %u currently deleted, %u reusable\n"),
+								 indnames[i],
+								 stats->num_pages,
+								 stats->pages_newly_deleted,
+								 stats->pages_deleted,
+								 stats->pages_free);
+			}
 			appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
 							 read_rate, write_rate);
 			if (track_io_timing)
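The per-index lines are appended to the same StringInfo buffer that heap_vacuum_rel() already uses for the autovacuum report, and only for indexes whose stats pointer is non-NULL. A small self-contained sketch of that StringInfo pattern with placeholder values (the function and its counters are made up for illustration):

```c
#include "postgres.h"
#include "lib/stringinfo.h"
#include "storage/block.h"

/* Sketch: build and emit a one-index report the same general way
 * heap_vacuum_rel() does; idxname and the counters are placeholders. */
static void
report_index_pages(const char *idxname, BlockNumber num_pages,
				   BlockNumber pages_deleted, BlockNumber pages_free)
{
	StringInfoData buf;

	initStringInfo(&buf);
	appendStringInfo(&buf,
					 "index \"%s\": pages: %u remain, %u deleted, %u reusable\n",
					 idxname, num_pages, pages_deleted, pages_free);
	ereport(LOG, (errmsg_internal("%s", buf.data)));
	pfree(buf.data);
}
```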
@@ -705,6 +735,16 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 			pfree(buf.data);
 		}
 	}
+
+	/* Cleanup index statistics and index names */
+	for (int i = 0; i < vacrelstats->nindexes; i++)
+	{
+		if (vacrelstats->indstats[i])
+			pfree(vacrelstats->indstats[i]);
+
+		if (indnames && indnames[i])
+			pfree(indnames[i]);
+	}
 }
 
 /*
@@ -787,7 +827,6 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 				tups_vacuumed, /* tuples cleaned up by current vacuum */
 				nkeep,			/* dead-but-not-removable tuples */
 				nunused;		/* # existing unused line pointers */
-	IndexBulkDeleteResult **indstats;
 	int			i;
 	PGRUsage	ru0;
 	Buffer		vmbuffer = InvalidBuffer;
@@ -820,9 +859,6 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	next_fsm_block_to_vacuum = (BlockNumber) 0;
 	num_tuples = live_tuples = tups_vacuumed = nkeep = nunused = 0;
 
-	indstats = (IndexBulkDeleteResult **)
-		palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
-
 	nblocks = RelationGetNumberOfBlocks(onerel);
 	vacrelstats->rel_pages = nblocks;
 	vacrelstats->scanned_pages = 0;
@@ -1070,8 +1106,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		}
 
 		/* Work on all the indexes, then the heap */
-		lazy_vacuum_all_indexes(onerel, Irel, indstats,
-								vacrelstats, lps, nindexes);
+		lazy_vacuum_all_indexes(onerel, Irel, vacrelstats, lps, nindexes);
 
 		/* Remove tuples from heap */
 		lazy_vacuum_heap(onerel, vacrelstats);
@@ -1728,8 +1763,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	if (dead_tuples->num_tuples > 0)
 	{
 		/* Work on all the indexes, and then the heap */
-		lazy_vacuum_all_indexes(onerel, Irel, indstats, vacrelstats,
-								lps, nindexes);
+		lazy_vacuum_all_indexes(onerel, Irel, vacrelstats, lps, nindexes);
 
 		/* Remove tuples from heap */
 		lazy_vacuum_heap(onerel, vacrelstats);
@@ -1747,18 +1781,18 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 
 	/* Do post-vacuum cleanup */
 	if (vacrelstats->useindex)
-		lazy_cleanup_all_indexes(Irel, indstats, vacrelstats, lps, nindexes);
+		lazy_cleanup_all_indexes(Irel, vacrelstats, lps, nindexes);
 
 	/*
 	 * End parallel mode before updating index statistics as we cannot write
 	 * during parallel mode.
 	 */
 	if (ParallelVacuumIsActive(lps))
-		end_parallel_vacuum(indstats, lps, nindexes);
+		end_parallel_vacuum(vacrelstats->indstats, lps, nindexes);
 
 	/* Update index statistics */
 	if (vacrelstats->useindex)
-		update_index_statistics(Irel, indstats, nindexes);
+		update_index_statistics(Irel, vacrelstats->indstats, nindexes);
 
 	/* If no indexes, make log report that lazy_vacuum_heap would've made */
 	if (vacuumed_pages)
@@ -1803,7 +1837,6 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
  */
 static void
 lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
-						IndexBulkDeleteResult **stats,
 						LVRelStats *vacrelstats, LVParallelState *lps,
 						int nindexes)
 {
@@ -1831,14 +1864,15 @@ lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
 		lps->lvshared->reltuples = vacrelstats->old_live_tuples;
 		lps->lvshared->estimated_count = true;
 
-		lazy_parallel_vacuum_indexes(Irel, stats, vacrelstats, lps, nindexes);
+		lazy_parallel_vacuum_indexes(Irel, vacrelstats, lps, nindexes);
 	}
 	else
 	{
 		int			idx;
 
 		for (idx = 0; idx < nindexes; idx++)
-			lazy_vacuum_index(Irel[idx], &stats[idx], vacrelstats->dead_tuples,
+			lazy_vacuum_index(Irel[idx], &(vacrelstats->indstats[idx]),
+							  vacrelstats->dead_tuples,
 							  vacrelstats->old_live_tuples, vacrelstats);
 	}
 
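In the serial path above, each index AM now receives a pointer to the slot inside vacrelstats->indstats rather than into a caller-local array. That matters because, under the usual index_bulk_delete() contract, the AM allocates the IndexBulkDeleteResult on its first call and updates the same struct on later passes, so the slot has to persist across the multiple vacuum rounds of one VACUUM. A hedged sketch of that handoff (assuming that contract):

```c
/* Sketch of the pointer handoff, assuming the standard index_bulk_delete()
 * behaviour: NULL on entry for the first pass, then the same palloc'd
 * struct is carried forward via vacrelstats->indstats[idx]. */
IndexVacuumInfo ivinfo;
IndexBulkDeleteResult *istat = vacrelstats->indstats[idx];

/* ... fill in ivinfo for Irel[idx] ... */
istat = index_bulk_delete(&ivinfo, istat,
						  lazy_tid_reaped, (void *) vacrelstats->dead_tuples);
vacrelstats->indstats[idx] = istat; /* keep it for the next pass */
```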
@@ -2109,9 +2143,8 @@ lazy_check_needs_freeze(Buffer buf, bool *hastup, LVRelStats *vacrelstats)
  * cleanup.
  */
 static void
-lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
-							 LVRelStats *vacrelstats, LVParallelState *lps,
-							 int nindexes)
+lazy_parallel_vacuum_indexes(Relation *Irel, LVRelStats *vacrelstats,
+							 LVParallelState *lps, int nindexes)
 {
 	int			nworkers;
 
@@ -2199,14 +2232,14 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 	}
 
 	/* Process the indexes that can be processed by only leader process */
-	vacuum_indexes_leader(Irel, stats, vacrelstats, lps, nindexes);
+	vacuum_indexes_leader(Irel, vacrelstats, lps, nindexes);
 
 	/*
 	 * Join as a parallel worker.  The leader process alone processes all the
 	 * indexes in the case where no workers are launched.
 	 */
-	parallel_vacuum_index(Irel, stats, lps->lvshared,
-						  vacrelstats->dead_tuples, nindexes, vacrelstats);
+	parallel_vacuum_index(Irel, lps->lvshared, vacrelstats->dead_tuples,
+						  nindexes, vacrelstats);
 
 	/*
 	 * Next, accumulate buffer and WAL usage.  (This must wait for the workers
@@ -2239,9 +2272,9 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
  * vacuum worker processes to process the indexes in parallel.
  */
 static void
-parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
-					  LVShared *lvshared, LVDeadTuples *dead_tuples,
-					  int nindexes, LVRelStats *vacrelstats)
+parallel_vacuum_index(Relation *Irel, LVShared *lvshared,
+					  LVDeadTuples *dead_tuples, int nindexes,
+					  LVRelStats *vacrelstats)
 {
 	/*
 	 * Increment the active worker count if we are able to launch any worker.
@@ -2274,8 +2307,8 @@ parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
 			continue;
 
 		/* Do vacuum or cleanup of the index */
-		vacuum_one_index(Irel[idx], &(stats[idx]), lvshared, shared_indstats,
-						 dead_tuples, vacrelstats);
+		vacuum_one_index(Irel[idx], &(vacrelstats->indstats[idx]), lvshared,
+						 shared_indstats, dead_tuples, vacrelstats);
 	}
 
 	/*
@@ -2291,9 +2324,8 @@ parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
  * because these indexes don't support parallel operation at that phase.
  */
 static void
-vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
-					  LVRelStats *vacrelstats, LVParallelState *lps,
-					  int nindexes)
+vacuum_indexes_leader(Relation *Irel, LVRelStats *vacrelstats,
+					  LVParallelState *lps, int nindexes)
 {
 	int			i;
 
@@ -2314,7 +2346,7 @@ vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
 		/* Process the indexes skipped by parallel workers */
 		if (shared_indstats == NULL ||
 			skip_parallel_vacuum_index(Irel[i], lps->lvshared))
-			vacuum_one_index(Irel[i], &(stats[i]), lps->lvshared,
+			vacuum_one_index(Irel[i], &(vacrelstats->indstats[i]), lps->lvshared,
 							 shared_indstats, vacrelstats->dead_tuples,
 							 vacrelstats);
 	}
@@ -2394,9 +2426,8 @@ vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
  * parallel vacuum.
  */
 static void
-lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
-						 LVRelStats *vacrelstats, LVParallelState *lps,
-						 int nindexes)
+lazy_cleanup_all_indexes(Relation *Irel, LVRelStats *vacrelstats,
+						 LVParallelState *lps, int nindexes)
 {
 	int			idx;
 
@@ -2427,12 +2458,12 @@ lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 		lps->lvshared->estimated_count =
 			(vacrelstats->tupcount_pages < vacrelstats->rel_pages);
 
-		lazy_parallel_vacuum_indexes(Irel, stats, vacrelstats, lps, nindexes);
+		lazy_parallel_vacuum_indexes(Irel, vacrelstats, lps, nindexes);
 	}
 	else
 	{
 		for (idx = 0; idx < nindexes; idx++)
-			lazy_cleanup_index(Irel[idx], &stats[idx],
+			lazy_cleanup_index(Irel[idx], &(vacrelstats->indstats[idx]),
 							   vacrelstats->new_rel_tuples,
 							   vacrelstats->tupcount_pages < vacrelstats->rel_pages,
 							   vacrelstats);
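The cleanup phase writes its result back into the same per-index slot, which is why the logging loop earlier checks for NULL: under the usual index_vacuum_cleanup() contract the AM may update the bulk-delete result, allocate a fresh one, or return NULL when it has nothing to report. A short sketch under that assumption:

```c
/* Sketch (assumed index_vacuum_cleanup() contract): reuse the bulk-delete
 * result if there was one; the return value may still be NULL. */
IndexBulkDeleteResult *istat = vacrelstats->indstats[idx];

istat = index_vacuum_cleanup(&ivinfo, istat);
vacrelstats->indstats[idx] = istat;
```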
@@ -3243,7 +3274,6 @@ update_index_statistics(Relation *Irel, IndexBulkDeleteResult **stats,
 							InvalidTransactionId,
 							InvalidMultiXactId,
 							false);
-		pfree(stats[i]);
 	}
 }
 
@@ -3550,7 +3580,6 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 	WalUsage   *wal_usage;
 	int			nindexes;
 	char	   *sharedquery;
-	IndexBulkDeleteResult **stats;
 	LVRelStats	vacrelstats;
 	ErrorContextCallback errcallback;
 
@@ -3597,7 +3626,7 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 	VacuumSharedCostBalance = &(lvshared->cost_balance);
 	VacuumActiveNWorkers = &(lvshared->active_nworkers);
 
-	stats = (IndexBulkDeleteResult **)
+	vacrelstats.indstats = (IndexBulkDeleteResult **)
 		palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
 
 	if (lvshared->maintenance_work_mem_worker > 0)
@@ -3622,7 +3651,7 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 	InstrStartParallelQuery();
 
 	/* Process indexes to perform vacuum/cleanup */
-	parallel_vacuum_index(indrels, stats, lvshared, dead_tuples, nindexes,
+	parallel_vacuum_index(indrels, lvshared, dead_tuples, nindexes,
 						  &vacrelstats);
 
 	/* Report buffer/WAL usage during parallel execution */
@@ -3636,7 +3665,7 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 
 	vac_close_indexes(nindexes, indrels, RowExclusiveLock);
 	table_close(onerel, ShareUpdateExclusiveLock);
-	pfree(stats);
+	pfree(vacrelstats.indstats);
 }
 
 /*