Commit 56d2975

Fix performance problems with autovacuum truncation in busy workloads.
In situations where there are over 8MB of empty pages at the end of a table, the truncation work for trailing empty pages takes longer than deadlock_timeout, and there is frequent access to the table by processes other than autovacuum, there was a problem with the autovacuum worker process being canceled by the deadlock-checking code. The truncation work done by autovacuum up to that point was lost, and the attempt was repeated by a later autovacuum worker. The attempts could continue indefinitely without making progress, consuming resources and blocking other processes for up to deadlock_timeout each time.

This patch has the autovacuum worker check whether it is blocking any other process at 20ms intervals. If such a condition develops, the autovacuum worker persists the work it has done so far, releases its lock on the table, and sleeps in 50ms intervals for up to 5 seconds, hoping to re-acquire the lock and try again. If it is unable to get the lock in that time, it moves on, and a later worker will continue from the point this one left off.

While this patch doesn't change the rules about when and what to truncate, it does cause the truncation to occur sooner, with less blocking, and with the consumption of fewer resources when there is contention for the table's lock.

The only user-visible change other than improved performance is that the table size during truncation may shrink incrementally instead of all at once.

Backpatched to 9.0 from initial master commit at b19e425 -- before that the differences are too large to be clearly safe.

Jan Wieck
1 parent e4cfb5f commit 56d2975
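
The suspend-and-retry protocol described in the commit message can be summarized with the standalone sketch below. This is not PostgreSQL code: try_lock(), unlock(), waiters_exist() and truncate_batch() are hypothetical stand-ins for ConditionalLockRelation, UnlockRelation, LockHasWaitersRelation and the actual truncation pass; only the constants mirror the ones the patch introduces.

/*
 * Minimal sketch of the autovacuum truncate back-off protocol.
 * Stand-in functions model the real lock-manager and truncation calls.
 */
#include <stdbool.h>
#include <unistd.h>

#define LOCK_WAIT_INTERVAL_MS 50      /* sleep between lock attempts */
#define LOCK_TIMEOUT_MS       5000    /* give up after about 5 seconds */

static bool try_lock(void)       { return true; }   /* stand-in */
static void unlock(void)         { }                /* stand-in */
static bool waiters_exist(void)  { return false; }  /* stand-in */
static bool truncate_batch(void) { return false; }  /* true = more pages left */

static void
autovacuum_truncate(void)
{
    bool suspended;

    do
    {
        int retries = 0;

        /* Try to (re)acquire the exclusive lock, sleeping between attempts. */
        while (!try_lock())
        {
            if (++retries > LOCK_TIMEOUT_MS / LOCK_WAIT_INTERVAL_MS)
                return;             /* give up; a later worker resumes the work */
            usleep(LOCK_WAIT_INTERVAL_MS * 1000);
        }

        /*
         * Truncate in batches.  If another backend starts waiting on our
         * lock (the real code checks roughly every 20ms), keep what was
         * truncated so far, release the lock, and loop to try again.
         */
        suspended = false;
        while (truncate_batch())
        {
            if (waiters_exist())
            {
                suspended = true;
                break;
            }
        }

        unlock();
    } while (suspended);
}

int
main(void)
{
    autovacuum_truncate();
    return 0;
}

With these constants the re-acquisition loop makes at most 5000 / 50 = 100 attempts before leaving the remaining truncation to a later worker.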

File tree

5 files changed: 279 additions, 64 deletions


src/backend/commands/vacuumlazy.c

Lines changed: 166 additions & 64 deletions
@@ -46,6 +46,7 @@
 #include "commands/vacuum.h"
 #include "miscadmin.h"
 #include "pgstat.h"
+#include "portability/instr_time.h"
 #include "postmaster/autovacuum.h"
 #include "storage/bufmgr.h"
 #include "storage/freespace.h"
@@ -66,6 +67,17 @@
 #define REL_TRUNCATE_MINIMUM 1000
 #define REL_TRUNCATE_FRACTION 16

+/*
+ * Timing parameters for truncate locking heuristics.
+ *
+ * These were not exposed as user tunable GUC values because it didn't seem
+ * that the potential for improvement was great enough to merit the cost of
+ * supporting them.
+ */
+#define AUTOVACUUM_TRUNCATE_LOCK_CHECK_INTERVAL 20      /* ms */
+#define AUTOVACUUM_TRUNCATE_LOCK_WAIT_INTERVAL  50      /* ms */
+#define AUTOVACUUM_TRUNCATE_LOCK_TIMEOUT        5000    /* ms */
+
 /*
  * Guesstimation of number of dead tuples per page.  This is used to
  * provide an upper limit to memory allocated when vacuuming small
@@ -100,6 +112,7 @@ typedef struct LVRelStats
     ItemPointer dead_tuples;    /* array of ItemPointerData */
     int         num_index_scans;
     TransactionId latestRemovedXid;
+    bool        lock_waiter_detected;
 } LVRelStats;


@@ -183,6 +196,8 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
     vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
     vacrelstats->old_rel_tuples = onerel->rd_rel->reltuples;
     vacrelstats->num_index_scans = 0;
+    vacrelstats->pages_removed = 0;
+    vacrelstats->lock_waiter_detected = false;

     /* Open all indexes of the relation */
     vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
@@ -239,10 +254,17 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
                         vacrelstats->hasindex,
                         new_frozen_xid);

-    /* report results to the stats collector, too */
-    pgstat_report_vacuum(RelationGetRelid(onerel),
-                         onerel->rd_rel->relisshared,
-                         new_rel_tuples);
+    /*
+     * Report results to the stats collector, too. An early terminated
+     * lazy_truncate_heap attempt suppresses the message and also cancels the
+     * execution of ANALYZE, if that was ordered.
+     */
+    if (!vacrelstats->lock_waiter_detected)
+        pgstat_report_vacuum(RelationGetRelid(onerel),
+                             onerel->rd_rel->relisshared,
+                             new_rel_tuples);
+    else
+        vacstmt->options &= ~VACOPT_ANALYZE;

     /* and log the action if appropriate */
     if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
@@ -1087,80 +1109,124 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
     BlockNumber old_rel_pages = vacrelstats->rel_pages;
     BlockNumber new_rel_pages;
     PGRUsage    ru0;
+    int         lock_retry;

     pg_rusage_init(&ru0);

     /*
-     * We need full exclusive lock on the relation in order to do truncation.
-     * If we can't get it, give up rather than waiting --- we don't want to
-     * block other backends, and we don't want to deadlock (which is quite
-     * possible considering we already hold a lower-grade lock).
-     */
-    if (!ConditionalLockRelation(onerel, AccessExclusiveLock))
-        return;
-
-    /*
-     * Now that we have exclusive lock, look to see if the rel has grown
-     * whilst we were vacuuming with non-exclusive lock.  If so, give up; the
-     * newly added pages presumably contain non-deletable tuples.
+     * Loop until no more truncating can be done.
      */
-    new_rel_pages = RelationGetNumberOfBlocks(onerel);
-    if (new_rel_pages != old_rel_pages)
+    do
     {
         /*
-         * Note: we intentionally don't update vacrelstats->rel_pages with
-         * the new rel size here.  If we did, it would amount to assuming that
-         * the new pages are empty, which is unlikely.  Leaving the numbers
-         * alone amounts to assuming that the new pages have the same tuple
-         * density as existing ones, which is less unlikely.
+         * We need full exclusive lock on the relation in order to do
+         * truncation. If we can't get it, give up rather than waiting --- we
+         * don't want to block other backends, and we don't want to deadlock
+         * (which is quite possible considering we already hold a lower-grade
+         * lock).
          */
-        UnlockRelation(onerel, AccessExclusiveLock);
-        return;
-    }
+        vacrelstats->lock_waiter_detected = false;
+        lock_retry = 0;
+        while (true)
+        {
+            if (ConditionalLockRelation(onerel, AccessExclusiveLock))
+                break;

-    /*
-     * Scan backwards from the end to verify that the end pages actually
-     * contain no tuples.  This is *necessary*, not optional, because other
-     * backends could have added tuples to these pages whilst we were
-     * vacuuming.
-     */
-    new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
+            /*
+             * Check for interrupts while trying to (re-)acquire the exclusive
+             * lock.
+             */
+            CHECK_FOR_INTERRUPTS();

-    if (new_rel_pages >= old_rel_pages)
-    {
-        /* can't do anything after all */
-        UnlockRelation(onerel, AccessExclusiveLock);
-        return;
-    }
+            if (++lock_retry > (AUTOVACUUM_TRUNCATE_LOCK_TIMEOUT /
+                                AUTOVACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
+            {
+                /*
+                 * We failed to establish the lock in the specified number of
+                 * retries. This means we give up truncating. Suppress the
+                 * ANALYZE step. Doing an ANALYZE at this point will reset the
+                 * dead_tuple_count in the stats collector, so we will not get
+                 * called by the autovacuum launcher again to do the truncate.
+                 */
+                vacrelstats->lock_waiter_detected = true;
+                ereport(LOG,
+                        (errmsg("automatic vacuum of table \"%s.%s.%s\": "
+                                "cannot (re)acquire exclusive "
+                                "lock for truncate scan",
+                                get_database_name(MyDatabaseId),
+                                get_namespace_name(RelationGetNamespace(onerel)),
+                                RelationGetRelationName(onerel))));
+                return;
+            }

-    /*
-     * Okay to truncate.
-     */
-    RelationTruncate(onerel, new_rel_pages);
+            pg_usleep(AUTOVACUUM_TRUNCATE_LOCK_WAIT_INTERVAL);
+        }

-    /*
-     * We can release the exclusive lock as soon as we have truncated.  Other
-     * backends can't safely access the relation until they have processed the
-     * smgr invalidation that smgrtruncate sent out ... but that should happen
-     * as part of standard invalidation processing once they acquire lock on
-     * the relation.
-     */
-    UnlockRelation(onerel, AccessExclusiveLock);
+        /*
+         * Now that we have exclusive lock, look to see if the rel has grown
+         * whilst we were vacuuming with non-exclusive lock.  If so, give up;
+         * the newly added pages presumably contain non-deletable tuples.
+         */
+        new_rel_pages = RelationGetNumberOfBlocks(onerel);
+        if (new_rel_pages != old_rel_pages)
+        {
+            /*
+             * Note: we intentionally don't update vacrelstats->rel_pages with
+             * the new rel size here.  If we did, it would amount to assuming
+             * that the new pages are empty, which is unlikely.  Leaving the
+             * numbers alone amounts to assuming that the new pages have the
+             * same tuple density as existing ones, which is less unlikely.
+             */
+            UnlockRelation(onerel, AccessExclusiveLock);
+            return;
+        }

-    /*
-     * Update statistics.  Here, it *is* correct to adjust rel_pages without
-     * also touching reltuples, since the tuple count wasn't changed by the
-     * truncation.
-     */
-    vacrelstats->rel_pages = new_rel_pages;
-    vacrelstats->pages_removed = old_rel_pages - new_rel_pages;
+        /*
+         * Scan backwards from the end to verify that the end pages actually
+         * contain no tuples.  This is *necessary*, not optional, because
+         * other backends could have added tuples to these pages whilst we
+         * were vacuuming.
+         */
+        new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);

-    ereport(elevel,
-            (errmsg("\"%s\": truncated %u to %u pages",
-                    RelationGetRelationName(onerel),
-                    old_rel_pages, new_rel_pages),
-             errdetail("%s.",
-                       pg_rusage_show(&ru0))));
+        if (new_rel_pages >= old_rel_pages)
+        {
+            /* can't do anything after all */
+            UnlockRelation(onerel, AccessExclusiveLock);
+            return;
+        }
+
+        /*
+         * Okay to truncate.
+         */
+        RelationTruncate(onerel, new_rel_pages);
+
+        /*
+         * We can release the exclusive lock as soon as we have truncated.
+         * Other backends can't safely access the relation until they have
+         * processed the smgr invalidation that smgrtruncate sent out ... but
+         * that should happen as part of standard invalidation processing once
+         * they acquire lock on the relation.
+         */
+        UnlockRelation(onerel, AccessExclusiveLock);
+
+        /*
+         * Update statistics.  Here, it *is* correct to adjust rel_pages
+         * without also touching reltuples, since the tuple count wasn't
+         * changed by the truncation.
+         */
+        vacrelstats->pages_removed += old_rel_pages - new_rel_pages;
+        vacrelstats->rel_pages = new_rel_pages;
+
+        ereport(elevel,
+                (errmsg("\"%s\": truncated %u to %u pages",
+                        RelationGetRelationName(onerel),
+                        old_rel_pages, new_rel_pages),
+                 errdetail("%s.",
+                           pg_rusage_show(&ru0))));
+        old_rel_pages = new_rel_pages;
+    } while (new_rel_pages > vacrelstats->nonempty_pages &&
+             vacrelstats->lock_waiter_detected);
 }

 /*
@@ -1172,6 +1238,12 @@ static BlockNumber
 count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 {
     BlockNumber blkno;
+    instr_time  starttime;
+    instr_time  currenttime;
+    instr_time  elapsed;
+
+    /* Initialize the starttime if we check for conflicting lock requests */
+    INSTR_TIME_SET_CURRENT(starttime);

     /* Strange coding of loop control is needed because blkno is unsigned */
     blkno = vacrelstats->rel_pages;
@@ -1183,6 +1255,36 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
                     maxoff;
         bool        hastup;

+        /*
+         * Check if another process requests a lock on our relation. We are
+         * holding an AccessExclusiveLock here, so they will be waiting. We
+         * only do this in autovacuum_truncate_lock_check millisecond
+         * intervals, and we only check if that interval has elapsed once
+         * every 32 blocks to keep the number of system calls and actual
+         * shared lock table lookups to a minimum.
+         */
+        if ((blkno % 32) == 0)
+        {
+            INSTR_TIME_SET_CURRENT(currenttime);
+            elapsed = currenttime;
+            INSTR_TIME_SUBTRACT(elapsed, starttime);
+            if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
+                >= AUTOVACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
+            {
+                if (LockHasWaitersRelation(onerel, AccessExclusiveLock))
+                {
+                    ereport(elevel,
+                            (errmsg("\"%s\": suspending truncate "
+                                    "due to conflicting lock request",
+                                    RelationGetRelationName(onerel))));
+
+                    vacrelstats->lock_waiter_detected = true;
+                    return blkno;
+                }
+                starttime = currenttime;
+            }
+        }
+
         /*
          * We don't insert a vacuum delay point here, because we have an
          * exclusive lock on the table which we want to hold for as short a
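
The waiter check added to count_nondeletable_pages above is throttled in two ways: the clock is read only once every 32 blocks, and the shared lock table is polled only when at least the 20ms check interval has elapsed. The standalone sketch below approximates that pattern, assuming POSIX clock_gettime in place of PostgreSQL's instr_time macros; lock_has_waiters() and process_block() are hypothetical stand-ins for LockHasWaitersRelation and the per-page emptiness check.

/* Throttled "is anyone waiting on my lock?" check during a backward scan. */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define CHECK_INTERVAL_MS 20    /* mirrors AUTOVACUUM_TRUNCATE_LOCK_CHECK_INTERVAL */

static bool lock_has_waiters(void)          { return false; }  /* stand-in */
static void process_block(unsigned blkno)   { (void) blkno; }  /* stand-in */

static int64_t
elapsed_ms(const struct timespec *start, const struct timespec *now)
{
    return (int64_t) (now->tv_sec - start->tv_sec) * 1000 +
           (now->tv_nsec - start->tv_nsec) / 1000000;
}

/* Returns the block number at which the scan suspended, or 0 if it finished. */
static unsigned
scan_backwards(unsigned rel_pages)
{
    struct timespec starttime;
    unsigned    blkno = rel_pages;

    clock_gettime(CLOCK_MONOTONIC, &starttime);

    while (blkno > 0)
    {
        blkno--;

        /*
         * Read the clock only once every 32 blocks, and poll for waiters only
         * when at least CHECK_INTERVAL_MS have elapsed, keeping system calls
         * and shared-lock-table lookups to a minimum.
         */
        if ((blkno % 32) == 0)
        {
            struct timespec now;

            clock_gettime(CLOCK_MONOTONIC, &now);
            if (elapsed_ms(&starttime, &now) >= CHECK_INTERVAL_MS)
            {
                if (lock_has_waiters())
                    return blkno;   /* suspend and remember where we stopped */
                starttime = now;
            }
        }

        process_block(blkno);
    }
    return 0;
}

int
main(void)
{
    (void) scan_backwards(4096);
    return 0;
}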

src/backend/storage/lmgr/lmgr.c

Lines changed: 18 additions & 0 deletions
@@ -231,6 +231,24 @@ UnlockRelation(Relation relation, LOCKMODE lockmode)
     LockRelease(&tag, lockmode, false);
 }

+/*
+ * LockHasWaitersRelation
+ *
+ * This is a function to check whether someone else is waiting on a
+ * lock that we are currently holding.
+ */
+bool
+LockHasWaitersRelation(Relation relation, LOCKMODE lockmode)
+{
+    LOCKTAG     tag;
+
+    SET_LOCKTAG_RELATION(tag,
+                         relation->rd_lockInfo.lockRelId.dbId,
+                         relation->rd_lockInfo.lockRelId.relId);
+
+    return LockHasWaiters(&tag, lockmode, false);
+}
+
 /*
  * LockRelationIdForSession
  *
