
Commit 0f8977b

Detect the deadlocks between backends and the startup process.
Deadlocks involving a recovery conflict on a lock can arise between hot-standby backends and the startup process. If a backend takes an access exclusive lock on a table and that lock completes the deadlock cycle, the deadlock is detected as expected. But previously, if the startup process took the access exclusive lock that completed the cycle, the deadlock was never detected and could persist even after deadlock_timeout had passed. This is a bug.

The cause was that the code handling a recovery conflict on a lock did not consider the deadlock case at all. It assumed that deadlocks between the startup process and backends would be detected by the deadlock detector invoked within the backends, but that assumption was incorrect: the startup process also needs to invoke the deadlock detector when necessary.

To fix this, this commit makes the startup process invoke the deadlock detector when deadlock_timeout is reached while handling a recovery conflict on a lock. Specifically, in that case the startup process requests all the backends holding the conflicting locks to check themselves for deadlocks.

Back-patch to v9.6. v9.5 also has this bug, but per discussion we decided not to back-patch the fix there, because v9.5 lacks some of the infrastructure (e.g., 37c5486) that this fix depends on. That infrastructure could be back-patched as well, but since the next minor release is the final one for v9.5, doing so is risky: if the back-patch unexpectedly introduced a new bug into v9.5, there would be no chance to fix it. We judged that back-patching to v9.5 carried more risk than gain.

Author: Fujii Masao
Reviewed-by: Bertrand Drouvot, Masahiko Sawada, Kyotaro Horiguchi
Discussion: https://postgr.es/m/4041d6b6-cf24-a120-36fa-1294220f8243@oss.nttdata.com
Parent commit: b1ebec2
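At a high level, the fix teaches the startup process to kick off deadlock detection itself. The sketch below condenses the standby.c hunk further down (the timeout setup, the quick-exit path, and the follow-up wait are omitted); it illustrates the control flow rather than reproducing the complete patch:

    /* In ResolveRecoveryConflictWithLock(), after deadlock_timeout has fired */
    if (got_standby_deadlock_timeout)
    {
        VirtualTransactionId *backends;

        /* Find every backend holding a conflicting AccessExclusiveLock ... */
        backends = GetLockConflicts(&locktag, AccessExclusiveLock, NULL);

        /* ... and ask each of them to run the deadlock detector on itself. */
        while (VirtualTransactionIdIsValid(*backends))
        {
            SignalVirtualTransaction(*backends,
                                     PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK,
                                     false);
            backends++;
        }
    }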

File tree: 5 files changed (+141 additions, −32 deletions)

src/backend/storage/ipc/procarray.c (8 additions, 1 deletion)

@@ -2651,6 +2651,13 @@ GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbOid)
  */
 pid_t
 CancelVirtualTransaction(VirtualTransactionId vxid, ProcSignalReason sigmode)
+{
+    return SignalVirtualTransaction(vxid, sigmode, true);
+}
+
+pid_t
+SignalVirtualTransaction(VirtualTransactionId vxid, ProcSignalReason sigmode,
+                         bool conflictPending)
 {
     ProcArrayStruct *arrayP = procArray;
     int         index;
@@ -2669,7 +2676,7 @@ CancelVirtualTransaction(VirtualTransactionId vxid, ProcSignalReason sigmode)
         if (procvxid.backendId == vxid.backendId &&
             procvxid.localTransactionId == vxid.localTransactionId)
         {
-            proc->recoveryConflictPending = true;
+            proc->recoveryConflictPending = conflictPending;
             pid = proc->pid;
             if (pid != 0)
             {
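
This refactoring makes CancelVirtualTransaction() a thin wrapper around the new SignalVirtualTransaction(), whose extra conflictPending argument controls whether the target backend's recoveryConflictPending flag is set. That lets the startup process signal a backend merely to request a deadlock check, without marking it as a recovery-conflict victim. A minimal illustration of the two entry points, assuming a valid vxid is already in hand:

    /* Existing behaviour: cancel the conflicting transaction (conflictPending = true). */
    CancelVirtualTransaction(vxid, PROCSIG_RECOVERY_CONFLICT_LOCK);

    /*
     * New usage: ask the backend to check itself for deadlocks, without
     * flagging it as a recovery-conflict victim.
     */
    SignalVirtualTransaction(vxid, PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK, false);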

src/backend/storage/ipc/standby.c (114 additions, 29 deletions)

@@ -42,6 +42,10 @@ int max_standby_streaming_delay = 30 * 1000;
 
 static HTAB *RecoveryLockLists;
 
+/* Flags set by timeout handlers */
+static volatile sig_atomic_t got_standby_deadlock_timeout = false;
+static volatile sig_atomic_t got_standby_lock_timeout = false;
+
 static void ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId *waitlist,
                                                    ProcSignalReason reason,
                                                    uint32 wait_event_info,
@@ -398,8 +402,10 @@ ResolveRecoveryConflictWithDatabase(Oid dbid)
  * lock.  As we are already queued to be granted the lock, no new lock
  * requests conflicting with ours will be granted in the meantime.
  *
- * Deadlocks involving the Startup process and an ordinary backend process
- * will be detected by the deadlock detector within the ordinary backend.
+ * We also must check for deadlocks involving the Startup process and
+ * hot-standby backend processes.  If deadlock_timeout is reached in
+ * this function, all the backends holding the conflicting locks are
+ * requested to check themselves for deadlocks.
  */
 void
 ResolveRecoveryConflictWithLock(LOCKTAG locktag)
@@ -410,7 +416,7 @@ ResolveRecoveryConflictWithLock(LOCKTAG locktag)
 
     ltime = GetStandbyLimitTime();
 
-    if (GetCurrentTimestamp() >= ltime)
+    if (GetCurrentTimestamp() >= ltime && ltime != 0)
     {
         /*
          * We're already behind, so clear a path as quickly as possible.
@@ -432,26 +438,85 @@ ResolveRecoveryConflictWithLock(LOCKTAG locktag)
     else
     {
         /*
-         * Wait (or wait again) until ltime
+         * Wait (or wait again) until ltime, and check for deadlocks as well
+         * if we will be waiting longer than deadlock_timeout
          */
-        EnableTimeoutParams timeouts[1];
+        EnableTimeoutParams timeouts[2];
+        int         cnt = 0;
+
+        if (ltime != 0)
+        {
+            got_standby_lock_timeout = false;
+            timeouts[cnt].id = STANDBY_LOCK_TIMEOUT;
+            timeouts[cnt].type = TMPARAM_AT;
+            timeouts[cnt].fin_time = ltime;
+            cnt++;
+        }
 
-        timeouts[0].id = STANDBY_LOCK_TIMEOUT;
-        timeouts[0].type = TMPARAM_AT;
-        timeouts[0].fin_time = ltime;
-        enable_timeouts(timeouts, 1);
+        got_standby_deadlock_timeout = false;
+        timeouts[cnt].id = STANDBY_DEADLOCK_TIMEOUT;
+        timeouts[cnt].type = TMPARAM_AFTER;
+        timeouts[cnt].delay_ms = DeadlockTimeout;
+        cnt++;
+
+        enable_timeouts(timeouts, cnt);
     }
 
     /* Wait to be signaled by the release of the Relation Lock */
     ProcWaitForSignal(PG_WAIT_LOCK | locktag.locktag_type);
 
+    /*
+     * Exit if ltime is reached. Then all the backends holding conflicting
+     * locks will be canceled in the next ResolveRecoveryConflictWithLock()
+     * call.
+     */
+    if (got_standby_lock_timeout)
+        goto cleanup;
+
+    if (got_standby_deadlock_timeout)
+    {
+        VirtualTransactionId *backends;
+
+        backends = GetLockConflicts(&locktag, AccessExclusiveLock, NULL);
+
+        /* Quick exit if there's no work to be done */
+        if (!VirtualTransactionIdIsValid(*backends))
+            goto cleanup;
+
+        /*
+         * Send signals to all the backends holding the conflicting locks, to
+         * ask them to check themselves for deadlocks.
+         */
+        while (VirtualTransactionIdIsValid(*backends))
+        {
+            SignalVirtualTransaction(*backends,
+                                     PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK,
+                                     false);
+            backends++;
+        }
+
+        /*
+         * Wait again here to be signaled by the release of the Relation Lock,
+         * to prevent the subsequent RecoveryConflictWithLock() from causing
+         * deadlock_timeout and sending a request for deadlocks check again.
+         * Otherwise the request continues to be sent every deadlock_timeout
+         * until the relation locks are released or ltime is reached.
+         */
+        got_standby_deadlock_timeout = false;
+        ProcWaitForSignal(PG_WAIT_LOCK | locktag.locktag_type);
+    }
+
+cleanup:
+
     /*
      * Clear any timeout requests established above.  We assume here that the
      * Startup process doesn't have any other outstanding timeouts than those
      * used by this function. If that stops being true, we could cancel the
      * timeouts individually, but that'd be slower.
      */
     disable_all_timeouts(false);
+    got_standby_lock_timeout = false;
+    got_standby_deadlock_timeout = false;
 }
 
 /*
@@ -490,15 +555,7 @@ ResolveRecoveryConflictWithBufferPin(void)
 
     ltime = GetStandbyLimitTime();
 
-    if (ltime == 0)
-    {
-        /*
-         * We're willing to wait forever for conflicts, so set timeout for
-         * deadlock check only
-         */
-        enable_timeout_after(STANDBY_DEADLOCK_TIMEOUT, DeadlockTimeout);
-    }
-    else if (GetCurrentTimestamp() >= ltime)
+    if (GetCurrentTimestamp() >= ltime && ltime != 0)
     {
         /*
          * We're already behind, so clear a path as quickly as possible.
@@ -512,26 +569,55 @@ ResolveRecoveryConflictWithBufferPin(void)
          * waiting longer than deadlock_timeout
          */
         EnableTimeoutParams timeouts[2];
+        int         cnt = 0;
 
-        timeouts[0].id = STANDBY_TIMEOUT;
-        timeouts[0].type = TMPARAM_AT;
-        timeouts[0].fin_time = ltime;
-        timeouts[1].id = STANDBY_DEADLOCK_TIMEOUT;
-        timeouts[1].type = TMPARAM_AFTER;
-        timeouts[1].delay_ms = DeadlockTimeout;
-        enable_timeouts(timeouts, 2);
+        if (ltime != 0)
+        {
+            timeouts[cnt].id = STANDBY_TIMEOUT;
+            timeouts[cnt].type = TMPARAM_AT;
+            timeouts[cnt].fin_time = ltime;
+            cnt++;
+        }
+
+        got_standby_deadlock_timeout = false;
+        timeouts[cnt].id = STANDBY_DEADLOCK_TIMEOUT;
+        timeouts[cnt].type = TMPARAM_AFTER;
+        timeouts[cnt].delay_ms = DeadlockTimeout;
+        cnt++;
+
+        enable_timeouts(timeouts, cnt);
     }
 
     /* Wait to be signaled by UnpinBuffer() */
     ProcWaitForSignal(PG_WAIT_BUFFER_PIN);
 
+    if (got_standby_deadlock_timeout)
+    {
+        /*
+         * Send out a request for hot-standby backends to check themselves for
+         * deadlocks.
+         *
+         * XXX The subsequent ResolveRecoveryConflictWithBufferPin() will wait
+         * to be signaled by UnpinBuffer() again and send a request for
+         * deadlocks check if deadlock_timeout happens. This causes the
+         * request to continue to be sent every deadlock_timeout until the
+         * buffer is unpinned or ltime is reached. This would increase the
+         * workload in the startup process and backends. In practice it may
+         * not be so harmful because the period that the buffer is kept pinned
+         * is basically no so long. But we should fix this?
+         */
+        SendRecoveryConflictWithBufferPin(
+                                          PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK);
+    }
+
     /*
      * Clear any timeout requests established above.  We assume here that the
      * Startup process doesn't have any other timeouts than what this function
      * uses.  If that stops being true, we could cancel the timeouts
      * individually, but that'd be slower.
      */
     disable_all_timeouts(false);
+    got_standby_deadlock_timeout = false;
 }
 
 static void
@@ -591,13 +677,12 @@ CheckRecoveryConflictDeadlock(void)
 
 /*
  * StandbyDeadLockHandler() will be called if STANDBY_DEADLOCK_TIMEOUT
- * occurs before STANDBY_TIMEOUT.  Send out a request for hot-standby
- * backends to check themselves for deadlocks.
+ * occurs before STANDBY_TIMEOUT.
  */
 void
 StandbyDeadLockHandler(void)
 {
-    SendRecoveryConflictWithBufferPin(PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK);
+    got_standby_deadlock_timeout = true;
 }
 
 /*
@@ -616,11 +701,11 @@ StandbyTimeoutHandler(void)
 
 /*
  * StandbyLockTimeoutHandler() will be called if STANDBY_LOCK_TIMEOUT is exceeded.
- * This doesn't need to do anything, simply waking up is enough.
  */
 void
 StandbyLockTimeoutHandler(void)
 {
+    got_standby_lock_timeout = true;
 }
 
 /*
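
Note how the timeout handlers change here: StandbyDeadLockHandler() and StandbyLockTimeoutHandler() no longer do any real work themselves; they only set volatile sig_atomic_t flags, and ResolveRecoveryConflictWithLock() / ResolveRecoveryConflictWithBufferPin() decide how to respond after ProcWaitForSignal() returns. That is what allows the same STANDBY_DEADLOCK_TIMEOUT to be shared between the lock-wait and buffer-pin-wait paths, and it also keeps the handlers trivially async-signal-safe. A generic sketch of this flag-based pattern (plain C for illustration, not PostgreSQL code):

    #include <signal.h>

    static volatile sig_atomic_t got_timeout = 0;

    /* Timeout/signal handler: just record that the event happened. */
    static void
    timeout_handler(int signo)
    {
        got_timeout = 1;
    }

    /* Main code: arm the timer, block, then do the real work outside the handler. */
    static void
    wait_and_handle_timeout(void)
    {
        /* ... arm a timer that raises the signal, then block waiting ... */
        if (got_timeout)
        {
            got_timeout = 0;
            /* heavyweight response goes here, in normal (non-handler) context */
        }
    }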

src/backend/storage/lmgr/proc.c (3 additions, 0 deletions)

@@ -1784,6 +1784,9 @@ CheckDeadLockAlert(void)
      * Have to set the latch again, even if handle_sig_alarm already did. Back
      * then got_deadlock_timeout wasn't yet set... It's unlikely that this
      * ever would be a problem, but setting a set latch again is cheap.
+     *
+     * Note that, when this function runs inside procsignal_sigusr1_handler(),
+     * the handler function sets the latch again after the latch is set here.
      */
     SetLatch(MyLatch);
     errno = save_errno;

src/backend/tcop/postgres.c (14 additions, 2 deletions)

@@ -2919,11 +2919,23 @@ RecoveryConflictInterrupt(ProcSignalReason reason)
         case PROCSIG_RECOVERY_CONFLICT_BUFFERPIN:
 
             /*
-             * If we aren't blocking the Startup process there is nothing
-             * more to do.
+             * If PROCSIG_RECOVERY_CONFLICT_BUFFERPIN is requested but we
+             * aren't blocking the Startup process there is nothing more
+             * to do.
+             *
+             * When PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK is
+             * requested, if we're waiting for locks and the startup
+             * process is not waiting for buffer pin (i.e., also waiting
+             * for locks), we set the flag so that ProcSleep() will check
+             * for deadlocks.
              */
             if (!HoldingBufferPinThatDelaysRecovery())
+            {
+                if (reason == PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK &&
+                    GetStartupBufferPinWaitBufId() < 0)
+                    CheckDeadLockAlert();
                 return;
+            }
 
             MyProc->recoveryConflictPending = true;
src/include/storage/procarray.h (2 additions, 0 deletions)

@@ -105,6 +105,8 @@ extern VirtualTransactionId *GetCurrentVirtualXIDs(TransactionId limitXmin,
                                                    int *nvxids);
 extern VirtualTransactionId *GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbOid);
 extern pid_t CancelVirtualTransaction(VirtualTransactionId vxid, ProcSignalReason sigmode);
+extern pid_t SignalVirtualTransaction(VirtualTransactionId vxid, ProcSignalReason sigmode,
+                                      bool conflictPending);
 
 extern bool MinimumActiveBackends(int min);
 extern int  CountDBBackends(Oid databaseid);
