
Commit 93d4484

Revert "Avoid spurious deadlocks when upgrading a tuple lock"
This reverts commits 3da73d6 and de87a08. This code has some tricky corner
cases that I'm not sure are correct and not properly tested anyway, so I'm
reverting the whole thing for next week's releases (reintroducing the deadlock
bug that we set out to fix). I'll try again afterwards.

Discussion: https://postgr.es/m/E1hbXKQ-0003g1-0C@gemulon.postgresql.org
1 parent 0e45e52 commit 93d4484

5 files changed: +21, -280 lines

src/backend/access/heap/README.tuplock

Lines changed: 0 additions & 10 deletions
@@ -36,16 +36,6 @@ do LockTuple as well, if there is any conflict, to ensure that they don't
 starve out waiting exclusive-lockers.  However, if there is not any active
 conflict for a tuple, we don't incur any extra overhead.
 
-We make an exception to the above rule for those lockers that already hold
-some lock on a tuple and attempt to acquire a stronger one on it.  In that
-case, we skip the LockTuple() call even when there are conflicts, provided
-that the target tuple is being locked, updated or deleted by multiple sessions
-concurrently.  Failing to skip the lock would risk a deadlock, e.g., between a
-session that was first to record its weaker lock in the tuple header and would
-be waiting on the LockTuple() call to upgrade to the stronger lock level, and
-another session that has already done LockTuple() and is waiting for the first
-session transaction to release its tuple header-level lock.
-
 We provide four levels of tuple locking strength: SELECT FOR UPDATE obtains an
 exclusive lock which prevents any kind of modification of the tuple.  This is
 the lock level that is implicitly taken by DELETE operations, and also by
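
The paragraph removed above describes the deadlock that this revert reinstates.
A minimal SQL sketch of that scenario, assuming a hypothetical one-row table
t (id int PRIMARY KEY, val int); this is only an illustration of the behavior
described in the README text, not a reconstruction of the deleted isolation spec:

    -- Session 1: record a weak lock in the tuple header.
    BEGIN;
    SELECT * FROM t WHERE id = 1 FOR KEY SHARE;

    -- Session 2: add a second locker, so xmax becomes a multixact.
    BEGIN;
    SELECT * FROM t WHERE id = 1 FOR SHARE;

    -- Session 3: request a conflicting lock; it does LockTuple() and then
    -- sleeps waiting for the multixact members to finish.
    BEGIN;
    SELECT * FROM t WHERE id = 1 FOR UPDATE;   -- blocks

    -- Session 1: upgrade its weak lock.  Without the reverted exception it
    -- queues behind session 3 in LockTuple(), while session 3 keeps waiting
    -- for session 1's transaction, so the deadlock detector aborts one of them.
    SELECT * FROM t WHERE id = 1 FOR UPDATE;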

src/backend/access/heap/heapam.c

Lines changed: 21 additions & 62 deletions
@@ -115,7 +115,7 @@ static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
 static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
 					uint16 t_infomask);
 static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
-					LockTupleMode lockmode, bool *current_is_member);
+					LockTupleMode lockmode);
 static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
 					Relation rel, ItemPointer ctid, XLTW_Oper oper,
 					int *remaining);

@@ -3112,20 +3112,15 @@ heap_delete(Relation relation, ItemPointer tid,
 		 */
 		if (infomask & HEAP_XMAX_IS_MULTI)
 		{
-			bool		current_is_member = false;
-
+			/* wait for multixact */
 			if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
-										LockTupleExclusive, &current_is_member))
+										LockTupleExclusive))
 			{
 				LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

-				/*
-				 * Acquire the lock, if necessary (but skip it when we're
-				 * requesting a lock and already have one; avoids deadlock).
-				 */
-				if (!current_is_member)
-					heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
-										 LockWaitBlock, &have_tuple_lock);
+				/* acquire tuple lock, if necessary */
+				heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
+									 LockWaitBlock, &have_tuple_lock);

 				/* wait for multixact */
 				MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,

@@ -3715,20 +3710,15 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 		{
 			TransactionId update_xact;
 			int			remain;
-			bool		current_is_member = false;

 			if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
-										*lockmode, &current_is_member))
+										*lockmode))
 			{
 				LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

-				/*
-				 * Acquire the lock, if necessary (but skip it when we're
-				 * requesting a lock and already have one; avoids deadlock).
-				 */
-				if (!current_is_member)
-					heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
-										 LockWaitBlock, &have_tuple_lock);
+				/* acquire tuple lock, if necessary */
+				heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
+									 LockWaitBlock, &have_tuple_lock);

 				/* wait for multixact */
 				MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,

@@ -4610,7 +4600,6 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
 	uint16		infomask;
 	uint16		infomask2;
 	bool		require_sleep;
-	bool		skip_tuple_lock = false;
 	ItemPointerData t_ctid;

 	/* must copy state data before unlocking buffer */

@@ -4664,21 +4653,6 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
 				result = HeapTupleMayBeUpdated;
 				goto out_unlocked;
 			}
-			else
-			{
-				/*
-				 * Disable acquisition of the heavyweight tuple lock.
-				 * Otherwise, when promoting a weaker lock, we might
-				 * deadlock with another locker that has acquired the
-				 * heavyweight tuple lock and is waiting for our
-				 * transaction to finish.
-				 *
-				 * Note that in this case we still need to wait for
-				 * the multixact if required, to avoid acquiring
-				 * conflicting locks.
-				 */
-				skip_tuple_lock = true;
-			}
 		}

 		if (members)

@@ -4833,7 +4807,7 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
 		if (infomask & HEAP_XMAX_IS_MULTI)
 		{
 			if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
-										 mode, NULL))
+										 mode))
 			{
 				/*
 				 * No conflict, but if the xmax changed under us in the

@@ -4910,15 +4884,13 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
 		/*
 		 * Acquire tuple lock to establish our priority for the tuple, or
 		 * die trying.  LockTuple will release us when we are next-in-line
-		 * for the tuple.  We must do this even if we are share-locking,
-		 * but not if we already have a weaker lock on the tuple.
+		 * for the tuple.  We must do this even if we are share-locking.
 		 *
 		 * If we are forced to "start over" below, we keep the tuple lock;
 		 * this arranges that we stay at the head of the line while
 		 * rechecking tuple state.
 		 */
-		if (!skip_tuple_lock &&
-			!heap_acquire_tuplock(relation, tid, mode, wait_policy,
+		if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
 								  &have_tuple_lock))
 		{
 			/*

@@ -7090,13 +7062,10 @@ HeapTupleGetUpdateXid(HeapTupleHeader tuple)
  * tuple lock of the given strength?
  *
  * The passed infomask pairs up with the given multixact in the tuple header.
- *
- * If current_is_member is not NULL, it is set to 'true' if the current
- * transaction is a member of the given multixact.
  */
 static bool
 DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
-						LockTupleMode lockmode, bool *current_is_member)
+						LockTupleMode lockmode)
 {
 	int			nmembers;
 	MultiXactMember *members;

@@ -7117,26 +7086,17 @@ DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
 			TransactionId memxid;
 			LOCKMODE	memlockmode;

-			if (result && (current_is_member == NULL || *current_is_member))
-				break;
-
 			memlockmode = LOCKMODE_from_mxstatus(members[i].status);

-			/* ignore members from current xact (but track their presence) */
-			memxid = members[i].xid;
-			if (TransactionIdIsCurrentTransactionId(memxid))
-			{
-				if (current_is_member != NULL)
-					*current_is_member = true;
-				continue;
-			}
-			else if (result)
-				continue;
-
 			/* ignore members that don't conflict with the lock we want */
 			if (!DoLockModesConflict(memlockmode, wanted))
 				continue;

+			/* ignore members from current xact */
+			memxid = members[i].xid;
+			if (TransactionIdIsCurrentTransactionId(memxid))
+				continue;
+
 			if (ISUPDATE_from_mxstatus(members[i].status))
 			{
 				/* ignore aborted updaters */

@@ -7153,11 +7113,10 @@ DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
 			/*
 			 * Whatever remains are either live lockers that conflict with our
 			 * wanted lock, and updaters that are not aborted.  Those conflict
-			 * with what we want.  Set up to return true, but keep going to
-			 * look for the current transaction among the multixact members,
-			 * if needed.
+			 * with what we want, so return true.
 			 */
 			result = true;
+			break;
 		}
 		pfree(members);
 	}
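
The conflict test that DoesMultiXactIdConflict() performs is visible at the SQL
level. A small sketch, again using the hypothetical table t from the example
above; the behavior follows the documented tuple-lock conflict rules:

    -- Sessions 1 and 2 each take FOR KEY SHARE, so xmax becomes a multixact.
    -- Session 1:
    BEGIN; SELECT * FROM t WHERE id = 1 FOR KEY SHARE;
    -- Session 2:
    BEGIN; SELECT * FROM t WHERE id = 1 FOR KEY SHARE;

    -- Session 3, alternative (a): a non-key UPDATE needs LockTupleNoKeyUpdate,
    -- which does not conflict with the key-share members, so no wait occurs.
    UPDATE t SET val = val + 1 WHERE id = 1;

    -- Session 3, alternative (b): DELETE needs LockTupleExclusive, which does
    -- conflict with the key-share members, so it waits for sessions 1 and 2.
    DELETE FROM t WHERE id = 1;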

src/test/isolation/expected/tuplelock-upgrade-no-deadlock.out

Lines changed: 0 additions & 150 deletions
This file was deleted.

src/test/isolation/isolation_schedule

Lines changed: 0 additions & 1 deletion
@@ -46,7 +46,6 @@ test: update-locked-tuple
 test: propagate-lock-delete
 test: tuplelock-conflict
 test: tuplelock-update
-test: tuplelock-upgrade-no-deadlock
 test: freeze-the-dead
 test: nowait
 test: nowait-2
