@@ -93,6 +93,8 @@ static void HeapSatisfiesHOTandKeyUpdate(Relation relation,
93
93
bool * satisfies_hot , bool * satisfies_key ,
94
94
bool * satisfies_id ,
95
95
HeapTuple oldtup , HeapTuple newtup );
96
+ static void heap_acquire_tuplock (Relation relation , ItemPointer tid ,
97
+ LockTupleMode mode , bool nowait , bool * have_tuple_lock );
96
98
static void compute_new_xmax_infomask (TransactionId xmax , uint16 old_infomask ,
97
99
uint16 old_infomask2 , TransactionId add_to_xmax ,
98
100
LockTupleMode mode , bool is_update ,
@@ -105,6 +107,8 @@ static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
105
107
uint16 * new_infomask2 );
106
108
static TransactionId MultiXactIdGetUpdateXid (TransactionId xmax ,
107
109
uint16 t_infomask );
110
+ static bool DoesMultiXactIdConflict (MultiXactId multi , uint16 infomask ,
111
+ LockTupleMode lockmode );
108
112
static void MultiXactIdWait (MultiXactId multi , MultiXactStatus status , uint16 infomask ,
109
113
Relation rel , ItemPointer ctid , XLTW_Oper oper ,
110
114
int * remaining );
@@ -2726,11 +2730,8 @@ heap_delete(Relation relation, ItemPointer tid,
2726
2730
* this arranges that we stay at the head of the line while rechecking
2727
2731
* tuple state.
2728
2732
*/
2729
- if (!have_tuple_lock )
2730
- {
2731
- LockTupleTuplock (relation , & (tp .t_self ), LockTupleExclusive );
2732
- have_tuple_lock = true;
2733
- }
2733
+ heap_acquire_tuplock (relation , & (tp .t_self ), LockTupleExclusive ,
2734
+ false, & have_tuple_lock );
2734
2735
2735
2736
/*
2736
2737
* Sleep until concurrent transaction ends. Note that we don't care
@@ -3261,21 +3262,6 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
3261
3262
3262
3263
LockBuffer (buffer , BUFFER_LOCK_UNLOCK );
3263
3264
3264
- /*
3265
- * Acquire tuple lock to establish our priority for the tuple (see
3266
- * heap_lock_tuple). LockTuple will release us when we are
3267
- * next-in-line for the tuple.
3268
- *
3269
- * If we are forced to "start over" below, we keep the tuple lock;
3270
- * this arranges that we stay at the head of the line while rechecking
3271
- * tuple state.
3272
- */
3273
- if (!have_tuple_lock )
3274
- {
3275
- LockTupleTuplock (relation , & (oldtup .t_self ), * lockmode );
3276
- have_tuple_lock = true;
3277
- }
3278
-
3279
3265
/*
3280
3266
* Now we have to do something about the existing locker. If it's a
3281
3267
* multi, sleep on it; we might be awakened before it is completely
@@ -3286,12 +3272,30 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
3286
3272
* before actually going to sleep. If the update doesn't conflict
3287
3273
* with the locks, we just continue without sleeping (but making sure
3288
3274
* it is preserved).
3275
+ *
3276
+ * Before sleeping, we need to acquire tuple lock to establish our
3277
+ * priority for the tuple (see heap_lock_tuple). LockTuple will
3278
+ * release us when we are next-in-line for the tuple. Note we must not
3279
+ * acquire the tuple lock until we're sure we're going to sleep;
3280
+ * otherwise we're open for race conditions with other transactions
3281
+ * holding the tuple lock which sleep on us.
3282
+ *
3283
+ * If we are forced to "start over" below, we keep the tuple lock;
3284
+ * this arranges that we stay at the head of the line while rechecking
3285
+ * tuple state.
3289
3286
*/
3290
3287
if (infomask & HEAP_XMAX_IS_MULTI )
3291
3288
{
3292
3289
TransactionId update_xact ;
3293
3290
int remain ;
3294
3291
3292
+ /* acquire tuple lock, if necessary */
3293
+ if (DoesMultiXactIdConflict ((MultiXactId ) xwait , infomask , * lockmode ))
3294
+ {
3295
+ heap_acquire_tuplock (relation , & (oldtup .t_self ), * lockmode ,
3296
+ false, & have_tuple_lock );
3297
+ }
3298
+
3295
3299
/* wait for multixact */
3296
3300
MultiXactIdWait ((MultiXactId ) xwait , mxact_status , infomask ,
3297
3301
relation , & oldtup .t_data -> t_ctid , XLTW_Update ,
@@ -3368,7 +3372,12 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
3368
3372
}
3369
3373
else
3370
3374
{
3371
- /* wait for regular transaction to end */
3375
+ /*
3376
+ * Wait for regular transaction to end; but first, acquire
3377
+ * tuple lock.
3378
+ */
3379
+ heap_acquire_tuplock (relation , & (oldtup .t_self ), * lockmode ,
3380
+ false, & have_tuple_lock );
3372
3381
XactLockTableWait (xwait , relation , & oldtup .t_data -> t_ctid ,
3373
3382
XLTW_Update );
3374
3383
LockBuffer (buffer , BUFFER_LOCK_EXCLUSIVE );
@@ -4076,7 +4085,6 @@ get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
4076
4085
return (MultiXactStatus ) retval ;
4077
4086
}
4078
4087
4079
-
4080
4088
/*
4081
4089
* heap_lock_tuple - lock a tuple in shared or exclusive mode
4082
4090
*
@@ -4204,30 +4212,6 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
4204
4212
pfree (members );
4205
4213
}
4206
4214
4207
- /*
4208
- * Acquire tuple lock to establish our priority for the tuple.
4209
- * LockTuple will release us when we are next-in-line for the tuple.
4210
- * We must do this even if we are share-locking.
4211
- *
4212
- * If we are forced to "start over" below, we keep the tuple lock;
4213
- * this arranges that we stay at the head of the line while rechecking
4214
- * tuple state.
4215
- */
4216
- if (!have_tuple_lock )
4217
- {
4218
- if (nowait )
4219
- {
4220
- if (!ConditionalLockTupleTuplock (relation , tid , mode ))
4221
- ereport (ERROR ,
4222
- (errcode (ERRCODE_LOCK_NOT_AVAILABLE ),
4223
- errmsg ("could not obtain lock on row in relation \"%s\"" ,
4224
- RelationGetRelationName (relation ))));
4225
- }
4226
- else
4227
- LockTupleTuplock (relation , tid , mode );
4228
- have_tuple_lock = true;
4229
- }
4230
-
4231
4215
/*
4232
4216
* Initially assume that we will have to wait for the locking
4233
4217
* transaction(s) to finish. We check various cases below in which
@@ -4334,64 +4318,26 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
4334
4318
{
4335
4319
/*
4336
4320
* If we're requesting NoKeyExclusive, we might also be able to
4337
- * avoid sleeping; just ensure that there's no other lock type
4338
- * than KeyShare. Note that this is a bit more involved than just
4339
- * checking hint bits -- we need to expand the multixact to figure
4340
- * out lock modes for each one (unless there was only one such
4341
- * locker).
4321
+ * avoid sleeping; just ensure that there is no conflicting lock
4322
+ * already acquired.
4342
4323
*/
4343
4324
if (infomask & HEAP_XMAX_IS_MULTI )
4344
4325
{
4345
- int nmembers ;
4346
- MultiXactMember * members ;
4347
-
4348
- /*
4349
- * We don't need to allow old multixacts here; if that had
4350
- * been the case, HeapTupleSatisfiesUpdate would have returned
4351
- * MayBeUpdated and we wouldn't be here.
4352
- */
4353
- nmembers = GetMultiXactIdMembers (xwait , & members , false);
4354
-
4355
- if (nmembers <= 0 )
4326
+ if (!DoesMultiXactIdConflict ((MultiXactId ) xwait , infomask ,
4327
+ mode ))
4356
4328
{
4357
4329
/*
4358
- * No need to keep the previous xmax here. This is
4359
- * unlikely to happen .
4330
+ * No conflict, but if the xmax changed under us in the
4331
+ * meantime, start over .
4360
4332
*/
4361
- require_sleep = false;
4362
- }
4363
- else
4364
- {
4365
- int i ;
4366
- bool allowed = true;
4333
+ LockBuffer (* buffer , BUFFER_LOCK_EXCLUSIVE );
4334
+ if (xmax_infomask_changed (tuple -> t_data -> t_infomask , infomask ) ||
4335
+ !TransactionIdEquals (HeapTupleHeaderGetRawXmax (tuple -> t_data ),
4336
+ xwait ))
4337
+ goto l3 ;
4367
4338
4368
- for (i = 0 ; i < nmembers ; i ++ )
4369
- {
4370
- if (members [i ].status != MultiXactStatusForKeyShare )
4371
- {
4372
- allowed = false;
4373
- break ;
4374
- }
4375
- }
4376
- if (allowed )
4377
- {
4378
- /*
4379
- * if the xmax changed under us in the meantime, start
4380
- * over.
4381
- */
4382
- LockBuffer (* buffer , BUFFER_LOCK_EXCLUSIVE );
4383
- if (xmax_infomask_changed (tuple -> t_data -> t_infomask , infomask ) ||
4384
- !TransactionIdEquals (HeapTupleHeaderGetRawXmax (tuple -> t_data ),
4385
- xwait ))
4386
- {
4387
- pfree (members );
4388
- goto l3 ;
4389
- }
4390
- /* otherwise, we're good */
4391
- require_sleep = false;
4392
- }
4393
-
4394
- pfree (members );
4339
+ /* otherwise, we're good */
4340
+ require_sleep = false;
4395
4341
}
4396
4342
}
4397
4343
else if (HEAP_XMAX_IS_KEYSHR_LOCKED (infomask ))
@@ -4417,6 +4363,18 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
4417
4363
4418
4364
if (require_sleep )
4419
4365
{
4366
+ /*
4367
+ * Acquire tuple lock to establish our priority for the tuple.
4368
+ * LockTuple will release us when we are next-in-line for the tuple.
4369
+ * We must do this even if we are share-locking.
4370
+ *
4371
+ * If we are forced to "start over" below, we keep the tuple lock;
4372
+ * this arranges that we stay at the head of the line while rechecking
4373
+ * tuple state.
4374
+ */
4375
+ heap_acquire_tuplock (relation , tid , mode , nowait ,
4376
+ & have_tuple_lock );
4377
+
4420
4378
if (infomask & HEAP_XMAX_IS_MULTI )
4421
4379
{
4422
4380
MultiXactStatus status = get_mxact_status_for_lock (mode , false);
@@ -4714,6 +4672,32 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
4714
4672
return HeapTupleMayBeUpdated ;
4715
4673
}
4716
4674
4675
+ /*
4676
+ * Acquire heavyweight lock on the given tuple, in preparation for acquiring
4677
+ * its normal, Xmax-based tuple lock.
4678
+ *
4679
+ * have_tuple_lock is an input and output parameter: on input, it indicates
4680
+ * whether the lock has previously been acquired (and this function does
4681
+ * nothing in that case). If this function returns success, have_tuple_lock
4682
+ * has been flipped to true.
4683
+ */
4684
+ static void
4685
+ heap_acquire_tuplock (Relation relation , ItemPointer tid , LockTupleMode mode ,
4686
+ bool nowait , bool * have_tuple_lock )
4687
+ {
4688
+ if (* have_tuple_lock )
4689
+ return ;
4690
+
4691
+ if (!nowait )
4692
+ LockTupleTuplock (relation , tid , mode );
4693
+ else if (!ConditionalLockTupleTuplock (relation , tid , mode ))
4694
+ ereport (ERROR ,
4695
+ (errcode (ERRCODE_LOCK_NOT_AVAILABLE ),
4696
+ errmsg ("could not obtain lock on row in relation \"%s\"" ,
4697
+ RelationGetRelationName (relation ))));
4698
+
4699
+ * have_tuple_lock = true;
4700
+ }
4717
4701
4718
4702
/*
4719
4703
* Given an original set of Xmax and infomask, and a transaction (identified by
@@ -6113,6 +6097,69 @@ HeapTupleGetUpdateXid(HeapTupleHeader tuple)
6113
6097
tuple -> t_infomask );
6114
6098
}
6115
6099
6100
+ /*
6101
+ * Does the given multixact conflict with the current transaction grabbing a
6102
+ * tuple lock of the given strength?
6103
+ *
6104
+ * The passed infomask pairs up with the given multixact in the tuple header.
6105
+ */
6106
+ static bool
6107
+ DoesMultiXactIdConflict (MultiXactId multi , uint16 infomask ,
6108
+ LockTupleMode lockmode )
6109
+ {
6110
+ bool allow_old ;
6111
+ int nmembers ;
6112
+ MultiXactMember * members ;
6113
+ bool result = false;
6114
+
6115
+ allow_old = !(infomask & HEAP_LOCK_MASK ) && HEAP_XMAX_IS_LOCKED_ONLY (infomask );
6116
+ nmembers = GetMultiXactIdMembers (multi , & members , allow_old );
6117
+ if (nmembers >= 0 )
6118
+ {
6119
+ int i ;
6120
+
6121
+ for (i = 0 ; i < nmembers ; i ++ )
6122
+ {
6123
+ TransactionId memxid ;
6124
+ LockTupleMode memlockmode ;
6125
+
6126
+ memlockmode = LOCKMODE_from_mxstatus (members [i ].status );
6127
+ /* ignore members that don't conflict with the lock we want */
6128
+ if (!DoLockModesConflict (memlockmode , lockmode ))
6129
+ continue ;
6130
+
6131
+ /* ignore members from current xact */
6132
+ memxid = members [i ].xid ;
6133
+ if (TransactionIdIsCurrentTransactionId (memxid ))
6134
+ continue ;
6135
+
6136
+ if (ISUPDATE_from_mxstatus (members [i ].status ))
6137
+ {
6138
+ /* ignore aborted updaters */
6139
+ if (TransactionIdDidAbort (memxid ))
6140
+ continue ;
6141
+ }
6142
+ else
6143
+ {
6144
+ /* ignore lockers-only that are no longer in progress */
6145
+ if (!TransactionIdIsInProgress (memxid ))
6146
+ continue ;
6147
+ }
6148
+
6149
+ /*
6150
+ * Whatever remains are either live lockers that conflict with our
6151
+ * wanted lock, and updaters that are not aborted. Those conflict
6152
+ * with what we want, so return true.
6153
+ */
6154
+ result = true;
6155
+ break ;
6156
+ }
6157
+ pfree (members );
6158
+ }
6159
+
6160
+ return result ;
6161
+ }
6162
+
6116
6163
/*
6117
6164
* Do_MultiXactIdWait
6118
6165
* Actual implementation for the two functions below.
0 commit comments