@@ -3947,10 +3947,8 @@ heap_inplace_update(Relation relation, HeapTuple tuple)
  * because this function is applied during WAL recovery, when we don't have
  * access to any such state, and can't depend on the hint bits to be set.)
  *
- * In lazy VACUUM, we call this while initially holding only a shared lock
- * on the tuple's buffer.  If any change is needed, we trade that in for an
- * exclusive lock before making the change.  Caller should pass the buffer ID
- * if shared lock is held, InvalidBuffer if exclusive lock is already held.
+ * If the tuple is in a shared buffer, caller must hold an exclusive lock on
+ * that buffer.
  *
  * Note: it might seem we could make the changes without exclusive lock, since
  * TransactionId read/write is assumed atomic anyway.  However there is a race
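
The rewritten header comment is terse, so one caller-side note: the share-to-exclusive
lock upgrade moves out of this function entirely. A minimal sketch of what the new
contract asks of a caller, assuming a lazy-VACUUM-style caller that already has the
page pinned (variable names are illustrative, not taken from this commit; real callers
also have to WAL-log the freeze, which is omitted here):

    /* Sketch only: take the exclusive lock up front; heap_freeze_tuple()
     * no longer upgrades from shared to exclusive internally. */
    LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
    if (heap_freeze_tuple(tuple, cutoff_xid))
        MarkBufferDirty(buf);       /* the tuple header was modified in place */
    LockBuffer(buf, BUFFER_LOCK_UNLOCK);

LockBuffer() and MarkBufferDirty() are the standard bufmgr entry points; the point is
only that no lock juggling happens inside heap_freeze_tuple() anymore.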
@@ -3962,8 +3960,7 @@ heap_inplace_update(Relation relation, HeapTuple tuple)
  * infomask bits.
  */
 bool
-heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
-                  Buffer buf)
+heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid)
 {
     bool        changed = false;
     TransactionId xid;
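
Correspondingly, the extern declaration loses its Buffer argument. The header-file
side is not part of this excerpt, but the heapam.h prototype presumably ends up as:

    extern bool heap_freeze_tuple(HeapTupleHeader tuple,
                                  TransactionId cutoff_xid);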
@@ -3972,13 +3969,6 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
     if (TransactionIdIsNormal(xid) &&
         TransactionIdPrecedes(xid, cutoff_xid))
     {
-        if (buf != InvalidBuffer)
-        {
-            /* trade in share lock for exclusive lock */
-            LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-            LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
-            buf = InvalidBuffer;
-        }
         HeapTupleHeaderSetXmin(tuple, FrozenTransactionId);
 
         /*
@@ -3990,28 +3980,12 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
         changed = true;
     }
 
-    /*
-     * When we release shared lock, it's possible for someone else to change
-     * xmax before we get the lock back, so repeat the check after acquiring
-     * exclusive lock.  (We don't need this pushup for xmin, because only
-     * VACUUM could be interested in changing an existing tuple's xmin, and
-     * there's only one VACUUM allowed on a table at a time.)
-     */
-recheck_xmax:
     if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
     {
         xid = HeapTupleHeaderGetXmax(tuple);
         if (TransactionIdIsNormal(xid) &&
             TransactionIdPrecedes(xid, cutoff_xid))
         {
-            if (buf != InvalidBuffer)
-            {
-                /* trade in share lock for exclusive lock */
-                LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-                LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
-                buf = InvalidBuffer;
-                goto recheck_xmax;      /* see comment above */
-            }
             HeapTupleHeaderSetXmax(tuple, InvalidTransactionId);
 
             /*
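
For the record, the pattern deleted in this hunk guarded a real window: LockBuffer()
has no atomic upgrade, so trading shared for exclusive means unlocking first, and
another backend can set xmax in between. Schematically (reconstructed from the
removed lines, not new code):

    /* old pattern: the unlocked window sits between these two calls */
    LockBuffer(buf, BUFFER_LOCK_UNLOCK);       /* give up shared lock     */
    LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);    /* window: xmax can change */
    goto recheck_xmax;                         /* so re-run the test      */

With the caller holding the exclusive lock for the whole call, the window, the label,
and the goto all disappear.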
@@ -4046,30 +4020,15 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
     }
 
     /*
-     * Although xvac per se could only be set by old-style VACUUM FULL, it
-     * shares physical storage space with cmax, and so could be wiped out by
-     * someone setting xmax.  Hence recheck after changing lock, same as for
-     * xmax itself.
-     *
      * Old-style VACUUM FULL is gone, but we have to keep this code as long as
      * we support having MOVED_OFF/MOVED_IN tuples in the database.
      */
-recheck_xvac:
     if (tuple->t_infomask & HEAP_MOVED)
     {
         xid = HeapTupleHeaderGetXvac(tuple);
         if (TransactionIdIsNormal(xid) &&
             TransactionIdPrecedes(xid, cutoff_xid))
         {
-            if (buf != InvalidBuffer)
-            {
-                /* trade in share lock for exclusive lock */
-                LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-                LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
-                buf = InvalidBuffer;
-                goto recheck_xvac;      /* see comment above */
-            }
-
             /*
              * If a MOVED_OFF tuple is not dead, the xvac transaction must
              * have failed; whereas a non-dead MOVED_IN tuple must mean the
@@ -4711,7 +4670,7 @@ heap_xlog_freeze(XLogRecPtr lsn, XLogRecord *record)
             ItemId      lp = PageGetItemId(page, *offsets);
             HeapTupleHeader tuple = (HeapTupleHeader) PageGetItem(page, lp);
 
-            (void) heap_freeze_tuple(tuple, cutoff_xid, InvalidBuffer);
+            (void) heap_freeze_tuple(tuple, cutoff_xid);
             offsets++;
         }
     }
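
One detail every check in this function hangs on: TransactionIdPrecedes() is not a
plain less-than comparison. For normal XIDs it compares in modulo-2^32 arithmetic, so
the cutoff test survives XID wraparound. A self-contained illustration of the cast
trick (the real routine lives in src/backend/access/transam/transam.c and additionally
special-cases permanent XIDs, which heap_freeze_tuple already filters out via
TransactionIdIsNormal):

    #include <stdint.h>
    #include <stdio.h>

    /* Wraparound-safe "id1 precedes id2": interpret the unsigned
     * 32-bit difference as signed, as transam.c does for normal XIDs. */
    static int
    xid_precedes(uint32_t id1, uint32_t id2)
    {
        return (int32_t) (id1 - id2) < 0;
    }

    int
    main(void)
    {
        printf("%d\n", xid_precedes(100, 200));        /* 1: plainly older        */
        printf("%d\n", xid_precedes(4000000000u, 3));  /* 1: older despite larger */
        printf("%d\n", xid_precedes(3, 4000000000u));  /* 0: newer despite smaller */
        return 0;
    }

So a tuple whose xmin sits just behind cutoff_xid on the XID circle is frozen even
when its numeric value is larger than the cutoff's.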