@@ -788,14 +788,16 @@ LockAcquireExtended(const LOCKTAG *locktag,
788
788
}
789
789
790
790
/*
791
- * Emit a WAL record if acquisition of this lock needs to be replayed in a
792
- * standby server. Only AccessExclusiveLocks can conflict with lock types
793
- * that read-only transactions can acquire in a standby server.
791
+ * Prepare to emit a WAL record if acquisition of this lock needs to be
792
+ * replayed in a standby server.
794
793
*
795
- * Make sure this definition matches the one in
796
- * GetRunningTransactionLocks().
794
+ * Here we prepare to log; after lock is acquired we'll issue log record.
795
+ * This arrangement simplifies error recovery in case the preparation step
796
+ * fails.
797
797
*
798
- * First we prepare to log, then after lock acquired we issue log record.
798
+ * Only AccessExclusiveLocks can conflict with lock types that read-only
799
+ * transactions can acquire in a standby server. Make sure this definition
800
+ * matches the one in GetRunningTransactionLocks().
799
801
*/
800
802
if (lockmode >= AccessExclusiveLock &&
801
803
locktag -> locktag_type == LOCKTAG_RELATION &&
@@ -816,8 +818,8 @@ LockAcquireExtended(const LOCKTAG *locktag,
816
818
* lock type on a relation we have already locked using the fast-path, but
817
819
* for now we don't worry about that case either.
818
820
*/
819
- if (EligibleForRelationFastPath (locktag , lockmode )
820
- && FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND )
821
+ if (EligibleForRelationFastPath (locktag , lockmode ) &&
822
+ FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND )
821
823
{
822
824
uint32 fasthashcode = FastPathStrongLockHashPartition (hashcode );
823
825
bool acquired ;
@@ -837,6 +839,13 @@ LockAcquireExtended(const LOCKTAG *locktag,
837
839
LWLockRelease (MyProc -> backendLock );
838
840
if (acquired )
839
841
{
842
+ /*
843
+ * The locallock might contain stale pointers to some old shared
844
+ * objects; we MUST reset these to null before considering the
845
+ * lock to be acquired via fast-path.
846
+ */
847
+ locallock -> lock = NULL ;
848
+ locallock -> proclock = NULL ;
840
849
GrantLockLocal (locallock , owner );
841
850
return LOCKACQUIRE_OK ;
842
851
}
@@ -877,7 +886,13 @@ LockAcquireExtended(const LOCKTAG *locktag,
877
886
LWLockAcquire (partitionLock , LW_EXCLUSIVE );
878
887
879
888
/*
880
- * Find or create a proclock entry with this tag
889
+ * Find or create lock and proclock entries with this tag
890
+ *
891
+ * Note: if the locallock object already existed, it might have a pointer
892
+ * to the lock already ... but we should not assume that that pointer is
893
+ * valid, since a lock object with zero hold and request counts can go
894
+ * away anytime. So we have to use SetupLockInTable() to recompute the
895
+ * lock and proclock pointers, even if they're already set.
881
896
*/
882
897
proclock = SetupLockInTable (lockMethodTable , MyProc , locktag ,
883
898
hashcode , lockmode );
@@ -1010,7 +1025,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
1010
1025
LWLockRelease (partitionLock );
1011
1026
1012
1027
/*
1013
- * Emit a WAL record if acquisition of this lock need to be replayed in a
1028
+ * Emit a WAL record if acquisition of this lock needs to be replayed in a
1014
1029
* standby server.
1015
1030
*/
1016
1031
if (log_lock )
@@ -1049,11 +1064,6 @@ SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
1049
1064
1050
1065
/*
1051
1066
* Find or create a lock with this tag.
1052
- *
1053
- * Note: if the locallock object already existed, it might have a pointer
1054
- * to the lock already ... but we probably should not assume that that
1055
- * pointer is valid, since a lock object with no locks can go away
1056
- * anytime.
1057
1067
*/
1058
1068
lock = (LOCK * ) hash_search_with_hash_value (LockMethodLockHash ,
1059
1069
(const void * ) locktag ,
@@ -1821,8 +1831,8 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
1821
1831
return TRUE;
1822
1832
1823
1833
/* Attempt fast release of any lock eligible for the fast path. */
1824
- if (EligibleForRelationFastPath (locktag , lockmode )
1825
- && FastPathLocalUseCount > 0 )
1834
+ if (EligibleForRelationFastPath (locktag , lockmode ) &&
1835
+ FastPathLocalUseCount > 0 )
1826
1836
{
1827
1837
bool released ;
1828
1838
@@ -1852,30 +1862,33 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
1852
1862
* Normally, we don't need to re-find the lock or proclock, since we kept
1853
1863
* their addresses in the locallock table, and they couldn't have been
1854
1864
* removed while we were holding a lock on them. But it's possible that
1855
- * the locks have been moved to the main hash table by another backend, in
1856
- * which case we might need to go look them up after all.
1865
+ * the lock was taken fast-path and has since been moved to the main hash
1866
+ * table by another backend, in which case we will need to look up the
1867
+ * objects here. We assume the lock field is NULL if so.
1857
1868
*/
1858
1869
lock = locallock -> lock ;
1859
1870
if (!lock )
1860
1871
{
1861
1872
PROCLOCKTAG proclocktag ;
1862
- bool found ;
1863
1873
1864
1874
Assert (EligibleForRelationFastPath (locktag , lockmode ));
1865
1875
lock = (LOCK * ) hash_search_with_hash_value (LockMethodLockHash ,
1866
1876
(const void * ) locktag ,
1867
1877
locallock -> hashcode ,
1868
1878
HASH_FIND ,
1869
- & found );
1870
- Assert (found && lock != NULL );
1879
+ NULL );
1880
+ if (!lock )
1881
+ elog (ERROR , "failed to re-find shared lock object" );
1871
1882
locallock -> lock = lock ;
1872
1883
1873
1884
proclocktag .myLock = lock ;
1874
1885
proclocktag .myProc = MyProc ;
1875
1886
locallock -> proclock = (PROCLOCK * ) hash_search (LockMethodProcLockHash ,
1876
1887
(void * ) & proclocktag ,
1877
- HASH_FIND , & found );
1878
- Assert (found );
1888
+ HASH_FIND ,
1889
+ NULL );
1890
+ if (!locallock -> proclock )
1891
+ elog (ERROR , "failed to re-find shared proclock object" );
1879
1892
}
1880
1893
LOCK_PRINT ("LockRelease: found" , lock , lockmode );
1881
1894
proclock = locallock -> proclock ;
@@ -1956,7 +1969,8 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
1956
1969
* entries, then we scan the process's proclocks and get rid of those. We
1957
1970
* do this separately because we may have multiple locallock entries
1958
1971
* pointing to the same proclock, and we daren't end up with any dangling
1959
- * pointers.
1972
+ * pointers. Fast-path locks are cleaned up during the locallock table
1973
+ * scan, though.
1960
1974
*/
1961
1975
hash_seq_init (& status , LockMethodLocalHash );
1962
1976
@@ -2011,7 +2025,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
2011
2025
2012
2026
/*
2013
2027
* If the lock or proclock pointers are NULL, this lock was taken via
2014
- * the relation fast-path.
2028
+ * the relation fast-path (and is not known to have been transferred) .
2015
2029
*/
2016
2030
if (locallock -> proclock == NULL || locallock -> lock == NULL )
2017
2031
{
@@ -2025,7 +2039,10 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
2025
2039
/*
2026
2040
* If we don't currently hold the LWLock that protects our
2027
2041
* fast-path data structures, we must acquire it before attempting
2028
- * to release the lock via the fast-path.
2042
+ * to release the lock via the fast-path. We will continue to
2043
+ * hold the LWLock until we're done scanning the locallock table,
2044
+ * unless we hit a transferred fast-path lock. (XXX is this
2045
+ * really such a good idea? There could be a lot of entries ...)
2029
2046
*/
2030
2047
if (!have_fast_path_lwlock )
2031
2048
{
@@ -2070,6 +2087,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
2070
2087
RemoveLocalLock (locallock );
2071
2088
}
2072
2089
2090
+ /* Done with the fast-path data structures */
2073
2091
if (have_fast_path_lwlock )
2074
2092
LWLockRelease (MyProc -> backendLock );
2075
2093
@@ -2421,6 +2439,7 @@ FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
2421
2439
Assert (!result );
2422
2440
FAST_PATH_CLEAR_LOCKMODE (MyProc , f , lockmode );
2423
2441
result = true;
2442
+ /* we continue iterating so as to update FastPathLocalUseCount */
2424
2443
}
2425
2444
if (FAST_PATH_GET_BITS (MyProc , f ) != 0 )
2426
2445
++ FastPathLocalUseCount ;
@@ -2506,6 +2525,9 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
2506
2525
FAST_PATH_CLEAR_LOCKMODE (proc , f , lockmode );
2507
2526
}
2508
2527
LWLockRelease (partitionLock );
2528
+
2529
+ /* No need to examine remaining slots. */
2530
+ break ;
2509
2531
}
2510
2532
LWLockRelease (proc -> backendLock );
2511
2533
}
@@ -2516,6 +2538,8 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
2516
2538
* FastPathGetLockEntry
2517
2539
* Return the PROCLOCK for a lock originally taken via the fast-path,
2518
2540
* transferring it to the primary lock table if necessary.
2541
+ *
2542
+ * Note: caller takes care of updating the locallock object.
2519
2543
*/
2520
2544
static PROCLOCK *
2521
2545
FastPathGetRelationLockEntry (LOCALLOCK * locallock )
@@ -2559,6 +2583,9 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock)
2559
2583
FAST_PATH_CLEAR_LOCKMODE (MyProc , f , lockmode );
2560
2584
2561
2585
LWLockRelease (partitionLock );
2586
+
2587
+ /* No need to examine remaining slots. */
2588
+ break ;
2562
2589
}
2563
2590
2564
2591
LWLockRelease (MyProc -> backendLock );
@@ -2731,6 +2758,8 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
2731
2758
*/
2732
2759
if (VirtualTransactionIdIsValid (vxid ))
2733
2760
vxids [count ++ ] = vxid ;
2761
+
2762
+ /* No need to examine remaining slots. */
2734
2763
break ;
2735
2764
}
2736
2765