@@ -5,12 +5,10 @@ use crate::pyobject::{PyClassImpl, PyObjectRef, PyRef, PyResult, PyValue};
 use crate::vm::VirtualMachine;
 
 use parking_lot::{
-    lock_api::{GetThreadId, RawMutex as RawMutexT, RawMutexTimed},
+    lock_api::{RawMutex as RawMutexT, RawMutexTimed, RawReentrantMutex},
     RawMutex, RawThreadId,
 };
-use std::cell::Cell;
 use std::fmt;
-use std::sync::atomic::{AtomicUsize, Ordering};
 use std::time::Duration;
 
 #[cfg(not(target_os = "windows"))]
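Note on the import change: the hand-rolled reentrant raw mutex removed in the next hunk is replaced by lock_api's now-public RawReentrantMutex, re-exported through parking_lot. Below is a minimal sketch of how that type composes parking_lot's primitives; the RawRMutex alias and reentrancy_demo are illustrative names, not part of this change, and the sketch assumes the lock_api release this diff builds against, where RawMutex::unlock is still a safe method (later releases mark it unsafe).

use parking_lot::{
    lock_api::{RawMutex as RawMutexT, RawReentrantMutex},
    RawMutex, RawThreadId,
};

// Reentrant raw mutex assembled from parking_lot's building blocks
// (illustrative alias, assumed to mirror what this module wraps).
type RawRMutex = RawReentrantMutex<RawMutex, RawThreadId>;

fn reentrancy_demo() {
    let mu = RawRMutex::INIT;
    mu.lock();
    // The owning thread may re-acquire without deadlocking; each lock
    // needs a matching unlock before other threads can get in.
    assert!(mu.try_lock());
    mu.unlock();
    mu.unlock();
}

RawThreadId supplies the per-thread id that the reentrant wrapper uses to detect re-acquisition by the thread that already owns the lock.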
@@ -93,89 +91,13 @@ impl LockProtocol for PyLock {
 
 #[pyimpl(with(LockProtocol))]
 impl PyLock {
-    // TODO: locked(), might require something to change in parking_lot
-}
-
-// Copied from lock_api
-// TODO: open a PR to make this public in lock_api
-struct RawReentrantMutex<R, G> {
-    owner: AtomicUsize,
-    lock_count: Cell<usize>,
-    mutex: R,
-    get_thread_id: G,
-}
-
-impl<R: RawMutexT, G: GetThreadId> RawReentrantMutex<R, G> {
-    #[inline]
-    fn lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool {
-        let id = self.get_thread_id.nonzero_thread_id().get();
-        if self.owner.load(Ordering::Relaxed) == id {
-            self.lock_count.set(
-                self.lock_count
-                    .get()
-                    .checked_add(1)
-                    .expect("ReentrantMutex lock count overflow"),
-            );
-        } else {
-            if !try_lock() {
-                return false;
-            }
-            self.owner.store(id, Ordering::Relaxed);
-            debug_assert_eq!(self.lock_count.get(), 0);
-            self.lock_count.set(1);
-        }
-        true
-    }
-}
-
-unsafe impl<R: RawMutexT + Send, G: GetThreadId + Send> Send for RawReentrantMutex<R, G> {}
-unsafe impl<R: RawMutexT + Sync, G: GetThreadId + Sync> Sync for RawReentrantMutex<R, G> {}
-
-unsafe impl<R: RawMutexT, G: GetThreadId> RawMutexT for RawReentrantMutex<R, G> {
-    const INIT: Self = RawReentrantMutex {
-        owner: AtomicUsize::new(0),
-        lock_count: Cell::new(0),
-        mutex: R::INIT,
-        get_thread_id: G::INIT,
-    };
-
-    type GuardMarker = R::GuardMarker;
-
-    #[inline]
-    fn lock(&self) {
-        self.lock_internal(|| {
-            self.mutex.lock();
-            true
-        });
-    }
-
-    #[inline]
-    fn try_lock(&self) -> bool {
-        self.lock_internal(|| self.mutex.try_lock())
-    }
-
-    #[inline]
-    fn unlock(&self) {
-        let lock_count = self.lock_count.get() - 1;
-        self.lock_count.set(lock_count);
-        if lock_count == 0 {
-            self.owner.store(0, Ordering::Relaxed);
-            self.mutex.unlock();
+    #[pymethod]
+    fn locked(&self) -> bool {
+        let acquired_lock = self.mu.try_lock();
+        if acquired_lock {
+            self.mu.unlock();
         }
-    }
-}
-
-unsafe impl<R: RawMutexTimed, G: GetThreadId> RawMutexTimed for RawReentrantMutex<R, G> {
-    type Instant = R::Instant;
-    type Duration = R::Duration;
-    #[inline]
-    fn try_lock_until(&self, timeout: R::Instant) -> bool {
-        self.lock_internal(|| self.mutex.try_lock_until(timeout))
-    }
-
-    #[inline]
-    fn try_lock_for(&self, timeout: R::Duration) -> bool {
-        self.lock_internal(|| self.mutex.try_lock_for(timeout))
+        !acquired_lock
     }
 }
 
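The new locked() method probes the raw mutex: it try-locks, immediately releases the lock if the probe succeeded, and returns the negation. The result is inherently a snapshot, since another thread may acquire or release the lock right after the call, which is the usual contract for this kind of query. The same pattern as a standalone sketch (is_locked is an illustrative helper, under the same safe-unlock assumption as above):

fn is_locked<R: parking_lot::lock_api::RawMutex>(mu: &R) -> bool {
    // Try to grab the lock; success means no one was holding it.
    let acquired = mu.try_lock();
    if acquired {
        // We only probed the lock, so release it immediately.
        mu.unlock();
    }
    !acquired
}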