@@ -175,12 +175,43 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	return ret;
 }
 
+static void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
+{
+	mutex_lock(&dev_priv->hw_mutex);
+	if (dev_priv->fence_queue_waiters++ == 0) {
+		unsigned long irq_flags;
+
+		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+		outl(SVGA_IRQFLAG_ANY_FENCE,
+		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+		vmw_write(dev_priv, SVGA_REG_IRQMASK,
+			  vmw_read(dev_priv, SVGA_REG_IRQMASK) |
+			  SVGA_IRQFLAG_ANY_FENCE);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+	}
+	mutex_unlock(&dev_priv->hw_mutex);
+}
+
+static void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
+{
+	mutex_lock(&dev_priv->hw_mutex);
+	if (--dev_priv->fence_queue_waiters == 0) {
+		unsigned long irq_flags;
+
+		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+		vmw_write(dev_priv, SVGA_REG_IRQMASK,
+			  vmw_read(dev_priv, SVGA_REG_IRQMASK) &
+			  ~SVGA_IRQFLAG_ANY_FENCE);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+	}
+	mutex_unlock(&dev_priv->hw_mutex);
+}
+
 int vmw_wait_seqno(struct vmw_private *dev_priv,
 		   bool lazy, uint32_t seqno,
 		   bool interruptible, unsigned long timeout)
 {
 	long ret;
-	unsigned long irq_flags;
 	struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
 	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
@@ -199,17 +230,7 @@ int vmw_wait_seqno(struct vmw_private *dev_priv,
 		return vmw_fallback_wait(dev_priv, lazy, false, seqno,
 					 interruptible, timeout);
 
-	mutex_lock(&dev_priv->hw_mutex);
-	if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
-		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-		outl(SVGA_IRQFLAG_ANY_FENCE,
-		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-		vmw_write(dev_priv, SVGA_REG_IRQMASK,
-			  vmw_read(dev_priv, SVGA_REG_IRQMASK) |
-			  SVGA_IRQFLAG_ANY_FENCE);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
-	}
-	mutex_unlock(&dev_priv->hw_mutex);
+	vmw_seqno_waiter_add(dev_priv);
 
 	if (interruptible)
 		ret = wait_event_interruptible_timeout
@@ -222,21 +243,13 @@ int vmw_wait_seqno(struct vmw_private *dev_priv,
 			 vmw_seqno_passed(dev_priv, seqno),
 			 timeout);
 
+	vmw_seqno_waiter_remove(dev_priv);
+
 	if (unlikely(ret == 0))
 		ret = -EBUSY;
 	else if (likely(ret > 0))
 		ret = 0;
 
-	mutex_lock(&dev_priv->hw_mutex);
-	if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
-		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-		vmw_write(dev_priv, SVGA_REG_IRQMASK,
-			  vmw_read(dev_priv, SVGA_REG_IRQMASK) &
-			  ~SVGA_IRQFLAG_ANY_FENCE);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
-	}
-	mutex_unlock(&dev_priv->hw_mutex);
-
 	return ret;
 }
 
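A note on the pattern this diff factors out: the removed open-coded block enabled
the fence interrupt whenever atomic_add_return(1, ...) > 0, which holds for every
waiter, so the unmask sequence ran on each wait. The new helpers keep a plain
counter under hw_mutex and write SVGA_REG_IRQMASK only on the 0 -> 1 and 1 -> 0
transitions. Below is a minimal, self-contained sketch of that counted
IRQ-enable idiom; the names (hw_lock, waiter_count, hw_unmask_irq, hw_mask_irq)
are hypothetical stand-ins for illustration, not vmwgfx code.

	#include <linux/mutex.h>

	static DEFINE_MUTEX(hw_lock);		/* serializes counter and IRQ mask */
	static unsigned int waiter_count;	/* number of outstanding waiters */

	static void hw_unmask_irq(void)
	{
		/* stand-in for the outl()/vmw_write() unmask sequence above */
	}

	static void hw_mask_irq(void)
	{
		/* stand-in for the vmw_write() mask sequence above */
	}

	static void waiter_add(void)
	{
		mutex_lock(&hw_lock);
		/* Only the first waiter (0 -> 1) touches the hardware. */
		if (waiter_count++ == 0)
			hw_unmask_irq();
		mutex_unlock(&hw_lock);
	}

	static void waiter_remove(void)
	{
		mutex_lock(&hw_lock);
		/* Only the last waiter (1 -> 0) touches the hardware. */
		if (--waiter_count == 0)
			hw_mask_irq();
		mutex_unlock(&hw_lock);
	}

Each waiter_add() must be paired with exactly one waiter_remove(), as
vmw_wait_seqno() now does around its wait_event_*_timeout() call; an unbalanced
pair either leaves the interrupt masked while a waiter sleeps or leaves it
unmasked with nobody waiting.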