@@ -131,6 +131,19 @@ static void vhost_reset_is_le(struct vhost_virtqueue *vq)
 	vq->is_le = virtio_legacy_is_little_endian();
 }
 
+struct vhost_flush_struct {
+	struct vhost_work work;
+	struct completion wait_event;
+};
+
+static void vhost_flush_work(struct vhost_work *work)
+{
+	struct vhost_flush_struct *s;
+
+	s = container_of(work, struct vhost_flush_struct, work);
+	complete(&s->wait_event);
+}
+
 static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
 			    poll_table *pt)
 {
@@ -158,8 +171,6 @@ void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
 	INIT_LIST_HEAD(&work->node);
 	work->fn = fn;
 	init_waitqueue_head(&work->done);
-	work->flushing = 0;
-	work->queue_seq = work->done_seq = 0;
 }
 EXPORT_SYMBOL_GPL(vhost_work_init);
 
@@ -211,31 +222,17 @@ void vhost_poll_stop(struct vhost_poll *poll)
 }
 EXPORT_SYMBOL_GPL(vhost_poll_stop);
 
-static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
-				unsigned seq)
-{
-	int left;
-
-	spin_lock_irq(&dev->work_lock);
-	left = seq - work->done_seq;
-	spin_unlock_irq(&dev->work_lock);
-	return left <= 0;
-}
-
 void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
-	unsigned seq;
-	int flushing;
+	struct vhost_flush_struct flush;
+
+	if (dev->worker) {
+		init_completion(&flush.wait_event);
+		vhost_work_init(&flush.work, vhost_flush_work);
 
-	spin_lock_irq(&dev->work_lock);
-	seq = work->queue_seq;
-	work->flushing++;
-	spin_unlock_irq(&dev->work_lock);
-	wait_event(work->done, vhost_work_seq_done(dev, work, seq));
-	spin_lock_irq(&dev->work_lock);
-	flushing = --work->flushing;
-	spin_unlock_irq(&dev->work_lock);
-	BUG_ON(flushing < 0);
+		vhost_work_queue(dev, &flush.work);
+		wait_for_completion(&flush.wait_event);
+	}
 }
 EXPORT_SYMBOL_GPL(vhost_work_flush);
 
@@ -254,7 +251,6 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 	spin_lock_irqsave(&dev->work_lock, flags);
 	if (list_empty(&work->node)) {
 		list_add_tail(&work->node, &dev->work_list);
-		work->queue_seq++;
 		spin_unlock_irqrestore(&dev->work_lock, flags);
 		wake_up_process(dev->worker);
 	} else {
@@ -310,7 +306,6 @@ static int vhost_worker(void *data)
 {
 	struct vhost_dev *dev = data;
 	struct vhost_work *work = NULL;
-	unsigned uninitialized_var(seq);
 	mm_segment_t oldfs = get_fs();
 
 	set_fs(USER_DS);
@@ -321,11 +316,6 @@ static int vhost_worker(void *data)
 		set_current_state(TASK_INTERRUPTIBLE);
 
 		spin_lock_irq(&dev->work_lock);
-		if (work) {
-			work->done_seq = seq;
-			if (work->flushing)
-				wake_up_all(&work->done);
-		}
 
 		if (kthread_should_stop()) {
 			spin_unlock_irq(&dev->work_lock);
@@ -336,7 +326,6 @@ static int vhost_worker(void *data)
 			work = list_first_entry(&dev->work_list,
 						struct vhost_work, node);
 			list_del_init(&work->node);
-			seq = work->queue_seq;
 		} else
 			work = NULL;
 		spin_unlock_irq(&dev->work_lock);
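The change above swaps the old queue_seq/done_seq/flushing bookkeeping for a per-flush completion: vhost_work_flush() queues a throwaway work item that embeds a struct completion, the worker eventually runs vhost_flush_work(), which uses container_of() to recover the enclosing vhost_flush_struct and signals it, and the flusher simply blocks on the completion. The following is a minimal userspace sketch of that same pattern, using a pthreads stand-in for the kernel completion API; every name in it (work_item, flush_struct, the local init_completion/complete/wait_for_completion helpers) is illustrative and not part of the patch.

/*
 * Userspace sketch of the flush pattern from the patch: a flush request
 * embeds a work item plus a completion, the worker recovers the outer
 * struct with container_of() and signals it, and the flusher waits.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

struct work_item {
	void (*fn)(struct work_item *work);
};

struct flush_struct {
	struct work_item work;
	struct completion wait_event;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = false;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* Runs in the worker thread: signal whoever queued this flush work. */
static void flush_work_fn(struct work_item *work)
{
	struct flush_struct *s = container_of(work, struct flush_struct, work);

	complete(&s->wait_event);
}

/* Worker thread: here it just runs the single queued item and exits. */
static void *worker(void *data)
{
	struct work_item *work = data;

	work->fn(work);
	return NULL;
}

int main(void)
{
	struct flush_struct flush = { .work = { .fn = flush_work_fn } };
	pthread_t thr;

	init_completion(&flush.wait_event);
	pthread_create(&thr, NULL, worker, &flush.work);

	/* Like vhost_work_flush(): block until the worker has run our item. */
	wait_for_completion(&flush.wait_event);
	pthread_join(&thr, NULL);
	printf("flush completed\n");
	return 0;
}

Build with cc -pthread flush_sketch.c; the program exits once the worker thread has signalled the completion, mirroring how vhost_work_flush() returns only after the flush work has actually run on the vhost worker.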