@@ -477,7 +477,7 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
 }
 EXPORT_SYMBOL(kiocb_set_cancel_fn);
 
-static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
+static int kiocb_cancel(struct kiocb *kiocb)
 {
 	kiocb_cancel_fn *old, *cancel;
 
@@ -538,7 +538,7 @@ static void free_ioctx_users(struct percpu_ref *ref)
 				       struct kiocb, ki_list);
 
 		list_del_init(&req->ki_list);
-		kiocb_cancel(ctx, req);
+		kiocb_cancel(req);
 	}
 
 	spin_unlock_irq(&ctx->ctx_lock);
@@ -727,42 +727,42 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
  * when the processes owning a context have all exited to encourage
  * the rapid destruction of the kioctx.
  */
-static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
 		struct completion *requests_done)
 {
-	if (!atomic_xchg(&ctx->dead, 1)) {
-		struct kioctx_table *table;
+	struct kioctx_table *table;
 
-		spin_lock(&mm->ioctx_lock);
-		rcu_read_lock();
-		table = rcu_dereference(mm->ioctx_table);
+	if (atomic_xchg(&ctx->dead, 1))
+		return -EINVAL;
 
-		WARN_ON(ctx != table->table[ctx->id]);
-		table->table[ctx->id] = NULL;
-		rcu_read_unlock();
-		spin_unlock(&mm->ioctx_lock);
 
-		/* percpu_ref_kill() will do the necessary call_rcu() */
-		wake_up_all(&ctx->wait);
+	spin_lock(&mm->ioctx_lock);
+	rcu_read_lock();
+	table = rcu_dereference(mm->ioctx_table);
 
-	/*
-	 * It'd be more correct to do this in free_ioctx(), after all
-	 * the outstanding kiocbs have finished - but by then io_destroy
-	 * has already returned, so io_setup() could potentially return
-	 * -EAGAIN with no ioctxs actually in use (as far as userspace
-	 * could tell).
-	 */
-	aio_nr_sub(ctx->max_reqs);
+	WARN_ON(ctx != table->table[ctx->id]);
+	table->table[ctx->id] = NULL;
+	rcu_read_unlock();
+	spin_unlock(&mm->ioctx_lock);
 
-	if (ctx->mmap_size)
-		vm_munmap(ctx->mmap_base, ctx->mmap_size);
+	/* percpu_ref_kill() will do the necessary call_rcu() */
+	wake_up_all(&ctx->wait);
 
-	ctx->requests_done = requests_done;
-	percpu_ref_kill(&ctx->users);
-	} else {
-		if (requests_done)
-			complete(requests_done);
-	}
+	/*
+	 * It'd be more correct to do this in free_ioctx(), after all
+	 * the outstanding kiocbs have finished - but by then io_destroy
+	 * has already returned, so io_setup() could potentially return
+	 * -EAGAIN with no ioctxs actually in use (as far as userspace
+	 * could tell).
+	 */
+	aio_nr_sub(ctx->max_reqs);
+
+	if (ctx->mmap_size)
+		vm_munmap(ctx->mmap_base, ctx->mmap_size);
+
+	ctx->requests_done = requests_done;
+	percpu_ref_kill(&ctx->users);
+	return 0;
 }
 
 /* wait_on_sync_kiocb:
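The heart of this hunk is the inverted guard: instead of wrapping the whole teardown in if (!atomic_xchg(&ctx->dead, 1)) { ... }, kill_ioctx() now bails out early with -EINVAL when the exchange shows another caller already marked the context dead, and the winner runs the teardown at a single indentation level. A minimal userspace sketch of the same first-caller-wins pattern, using C11 atomics in place of the kernel's atomic_t (the struct and function names here are illustrative, not from fs/aio.c):

#include <stdatomic.h>
#include <stdio.h>

struct context {
	atomic_int dead;	/* 0 = live, 1 = teardown already claimed */
};

/* Return 0 to the one caller that wins the race, -1 to everyone else. */
static int kill_context(struct context *ctx)
{
	/* atomic_exchange() hands back the previous value, so only the
	 * first caller observes 0 and may run the teardown. */
	if (atomic_exchange(&ctx->dead, 1))
		return -1;	/* lost the race: context is already dying */

	/* ...exclusive teardown work goes here... */
	return 0;
}

int main(void)
{
	struct context ctx = { 0 };

	printf("first  kill: %d\n", kill_context(&ctx));	/* 0  */
	printf("second kill: %d\n", kill_context(&ctx));	/* -1 */
	return 0;
}

That early return is what lets a caller distinguish "I destroyed the context" from "someone beat me to it", which the io_destroy() hunk below relies on.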
@@ -1219,21 +1219,23 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 	if (likely(NULL != ioctx)) {
 		struct completion requests_done =
 			COMPLETION_INITIALIZER_ONSTACK(requests_done);
+		int ret;
 
 		/* Pass requests_done to kill_ioctx() where it can be set
 		 * in a thread-safe way. If we try to set it here then we have
 		 * a race condition if two io_destroy() called simultaneously.
 		 */
-		kill_ioctx(current->mm, ioctx, &requests_done);
+		ret = kill_ioctx(current->mm, ioctx, &requests_done);
 		percpu_ref_put(&ioctx->users);
 
 		/* Wait until all IO for the context are done. Otherwise kernel
 		 * keep using user-space buffers even if user thinks the context
 		 * is destroyed.
 		 */
-		wait_for_completion(&requests_done);
+		if (!ret)
+			wait_for_completion(&requests_done);
 
-		return 0;
+		return ret;
 	}
 	pr_debug("EINVAL: io_destroy: invalid context id\n");
 	return -EINVAL;
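Since kill_ioctx() now reports whether this caller actually initiated teardown, io_destroy() can propagate -EINVAL to the losing thread and skip wait_for_completion() for a requests_done that was never handed to anyone. The userspace-visible effect can be sketched with raw syscalls; this is a hypothetical demonstration assuming a kernel with this patch applied (build with cc -pthread; error handling trimmed):

#define _GNU_SOURCE
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

static aio_context_t ctx;	/* must start as 0 for io_setup() */

static void *destroyer(void *arg)
{
	/* With this patch, exactly one racing io_destroy() returns 0;
	 * the loser gets -1 with errno == EINVAL instead of also
	 * reporting success. */
	long ret = syscall(__NR_io_destroy, ctx);

	printf("thread %ld: ret=%ld errno=%d\n",
	       (long)(intptr_t)arg, ret, ret ? errno : 0);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	if (syscall(__NR_io_setup, 128, &ctx))
		return 1;

	pthread_create(&a, NULL, destroyer, (void *)1);
	pthread_create(&b, NULL, destroyer, (void *)2);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Note that if the winner finishes before the loser even looks up the context, the loser gets EINVAL from lookup_ioctx() instead; either way it no longer blocks or falsely claims success.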
@@ -1595,7 +1597,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 
 	kiocb = lookup_kiocb(ctx, iocb, key);
 	if (kiocb)
-		ret = kiocb_cancel(ctx, kiocb);
+		ret = kiocb_cancel(kiocb);
 	else
 		ret = -EINVAL;
 