@@ -1022,6 +1022,9 @@ static bool get_reqs_available(struct kioctx *ctx)
 /* aio_get_req
  *	Allocate a slot for an aio request.
  *	Returns NULL if no requests are free.
+ *
+ * The refcount is initialized to 2 - one for the async op completion,
+ * one for the synchronous code that does this.
  */
 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 {
@@ -1034,7 +1037,7 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 	percpu_ref_get(&ctx->reqs);
 	req->ki_ctx = ctx;
 	INIT_LIST_HEAD(&req->ki_list);
-	refcount_set(&req->ki_refcnt, 0);
+	refcount_set(&req->ki_refcnt, 2);
 	req->ki_eventfd = NULL;
 	return req;
 }
@@ -1067,15 +1070,18 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 	return ret;
 }
 
+static inline void iocb_destroy(struct aio_kiocb *iocb)
+{
+	if (iocb->ki_filp)
+		fput(iocb->ki_filp);
+	percpu_ref_put(&iocb->ki_ctx->reqs);
+	kmem_cache_free(kiocb_cachep, iocb);
+}
+
 static inline void iocb_put(struct aio_kiocb *iocb)
 {
-	if (refcount_read(&iocb->ki_refcnt) == 0 ||
-	    refcount_dec_and_test(&iocb->ki_refcnt)) {
-		if (iocb->ki_filp)
-			fput(iocb->ki_filp);
-		percpu_ref_put(&iocb->ki_ctx->reqs);
-		kmem_cache_free(kiocb_cachep, iocb);
-	}
+	if (refcount_dec_and_test(&iocb->ki_refcnt))
+		iocb_destroy(iocb);
 }
 
 static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
@@ -1749,9 +1755,6 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 	INIT_LIST_HEAD(&req->wait.entry);
 	init_waitqueue_func_entry(&req->wait, aio_poll_wake);
 
-	/* one for removal from waitqueue, one for this function */
-	refcount_set(&aiocb->ki_refcnt, 2);
-
 	mask = vfs_poll(req->file, &apt.pt) & req->events;
 	if (unlikely(!req->head)) {
 		/* we did not manage to set up a waitqueue, done */
@@ -1782,7 +1785,6 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 
 	if (mask)
 		aio_poll_complete(aiocb, mask);
-	iocb_put(aiocb);
 	return 0;
 }
 
@@ -1873,18 +1875,21 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
 		break;
 	}
 
+	/* Done with the synchronous reference */
+	iocb_put(req);
+
 	/*
 	 * If ret is 0, we'd either done aio_complete() ourselves or have
	 * arranged for that to be done asynchronously. Anything non-zero
	 * means that we need to destroy req ourselves.
	 */
-	if (ret)
-		goto out_put_req;
-	return 0;
+	if (!ret)
+		return 0;
+
 out_put_req:
 	if (req->ki_eventfd)
 		eventfd_ctx_put(req->ki_eventfd);
-	iocb_put(req);
+	iocb_destroy(req);
 out_put_reqs_available:
 	put_reqs_available(ctx, 1);
 	return ret;