@@ -204,8 +204,7 @@ struct aio_kiocb {
 	struct kioctx		*ki_ctx;
 	kiocb_cancel_fn		*ki_cancel;
 
-	struct iocb __user	*ki_user_iocb;	/* user's aiocb */
-	__u64			ki_user_data;	/* user's data for completion */
+	struct io_event		ki_res;
 
 	struct list_head	ki_list;	/* the aio core uses this
 						 * for cancellation */
@@ -1084,15 +1083,6 @@ static inline void iocb_put(struct aio_kiocb *iocb)
 		iocb_destroy(iocb);
 }
 
-static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
-			   long res, long res2)
-{
-	ev->obj = (u64)(unsigned long)iocb->ki_user_iocb;
-	ev->data = iocb->ki_user_data;
-	ev->res = res;
-	ev->res2 = res2;
-}
-
 /* aio_complete
  *	Called when the io request on the given iocb is complete.
  */
@@ -1104,6 +1094,8 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 	unsigned tail, pos, head;
 	unsigned long	flags;
 
+	iocb->ki_res.res = res;
+	iocb->ki_res.res2 = res2;
 	/*
 	 * Add a completion event to the ring buffer. Must be done holding
 	 * ctx->completion_lock to prevent other code from messing with the tail
@@ -1120,14 +1112,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
-	aio_fill_event(event, iocb, res, res2);
+	*event = iocb->ki_res;
 
 	kunmap_atomic(ev_page);
 	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 
-	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
-		 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
-		 res, res2);
+	pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
+		 (void __user *)(unsigned long)iocb->ki_res.obj,
+		 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
 
 	/* after flagging the request as done, we
 	 * must never even look at it again
@@ -1844,8 +1836,10 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
 		goto out_put_req;
 	}
 
-	req->ki_user_iocb = user_iocb;
-	req->ki_user_data = iocb->aio_data;
+	req->ki_res.obj = (u64)(unsigned long)user_iocb;
+	req->ki_res.data = iocb->aio_data;
+	req->ki_res.res = 0;
+	req->ki_res.res2 = 0;
 
 	switch (iocb->aio_lio_opcode) {
 	case IOCB_CMD_PREAD:
@@ -2019,6 +2013,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 	struct aio_kiocb *kiocb;
 	int ret = -EINVAL;
 	u32 key;
+	u64 obj = (u64)(unsigned long)iocb;
 
 	if (unlikely(get_user(key, &iocb->aio_key)))
 		return -EFAULT;
@@ -2032,7 +2027,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 	spin_lock_irq(&ctx->ctx_lock);
 	/* TODO: use a hash or array, this sucks. */
 	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
-		if (kiocb->ki_user_iocb == iocb) {
+		if (kiocb->ki_res.obj == obj) {
 			ret = kiocb->ki_cancel(&kiocb->rw);
 			list_del_init(&kiocb->ki_list);
 			break;
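
For context: this change embeds the userspace completion record directly in struct aio_kiocb, so obj and data are captured once at submission, res/res2 are filled in at completion, and the whole event can be copied into the ring verbatim. A sketch of that record, roughly as declared in include/uapi/linux/aio_abi.h (field comments paraphrased):

struct io_event {
	__u64	data;	/* the data field from the iocb */
	__u64	obj;	/* what iocb this event came from */
	__s64	res;	/* result code for this event */
	__s64	res2;	/* secondary result */
};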