@@ -157,8 +157,7 @@ static struct svc_rdma_op_ctxt *alloc_ctxt(struct svcxprt_rdma *xprt,
 	ctxt = kmalloc(sizeof(*ctxt), flags);
 	if (ctxt) {
 		ctxt->xprt = xprt;
-		INIT_LIST_HEAD(&ctxt->free);
-		INIT_LIST_HEAD(&ctxt->dto_q);
+		INIT_LIST_HEAD(&ctxt->list);
 	}
 	return ctxt;
 }
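The companion change to struct svc_rdma_op_ctxt itself lives in the header, not in this file's diff. Based on the fields touched here, the consolidation presumably looks like this minimal sketch (surrounding members and their order are assumptions, not taken from the commit):

	/* Sketch of the assumed header change -- most members elided */
	struct svc_rdma_op_ctxt {
		struct list_head list;	/* replaces the separate "free" and "dto_q" heads */
		struct svc_rdma_op_ctxt *read_hdr;
		struct svcxprt_rdma *xprt;
		u32 byte_len;
		/* ... */
	};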
@@ -180,7 +179,7 @@ static bool svc_rdma_prealloc_ctxts(struct svcxprt_rdma *xprt)
 			dprintk("svcrdma: No memory for RDMA ctxt\n");
 			return false;
 		}
-		list_add(&ctxt->free, &xprt->sc_ctxts);
+		list_add(&ctxt->list, &xprt->sc_ctxts);
 	}
 	return true;
 }
@@ -195,8 +194,8 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 		goto out_empty;
 
 	ctxt = list_first_entry(&xprt->sc_ctxts,
-				struct svc_rdma_op_ctxt, free);
-	list_del_init(&ctxt->free);
+				struct svc_rdma_op_ctxt, list);
+	list_del(&ctxt->list);
 	spin_unlock_bh(&xprt->sc_ctxt_lock);
 
 out:
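Note that the get path also drops list_del_init() in favor of plain list_del(): once a ctxt leaves the free list it is handed straight to the caller and never tested with list_empty(), so the poisoning variant suffices. A one-line illustration of the difference (generic kernel list API, not from this commit):

	list_del(&ctxt->list);      /* poisons next/prev; entry must be re-added before reuse */
	list_del_init(&ctxt->list); /* leaves entry as an empty list; list_empty() stays valid */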
@@ -256,7 +255,7 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
 
 	spin_lock_bh(&xprt->sc_ctxt_lock);
 	xprt->sc_ctxt_used--;
-	list_add(&ctxt->free, &xprt->sc_ctxts);
+	list_add(&ctxt->list, &xprt->sc_ctxts);
 	spin_unlock_bh(&xprt->sc_ctxt_lock);
 }
 
@@ -266,8 +265,8 @@ static void svc_rdma_destroy_ctxts(struct svcxprt_rdma *xprt)
 		struct svc_rdma_op_ctxt *ctxt;
 
 		ctxt = list_first_entry(&xprt->sc_ctxts,
-					struct svc_rdma_op_ctxt, free);
-		list_del(&ctxt->free);
+					struct svc_rdma_op_ctxt, list);
+		list_del(&ctxt->list);
 		kfree(ctxt);
 	}
 }
@@ -404,7 +403,7 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 	/* All wc fields are now known to be valid */
 	ctxt->byte_len = wc->byte_len;
 	spin_lock(&xprt->sc_rq_dto_lock);
-	list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
+	list_add_tail(&ctxt->list, &xprt->sc_rq_dto_q);
 	spin_unlock(&xprt->sc_rq_dto_lock);
 
 	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
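A single list_head works across these queues because a ctxt is on at most one list at any moment: the free list (sc_ctxts), the receive dto queue (sc_rq_dto_q), or the read-completion queue (sc_read_complete_q). A simplified lifecycle sketch using the functions in this diff (locking and error handling elided; the dequeue step is assumed to happen in the recvfrom path):

	ctxt = svc_rdma_get_context(xprt);	/* list_del() off sc_ctxts */
	/* ... receive completes in svc_rdma_wc_receive() ... */
	list_add_tail(&ctxt->list, &xprt->sc_rq_dto_q);
	/* ... dequeued for processing, then ... */
	svc_rdma_put_context(ctxt, 1);		/* list_add() back onto sc_ctxts */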
@@ -525,7 +524,7 @@ void svc_rdma_wc_read(struct ib_cq *cq, struct ib_wc *wc)
 
 		read_hdr = ctxt->read_hdr;
 		spin_lock(&xprt->sc_rq_dto_lock);
-		list_add_tail(&read_hdr->dto_q,
+		list_add_tail(&read_hdr->list,
 			      &xprt->sc_read_complete_q);
 		spin_unlock(&xprt->sc_rq_dto_lock);
 
@@ -1213,20 +1212,18 @@ static void __svc_rdma_free(struct work_struct *work)
 	 */
 	while (!list_empty(&rdma->sc_read_complete_q)) {
 		struct svc_rdma_op_ctxt *ctxt;
-		ctxt = list_entry(rdma->sc_read_complete_q.next,
-				  struct svc_rdma_op_ctxt,
-				  dto_q);
-		list_del_init(&ctxt->dto_q);
+		ctxt = list_first_entry(&rdma->sc_read_complete_q,
+					struct svc_rdma_op_ctxt, list);
+		list_del(&ctxt->list);
 		svc_rdma_put_context(ctxt, 1);
 	}
 
 	/* Destroy queued, but not processed recv completions */
 	while (!list_empty(&rdma->sc_rq_dto_q)) {
 		struct svc_rdma_op_ctxt *ctxt;
-		ctxt = list_entry(rdma->sc_rq_dto_q.next,
-				  struct svc_rdma_op_ctxt,
-				  dto_q);
-		list_del_init(&ctxt->dto_q);
+		ctxt = list_first_entry(&rdma->sc_rq_dto_q,
+					struct svc_rdma_op_ctxt, list);
+		list_del(&ctxt->list);
 		svc_rdma_put_context(ctxt, 1);
 	}
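The teardown loops also trade open-coded list_entry(head.next, ...) for list_first_entry(), which expands to exactly that but reads as intent; for example:

	/* Equivalent by definition; the second form is the current idiom: */
	ctxt = list_entry(rdma->sc_rq_dto_q.next, struct svc_rdma_op_ctxt, list);
	ctxt = list_first_entry(&rdma->sc_rq_dto_q, struct svc_rdma_op_ctxt, list);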