@@ -31,6 +31,12 @@
 
 #define uptr64(val) ((void __user *)(uintptr_t)(val))
 
+struct bsg_set {
+	struct blk_mq_tag_set	tag_set;
+	bsg_job_fn		*job_fn;
+	bsg_timeout_fn		*timeout_fn;
+};
+
 static int bsg_transport_check_proto(struct sg_io_v4 *hdr)
 {
 	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
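
The new `struct bsg_set` bundles the `blk_mq_tag_set` with the two callbacks that previously hung off the `request_queue`. Because the tag set is embedded as a member, any code that holds `q->tag_set` can recover the enclosing `bsg_set` with `container_of()`. A minimal, self-contained userspace sketch of that pattern follows; the names are illustrative, not kernel code, and the simplified macro omits the type check the kernel's real `container_of()` performs:

```c
#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(): recover the enclosing object from a member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tag_set { int depth; };

struct owner {
	struct tag_set set;	/* embedded member, like bsg_set.tag_set */
	int private_data;
};

static void use(struct tag_set *set)
{
	/* Walk back from the member pointer to its container. */
	struct owner *o = container_of(set, struct owner, set);

	printf("private_data = %d\n", o->private_data);
}

int main(void)
{
	struct owner o = { .set = { .depth = 128 }, .private_data = 42 };

	use(&o.set);	/* prints: private_data = 42 */
	return 0;
}
```
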
@@ -239,6 +245,8 @@ static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct request_queue *q = hctx->queue;
 	struct device *dev = q->queuedata;
 	struct request *req = bd->rq;
+	struct bsg_set *bset =
+		container_of(q->tag_set, struct bsg_set, tag_set);
 	int ret;
 
 	blk_mq_start_request(req);
@@ -249,7 +257,7 @@ static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (!bsg_prepare_job(dev, req))
 		return BLK_STS_IOERR;
 
-	ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
+	ret = bset->job_fn(blk_mq_rq_to_pdu(req));
 	if (ret)
 		return BLK_STS_IOERR;
 
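
With the callback stored in `bsg_set`, `bsg_queue_rq()` now dispatches through `bset->job_fn`; its argument, `blk_mq_rq_to_pdu(req)`, is the per-request `struct bsg_job`. A hedged sketch of what an LLD's handler might look like (the `my_*` names are hypothetical):

```c
/* Hypothetical handler matching the bsg_job_fn signature. */
static int my_bsg_dispatch(struct bsg_job *job)
{
	/*
	 * Queue the command to the hardware; my_hw_queue_cmd() is a
	 * made-up helper. Completion is reported later via bsg_job_done().
	 */
	if (!my_hw_queue_cmd(job))
		return -EIO;	/* bsg_queue_rq() maps nonzero to BLK_STS_IOERR */
	return 0;
}
```
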
@@ -292,25 +300,25 @@ static void bsg_exit_rq(struct blk_mq_tag_set *set, struct request *req,
 void bsg_remove_queue(struct request_queue *q)
 {
 	if (q) {
-		struct blk_mq_tag_set *set = q->tag_set;
+		struct bsg_set *bset =
+			container_of(q->tag_set, struct bsg_set, tag_set);
 
 		bsg_unregister_queue(q);
 		blk_cleanup_queue(q);
-		blk_mq_free_tag_set(set);
-		kfree(set);
+		blk_mq_free_tag_set(&bset->tag_set);
+		kfree(bset);
 	}
 }
 EXPORT_SYMBOL_GPL(bsg_remove_queue);
 
 static enum blk_eh_timer_return bsg_timeout(struct request *rq, bool reserved)
 {
-	enum blk_eh_timer_return ret = BLK_EH_DONE;
-	struct request_queue *q = rq->q;
-
-	if (q->bsg_job_timeout_fn)
-		ret = q->bsg_job_timeout_fn(rq);
+	struct bsg_set *bset =
+		container_of(rq->q->tag_set, struct bsg_set, tag_set);
 
-	return ret;
+	if (!bset->timeout_fn)
+		return BLK_EH_DONE;
+	return bset->timeout_fn(rq);
 }
 
 static const struct blk_mq_ops bsg_mq_ops = {
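
`bsg_timeout()` likewise fetches its callback from the tag set's container rather than from the queue; a NULL `timeout_fn` now simply means "give up" (`BLK_EH_DONE`). A hedged sketch of an LLD timeout handler under the new `bsg_timeout_fn` type, where `my_abort_cmd()` is a hypothetical helper:

```c
static enum blk_eh_timer_return my_bsg_timeout(struct request *rq)
{
	/* Try to abort the stuck command; re-arm the timer if we cannot. */
	if (my_abort_cmd(blk_mq_rq_to_pdu(rq)))
		return BLK_EH_DONE;
	return BLK_EH_RESET_TIMER;
}
```
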
@@ -330,16 +338,21 @@ static const struct blk_mq_ops bsg_mq_ops = {
  * @dd_job_size: size of LLD data needed for each job
  */
 struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
-		bsg_job_fn *job_fn, rq_timed_out_fn *timeout, int dd_job_size)
+		bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size)
 {
+	struct bsg_set *bset;
 	struct blk_mq_tag_set *set;
 	struct request_queue *q;
 	int ret = -ENOMEM;
 
-	set = kzalloc(sizeof(*set), GFP_KERNEL);
-	if (!set)
+	bset = kzalloc(sizeof(*bset), GFP_KERNEL);
+	if (!bset)
 		return ERR_PTR(-ENOMEM);
 
+	bset->job_fn = job_fn;
+	bset->timeout_fn = timeout;
+
+	set = &bset->tag_set;
 	set->ops = &bsg_mq_ops,
 	set->nr_hw_queues = 1;
 	set->queue_depth = 128;
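
On the setup side a single `kzalloc()` now covers the tag set and both callbacks, so their lifetimes can no longer diverge. Callers only see the signature change from `rq_timed_out_fn *` to `bsg_timeout_fn *`. A hedged sketch of an LLD attaching a bsg queue, with all `my_*` names and `struct my_job` hypothetical:

```c
static int my_attach_bsg(struct device *dev)
{
	struct request_queue *q;

	q = bsg_setup_queue(dev, dev_name(dev), my_bsg_dispatch,
			    my_bsg_timeout, sizeof(struct my_job));
	if (IS_ERR(q))
		return PTR_ERR(q);

	my_save_queue(dev, q);	/* hypothetical per-device bookkeeping */
	return 0;
}
```
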
@@ -356,8 +369,6 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
 	}
 
 	q->queuedata = dev;
-	q->bsg_job_fn = job_fn;
-	q->bsg_job_timeout_fn = timeout;
 	blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
 	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 
@@ -374,7 +385,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
 out_queue:
 	blk_mq_free_tag_set(set);
 out_tag_set:
-	kfree(set);
+	kfree(bset);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(bsg_setup_queue);
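
Teardown stays symmetric: everything allocated in `bsg_setup_queue()` hangs off the one `bsg_set`, so `bsg_remove_queue()` (above) can release it all with a single `kfree()` after freeing the tag set.

```c
/* Pairs with my_attach_bsg() above; q came from bsg_setup_queue(). */
bsg_remove_queue(q);
```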