@@ -23,6 +23,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/ata.h>
 #include <linux/hdreg.h>
 #include <linux/cdrom.h>
@@ -142,7 +143,6 @@ struct cow {
 #define MAX_SG 64
 
 struct ubd {
-	struct list_head restart;
 	/* name (and fd, below) of the file opened for writing, either the
 	 * backing or the cow file. */
 	char *file;
@@ -156,9 +156,12 @@ struct ubd {
 	struct cow cow;
 	struct platform_device pdev;
 	struct request_queue *queue;
+	struct blk_mq_tag_set tag_set;
 	spinlock_t lock;
+};
+
+struct ubd_pdu {
 	struct scatterlist sg[MAX_SG];
-	struct request *request;
 	int start_sg, end_sg;
 	sector_t rq_pos;
 };
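The key structural move in this hunk: everything that tracks a single in-flight request (the scatterlist, the sg cursor, the sector position) leaves `struct ubd` and becomes a per-request PDU. blk-mq allocates that PDU in the same memory block as each `struct request`, sized by `tag_set.cmd_size` (set further down), and hands it back via `blk_mq_rq_to_pdu()`. A minimal sketch of the pattern, with illustrative names (`my_pdu`, `my_queue_rq`) that are not part of this patch:

```c
#include <linux/blk-mq.h>

/* Per-request driver state; blk-mq reserves sizeof(struct my_pdu)
 * bytes behind every request because tag_set.cmd_size says so. */
struct my_pdu {
	sector_t pos;
};

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	/* No allocation here: the PDU already lives behind bd->rq. */
	struct my_pdu *pdu = blk_mq_rq_to_pdu(bd->rq);

	pdu->pos = blk_rq_pos(bd->rq);
	blk_mq_start_request(bd->rq);
	/* ... hand the request to the backend ... */
	return BLK_STS_OK;
}
```

This is also why the old `DEFAULT_UBD` initializers for `request`, `start_sg`, `end_sg` and `rq_pos` disappear in the next hunk: that state no longer belongs to the device.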
@@ -182,10 +185,6 @@ struct ubd {
 	.shared = 0, \
 	.cow = DEFAULT_COW, \
 	.lock = __SPIN_LOCK_UNLOCKED(ubd_devs.lock), \
-	.request = NULL, \
-	.start_sg = 0, \
-	.end_sg = 0, \
-	.rq_pos = 0, \
 }
 
 /* Protected by ubd_lock */
@@ -196,6 +195,12 @@ static int fake_ide = 0;
 static struct proc_dir_entry *proc_ide_root = NULL;
 static struct proc_dir_entry *proc_ide = NULL;
 
+static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
+				 const struct blk_mq_queue_data *bd);
+static int ubd_init_request(struct blk_mq_tag_set *set,
+			    struct request *req, unsigned int hctx_idx,
+			    unsigned int numa_node);
+
 static void make_proc_ide(void)
 {
 	proc_ide_root = proc_mkdir("ide", NULL);
@@ -436,11 +441,8 @@ __uml_help(udb_setup,
 "    in the boot output.\n\n"
 );
 
-static void do_ubd_request(struct request_queue *q);
-
 /* Only changed by ubd_init, which is an initcall. */
 static int thread_fd = -1;
-static LIST_HEAD(restart);
 
 /* Function to read several request pointers at a time
  * handling fractional reads if (and as) needed
@@ -498,9 +500,6 @@ static int bulk_req_safe_read(
 /* Called without dev->lock held, and only in interrupt context. */
 static void ubd_handler(void)
 {
-	struct ubd *ubd;
-	struct list_head *list, *next_ele;
-	unsigned long flags;
 	int n;
 	int count;
 
@@ -520,23 +519,17 @@ static void ubd_handler(void)
 			return;
 		}
 		for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
-			blk_end_request(
-				(*irq_req_buffer)[count]->req,
-				BLK_STS_OK,
-				(*irq_req_buffer)[count]->length
-			);
-			kfree((*irq_req_buffer)[count]);
+			struct io_thread_req *io_req = (*irq_req_buffer)[count];
+			int err = io_req->error ? BLK_STS_IOERR : BLK_STS_OK;
+
+			if (!blk_update_request(io_req->req, err, io_req->length))
+				__blk_mq_end_request(io_req->req, err);
+
+			kfree(io_req);
 		}
 	}
-	reactivate_fd(thread_fd, UBD_IRQ);
 
-	list_for_each_safe(list, next_ele, &restart){
-		ubd = container_of(list, struct ubd, restart);
-		list_del_init(&ubd->restart);
-		spin_lock_irqsave(&ubd->lock, flags);
-		do_ubd_request(ubd->queue);
-		spin_unlock_irqrestore(&ubd->lock, flags);
-	}
+	reactivate_fd(thread_fd, UBD_IRQ);
 }
 
 static irqreturn_t ubd_intr(int irq, void *dev)
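Completion changes shape here: the old code ended each chunk with `blk_end_request()`, while the blk-mq version splits that into `blk_update_request()`, which retires `io_req->length` bytes and returns true while the request still has bytes outstanding, and `__blk_mq_end_request()`, which finishes the request once the last chunk lands. The per-device `restart` walk is gone because requeueing is now blk-mq's job (see `submit_request()` below). A condensed sketch of the completion idiom, with a hypothetical helper name:

```c
/* Retire one finished chunk of a request; a sketch of the loop body
 * above, not a helper from this patch. */
static void complete_one_chunk(struct request *req, unsigned int bytes,
			       blk_status_t err)
{
	/* blk_update_request() returns true while bytes remain, so only
	 * the final chunk falls through and ends the request. */
	if (!blk_update_request(req, err, bytes))
		__blk_mq_end_request(req, err);
}
```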
@@ -857,6 +850,7 @@ static void ubd_device_release(struct device *dev)
 	struct ubd *ubd_dev = dev_get_drvdata(dev);
 
 	blk_cleanup_queue(ubd_dev->queue);
+	blk_mq_free_tag_set(&ubd_dev->tag_set);
 	*ubd_dev = ((struct ubd) DEFAULT_UBD);
 }
 
@@ -899,6 +893,11 @@ static int ubd_disk_register(int major, u64 size, int unit,
 
 #define ROUND_BLOCK(n) ((n + ((1 << 9) - 1)) & (-1 << 9))
 
+static const struct blk_mq_ops ubd_mq_ops = {
+	.queue_rq = ubd_queue_rq,
+	.init_request = ubd_init_request,
+};
+
 static int ubd_add(int n, char **error_out)
 {
 	struct ubd *ubd_dev = &ubd_devs[n];
@@ -915,23 +914,32 @@ static int ubd_add(int n, char **error_out)
 
 	ubd_dev->size = ROUND_BLOCK(ubd_dev->size);
 
-	INIT_LIST_HEAD(&ubd_dev->restart);
-	sg_init_table(ubd_dev->sg, MAX_SG);
+	ubd_dev->tag_set.ops = &ubd_mq_ops;
+	ubd_dev->tag_set.queue_depth = 64;
+	ubd_dev->tag_set.numa_node = NUMA_NO_NODE;
+	ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+	ubd_dev->tag_set.cmd_size = sizeof(struct ubd_pdu);
+	ubd_dev->tag_set.driver_data = ubd_dev;
+	ubd_dev->tag_set.nr_hw_queues = 1;
 
-	err = -ENOMEM;
-	ubd_dev->queue = blk_init_queue(do_ubd_request, &ubd_dev->lock);
-	if (ubd_dev->queue == NULL) {
-		*error_out = "Failed to initialize device queue";
+	err = blk_mq_alloc_tag_set(&ubd_dev->tag_set);
+	if (err)
 		goto out;
+
+	ubd_dev->queue = blk_mq_init_queue(&ubd_dev->tag_set);
+	if (IS_ERR(ubd_dev->queue)) {
+		err = PTR_ERR(ubd_dev->queue);
+		goto out_cleanup;
 	}
+
 	ubd_dev->queue->queuedata = ubd_dev;
 	blk_queue_write_cache(ubd_dev->queue, true, false);
 
 	blk_queue_max_segments(ubd_dev->queue, MAX_SG);
 	err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
 	if (err){
 		*error_out = "Failed to register device";
-		goto out_cleanup;
+		goto out_cleanup_tags;
 	}
 
 	if (fake_major != UBD_MAJOR)
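The bring-up order in this hunk is the canonical blk-mq sequence: fill in the tag set, `blk_mq_alloc_tag_set()`, then `blk_mq_init_queue()` (which, unlike the old `blk_init_queue()`, returns an `ERR_PTR` rather than NULL on failure), then register the disk. Teardown has to unwind in the reverse order, which is what the new `out_cleanup_tags` label in the next hunk is for. A compact sketch of the same ordering with the unwinding made explicit (illustrative function name, using the `blk_cleanup_queue()`-era API of this patch):

```c
/* Hypothetical bring-up helper showing the alloc/unwind pairing. */
static int my_bringup(struct ubd *ubd_dev)
{
	int err = blk_mq_alloc_tag_set(&ubd_dev->tag_set);

	if (err)
		return err;			/* nothing to unwind yet */

	ubd_dev->queue = blk_mq_init_queue(&ubd_dev->tag_set);
	if (IS_ERR(ubd_dev->queue)) {
		err = PTR_ERR(ubd_dev->queue);
		goto free_tag_set;		/* queue was never created */
	}

	/* ... register the gendisk; on failure clean up the queue,
	 * then the tag set, in reverse order ... */
	return 0;

free_tag_set:
	blk_mq_free_tag_set(&ubd_dev->tag_set);
	return err;
}
```

A single hardware queue is a natural fit here, since all I/O is funneled through the one io thread behind `thread_fd` anyway.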
@@ -949,6 +957,8 @@ static int ubd_add(int n, char **error_out)
 out:
 	return err;
 
+out_cleanup_tags:
+	blk_mq_free_tag_set(&ubd_dev->tag_set);
 out_cleanup:
 	blk_cleanup_queue(ubd_dev->queue);
 	goto out;
@@ -1333,80 +1343,78 @@ static void prepare_flush_request(struct request *req,
 	io_req->op = UBD_FLUSH;
 }
 
-static bool submit_request(struct io_thread_req *io_req, struct ubd *dev)
+static void submit_request(struct io_thread_req *io_req, struct ubd *dev)
 {
 	int n = os_write_file(thread_fd, &io_req,
 			sizeof(io_req));
+
 	if (n != sizeof(io_req)) {
 		if (n != -EAGAIN)
-			printk("write to io thread failed, "
-			       "errno = %d\n", -n);
-		else if (list_empty(&dev->restart))
-			list_add(&dev->restart, &restart);
+			pr_err("write to io thread failed: %d\n", -n);
 
+		blk_mq_requeue_request(io_req->req, true);
 		kfree(io_req);
-		return false;
 	}
-	return true;
 }
 
-/* Called with dev->lock held */
-static void do_ubd_request(struct request_queue *q)
+static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
+				 const struct blk_mq_queue_data *bd)
 {
+	struct request *req = bd->rq;
+	struct ubd *dev = hctx->queue->queuedata;
+	struct ubd_pdu *pdu = blk_mq_rq_to_pdu(req);
 	struct io_thread_req *io_req;
-	struct request *req;
 
-	while(1){
-		struct ubd *dev = q->queuedata;
-		if(dev->request == NULL){
-			struct request *req = blk_fetch_request(q);
-			if(req == NULL)
-				return;
+	blk_mq_start_request(req);
+
+	pdu->rq_pos = blk_rq_pos(req);
+	pdu->start_sg = 0;
+	pdu->end_sg = blk_rq_map_sg(req->q, req, pdu->sg);
 
-			dev->request = req;
-			dev->rq_pos = blk_rq_pos(req);
-			dev->start_sg = 0;
-			dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
+	if (req_op(req) == REQ_OP_FLUSH) {
+		io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC);
+		if (io_req == NULL) {
+			blk_mq_requeue_request(req, true);
+			goto done;
 		}
+		prepare_flush_request(req, io_req);
+		submit_request(io_req, dev);
 
-		req = dev->request;
+		goto done;
+	}
 
-		if (req_op(req) == REQ_OP_FLUSH) {
-			io_req = kmalloc(sizeof(struct io_thread_req),
-					 GFP_ATOMIC);
-			if (io_req == NULL) {
-				if (list_empty(&dev->restart))
-					list_add(&dev->restart, &restart);
-				return;
-			}
-			prepare_flush_request(req, io_req);
-			if (submit_request(io_req, dev) == false)
-				return;
+	while (pdu->start_sg < pdu->end_sg) {
+		struct scatterlist *sg = &pdu->sg[pdu->start_sg];
+
+		io_req = kmalloc(sizeof(struct io_thread_req),
+				GFP_ATOMIC);
+		if (io_req == NULL) {
+			blk_mq_requeue_request(req, true);
+			goto done;
 		}
+		prepare_request(req, io_req,
+				(unsigned long long)pdu->rq_pos << 9,
+				sg->offset, sg->length, sg_page(sg));
 
-		while(dev->start_sg < dev->end_sg){
-			struct scatterlist *sg = &dev->sg[dev->start_sg];
+		submit_request(io_req, dev);
 
-			io_req = kmalloc(sizeof(struct io_thread_req),
-					 GFP_ATOMIC);
-			if(io_req == NULL){
-				if (list_empty(&dev->restart))
-					list_add(&dev->restart, &restart);
-				return;
-			}
-			prepare_request(req, io_req,
-					(unsigned long long)dev->rq_pos << 9,
-					sg->offset, sg->length, sg_page(sg));
+		pdu->rq_pos += sg->length >> 9;
+		pdu->start_sg++;
+	}
 
-			if(submit_request(io_req, dev) == false)
-				return;
+done:
+	return BLK_STS_OK;
+}
 
-			dev->rq_pos += sg->length >> 9;
-			dev->start_sg++;
-		}
-		dev->end_sg = 0;
-		dev->request = NULL;
-	}
+static int ubd_init_request(struct blk_mq_tag_set *set,
+		struct request *req, unsigned int hctx_idx,
+		unsigned int numa_node)
+{
+	struct ubd_pdu *pdu = blk_mq_rq_to_pdu(req);
+
+	sg_init_table(pdu->sg, MAX_SG);
+
+	return 0;
+}
 
 static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
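One subtlety in the new `ubd_queue_rq()`: it returns `BLK_STS_OK` even on the allocation-failure paths, because by that point `blk_mq_start_request()` has already been called, so the request is pushed back with `blk_mq_requeue_request(req, true)` (the `true` kicks the requeue list so the retry happens promptly) rather than being failed. Had the allocation happened before starting the request, the handler could instead have reported pressure to the core. A contrasting sketch, with hypothetical names (`my_io`, `my_queue_rq`) not taken from this patch:

```c
#include <linux/blk-mq.h>
#include <linux/slab.h>

struct my_io {
	struct request *req;
};

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct my_io *io = kmalloc(sizeof(*io), GFP_ATOMIC);

	if (!io)
		return BLK_STS_RESOURCE;	/* core replays the request */

	blk_mq_start_request(bd->rq);
	io->req = bd->rq;
	/* ... hand io to the backend; the completion path ends the
	 * request and frees io ... */
	return BLK_STS_OK;
}
```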