
Commit 4e6da0f

richardweinberger authored and axboe committed
um: Convert ubd driver to blk-mq
Convert the driver to the modern blk-mq framework. As a byproduct we get rid of our open-coded restart logic and let blk-mq handle it.

Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 6d1f9df commit 4e6da0f
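
The heart of the conversion is replacing the legacy blk_init_queue()/request_fn setup with a blk_mq_tag_set and a queue_rq handler. A minimal sketch of that pattern follows; the field values and function names are taken from the diff below, but the helper ubd_setup_queue is hypothetical (the commit does this inline in ubd_add()), and error handling is abbreviated:

static const struct blk_mq_ops ubd_mq_ops = {
	.queue_rq	= ubd_queue_rq,		/* called by blk-mq for each request */
	.init_request	= ubd_init_request,	/* one-time per-request PDU setup */
};

/* Hypothetical helper illustrating the setup order used in ubd_add(). */
static int ubd_setup_queue(struct ubd *ubd_dev)
{
	int err;

	ubd_dev->tag_set.ops = &ubd_mq_ops;
	ubd_dev->tag_set.queue_depth = 64;
	ubd_dev->tag_set.numa_node = NUMA_NO_NODE;
	ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ubd_dev->tag_set.cmd_size = sizeof(struct ubd_pdu);	/* per-request scatterlist state */
	ubd_dev->tag_set.nr_hw_queues = 1;			/* single io-thread backend */

	err = blk_mq_alloc_tag_set(&ubd_dev->tag_set);
	if (err)
		return err;

	ubd_dev->queue = blk_mq_init_queue(&ubd_dev->tag_set);
	if (IS_ERR(ubd_dev->queue)) {
		blk_mq_free_tag_set(&ubd_dev->tag_set);
		return PTR_ERR(ubd_dev->queue);
	}

	ubd_dev->queue->queuedata = ubd_dev;
	return 0;
}

With this in place, blk-mq pushes requests into ubd_queue_rq() instead of the driver fetching them itself, so the open-coded restart list for the -EAGAIN case becomes unnecessary: blk_mq_requeue_request() handles it.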

1 file changed

arch/um/drivers/ubd_kern.c

Lines changed: 93 additions & 85 deletions
@@ -23,6 +23,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/ata.h>
 #include <linux/hdreg.h>
 #include <linux/cdrom.h>
@@ -142,7 +143,6 @@ struct cow {
 #define MAX_SG 64
 
 struct ubd {
-	struct list_head restart;
 	/* name (and fd, below) of the file opened for writing, either the
 	 * backing or the cow file. */
 	char *file;
@@ -156,9 +156,12 @@ struct ubd {
 	struct cow cow;
 	struct platform_device pdev;
 	struct request_queue *queue;
+	struct blk_mq_tag_set tag_set;
 	spinlock_t lock;
+};
+
+struct ubd_pdu {
 	struct scatterlist sg[MAX_SG];
-	struct request *request;
 	int start_sg, end_sg;
 	sector_t rq_pos;
 };
@@ -182,10 +185,6 @@ struct ubd {
 	.shared = 0, \
 	.cow = DEFAULT_COW, \
 	.lock = __SPIN_LOCK_UNLOCKED(ubd_devs.lock), \
-	.request = NULL, \
-	.start_sg = 0, \
-	.end_sg = 0, \
-	.rq_pos = 0, \
 }
 
 /* Protected by ubd_lock */
@@ -196,6 +195,12 @@ static int fake_ide = 0;
 static struct proc_dir_entry *proc_ide_root = NULL;
 static struct proc_dir_entry *proc_ide = NULL;
 
+static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
+				 const struct blk_mq_queue_data *bd);
+static int ubd_init_request(struct blk_mq_tag_set *set,
+			    struct request *req, unsigned int hctx_idx,
+			    unsigned int numa_node);
+
 static void make_proc_ide(void)
 {
 	proc_ide_root = proc_mkdir("ide", NULL);
@@ -436,11 +441,8 @@ __uml_help(udb_setup,
 "    in the boot output.\n\n"
 );
 
-static void do_ubd_request(struct request_queue * q);
-
 /* Only changed by ubd_init, which is an initcall. */
 static int thread_fd = -1;
-static LIST_HEAD(restart);
 
 /* Function to read several request pointers at a time
  * handling fractional reads if (and as) needed
@@ -498,9 +500,6 @@ static int bulk_req_safe_read(
 /* Called without dev->lock held, and only in interrupt context. */
 static void ubd_handler(void)
 {
-	struct ubd *ubd;
-	struct list_head *list, *next_ele;
-	unsigned long flags;
 	int n;
 	int count;
 
@@ -520,23 +519,17 @@ static void ubd_handler(void)
 			return;
 		}
 		for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
-			blk_end_request(
-				(*irq_req_buffer)[count]->req,
-				BLK_STS_OK,
-				(*irq_req_buffer)[count]->length
-			);
-			kfree((*irq_req_buffer)[count]);
+			struct io_thread_req *io_req = (*irq_req_buffer)[count];
+			int err = io_req->error ? BLK_STS_IOERR : BLK_STS_OK;
+
+			if (!blk_update_request(io_req->req, err, io_req->length))
+				__blk_mq_end_request(io_req->req, err);
+
+			kfree(io_req);
 		}
 	}
-	reactivate_fd(thread_fd, UBD_IRQ);
 
-	list_for_each_safe(list, next_ele, &restart){
-		ubd = container_of(list, struct ubd, restart);
-		list_del_init(&ubd->restart);
-		spin_lock_irqsave(&ubd->lock, flags);
-		do_ubd_request(ubd->queue);
-		spin_unlock_irqrestore(&ubd->lock, flags);
-	}
+	reactivate_fd(thread_fd, UBD_IRQ);
 }
 
 static irqreturn_t ubd_intr(int irq, void *dev)
@@ -857,6 +850,7 @@ static void ubd_device_release(struct device *dev)
 	struct ubd *ubd_dev = dev_get_drvdata(dev);
 
 	blk_cleanup_queue(ubd_dev->queue);
+	blk_mq_free_tag_set(&ubd_dev->tag_set);
 	*ubd_dev = ((struct ubd) DEFAULT_UBD);
 }
 
@@ -899,6 +893,11 @@ static int ubd_disk_register(int major, u64 size, int unit,
 
 #define ROUND_BLOCK(n) ((n + ((1 << 9) - 1)) & (-1 << 9))
 
+static const struct blk_mq_ops ubd_mq_ops = {
+	.queue_rq = ubd_queue_rq,
+	.init_request = ubd_init_request,
+};
+
 static int ubd_add(int n, char **error_out)
 {
 	struct ubd *ubd_dev = &ubd_devs[n];
@@ -915,23 +914,32 @@ static int ubd_add(int n, char **error_out)
 
 	ubd_dev->size = ROUND_BLOCK(ubd_dev->size);
 
-	INIT_LIST_HEAD(&ubd_dev->restart);
-	sg_init_table(ubd_dev->sg, MAX_SG);
+	ubd_dev->tag_set.ops = &ubd_mq_ops;
+	ubd_dev->tag_set.queue_depth = 64;
+	ubd_dev->tag_set.numa_node = NUMA_NO_NODE;
+	ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+	ubd_dev->tag_set.cmd_size = sizeof(struct ubd_pdu);
+	ubd_dev->tag_set.driver_data = ubd_dev;
+	ubd_dev->tag_set.nr_hw_queues = 1;
 
-	err = -ENOMEM;
-	ubd_dev->queue = blk_init_queue(do_ubd_request, &ubd_dev->lock);
-	if (ubd_dev->queue == NULL) {
-		*error_out = "Failed to initialize device queue";
+	err = blk_mq_alloc_tag_set(&ubd_dev->tag_set);
+	if (err)
 		goto out;
+
+	ubd_dev->queue = blk_mq_init_queue(&ubd_dev->tag_set);
+	if (IS_ERR(ubd_dev->queue)) {
+		err = PTR_ERR(ubd_dev->queue);
+		goto out_cleanup;
 	}
+
 	ubd_dev->queue->queuedata = ubd_dev;
 	blk_queue_write_cache(ubd_dev->queue, true, false);
 
 	blk_queue_max_segments(ubd_dev->queue, MAX_SG);
 	err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
 	if(err){
 		*error_out = "Failed to register device";
-		goto out_cleanup;
+		goto out_cleanup_tags;
 	}
 
 	if (fake_major != UBD_MAJOR)
@@ -949,6 +957,8 @@ static int ubd_add(int n, char **error_out)
 out:
 	return err;
 
+out_cleanup_tags:
+	blk_mq_free_tag_set(&ubd_dev->tag_set);
 out_cleanup:
 	blk_cleanup_queue(ubd_dev->queue);
 	goto out;
@@ -1333,80 +1343,78 @@ static void prepare_flush_request(struct request *req,
 	io_req->op = UBD_FLUSH;
 }
 
-static bool submit_request(struct io_thread_req *io_req, struct ubd *dev)
+static void submit_request(struct io_thread_req *io_req, struct ubd *dev)
 {
 	int n = os_write_file(thread_fd, &io_req,
 			sizeof(io_req));
+
 	if (n != sizeof(io_req)) {
 		if (n != -EAGAIN)
-			printk("write to io thread failed, "
-			       "errno = %d\n", -n);
-		else if (list_empty(&dev->restart))
-			list_add(&dev->restart, &restart);
+			pr_err("write to io thread failed: %d\n", -n);
 
+		blk_mq_requeue_request(io_req->req, true);
 		kfree(io_req);
-		return false;
 	}
-	return true;
 }
 
-/* Called with dev->lock held */
-static void do_ubd_request(struct request_queue *q)
+static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
+				 const struct blk_mq_queue_data *bd)
 {
+	struct request *req = bd->rq;
+	struct ubd *dev = hctx->queue->queuedata;
+	struct ubd_pdu *pdu = blk_mq_rq_to_pdu(req);
 	struct io_thread_req *io_req;
-	struct request *req;
 
-	while(1){
-		struct ubd *dev = q->queuedata;
-		if(dev->request == NULL){
-			struct request *req = blk_fetch_request(q);
-			if(req == NULL)
-				return;
+	blk_mq_start_request(req);
+
+	pdu->rq_pos = blk_rq_pos(req);
+	pdu->start_sg = 0;
+	pdu->end_sg = blk_rq_map_sg(req->q, req, pdu->sg);
 
-			dev->request = req;
-			dev->rq_pos = blk_rq_pos(req);
-			dev->start_sg = 0;
-			dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
+	if (req_op(req) == REQ_OP_FLUSH) {
+		io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC);
+		if (io_req == NULL) {
+			blk_mq_requeue_request(req, true);
+			goto done;
 		}
+		prepare_flush_request(req, io_req);
+		submit_request(io_req, dev);
 
-		req = dev->request;
+		goto done;
+	}
 
-		if (req_op(req) == REQ_OP_FLUSH) {
-			io_req = kmalloc(sizeof(struct io_thread_req),
-					 GFP_ATOMIC);
-			if (io_req == NULL) {
-				if (list_empty(&dev->restart))
-					list_add(&dev->restart, &restart);
-				return;
-			}
-			prepare_flush_request(req, io_req);
-			if (submit_request(io_req, dev) == false)
-				return;
+	while (pdu->start_sg < pdu->end_sg) {
+		struct scatterlist *sg = &pdu->sg[pdu->start_sg];
+
+		io_req = kmalloc(sizeof(struct io_thread_req),
+				 GFP_ATOMIC);
+		if (io_req == NULL) {
+			blk_mq_requeue_request(req, true);
+			goto done;
 		}
+		prepare_request(req, io_req,
+				(unsigned long long)pdu->rq_pos << 9,
+				sg->offset, sg->length, sg_page(sg));
 
-		while(dev->start_sg < dev->end_sg){
-			struct scatterlist *sg = &dev->sg[dev->start_sg];
+		submit_request(io_req, dev);
 
-			io_req = kmalloc(sizeof(struct io_thread_req),
-					 GFP_ATOMIC);
-			if(io_req == NULL){
-				if(list_empty(&dev->restart))
-					list_add(&dev->restart, &restart);
-				return;
-			}
-			prepare_request(req, io_req,
-					(unsigned long long)dev->rq_pos << 9,
-					sg->offset, sg->length, sg_page(sg));
+		pdu->rq_pos += sg->length >> 9;
+		pdu->start_sg++;
+	}
 
-			if (submit_request(io_req, dev) == false)
-				return;
+done:
+	return BLK_STS_OK;
+}
 
-			dev->rq_pos += sg->length >> 9;
-			dev->start_sg++;
-		}
-		dev->end_sg = 0;
-		dev->request = NULL;
-	}
+static int ubd_init_request(struct blk_mq_tag_set *set,
+		struct request *req, unsigned int hctx_idx,
+		unsigned int numa_node)
+{
+	struct ubd_pdu *pdu = blk_mq_rq_to_pdu(req);
+
+	sg_init_table(pdu->sg, MAX_SG);
+
+	return 0;
 }
 
 static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
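
For reference, the completion idiom the new ubd_handler() relies on above: blk_update_request() accounts for the completed byte count and returns false once the request has no bytes left, at which point the request is finished with __blk_mq_end_request(). A minimal sketch of that path, under the assumption of the io_thread_req fields shown in the diff (the helper name ubd_complete_one is hypothetical):

/* Complete one io_thread_req handed back by the I/O thread. */
static void ubd_complete_one(struct io_thread_req *io_req)
{
	blk_status_t err = io_req->error ? BLK_STS_IOERR : BLK_STS_OK;

	/* Each io_thread_req covers one segment of the request, so the
	 * request is ended only after its final segment is accounted. */
	if (!blk_update_request(io_req->req, err, io_req->length))
		__blk_mq_end_request(io_req->req, err);

	kfree(io_req);
}

Because requests are completed per segment this way, the driver needs no per-device completion state, which is what allows the old dev->request bookkeeping to move into the per-request struct ubd_pdu.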
