Skip to content

Commit ecb0a83

Browse files
Christoph Hellwig authored and axboe committed
ubd: remove use of blk_rq_map_sg
There is no good reason to create a scatterlist in the ubd driver, it can just iterate the request directly.

Signed-off-by: Christoph Hellwig <hch@lst.de>
[rw: Folded in improvements as discussed with hch and jens]
Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 6956b95 commit ecb0a83

File tree

1 file changed

+54
-104
lines changed

1 file changed

+54
-104
lines changed

arch/um/drivers/ubd_kern.c

Lines changed: 54 additions & 104 deletions
Original file line numberDiff line numberDiff line change
@@ -160,12 +160,6 @@ struct ubd {
160160
spinlock_t lock;
161161
};
162162

163-
struct ubd_pdu {
164-
struct scatterlist sg[MAX_SG];
165-
int start_sg, end_sg;
166-
sector_t rq_pos;
167-
};
168-
169163
#define DEFAULT_COW { \
170164
.file = NULL, \
171165
.fd = -1, \
@@ -197,9 +191,6 @@ static struct proc_dir_entry *proc_ide = NULL;
197191

198192
static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
199193
const struct blk_mq_queue_data *bd);
200-
static int ubd_init_request(struct blk_mq_tag_set *set,
201-
struct request *req, unsigned int hctx_idx,
202-
unsigned int numa_node);
203194

204195
static void make_proc_ide(void)
205196
{
@@ -895,7 +886,6 @@ static int ubd_disk_register(int major, u64 size, int unit,
895886

896887
static const struct blk_mq_ops ubd_mq_ops = {
897888
.queue_rq = ubd_queue_rq,
898-
.init_request = ubd_init_request,
899889
};
900890

901891
static int ubd_add(int n, char **error_out)
@@ -918,7 +908,6 @@ static int ubd_add(int n, char **error_out)
918908
ubd_dev->tag_set.queue_depth = 64;
919909
ubd_dev->tag_set.numa_node = NUMA_NO_NODE;
920910
ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
921-
ubd_dev->tag_set.cmd_size = sizeof(struct ubd_pdu);
922911
ubd_dev->tag_set.driver_data = ubd_dev;
923912
ubd_dev->tag_set.nr_hw_queues = 1;
924913

@@ -1300,123 +1289,84 @@ static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
13001289
req->bitmap_words, bitmap_len);
13011290
}
13021291

1303-
/* Called with dev->lock held */
1304-
static void prepare_request(struct request *req, struct io_thread_req *io_req,
1305-
unsigned long long offset, int page_offset,
1306-
int len, struct page *page)
1292+
static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
1293+
u64 off, struct bio_vec *bvec)
13071294
{
1308-
struct gendisk *disk = req->rq_disk;
1309-
struct ubd *ubd_dev = disk->private_data;
1310-
1311-
io_req->req = req;
1312-
io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd :
1313-
ubd_dev->fd;
1314-
io_req->fds[1] = ubd_dev->fd;
1315-
io_req->cow_offset = -1;
1316-
io_req->offset = offset;
1317-
io_req->length = len;
1318-
io_req->error = 0;
1319-
io_req->sector_mask = 0;
1320-
1321-
io_req->op = (rq_data_dir(req) == READ) ? UBD_READ : UBD_WRITE;
1322-
io_req->offsets[0] = 0;
1323-
io_req->offsets[1] = ubd_dev->cow.data_offset;
1324-
io_req->buffer = page_address(page) + page_offset;
1325-
io_req->sectorsize = 1 << 9;
1326-
1327-
if(ubd_dev->cow.file != NULL)
1328-
cowify_req(io_req, ubd_dev->cow.bitmap,
1329-
ubd_dev->cow.bitmap_offset, ubd_dev->cow.bitmap_len);
1330-
1331-
}
1295+
struct ubd *dev = hctx->queue->queuedata;
1296+
struct io_thread_req *io_req;
1297+
int ret;
13321298

1333-
/* Called with dev->lock held */
1334-
static void prepare_flush_request(struct request *req,
1335-
struct io_thread_req *io_req)
1336-
{
1337-
struct gendisk *disk = req->rq_disk;
1338-
struct ubd *ubd_dev = disk->private_data;
1299+
io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC);
1300+
if (!io_req)
1301+
return -ENOMEM;
13391302

13401303
io_req->req = req;
1341-
io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd :
1342-
ubd_dev->fd;
1343-
io_req->op = UBD_FLUSH;
1344-
}
1345-
1346-
static void submit_request(struct io_thread_req *io_req, struct ubd *dev)
1347-
{
1348-
int n = os_write_file(thread_fd, &io_req,
1349-
sizeof(io_req));
1304+
if (dev->cow.file)
1305+
io_req->fds[0] = dev->cow.fd;
1306+
else
1307+
io_req->fds[0] = dev->fd;
13501308

1351-
if (n != sizeof(io_req)) {
1352-
if (n != -EAGAIN)
1353-
pr_err("write to io thread failed: %d\n", -n);
1309+
if (req_op(req) == REQ_OP_FLUSH) {
1310+
io_req->op = UBD_FLUSH;
1311+
} else {
1312+
io_req->fds[1] = dev->fd;
1313+
io_req->cow_offset = -1;
1314+
io_req->offset = off;
1315+
io_req->length = bvec->bv_len;
1316+
io_req->error = 0;
1317+
io_req->sector_mask = 0;
1318+
1319+
io_req->op = rq_data_dir(req) == READ ? UBD_READ : UBD_WRITE;
1320+
io_req->offsets[0] = 0;
1321+
io_req->offsets[1] = dev->cow.data_offset;
1322+
io_req->buffer = page_address(bvec->bv_page) + bvec->bv_offset;
1323+
io_req->sectorsize = 1 << 9;
1324+
1325+
if (dev->cow.file) {
1326+
cowify_req(io_req, dev->cow.bitmap,
1327+
dev->cow.bitmap_offset, dev->cow.bitmap_len);
1328+
}
1329+
}
13541330

1355-
blk_mq_requeue_request(io_req->req, true);
1331+
ret = os_write_file(thread_fd, &io_req, sizeof(io_req));
1332+
if (ret != sizeof(io_req)) {
1333+
if (ret != -EAGAIN)
1334+
pr_err("write to io thread failed: %d\n", -ret);
13561335
kfree(io_req);
13571336
}
1337+
1338+
return ret;
13581339
}
13591340

13601341
static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
13611342
const struct blk_mq_queue_data *bd)
13621343
{
13631344
struct request *req = bd->rq;
1364-
struct ubd *dev = hctx->queue->queuedata;
1365-
struct ubd_pdu *pdu = blk_mq_rq_to_pdu(req);
1366-
struct io_thread_req *io_req;
1345+
int ret = 0;
13671346

13681347
blk_mq_start_request(req);
13691348

1370-
pdu->rq_pos = blk_rq_pos(req);
1371-
pdu->start_sg = 0;
1372-
pdu->end_sg = blk_rq_map_sg(req->q, req, pdu->sg);
1373-
13741349
if (req_op(req) == REQ_OP_FLUSH) {
1375-
io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC);
1376-
if (io_req == NULL) {
1377-
blk_mq_requeue_request(req, true);
1378-
goto done;
1350+
ret = ubd_queue_one_vec(hctx, req, 0, NULL);
1351+
} else {
1352+
struct req_iterator iter;
1353+
struct bio_vec bvec;
1354+
u64 off = (u64)blk_rq_pos(req) << 9;
1355+
1356+
rq_for_each_segment(bvec, req, iter) {
1357+
ret = ubd_queue_one_vec(hctx, req, off, &bvec);
1358+
if (ret < 0)
1359+
goto out;
1360+
off += bvec.bv_len;
13791361
}
1380-
prepare_flush_request(req, io_req);
1381-
submit_request(io_req, dev);
1382-
1383-
goto done;
13841362
}
1385-
1386-
while (pdu->start_sg < pdu->end_sg) {
1387-
struct scatterlist *sg = &pdu->sg[pdu->start_sg];
1388-
1389-
io_req = kmalloc(sizeof(struct io_thread_req),
1390-
GFP_ATOMIC);
1391-
if (io_req == NULL) {
1392-
blk_mq_requeue_request(req, true);
1393-
goto done;
1394-
}
1395-
prepare_request(req, io_req,
1396-
(unsigned long long)pdu->rq_pos << 9,
1397-
sg->offset, sg->length, sg_page(sg));
1398-
1399-
submit_request(io_req, dev);
1400-
1401-
pdu->rq_pos += sg->length >> 9;
1402-
pdu->start_sg++;
1363+
out:
1364+
if (ret < 0) {
1365+
blk_mq_requeue_request(req, true);
14031366
}
1404-
1405-
done:
14061367
return BLK_STS_OK;
14071368
}
14081369

1409-
static int ubd_init_request(struct blk_mq_tag_set *set,
1410-
struct request *req, unsigned int hctx_idx,
1411-
unsigned int numa_node)
1412-
{
1413-
struct ubd_pdu *pdu = blk_mq_rq_to_pdu(req);
1414-
1415-
sg_init_table(pdu->sg, MAX_SG);
1416-
1417-
return 0;
1418-
}
1419-
14201370
static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
14211371
{
14221372
struct ubd *ubd_dev = bdev->bd_disk->private_data;

0 commit comments

Comments
 (0)