Commit eb8db83

Christoph Hellwig authored and axboe committed
dm: always defer request allocation to the owner of the request_queue
DM already calls blk_mq_alloc_request on the request_queue of the underlying device if it is a blk-mq device.  But now that we allow drivers to allocate additional data and initialize it ahead of time we need to do the same for all drivers.  Doing so and using the new cmd_size infrastructure in the block layer greatly simplifies the dm-rq and mpath code, and should also make arbitrary combinations of SQ and MQ devices with SQ or MQ device mapper tables easily possible as a further step.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
1 parent 4bf5843 commit eb8db83
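For context, the "cmd_size" mechanism the commit message refers to is the blk-mq facility that lets a driver reserve extra per-request payload which the block layer allocates together with every struct request; the driver then reaches that payload with blk_mq_rq_to_pdu() instead of grabbing it from a mempool at I/O time. The sketch below is not part of this commit and is not DM code: blk_mq_tag_set, blk_mq_alloc_tag_set() and blk_mq_rq_to_pdu() are real blk-mq interfaces of this kernel generation, while struct my_pdu, my_queue_rq(), my_mq_ops, my_init_tag_set() and the queue depth are made-up illustration names and values.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/* Hypothetical per-request driver data, analogous to dm-mpath's dm_mpath_io. */
struct my_pdu {
	unsigned int nr_bytes;
};

static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
		       const struct blk_mq_queue_data *bd)
{
	/* The payload sits directly behind the request; no separate allocation. */
	struct my_pdu *pdu = blk_mq_rq_to_pdu(bd->rq);

	pdu->nr_bytes = blk_rq_bytes(bd->rq);
	/* ... dispatch bd->rq to hardware here ... */
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops my_mq_ops = {
	.queue_rq = my_queue_rq,
};

static struct blk_mq_tag_set my_tag_set;

static int my_init_tag_set(void)
{
	my_tag_set.ops		= &my_mq_ops;
	my_tag_set.nr_hw_queues	= 1;
	my_tag_set.queue_depth	= 64;
	my_tag_set.numa_node	= NUMA_NO_NODE;
	/* Reserve sizeof(struct my_pdu) extra bytes behind every request. */
	my_tag_set.cmd_size	= sizeof(struct my_pdu);

	return blk_mq_alloc_tag_set(&my_tag_set);
}

In DM, the target's ti->per_io_data_size feeds into that per-request allocation, which is why multipath_ctr() in the diff below now sets it for every request-based table and the mpio mempool can be dropped entirely.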

8 files changed (+85, -344 lines)


drivers/md/dm-core.h

Lines changed: 0 additions & 1 deletion
@@ -92,7 +92,6 @@ struct mapped_device {
 	 * io objects are allocated from here.
 	 */
 	mempool_t *io_pool;
-	mempool_t *rq_pool;
 
 	struct bio_set *bs;
 
drivers/md/dm-mpath.c

Lines changed: 20 additions & 112 deletions
@@ -92,12 +92,6 @@ struct multipath {
 
 	unsigned queue_mode;
 
-	/*
-	 * We must use a mempool of dm_mpath_io structs so that we
-	 * can resubmit bios on error.
-	 */
-	mempool_t *mpio_pool;
-
 	struct mutex work_mutex;
 	struct work_struct trigger_event;
 
@@ -115,8 +109,6 @@ struct dm_mpath_io {
 
 typedef int (*action_fn) (struct pgpath *pgpath);
 
-static struct kmem_cache *_mpio_cache;
-
 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void trigger_event(struct work_struct *work);
 static void activate_path(struct work_struct *work);
@@ -209,7 +201,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 		init_waitqueue_head(&m->pg_init_wait);
 		mutex_init(&m->work_mutex);
 
-		m->mpio_pool = NULL;
 		m->queue_mode = DM_TYPE_NONE;
 
 		m->ti = ti;
@@ -229,16 +220,7 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
 			m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
 		else
 			m->queue_mode = DM_TYPE_REQUEST_BASED;
-	}
-
-	if (m->queue_mode == DM_TYPE_REQUEST_BASED) {
-		unsigned min_ios = dm_get_reserved_rq_based_ios();
-
-		m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
-		if (!m->mpio_pool)
-			return -ENOMEM;
-	}
-	else if (m->queue_mode == DM_TYPE_BIO_BASED) {
+	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
 		INIT_WORK(&m->process_queued_bios, process_queued_bios);
 		/*
 		 * bio-based doesn't support any direct scsi_dh management;
@@ -263,7 +245,6 @@ static void free_multipath(struct multipath *m)
 
 	kfree(m->hw_handler_name);
 	kfree(m->hw_handler_params);
-	mempool_destroy(m->mpio_pool);
 	kfree(m);
 }
 
@@ -272,38 +253,6 @@ static struct dm_mpath_io *get_mpio(union map_info *info)
 	return info->ptr;
 }
 
-static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
-{
-	struct dm_mpath_io *mpio;
-
-	if (!m->mpio_pool) {
-		/* Use blk-mq pdu memory requested via per_io_data_size */
-		mpio = get_mpio(info);
-		memset(mpio, 0, sizeof(*mpio));
-		return mpio;
-	}
-
-	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
-	if (!mpio)
-		return NULL;
-
-	memset(mpio, 0, sizeof(*mpio));
-	info->ptr = mpio;
-
-	return mpio;
-}
-
-static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
-{
-	/* Only needed for non blk-mq (.request_fn) multipath */
-	if (m->mpio_pool) {
-		struct dm_mpath_io *mpio = info->ptr;
-
-		info->ptr = NULL;
-		mempool_free(mpio, m->mpio_pool);
-	}
-}
-
 static size_t multipath_per_bio_data_size(void)
 {
 	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
@@ -530,16 +479,17 @@ static bool must_push_back_bio(struct multipath *m)
 /*
  * Map cloned requests (request-based multipath)
  */
-static int __multipath_map(struct dm_target *ti, struct request *clone,
-			   union map_info *map_context,
-			   struct request *rq, struct request **__clone)
+static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
+				   union map_info *map_context,
+				   struct request **__clone)
 {
 	struct multipath *m = ti->private;
 	int r = DM_MAPIO_REQUEUE;
-	size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
+	size_t nr_bytes = blk_rq_bytes(rq);
 	struct pgpath *pgpath;
 	struct block_device *bdev;
-	struct dm_mpath_io *mpio;
+	struct dm_mpath_io *mpio = get_mpio(map_context);
+	struct request *clone;
 
 	/* Do we need to select a new pgpath? */
 	pgpath = lockless_dereference(m->current_pgpath);
@@ -556,42 +506,23 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
 		return r;
 	}
 
-	mpio = set_mpio(m, map_context);
-	if (!mpio)
-		/* ENOMEM, requeue */
-		return r;
-
+	memset(mpio, 0, sizeof(*mpio));
 	mpio->pgpath = pgpath;
 	mpio->nr_bytes = nr_bytes;
 
 	bdev = pgpath->path.dev->bdev;
 
-	if (clone) {
-		/*
-		 * Old request-based interface: allocated clone is passed in.
-		 * Used by: .request_fn stacked on .request_fn path(s).
-		 */
-		clone->q = bdev_get_queue(bdev);
-		clone->rq_disk = bdev->bd_disk;
-		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
-	} else {
-		/*
-		 * blk-mq request-based interface; used by both:
-		 * .request_fn stacked on blk-mq path(s) and
-		 * blk-mq stacked on blk-mq path(s).
-		 */
-		clone = blk_mq_alloc_request(bdev_get_queue(bdev),
-				rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
-		if (IS_ERR(clone)) {
-			/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
-			clear_request_fn_mpio(m, map_context);
-			return r;
-		}
-		clone->bio = clone->biotail = NULL;
-		clone->rq_disk = bdev->bd_disk;
-		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
-		*__clone = clone;
+	clone = blk_get_request(bdev_get_queue(bdev),
+			rq->cmd_flags | REQ_NOMERGE,
+			GFP_ATOMIC);
+	if (IS_ERR(clone)) {
+		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
+		return r;
 	}
+	clone->bio = clone->biotail = NULL;
+	clone->rq_disk = bdev->bd_disk;
+	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+	*__clone = clone;
 
 	if (pgpath->pg->ps.type->start_io)
 		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
@@ -600,22 +531,9 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
 	return DM_MAPIO_REMAPPED;
 }
 
-static int multipath_map(struct dm_target *ti, struct request *clone,
-			 union map_info *map_context)
-{
-	return __multipath_map(ti, clone, map_context, NULL, NULL);
-}
-
-static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
-				   union map_info *map_context,
-				   struct request **clone)
-{
-	return __multipath_map(ti, NULL, map_context, rq, clone);
-}
-
 static void multipath_release_clone(struct request *clone)
 {
-	blk_mq_free_request(clone);
+	blk_put_request(clone);
 }
 
 /*
@@ -1187,7 +1105,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	ti->num_write_same_bios = 1;
 	if (m->queue_mode == DM_TYPE_BIO_BASED)
 		ti->per_io_data_size = multipath_per_bio_data_size();
-	else if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
+	else
 		ti->per_io_data_size = sizeof(struct dm_mpath_io);
 
 	return 0;
@@ -1610,7 +1528,6 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 		if (ps->type->end_io)
 			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
 	}
-	clear_request_fn_mpio(m, map_context);
 
 	return r;
 }
@@ -2060,7 +1977,6 @@ static struct target_type multipath_target = {
 	.module = THIS_MODULE,
 	.ctr = multipath_ctr,
 	.dtr = multipath_dtr,
-	.map_rq = multipath_map,
 	.clone_and_map_rq = multipath_clone_and_map,
 	.release_clone_rq = multipath_release_clone,
 	.rq_end_io = multipath_end_io,
@@ -2080,11 +1996,6 @@ static int __init dm_multipath_init(void)
 {
 	int r;
 
-	/* allocate a slab for the dm_mpath_ios */
-	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
-	if (!_mpio_cache)
-		return -ENOMEM;
-
 	r = dm_register_target(&multipath_target);
 	if (r < 0) {
 		DMERR("request-based register failed %d", r);
@@ -2120,8 +2031,6 @@ static int __init dm_multipath_init(void)
 bad_alloc_kmultipathd:
 	dm_unregister_target(&multipath_target);
 bad_register_target:
-	kmem_cache_destroy(_mpio_cache);
-
 	return r;
 }
 
@@ -2131,7 +2040,6 @@ static void __exit dm_multipath_exit(void)
 	destroy_workqueue(kmultipathd);
 
 	dm_unregister_target(&multipath_target);
-	kmem_cache_destroy(_mpio_cache);
 }
 
 module_init(dm_multipath_init);
