Commit 82f402f

null_blk: add support for shared tags
Some storage drivers need to share tag sets between devices. It's useful to be able to model that with null_blk, to find hangs or performance issues. Add a 'shared_tags' bool module parameter. If it is set to true and nr_devices is bigger than 1, all devices allocated will share the same tag set.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent edf064e commit 82f402f
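
Usage sketch (illustrative invocation only; nr_devices and shared_tags are the module parameters touched by this patch, assuming the default blk-mq queue_mode):

    modprobe null_blk nr_devices=2 shared_tags=1

This would create two null_blk devices backed by a single shared tag set, while leaving shared_tags at its default of false keeps a private tag set per device.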

drivers/block/null_blk.c

Lines changed: 70 additions & 42 deletions
@@ -35,7 +35,8 @@ struct nullb {
 	struct request_queue *q;
 	struct gendisk *disk;
 	struct nvm_dev *ndev;
-	struct blk_mq_tag_set tag_set;
+	struct blk_mq_tag_set *tag_set;
+	struct blk_mq_tag_set __tag_set;
 	struct hrtimer timer;
 	unsigned int queue_depth;
 	spinlock_t lock;
@@ -50,6 +51,7 @@ static struct mutex lock;
 static int null_major;
 static int nullb_indexes;
 static struct kmem_cache *ppa_cache;
+static struct blk_mq_tag_set tag_set;
 
 enum {
 	NULL_IRQ_NONE		= 0,
@@ -109,7 +111,7 @@ static int bs = 512;
 module_param(bs, int, S_IRUGO);
 MODULE_PARM_DESC(bs, "Block size (in bytes)");
 
-static int nr_devices = 2;
+static int nr_devices = 1;
 module_param(nr_devices, int, S_IRUGO);
 MODULE_PARM_DESC(nr_devices, "Number of devices to register");
 
@@ -121,6 +123,10 @@ static bool blocking;
 module_param(blocking, bool, S_IRUGO);
 MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
 
+static bool shared_tags;
+module_param(shared_tags, bool, S_IRUGO);
+MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
+
 static int irqmode = NULL_IRQ_SOFTIRQ;
 
 static int null_set_irqmode(const char *str, const struct kernel_param *kp)
@@ -376,31 +382,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }
 
-static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
-{
-	BUG_ON(!nullb);
-	BUG_ON(!nq);
-
-	init_waitqueue_head(&nq->wait);
-	nq->queue_depth = nullb->queue_depth;
-}
-
-static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-			  unsigned int index)
-{
-	struct nullb *nullb = data;
-	struct nullb_queue *nq = &nullb->queues[index];
-
-	hctx->driver_data = nq;
-	null_init_queue(nullb, nq);
-	nullb->nr_queues++;
-
-	return 0;
-}
-
 static const struct blk_mq_ops null_mq_ops = {
 	.queue_rq	= null_queue_rq,
-	.init_hctx	= null_init_hctx,
 	.complete	= null_softirq_done_fn,
 };
 
@@ -592,8 +575,8 @@ static void null_del_dev(struct nullb *nullb)
 	else
 		del_gendisk(nullb->disk);
 	blk_cleanup_queue(nullb->q);
-	if (queue_mode == NULL_Q_MQ)
-		blk_mq_free_tag_set(&nullb->tag_set);
+	if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+		blk_mq_free_tag_set(nullb->tag_set);
 	if (!use_lightnvm)
 		put_disk(nullb->disk);
 	cleanup_queues(nullb);
@@ -615,6 +598,32 @@ static const struct block_device_operations null_fops = {
 	.release =	null_release,
 };
 
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+	BUG_ON(!nullb);
+	BUG_ON(!nq);
+
+	init_waitqueue_head(&nq->wait);
+	nq->queue_depth = nullb->queue_depth;
+}
+
+static void null_init_queues(struct nullb *nullb)
+{
+	struct request_queue *q = nullb->q;
+	struct blk_mq_hw_ctx *hctx;
+	struct nullb_queue *nq;
+	int i;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if (!hctx->nr_ctx || !hctx->tags)
+			continue;
+		nq = &nullb->queues[i];
+		hctx->driver_data = nq;
+		null_init_queue(nullb, nq);
+		nullb->nr_queues++;
+	}
+}
+
 static int setup_commands(struct nullb_queue *nq)
 {
 	struct nullb_cmd *cmd;
@@ -695,6 +704,22 @@ static int null_gendisk_register(struct nullb *nullb)
 	return 0;
 }
 
+static int null_init_tag_set(struct blk_mq_tag_set *set)
+{
+	set->ops = &null_mq_ops;
+	set->nr_hw_queues = submit_queues;
+	set->queue_depth = hw_queue_depth;
+	set->numa_node = home_node;
+	set->cmd_size = sizeof(struct nullb_cmd);
+	set->flags = BLK_MQ_F_SHOULD_MERGE;
+	set->driver_data = NULL;
+
+	if (blocking)
+		set->flags |= BLK_MQ_F_BLOCKING;
+
+	return blk_mq_alloc_tag_set(set);
+}
+
 static int null_add_dev(void)
 {
 	struct nullb *nullb;
@@ -716,26 +741,23 @@ static int null_add_dev(void)
 		goto out_free_nullb;
 
 	if (queue_mode == NULL_Q_MQ) {
-		nullb->tag_set.ops = &null_mq_ops;
-		nullb->tag_set.nr_hw_queues = submit_queues;
-		nullb->tag_set.queue_depth = hw_queue_depth;
-		nullb->tag_set.numa_node = home_node;
-		nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
-		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-		nullb->tag_set.driver_data = nullb;
-
-		if (blocking)
-			nullb->tag_set.flags |= BLK_MQ_F_BLOCKING;
-
-		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
+		if (shared_tags) {
+			nullb->tag_set = &tag_set;
+			rv = 0;
+		} else {
+			nullb->tag_set = &nullb->__tag_set;
+			rv = null_init_tag_set(nullb->tag_set);
+		}
+
 		if (rv)
 			goto out_cleanup_queues;
 
-		nullb->q = blk_mq_init_queue(&nullb->tag_set);
+		nullb->q = blk_mq_init_queue(nullb->tag_set);
 		if (IS_ERR(nullb->q)) {
 			rv = -ENOMEM;
 			goto out_cleanup_tags;
 		}
+		null_init_queues(nullb);
 	} else if (queue_mode == NULL_Q_BIO) {
 		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
 		if (!nullb->q) {
@@ -788,8 +810,8 @@ static int null_add_dev(void)
 out_cleanup_blk_queue:
 	blk_cleanup_queue(nullb->q);
 out_cleanup_tags:
-	if (queue_mode == NULL_Q_MQ)
-		blk_mq_free_tag_set(&nullb->tag_set);
+	if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+		blk_mq_free_tag_set(nullb->tag_set);
 out_cleanup_queues:
 	cleanup_queues(nullb);
 out_free_nullb:
@@ -822,6 +844,9 @@ static int __init null_init(void)
 		queue_mode = NULL_Q_MQ;
 	}
 
+	if (queue_mode == NULL_Q_MQ && shared_tags)
+		null_init_tag_set(&tag_set);
+
 	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
 		if (submit_queues < nr_online_nodes) {
 			pr_warn("null_blk: submit_queues param is set to %u.",
@@ -882,6 +907,9 @@ static void __exit null_exit(void)
 	}
 	mutex_unlock(&lock);
 
+	if (queue_mode == NULL_Q_MQ && shared_tags)
+		blk_mq_free_tag_set(&tag_set);
+
 	kmem_cache_destroy(ppa_cache);
 }
 