@@ -35,7 +35,8 @@ struct nullb {
 	struct request_queue *q;
 	struct gendisk *disk;
 	struct nvm_dev *ndev;
-	struct blk_mq_tag_set tag_set;
+	struct blk_mq_tag_set *tag_set;
+	struct blk_mq_tag_set __tag_set;
 	struct hrtimer timer;
 	unsigned int queue_depth;
 	spinlock_t lock;
@@ -50,6 +51,7 @@ static struct mutex lock;
 static int null_major;
 static int nullb_indexes;
 static struct kmem_cache *ppa_cache;
+static struct blk_mq_tag_set tag_set;
 
 enum {
 	NULL_IRQ_NONE		= 0,
@@ -109,7 +111,7 @@ static int bs = 512;
 module_param(bs, int, S_IRUGO);
 MODULE_PARM_DESC(bs, "Block size (in bytes)");
 
-static int nr_devices = 2;
+static int nr_devices = 1;
 module_param(nr_devices, int, S_IRUGO);
 MODULE_PARM_DESC(nr_devices, "Number of devices to register");
 
@@ -121,6 +123,10 @@ static bool blocking;
 module_param(blocking, bool, S_IRUGO);
 MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
 
+static bool shared_tags;
+module_param(shared_tags, bool, S_IRUGO);
+MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
+
 static int irqmode = NULL_IRQ_SOFTIRQ;
 
 static int null_set_irqmode(const char *str, const struct kernel_param *kp)
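With the new parameter in place, tag-set sharing is opt-in at module load time. A usage sketch (assuming the driver's default blk-mq queue_mode; shared_tags defaults to off):

	modprobe null_blk nr_devices=2 shared_tags=1

Both instances then allocate requests from the single module-global tag set that null_init() sets up further down, so their hardware queues draw tags from one shared pool.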
@@ -376,31 +382,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }
 
-static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
-{
-	BUG_ON(!nullb);
-	BUG_ON(!nq);
-
-	init_waitqueue_head(&nq->wait);
-	nq->queue_depth = nullb->queue_depth;
-}
-
-static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-			  unsigned int index)
-{
-	struct nullb *nullb = data;
-	struct nullb_queue *nq = &nullb->queues[index];
-
-	hctx->driver_data = nq;
-	null_init_queue(nullb, nq);
-	nullb->nr_queues++;
-
-	return 0;
-}
-
 static const struct blk_mq_ops null_mq_ops = {
 	.queue_rq	= null_queue_rq,
-	.init_hctx	= null_init_hctx,
 	.complete	= null_softirq_done_fn,
 };
 
@@ -592,8 +575,8 @@ static void null_del_dev(struct nullb *nullb)
 	else
 		del_gendisk(nullb->disk);
 	blk_cleanup_queue(nullb->q);
-	if (queue_mode == NULL_Q_MQ)
-		blk_mq_free_tag_set(&nullb->tag_set);
+	if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+		blk_mq_free_tag_set(nullb->tag_set);
 	if (!use_lightnvm)
 		put_disk(nullb->disk);
 	cleanup_queues(nullb);
@@ -615,6 +598,32 @@ static const struct block_device_operations null_fops = {
 	.release =	null_release,
 };
 
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+	BUG_ON(!nullb);
+	BUG_ON(!nq);
+
+	init_waitqueue_head(&nq->wait);
+	nq->queue_depth = nullb->queue_depth;
+}
+
+static void null_init_queues(struct nullb *nullb)
+{
+	struct request_queue *q = nullb->q;
+	struct blk_mq_hw_ctx *hctx;
+	struct nullb_queue *nq;
+	int i;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if (!hctx->nr_ctx || !hctx->tags)
+			continue;
+		nq = &nullb->queues[i];
+		hctx->driver_data = nq;
+		null_init_queue(nullb, nq);
+		nullb->nr_queues++;
+	}
+}
+
 static int setup_commands(struct nullb_queue *nq)
 {
 	struct nullb_cmd *cmd;
@@ -695,6 +704,22 @@ static int null_gendisk_register(struct nullb *nullb)
 	return 0;
 }
 
+static int null_init_tag_set(struct blk_mq_tag_set *set)
+{
+	set->ops = &null_mq_ops;
+	set->nr_hw_queues = submit_queues;
+	set->queue_depth = hw_queue_depth;
+	set->numa_node = home_node;
+	set->cmd_size = sizeof(struct nullb_cmd);
+	set->flags = BLK_MQ_F_SHOULD_MERGE;
+	set->driver_data = NULL;
+
+	if (blocking)
+		set->flags |= BLK_MQ_F_BLOCKING;
+
+	return blk_mq_alloc_tag_set(set);
+}
+
 static int null_add_dev(void)
 {
 	struct nullb *nullb;
@@ -716,26 +741,23 @@ static int null_add_dev(void)
 		goto out_free_nullb;
 
 	if (queue_mode == NULL_Q_MQ) {
-		nullb->tag_set.ops = &null_mq_ops;
-		nullb->tag_set.nr_hw_queues = submit_queues;
-		nullb->tag_set.queue_depth = hw_queue_depth;
-		nullb->tag_set.numa_node = home_node;
-		nullb->tag_set.cmd_size	= sizeof(struct nullb_cmd);
-		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-		nullb->tag_set.driver_data = nullb;
-
-		if (blocking)
-			nullb->tag_set.flags |= BLK_MQ_F_BLOCKING;
-
-		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
+		if (shared_tags) {
+			nullb->tag_set = &tag_set;
+			rv = 0;
+		} else {
+			nullb->tag_set = &nullb->__tag_set;
+			rv = null_init_tag_set(nullb->tag_set);
+		}
+
 		if (rv)
 			goto out_cleanup_queues;
 
-		nullb->q = blk_mq_init_queue(&nullb->tag_set);
+		nullb->q = blk_mq_init_queue(nullb->tag_set);
 		if (IS_ERR(nullb->q)) {
 			rv = -ENOMEM;
 			goto out_cleanup_tags;
 		}
+		null_init_queues(nullb);
 	} else if (queue_mode == NULL_Q_BIO) {
 		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
 		if (!nullb->q) {
@@ -788,8 +810,8 @@ static int null_add_dev(void)
 out_cleanup_blk_queue:
 	blk_cleanup_queue(nullb->q);
 out_cleanup_tags:
-	if (queue_mode == NULL_Q_MQ)
-		blk_mq_free_tag_set(&nullb->tag_set);
+	if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+		blk_mq_free_tag_set(nullb->tag_set);
 out_cleanup_queues:
 	cleanup_queues(nullb);
 out_free_nullb:
@@ -822,6 +844,9 @@ static int __init null_init(void)
 		queue_mode = NULL_Q_MQ;
 	}
 
+	if (queue_mode == NULL_Q_MQ && shared_tags)
+		null_init_tag_set(&tag_set);
+
 	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
 		if (submit_queues < nr_online_nodes) {
 			pr_warn("null_blk: submit_queues param is set to %u.",
@@ -882,6 +907,9 @@ static void __exit null_exit(void)
 	}
 	mutex_unlock(&lock);
 
+	if (queue_mode == NULL_Q_MQ && shared_tags)
+		blk_mq_free_tag_set(&tag_set);
+
 	kmem_cache_destroy(ppa_cache);
 }
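Taken together, the hunks establish a simple ownership rule: a device frees its tag set only if it points at its own embedded __tag_set, while the module-global set is initialized once in null_init() and freed once in null_exit(). Below is a minimal user-space sketch of that pattern, with hypothetical names (fake_tag_set, fake_dev, and the stub init/free functions stand in for struct blk_mq_tag_set and blk_mq_alloc_tag_set()/blk_mq_free_tag_set()):

/* Hypothetical stand-ins for the kernel structures and helpers above. */
#include <stdbool.h>
#include <stdio.h>

struct fake_tag_set {
	int queue_depth;
	bool allocated;
};

static struct fake_tag_set tag_set;	/* module-global, used when sharing */
static bool shared_tags;

struct fake_dev {
	struct fake_tag_set *tag_set;	/* set this device queues against */
	struct fake_tag_set __tag_set;	/* private storage when not sharing */
};

static int init_tag_set(struct fake_tag_set *set)
{
	set->queue_depth = 64;
	set->allocated = true;
	return 0;
}

static void free_tag_set(struct fake_tag_set *set)
{
	set->allocated = false;
}

static int add_dev(struct fake_dev *dev)
{
	if (shared_tags) {
		dev->tag_set = &tag_set;	/* borrow the global set */
		return 0;
	}
	dev->tag_set = &dev->__tag_set;		/* own a private set */
	return init_tag_set(dev->tag_set);
}

static void del_dev(struct fake_dev *dev)
{
	/* Free only what the device owns; a shared set outlives it. */
	if (dev->tag_set == &dev->__tag_set)
		free_tag_set(dev->tag_set);
}

int main(void)
{
	struct fake_dev a = { 0 }, b = { 0 };

	shared_tags = true;
	init_tag_set(&tag_set);			/* null_init() path */
	add_dev(&a);
	add_dev(&b);
	printf("devices share one set: %s\n",
	       a.tag_set == b.tag_set ? "yes" : "no");
	del_dev(&a);
	del_dev(&b);
	free_tag_set(&tag_set);			/* null_exit() path */
	return 0;
}

The comparison tag_set == &__tag_set is the same test null_del_dev() and the out_cleanup_tags error path use above to decide whether blk_mq_free_tag_set() is theirs to call.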