@@ -2154,8 +2154,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 {
-	blk_mq_debugfs_unregister_hctx(hctx);
-
 	if (blk_mq_hw_queue_mapped(hctx))
 		blk_mq_tag_idle(hctx);
 
@@ -2182,6 +2180,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (i == nr_queue)
 			break;
+		blk_mq_debugfs_unregister_hctx(hctx);
 		blk_mq_exit_hctx(q, set, hctx, i);
 	}
 }
@@ -2239,8 +2238,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	if (hctx->flags & BLK_MQ_F_BLOCKING)
 		init_srcu_struct(hctx->srcu);
 
-	blk_mq_debugfs_register_hctx(q, hctx);
-
 	return 0;
 
 free_fq:
@@ -2529,8 +2526,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	int i, j;
 	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
 
-	blk_mq_sysfs_unregister(q);
-
 	/* protect against switching io scheduler */
 	mutex_lock(&q->sysfs_lock);
 	for (i = 0; i < set->nr_hw_queues; i++) {
@@ -2578,7 +2573,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	}
 	q->nr_hw_queues = i;
 	mutex_unlock(&q->sysfs_lock);
-	blk_mq_sysfs_register(q);
 }
 
 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
@@ -2676,25 +2670,6 @@ void blk_mq_free_queue(struct request_queue *q)
 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 }
 
-/* Basically redo blk_mq_init_queue with queue frozen */
-static void blk_mq_queue_reinit(struct request_queue *q)
-{
-	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
-
-	blk_mq_debugfs_unregister_hctxs(q);
-	blk_mq_sysfs_unregister(q);
-
-	/*
-	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
-	 * we should change hctx numa_node according to the new topology (this
-	 * involves freeing and re-allocating memory, worth doing?)
-	 */
-	blk_mq_map_swqueue(q);
-
-	blk_mq_sysfs_register(q);
-	blk_mq_debugfs_register_hctxs(q);
-}
-
 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 {
 	int i;
@@ -3004,11 +2979,21 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 		if (!blk_mq_elv_switch_none(&head, q))
 			goto switch_back;
 
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+		blk_mq_debugfs_unregister_hctxs(q);
+		blk_mq_sysfs_unregister(q);
+	}
+
 	set->nr_hw_queues = nr_hw_queues;
 	blk_mq_update_queue_map(set);
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_realloc_hw_ctxs(set, q);
-		blk_mq_queue_reinit(q);
+		blk_mq_map_swqueue(q);
+	}
+
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+		blk_mq_sysfs_register(q);
+		blk_mq_debugfs_register_hctxs(q);
 	}
 
 switch_back:
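
For orientation, here is a reader's sketch (assembled from the hunks above, not quoted verbatim from the tree) of how the debugfs/sysfs handling ends up ordered inside __blk_mq_update_nr_hw_queues() once the patch is applied. Only identifiers that already appear in the diff are used; the surrounding freeze and elevator-switch handling is omitted:

	/* 1) tear down stale debugfs/sysfs entries for every queue on the set */
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_debugfs_unregister_hctxs(q);
		blk_mq_sysfs_unregister(q);
	}

	/* 2) update the queue map and reallocate/remap the hardware contexts */
	set->nr_hw_queues = nr_hw_queues;
	blk_mq_update_queue_map(set);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);
		blk_mq_map_swqueue(q);
	}

	/* 3) re-register against the final hctx layout */
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_sysfs_register(q);
		blk_mq_debugfs_register_hctxs(q);
	}

Registration is now batched around the whole reallocation instead of being buried in blk_mq_init_hctx()/blk_mq_exit_hctx() and the removed blk_mq_queue_reinit().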