Skip to content

Commit 477e19d

Browse files
Jianchao Wang authored
and axboe committed
blk-mq: adjust debugfs and sysfs register when updating nr_hw_queues
blk-mq debugfs and sysfs entries need to be removed before updating queue map, otherwise, we get wrong results there. This patch fixes it and removes the redundant debugfs and sysfs register/unregister operations during __blk_mq_update_nr_hw_queues. Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com> Reviewed-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 2d29c9f commit 477e19d

File tree

1 file changed

+12
-27
lines changed

1 file changed

+12
-27
lines changed

block/blk-mq.c

Lines changed: 12 additions & 27 deletions
Original file line number | Diff line number | Diff line change
@@ -2154,8 +2154,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
21542154
struct blk_mq_tag_set *set,
21552155
struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
21562156
{
2157-
blk_mq_debugfs_unregister_hctx(hctx);
2158-
21592157
if (blk_mq_hw_queue_mapped(hctx))
21602158
blk_mq_tag_idle(hctx);
21612159

@@ -2182,6 +2180,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
21822180
queue_for_each_hw_ctx(q, hctx, i) {
21832181
if (i == nr_queue)
21842182
break;
2183+
blk_mq_debugfs_unregister_hctx(hctx);
21852184
blk_mq_exit_hctx(q, set, hctx, i);
21862185
}
21872186
}
@@ -2239,8 +2238,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
22392238
if (hctx->flags & BLK_MQ_F_BLOCKING)
22402239
init_srcu_struct(hctx->srcu);
22412240

2242-
blk_mq_debugfs_register_hctx(q, hctx);
2243-
22442241
return 0;
22452242

22462243
free_fq:
@@ -2529,8 +2526,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
25292526
int i, j;
25302527
struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
25312528

2532-
blk_mq_sysfs_unregister(q);
2533-
25342529
/* protect against switching io scheduler */
25352530
mutex_lock(&q->sysfs_lock);
25362531
for (i = 0; i < set->nr_hw_queues; i++) {
@@ -2578,7 +2573,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
25782573
}
25792574
q->nr_hw_queues = i;
25802575
mutex_unlock(&q->sysfs_lock);
2581-
blk_mq_sysfs_register(q);
25822576
}
25832577

25842578
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
@@ -2676,25 +2670,6 @@ void blk_mq_free_queue(struct request_queue *q)
26762670
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
26772671
}
26782672

2679-
/* Basically redo blk_mq_init_queue with queue frozen */
2680-
static void blk_mq_queue_reinit(struct request_queue *q)
2681-
{
2682-
WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2683-
2684-
blk_mq_debugfs_unregister_hctxs(q);
2685-
blk_mq_sysfs_unregister(q);
2686-
2687-
/*
2688-
* redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2689-
* we should change hctx numa_node according to the new topology (this
2690-
* involves freeing and re-allocating memory, worth doing?)
2691-
*/
2692-
blk_mq_map_swqueue(q);
2693-
2694-
blk_mq_sysfs_register(q);
2695-
blk_mq_debugfs_register_hctxs(q);
2696-
}
2697-
26982673
static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
26992674
{
27002675
int i;
@@ -3004,11 +2979,21 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
30042979
if (!blk_mq_elv_switch_none(&head, q))
30052980
goto switch_back;
30062981

2982+
list_for_each_entry(q, &set->tag_list, tag_set_list) {
2983+
blk_mq_debugfs_unregister_hctxs(q);
2984+
blk_mq_sysfs_unregister(q);
2985+
}
2986+
30072987
set->nr_hw_queues = nr_hw_queues;
30082988
blk_mq_update_queue_map(set);
30092989
list_for_each_entry(q, &set->tag_list, tag_set_list) {
30102990
blk_mq_realloc_hw_ctxs(set, q);
3011-
blk_mq_queue_reinit(q);
2991+
blk_mq_map_swqueue(q);
2992+
}
2993+
2994+
list_for_each_entry(q, &set->tag_list, tag_set_list) {
2995+
blk_mq_sysfs_register(q);
2996+
blk_mq_debugfs_register_hctxs(q);
30122997
}
30132998

30142999
switch_back:

0 commit comments

Comments
 (0)