
Commit e01ad46

Jianchao Wang authored and axboe committed
blk-mq: fallback to previous nr_hw_queues when updating fails
When we try to increase nr_hw_queues, we may fail due to a shortage of memory or some other reason. blk_mq_realloc_hw_ctxs then stops partway, and some entries in q->queue_hw_ctx are left NULL.

However, because the queue map has already been updated with the new nr_hw_queues, some CPUs have been mapped to a hw queue whose allocation just failed, so blk_mq_map_queue could return NULL. This causes a panic in the following blk_mq_map_swqueue.

To fix it, when increasing nr_hw_queues fails, fall back to the previous nr_hw_queues and post a warning. In addition, a driver's .map_queues usually uses completion irq affinity to map hw queues and CPUs; after falling back, some CPUs would be left without a mapping to any hw queue, so use the default blk_mq_map_queues instead.

Reported-by: syzbot+83e8cbe702263932d9d4@syzkaller.appspotmail.com
Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
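To make the fallback flow concrete, here is a minimal user-space sketch of the retry pattern this patch adds to __blk_mq_update_nr_hw_queues. All types and helpers below (tag_set, queue, realloc_hw_ctxs, update_nr_hw_queues) are hypothetical stand-ins for illustration, not the kernel API; the real code also walks every queue in the tag set and remaps sw queues afterwards.

#include <stdio.h>

struct tag_set { int nr_hw_queues; };
struct queue   { int nr_hw_queues; };

/* Stand-in for blk_mq_realloc_hw_ctxs(): pretend allocation cannot
 * grow past 4 hw queues, so q->nr_hw_queues keeps its old value on
 * failure, just like the real function after this patch. */
static void realloc_hw_ctxs(struct tag_set *set, struct queue *q)
{
	if (set->nr_hw_queues <= 4)
		q->nr_hw_queues = set->nr_hw_queues;
}

static void update_nr_hw_queues(struct tag_set *set, struct queue *q,
				int nr_hw_queues)
{
	int prev_nr_hw_queues = set->nr_hw_queues;

	set->nr_hw_queues = nr_hw_queues;
fallback:
	realloc_hw_ctxs(set, q);
	if (q->nr_hw_queues != set->nr_hw_queues) {
		/* Mirrors the patch's pr_warn() + blk_mq_map_queues()
		 * before retrying with the previous queue count. */
		fprintf(stderr,
			"Increasing nr_hw_queues to %d fails, fallback to %d\n",
			nr_hw_queues, prev_nr_hw_queues);
		set->nr_hw_queues = prev_nr_hw_queues;
		goto fallback;
	}
}

int main(void)
{
	struct tag_set set = { .nr_hw_queues = 2 };
	struct queue q = { .nr_hw_queues = 2 };

	update_nr_hw_queues(&set, &q, 8);	/* fails, reverts to 2 */
	printf("final nr_hw_queues = %d\n", set.nr_hw_queues);
	return 0;
}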
1 parent 34d11ff commit e01ad46

File tree

1 file changed

+24 -3 lines changed


block/blk-mq.c

Lines changed: 24 additions & 3 deletions
@@ -2557,7 +2557,7 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 						struct request_queue *q)
 {
-	int i, j;
+	int i, j, end;
 	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
 
 	/* protect against switching io scheduler */
@@ -2591,8 +2591,20 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 			break;
 		}
 	}
+	/*
+	 * Increasing nr_hw_queues fails. Free the newly allocated
+	 * hctxs and keep the previous q->nr_hw_queues.
+	 */
+	if (i != set->nr_hw_queues) {
+		j = q->nr_hw_queues;
+		end = i;
+	} else {
+		j = i;
+		end = q->nr_hw_queues;
+		q->nr_hw_queues = set->nr_hw_queues;
+	}
 
-	for (j = i; j < q->nr_hw_queues; j++) {
+	for (; j < end; j++) {
 		struct blk_mq_hw_ctx *hctx = hctxs[j];
 
 		if (hctx) {
@@ -2604,7 +2616,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 
 		}
 	}
-	q->nr_hw_queues = i;
 	mutex_unlock(&q->sysfs_lock);
 }
 
@@ -2989,6 +3000,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 {
 	struct request_queue *q;
 	LIST_HEAD(head);
+	int prev_nr_hw_queues;
 
 	lockdep_assert_held(&set->tag_list_lock);
 
@@ -3017,10 +3029,19 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 		blk_mq_sysfs_unregister(q);
 	}
 
+	prev_nr_hw_queues = set->nr_hw_queues;
 	set->nr_hw_queues = nr_hw_queues;
 	blk_mq_update_queue_map(set);
+fallback:
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_realloc_hw_ctxs(set, q);
+		if (q->nr_hw_queues != set->nr_hw_queues) {
+			pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
+					nr_hw_queues, prev_nr_hw_queues);
+			set->nr_hw_queues = prev_nr_hw_queues;
+			blk_mq_map_queues(set);
+			goto fallback;
+		}
 		blk_mq_map_swqueue(q);
 	}
 
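The new i/j/end bookkeeping in blk_mq_realloc_hw_ctxs decides which hctx slots to free: on a partial failure while growing, only the freshly allocated slots beyond the old count are torn down and q->nr_hw_queues is left untouched so the caller can detect the mismatch; on success, any excess slots from a shrink are freed and the new count is committed. A hedged sketch of just that decision, with illustrative names (pick_free_range is not a kernel function):

#include <stdio.h>

/* i: number of hctxs successfully (re)allocated;
 * set_nr: the requested set->nr_hw_queues;
 * q_nr: in/out, the queue's current nr_hw_queues. */
static void pick_free_range(int i, int set_nr, int *q_nr,
			    int *start, int *end)
{
	if (i != set_nr) {
		/* Growing failed partway: free only the newly
		 * allocated slots [old q_nr, i) and keep the old
		 * count so the caller sees the mismatch. */
		*start = *q_nr;
		*end = i;
	} else {
		/* Success: free excess slots [i, old q_nr) left
		 * over from shrinking, then commit the new count. */
		*start = i;
		*end = *q_nr;
		*q_nr = set_nr;
	}
}

int main(void)
{
	int q_nr = 2, start, end;

	/* Tried to grow 2 -> 8, but only 5 slots came up. */
	pick_free_range(5, 8, &q_nr, &start, &end);
	printf("free slots [%d, %d), nr_hw_queues stays %d\n",
	       start, end, q_nr);
	return 0;
}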
