Skip to content

Commit 476f8c9

Browse files
Ming Lei authored and
axboe committed
blk-mq: avoid to write intermediate result to hctx->next_cpu
This patch figures out the final selected CPU, and then writes it to hctx->next_cpu once, so that no intermediate next-CPU value can be observed from other dispatch paths. Cc: Stefan Haberland <sth@linux.vnet.ibm.com> Tested-by: Christian Borntraeger <borntraeger@de.ibm.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Sagi Grimberg <sagi@grimberg.me> Signed-off-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent bffa990 commit 476f8c9

File tree

1 file changed

+9
-8
lines changed

1 file changed

+9
-8
lines changed

block/blk-mq.c

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1344,34 +1344,32 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
13441344
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
13451345
{
13461346
bool tried = false;
1347+
int next_cpu = hctx->next_cpu;
13471348

13481349
if (hctx->queue->nr_hw_queues == 1)
13491350
return WORK_CPU_UNBOUND;
13501351

13511352
if (--hctx->next_cpu_batch <= 0) {
1352-
int next_cpu;
13531353
select_cpu:
1354-
next_cpu = cpumask_next_and(hctx->next_cpu, hctx->cpumask,
1354+
next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
13551355
cpu_online_mask);
13561356
if (next_cpu >= nr_cpu_ids)
1357-
next_cpu = cpumask_first_and(hctx->cpumask,cpu_online_mask);
1357+
next_cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
13581358

13591359
/*
13601360
* No online CPU is found, so have to make sure hctx->next_cpu
13611361
* is set correctly for not breaking workqueue.
13621362
*/
13631363
if (next_cpu >= nr_cpu_ids)
1364-
hctx->next_cpu = cpumask_first(hctx->cpumask);
1365-
else
1366-
hctx->next_cpu = next_cpu;
1364+
next_cpu = cpumask_first(hctx->cpumask);
13671365
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
13681366
}
13691367

13701368
/*
13711369
* Do unbound schedule if we can't find a online CPU for this hctx,
13721370
* and it should only happen in the path of handling CPU DEAD.
13731371
*/
1374-
if (!cpu_online(hctx->next_cpu)) {
1372+
if (!cpu_online(next_cpu)) {
13751373
if (!tried) {
13761374
tried = true;
13771375
goto select_cpu;
@@ -1381,10 +1379,13 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
13811379
* Make sure to re-select CPU next time once after CPUs
13821380
* in hctx->cpumask become online again.
13831381
*/
1382+
hctx->next_cpu = next_cpu;
13841383
hctx->next_cpu_batch = 1;
13851384
return WORK_CPU_UNBOUND;
13861385
}
1387-
return hctx->next_cpu;
1386+
1387+
hctx->next_cpu = next_cpu;
1388+
return next_cpu;
13881389
}
13891390

13901391
static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,

0 commit comments

Comments
 (0)