@@ -37,9 +37,6 @@
 #include "blk-wbt.h"
 #include "blk-mq-sched.h"
 
-static DEFINE_MUTEX(all_q_mutex);
-static LIST_HEAD(all_q_list);
-
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
 static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync);
@@ -1975,8 +1972,8 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 		INIT_LIST_HEAD(&__ctx->rq_list);
 		__ctx->queue = q;
 
-		/* If the cpu isn't online, the cpu is mapped to first hctx */
-		if (!cpu_online(i))
+		/* If the cpu isn't present, the cpu is mapped to first hctx */
+		if (!cpu_present(i))
 			continue;
 
 		hctx = blk_mq_map_queue(q, i);
@@ -2019,8 +2016,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
 	}
 }
 
-static void blk_mq_map_swqueue(struct request_queue *q,
-			       const struct cpumask *online_mask)
+static void blk_mq_map_swqueue(struct request_queue *q)
 {
 	unsigned int i, hctx_idx;
 	struct blk_mq_hw_ctx *hctx;
@@ -2038,13 +2034,11 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 	}
 
 	/*
-	 * Map software to hardware queues
+	 * Map software to hardware queues.
+	 *
+	 * If the cpu isn't present, the cpu is mapped to first hctx.
 	 */
-	for_each_possible_cpu(i) {
-		/* If the cpu isn't online, the cpu is mapped to first hctx */
-		if (!cpumask_test_cpu(i, online_mask))
-			continue;
-
+	for_each_present_cpu(i) {
 		hctx_idx = q->mq_map[i];
 		/* unmapped hw queue can be remapped after CPU topo changed */
 		if (!set->tags[hctx_idx] &&
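
With this hunk, blk_mq_map_swqueue() walks every present CPU instead of only the online ones, so each software context gets a hardware-queue mapping up front and nothing has to be remapped when a CPU is hot-added later. A minimal userspace sketch of the idea (the round-robin spread and the NR_* constants are hypothetical stand-ins, not the kernel's actual mq_map code):

```c
#include <stdio.h>

#define NR_PRESENT_CPUS	8	/* assumption: 8 installed CPUs, some may be offline */
#define NR_HW_QUEUES	3	/* assumption: 3 hardware queues */

int main(void)
{
	unsigned int mq_map[NR_PRESENT_CPUS];

	/* Naive round-robin spread standing in for the real queue mapping:
	 * every *present* CPU gets an hctx, whether or not it is online. */
	for (unsigned int cpu = 0; cpu < NR_PRESENT_CPUS; cpu++)
		mq_map[cpu] = cpu % NR_HW_QUEUES;

	for (unsigned int cpu = 0; cpu < NR_PRESENT_CPUS; cpu++)
		printf("cpu %u -> hctx %u\n", cpu, mq_map[cpu]);
	return 0;
}
```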
@@ -2330,16 +2324,8 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	blk_queue_softirq_done(q, set->ops->complete);
 
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
-
-	get_online_cpus();
-	mutex_lock(&all_q_mutex);
-
-	list_add_tail(&q->all_q_node, &all_q_list);
 	blk_mq_add_queue_tag_set(set, q);
-	blk_mq_map_swqueue(q, cpu_online_mask);
-
-	mutex_unlock(&all_q_mutex);
-	put_online_cpus();
+	blk_mq_map_swqueue(q);
 
 	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
 		int ret;
@@ -2365,18 +2351,12 @@ void blk_mq_free_queue(struct request_queue *q)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 
-	mutex_lock(&all_q_mutex);
-	list_del_init(&q->all_q_node);
-	mutex_unlock(&all_q_mutex);
-
 	blk_mq_del_queue_tag_set(q);
-
 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 }
 
 /* Basically redo blk_mq_init_queue with queue frozen */
-static void blk_mq_queue_reinit(struct request_queue *q,
-				const struct cpumask *online_mask)
+static void blk_mq_queue_reinit(struct request_queue *q)
 {
 	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
 
@@ -2389,76 +2369,12 @@ static void blk_mq_queue_reinit(struct request_queue *q,
 	 * involves free and re-allocate memory, worthy doing?)
 	 */
 
-	blk_mq_map_swqueue(q, online_mask);
+	blk_mq_map_swqueue(q);
 
 	blk_mq_sysfs_register(q);
 	blk_mq_debugfs_register_hctxs(q);
 }
 
-/*
- * New online cpumask which is going to be set in this hotplug event.
- * Declare this cpumasks as global as cpu-hotplug operation is invoked
- * one-by-one and dynamically allocating this could result in a failure.
- */
-static struct cpumask cpuhp_online_new;
-
-static void blk_mq_queue_reinit_work(void)
-{
-	struct request_queue *q;
-
-	mutex_lock(&all_q_mutex);
-	/*
-	 * We need to freeze and reinit all existing queues. Freezing
-	 * involves synchronous wait for an RCU grace period and doing it
-	 * one by one may take a long time. Start freezing all queues in
-	 * one swoop and then wait for the completions so that freezing can
-	 * take place in parallel.
-	 */
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_freeze_queue_start(q);
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_freeze_queue_wait(q);
-
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_queue_reinit(q, &cpuhp_online_new);
-
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_unfreeze_queue(q);
-
-	mutex_unlock(&all_q_mutex);
-}
-
-static int blk_mq_queue_reinit_dead(unsigned int cpu)
-{
-	cpumask_copy(&cpuhp_online_new, cpu_online_mask);
-	blk_mq_queue_reinit_work();
-	return 0;
-}
-
-/*
- * Before hotadded cpu starts handling requests, new mappings must be
- * established.  Otherwise, these requests in hw queue might never be
- * dispatched.
- *
- * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
- * for CPU0, and ctx1 for CPU1).
- *
- * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list
- * and set bit0 in pending bitmap as ctx1->index_hw is still zero.
- *
- * And then while running hw queue, blk_mq_flush_busy_ctxs() finds bit0 is set
- * in pending bitmap and tries to retrieve requests in hctx->ctxs[0]->rq_list.
- * But htx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list is
- * ignored.
- */
-static int blk_mq_queue_reinit_prepare(unsigned int cpu)
-{
-	cpumask_copy(&cpuhp_online_new, cpu_online_mask);
-	cpumask_set_cpu(cpu, &cpuhp_online_new);
-	blk_mq_queue_reinit_work();
-	return 0;
-}
-
 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 {
 	int i;
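
The long comment deleted above is the rationale for the old CPUHP_BLK_MQ_PREPARE callback: if a hot-added CPU could queue requests before the ctx-to-hctx mapping was rebuilt, its pending bit would point at the wrong software context and the request would never be flushed. Mapping present CPUs up front removes that window. A toy userspace model of the failure mode the comment describes (all structures here are hypothetical simplifications, not kernel code):

```c
#include <stdio.h>

struct ctx {
	const char *name;
	unsigned int index_hw;	/* slot of this ctx inside hctx->ctxs[] */
	int nr_pending;		/* stand-in for ctx->rq_list */
};

struct hctx {
	struct ctx *ctxs[2];
	unsigned long pending;	/* stand-in for the busy-ctx bitmap */
};

int main(void)
{
	struct ctx ctx0 = { "ctx0 (CPU0)", 0, 0 };
	struct ctx ctx1 = { "ctx1 (CPU1)", 0, 0 };	/* index_hw never set up */
	struct hctx h = { { &ctx0, &ctx1 }, 0 };

	/* CPU1 is hot-added and a request lands in ctx1; the bit that gets
	 * set is ctx1->index_hw, which is still 0. */
	ctx1.nr_pending++;
	h.pending |= 1UL << ctx1.index_hw;

	/* A blk_mq_flush_busy_ctxs()-like walk follows the set bit to
	 * hctx->ctxs[0], i.e. ctx0, so the request in ctx1 is never seen. */
	for (int bit = 0; bit < 2; bit++) {
		if (h.pending & (1UL << bit))
			printf("flushing %s: %d request(s)\n",
			       h.ctxs[bit]->name, h.ctxs[bit]->nr_pending);
	}
	return 0;
}
```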
@@ -2669,7 +2585,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	blk_mq_update_queue_map(set);
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_realloc_hw_ctxs(set, q);
-		blk_mq_queue_reinit(q, cpu_online_mask);
+		blk_mq_queue_reinit(q);
 	}
 
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
@@ -2885,24 +2801,10 @@ bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
 }
 EXPORT_SYMBOL_GPL(blk_mq_poll);
 
-void blk_mq_disable_hotplug(void)
-{
-	mutex_lock(&all_q_mutex);
-}
-
-void blk_mq_enable_hotplug(void)
-{
-	mutex_unlock(&all_q_mutex);
-}
-
 static int __init blk_mq_init(void)
 {
 	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
 				blk_mq_hctx_notify_dead);
-
-	cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
-				  blk_mq_queue_reinit_prepare,
-				  blk_mq_queue_reinit_dead);
 	return 0;
 }
 subsys_initcall(blk_mq_init);
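
After this hunk the only CPU-hotplug hook left in blk-mq is the CPUHP_BLK_MQ_DEAD multi-state registered above; the prepare/dead queue-reinit pair is gone because the mapping already covers every present CPU. For the present-vs-online distinction the patch leans on, a small illustrative userspace read of the standard sysfs masks (not part of the patch):

```c
#include <stdio.h>

static void print_mask(const char *path)
{
	char buf[256];
	FILE *f = fopen(path, "r");

	if (!f)
		return;
	if (fgets(buf, sizeof(buf), f))
		printf("%-40s %s", path, buf);	/* buf keeps its newline */
	fclose(f);
}

int main(void)
{
	/* "present" lists installed CPUs (possibly offline); "online" lists
	 * the subset currently running. */
	print_mask("/sys/devices/system/cpu/present");
	print_mask("/sys/devices/system/cpu/online");
	return 0;
}
```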