@@ -544,7 +544,10 @@ EXPORT_SYMBOL(blk_mq_abort_requeue_list);
 
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 {
-        return tags->rqs[tag];
+        if (tag < tags->nr_tags)
+                return tags->rqs[tag];
+
+        return NULL;
 }
 EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
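Note on the hunk above: with the added bounds check, blk_mq_tag_to_rq() returns NULL for a tag at or beyond tags->nr_tags instead of indexing past rqs[]. Below is a minimal caller sketch under that assumption; the example_tag_to_rq() helper and the warning text are hypothetical and not part of this patch.

#include <linux/blk-mq.h>
#include <linux/printk.h>

/*
 * Hypothetical lookup helper in a driver's completion path.  The tag
 * originates from the device, so it may be stale or out of range; with
 * the bounds check above, blk_mq_tag_to_rq() now returns NULL for such
 * tags instead of reading past the rqs[] array.
 */
static struct request *example_tag_to_rq(struct blk_mq_tags *tags,
                                         unsigned int tag)
{
        struct request *rq = blk_mq_tag_to_rq(tags, tag);

        if (!rq)
                pr_warn("example: device reported invalid tag %u\n", tag);

        return rq;      /* caller completes the request only if non-NULL */
}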
@@ -1744,31 +1747,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
        return -1;
 }
 
-static int blk_mq_init_hw_queues(struct request_queue *q,
-                struct blk_mq_tag_set *set)
-{
-        struct blk_mq_hw_ctx *hctx;
-        unsigned int i;
-
-        /*
-         * Initialize hardware queues
-         */
-        queue_for_each_hw_ctx(q, hctx, i) {
-                if (blk_mq_init_hctx(q, set, hctx, i))
-                        break;
-        }
-
-        if (i == q->nr_hw_queues)
-                return 0;
-
-        /*
-         * Init failed
-         */
-        blk_mq_exit_hw_queues(q, set, i);
-
-        return 1;
-}
-
 static void blk_mq_init_cpu_queues(struct request_queue *q,
                                    unsigned int nr_hw_queues)
 {
@@ -1826,6 +1804,7 @@ static void blk_mq_map_swqueue(struct request_queue *q,
                        continue;
 
                hctx = q->mq_ops->map_queue(q, i);
+
                cpumask_set_cpu(i, hctx->cpumask);
                ctx->index_hw = hctx->nr_ctx;
                hctx->ctxs[hctx->nr_ctx++] = ctx;
@@ -1974,56 +1953,93 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 }
 EXPORT_SYMBOL(blk_mq_init_queue);
 
-struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
-                                                  struct request_queue *q)
+static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
+                                   struct request_queue *q)
 {
-        struct blk_mq_hw_ctx **hctxs;
-        struct blk_mq_ctx __percpu *ctx;
-        unsigned int *map;
-        int i;
-
-        ctx = alloc_percpu(struct blk_mq_ctx);
-        if (!ctx)
-                return ERR_PTR(-ENOMEM);
-
-        hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
-                        set->numa_node);
-
-        if (!hctxs)
-                goto err_percpu;
-
-        map = blk_mq_make_queue_map(set);
-        if (!map)
-                goto err_map;
+        int i, j;
+        struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
 
+        blk_mq_sysfs_unregister(q);
        for (i = 0; i < set->nr_hw_queues; i++) {
-                int node = blk_mq_hw_queue_to_node(map, i);
+                int node;
 
+                if (hctxs[i])
+                        continue;
+
+                node = blk_mq_hw_queue_to_node(q->mq_map, i);
                hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
                                        GFP_KERNEL, node);
                if (!hctxs[i])
-                        goto err_hctxs;
+                        break;
 
                if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
-                                             node))
-                        goto err_hctxs;
+                                             node)) {
+                        kfree(hctxs[i]);
+                        hctxs[i] = NULL;
+                        break;
+                }
 
                atomic_set(&hctxs[i]->nr_active, 0);
                hctxs[i]->numa_node = node;
                hctxs[i]->queue_num = i;
+
+                if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
+                        free_cpumask_var(hctxs[i]->cpumask);
+                        kfree(hctxs[i]);
+                        hctxs[i] = NULL;
+                        break;
+                }
+                blk_mq_hctx_kobj_init(hctxs[i]);
        }
+        for (j = i; j < q->nr_hw_queues; j++) {
+                struct blk_mq_hw_ctx *hctx = hctxs[j];
+
+                if (hctx) {
+                        if (hctx->tags) {
+                                blk_mq_free_rq_map(set, hctx->tags, j);
+                                set->tags[j] = NULL;
+                        }
+                        blk_mq_exit_hctx(q, set, hctx, j);
+                        free_cpumask_var(hctx->cpumask);
+                        kobject_put(&hctx->kobj);
+                        kfree(hctx->ctxs);
+                        kfree(hctx);
+                        hctxs[j] = NULL;
+
+                }
+        }
+        q->nr_hw_queues = i;
+        blk_mq_sysfs_register(q);
+}
+
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+                                                  struct request_queue *q)
+{
+        /* mark the queue as mq asap */
+        q->mq_ops = set->ops;
+
+        q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
+        if (!q->queue_ctx)
+                return ERR_PTR(-ENOMEM);
+
+        q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
+                                       GFP_KERNEL, set->numa_node);
+        if (!q->queue_hw_ctx)
+                goto err_percpu;
+
+        q->mq_map = blk_mq_make_queue_map(set);
+        if (!q->mq_map)
+                goto err_map;
+
+        blk_mq_realloc_hw_ctxs(set, q);
+        if (!q->nr_hw_queues)
+                goto err_hctxs;
 
        INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
        blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
        q->nr_queues = nr_cpu_ids;
-        q->nr_hw_queues = set->nr_hw_queues;
-        q->mq_map = map;
-
-        q->queue_ctx = ctx;
-        q->queue_hw_ctx = hctxs;
 
-        q->mq_ops = set->ops;
        q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
        if (!(set->flags & BLK_MQ_F_SG_MERGE))
@@ -2050,9 +2066,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
-        if (blk_mq_init_hw_queues(q, set))
-                goto err_hctxs;
-
        get_online_cpus();
        mutex_lock(&all_q_mutex);
 
@@ -2066,17 +2079,11 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        return q;
 
 err_hctxs:
-        kfree(map);
-        for (i = 0; i < set->nr_hw_queues; i++) {
-                if (!hctxs[i])
-                        break;
-                free_cpumask_var(hctxs[i]->cpumask);
-                kfree(hctxs[i]);
-        }
+        kfree(q->mq_map);
 err_map:
-        kfree(hctxs);
+        kfree(q->queue_hw_ctx);
 err_percpu:
-        free_percpu(ctx);
+        free_percpu(q->queue_ctx);
        return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
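Note on the hunk above: blk_mq_init_allocated_queue() still reports failure as ERR_PTR(-ENOMEM) through the err_* labels, which now release q->mq_map, q->queue_hw_ctx and q->queue_ctx. A minimal caller sketch follows, mirroring the pattern blk_mq_init_queue() is believed to use; the example_init_queue() wrapper itself is hypothetical.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

/*
 * Allocate a bare queue, then hand it to blk_mq_init_allocated_queue().
 * The return value is an ERR_PTR() on failure, so it must be checked
 * with IS_ERR() and the pre-allocated queue cleaned up in that case.
 */
static struct request_queue *example_init_queue(struct blk_mq_tag_set *set)
{
        struct request_queue *uninit_q, *q;

        uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
        if (!uninit_q)
                return ERR_PTR(-ENOMEM);

        q = blk_mq_init_allocated_queue(set, uninit_q);
        if (IS_ERR(q))
                blk_cleanup_queue(uninit_q);

        return q;
}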
@@ -2284,9 +2291,13 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
                set->nr_hw_queues = 1;
                set->queue_depth = min(64U, set->queue_depth);
        }
+        /*
+         * There is no use for more h/w queues than cpus.
+         */
+        if (set->nr_hw_queues > nr_cpu_ids)
+                set->nr_hw_queues = nr_cpu_ids;
 
-        set->tags = kmalloc_node(set->nr_hw_queues *
-                                 sizeof(struct blk_mq_tags *),
+        set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
                                 GFP_KERNEL, set->numa_node);
        if (!set->tags)
                return -ENOMEM;
@@ -2309,7 +2320,7 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 {
        int i;
 
-        for (i = 0; i < set->nr_hw_queues; i++) {
+        for (i = 0; i < nr_cpu_ids; i++) {
                if (set->tags[i])
                        blk_mq_free_rq_map(set, set->tags[i], i);
        }
@@ -2330,6 +2341,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 
        ret = 0;
        queue_for_each_hw_ctx(q, hctx, i) {
+                if (!hctx->tags)
+                        continue;
                ret = blk_mq_tag_update_depth(hctx->tags, nr);
                if (ret)
                        break;
@@ -2341,6 +2354,35 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
        return ret;
 }
 
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+{
+        struct request_queue *q;
+
+        if (nr_hw_queues > nr_cpu_ids)
+                nr_hw_queues = nr_cpu_ids;
+        if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
+                return;
+
+        list_for_each_entry(q, &set->tag_list, tag_set_list)
+                blk_mq_freeze_queue(q);
+
+        set->nr_hw_queues = nr_hw_queues;
+        list_for_each_entry(q, &set->tag_list, tag_set_list) {
+                blk_mq_realloc_hw_ctxs(set, q);
+
+                if (q->nr_hw_queues > 1)
+                        blk_queue_make_request(q, blk_mq_make_request);
+                else
+                        blk_queue_make_request(q, blk_sq_make_request);
+
+                blk_mq_queue_reinit(q, cpu_online_mask);
+        }
+
+        list_for_each_entry(q, &set->tag_list, tag_set_list)
+                blk_mq_unfreeze_queue(q);
+}
+EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
+
 void blk_mq_disable_hotplug(void)
 {
        mutex_lock(&all_q_mutex);
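Note on the new export: a driver that shares one tag set across its request queues could call blk_mq_update_nr_hw_queues() when its usable queue count changes, for example after reprobing interrupt vectors. A minimal sketch under that assumption, provided the matching prototype is visible via linux/blk-mq.h; struct example_dev and its fields are hypothetical.

#include <linux/blk-mq.h>

/* Hypothetical driver state: one tag set shared by its request queues. */
struct example_dev {
        struct blk_mq_tag_set   tag_set;
        unsigned int            num_vecs;       /* usable interrupt vectors */
};

/*
 * Called after the device's usable vector count changes.  Per the hunk
 * above, blk_mq_update_nr_hw_queues() clamps the count to nr_cpu_ids,
 * freezes every queue on the tag set, reallocates hardware contexts via
 * blk_mq_realloc_hw_ctxs(), reinitializes the queues and unfreezes them.
 */
static void example_reset_queues(struct example_dev *dev)
{
        blk_mq_update_nr_hw_queues(&dev->tag_set, dev->num_vecs);
}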