@@ -1553,8 +1553,8 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
         return cookie;
 }
 
-void blk_mq_free_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
-                unsigned int hctx_idx)
+void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+                     unsigned int hctx_idx)
 {
         struct page *page;
 
@@ -1580,49 +1580,62 @@ void blk_mq_free_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                 kmemleak_free(page_address(page));
                 __free_pages(page, page->private);
         }
+}
 
+void blk_mq_free_rq_map(struct blk_mq_tags *tags)
+{
         kfree(tags->rqs);
+        tags->rqs = NULL;
 
         blk_mq_free_tags(tags);
 }
 
-static size_t order_to_size(unsigned int order)
-{
-        return (size_t)PAGE_SIZE << order;
-}
-
-struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
-                unsigned int hctx_idx)
+struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
+                                        unsigned int hctx_idx,
+                                        unsigned int nr_tags,
+                                        unsigned int reserved_tags)
 {
         struct blk_mq_tags *tags;
-        unsigned int i, j, entries_per_page, max_order = 4;
-        size_t rq_size, left;
 
-        tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
+        tags = blk_mq_init_tags(nr_tags, reserved_tags,
                                 set->numa_node,
                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
         if (!tags)
                 return NULL;
 
-        INIT_LIST_HEAD(&tags->page_list);
-
-        tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
+        tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
                                  set->numa_node);
         if (!tags->rqs) {
                 blk_mq_free_tags(tags);
                 return NULL;
         }
 
+        return tags;
+}
+
+static size_t order_to_size(unsigned int order)
+{
+        return (size_t)PAGE_SIZE << order;
+}
+
+int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+                     unsigned int hctx_idx, unsigned int depth)
+{
+        unsigned int i, j, entries_per_page, max_order = 4;
+        size_t rq_size, left;
+
+        INIT_LIST_HEAD(&tags->page_list);
+
         /*
          * rq_size is the size of the request plus driver payload, rounded
          * to the cacheline size
          */
         rq_size = round_up(sizeof(struct request) + set->cmd_size,
                                 cache_line_size());
-        left = rq_size * set->queue_depth;
+        left = rq_size * depth;
 
-        for (i = 0; i < set->queue_depth; ) {
+        for (i = 0; i < depth; ) {
                 int this_order = max_order;
                 struct page *page;
                 int to_do;
@@ -1656,7 +1669,7 @@ struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                  */
                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
                 entries_per_page = order_to_size(this_order) / rq_size;
-                to_do = min(entries_per_page, set->queue_depth - i);
+                to_do = min(entries_per_page, depth - i);
                 left -= to_do * rq_size;
                 for (j = 0; j < to_do; j++) {
                         tags->rqs[i] = p;
@@ -1673,11 +1686,11 @@ struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                         i++;
                 }
         }
-        return tags;
+        return 0;
 
 fail:
-        blk_mq_free_rq_map(set, tags, hctx_idx);
-        return NULL;
+        blk_mq_free_rqs(set, tags, hctx_idx);
+        return -ENOMEM;
 }
 
 /*
@@ -1869,6 +1882,33 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
         }
 }
 
+static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
+{
+        int ret = 0;
+
+        set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
+                                        set->queue_depth, set->reserved_tags);
+        if (!set->tags[hctx_idx])
+                return false;
+
+        ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
+                                set->queue_depth);
+        if (!ret)
+                return true;
+
+        blk_mq_free_rq_map(set->tags[hctx_idx]);
+        set->tags[hctx_idx] = NULL;
+        return false;
+}
+
+static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
+                                         unsigned int hctx_idx)
+{
+        blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
+        blk_mq_free_rq_map(set->tags[hctx_idx]);
+        set->tags[hctx_idx] = NULL;
+}
+
 static void blk_mq_map_swqueue(struct request_queue *q,
                                const struct cpumask *online_mask)
 {
@@ -1897,17 +1937,15 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 
                 hctx_idx = q->mq_map[i];
                 /* unmapped hw queue can be remapped after CPU topo changed */
-                if (!set->tags[hctx_idx]) {
-                        set->tags[hctx_idx] = blk_mq_init_rq_map(set, hctx_idx);
-
+                if (!set->tags[hctx_idx] &&
+                    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
                         /*
                          * If tags initialization fail for some hctx,
                          * that hctx won't be brought online. In this
                          * case, remap the current ctx to hctx[0] which
                          * is guaranteed to always have tags allocated
                          */
-                        if (!set->tags[hctx_idx])
-                                q->mq_map[i] = 0;
+                        q->mq_map[i] = 0;
                 }
 
                 ctx = per_cpu_ptr(q->queue_ctx, i);
@@ -1930,10 +1968,9 @@ static void blk_mq_map_swqueue(struct request_queue *q,
                          * fallback in case of a new remap fails
                          * allocation
                          */
-                        if (i && set->tags[i]) {
-                                blk_mq_free_rq_map(set, set->tags[i], i);
-                                set->tags[i] = NULL;
-                        }
+                        if (i && set->tags[i])
+                                blk_mq_free_map_and_requests(set, i);
+
                         hctx->tags = NULL;
                         continue;
                 }
@@ -2100,10 +2137,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                 struct blk_mq_hw_ctx *hctx = hctxs[j];
 
                 if (hctx) {
-                        if (hctx->tags) {
-                                blk_mq_free_rq_map(set, hctx->tags, j);
-                                set->tags[j] = NULL;
-                        }
+                        if (hctx->tags)
+                                blk_mq_free_map_and_requests(set, j);
                         blk_mq_exit_hctx(q, set, hctx, j);
                         free_cpumask_var(hctx->cpumask);
                         kobject_put(&hctx->kobj);
@@ -2299,17 +2334,15 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 {
         int i;
 
-        for (i = 0; i < set->nr_hw_queues; i++) {
-                set->tags[i] = blk_mq_init_rq_map(set, i);
-                if (!set->tags[i])
+        for (i = 0; i < set->nr_hw_queues; i++)
+                if (!__blk_mq_alloc_rq_map(set, i))
                         goto out_unwind;
-        }
 
         return 0;
 
 out_unwind:
         while (--i >= 0)
-                blk_mq_free_rq_map(set, set->tags[i], i);
+                blk_mq_free_rq_map(set->tags[i]);
 
         return -ENOMEM;
 }
@@ -2433,10 +2466,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 {
         int i;
 
-        for (i = 0; i < nr_cpu_ids; i++) {
-                if (set->tags[i])
-                        blk_mq_free_rq_map(set, set->tags[i], i);
-        }
+        for (i = 0; i < nr_cpu_ids; i++)
+                blk_mq_free_map_and_requests(set, i);
 
         kfree(set->mq_map);
         set->mq_map = NULL;
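
For orientation, here is a minimal sketch (illustration only, not part of the patch) of how the split helpers above are meant to be paired by a caller. It mirrors the __blk_mq_alloc_rq_map() and blk_mq_free_map_and_requests() wrappers the diff introduces; the example_* names are hypothetical, and the sketch assumes the blk-mq internal declarations from block/blk-mq.h.

/*
 * Illustration only (not part of the patch).  Assumes the blk-mq internals
 * declared in block/blk-mq.h; the example_* names are hypothetical.
 */
static int example_alloc_hctx_tags(struct blk_mq_tag_set *set, int hctx_idx)
{
        /* Step 1: allocate the tag map (blk_mq_tags + the tags->rqs array). */
        set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
                                                  set->queue_depth,
                                                  set->reserved_tags);
        if (!set->tags[hctx_idx])
                return -ENOMEM;

        /* Step 2: allocate the request pages that back the tag map. */
        if (blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
                             set->queue_depth)) {
                /* Unwind in reverse order: only the map exists at this point. */
                blk_mq_free_rq_map(set->tags[hctx_idx]);
                set->tags[hctx_idx] = NULL;
                return -ENOMEM;
        }

        return 0;
}

static void example_free_hctx_tags(struct blk_mq_tag_set *set, int hctx_idx)
{
        /* Free the request pages on tags->page_list first... */
        blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
        /* ...then the map, which kfrees tags->rqs and the tags structure. */
        blk_mq_free_rq_map(set->tags[hctx_idx]);
        set->tags[hctx_idx] = NULL;
}

The free side must run in that order: blk_mq_free_rqs() releases the pages queued on tags->page_list, while blk_mq_free_rq_map() then frees the tags->rqs pointer array and the tags structure itself.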