
Commit cc71a6f

blk-mq: abstract out helpers for allocating/freeing tag maps
Prep patch for adding an extra tag map for scheduler requests.

Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
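The patch splits the old blk_mq_init_rq_map()/blk_mq_free_rq_map() pair into separate helpers for the tag map itself (blk_mq_alloc_rq_map()/blk_mq_free_rq_map()) and for the statically allocated requests backing it (blk_mq_alloc_rqs()/blk_mq_free_rqs()), so an extra scheduler tag map can later reuse the same code with its own depth. A minimal sketch of how a caller pairs the new helpers, mirroring the __blk_mq_alloc_rq_map() helper added in this patch (the function name here is illustrative only):

	/* Sketch: allocate a tag map plus its requests for one hw queue. */
	static int example_alloc_map_and_rqs(struct blk_mq_tag_set *set,
					     unsigned int hctx_idx,
					     unsigned int depth)
	{
		/* Step 1: the tag map only (tags plus the rqs pointer array). */
		set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx, depth,
							  set->reserved_tags);
		if (!set->tags[hctx_idx])
			return -ENOMEM;

		/* Step 2: the request structures themselves. */
		if (blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx, depth)) {
			blk_mq_free_rq_map(set->tags[hctx_idx]);
			set->tags[hctx_idx] = NULL;
			return -ENOMEM;
		}

		return 0;
	}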
1 parent 4941115 commit cc71a6f

File tree

2 files changed: +83 -48 lines changed

block/blk-mq.c

Lines changed: 74 additions & 43 deletions
@@ -1553,8 +1553,8 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	return cookie;
 }
 
-void blk_mq_free_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
-			unsigned int hctx_idx)
+void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+		     unsigned int hctx_idx)
 {
 	struct page *page;
 
@@ -1580,49 +1580,62 @@ void blk_mq_free_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		kmemleak_free(page_address(page));
 		__free_pages(page, page->private);
 	}
+}
 
+void blk_mq_free_rq_map(struct blk_mq_tags *tags)
+{
 	kfree(tags->rqs);
+	tags->rqs = NULL;
 
 	blk_mq_free_tags(tags);
 }
 
-static size_t order_to_size(unsigned int order)
-{
-	return (size_t)PAGE_SIZE << order;
-}
-
-struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
-				       unsigned int hctx_idx)
+struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
+					unsigned int hctx_idx,
+					unsigned int nr_tags,
+					unsigned int reserved_tags)
 {
 	struct blk_mq_tags *tags;
-	unsigned int i, j, entries_per_page, max_order = 4;
-	size_t rq_size, left;
 
-	tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
+	tags = blk_mq_init_tags(nr_tags, reserved_tags,
 				set->numa_node,
 				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
 	if (!tags)
 		return NULL;
 
-	INIT_LIST_HEAD(&tags->page_list);
-
-	tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
+	tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
 				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
 				 set->numa_node);
 	if (!tags->rqs) {
 		blk_mq_free_tags(tags);
 		return NULL;
 	}
 
+	return tags;
+}
+
+static size_t order_to_size(unsigned int order)
+{
+	return (size_t)PAGE_SIZE << order;
+}
+
+int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+		     unsigned int hctx_idx, unsigned int depth)
+{
+	unsigned int i, j, entries_per_page, max_order = 4;
+	size_t rq_size, left;
+
+	INIT_LIST_HEAD(&tags->page_list);
+
 	/*
 	 * rq_size is the size of the request plus driver payload, rounded
 	 * to the cacheline size
 	 */
 	rq_size = round_up(sizeof(struct request) + set->cmd_size,
				cache_line_size());
-	left = rq_size * set->queue_depth;
+	left = rq_size * depth;
 
-	for (i = 0; i < set->queue_depth; ) {
+	for (i = 0; i < depth; ) {
 		int this_order = max_order;
 		struct page *page;
 		int to_do;
@@ -1656,7 +1669,7 @@ struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 		 */
 		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
 		entries_per_page = order_to_size(this_order) / rq_size;
-		to_do = min(entries_per_page, set->queue_depth - i);
+		to_do = min(entries_per_page, depth - i);
 		left -= to_do * rq_size;
 		for (j = 0; j < to_do; j++) {
 			tags->rqs[i] = p;
@@ -1673,11 +1686,11 @@ struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 			i++;
 		}
 	}
-	return tags;
+	return 0;
 
 fail:
-	blk_mq_free_rq_map(set, tags, hctx_idx);
-	return NULL;
+	blk_mq_free_rqs(set, tags, hctx_idx);
+	return -ENOMEM;
 }
 
 /*
@@ -1869,6 +1882,33 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 	}
 }
 
+static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
+{
+	int ret = 0;
+
+	set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
+					set->queue_depth, set->reserved_tags);
+	if (!set->tags[hctx_idx])
+		return false;
+
+	ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
+				set->queue_depth);
+	if (!ret)
+		return true;
+
+	blk_mq_free_rq_map(set->tags[hctx_idx]);
+	set->tags[hctx_idx] = NULL;
+	return false;
+}
+
+static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
+					 unsigned int hctx_idx)
+{
+	blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
+	blk_mq_free_rq_map(set->tags[hctx_idx]);
+	set->tags[hctx_idx] = NULL;
+}
+
 static void blk_mq_map_swqueue(struct request_queue *q,
 			       const struct cpumask *online_mask)
 {
@@ -1897,17 +1937,15 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 
 		hctx_idx = q->mq_map[i];
 		/* unmapped hw queue can be remapped after CPU topo changed */
-		if (!set->tags[hctx_idx]) {
-			set->tags[hctx_idx] = blk_mq_init_rq_map(set, hctx_idx);
-
+		if (!set->tags[hctx_idx] &&
+		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
 			/*
 			 * If tags initialization fail for some hctx,
 			 * that hctx won't be brought online. In this
 			 * case, remap the current ctx to hctx[0] which
 			 * is guaranteed to always have tags allocated
 			 */
-			if (!set->tags[hctx_idx])
-				q->mq_map[i] = 0;
+			q->mq_map[i] = 0;
 		}
 
 		ctx = per_cpu_ptr(q->queue_ctx, i);
@@ -1930,10 +1968,9 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 			 * fallback in case of a new remap fails
 			 * allocation
 			 */
-			if (i && set->tags[i]) {
-				blk_mq_free_rq_map(set, set->tags[i], i);
-				set->tags[i] = NULL;
-			}
+			if (i && set->tags[i])
+				blk_mq_free_map_and_requests(set, i);
+
 			hctx->tags = NULL;
 			continue;
 		}
@@ -2100,10 +2137,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx = hctxs[j];
 
 		if (hctx) {
-			if (hctx->tags) {
-				blk_mq_free_rq_map(set, hctx->tags, j);
-				set->tags[j] = NULL;
-			}
+			if (hctx->tags)
+				blk_mq_free_map_and_requests(set, j);
 			blk_mq_exit_hctx(q, set, hctx, j);
 			free_cpumask_var(hctx->cpumask);
 			kobject_put(&hctx->kobj);
@@ -2299,17 +2334,15 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 {
 	int i;
 
-	for (i = 0; i < set->nr_hw_queues; i++) {
-		set->tags[i] = blk_mq_init_rq_map(set, i);
-		if (!set->tags[i])
+	for (i = 0; i < set->nr_hw_queues; i++)
+		if (!__blk_mq_alloc_rq_map(set, i))
 			goto out_unwind;
-	}
 
 	return 0;
 
 out_unwind:
 	while (--i >= 0)
-		blk_mq_free_rq_map(set, set->tags[i], i);
+		blk_mq_free_rq_map(set->tags[i]);
 
 	return -ENOMEM;
 }
@@ -2433,10 +2466,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 {
 	int i;
 
-	for (i = 0; i < nr_cpu_ids; i++) {
-		if (set->tags[i])
-			blk_mq_free_rq_map(set, set->tags[i], i);
-	}
+	for (i = 0; i < nr_cpu_ids; i++)
+		blk_mq_free_map_and_requests(set, i);
 
 	kfree(set->mq_map);
 	set->mq_map = NULL;

block/blk-mq.h

Lines changed: 9 additions & 5 deletions
@@ -37,17 +37,21 @@ void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 /*
  * Internal helpers for allocating/freeing the request map
  */
-void blk_mq_free_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
-			unsigned int hctx_idx);
-struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
-					unsigned int hctx_idx);
+void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+		     unsigned int hctx_idx);
+void blk_mq_free_rq_map(struct blk_mq_tags *tags);
+struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
+					unsigned int hctx_idx,
+					unsigned int nr_tags,
+					unsigned int reserved_tags);
+int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+		     unsigned int hctx_idx, unsigned int depth);
 
 /*
  * Internal helpers for request insertion into sw queues
 */
 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 			     bool at_head);
-
 /*
  * CPU hotplug helpers
  */
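Teardown pairs the same way in reverse: release the requests with blk_mq_free_rqs(), then the map with blk_mq_free_rq_map(), as the new blk_mq_free_map_and_requests() helper does. A minimal sketch of a full teardown loop along those lines (the function name is illustrative only):

	/* Sketch: free every hw queue's requests first, then its tag map. */
	static void example_free_all_maps(struct blk_mq_tag_set *set)
	{
		unsigned int i;

		for (i = 0; i < set->nr_hw_queues; i++) {
			if (!set->tags[i])
				continue;
			blk_mq_free_rqs(set, set->tags[i], i);
			blk_mq_free_rq_map(set->tags[i]);
			set->tags[i] = NULL;
		}
	}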
