Commit ae11889

htejun authored and axboe committed
blkcg: consolidate blkg creation in blkcg_bio_issue_check()
blkg (blkcg_gq) currently is created by blkcg policies invoking blkg_lookup_create(), which ends up repeating about the same code in different policies. Theoretically, this can avoid the overhead of looking up and/or creating blkgs if blkcg is enabled but no policy is in use; however, the cost of blkg lookup/creation is very low, especially if only the root blkcg is in use, which is highly likely when no blkcg policy is in active use - it boils down to a single, very predictable conditional and the surrounding RCU protection.

This patch consolidates blkg creation into a new function, blkcg_bio_issue_check(), which is called during bio issue from generic_make_request_checks(). blkcg_bio_issue_check() is now the only function which tries to create missing blkgs. The subsequent policy and request_list operations just perform blkg_lookup() and, if the blkg is missing, fall back to the root.

* blk_get_rl() no longer tries to create a blkg. It uses blkg_lookup() instead of blkg_lookup_create().

* blk_throtl_bio() is now called from blkcg_bio_issue_check() with the RCU read lock held and the blkg already looked up. Both throtl_lookup_tg() and throtl_lookup_create_tg() are dropped.

* cfq is similarly updated. cfq_lookup_create_cfqg() is replaced with cfq_lookup_cfqg(), which uses blkg_lookup().

This consolidates blkg handling and avoids unnecessary blkg creation retries under memory pressure. In addition, it provides a common bio entry point into blkcg where things like common accounting can be performed.

v2: Build fixes for !CONFIG_CFQ_GROUP_IOSCHED and !CONFIG_BLK_DEV_THROTTLING.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Arianna Avanzini <avanzini.arianna@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
1 parent c9589f0 commit ae11889

6 files changed: +57 −99 lines

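In outline, the consolidation works as follows: bio issue now funnels through one helper that looks up the blkg, creates it under the queue lock only when it is missing, and runs throttling; everything downstream merely looks up. A condensed sketch of the blkcg_bio_issue_check() added in the include/linux/blk-cgroup.h hunk below, with explanatory comments that are not part of the patch:

/* Condensed sketch of the new common entry point (full version in the
 * include/linux/blk-cgroup.h hunk below).  Called once per bio from
 * generic_make_request_checks(); now the only place that creates
 * missing blkgs. */
static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);

	blkg = blkg_lookup(blkcg, q);		/* fast path: blkg exists */
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);	/* creation needs queue_lock */
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;		/* downstream falls back to root */
		spin_unlock_irq(q->queue_lock);
	}

	throtl = blk_throtl_bio(q, blkg, bio);	/* RCU held, blkg may be NULL */
	rcu_read_unlock();

	return !throtl;		/* true: issue the bio; false: throttled */
}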

block/blk-cgroup.c

Lines changed: 1 addition & 1 deletion
@@ -153,6 +153,7 @@ struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
 
 /*
  * If @new_blkg is %NULL, this function tries to allocate a new one as

@@ -295,7 +296,6 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 		return blkg;
 	}
 }
-EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
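A note on the EXPORT_SYMBOL_GPL() move above: modular policies such as cfq now reach blkgs only through the inline blkg_lookup(), whose slow path is blkg_lookup_slowpath(), so that symbol must be exported; blkg_lookup_create() is called only from the built-in bio-issue path after this patch and no longer needs to be.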

block/blk-core.c

Lines changed: 2 additions & 2 deletions
@@ -1889,8 +1889,8 @@ generic_make_request_checks(struct bio *bio)
 	 */
 	create_io_context(GFP_ATOMIC, q->node);
 
-	if (blk_throtl_bio(q, bio))
-		return false;	/* throttled, will be resubmitted later */
+	if (!blkcg_bio_issue_check(q, bio))
+		return false;
 
 	trace_block_bio_queue(q, bio);
 	return true;

block/blk-throttle.c

Lines changed: 6 additions & 66 deletions
@@ -182,11 +182,6 @@ static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
 	return pd_to_blkg(&tg->pd);
 }
 
-static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
-{
-	return blkg_to_tg(td->queue->root_blkg);
-}
-
 /**
  * sq_to_tg - return the throl_grp the specified service queue belongs to
  * @sq: the throtl_service_queue of interest

@@ -449,39 +444,6 @@ static void throtl_pd_reset_stats(struct blkg_policy_data *pd)
 	}
 }
 
-static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
-					   struct blkcg *blkcg)
-{
-	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
-}
-
-static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
-						  struct blkcg *blkcg)
-{
-	struct request_queue *q = td->queue;
-	struct throtl_grp *tg = NULL;
-
-	/*
-	 * This is the common case when there are no blkcgs. Avoid lookup
-	 * in this case
-	 */
-	if (blkcg == &blkcg_root) {
-		tg = td_root_tg(td);
-	} else {
-		struct blkcg_gq *blkg;
-
-		blkg = blkg_lookup_create(blkcg, q);
-
-		/* if %NULL and @q is alive, fall back to root_tg */
-		if (!IS_ERR(blkg))
-			tg = blkg_to_tg(blkg);
-		else
-			tg = td_root_tg(td);
-	}
-
-	return tg;
-}
-
 static struct throtl_grp *
 throtl_rb_first(struct throtl_service_queue *parent_sq)
 {

@@ -1403,46 +1365,26 @@ static struct blkcg_policy blkcg_policy_throtl = {
 	.pd_reset_stats_fn	= throtl_pd_reset_stats,
 };
 
-bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
+bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
+		    struct bio *bio)
 {
-	struct throtl_data *td = q->td;
 	struct throtl_qnode *qn = NULL;
-	struct throtl_grp *tg;
+	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
 	struct throtl_service_queue *sq;
 	bool rw = bio_data_dir(bio);
-	struct blkcg *blkcg;
 	bool throttled = false;
 
+	WARN_ON_ONCE(!rcu_read_lock_held());
+
 	/* see throtl_charge_bio() */
-	if (bio->bi_rw & REQ_THROTTLED)
+	if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
 		goto out;
 
-	/*
-	 * A throtl_grp pointer retrieved under rcu can be used to access
-	 * basic fields like stats and io rates. If a group has no rules,
-	 * just update the dispatch stats in lockless manner and return.
-	 */
-	rcu_read_lock();
-	blkcg = bio_blkcg(bio);
-	tg = throtl_lookup_tg(td, blkcg);
-	if (tg) {
-		if (!tg->has_rules[rw]) {
-			throtl_update_dispatch_stats(tg_to_blkg(tg),
-					bio->bi_iter.bi_size, bio->bi_rw);
-			goto out_unlock_rcu;
-		}
-	}
-
-	/*
-	 * Either group has not been allocated yet or it is not an unlimited
-	 * IO group
-	 */
 	spin_lock_irq(q->queue_lock);
 
 	if (unlikely(blk_queue_bypass(q)))
 		goto out_unlock;
 
-	tg = throtl_lookup_create_tg(td, blkcg);
 	sq = &tg->service_queue;
 
 	while (true) {

@@ -1507,8 +1449,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 
 out_unlock:
 	spin_unlock_irq(q->queue_lock);
-out_unlock_rcu:
-	rcu_read_unlock();
 out:
 	/*
 	 * As multiple blk-throtls may stack in the same issue path, we
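The net effect of the blk-throttle.c changes is a new calling contract: blk_throtl_bio() no longer performs any lookup itself. A condensed restatement of the new prologue, with the contract spelled out in comments for exposition (not part of the patch):

bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
		    struct bio *bio)
{
	/* @blkg was looked up by the caller (blkcg_bio_issue_check());
	 * NULL means lookup/creation failed, so fall back to the root
	 * group. */
	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
	bool rw = bio_data_dir(bio);

	WARN_ON_ONCE(!rcu_read_lock_held());	/* caller holds the RCU read lock */

	/* cheap lockless exit: bio already charged once, or no limits
	 * configured for this direction */
	if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
		return false;

	/* ... slow path under q->queue_lock, unchanged by this patch ... */
}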

block/blk.h

Lines changed: 0 additions & 5 deletions
@@ -266,15 +266,10 @@ static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
  * Internal throttling interface
  */
 #ifdef CONFIG_BLK_DEV_THROTTLING
-extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
 extern void blk_throtl_drain(struct request_queue *q);
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
-static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
-{
-	return false;
-}
 static inline void blk_throtl_drain(struct request_queue *q) { }
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline void blk_throtl_exit(struct request_queue *q) { }
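The declarations removed here reappear in include/linux/blk-cgroup.h (below) rather than vanishing: the new inline blkcg_bio_issue_check() must be able to call blk_throtl_bio(), so the prototype and its !CONFIG_BLK_DEV_THROTTLING stub have to live in a header visible to it.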

block/cfq-iosched.c

Lines changed: 10 additions & 23 deletions
@@ -1683,28 +1683,15 @@ static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
 	cfqg_stats_reset(&cfqg->dead_stats);
 }
 
-/*
- * Search for the cfq group current task belongs to. request_queue lock must
- * be held.
- */
-static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
-						struct blkcg *blkcg)
+static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
+					 struct blkcg *blkcg)
 {
-	struct request_queue *q = cfqd->queue;
-	struct cfq_group *cfqg = NULL;
-
-	/* avoid lookup for the common case where there's no blkcg */
-	if (blkcg == &blkcg_root) {
-		cfqg = cfqd->root_group;
-	} else {
-		struct blkcg_gq *blkg;
-
-		blkg = blkg_lookup_create(blkcg, q);
-		if (!IS_ERR(blkg))
-			cfqg = blkg_to_cfqg(blkg);
-	}
+	struct blkcg_gq *blkg;
 
-	return cfqg;
+	blkg = blkg_lookup(blkcg, cfqd->queue);
+	if (likely(blkg))
+		return blkg_to_cfqg(blkg);
+	return NULL;
 }
 
 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)

@@ -2108,8 +2095,8 @@ static struct cftype cfq_blkcg_files[] = {
 	{ }	/* terminate */
 };
 #else /* GROUP_IOSCHED */
-static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
-						struct blkcg *blkcg)
+static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
+					 struct blkcg *blkcg)
 {
 	return cfqd->root_group;
 }

@@ -3716,7 +3703,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 	struct cfq_group *cfqg;
 
 	rcu_read_lock();
-	cfqg = cfq_lookup_create_cfqg(cfqd, bio_blkcg(bio));
+	cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
 	if (!cfqg) {
 		cfqq = &cfqd->oom_cfqq;
 		goto out;

include/linux/blk-cgroup.h

Lines changed: 38 additions & 2 deletions
@@ -421,8 +421,8 @@ static inline struct request_list *blk_get_rl(struct request_queue *q,
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
-	blkg = blkg_lookup_create(blkcg, q);
-	if (unlikely(IS_ERR(blkg)))
+	blkg = blkg_lookup(blkcg, q);
+	if (unlikely(!blkg))
 		goto root_rl;
 
 	blkg_get(blkg);

@@ -636,6 +636,39 @@ static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
 	u64_stats_update_end(&to->syncp);
 }
 
+#ifdef CONFIG_BLK_DEV_THROTTLING
+extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
+			   struct bio *bio);
+#else
+static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
+				  struct bio *bio) { return false; }
+#endif
+
+static inline bool blkcg_bio_issue_check(struct request_queue *q,
+					 struct bio *bio)
+{
+	struct blkcg *blkcg;
+	struct blkcg_gq *blkg;
+	bool throtl = false;
+
+	rcu_read_lock();
+	blkcg = bio_blkcg(bio);
+
+	blkg = blkg_lookup(blkcg, q);
+	if (unlikely(!blkg)) {
+		spin_lock_irq(q->queue_lock);
+		blkg = blkg_lookup_create(blkcg, q);
+		if (IS_ERR(blkg))
+			blkg = NULL;
+		spin_unlock_irq(q->queue_lock);
+	}
+
+	throtl = blk_throtl_bio(q, blkg, bio);
+
+	rcu_read_unlock();
+	return !throtl;
+}
+
 #else /* CONFIG_BLK_CGROUP */
 
 struct blkcg {

@@ -689,6 +722,9 @@ static inline void blk_put_rl(struct request_list *rl) { }
 static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
 static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
 
+static inline bool blkcg_bio_issue_check(struct request_queue *q,
+					 struct bio *bio) { return true; }
+
 #define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
