Skip to content

Commit a7b39b4

Browse files
dennisszhou authored and axboe committed
blkcg: always associate a bio with a blkg
Previously, blkg's were only assigned as needed by blk-iolatency and blk-throttle. bio->css was also always being associated while blkg was being looked up and then thrown away in blkcg_bio_issue_check. This patch begins the cleanup of bio->css and bio->bi_blkg by always associating a blkg in blkcg_bio_issue_check. This tries to create the blkg, but if it is not possible, falls back to using the root_blkg of the request_queue. Therefore, a bio will always be associated with a blkg. The duplicate association logic is removed from blk-throttle and blk-iolatency. Signed-off-by: Dennis Zhou <dennisszhou@gmail.com> Acked-by: Tejun Heo <tj@kernel.org> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 07b05bc commit a7b39b4

File tree

5 files changed

+46
-40
lines changed

5 files changed

+46
-40
lines changed

block/bio.c

Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2028,6 +2028,41 @@ int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
20282028
return 0;
20292029
}
20302030

2031+
/**
2032+
* bio_associate_create_blkg - associate a bio with a blkg from q
2033+
* @q: request_queue where bio is going
2034+
* @bio: target bio
2035+
*
2036+
* Associate @bio with the blkg found from the bio's css and the request_queue.
2037+
* If one is not found, bio_lookup_blkg creates the blkg.
2038+
*/
2039+
int bio_associate_create_blkg(struct request_queue *q, struct bio *bio)
2040+
{
2041+
struct blkcg *blkcg;
2042+
struct blkcg_gq *blkg;
2043+
int ret = 0;
2044+
2045+
/* someone has already associated this bio with a blkg */
2046+
if (bio->bi_blkg)
2047+
return ret;
2048+
2049+
rcu_read_lock();
2050+
2051+
bio_associate_blkcg(bio, NULL);
2052+
blkcg = bio_blkcg(bio);
2053+
2054+
if (!blkcg->css.parent) {
2055+
ret = bio_associate_blkg(bio, q->root_blkg);
2056+
} else {
2057+
blkg = blkg_lookup_create(blkcg, q);
2058+
2059+
ret = bio_associate_blkg(bio, blkg);
2060+
}
2061+
2062+
rcu_read_unlock();
2063+
return ret;
2064+
}
2065+
20312066
/**
20322067
* bio_disassociate_task - undo bio_associate_current()
20332068
* @bio: target bio
@@ -2057,6 +2092,9 @@ void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
20572092
{
20582093
if (src->bi_css)
20592094
WARN_ON(bio_associate_blkcg(dst, src->bi_css));
2095+
2096+
if (src->bi_blkg)
2097+
bio_associate_blkg(dst, src->bi_blkg);
20602098
}
20612099
EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
20622100
#endif /* CONFIG_BLK_CGROUP */

block/blk-iolatency.c

Lines changed: 2 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -392,34 +392,14 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
392392
spinlock_t *lock)
393393
{
394394
struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
395-
struct blkcg *blkcg;
396-
struct blkcg_gq *blkg;
397-
struct request_queue *q = rqos->q;
395+
struct blkcg_gq *blkg = bio->bi_blkg;
398396
bool issue_as_root = bio_issue_as_root_blkg(bio);
399397

400398
if (!blk_iolatency_enabled(blkiolat))
401399
return;
402400

403-
rcu_read_lock();
404-
bio_associate_blkcg(bio, NULL);
405-
blkcg = bio_blkcg(bio);
406-
blkg = blkg_lookup(blkcg, q);
407-
if (unlikely(!blkg)) {
408-
if (!lock)
409-
spin_lock_irq(q->queue_lock);
410-
blkg = __blkg_lookup_create(blkcg, q);
411-
if (IS_ERR(blkg))
412-
blkg = NULL;
413-
if (!lock)
414-
spin_unlock_irq(q->queue_lock);
415-
}
416-
if (!blkg)
417-
goto out;
418-
419401
bio_issue_init(&bio->bi_issue, bio_sectors(bio));
420-
bio_associate_blkg(bio, blkg);
421-
out:
422-
rcu_read_unlock();
402+
423403
while (blkg && blkg->parent) {
424404
struct iolatency_grp *iolat = blkg_to_lat(blkg);
425405
if (!iolat) {

block/blk-throttle.c

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2118,9 +2118,6 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
21182118
/*
 * Initialize the bio's issue timestamp/size state used by low-limit
 * latency tracking. blkg association is no longer done here — per this
 * commit, bios are always associated with a blkg before reaching
 * blk-throttle, so the fallback-to-root logic has been removed.
 */
static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
{
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
#endif
}
@@ -2129,7 +2126,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
21292126
struct bio *bio)
21302127
{
21312128
struct throtl_qnode *qn = NULL;
2132-
struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
2129+
struct throtl_grp *tg = blkg_to_tg(blkg);
21332130
struct throtl_service_queue *sq;
21342131
bool rw = bio_data_dir(bio);
21352132
bool throttled = false;

include/linux/bio.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -542,11 +542,14 @@ static inline int bio_associate_blkcg_from_page(struct bio *bio,
542542
#ifdef CONFIG_BLK_CGROUP
543543
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
544544
int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg);
545+
int bio_associate_create_blkg(struct request_queue *q, struct bio *bio);
545546
void bio_disassociate_task(struct bio *bio);
546547
void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
547548
#else /* CONFIG_BLK_CGROUP */
548549
static inline int bio_associate_blkcg(struct bio *bio,
549550
struct cgroup_subsys_state *blkcg_css) { return 0; }
551+
static inline int bio_associate_create_blkg(struct request_queue *q,
552+
struct bio *bio) { return 0; }
550553
static inline void bio_disassociate_task(struct bio *bio) { }
551554
static inline void bio_clone_blkcg_association(struct bio *dst,
552555
struct bio *src) { }

include/linux/blk-cgroup.h

Lines changed: 2 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -900,29 +900,17 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg
900900
static inline bool blkcg_bio_issue_check(struct request_queue *q,
901901
struct bio *bio)
902902
{
903-
struct blkcg *blkcg;
904903
struct blkcg_gq *blkg;
905904
bool throtl = false;
906905

907906
rcu_read_lock();
908907

909-
/* associate blkcg if bio hasn't attached one */
910-
bio_associate_blkcg(bio, NULL);
911-
blkcg = bio_blkcg(bio);
912-
913-
blkg = blkg_lookup(blkcg, q);
914-
if (unlikely(!blkg)) {
915-
spin_lock_irq(q->queue_lock);
916-
blkg = __blkg_lookup_create(blkcg, q);
917-
if (IS_ERR(blkg))
918-
blkg = NULL;
919-
spin_unlock_irq(q->queue_lock);
920-
}
908+
bio_associate_create_blkg(q, bio);
909+
blkg = bio->bi_blkg;
921910

922911
throtl = blk_throtl_bio(q, blkg, bio);
923912

924913
if (!throtl) {
925-
blkg = blkg ?: q->root_blkg;
926914
/*
927915
* If the bio is flagged with BIO_QUEUE_ENTERED it means this
928916
* is a split bio and we would have already accounted for the

0 commit comments

Comments
 (0)