Skip to content

Commit b5f2954

Browse files
dennisszhou authored and axboe committed
blkcg: revert blkcg cleanups series
This reverts a series committed earlier due to null pointer exception bug report in [1]. It seems there are edge case interactions that I did not consider and will need some time to understand what causes the adverse interactions. The original series can be found in [2] with a follow up series in [3]. [1] https://www.spinics.net/lists/cgroups/msg20719.html [2] https://lore.kernel.org/lkml/20180911184137.35897-1-dennisszhou@gmail.com/ [3] https://lore.kernel.org/lkml/20181020185612.51587-1-dennis@kernel.org/ This reverts the following commits: d459d85, b2c3fa5, 101246e, b3b9f24, e2b0989, f0fcb3e, c839e7a, bdc2491, 74b7c02, 5bf9a1f, a7b39b4, 07b05bc, 49f4c2d, 27e6fa9 Signed-off-by: Dennis Zhou <dennis@kernel.org> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 153fcd5 commit b5f2954

File tree

22 files changed

+208
-403
lines changed

22 files changed

+208
-403
lines changed

Documentation/admin-guide/cgroup-v2.rst

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1857,10 +1857,8 @@ following two functions.
18571857

18581858
wbc_init_bio(@wbc, @bio)
18591859
Should be called for each bio carrying writeback data and
1860-
associates the bio with the inode's owner cgroup and the
1861-
corresponding request queue. This must be called after
1862-
a queue (device) has been associated with the bio and
1863-
before submission.
1860+
associates the bio with the inode's owner cgroup. Can be
1861+
called anytime between bio allocation and submission.
18641862

18651863
wbc_account_io(@wbc, @page, @bytes)
18661864
Should be called for each data segment being written out.
@@ -1879,7 +1877,7 @@ the configuration, the bio may be executed at a lower priority and if
18791877
the writeback session is holding shared resources, e.g. a journal
18801878
entry, may lead to priority inversion. There is no one easy solution
18811879
for the problem. Filesystems can try to work around specific problem
1882-
cases by skipping wbc_init_bio() or using bio_associate_create_blkg()
1880+
cases by skipping wbc_init_bio() or using bio_associate_blkcg()
18831881
directly.
18841882

18851883

block/bfq-cgroup.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -642,7 +642,7 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
642642
uint64_t serial_nr;
643643

644644
rcu_read_lock();
645-
serial_nr = __bio_blkcg(bio)->css.serial_nr;
645+
serial_nr = bio_blkcg(bio)->css.serial_nr;
646646

647647
/*
648648
* Check whether blkcg has changed. The condition may trigger
@@ -651,7 +651,7 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
651651
if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
652652
goto out;
653653

654-
bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
654+
bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
655655
/*
656656
* Update blkg_path for bfq_log_* functions. We cache this
657657
* path, and update it here, for the following

block/bfq-iosched.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4384,7 +4384,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
43844384

43854385
rcu_read_lock();
43864386

4387-
bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio));
4387+
bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
43884388
if (!bfqg) {
43894389
bfqq = &bfqd->oom_bfqq;
43904390
goto out;

block/bio.c

Lines changed: 47 additions & 127 deletions
Original file line numberDiff line numberDiff line change
@@ -609,9 +609,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
609609
bio->bi_iter = bio_src->bi_iter;
610610
bio->bi_io_vec = bio_src->bi_io_vec;
611611

612-
bio_clone_blkg_association(bio, bio_src);
613-
614-
blkcg_bio_issue_init(bio);
612+
bio_clone_blkcg_association(bio, bio_src);
615613
}
616614
EXPORT_SYMBOL(__bio_clone_fast);
617615

@@ -1956,151 +1954,69 @@ EXPORT_SYMBOL(bioset_init_from_src);
19561954

19571955
#ifdef CONFIG_BLK_CGROUP
19581956

1959-
/**
1960-
* bio_associate_blkg - associate a bio with the a blkg
1961-
* @bio: target bio
1962-
* @blkg: the blkg to associate
1963-
*
1964-
* This tries to associate @bio with the specified blkg. Association failure
1965-
* is handled by walking up the blkg tree. Therefore, the blkg associated can
1966-
* be anything between @blkg and the root_blkg. This situation only happens
1967-
* when a cgroup is dying and then the remaining bios will spill to the closest
1968-
* alive blkg.
1969-
*
1970-
* A reference will be taken on the @blkg and will be released when @bio is
1971-
* freed.
1972-
*/
1973-
int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
1974-
{
1975-
if (unlikely(bio->bi_blkg))
1976-
return -EBUSY;
1977-
bio->bi_blkg = blkg_tryget_closest(blkg);
1978-
return 0;
1979-
}
1980-
1981-
/**
1982-
* __bio_associate_blkg_from_css - internal blkg association function
1983-
*
1984-
* This in the core association function that all association paths rely on.
1985-
* A blkg reference is taken which is released upon freeing of the bio.
1986-
*/
1987-
static int __bio_associate_blkg_from_css(struct bio *bio,
1988-
struct cgroup_subsys_state *css)
1989-
{
1990-
struct request_queue *q = bio->bi_disk->queue;
1991-
struct blkcg_gq *blkg;
1992-
int ret;
1993-
1994-
rcu_read_lock();
1995-
1996-
if (!css || !css->parent)
1997-
blkg = q->root_blkg;
1998-
else
1999-
blkg = blkg_lookup_create(css_to_blkcg(css), q);
2000-
2001-
ret = bio_associate_blkg(bio, blkg);
2002-
2003-
rcu_read_unlock();
2004-
return ret;
2005-
}
2006-
2007-
/**
2008-
* bio_associate_blkg_from_css - associate a bio with a specified css
2009-
* @bio: target bio
2010-
* @css: target css
2011-
*
2012-
* Associate @bio with the blkg found by combining the css's blkg and the
2013-
* request_queue of the @bio. This falls back to the queue's root_blkg if
2014-
* the association fails with the css.
2015-
*/
2016-
int bio_associate_blkg_from_css(struct bio *bio,
2017-
struct cgroup_subsys_state *css)
2018-
{
2019-
if (unlikely(bio->bi_blkg))
2020-
return -EBUSY;
2021-
return __bio_associate_blkg_from_css(bio, css);
2022-
}
2023-
EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
2024-
20251957
#ifdef CONFIG_MEMCG
20261958
/**
2027-
* bio_associate_blkg_from_page - associate a bio with the page's blkg
1959+
* bio_associate_blkcg_from_page - associate a bio with the page's blkcg
20281960
* @bio: target bio
20291961
* @page: the page to lookup the blkcg from
20301962
*
2031-
* Associate @bio with the blkg from @page's owning memcg and the respective
2032-
* request_queue. If cgroup_e_css returns NULL, fall back to the queue's
2033-
* root_blkg.
2034-
*
2035-
* Note: this must be called after bio has an associated device.
1963+
* Associate @bio with the blkcg from @page's owning memcg. This works like
1964+
* every other associate function wrt references.
20361965
*/
2037-
int bio_associate_blkg_from_page(struct bio *bio, struct page *page)
1966+
int bio_associate_blkcg_from_page(struct bio *bio, struct page *page)
20381967
{
2039-
struct cgroup_subsys_state *css;
2040-
int ret;
1968+
struct cgroup_subsys_state *blkcg_css;
20411969

2042-
if (unlikely(bio->bi_blkg))
1970+
if (unlikely(bio->bi_css))
20431971
return -EBUSY;
20441972
if (!page->mem_cgroup)
20451973
return 0;
2046-
2047-
rcu_read_lock();
2048-
2049-
css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
2050-
2051-
ret = __bio_associate_blkg_from_css(bio, css);
2052-
2053-
rcu_read_unlock();
2054-
return ret;
1974+
blkcg_css = cgroup_get_e_css(page->mem_cgroup->css.cgroup,
1975+
&io_cgrp_subsys);
1976+
bio->bi_css = blkcg_css;
1977+
return 0;
20551978
}
20561979
#endif /* CONFIG_MEMCG */
20571980

20581981
/**
2059-
* bio_associate_create_blkg - associate a bio with a blkg from q
2060-
* @q: request_queue where bio is going
1982+
* bio_associate_blkcg - associate a bio with the specified blkcg
20611983
* @bio: target bio
1984+
* @blkcg_css: css of the blkcg to associate
1985+
*
1986+
* Associate @bio with the blkcg specified by @blkcg_css. Block layer will
1987+
* treat @bio as if it were issued by a task which belongs to the blkcg.
20621988
*
2063-
* Associate @bio with the blkg found from the bio's css and the request_queue.
2064-
* If one is not found, bio_lookup_blkg creates the blkg. This falls back to
2065-
* the queue's root_blkg if association fails.
1989+
* This function takes an extra reference of @blkcg_css which will be put
1990+
* when @bio is released. The caller must own @bio and is responsible for
1991+
* synchronizing calls to this function.
20661992
*/
2067-
int bio_associate_create_blkg(struct request_queue *q, struct bio *bio)
1993+
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
20681994
{
2069-
struct cgroup_subsys_state *css;
2070-
int ret = 0;
2071-
2072-
/* someone has already associated this bio with a blkg */
2073-
if (bio->bi_blkg)
2074-
return ret;
2075-
2076-
rcu_read_lock();
2077-
2078-
css = blkcg_css();
2079-
2080-
ret = __bio_associate_blkg_from_css(bio, css);
2081-
2082-
rcu_read_unlock();
2083-
return ret;
1995+
if (unlikely(bio->bi_css))
1996+
return -EBUSY;
1997+
css_get(blkcg_css);
1998+
bio->bi_css = blkcg_css;
1999+
return 0;
20842000
}
2001+
EXPORT_SYMBOL_GPL(bio_associate_blkcg);
20852002

20862003
/**
2087-
* bio_reassociate_blkg - reassociate a bio with a blkg from q
2088-
* @q: request_queue where bio is going
2004+
* bio_associate_blkg - associate a bio with the specified blkg
20892005
* @bio: target bio
2006+
* @blkg: the blkg to associate
20902007
*
2091-
* When submitting a bio, multiple recursive calls to make_request() may occur.
2092-
* This causes the initial associate done in blkcg_bio_issue_check() to be
2093-
* incorrect and reference the prior request_queue. This performs reassociation
2094-
* when this situation happens.
2008+
* Associate @bio with the blkg specified by @blkg. This is the queue specific
2009+
* blkcg information associated with the @bio, a reference will be taken on the
2010+
* @blkg and will be freed when the bio is freed.
20952011
*/
2096-
int bio_reassociate_blkg(struct request_queue *q, struct bio *bio)
2012+
int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
20972013
{
2098-
if (bio->bi_blkg) {
2099-
blkg_put(bio->bi_blkg);
2100-
bio->bi_blkg = NULL;
2101-
}
2102-
2103-
return bio_associate_create_blkg(q, bio);
2014+
if (unlikely(bio->bi_blkg))
2015+
return -EBUSY;
2016+
if (!blkg_try_get(blkg))
2017+
return -ENODEV;
2018+
bio->bi_blkg = blkg;
2019+
return 0;
21042020
}
21052021

21062022
/**
@@ -2113,23 +2029,27 @@ void bio_disassociate_task(struct bio *bio)
21132029
put_io_context(bio->bi_ioc);
21142030
bio->bi_ioc = NULL;
21152031
}
2032+
if (bio->bi_css) {
2033+
css_put(bio->bi_css);
2034+
bio->bi_css = NULL;
2035+
}
21162036
if (bio->bi_blkg) {
21172037
blkg_put(bio->bi_blkg);
21182038
bio->bi_blkg = NULL;
21192039
}
21202040
}
21212041

21222042
/**
2123-
* bio_clone_blkg_association - clone blkg association from src to dst bio
2043+
* bio_clone_blkcg_association - clone blkcg association from src to dst bio
21242044
* @dst: destination bio
21252045
* @src: source bio
21262046
*/
2127-
void bio_clone_blkg_association(struct bio *dst, struct bio *src)
2047+
void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
21282048
{
2129-
if (src->bi_blkg)
2130-
bio_associate_blkg(dst, src->bi_blkg);
2049+
if (src->bi_css)
2050+
WARN_ON(bio_associate_blkcg(dst, src->bi_css));
21312051
}
2132-
EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
2052+
EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
21332053
#endif /* CONFIG_BLK_CGROUP */
21342054

21352055
static void __init biovec_init_slabs(void)

0 commit comments

Comments (0)