
Commit 4c69948

josephhz authored and axboe committed
blk-throttle: fix race between blkcg_bio_issue_check() and cgroup_rmdir()
We've triggered a WARNING in blk_throtl_bio() when throttling writeback io, which complains blkg->refcnt is already 0 when calling blkg_get(), and then kernel crashes with invalid page request. After investigating this issue, we've found it is caused by a race between blkcg_bio_issue_check() and cgroup_rmdir(), which is described below: writeback kworker cgroup_rmdir cgroup_destroy_locked kill_css css_killed_ref_fn css_killed_work_fn offline_css blkcg_css_offline blkcg_bio_issue_check rcu_read_lock blkg_lookup spin_trylock(q->queue_lock) blkg_destroy spin_unlock(q->queue_lock) blk_throtl_bio spin_lock_irq(q->queue_lock) ... spin_unlock_irq(q->queue_lock) rcu_read_unlock Since rcu can only prevent blkg from releasing when it is being used, the blkg->refcnt can be decreased to 0 during blkg_destroy() and schedule blkg release. Then trying to blkg_get() in blk_throtl_bio() will complains the WARNING. And then the corresponding blkg_put() will schedule blkg release again, which result in double free. This race is introduced by commit ae11889 ("blkcg: consolidate blkg creation in blkcg_bio_issue_check()"). Before this commit, it will lookup first and then try to lookup/create again with queue_lock. Since revive this logic is a bit drastic, so fix it by only offlining pd during blkcg_css_offline(), and move the rest destruction (especially blkg_put()) into blkcg_css_free(), which should be the right way as discussed. Fixes: ae11889 ("blkcg: consolidate blkg creation in blkcg_bio_issue_check()") Reported-by: Jiufei Xue <jiufei.xue@linux.alibaba.com> Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com> Acked-by: Tejun Heo <tj@kernel.org> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 5f990d3 commit 4c69948
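
To make the failure mode concrete, here is a minimal userspace C sketch of the refcount pattern the message describes (an illustration only, not kernel code: sketch_blkg, sketch_get() and sketch_put() are hypothetical stand-ins for struct blkcg_gq, blkg_get() and blkg_put()). Once the last reference is dropped and release is scheduled, a racing get fires the warning, and its matching put schedules release a second time:

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for struct blkcg_gq and its refcount. */
struct sketch_blkg {
        atomic_int refcnt;
        int releases_scheduled;  /* how many times release was queued */
};

/* Mirrors blkg_get(): taking a reference on a zero-ref object is a bug. */
static void sketch_get(struct sketch_blkg *g)
{
        if (atomic_load(&g->refcnt) == 0)
                fprintf(stderr, "WARNING: get on zero-ref blkg\n");
        atomic_fetch_add(&g->refcnt, 1);
}

/* Mirrors blkg_put(): the final put schedules the object's release. */
static void sketch_put(struct sketch_blkg *g)
{
        if (atomic_fetch_sub(&g->refcnt, 1) == 1)
                g->releases_scheduled++;  /* release queued here */
}

int main(void)
{
        struct sketch_blkg g = { .refcnt = 1, .releases_scheduled = 0 };

        sketch_put(&g);  /* blkg_destroy() drops the last reference */
        sketch_get(&g);  /* blk_throtl_bio() races in: fires the WARNING */
        sketch_put(&g);  /* the matching put queues release AGAIN */

        /* Two queued releases on one object: the double free in miniature. */
        printf("releases scheduled: %d\n", g.releases_scheduled);
        return 0;
}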

2 files changed (+63 lines, -16 lines)

block/blk-cgroup.c

Lines changed: 62 additions & 16 deletions
@@ -307,11 +307,28 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
         }
 }
 
+static void blkg_pd_offline(struct blkcg_gq *blkg)
+{
+        int i;
+
+        lockdep_assert_held(blkg->q->queue_lock);
+        lockdep_assert_held(&blkg->blkcg->lock);
+
+        for (i = 0; i < BLKCG_MAX_POLS; i++) {
+                struct blkcg_policy *pol = blkcg_policy[i];
+
+                if (blkg->pd[i] && !blkg->pd[i]->offline &&
+                    pol->pd_offline_fn) {
+                        pol->pd_offline_fn(blkg->pd[i]);
+                        blkg->pd[i]->offline = true;
+                }
+        }
+}
+
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
         struct blkcg *blkcg = blkg->blkcg;
         struct blkcg_gq *parent = blkg->parent;
-        int i;
 
         lockdep_assert_held(blkg->q->queue_lock);
         lockdep_assert_held(&blkcg->lock);
@@ -320,13 +337,6 @@ static void blkg_destroy(struct blkcg_gq *blkg)
         WARN_ON_ONCE(list_empty(&blkg->q_node));
         WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
 
-        for (i = 0; i < BLKCG_MAX_POLS; i++) {
-                struct blkcg_policy *pol = blkcg_policy[i];
-
-                if (blkg->pd[i] && pol->pd_offline_fn)
-                        pol->pd_offline_fn(blkg->pd[i]);
-        }
-
         if (parent) {
                 blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
                 blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
@@ -369,6 +379,7 @@ static void blkg_destroy_all(struct request_queue *q)
                 struct blkcg *blkcg = blkg->blkcg;
 
                 spin_lock(&blkcg->lock);
+                blkg_pd_offline(blkg);
                 blkg_destroy(blkg);
                 spin_unlock(&blkcg->lock);
         }
@@ -995,25 +1006,25 @@ static struct cftype blkcg_legacy_files[] = {
  * @css: css of interest
  *
  * This function is called when @css is about to go away and responsible
- * for shooting down all blkgs associated with @css. blkgs should be
- * removed while holding both q and blkcg locks. As blkcg lock is nested
- * inside q lock, this function performs reverse double lock dancing.
+ * for offlining all blkgs pd and killing all wbs associated with @css.
+ * blkgs pd offline should be done while holding both q and blkcg locks.
+ * As blkcg lock is nested inside q lock, this function performs reverse
+ * double lock dancing.
  *
  * This is the blkcg counterpart of ioc_release_fn().
  */
 static void blkcg_css_offline(struct cgroup_subsys_state *css)
 {
         struct blkcg *blkcg = css_to_blkcg(css);
+        struct blkcg_gq *blkg;
 
         spin_lock_irq(&blkcg->lock);
 
-        while (!hlist_empty(&blkcg->blkg_list)) {
-                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
-                                                    struct blkcg_gq, blkcg_node);
+        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                 struct request_queue *q = blkg->q;
 
                 if (spin_trylock(q->queue_lock)) {
-                        blkg_destroy(blkg);
+                        blkg_pd_offline(blkg);
                         spin_unlock(q->queue_lock);
                 } else {
                         spin_unlock_irq(&blkcg->lock);
@@ -1027,11 +1038,43 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css)
         wb_blkcg_offline(blkcg);
 }
 
+/**
+ * blkcg_destroy_all_blkgs - destroy all blkgs associated with a blkcg
+ * @blkcg: blkcg of interest
+ *
+ * This function is called when blkcg css is about to free and responsible for
+ * destroying all blkgs associated with @blkcg.
+ * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
+ * is nested inside q lock, this function performs reverse double lock dancing.
+ */
+static void blkcg_destroy_all_blkgs(struct blkcg *blkcg)
+{
+        spin_lock_irq(&blkcg->lock);
+        while (!hlist_empty(&blkcg->blkg_list)) {
+                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
+                                                    struct blkcg_gq,
+                                                    blkcg_node);
+                struct request_queue *q = blkg->q;
+
+                if (spin_trylock(q->queue_lock)) {
+                        blkg_destroy(blkg);
+                        spin_unlock(q->queue_lock);
+                } else {
+                        spin_unlock_irq(&blkcg->lock);
+                        cpu_relax();
+                        spin_lock_irq(&blkcg->lock);
+                }
+        }
+        spin_unlock_irq(&blkcg->lock);
+}
+
 static void blkcg_css_free(struct cgroup_subsys_state *css)
 {
         struct blkcg *blkcg = css_to_blkcg(css);
         int i;
 
+        blkcg_destroy_all_blkgs(blkcg);
+
         mutex_lock(&blkcg_pol_mutex);
 
         list_del(&blkcg->all_blkcgs_node);
@@ -1371,8 +1414,11 @@ void blkcg_deactivate_policy(struct request_queue *q,
                 spin_lock(&blkg->blkcg->lock);
 
                 if (blkg->pd[pol->plid]) {
-                        if (pol->pd_offline_fn)
+                        if (!blkg->pd[pol->plid]->offline &&
+                            pol->pd_offline_fn) {
                                 pol->pd_offline_fn(blkg->pd[pol->plid]);
+                                blkg->pd[pol->plid]->offline = true;
+                        }
                         pol->pd_free_fn(blkg->pd[pol->plid]);
                         blkg->pd[pol->plid] = NULL;
                 }
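
The retry loop in the new blkcg_destroy_all_blkgs() (like the one it replaces in blkcg_css_offline()) is the "reverse double lock dancing" the comments refer to: the function already holds the inner blkcg lock but needs the outer queue_lock, so it may only trylock it and must back off completely on failure to avoid an ABBA deadlock. A rough userspace analogue using POSIX mutexes (hypothetical names; the kernel uses spinlocks and cpu_relax() rather than sched_yield()):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

/* Stand-ins for blkcg->lock (the inner lock in the kernel's lock order)
 * and q->queue_lock (the outer one). */
static pthread_mutex_t blkcg_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void destroy_one_blkg_sketch(void)
{
        for (;;) {
                pthread_mutex_lock(&blkcg_lock);
                /* We acquire the locks in reverse order, so the outer
                 * lock may only be trylocked. */
                if (pthread_mutex_trylock(&queue_lock) == 0) {
                        printf("both locks held: blkg destroyed\n");
                        pthread_mutex_unlock(&queue_lock);
                        pthread_mutex_unlock(&blkcg_lock);
                        return;
                }
                /* Lost the race: drop the inner lock entirely and retry,
                 * mirroring the cpu_relax() loop in the patch. */
                pthread_mutex_unlock(&blkcg_lock);
                sched_yield();
        }
}

int main(void)
{
        destroy_one_blkg_sketch();
        return 0;
}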

include/linux/blk-cgroup.h

Lines changed: 1 addition & 0 deletions
@@ -88,6 +88,7 @@ struct blkg_policy_data {
         /* the blkg and policy id this per-policy data belongs to */
         struct blkcg_gq *blkg;
         int plid;
+        bool offline;
 };
 
 /*
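
This one new field is what allows pd_offline_fn() to be reached from both blkcg_css_offline() and blkcg_deactivate_policy() without running twice for the same policy data. A minimal sketch of that make-it-idempotent guard (hypothetical names, ignoring the q/blkcg locking that protects the flag in the kernel):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical pared-down version of struct blkg_policy_data. */
struct sketch_pd {
        bool offline;
};

static void sketch_pd_offline_fn(struct sketch_pd *pd)
{
        printf("offline callback runs\n");
}

/* The guard pattern the patch adds: offline a pd at most once. */
static void sketch_offline_once(struct sketch_pd *pd)
{
        if (!pd->offline) {
                sketch_pd_offline_fn(pd);
                pd->offline = true;
        }
}

int main(void)
{
        struct sketch_pd pd = { .offline = false };

        /* Both teardown paths may reach this point; the flag makes the
         * second call a no-op, so the callback prints only once. */
        sketch_offline_once(&pd);
        sketch_offline_once(&pd);
        return 0;
}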
