Skip to content

Commit 8f9bebc

Browse files
Algodev-github authored and axboe committed
block, bfq: access and cache blkg data only when safe
In blk-cgroup, operations on blkg objects are protected with the request_queue lock. This is no more the lock that protects I/O-scheduler operations in blk-mq. In fact, the latter are now protected with a finer-grained per-scheduler-instance lock. As a consequence, although blkg lookups are also rcu-protected, blk-mq I/O schedulers may see inconsistent data when they access blkg and blkg-related objects. BFQ does access these objects, and does incur this problem, in the following case. The blkg_lookup performed in bfq_get_queue, being protected (only) through rcu, may happen to return the address of a copy of the original blkg. If this is the case, then the blkg_get performed in bfq_get_queue, to pin down the blkg, is useless: it does not prevent blk-cgroup code from destroying both the original blkg and all objects directly or indirectly referred by the copy of the blkg. BFQ accesses these objects, which typically causes a crash for NULL-pointer dereference of memory-protection violation. Some additional protection mechanism should be added to blk-cgroup to address this issue. In the meantime, this commit provides a quick temporary fix for BFQ: cache (when safe) blkg data that might disappear right after a blkg_lookup. In particular, this commit exploits the following facts to achieve its goal without introducing further locks. Destroy operations on a blkg invoke, as a first step, hooks of the scheduler associated with the blkg. And these hooks are executed with bfqd->lock held for BFQ. As a consequence, for any blkg associated with the request queue an instance of BFQ is attached to, we are guaranteed that such a blkg is not destroyed, and that all the pointers it contains are consistent, while that instance is holding its bfqd->lock. A blkg_lookup performed with bfqd->lock held then returns a fully consistent blkg, which remains consistent until this lock is held. In more detail, this holds even if the returned blkg is a copy of the original one. 
Finally, also the object describing a group inside BFQ needs to be protected from destruction on the blkg_free of the original blkg (which invokes bfq_pd_free). This commit adds private refcounting for this object, to let it disappear only after no bfq_queue refers to it any longer. This commit also removes or updates some stale comments on locking issues related to blk-cgroup operations. Reported-by: Tomas Konir <tomas.konir@gmail.com> Reported-by: Lee Tibbert <lee.tibbert@gmail.com> Reported-by: Marco Piazza <mpiazza@gmail.com> Signed-off-by: Paolo Valente <paolo.valente@linaro.org> Tested-by: Tomas Konir <tomas.konir@gmail.com> Tested-by: Lee Tibbert <lee.tibbert@gmail.com> Tested-by: Marco Piazza <mpiazza@gmail.com> Signed-off-by: Jens Axboe <axboe@fb.com>
1 parent 85d0331 commit 8f9bebc

File tree

3 files changed

+105
-36
lines changed

3 files changed

+105
-36
lines changed

block/bfq-cgroup.c

Lines changed: 93 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ BFQG_FLAG_FNS(idling)
5252
BFQG_FLAG_FNS(empty)
5353
#undef BFQG_FLAG_FNS
5454

55-
/* This should be called with the queue_lock held. */
55+
/* This should be called with the scheduler lock held. */
5656
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
5757
{
5858
unsigned long long now;
@@ -67,7 +67,7 @@ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
6767
bfqg_stats_clear_waiting(stats);
6868
}
6969

70-
/* This should be called with the queue_lock held. */
70+
/* This should be called with the scheduler lock held. */
7171
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
7272
struct bfq_group *curr_bfqg)
7373
{
@@ -81,7 +81,7 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
8181
bfqg_stats_mark_waiting(stats);
8282
}
8383

84-
/* This should be called with the queue_lock held. */
84+
/* This should be called with the scheduler lock held. */
8585
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
8686
{
8787
unsigned long long now;
@@ -203,12 +203,30 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
203203

204204
static void bfqg_get(struct bfq_group *bfqg)
205205
{
206-
return blkg_get(bfqg_to_blkg(bfqg));
206+
bfqg->ref++;
207207
}
208208

209209
void bfqg_put(struct bfq_group *bfqg)
210210
{
211-
return blkg_put(bfqg_to_blkg(bfqg));
211+
bfqg->ref--;
212+
213+
if (bfqg->ref == 0)
214+
kfree(bfqg);
215+
}
216+
217+
static void bfqg_and_blkg_get(struct bfq_group *bfqg)
218+
{
219+
/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
220+
bfqg_get(bfqg);
221+
222+
blkg_get(bfqg_to_blkg(bfqg));
223+
}
224+
225+
void bfqg_and_blkg_put(struct bfq_group *bfqg)
226+
{
227+
bfqg_put(bfqg);
228+
229+
blkg_put(bfqg_to_blkg(bfqg));
212230
}
213231

214232
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
@@ -312,7 +330,11 @@ void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
312330
if (bfqq) {
313331
bfqq->ioprio = bfqq->new_ioprio;
314332
bfqq->ioprio_class = bfqq->new_ioprio_class;
315-
bfqg_get(bfqg);
333+
/*
334+
* Make sure that bfqg and its associated blkg do not
335+
* disappear before entity.
336+
*/
337+
bfqg_and_blkg_get(bfqg);
316338
}
317339
entity->parent = bfqg->my_entity; /* NULL for root group */
318340
entity->sched_data = &bfqg->sched_data;
@@ -399,6 +421,8 @@ struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
399421
return NULL;
400422
}
401423

424+
/* see comments in bfq_bic_update_cgroup for why refcounting */
425+
bfqg_get(bfqg);
402426
return &bfqg->pd;
403427
}
404428

@@ -426,7 +450,7 @@ void bfq_pd_free(struct blkg_policy_data *pd)
426450
struct bfq_group *bfqg = pd_to_bfqg(pd);
427451

428452
bfqg_stats_exit(&bfqg->stats);
429-
return kfree(bfqg);
453+
bfqg_put(bfqg);
430454
}
431455

432456
void bfq_pd_reset_stats(struct blkg_policy_data *pd)
@@ -496,9 +520,10 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
496520
* Move @bfqq to @bfqg, deactivating it from its old group and reactivating
497521
* it on the new one. Avoid putting the entity on the old group idle tree.
498522
*
499-
* Must be called under the queue lock; the cgroup owning @bfqg must
500-
* not disappear (by now this just means that we are called under
501-
* rcu_read_lock()).
523+
* Must be called under the scheduler lock, to make sure that the blkg
524+
* owning @bfqg does not disappear (see comments in
525+
* bfq_bic_update_cgroup on guaranteeing the consistency of blkg
526+
* objects).
502527
*/
503528
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
504529
struct bfq_group *bfqg)
@@ -519,16 +544,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
519544
bfq_deactivate_bfqq(bfqd, bfqq, false, false);
520545
else if (entity->on_st)
521546
bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
522-
bfqg_put(bfqq_group(bfqq));
547+
bfqg_and_blkg_put(bfqq_group(bfqq));
523548

524-
/*
525-
* Here we use a reference to bfqg. We don't need a refcounter
526-
* as the cgroup reference will not be dropped, so that its
527-
* destroy() callback will not be invoked.
528-
*/
529549
entity->parent = bfqg->my_entity;
530550
entity->sched_data = &bfqg->sched_data;
531-
bfqg_get(bfqg);
551+
/* pin down bfqg and its associated blkg */
552+
bfqg_and_blkg_get(bfqg);
532553

533554
if (bfq_bfqq_busy(bfqq)) {
534555
bfq_pos_tree_add_move(bfqd, bfqq);
@@ -545,8 +566,9 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
545566
* @bic: the bic to move.
546567
* @blkcg: the blk-cgroup to move to.
547568
*
548-
* Move bic to blkcg, assuming that bfqd->queue is locked; the caller
549-
* has to make sure that the reference to cgroup is valid across the call.
569+
* Move bic to blkcg, assuming that bfqd->lock is held; which makes
570+
* sure that the reference to cgroup is valid across the call (see
571+
* comments in bfq_bic_update_cgroup on this issue)
550572
*
551573
* NOTE: an alternative approach might have been to store the current
552574
* cgroup in bfqq and getting a reference to it, reducing the lookup
@@ -604,6 +626,57 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
604626
goto out;
605627

606628
bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
629+
/*
630+
* Update blkg_path for bfq_log_* functions. We cache this
631+
* path, and update it here, for the following
632+
* reasons. Operations on blkg objects in blk-cgroup are
633+
* protected with the request_queue lock, and not with the
634+
* lock that protects the instances of this scheduler
635+
* (bfqd->lock). This exposes BFQ to the following sort of
636+
* race.
637+
*
638+
* The blkg_lookup performed in bfq_get_queue, protected
639+
* through rcu, may happen to return the address of a copy of
640+
* the original blkg. If this is the case, then the
641+
* bfqg_and_blkg_get performed in bfq_get_queue, to pin down
642+
* the blkg, is useless: it does not prevent blk-cgroup code
643+
* from destroying both the original blkg and all objects
644+
* directly or indirectly referred by the copy of the
645+
* blkg.
646+
*
647+
* On the bright side, destroy operations on a blkg invoke, as
648+
* a first step, hooks of the scheduler associated with the
649+
* blkg. And these hooks are executed with bfqd->lock held for
650+
* BFQ. As a consequence, for any blkg associated with the
651+
* request queue this instance of the scheduler is attached
652+
* to, we are guaranteed that such a blkg is not destroyed, and
653+
* that all the pointers it contains are consistent, while we
654+
* are holding bfqd->lock. A blkg_lookup performed with
655+
* bfqd->lock held then returns a fully consistent blkg, which
656+
* remains consistent until this lock is held.
657+
*
658+
* Thanks to the last fact, and to the fact that: (1) bfqg has
659+
* been obtained through a blkg_lookup in the above
660+
* assignment, and (2) bfqd->lock is being held, here we can
661+
* safely use the policy data for the involved blkg (i.e., the
662+
* field bfqg->pd) to get to the blkg associated with bfqg,
663+
* and then we can safely use any field of blkg. After we
664+
* release bfqd->lock, even just getting blkg through this
665+
* bfqg may cause dangling references to be traversed, as
666+
* bfqg->pd may not exist any more.
667+
*
668+
* In view of the above facts, here we cache, in the bfqg, any
669+
* blkg data we may need for this bic, and for its associated
670+
* bfq_queue. As of now, we need to cache only the path of the
671+
* blkg, which is used in the bfq_log_* functions.
672+
*
673+
* Finally, note that bfqg itself needs to be protected from
674+
* destruction on the blkg_free of the original blkg (which
675+
* invokes bfq_pd_free). We use an additional private
676+
* refcounter for bfqg, to let it disappear only after no
677+
* bfq_queue refers to it any longer.
678+
*/
679+
blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
607680
bic->blkcg_serial_nr = serial_nr;
608681
out:
609682
rcu_read_unlock();
@@ -640,8 +713,6 @@ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
640713
* @bfqd: the device data structure with the root group.
641714
* @bfqg: the group to move from.
642715
* @st: the service tree with the entities.
643-
*
644-
* Needs queue_lock to be taken and reference to be valid over the call.
645716
*/
646717
static void bfq_reparent_active_entities(struct bfq_data *bfqd,
647718
struct bfq_group *bfqg,
@@ -692,8 +763,7 @@ void bfq_pd_offline(struct blkg_policy_data *pd)
692763
/*
693764
* The idle tree may still contain bfq_queues belonging
694765
* to exited task because they never migrated to a different
695-
* cgroup from the one being destroyed now. No one else
696-
* can access them so it's safe to act without any lock.
766+
* cgroup from the one being destroyed now.
697767
*/
698768
bfq_flush_idle_tree(st);
699769

block/bfq-iosched.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3665,7 +3665,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
36653665

36663666
kmem_cache_free(bfq_pool, bfqq);
36673667
#ifdef CONFIG_BFQ_GROUP_IOSCHED
3668-
bfqg_put(bfqg);
3668+
bfqg_and_blkg_put(bfqg);
36693669
#endif
36703670
}
36713671

block/bfq-iosched.h

Lines changed: 11 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -759,6 +759,12 @@ struct bfq_group {
759759
/* must be the first member */
760760
struct blkg_policy_data pd;
761761

762+
/* cached path for this blkg (see comments in bfq_bic_update_cgroup) */
763+
char blkg_path[128];
764+
765+
/* reference counter (see comments in bfq_bic_update_cgroup) */
766+
int ref;
767+
762768
struct bfq_entity entity;
763769
struct bfq_sched_data sched_data;
764770

@@ -838,7 +844,7 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
838844
struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
839845
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
840846
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
841-
void bfqg_put(struct bfq_group *bfqg);
847+
void bfqg_and_blkg_put(struct bfq_group *bfqg);
842848

843849
#ifdef CONFIG_BFQ_GROUP_IOSCHED
844850
extern struct cftype bfq_blkcg_legacy_files[];
@@ -910,20 +916,13 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);
910916
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
911917

912918
#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
913-
char __pbuf[128]; \
914-
\
915-
blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
916-
blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid, \
919+
blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid,\
917920
bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
918-
__pbuf, ##args); \
921+
bfqq_group(bfqq)->blkg_path, ##args); \
919922
} while (0)
920923

921-
#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
922-
char __pbuf[128]; \
923-
\
924-
blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
925-
blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \
926-
} while (0)
924+
#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) \
925+
blk_add_trace_msg((bfqd)->queue, "%s " fmt, (bfqg)->blkg_path, ##args)
927926

928927
#else /* CONFIG_BFQ_GROUP_IOSCHED */
929928

0 commit comments

Comments
 (0)