
Commit b5c0ce7

derkling authored and Ingo Molnar committed
sched/fair: Add lsub_positive() and use it consistently
The following pattern: var -= min_t(typeof(var), var, val); is used multiple times in fair.c. The existing sub_positive() already captures that pattern, but it also adds an explicit load-store to properly support lockless observations. In other cases the pattern above is used to update local, and/or not concurrently accessed, variables. Let's add a simpler version of sub_positive(), targeted at local variables updates, which gives the same readability benefits at calling sites, without enforcing {READ,WRITE}_ONCE() barriers. Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Dietmar Eggemann <dietmar.eggemann@arm.com> Cc: Juri Lelli <juri.lelli@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Morten Rasmussen <morten.rasmussen@arm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Quentin Perret <quentin.perret@arm.com> Cc: Steve Muckle <smuckle@google.com> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Todd Kjos <tkjos@google.com> Cc: Vincent Guittot <vincent.guittot@linaro.org> Link: https://lore.kernel.org/lkml/20181031184527.GA3178@hirez.programming.kicks-ass.net Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 92a801e commit b5c0ce7

File tree

1 file changed (+17, -7 lines)


kernel/sched/fair.c

Lines changed: 17 additions & 7 deletions
@@ -2734,6 +2734,17 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		WRITE_ONCE(*ptr, res);					\
 } while (0)
 
+/*
+ * Remove and clamp on negative, from a local variable.
+ *
+ * A variant of sub_positive(), which does not use explicit load-store
+ * and is thus optimized for local variable updates.
+ */
+#define lsub_positive(_ptr, _val) do {				\
+	typeof(_ptr) ptr = (_ptr);				\
+	*ptr -= min_t(typeof(*ptr), *ptr, _val);		\
+} while (0)
+
 #ifdef CONFIG_SMP
 static inline void
 enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -4639,7 +4650,7 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
 	cfs_b->distribute_running = 0;
 	throttled = !list_empty(&cfs_b->throttled_cfs_rq);
 
-	cfs_b->runtime -= min(runtime, cfs_b->runtime);
+	lsub_positive(&cfs_b->runtime, runtime);
 }
 
 /*
@@ -4773,7 +4784,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 
 	raw_spin_lock(&cfs_b->lock);
 	if (expires == cfs_b->runtime_expires)
-		cfs_b->runtime -= min(runtime, cfs_b->runtime);
+		lsub_positive(&cfs_b->runtime, runtime);
 	cfs_b->distribute_running = 0;
 	raw_spin_unlock(&cfs_b->lock);
 }
@@ -6240,7 +6251,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
 	util = READ_ONCE(cfs_rq->avg.util_avg);
 
 	/* Discount task's util from CPU's util */
-	util -= min_t(unsigned int, util, task_util(p));
+	lsub_positive(&util, task_util(p));
 
 	/*
 	 * Covered cases:
@@ -6289,10 +6300,9 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
 	 * properly fix the execl regression and it helps in further
 	 * reducing the chances for the above race.
 	 */
-	if (unlikely(task_on_rq_queued(p) || current == p)) {
-		estimated -= min_t(unsigned int, estimated,
-				   _task_util_est(p));
-	}
+	if (unlikely(task_on_rq_queued(p) || current == p))
+		lsub_positive(&estimated, _task_util_est(p));
+
 	util = max(util, estimated);
 }
 

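A minimal userspace sketch of the clamp-at-zero behaviour the diff introduces. The lsub_positive() body is copied from the hunk above; the simplified min_t() stand-in and the demo main() are not part of the patch and exist only to make the snippet build on its own (gcc or clang, since it relies on the typeof extension):

#include <stdio.h>

/* Simplified stand-in for the kernel's min_t(); enough for this demo. */
#define min_t(type, a, b)	((type)(a) < (type)(b) ? (type)(a) : (type)(b))

/* Same definition as the one added to kernel/sched/fair.c by this commit. */
#define lsub_positive(_ptr, _val) do {				\
	typeof(_ptr) ptr = (_ptr);				\
	*ptr -= min_t(typeof(*ptr), *ptr, _val);		\
} while (0)

int main(void)
{
	unsigned int util = 100;

	lsub_positive(&util, 30);	/* plain subtraction: 100 - 30 = 70 */
	printf("util = %u\n", util);

	lsub_positive(&util, 500);	/* would underflow, so it clamps to 0 */
	printf("util = %u\n", util);

	return 0;
}

Expected output is util = 70, then util = 0, the same result the open-coded "var -= min_t(typeof(var), var, val);" pattern produced before this cleanup.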