
Commit 56f570e

paulturner authored and Ingo Molnar committed
sched: Use jump labels to reduce overhead when bandwidth control is inactive
Now that the linkage of jump-labels has been fixed they show a measurable
improvement in overhead for the enabled-but-unused case.

Workload is:

  'taskset -c 0 perf stat --repeat 50 -e instructions,cycles,branches
   bash -c "for ((i=0;i<5;i++)); do $(dirname $0)/pipe-test 20000; done"'

There's a speedup for all situations:

                     instructions            cycles                  branches
------------------------------------------------------------------------------
Intel Westmere
base                 806611770               745895590               146765378
+jumplabel           803090165 (-0.44%)      713381840 (-4.36%)      144561130

AMD Barcelona
base                 824657415               740055589               148855354
+jumplabel           821056910 (-0.44%)      737558389 (-0.34%)      146635229

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20111108042736.560831357@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
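For context on the mechanism: a jump label compiles the disabled case down to a patchable no-op in the instruction stream, so cfs_bandwidth_used() costs essentially nothing until the first group enables a quota. Below is a minimal sketch of the pattern this commit applies, using the same era-specific API that appears in the diff (struct jump_label_key, static_branch(), jump_label_inc()/jump_label_dec()); the key and function names here are illustrative, not the commit's, and later kernels renamed this interface to static_key / static_key_false():

#include <linux/jump_label.h>

/* illustrative name; the commit's actual key is __cfs_bandwidth_used */
static struct jump_label_key my_feature_used;

static inline bool my_feature_enabled(void)
{
        /* resolves to a patched no-op while the key's count is zero */
        return static_branch(&my_feature_used);
}

void my_hot_path(void)
{
        if (!my_feature_enabled())
                return;         /* common case: no conditional branch is paid */
        /* ... rarely-needed slow path ... */
}

/* reference-counted enable/disable, as tg_set_cfs_bandwidth() does below */
void my_enable(void)  { jump_label_inc(&my_feature_used); }
void my_disable(void) { jump_label_dec(&my_feature_used); }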
1 parent fccfdc6 commit 56f570e

Showing 2 changed files with 43 additions and 5 deletions.


kernel/sched.c

Lines changed: 31 additions & 2 deletions
@@ -72,6 +72,7 @@
 #include <linux/ftrace.h>
 #include <linux/slab.h>
 #include <linux/init_task.h>
+#include <linux/jump_label.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -503,7 +504,32 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
         hrtimer_cancel(&cfs_b->period_timer);
         hrtimer_cancel(&cfs_b->slack_timer);
 }
-#else
+
+#ifdef HAVE_JUMP_LABEL
+static struct jump_label_key __cfs_bandwidth_used;
+
+static inline bool cfs_bandwidth_used(void)
+{
+        return static_branch(&__cfs_bandwidth_used);
+}
+
+static void account_cfs_bandwidth_used(int enabled, int was_enabled)
+{
+        /* only need to count groups transitioning between enabled/!enabled */
+        if (enabled && !was_enabled)
+                jump_label_inc(&__cfs_bandwidth_used);
+        else if (!enabled && was_enabled)
+                jump_label_dec(&__cfs_bandwidth_used);
+}
+#else /* !HAVE_JUMP_LABEL */
+/* static_branch doesn't help unless supported */
+static int cfs_bandwidth_used(void)
+{
+        return 1;
+}
+static void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
+#endif /* HAVE_JUMP_LABEL */
+#else /* !CONFIG_CFS_BANDWIDTH */
 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
@@ -9203,7 +9229,7 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
 
 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 {
-        int i, ret = 0, runtime_enabled;
+        int i, ret = 0, runtime_enabled, runtime_was_enabled;
         struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
 
         if (tg == &root_task_group)
@@ -9231,6 +9257,9 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
                 goto out_unlock;
 
         runtime_enabled = quota != RUNTIME_INF;
+        runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
+        account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
+
         raw_spin_lock_irq(&cfs_b->lock);
         cfs_b->period = ns_to_ktime(period);
         cfs_b->quota = quota;

kernel/sched_fair.c

Lines changed: 12 additions & 3 deletions
@@ -1421,21 +1421,21 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
 static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
                 unsigned long delta_exec)
 {
-        if (!cfs_rq->runtime_enabled)
+        if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
                 return;
 
         __account_cfs_rq_runtime(cfs_rq, delta_exec);
 }
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
 {
-        return cfs_rq->throttled;
+        return cfs_bandwidth_used() && cfs_rq->throttled;
 }
 
 /* check whether cfs_rq, or any parent, is throttled */
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
 {
-        return cfs_rq->throttle_count;
+        return cfs_bandwidth_used() && cfs_rq->throttle_count;
 }
 
 /*
@@ -1756,6 +1756,9 @@ static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
+        if (!cfs_bandwidth_used())
+                return;
+
         if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
                 return;
 
@@ -1801,6 +1804,9 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
  */
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 {
+        if (!cfs_bandwidth_used())
+                return;
+
         /* an active group must be handled by the update_curr()->put() path */
         if (!cfs_rq->runtime_enabled || cfs_rq->curr)
                 return;
@@ -1818,6 +1824,9 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
+        if (!cfs_bandwidth_used())
+                return;
+
         if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
                 return;
 