Skip to content

Commit e6e7214

Browse files
committed
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Misc fixes: cputime fixes, two deadline scheduler fixes and a cgroups
  scheduling fix"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/cputime: Fix omitted ticks passed in parameter
  sched/cputime: Fix steal time accounting
  sched/deadline: Fix lock pinning warning during CPU hotplug
  sched/cputime: Mitigate performance regression in times()/clock_gettime()
  sched/fair: Fix typo in sync_throttle()
  sched/deadline: Fix wrap-around in DL heap
2 parents ad83242 + 26f2c75 commit e6e7214

File tree

5 files changed

+34
-4
lines changed

5 files changed

+34
-4
lines changed

kernel/sched/core.c

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,7 @@
7474
#include <linux/context_tracking.h>
7575
#include <linux/compiler.h>
7676
#include <linux/frame.h>
77+
#include <linux/prefetch.h>
7778

7879
#include <asm/switch_to.h>
7980
#include <asm/tlb.h>
@@ -2971,6 +2972,23 @@ DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
29712972
EXPORT_PER_CPU_SYMBOL(kstat);
29722973
EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
29732974

2975+
/*
2976+
* The function fair_sched_class.update_curr accesses the struct curr
2977+
* and its field curr->exec_start; when called from task_sched_runtime(),
2978+
* we observe a high rate of cache misses in practice.
2979+
* Prefetching this data results in improved performance.
2980+
*/
2981+
static inline void prefetch_curr_exec_start(struct task_struct *p)
2982+
{
2983+
#ifdef CONFIG_FAIR_GROUP_SCHED
2984+
struct sched_entity *curr = (&p->se)->cfs_rq->curr;
2985+
#else
2986+
struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
2987+
#endif
2988+
prefetch(curr);
2989+
prefetch(&curr->exec_start);
2990+
}
2991+
29742992
/*
29752993
* Return accounted runtime for the task.
29762994
* In case the task is currently running, return the runtime plus current's
@@ -3005,6 +3023,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
30053023
* thread, breaking clock_gettime().
30063024
*/
30073025
if (task_current(rq, p) && task_on_rq_queued(p)) {
3026+
prefetch_curr_exec_start(p);
30083027
update_rq_clock(rq);
30093028
p->sched_class->update_curr(rq);
30103029
}

kernel/sched/cpudeadline.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -168,7 +168,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
168168

169169
if (old_idx == IDX_INVALID) {
170170
cp->size++;
171-
cp->elements[cp->size - 1].dl = 0;
171+
cp->elements[cp->size - 1].dl = dl;
172172
cp->elements[cp->size - 1].cpu = cpu;
173173
cp->elements[cpu].idx = cp->size - 1;
174174
cpudl_change_key(cp, cp->size - 1, dl);

kernel/sched/cputime.c

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -508,13 +508,21 @@ void account_process_tick(struct task_struct *p, int user_tick)
508508
*/
509509
void account_idle_ticks(unsigned long ticks)
510510
{
511+
cputime_t cputime, steal;
511512

512513
if (sched_clock_irqtime) {
513514
irqtime_account_idle_ticks(ticks);
514515
return;
515516
}
516517

517-
account_idle_time(jiffies_to_cputime(ticks));
518+
cputime = jiffies_to_cputime(ticks);
519+
steal = steal_account_process_time(cputime);
520+
521+
if (steal >= cputime)
522+
return;
523+
524+
cputime -= steal;
525+
account_idle_time(cputime);
518526
}
519527

520528
/*

kernel/sched/deadline.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -658,8 +658,11 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
658658
*
659659
* XXX figure out if select_task_rq_dl() deals with offline cpus.
660660
*/
661-
if (unlikely(!rq->online))
661+
if (unlikely(!rq->online)) {
662+
lockdep_unpin_lock(&rq->lock, rf.cookie);
662663
rq = dl_task_offline_migration(rq, p);
664+
rf.cookie = lockdep_pin_lock(&rq->lock);
665+
}
663666

664667
/*
665668
* Queueing this task back might have overloaded rq, check if we need

kernel/sched/fair.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4269,7 +4269,7 @@ static void sync_throttle(struct task_group *tg, int cpu)
42694269
pcfs_rq = tg->parent->cfs_rq[cpu];
42704270

42714271
cfs_rq->throttle_count = pcfs_rq->throttle_count;
4272-
pcfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
4272+
cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
42734273
}
42744274

42754275
/* conditionally throttle active cfs_rq's from put_prev_entity() */

0 commit comments

Comments
 (0)