Skip to content

Commit acb0405

Browse files
Peter Zijlstra authored and Ingo Molnar committed
sched/clock: Fix hotplug crash
Mike reported that he could trigger the WARN_ON_ONCE() in set_sched_clock_stable() using hotplug. This exposed a fundamental problem with the interface, we should never mark the TSC stable if we ever find it to be unstable. Therefore set_sched_clock_stable() is a broken interface. The reason it existed is that not having it is a pain, it means all relevant architecture code needs to call clear_sched_clock_stable() where appropriate. Of the three architectures that select HAVE_UNSTABLE_SCHED_CLOCK ia64 and parisc are trivial in that they never called set_sched_clock_stable(), so add an unconditional call to clear_sched_clock_stable() to them. For x86 the story is a lot more involved, and what this patch tries to do is ensure we preserve the status quo. So even if Cyrix or Transmeta have usable TSC they never called set_sched_clock_stable() so they now get an explicit mark unstable. Reported-by: Mike Galbraith <efault@gmx.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Fixes: 9881b02 ("sched/clock: Delay switching sched_clock to stable") Link: http://lkml.kernel.org/r/20170119133633.GB6536@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 02cfdc9 commit acb0405

File tree

11 files changed

+33
-29
lines changed

11 files changed

+33
-29
lines changed

arch/ia64/kernel/setup.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -619,6 +619,8 @@ setup_arch (char **cmdline_p)
619619
check_sal_cache_flush();
620620
#endif
621621
paging_init();
622+
623+
clear_sched_clock_stable();
622624
}
623625

624626
/*

arch/parisc/kernel/setup.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@
3636
#undef PCI_DEBUG
3737
#include <linux/proc_fs.h>
3838
#include <linux/export.h>
39+
#include <linux/sched.h>
3940

4041
#include <asm/processor.h>
4142
#include <asm/sections.h>
@@ -176,6 +177,7 @@ void __init setup_arch(char **cmdline_p)
176177
conswitchp = &dummy_con; /* we use do_take_over_console() later ! */
177178
#endif
178179

180+
clear_sched_clock_stable();
179181
}
180182

181183
/*

arch/x86/kernel/cpu/amd.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -548,8 +548,10 @@ static void early_init_amd(struct cpuinfo_x86 *c)
548548
if (c->x86_power & (1 << 8)) {
549549
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
550550
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
551-
if (!check_tsc_unstable())
552-
set_sched_clock_stable();
551+
if (check_tsc_unstable())
552+
clear_sched_clock_stable();
553+
} else {
554+
clear_sched_clock_stable();
553555
}
554556

555557
/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */

arch/x86/kernel/cpu/centaur.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
1-
#include <linux/bitops.h>
2-
#include <linux/kernel.h>
1+
2+
#include <linux/sched.h>
33

44
#include <asm/cpufeature.h>
55
#include <asm/e820.h>
@@ -104,6 +104,8 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
104104
#ifdef CONFIG_X86_64
105105
set_cpu_cap(c, X86_FEATURE_SYSENTER32);
106106
#endif
107+
108+
clear_sched_clock_stable();
107109
}
108110

109111
static void init_centaur(struct cpuinfo_x86 *c)

arch/x86/kernel/cpu/common.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -83,6 +83,7 @@ static void default_init(struct cpuinfo_x86 *c)
8383
strcpy(c->x86_model_id, "386");
8484
}
8585
#endif
86+
clear_sched_clock_stable();
8687
}
8788

8889
static const struct cpu_dev default_cpu = {
@@ -1055,6 +1056,8 @@ static void identify_cpu(struct cpuinfo_x86 *c)
10551056
*/
10561057
if (this_cpu->c_init)
10571058
this_cpu->c_init(c);
1059+
else
1060+
clear_sched_clock_stable();
10581061

10591062
/* Disable the PN if appropriate */
10601063
squash_the_stupid_serial_number(c);

arch/x86/kernel/cpu/cyrix.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
#include <asm/pci-direct.h>
1010
#include <asm/tsc.h>
1111
#include <asm/cpufeature.h>
12+
#include <linux/sched.h>
1213

1314
#include "cpu.h"
1415

@@ -183,6 +184,7 @@ static void early_init_cyrix(struct cpuinfo_x86 *c)
183184
set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
184185
break;
185186
}
187+
clear_sched_clock_stable();
186188
}
187189

188190
static void init_cyrix(struct cpuinfo_x86 *c)

arch/x86/kernel/cpu/intel.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -124,8 +124,10 @@ static void early_init_intel(struct cpuinfo_x86 *c)
124124
if (c->x86_power & (1 << 8)) {
125125
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
126126
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
127-
if (!check_tsc_unstable())
128-
set_sched_clock_stable();
127+
if (check_tsc_unstable())
128+
clear_sched_clock_stable();
129+
} else {
130+
clear_sched_clock_stable();
129131
}
130132

131133
/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */

arch/x86/kernel/cpu/transmeta.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
#include <linux/kernel.h>
2+
#include <linux/sched.h>
23
#include <linux/mm.h>
34
#include <asm/cpufeature.h>
45
#include <asm/msr.h>
@@ -14,6 +15,8 @@ static void early_init_transmeta(struct cpuinfo_x86 *c)
1415
if (xlvl >= 0x80860001)
1516
c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001);
1617
}
18+
19+
clear_sched_clock_stable();
1720
}
1821

1922
static void init_transmeta(struct cpuinfo_x86 *c)

arch/x86/kernel/kvmclock.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -107,12 +107,12 @@ static inline void kvm_sched_clock_init(bool stable)
107107
{
108108
if (!stable) {
109109
pv_time_ops.sched_clock = kvm_clock_read;
110+
clear_sched_clock_stable();
110111
return;
111112
}
112113

113114
kvm_sched_clock_offset = kvm_clock_read();
114115
pv_time_ops.sched_clock = kvm_sched_clock_read;
115-
set_sched_clock_stable();
116116

117117
printk(KERN_INFO "kvm-clock: using sched offset of %llu cycles\n",
118118
kvm_sched_clock_offset);

include/linux/sched.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2547,7 +2547,6 @@ extern void sched_clock_init_late(void);
25472547
* is reliable after all:
25482548
*/
25492549
extern int sched_clock_stable(void);
2550-
extern void set_sched_clock_stable(void);
25512550
extern void clear_sched_clock_stable(void);
25522551

25532552
extern void sched_clock_tick(void);

kernel/sched/clock.c

Lines changed: 8 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -83,8 +83,15 @@ void sched_clock_init(void)
8383
}
8484

8585
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
86+
/*
87+
* We must start with !__sched_clock_stable because the unstable -> stable
88+
* transition is accurate, while the stable -> unstable transition is not.
89+
*
90+
* Similarly we start with __sched_clock_stable_early, thereby assuming we
91+
* will become stable, such that there's only a single 1 -> 0 transition.
92+
*/
8693
static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
87-
static int __sched_clock_stable_early;
94+
static int __sched_clock_stable_early = 1;
8895

8996
/*
9097
* We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset
@@ -132,24 +139,6 @@ static void __set_sched_clock_stable(void)
132139
tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
133140
}
134141

135-
void set_sched_clock_stable(void)
136-
{
137-
__sched_clock_stable_early = 1;
138-
139-
smp_mb(); /* matches sched_clock_init_late() */
140-
141-
/*
142-
* This really should only be called early (before
143-
* sched_clock_init_late()) when guestimating our sched_clock() is
144-
* solid.
145-
*
146-
* After that we test stability and we can negate our guess using
147-
* clear_sched_clock_stable, possibly from a watchdog.
148-
*/
149-
if (WARN_ON_ONCE(sched_clock_running == 2))
150-
__set_sched_clock_stable();
151-
}
152-
153142
static void __clear_sched_clock_stable(struct work_struct *work)
154143
{
155144
struct sched_clock_data *scd = this_scd();
@@ -199,8 +188,6 @@ void sched_clock_init_late(void)
199188

200189
if (__sched_clock_stable_early)
201190
__set_sched_clock_stable();
202-
else
203-
__clear_sched_clock_stable(NULL);
204191
}
205192

206193
/*

0 commit comments

Comments
 (0)