Skip to content

Commit 13e091b

Browse files
committed
Merge branch 'x86-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 timer updates from Thomas Gleixner: "Early TSC based time stamping to allow better boot time analysis. This comes with a general cleanup of the TSC calibration code which grew warts and duct taping over the years and removes 250 lines of code. Initiated and mostly implemented by Pavel with help from various folks" * 'x86-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (37 commits) x86/kvmclock: Mark kvm_get_preset_lpj() as __init x86/tsc: Consolidate init code sched/clock: Disable interrupts when calling generic_sched_clock_init() timekeeping: Prevent false warning when persistent clock is not available sched/clock: Close a hole in sched_clock_init() x86/tsc: Make use of tsc_calibrate_cpu_early() x86/tsc: Split native_calibrate_cpu() into early and late parts sched/clock: Use static key for sched_clock_running sched/clock: Enable sched clock early sched/clock: Move sched clock initialization and merge with generic clock x86/tsc: Use TSC as sched clock early x86/tsc: Initialize cyc2ns when tsc frequency is determined x86/tsc: Calibrate tsc only once ARM/time: Remove read_boot_clock64() s390/time: Remove read_boot_clock64() timekeeping: Default boot time offset to local_clock() timekeeping: Replace read_boot_clock64() with read_persistent_wall_and_boot_offset() s390/time: Add read_persistent_wall_and_boot_offset() x86/xen/time: Output xen sched_clock time from 0 x86/xen/time: Initialize pv xen time in init_hypervisor_platform() ...
2 parents eac3411 + 1088c6e commit 13e091b

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

41 files changed

+508
-754
lines changed

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2835,8 +2835,6 @@
28352835

28362836
nosync [HW,M68K] Disables sync negotiation for all devices.
28372837

2838-
notsc [BUGS=X86-32] Disable Time Stamp Counter
2839-
28402838
nowatchdog [KNL] Disable both lockup detectors, i.e.
28412839
soft-lockup and NMI watchdog (hard-lockup).
28422840

Documentation/x86/x86_64/boot-options.txt

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -92,9 +92,7 @@ APICs
9292
Timing
9393

9494
notsc
95-
Don't use the CPU time stamp counter to read the wall time.
96-
This can be used to work around timing problems on multiprocessor systems
97-
with not properly synchronized CPUs.
95+
Deprecated, use tsc=unstable instead.
9896

9997
nohpet
10098
Don't use the HPET timer.

arch/arm/include/asm/mach/time.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,6 @@
1313
extern void timer_tick(void);
1414

1515
typedef void (*clock_access_fn)(struct timespec64 *);
16-
extern int register_persistent_clock(clock_access_fn read_boot,
17-
clock_access_fn read_persistent);
16+
extern int register_persistent_clock(clock_access_fn read_persistent);
1817

1918
#endif

arch/arm/kernel/time.c

Lines changed: 2 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -83,29 +83,18 @@ static void dummy_clock_access(struct timespec64 *ts)
8383
}
8484

8585
static clock_access_fn __read_persistent_clock = dummy_clock_access;
86-
static clock_access_fn __read_boot_clock = dummy_clock_access;
8786

8887
void read_persistent_clock64(struct timespec64 *ts)
8988
{
9089
__read_persistent_clock(ts);
9190
}
9291

93-
void read_boot_clock64(struct timespec64 *ts)
94-
{
95-
__read_boot_clock(ts);
96-
}
97-
98-
int __init register_persistent_clock(clock_access_fn read_boot,
99-
clock_access_fn read_persistent)
92+
int __init register_persistent_clock(clock_access_fn read_persistent)
10093
{
10194
/* Only allow the clockaccess functions to be registered once */
102-
if (__read_persistent_clock == dummy_clock_access &&
103-
__read_boot_clock == dummy_clock_access) {
104-
if (read_boot)
105-
__read_boot_clock = read_boot;
95+
if (__read_persistent_clock == dummy_clock_access) {
10696
if (read_persistent)
10797
__read_persistent_clock = read_persistent;
108-
10998
return 0;
11099
}
111100

arch/arm/plat-omap/counter_32k.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,7 @@ int __init omap_init_clocksource_32k(void __iomem *vbase)
110110
}
111111

112112
sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
113-
register_persistent_clock(NULL, omap_read_persistent_clock64);
113+
register_persistent_clock(omap_read_persistent_clock64);
114114
pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
115115

116116
return 0;

arch/s390/kernel/time.c

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -221,17 +221,22 @@ void read_persistent_clock64(struct timespec64 *ts)
221221
ext_to_timespec64(clk, ts);
222222
}
223223

224-
void read_boot_clock64(struct timespec64 *ts)
224+
void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
225+
struct timespec64 *boot_offset)
225226
{
226227
unsigned char clk[STORE_CLOCK_EXT_SIZE];
228+
struct timespec64 boot_time;
227229
__u64 delta;
228230

229231
delta = initial_leap_seconds + TOD_UNIX_EPOCH;
230-
memcpy(clk, tod_clock_base, 16);
231-
*(__u64 *) &clk[1] -= delta;
232-
if (*(__u64 *) &clk[1] > delta)
232+
memcpy(clk, tod_clock_base, STORE_CLOCK_EXT_SIZE);
233+
*(__u64 *)&clk[1] -= delta;
234+
if (*(__u64 *)&clk[1] > delta)
233235
clk[0]--;
234-
ext_to_timespec64(clk, ts);
236+
ext_to_timespec64(clk, &boot_time);
237+
238+
read_persistent_clock64(wall_time);
239+
*boot_offset = timespec64_sub(*wall_time, boot_time);
235240
}
236241

237242
static u64 read_tod_clock(struct clocksource *cs)

arch/x86/include/asm/intel-family.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -76,4 +76,17 @@
7676
#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
7777
#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */
7878

79+
/* Useful macros */
80+
#define INTEL_CPU_FAM_ANY(_family, _model, _driver_data) \
81+
{ \
82+
.vendor = X86_VENDOR_INTEL, \
83+
.family = _family, \
84+
.model = _model, \
85+
.feature = X86_FEATURE_ANY, \
86+
.driver_data = (kernel_ulong_t)&_driver_data \
87+
}
88+
89+
#define INTEL_CPU_FAM6(_model, _driver_data) \
90+
INTEL_CPU_FAM_ANY(6, INTEL_FAM6_##_model, _driver_data)
91+
7992
#endif /* _ASM_X86_INTEL_FAMILY_H */

arch/x86/include/asm/intel-mid.h

Lines changed: 0 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -80,35 +80,6 @@ enum intel_mid_cpu_type {
8080

8181
extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
8282

83-
/**
84-
* struct intel_mid_ops - Interface between intel-mid & sub archs
85-
* @arch_setup: arch_setup function to re-initialize platform
86-
* structures (x86_init, x86_platform_init)
87-
*
88-
* This structure can be extended if any new interface is required
89-
* between intel-mid & its sub arch files.
90-
*/
91-
struct intel_mid_ops {
92-
void (*arch_setup)(void);
93-
};
94-
95-
/* Helper API's for INTEL_MID_OPS_INIT */
96-
#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid) \
97-
[cpuid] = get_##cpuname##_ops
98-
99-
/* Maximum number of CPU ops */
100-
#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *))
101-
102-
/*
103-
* For every new cpu addition, a weak get_<cpuname>_ops() function needs be
104-
* declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h.
105-
*/
106-
#define INTEL_MID_OPS_INIT { \
107-
DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \
108-
DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \
109-
DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \
110-
};
111-
11283
#ifdef CONFIG_X86_INTEL_MID
11384

11485
static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void)
@@ -136,20 +107,6 @@ enum intel_mid_timer_options {
136107

137108
extern enum intel_mid_timer_options intel_mid_timer_options;
138109

139-
/*
140-
* Penwell uses spread spectrum clock, so the freq number is not exactly
141-
* the same as reported by MSR based on SDM.
142-
*/
143-
#define FSB_FREQ_83SKU 83200
144-
#define FSB_FREQ_100SKU 99840
145-
#define FSB_FREQ_133SKU 133000
146-
147-
#define FSB_FREQ_167SKU 167000
148-
#define FSB_FREQ_200SKU 200000
149-
#define FSB_FREQ_267SKU 267000
150-
#define FSB_FREQ_333SKU 333000
151-
#define FSB_FREQ_400SKU 400000
152-
153110
/* Bus Select SoC Fuse value */
154111
#define BSEL_SOC_FUSE_MASK 0x7
155112
/* FSB 133MHz */

arch/x86/include/asm/kvm_guest.h

Lines changed: 0 additions & 7 deletions
This file was deleted.

arch/x86/include/asm/kvm_para.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77
#include <uapi/asm/kvm_para.h>
88

99
extern void kvmclock_init(void);
10-
extern int kvm_register_clock(char *txt);
1110

1211
#ifdef CONFIG_KVM_GUEST
1312
bool kvm_check_and_clear_guest_paused(void);

arch/x86/include/asm/text-patching.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,5 +37,6 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len);
3737
extern void *text_poke(void *addr, const void *opcode, size_t len);
3838
extern int poke_int3_handler(struct pt_regs *regs);
3939
extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
40+
extern int after_bootmem;
4041

4142
#endif /* _ASM_X86_TEXT_PATCHING_H */

arch/x86/include/asm/tsc.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -33,13 +33,13 @@ static inline cycles_t get_cycles(void)
3333
extern struct system_counterval_t convert_art_to_tsc(u64 art);
3434
extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);
3535

36-
extern void tsc_early_delay_calibrate(void);
36+
extern void tsc_early_init(void);
3737
extern void tsc_init(void);
3838
extern void mark_tsc_unstable(char *reason);
3939
extern int unsynchronized_tsc(void);
4040
extern int check_tsc_unstable(void);
4141
extern void mark_tsc_async_resets(char *reason);
42-
extern unsigned long native_calibrate_cpu(void);
42+
extern unsigned long native_calibrate_cpu_early(void);
4343
extern unsigned long native_calibrate_tsc(void);
4444
extern unsigned long long native_sched_clock_from_tsc(u64 tsc);
4545

arch/x86/kernel/alternative.c

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -668,6 +668,7 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
668668
local_irq_save(flags);
669669
memcpy(addr, opcode, len);
670670
local_irq_restore(flags);
671+
sync_core();
671672
/* Could also do a CLFLUSH here to speed up CPU recovery; but
672673
that causes hangs on some VIA CPUs. */
673674
return addr;
@@ -693,6 +694,12 @@ void *text_poke(void *addr, const void *opcode, size_t len)
693694
struct page *pages[2];
694695
int i;
695696

697+
/*
698+
 * While boot memory allocator is running we cannot use struct
699+
* pages as they are not yet initialized.
700+
*/
701+
BUG_ON(!after_bootmem);
702+
696703
if (!core_kernel_text((unsigned long)addr)) {
697704
pages[0] = vmalloc_to_page(addr);
698705
pages[1] = vmalloc_to_page(addr + PAGE_SIZE);

arch/x86/kernel/cpu/amd.c

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -232,8 +232,6 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
232232
}
233233
}
234234

235-
set_cpu_cap(c, X86_FEATURE_K7);
236-
237235
/* calling is from identify_secondary_cpu() ? */
238236
if (!c->cpu_index)
239237
return;
@@ -617,6 +615,14 @@ static void early_init_amd(struct cpuinfo_x86 *c)
617615

618616
early_init_amd_mc(c);
619617

618+
#ifdef CONFIG_X86_32
619+
if (c->x86 == 6)
620+
set_cpu_cap(c, X86_FEATURE_K7);
621+
#endif
622+
623+
if (c->x86 >= 0xf)
624+
set_cpu_cap(c, X86_FEATURE_K8);
625+
620626
rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
621627

622628
/*
@@ -863,9 +869,6 @@ static void init_amd(struct cpuinfo_x86 *c)
863869

864870
init_amd_cacheinfo(c);
865871

866-
if (c->x86 >= 0xf)
867-
set_cpu_cap(c, X86_FEATURE_K8);
868-
869872
if (cpu_has(c, X86_FEATURE_XMM2)) {
870873
unsigned long long val;
871874
int ret;

arch/x86/kernel/cpu/common.c

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -1018,6 +1018,24 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
10181018
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
10191019
}
10201020

1021+
/*
1022+
* The NOPL instruction is supposed to exist on all CPUs of family >= 6;
1023+
* unfortunately, that's not true in practice because of early VIA
1024+
* chips and (more importantly) broken virtualizers that are not easy
1025+
* to detect. In the latter case it doesn't even *fail* reliably, so
1026+
* probing for it doesn't even work. Disable it completely on 32-bit
1027+
* unless we can find a reliable way to detect all the broken cases.
1028+
* Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
1029+
*/
1030+
static void detect_nopl(void)
1031+
{
1032+
#ifdef CONFIG_X86_32
1033+
setup_clear_cpu_cap(X86_FEATURE_NOPL);
1034+
#else
1035+
setup_force_cpu_cap(X86_FEATURE_NOPL);
1036+
#endif
1037+
}
1038+
10211039
/*
10221040
* Do minimum CPU detection early.
10231041
* Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -1092,6 +1110,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
10921110
*/
10931111
if (!pgtable_l5_enabled())
10941112
setup_clear_cpu_cap(X86_FEATURE_LA57);
1113+
1114+
detect_nopl();
10951115
}
10961116

10971117
void __init early_cpu_init(void)
@@ -1127,24 +1147,6 @@ void __init early_cpu_init(void)
11271147
early_identify_cpu(&boot_cpu_data);
11281148
}
11291149

1130-
/*
1131-
* The NOPL instruction is supposed to exist on all CPUs of family >= 6;
1132-
* unfortunately, that's not true in practice because of early VIA
1133-
* chips and (more importantly) broken virtualizers that are not easy
1134-
* to detect. In the latter case it doesn't even *fail* reliably, so
1135-
* probing for it doesn't even work. Disable it completely on 32-bit
1136-
* unless we can find a reliable way to detect all the broken cases.
1137-
* Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
1138-
*/
1139-
static void detect_nopl(struct cpuinfo_x86 *c)
1140-
{
1141-
#ifdef CONFIG_X86_32
1142-
clear_cpu_cap(c, X86_FEATURE_NOPL);
1143-
#else
1144-
set_cpu_cap(c, X86_FEATURE_NOPL);
1145-
#endif
1146-
}
1147-
11481150
static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
11491151
{
11501152
#ifdef CONFIG_X86_64
@@ -1207,8 +1209,6 @@ static void generic_identify(struct cpuinfo_x86 *c)
12071209

12081210
get_model_name(c); /* Default name */
12091211

1210-
detect_nopl(c);
1211-
12121212
detect_null_seg_behavior(c);
12131213

12141214
/*

arch/x86/kernel/jump_label.c

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -37,15 +37,18 @@ static void bug_at(unsigned char *ip, int line)
3737
BUG();
3838
}
3939

40-
static void __jump_label_transform(struct jump_entry *entry,
41-
enum jump_label_type type,
42-
void *(*poker)(void *, const void *, size_t),
43-
int init)
40+
static void __ref __jump_label_transform(struct jump_entry *entry,
41+
enum jump_label_type type,
42+
void *(*poker)(void *, const void *, size_t),
43+
int init)
4444
{
4545
union jump_code_union code;
4646
const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
4747
const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
4848

49+
if (early_boot_irqs_disabled)
50+
poker = text_poke_early;
51+
4952
if (type == JUMP_LABEL_JMP) {
5053
if (init) {
5154
/*

arch/x86/kernel/kvm.c

Lines changed: 1 addition & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,6 @@
4545
#include <asm/apic.h>
4646
#include <asm/apicdef.h>
4747
#include <asm/hypervisor.h>
48-
#include <asm/kvm_guest.h>
4948

5049
static int kvmapf = 1;
5150

@@ -66,15 +65,6 @@ static int __init parse_no_stealacc(char *arg)
6665

6766
early_param("no-steal-acc", parse_no_stealacc);
6867

69-
static int kvmclock_vsyscall = 1;
70-
static int __init parse_no_kvmclock_vsyscall(char *arg)
71-
{
72-
kvmclock_vsyscall = 0;
73-
return 0;
74-
}
75-
76-
early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
77-
7868
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
7969
static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
8070
static int has_steal_clock = 0;
@@ -560,9 +550,6 @@ static void __init kvm_guest_init(void)
560550
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
561551
apic_set_eoi_write(kvm_guest_apic_eoi_write);
562552

563-
if (kvmclock_vsyscall)
564-
kvm_setup_vsyscall_timeinfo();
565-
566553
#ifdef CONFIG_SMP
567554
smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
568555
smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
@@ -628,6 +615,7 @@ const __initconst struct hypervisor_x86 x86_hyper_kvm = {
628615
.name = "KVM",
629616
.detect = kvm_detect,
630617
.type = X86_HYPER_KVM,
618+
.init.init_platform = kvmclock_init,
631619
.init.guest_late_init = kvm_guest_init,
632620
.init.x2apic_available = kvm_para_available,
633621
};

0 commit comments

Comments
 (0)