
Commit b0d04fb

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Ingo writes:
 "x86 fixes: It's 4 misc fixes, 3 build warning fixes and 3 comment fixes.

  In hindsight I'd have left out the 3 comment fixes to make the pull
  request look less scary at such a late point in the cycle. :-/"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/swiotlb: Enable swiotlb for > 4GiG RAM on 32-bit kernels
  x86/fpu: Fix i486 + no387 boot crash by only saving FPU registers on context switch if there is an FPU
  x86/fpu: Remove second definition of fpu in __fpu__restore_sig()
  x86/entry/64: Further improve paranoid_entry comments
  x86/entry/32: Clear the CS high bits
  x86/boot: Add -Wno-pointer-sign to KBUILD_CFLAGS
  x86/time: Correct the attribute on jiffies' definition
  x86/entry: Add some paranoid entry/exit CR3 handling comments
  x86/percpu: Fix this_cpu_read()
  x86/tsc: Force inlining of cyc2ns bits
2 parents 14dbc56 + 485734f commit b0d04fb

File tree

9 files changed: +30 −18 lines


arch/x86/boot/compressed/Makefile

Lines changed: 1 addition & 0 deletions
@@ -37,6 +37,7 @@ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
 KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
 KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+KBUILD_CFLAGS += -Wno-pointer-sign
 
 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n

arch/x86/entry/entry_32.S

Lines changed: 7 additions & 6 deletions
@@ -389,6 +389,13 @@
 	 * that register for the time this macro runs
 	 */
 
+	/*
+	 * The high bits of the CS dword (__csh) are used for
+	 * CS_FROM_ENTRY_STACK and CS_FROM_USER_CR3. Clear them in case
+	 * hardware didn't do this for us.
+	 */
+	andl	$(0x0000ffff), PT_CS(%esp)
+
 	/* Are we on the entry stack? Bail out if not! */
 	movl	PER_CPU_VAR(cpu_entry_area), %ecx
 	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
@@ -407,12 +414,6 @@
 	/* Load top of task-stack into %edi */
 	movl	TSS_entry2task_stack(%edi), %edi
 
-	/*
-	 * Clear unused upper bits of the dword containing the word-sized CS
-	 * slot in pt_regs in case hardware didn't clear it for us.
-	 */
-	andl	$(0x0000ffff), PT_CS(%esp)
-
 	/* Special case - entry from kernel mode via entry stack */
 #ifdef CONFIG_VM86
 	movl	PT_EFLAGS(%esp), %ecx	# mix EFLAGS and CS
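A note on what the moved andl protects: the 32-bit pt_regs slot that holds the 16-bit CS selector is reused by the entry code to carry software flags in its high half, and hardware is not guaranteed to clear that half. The sketch below is an illustrative userspace model, not kernel code; the CS_FROM_* bit positions are assumptions made for the example.

#include <stdint.h>
#include <stdio.h>

#define CS_FROM_ENTRY_STACK (1u << 31)   /* assumed bit position */
#define CS_FROM_USER_CR3    (1u << 30)   /* assumed bit position */

int main(void)
{
	/* Hardware may not clear the high half of the pushed CS dword,
	 * so it can hold junk alongside the 16-bit selector. */
	uint32_t cs_slot = 0xdead0000u | 0x0073u;

	/* What "andl $(0x0000ffff), PT_CS(%esp)" does: */
	cs_slot &= 0x0000ffffu;

	/* The entry code can now use the high bits as flags and trust
	 * that a set bit really means it set the bit itself. */
	cs_slot |= CS_FROM_ENTRY_STACK;

	printf("selector=%#06x entry_stack=%d user_cr3=%d\n",
	       cs_slot & 0xffffu,
	       !!(cs_slot & CS_FROM_ENTRY_STACK),
	       !!(cs_slot & CS_FROM_USER_CR3));
	return 0;
}

The point of moving the masking earlier in the macro is that it now happens before any code path that inspects those flag bits, rather than only on the path that was reached later.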

arch/x86/entry/entry_64.S

Lines changed: 13 additions & 0 deletions
@@ -1187,6 +1187,16 @@ ENTRY(paranoid_entry)
 	xorl	%ebx, %ebx
 
 1:
+	/*
+	 * Always stash CR3 in %r14.  This value will be restored,
+	 * verbatim, at exit.  Needed if paranoid_entry interrupted
+	 * another entry that already switched to the user CR3 value
+	 * but has not yet returned to userspace.
+	 *
+	 * This is also why CS (stashed in the "iret frame" by the
+	 * hardware at entry) can not be used: this may be a return
+	 * to kernel code, but with a user CR3 value.
+	 */
 	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 
 	ret
@@ -1211,11 +1221,13 @@ ENTRY(paranoid_exit)
 	testl	%ebx, %ebx			/* swapgs needed? */
 	jnz	.Lparanoid_exit_no_swapgs
 	TRACE_IRQS_IRETQ
+	/* Always restore stashed CR3 value (see paranoid_entry) */
 	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
 	SWAPGS_UNSAFE_STACK
 	jmp	.Lparanoid_exit_restore
 .Lparanoid_exit_no_swapgs:
 	TRACE_IRQS_IRETQ_DEBUG
+	/* Always restore stashed CR3 value (see paranoid_entry) */
 	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
 .Lparanoid_exit_restore:
 	jmp	restore_regs_and_return_to_kernel
@@ -1626,6 +1638,7 @@ end_repeat_nmi:
 	movq	$-1, %rsi
 	call	do_nmi
 
+	/* Always restore stashed CR3 value (see paranoid_entry) */
 	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
 
 	testl	%ebx, %ebx			/* swapgs needed? */
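The new comments describe a save-verbatim/restore-verbatim policy rather than a decision made at exit time. As a rough illustration of the nesting case they warn about, here is a plain C toy model, not kernel code; the constants and helper names are invented for the example.

#include <stdio.h>

static unsigned long cr3;			/* models the CR3 register */
#define KERNEL_CR3 0x1000UL
#define USER_CR3   0x2000UL

static unsigned long paranoid_entry(void)	/* SAVE_AND_SWITCH_TO_KERNEL_CR3 */
{
	unsigned long saved = cr3;		/* stash, whatever it currently is */
	cr3 = KERNEL_CR3;
	return saved;				/* the kernel keeps this in %r14 */
}

static void paranoid_exit(unsigned long saved)	/* RESTORE_CR3 */
{
	cr3 = saved;				/* verbatim restore, no guessing from CS */
}

int main(void)
{
	/* An exit path has already switched to the user CR3 value ... */
	cr3 = USER_CR3;
	/* ... when an NMI (a paranoid entry) interrupts it.  CS still
	 * says "kernel", so CS cannot tell us which CR3 to restore. */
	unsigned long saved = paranoid_entry();
	/* NMI handler runs on the kernel CR3 here. */
	paranoid_exit(saved);
	printf("CR3 after nested exit: %#lx (expected %#lx)\n", cr3, USER_CR3);
	return 0;
}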

arch/x86/include/asm/fpu/internal.h

Lines changed: 1 addition & 1 deletion
@@ -528,7 +528,7 @@ static inline void fpregs_activate(struct fpu *fpu)
 static inline void
 switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-	if (old_fpu->initialized) {
+	if (static_cpu_has(X86_FEATURE_FPU) && old_fpu->initialized) {
 		if (!copy_fpregs_to_fpstate(old_fpu))
 			old_fpu->last_cpu = -1;
 		else

arch/x86/include/asm/percpu.h

Lines changed: 4 additions & 4 deletions
@@ -185,22 +185,22 @@ do { \
 	typeof(var) pfo_ret__;				\
 	switch (sizeof(var)) {				\
 	case 1:						\
-		asm(op "b "__percpu_arg(1)",%0"		\
+		asm volatile(op "b "__percpu_arg(1)",%0"\
 		    : "=q" (pfo_ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 2:						\
-		asm(op "w "__percpu_arg(1)",%0"		\
+		asm volatile(op "w "__percpu_arg(1)",%0"\
 		    : "=r" (pfo_ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 4:						\
-		asm(op "l "__percpu_arg(1)",%0"		\
+		asm volatile(op "l "__percpu_arg(1)",%0"\
 		    : "=r" (pfo_ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 8:						\
-		asm(op "q "__percpu_arg(1)",%0"		\
+		asm volatile(op "q "__percpu_arg(1)",%0"\
 		    : "=r" (pfo_ret__)			\
 		    : "m" (var));			\
 		break;					\
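Background on why the volatile matters: without it, the compiler may treat the asm as a pure function of its operands and cache, hoist, or merge the result, so a this_cpu_read() inside a loop can collapse into a single read even though the value changes behind the compiler's back. A minimal userspace illustration for x86-64 with GCC/Clang AT&T inline asm; the variable and function names are invented for the example.

#include <stdio.h>

static unsigned long var;	/* stands in for a per-CPU variable */

static inline unsigned long read_plain(void)
{
	unsigned long ret;
	/* No volatile: identical reads may be merged or hoisted out of
	 * loops, because the compiler sees no side effects. */
	asm("movq %1, %0" : "=r" (ret) : "m" (var));
	return ret;
}

static inline unsigned long read_forced(void)
{
	unsigned long ret;
	/* volatile: the load is re-issued every time control reaches it,
	 * which is what this_cpu_read() needs when another CPU or an
	 * interrupt can modify the value invisibly to the compiler. */
	asm volatile("movq %1, %0" : "=r" (ret) : "m" (var));
	return ret;
}

int main(void)
{
	var = 42;
	printf("plain=%lu forced=%lu\n", read_plain(), read_forced());
	return 0;
}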

arch/x86/kernel/fpu/signal.c

Lines changed: 0 additions & 1 deletion
@@ -314,7 +314,6 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 		 * thread's fpu state, reconstruct fxstate from the fsave
 		 * header. Validate and sanitize the copied state.
 		 */
-		struct fpu *fpu = &tsk->thread.fpu;
 		struct user_i387_ia32_struct env;
 		int err = 0;
 

arch/x86/kernel/pci-swiotlb.c

Lines changed: 0 additions & 2 deletions
@@ -42,10 +42,8 @@ IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
 int __init pci_swiotlb_detect_4gb(void)
 {
 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
-#ifdef CONFIG_X86_64
 	if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
 		swiotlb = 1;
-#endif
 
 	/*
 	 * If SME is active then swiotlb will be set to 1 so that bounce

arch/x86/kernel/time.c

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@
 #include <asm/time.h>
 
 #ifdef CONFIG_X86_64
-__visible volatile unsigned long jiffies __cacheline_aligned = INITIAL_JIFFIES;
+__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
 #endif
 
 unsigned long profile_pc(struct pt_regs *regs)

arch/x86/kernel/tsc.c

Lines changed: 3 additions & 3 deletions
@@ -58,7 +58,7 @@ struct cyc2ns {
 
 static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
 
-void cyc2ns_read_begin(struct cyc2ns_data *data)
+void __always_inline cyc2ns_read_begin(struct cyc2ns_data *data)
 {
 	int seq, idx;
 
@@ -75,7 +75,7 @@ void cyc2ns_read_begin(struct cyc2ns_data *data)
 	} while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
 }
 
-void cyc2ns_read_end(void)
+void __always_inline cyc2ns_read_end(void)
 {
 	preempt_enable_notrace();
 }
@@ -104,7 +104,7 @@ void cyc2ns_read_end(void)
  * -johnstul@us.ibm.com "math is hard, lets go shopping!"
  */
 
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
 {
 	struct cyc2ns_data data;
 	unsigned long long ns;
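The functions being force-inlined implement a seqcount-style lockless read of the per-CPU cycles-to-nanoseconds parameters, and cycles_2_ns() is used by sched_clock(), which is presumably why out-of-line calls are undesirable here. Below is a simplified, single-threaded sketch of that read-retry pattern in userspace C; the field and helper names are invented, and the real code uses mul_u64_u32_shr() plus a per-CPU double buffer.

#include <stdint.h>
#include <stdio.h>

struct c2n_snap {			/* stand-in for struct cyc2ns_data */
	uint32_t mul;
	uint32_t shift;
	uint64_t offset;
};

static volatile unsigned int seq;	/* bumped by the (absent) writer */
static struct c2n_snap snap[2];		/* double buffer, as in cyc2ns.data[] */

static void c2n_read(struct c2n_snap *out)
{
	unsigned int s;

	do {
		s = seq;
		*out = snap[s & 1];	/* read the copy the writer isn't updating */
	} while (s != seq);		/* sequence moved: a writer raced us, retry */
}

static uint64_t cycles_to_ns(uint64_t cyc)
{
	struct c2n_snap d;

	c2n_read(&d);
	/* ns = offset + (cyc * mul) >> shift; the kernel widens this to a
	 * 96-bit intermediate via mul_u64_u32_shr(), elided here. */
	return d.offset + ((cyc * d.mul) >> d.shift);
}

int main(void)
{
	snap[0] = (struct c2n_snap){ .mul = 3, .shift = 1, .offset = 100 };
	printf("%llu\n", (unsigned long long)cycles_to_ns(1000));
	return 0;
}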
