Skip to content

Commit 45d7b25

Browse files
joergroedel authored and KAGA-KOKO committed
x86/entry/32: Enter the kernel via trampoline stack
Use the entry-stack as a trampoline to enter the kernel. The entry-stack is already in the cpu_entry_area and will be mapped to userspace when PTI is enabled. Signed-off-by: Joerg Roedel <jroedel@suse.de> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Tested-by: Pavel Machek <pavel@ucw.cz> Cc: "H . Peter Anvin" <hpa@zytor.com> Cc: linux-mm@kvack.org Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Andy Lutomirski <luto@kernel.org> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Juergen Gross <jgross@suse.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Jiri Kosina <jkosina@suse.cz> Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com> Cc: Brian Gerst <brgerst@gmail.com> Cc: David Laight <David.Laight@aculab.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: Eduardo Valentin <eduval@amazon.com> Cc: Greg KH <gregkh@linuxfoundation.org> Cc: Will Deacon <will.deacon@arm.com> Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Waiman Long <llong@redhat.com> Cc: "David H . Gutteridge" <dhgutteridge@sympatico.ca> Cc: joro@8bytes.org Link: https://lkml.kernel.org/r/1531906876-13451-8-git-send-email-joro@8bytes.org
1 parent 0d2eb73 commit 45d7b25

File tree

6 files changed

+115
-28
lines changed

6 files changed

+115
-28
lines changed

arch/x86/entry/entry_32.S

Lines changed: 99 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -154,7 +154,7 @@
154154

155155
#endif /* CONFIG_X86_32_LAZY_GS */
156156

157-
.macro SAVE_ALL pt_regs_ax=%eax
157+
.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0
158158
cld
159159
PUSH_GS
160160
pushl %fs
@@ -173,6 +173,12 @@
173173
movl $(__KERNEL_PERCPU), %edx
174174
movl %edx, %fs
175175
SET_KERNEL_GS %edx
176+
177+
/* Switch to kernel stack if necessary */
178+
.if \switch_stacks > 0
179+
SWITCH_TO_KERNEL_STACK
180+
.endif
181+
176182
.endm
177183

178184
/*
@@ -269,6 +275,73 @@
269275
.Lend_\@:
270276
#endif /* CONFIG_X86_ESPFIX32 */
271277
.endm
278+
279+
280+
/*
281+
* Called with pt_regs fully populated and kernel segments loaded,
282+
* so we can access PER_CPU and use the integer registers.
283+
*
284+
* We need to be very careful here with the %esp switch, because an NMI
285+
* can happen everywhere. If the NMI handler finds itself on the
286+
* entry-stack, it will overwrite the task-stack and everything we
287+
* copied there. So allocate the stack-frame on the task-stack and
288+
* switch to it before we do any copying.
289+
*/
290+
.macro SWITCH_TO_KERNEL_STACK
291+
292+
ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV
293+
294+
/* Are we on the entry stack? Bail out if not! */
295+
movl PER_CPU_VAR(cpu_entry_area), %ecx
296+
addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
297+
subl %esp, %ecx /* ecx = (end of entry_stack) - esp */
298+
cmpl $SIZEOF_entry_stack, %ecx
299+
jae .Lend_\@
300+
301+
/* Load stack pointer into %esi and %edi */
302+
movl %esp, %esi
303+
movl %esi, %edi
304+
305+
/* Move %edi to the top of the entry stack */
306+
andl $(MASK_entry_stack), %edi
307+
addl $(SIZEOF_entry_stack), %edi
308+
309+
/* Load top of task-stack into %edi */
310+
movl TSS_entry2task_stack(%edi), %edi
311+
312+
/* Bytes to copy */
313+
movl $PTREGS_SIZE, %ecx
314+
315+
#ifdef CONFIG_VM86
316+
testl $X86_EFLAGS_VM, PT_EFLAGS(%esi)
317+
jz .Lcopy_pt_regs_\@
318+
319+
/*
320+
* Stack-frame contains 4 additional segment registers when
321+
* coming from VM86 mode
322+
*/
323+
addl $(4 * 4), %ecx
324+
325+
.Lcopy_pt_regs_\@:
326+
#endif
327+
328+
/* Allocate frame on task-stack */
329+
subl %ecx, %edi
330+
331+
/* Switch to task-stack */
332+
movl %edi, %esp
333+
334+
/*
335+
* We are now on the task-stack and can safely copy over the
336+
* stack-frame
337+
*/
338+
shrl $2, %ecx
339+
cld
340+
rep movsl
341+
342+
.Lend_\@:
343+
.endm
344+
272345
/*
273346
* %eax: prev task
274347
* %edx: next task
@@ -469,7 +542,7 @@ ENTRY(entry_SYSENTER_32)
469542
pushl $__USER_CS /* pt_regs->cs */
470543
pushl $0 /* pt_regs->ip = 0 (placeholder) */
471544
pushl %eax /* pt_regs->orig_ax */
472-
SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */
545+
SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest, stack already switched */
473546

474547
/*
475548
* SYSENTER doesn't filter flags, so we need to clear NT, AC
@@ -580,7 +653,8 @@ ENDPROC(entry_SYSENTER_32)
580653
ENTRY(entry_INT80_32)
581654
ASM_CLAC
582655
pushl %eax /* pt_regs->orig_ax */
583-
SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */
656+
657+
SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1 /* save rest */
584658

585659
/*
586660
* User mode is traced as though IRQs are on, and the interrupt gate
@@ -677,24 +751,25 @@ END(irq_entries_start)
677751
common_interrupt:
678752
ASM_CLAC
679753
addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
680-
SAVE_ALL
754+
755+
SAVE_ALL switch_stacks=1
681756
ENCODE_FRAME_POINTER
682757
TRACE_IRQS_OFF
683758
movl %esp, %eax
684759
call do_IRQ
685760
jmp ret_from_intr
686761
ENDPROC(common_interrupt)
687762

688-
#define BUILD_INTERRUPT3(name, nr, fn) \
689-
ENTRY(name) \
690-
ASM_CLAC; \
691-
pushl $~(nr); \
692-
SAVE_ALL; \
693-
ENCODE_FRAME_POINTER; \
694-
TRACE_IRQS_OFF \
695-
movl %esp, %eax; \
696-
call fn; \
697-
jmp ret_from_intr; \
763+
#define BUILD_INTERRUPT3(name, nr, fn) \
764+
ENTRY(name) \
765+
ASM_CLAC; \
766+
pushl $~(nr); \
767+
SAVE_ALL switch_stacks=1; \
768+
ENCODE_FRAME_POINTER; \
769+
TRACE_IRQS_OFF \
770+
movl %esp, %eax; \
771+
call fn; \
772+
jmp ret_from_intr; \
698773
ENDPROC(name)
699774

700775
#define BUILD_INTERRUPT(name, nr) \
@@ -926,26 +1001,27 @@ common_exception:
9261001
pushl %es
9271002
pushl %ds
9281003
pushl %eax
1004+
movl $(__USER_DS), %eax
1005+
movl %eax, %ds
1006+
movl %eax, %es
1007+
movl $(__KERNEL_PERCPU), %eax
1008+
movl %eax, %fs
9291009
pushl %ebp
9301010
pushl %edi
9311011
pushl %esi
9321012
pushl %edx
9331013
pushl %ecx
9341014
pushl %ebx
1015+
SWITCH_TO_KERNEL_STACK
9351016
ENCODE_FRAME_POINTER
9361017
cld
937-
movl $(__KERNEL_PERCPU), %ecx
938-
movl %ecx, %fs
9391018
UNWIND_ESPFIX_STACK
9401019
GS_TO_REG %ecx
9411020
movl PT_GS(%esp), %edi # get the function address
9421021
movl PT_ORIG_EAX(%esp), %edx # get the error code
9431022
movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
9441023
REG_TO_PTGS %ecx
9451024
SET_KERNEL_GS %ecx
946-
movl $(__USER_DS), %ecx
947-
movl %ecx, %ds
948-
movl %ecx, %es
9491025
TRACE_IRQS_OFF
9501026
movl %esp, %eax # pt_regs pointer
9511027
CALL_NOSPEC %edi
@@ -964,6 +1040,7 @@ ENTRY(debug)
9641040
*/
9651041
ASM_CLAC
9661042
pushl $-1 # mark this as an int
1043+
9671044
SAVE_ALL
9681045
ENCODE_FRAME_POINTER
9691046
xorl %edx, %edx # error code 0
@@ -999,6 +1076,7 @@ END(debug)
9991076
*/
10001077
ENTRY(nmi)
10011078
ASM_CLAC
1079+
10021080
#ifdef CONFIG_X86_ESPFIX32
10031081
pushl %eax
10041082
movl %ss, %eax
@@ -1066,7 +1144,8 @@ END(nmi)
10661144
ENTRY(int3)
10671145
ASM_CLAC
10681146
pushl $-1 # mark this as an int
1069-
SAVE_ALL
1147+
1148+
SAVE_ALL switch_stacks=1
10701149
ENCODE_FRAME_POINTER
10711150
TRACE_IRQS_OFF
10721151
xorl %edx, %edx # zero error code

arch/x86/include/asm/switch_to.h

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -89,13 +89,23 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread)
8989
/* This is used when switching tasks or entering/exiting vm86 mode. */
9090
static inline void update_sp0(struct task_struct *task)
9191
{
92-
/* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */
92+
/* sp0 always points to the entry trampoline stack, which is constant: */
9393
#ifdef CONFIG_X86_32
94-
load_sp0(task->thread.sp0);
94+
if (static_cpu_has(X86_FEATURE_XENPV))
95+
load_sp0(task->thread.sp0);
96+
else
97+
this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
9598
#else
99+
/*
100+
* x86-64 updates x86_tss.sp1 via cpu_current_top_of_stack. That
101+
* doesn't work on x86-32 because sp1 and
102+
* cpu_current_top_of_stack have different values (because of
103+
* the non-zero stack-padding on 32bit).
104+
*/
96105
if (static_cpu_has(X86_FEATURE_XENPV))
97106
load_sp0(task_top_of_stack(task));
98107
#endif
108+
99109
}
100110

101111
#endif /* _ASM_X86_SWITCH_TO_H */

arch/x86/kernel/asm-offsets.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,7 @@ void common(void) {
103103
OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
104104
OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page);
105105
DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack));
106+
DEFINE(MASK_entry_stack, (~(sizeof(struct entry_stack) - 1)));
106107

107108
/* Offset for sp0 and sp1 into the tss_struct */
108109
OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);

arch/x86/kernel/cpu/common.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1804,11 +1804,12 @@ void cpu_init(void)
18041804
enter_lazy_tlb(&init_mm, curr);
18051805

18061806
/*
1807-
* Initialize the TSS. Don't bother initializing sp0, as the initial
1808-
* task never enters user mode.
1807+
* Initialize the TSS. sp0 points to the entry trampoline stack
1808+
* regardless of what task is running.
18091809
*/
18101810
set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
18111811
load_TR_desc();
1812+
load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
18121813

18131814
load_mm_ldt(&init_mm);
18141815

arch/x86/kernel/process.c

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -57,14 +57,12 @@ __visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
5757
*/
5858
.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
5959

60-
#ifdef CONFIG_X86_64
6160
/*
6261
* .sp1 is cpu_current_top_of_stack. The init task never
6362
* runs user code, but cpu_current_top_of_stack should still
6463
* be well defined before the first context switch.
6564
*/
6665
.sp1 = TOP_OF_INIT_STACK,
67-
#endif
6866

6967
#ifdef CONFIG_X86_32
7068
.ss0 = __KERNEL_DS,

arch/x86/kernel/process_32.c

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -290,8 +290,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
290290
this_cpu_write(cpu_current_top_of_stack,
291291
(unsigned long)task_stack_page(next_p) +
292292
THREAD_SIZE);
293-
/* SYSENTER reads the task-stack from tss.sp1 */
294-
this_cpu_write(cpu_tss_rw.x86_tss.sp1, next_p->thread.sp0);
295293

296294
/*
297295
* Restore %gs if needed (which is common)

0 commit comments

Comments
 (0)