Skip to content

Commit b92a165

Browse files
joergroedel authored and KAGA-KOKO committed
x86/entry/32: Handle Entry from Kernel-Mode on Entry-Stack
It is possible that the kernel is entered from kernel-mode and on the entry-stack. The most common way this happens is when an exception is triggered while loading the user-space segment registers on the kernel-to-userspace exit path. The segment loading needs to be done after the entry-stack switch, because the stack-switch needs kernel %fs for per_cpu access. When this happens, make sure to leave the kernel with the entry-stack again, so that the interrupted code-path runs on the right stack when switching to the user-cr3. Detect this condition on kernel-entry by checking CS.RPL and %esp, and if it happens, copy over the complete content of the entry stack to the task-stack. This needs to be done because once the exception handler is entered, the task might be scheduled out or even migrated to a different CPU, so this cannot rely on the entry-stack contents. Leave a marker in the stack-frame to detect this condition on the exit path. On the exit path the copy is reversed: copy all of the remaining task-stack back to the entry-stack and switch to it. Signed-off-by: Joerg Roedel <jroedel@suse.de> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Tested-by: Pavel Machek <pavel@ucw.cz> Cc: "H . 
Peter Anvin" <hpa@zytor.com> Cc: linux-mm@kvack.org Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Andy Lutomirski <luto@kernel.org> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Juergen Gross <jgross@suse.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Jiri Kosina <jkosina@suse.cz> Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com> Cc: Brian Gerst <brgerst@gmail.com> Cc: David Laight <David.Laight@aculab.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: Eduardo Valentin <eduval@amazon.com> Cc: Greg KH <gregkh@linuxfoundation.org> Cc: Will Deacon <will.deacon@arm.com> Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Waiman Long <llong@redhat.com> Cc: "David H . Gutteridge" <dhgutteridge@sympatico.ca> Cc: joro@8bytes.org Link: https://lkml.kernel.org/r/1531906876-13451-11-git-send-email-joro@8bytes.org
1 parent 8b376fa commit b92a165

File tree

1 file changed

+115
-1
lines changed

1 file changed

+115
-1
lines changed

arch/x86/entry/entry_32.S

Lines changed: 115 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -294,6 +294,9 @@
294294
* copied there. So allocate the stack-frame on the task-stack and
295295
* switch to it before we do any copying.
296296
*/
297+
298+
#define CS_FROM_ENTRY_STACK (1 << 31)
299+
297300
.macro SWITCH_TO_KERNEL_STACK
298301

299302
ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV
@@ -316,6 +319,16 @@
316319
/* Load top of task-stack into %edi */
317320
movl TSS_entry2task_stack(%edi), %edi
318321

322+
/*
323+
* Clear unused upper bits of the dword containing the word-sized CS
324+
* slot in pt_regs in case hardware didn't clear it for us.
325+
*/
326+
andl $(0x0000ffff), PT_CS(%esp)
327+
328+
/* Special case - entry from kernel mode via entry stack */
329+
testl $SEGMENT_RPL_MASK, PT_CS(%esp)
330+
jz .Lentry_from_kernel_\@
331+
319332
/* Bytes to copy */
320333
movl $PTREGS_SIZE, %ecx
321334

@@ -329,8 +342,8 @@
329342
*/
330343
addl $(4 * 4), %ecx
331344

332-
.Lcopy_pt_regs_\@:
333345
#endif
346+
.Lcopy_pt_regs_\@:
334347

335348
/* Allocate frame on task-stack */
336349
subl %ecx, %edi
@@ -346,6 +359,56 @@
346359
cld
347360
rep movsl
348361

362+
jmp .Lend_\@
363+
364+
.Lentry_from_kernel_\@:
365+
366+
/*
367+
* This handles the case when we enter the kernel from
368+
* kernel-mode and %esp points to the entry-stack. When this
369+
* happens we need to switch to the task-stack to run C code,
370+
* but switch back to the entry-stack again when we approach
371+
* iret and return to the interrupted code-path. This usually
372+
* happens when we hit an exception while restoring user-space
373+
* segment registers on the way back to user-space.
374+
*
375+
* When we switch to the task-stack here, we can't trust the
376+
* contents of the entry-stack anymore, as the exception handler
377+
* might be scheduled out or moved to another CPU. Therefore we
378+
* copy the complete entry-stack to the task-stack and set a
379+
* marker in the iret-frame (bit 31 of the CS dword) to detect
380+
* what we've done on the iret path.
381+
*
382+
* On the iret path we copy everything back and switch to the
383+
* entry-stack, so that the interrupted kernel code-path
384+
* continues on the same stack it was interrupted with.
385+
*
386+
* Be aware that an NMI can happen anytime in this code.
387+
*
388+
* %esi: Entry-Stack pointer (same as %esp)
389+
* %edi: Top of the task stack
390+
*/
391+
392+
/* Calculate number of bytes on the entry stack in %ecx */
393+
movl %esi, %ecx
394+
395+
/* %ecx to the top of entry-stack */
396+
andl $(MASK_entry_stack), %ecx
397+
addl $(SIZEOF_entry_stack), %ecx
398+
399+
/* Number of bytes on the entry stack to %ecx */
400+
sub %esi, %ecx
401+
402+
/* Mark stackframe as coming from entry stack */
403+
orl $CS_FROM_ENTRY_STACK, PT_CS(%esp)
404+
405+
/*
406+
* %esi and %edi are unchanged, %ecx contains the number of
407+
* bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
408+
* the stack-frame on task-stack and copy everything over
409+
*/
410+
jmp .Lcopy_pt_regs_\@
411+
349412
.Lend_\@:
350413
.endm
351414

@@ -403,6 +466,56 @@
403466
.Lend_\@:
404467
.endm
405468

469+
/*
470+
* This macro handles the case when we return to kernel-mode on the iret
471+
* path and have to switch back to the entry stack.
472+
*
473+
* See the comments below the .Lentry_from_kernel_\@ label in the
474+
* SWITCH_TO_KERNEL_STACK macro for more details.
475+
*/
476+
.macro PARANOID_EXIT_TO_KERNEL_MODE
477+
478+
/*
479+
* Test if we entered the kernel with the entry-stack. Most
480+
* likely we did not, because this code only runs on the
481+
* return-to-kernel path.
482+
*/
483+
testl $CS_FROM_ENTRY_STACK, PT_CS(%esp)
484+
jz .Lend_\@
485+
486+
/* Unlikely slow-path */
487+
488+
/* Clear marker from stack-frame */
489+
andl $(~CS_FROM_ENTRY_STACK), PT_CS(%esp)
490+
491+
/* Copy the remaining task-stack contents to entry-stack */
492+
movl %esp, %esi
493+
movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
494+
495+
/* Bytes on the task-stack to ecx */
496+
movl PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
497+
subl %esi, %ecx
498+
499+
/* Allocate stack-frame on entry-stack */
500+
subl %ecx, %edi
501+
502+
/*
503+
* Save future stack-pointer, we must not switch until the
504+
* copy is done, otherwise the NMI handler could destroy the
505+
* contents of the task-stack we are about to copy.
506+
*/
507+
movl %edi, %ebx
508+
509+
/* Do the copy */
510+
shrl $2, %ecx
511+
cld
512+
rep movsl
513+
514+
/* Safe to switch to entry-stack now */
515+
movl %ebx, %esp
516+
517+
.Lend_\@:
518+
.endm
406519
/*
407520
* %eax: prev task
408521
* %edx: next task
@@ -764,6 +877,7 @@ restore_all:
764877

765878
restore_all_kernel:
766879
TRACE_IRQS_IRET
880+
PARANOID_EXIT_TO_KERNEL_MODE
767881
RESTORE_REGS 4
768882
jmp .Lirq_return
769883

0 commit comments

Comments
 (0)