 .Lend_\@:
 .endm
 
+/*
+ * Switch back from the kernel stack to the entry stack.
+ *
+ * The %esp register must point to pt_regs on the task stack. It will
+ * first calculate the size of the stack-frame to copy, depending on
+ * whether we return to VM86 mode or not. With that it uses 'rep movsl'
+ * to copy the contents of the stack over to the entry stack.
+ *
+ * We must be very careful here, as we can't trust the contents of the
+ * task-stack once we switched to the entry-stack. When an NMI happens
+ * while on the entry-stack, the NMI handler will switch back to the top
+ * of the task stack, overwriting our stack-frame we are about to copy.
+ * Therefore we switch the stack only after everything is copied over.
+ */
+.macro SWITCH_TO_ENTRY_STACK
+
+	ALTERNATIVE	"", "jmp .Lend_\@", X86_FEATURE_XENPV
+
+	/* Bytes to copy */
+	movl	$PTREGS_SIZE, %ecx
+
+#ifdef CONFIG_VM86
+	testl	$(X86_EFLAGS_VM), PT_EFLAGS(%esp)
+	jz	.Lcopy_pt_regs_\@
+
+	/* Additional 4 registers to copy when returning to VM86 mode */
+	addl	$(4 * 4), %ecx
+
+.Lcopy_pt_regs_\@:
+#endif
+
+	/* Initialize source and destination for movsl */
+	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
+	subl	%ecx, %edi
+	movl	%esp, %esi
+
+	/* Save future stack pointer in %ebx */
+	movl	%edi, %ebx
+
+	/* Copy over the stack-frame */
+	shrl	$2, %ecx
+	cld
+	rep movsl
+
+	/*
+	 * Switch to entry-stack - needs to happen after everything is
+	 * copied because the NMI handler will overwrite the task-stack
+	 * when on entry-stack
+	 */
+	movl	%ebx, %esp
+
+.Lend_\@:
+.endm
+
 /*
  * %eax: prev task
  * %edx: next task
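As a rough user-space model of the ordering constraint described in the comment above, the sketch below computes the frame size (the base pt_regs size plus four extra dwords for a VM86 return), copies the frame to just below the entry stack's sp0, and only then moves the "stack pointer". The buffer names, the PTREGS_SIZE value, and the layout are illustrative assumptions, not the kernel's definitions:

    /* Hypothetical stand-alone model of the copy-then-switch ordering.
     * The buffers, PTREGS_SIZE value and VM86 extra size are assumptions
     * made for illustration, not the kernel's definitions.
     */
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define PTREGS_SIZE  (17 * 4)  /* assumed pt_regs size in bytes      */
    #define VM86_EXTRA   (4 * 4)   /* four extra dwords for a VM86 return */

    static uint8_t task_stack[4096];   /* stands in for the task stack  */
    static uint8_t entry_stack[4096];  /* stands in for the entry stack */

    int main(void)
    {
        int to_vm86 = 0;                        /* would come from EFLAGS.VM    */
        size_t bytes = PTREGS_SIZE + (to_vm86 ? VM86_EXTRA : 0);

        uint8_t *src = task_stack;                        /* %esp: pt_regs on task stack */
        uint8_t *sp0 = entry_stack + sizeof(entry_stack); /* cpu_tss_rw.sp0              */
        uint8_t *dst = sp0 - bytes;                       /* future stack pointer (%ebx) */

        /* Copy first: once we run on the entry stack an NMI may clobber
         * the task stack, so the "stack pointer" is only updated after
         * the whole frame has been copied.
         */
        memcpy(dst, src, bytes);

        uint8_t *esp = dst;          /* models the final 'movl %ebx, %esp' */
        printf("switched to %p, copied %zu bytes\n", (void *)esp, bytes);
        return 0;
    }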
@@ -581,25 +635,45 @@ ENTRY(entry_SYSENTER_32)
 
 	/* Opportunistic SYSEXIT */
 	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
+
+	/*
+	 * Setup entry stack - we keep the pointer in %eax and do the
+	 * switch after almost all user-state is restored.
+	 */
+
+	/* Load entry stack pointer and allocate frame for eflags/eax */
+	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
+	subl	$(2*4), %eax
+
+	/* Copy eflags and eax to entry stack */
+	movl	PT_EFLAGS(%esp), %edi
+	movl	PT_EAX(%esp), %esi
+	movl	%edi, (%eax)
+	movl	%esi, 4(%eax)
+
+	/* Restore user registers and segments */
 	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
 	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
 1:	mov	PT_FS(%esp), %fs
 	PTGS_TO_GS
+
 	popl	%ebx			/* pt_regs->bx */
 	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
 	popl	%esi			/* pt_regs->si */
 	popl	%edi			/* pt_regs->di */
 	popl	%ebp			/* pt_regs->bp */
-	popl	%eax			/* pt_regs->ax */
+
+	/* Switch to entry stack */
+	movl	%eax, %esp
 
 	/*
 	 * Restore all flags except IF. (We restore IF separately because
 	 * STI gives a one-instruction window in which we won't be interrupted,
 	 * whereas POPF does not.)
 	 */
-	addl	$PT_EFLAGS-PT_DS, %esp	/* point esp at pt_regs->flags */
 	btrl	$X86_EFLAGS_IF_BIT, (%esp)
 	popfl
+	popl	%eax
 
 	/*
	 * Return back to the vDSO, which will pop ecx and edx.
@@ -668,6 +742,7 @@ ENTRY(entry_INT80_32)
 
 restore_all:
 	TRACE_IRQS_IRET
+	SWITCH_TO_ENTRY_STACK
 .Lrestore_all_notrace:
 	CHECK_AND_APPLY_ESPFIX
 .Lrestore_nocheck:
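The SYSENTER/SYSEXIT hunk above applies the same copy-before-switch rule in a narrower form: only eflags and eax still have to be readable after %esp leaves the task stack, so they are staged in a two-dword frame just below sp0, the other user registers are popped from the task stack, and only the final popfl / popl %eax execute on the entry stack. A minimal user-space C sketch of that staging, with assumed buffer names and placeholder values, might look like this:

    /* Hypothetical model of the SYSEXIT ordering: eflags and eax are staged
     * in a two-slot frame at the top of the entry stack before the switch,
     * so they can still be popped afterwards.  All names and values here
     * are illustrative, not the kernel's.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t entry_stack[512];   /* stands in for the entry stack */

    int main(void)
    {
        uint32_t pt_eflags = 0x00000202;  /* stands in for PT_EFLAGS(%esp) */
        uint32_t pt_eax    = 0x12345678;  /* stands in for PT_EAX(%esp)    */

        /* sp0 is the top of the entry stack; reserve two dwords below it */
        uint32_t *sp0   = entry_stack + 512;
        uint32_t *frame = sp0 - 2;        /* models 'subl $(2*4), %eax'    */

        frame[0] = pt_eflags;             /* models 'movl %edi, (%eax)'    */
        frame[1] = pt_eax;                /* models 'movl %esi, 4(%eax)'   */

        /* ... the other user registers are popped from the task stack ... */

        uint32_t *esp    = frame;         /* models 'movl %eax, %esp'      */
        uint32_t eflags  = *esp++;        /* 'popfl' (IF masked separately
                                             by btrl in the real path)     */
        uint32_t eax     = *esp++;        /* 'popl %eax'                   */

        printf("eflags=%#x eax=%#x\n", (unsigned)eflags, (unsigned)eax);
        return 0;
    }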