
Commit bf904d2

amluto authored and KAGA-KOKO committed
x86/pti/64: Remove the SYSCALL64 entry trampoline
The SYSCALL64 trampoline has a couple of nice properties:

 - The usual sequence of SWAPGS followed by two GS-relative accesses to
   set up RSP is somewhat slow because the GS-relative accesses need to
   wait for SWAPGS to finish. The trampoline approach allows
   RIP-relative accesses to set up RSP, which avoids the stall.

 - The trampoline avoids any percpu access before CR3 is set up, which
   means that no percpu memory needs to be mapped in the user page
   tables. This prevents using Meltdown to read any percpu memory
   outside the cpu_entry_area and prevents using timing leaks to
   directly locate the percpu areas.

The downsides of using a trampoline may outweigh the upsides, however.

It adds an extra non-contiguous I$ cache line to system calls, and it
forces an indirect jump to transfer control back to the normal kernel
text after CR3 is set up. The latter is because x86 lacks a 64-bit
direct jump instruction that could jump from the trampoline to the
entry text. With retpolines enabled, the indirect jump is extremely
slow.

Change the code to map the percpu TSS into the user page tables to
allow the non-trampoline SYSCALL64 path to work under PTI. This does
not add a new direct information leak, since the TSS is readable by
Meltdown from the cpu_entry_area alias regardless.

It does allow a timing attack to locate the percpu area, but KASLR is
more or less a lost cause against local attack on CPUs vulnerable to
Meltdown regardless. As far as I'm concerned, on current hardware,
KASLR is only useful to mitigate remote attacks that try to attack the
kernel without first gaining RCE against a vulnerable user process.

On Skylake, with CONFIG_RETPOLINE=y and KPTI on, this reduces syscall
overhead from ~237ns to ~228ns.

There is a possible alternative approach: Move the trampoline within 2G
of the entry text and make a separate copy for each CPU. This would
allow a direct jump to rejoin the normal entry path. There are pros and
cons to this approach:

 + It avoids a pipeline stall.

 - It executes from an extra page and reads from another extra page
   during the syscall. The latter is because it needs to use a relative
   addressing mode to find sp1 -- it's the same *cacheline*, but
   accessed using an alias, so it's an extra TLB entry.

 - Slightly more memory. This would be one page per CPU for a simple
   implementation and 64-ish bytes per CPU or one page per node for a
   more complex implementation.

 - More code complexity.

The current approach is chosen for simplicity and because the
alternative does not provide a significant benefit, which makes the
extra complexity not worth it.

[ tglx: Added the alternative discussion to the changelog ]

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lkml.kernel.org/r/8c7c6e483612c3e4e10ca89495dc160b1aa66878.1536015544.git.luto@kernel.org
1 parent 98f05b5 commit bf904d2
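
As a rough illustration of the kind of syscall-overhead measurement behind the ~237ns -> ~228ns figures above (a hypothetical sketch, not the benchmark used for this commit), a user-space loop over a cheap system call can approximate the per-syscall entry/exit cost:

/*
 * Hypothetical micro-benchmark: time a tight loop of a cheap syscall and
 * divide by the iteration count.  syscall(SYS_getpid) is used rather than
 * getpid() so that every iteration really enters the kernel.
 */
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	const long iters = 10 * 1000 * 1000;
	struct timespec start, end;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (long i = 0; i < iters; i++)
		syscall(SYS_getpid);	/* dominated by entry/exit cost */
	clock_gettime(CLOCK_MONOTONIC, &end);

	double ns = (end.tv_sec - start.tv_sec) * 1e9 +
		    (end.tv_nsec - start.tv_nsec);
	printf("avg per syscall: %.1f ns\n", ns / iters);
	return 0;
}

The measured number also includes the user-side syscall() wrapper and whatever mitigations are enabled on the machine, so absolute values will differ from the changelog figures; the meaningful part is the before/after delta with the same kernel configuration.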

File tree

9 files changed, +37 and -139 lines changed


arch/x86/entry/entry_64.S

Lines changed: 2 additions & 67 deletions
@@ -142,67 +142,6 @@ END(native_usergs_sysret64)
  * with them due to bugs in both AMD and Intel CPUs.
  */
 
-	.pushsection .entry_trampoline, "ax"
-
-/*
- * The code in here gets remapped into cpu_entry_area's trampoline.  This means
- * that the assembler and linker have the wrong idea as to where this code
- * lives (and, in fact, it's mapped more than once, so it's not even at a
- * fixed address).  So we can't reference any symbols outside the entry
- * trampoline and expect it to work.
- *
- * Instead, we carefully abuse %rip-relative addressing.
- * _entry_trampoline(%rip) refers to the start of the remapped) entry
- * trampoline.  We can thus find cpu_entry_area with this macro:
- */
-
-#define CPU_ENTRY_AREA \
-	_entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)
-
-/* The top word of the SYSENTER stack is hot and is usable as scratch space. */
-#define RSP_SCRATCH	CPU_ENTRY_AREA_entry_stack + \
-			SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA
-
-ENTRY(entry_SYSCALL_64_trampoline)
-	UNWIND_HINT_EMPTY
-	swapgs
-
-	/* Stash the user RSP. */
-	movq	%rsp, RSP_SCRATCH
-
-	/* Note: using %rsp as a scratch reg. */
-	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
-
-	/* Load the top of the task stack into RSP */
-	movq	CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp
-
-	/* Start building the simulated IRET frame. */
-	pushq	$__USER_DS			/* pt_regs->ss */
-	pushq	RSP_SCRATCH			/* pt_regs->sp */
-	pushq	%r11				/* pt_regs->flags */
-	pushq	$__USER_CS			/* pt_regs->cs */
-	pushq	%rcx				/* pt_regs->ip */
-
-	/*
-	 * x86 lacks a near absolute jump, and we can't jump to the real
-	 * entry text with a relative jump.  We could push the target
-	 * address and then use retq, but this destroys the pipeline on
-	 * many CPUs (wasting over 20 cycles on Sandy Bridge).  Instead,
-	 * spill RDI and restore it in a second-stage trampoline.
-	 */
-	pushq	%rdi
-	movq	$entry_SYSCALL_64_stage2, %rdi
-	JMP_NOSPEC %rdi
-END(entry_SYSCALL_64_trampoline)
-
-	.popsection
-
-ENTRY(entry_SYSCALL_64_stage2)
-	UNWIND_HINT_EMPTY
-	popq	%rdi
-	jmp	entry_SYSCALL_64_after_hwframe
-END(entry_SYSCALL_64_stage2)
-
 ENTRY(entry_SYSCALL_64)
 	UNWIND_HINT_EMPTY
 	/*
@@ -212,13 +151,9 @@ ENTRY(entry_SYSCALL_64)
 	 */
 
 	swapgs
-	/*
-	 * This path is only taken when PAGE_TABLE_ISOLATION is disabled so it
-	 * is not required to switch CR3.
-	 *
-	 * tss.sp2 is scratch space.
-	 */
+	/* tss.sp2 is scratch space. */
 	movq	%rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
+	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
 	/* Construct struct pt_regs on stack */

arch/x86/include/asm/cpu_entry_area.h

Lines changed: 0 additions & 2 deletions
@@ -30,8 +30,6 @@ struct cpu_entry_area {
 	 */
 	struct tss_struct tss;
 
-	char entry_trampoline[PAGE_SIZE];
-
 #ifdef CONFIG_X86_64
 	/*
 	 * Exception stacks used for IST entries.

arch/x86/include/asm/sections.h

Lines changed: 0 additions & 1 deletion
@@ -11,7 +11,6 @@ extern char __end_rodata_aligned[];
 
 #if defined(CONFIG_X86_64)
 extern char __end_rodata_hpage_align[];
-extern char __entry_trampoline_start[], __entry_trampoline_end[];
 #endif
 
 #endif	/* _ASM_X86_SECTIONS_H */

arch/x86/kernel/asm-offsets.c

Lines changed: 0 additions & 2 deletions
@@ -99,8 +99,6 @@ void common(void) {
 	OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask);
 
 	/* Layout info for cpu_entry_area */
-	OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
-	OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
 	OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page);
 	DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack));
 	DEFINE(MASK_entry_stack, (~(sizeof(struct entry_stack) - 1)));

arch/x86/kernel/cpu/common.c

Lines changed: 2 additions & 11 deletions
@@ -1531,19 +1531,10 @@ EXPORT_PER_CPU_SYMBOL(__preempt_count);
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
 {
-	extern char _entry_trampoline[];
-	extern char entry_SYSCALL_64_trampoline[];
-
-	int cpu = smp_processor_id();
-	unsigned long SYSCALL64_entry_trampoline =
-		(unsigned long)get_cpu_entry_area(cpu)->entry_trampoline +
-		(entry_SYSCALL_64_trampoline - _entry_trampoline);
+	int __maybe_unused cpu = smp_processor_id();
 
 	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
-	if (static_cpu_has(X86_FEATURE_PTI))
-		wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline);
-	else
-		wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
+	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
 
 #ifdef CONFIG_IA32_EMULATION
 	wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);

arch/x86/kernel/kprobes/core.c

Lines changed: 1 addition & 9 deletions
@@ -1066,18 +1066,10 @@ NOKPROBE_SYMBOL(kprobe_exceptions_notify);
 
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
-	bool is_in_entry_trampoline_section = false;
-
-#ifdef CONFIG_X86_64
-	is_in_entry_trampoline_section =
-		(addr >= (unsigned long)__entry_trampoline_start &&
-		 addr < (unsigned long)__entry_trampoline_end);
-#endif
 	return	(addr >= (unsigned long)__kprobes_text_start &&
 		 addr < (unsigned long)__kprobes_text_end) ||
 		(addr >= (unsigned long)__entry_text_start &&
-		 addr < (unsigned long)__entry_text_end) ||
-		is_in_entry_trampoline_section;
+		 addr < (unsigned long)__entry_text_end);
 }
 
 int __init arch_init_kprobes(void)

arch/x86/kernel/vmlinux.lds.S

Lines changed: 0 additions & 10 deletions
@@ -118,16 +118,6 @@ SECTIONS
 		*(.fixup)
 		*(.gnu.warning)
 
-#ifdef CONFIG_X86_64
-		. = ALIGN(PAGE_SIZE);
-		__entry_trampoline_start = .;
-		_entry_trampoline = .;
-		*(.entry_trampoline)
-		. = ALIGN(PAGE_SIZE);
-		__entry_trampoline_end = .;
-		ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
-#endif
-
 #ifdef CONFIG_RETPOLINE
 		__indirect_thunk_start = .;
 		*(.text.__x86.indirect_thunk)

arch/x86/mm/cpu_entry_area.c

Lines changed: 0 additions & 36 deletions
@@ -15,7 +15,6 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage)
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
 	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
-static DEFINE_PER_CPU(struct kcore_list, kcore_entry_trampoline);
 #endif
 
 struct cpu_entry_area *get_cpu_entry_area(int cpu)
@@ -83,8 +82,6 @@ static void percpu_setup_debug_store(int cpu)
 static void __init setup_cpu_entry_area(int cpu)
 {
 #ifdef CONFIG_X86_64
-	extern char _entry_trampoline[];
-
 	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
 	pgprot_t gdt_prot = PAGE_KERNEL_RO;
 	pgprot_t tss_prot = PAGE_KERNEL_RO;
@@ -146,43 +143,10 @@ static void __init setup_cpu_entry_area(int cpu)
 	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
 			     &per_cpu(exception_stacks, cpu),
 			     sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
-
-	cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
-		     __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
-	/*
-	 * The cpu_entry_area alias addresses are not in the kernel binary
-	 * so they do not show up in /proc/kcore normally.  This adds entries
-	 * for them manually.
-	 */
-	kclist_add_remap(&per_cpu(kcore_entry_trampoline, cpu),
-			 _entry_trampoline,
-			 &get_cpu_entry_area(cpu)->entry_trampoline, PAGE_SIZE);
 #endif
 	percpu_setup_debug_store(cpu);
 }
 
-#ifdef CONFIG_X86_64
-int arch_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
-		     char *name)
-{
-	unsigned int cpu, ncpu = 0;
-
-	if (symnum >= num_possible_cpus())
-		return -EINVAL;
-
-	for_each_possible_cpu(cpu) {
-		if (ncpu++ >= symnum)
-			break;
-	}
-
-	*value = (unsigned long)&get_cpu_entry_area(cpu)->entry_trampoline;
-	*type = 't';
-	strlcpy(name, "__entry_SYSCALL_64_trampoline", KSYM_NAME_LEN);
-
-	return 0;
-}
-#endif
-
 static __init void setup_cpu_entry_area_ptes(void)
 {
 #ifdef CONFIG_X86_32

arch/x86/mm/pti.c

Lines changed: 32 additions & 1 deletion
@@ -434,11 +434,42 @@ static void __init pti_clone_p4d(unsigned long addr)
 }
 
 /*
- * Clone the CPU_ENTRY_AREA into the user space visible page table.
+ * Clone the CPU_ENTRY_AREA and associated data into the user space visible
+ * page table.
  */
 static void __init pti_clone_user_shared(void)
 {
+	unsigned int cpu;
+
 	pti_clone_p4d(CPU_ENTRY_AREA_BASE);
+
+	for_each_possible_cpu(cpu) {
+		/*
+		 * The SYSCALL64 entry code needs to be able to find the
+		 * thread stack and needs one word of scratch space in which
+		 * to spill a register.  All of this lives in the TSS, in
+		 * the sp1 and sp2 slots.
+		 *
+		 * This is done for all possible CPUs during boot to ensure
+		 * that it's propagated to all mms.  If we were to add one of
+		 * these mappings during CPU hotplug, we would need to take
+		 * some measure to make sure that every mm that subsequently
+		 * ran on that CPU would have the relevant PGD entry in its
+		 * pagetables.  The usual vmalloc_fault() mechanism would not
+		 * work for page faults taken in entry_SYSCALL_64 before RSP
+		 * is set up.
+		 */
+
+		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
+		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
+		pte_t *target_pte;
+
+		target_pte = pti_user_pagetable_walk_pte(va);
+		if (WARN_ON(!target_pte))
+			return;
+
+		*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
+	}
 }
 
 #else /* CONFIG_X86_64 */
