Skip to content

Commit 65f4d6d

Browse files
committed
Merge branch 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 pti fixes from Thomas Gleixner: "A set of updates for the x86/pti related code: - Preserve r8-r11 in int $0x80. r8-r11 need to be preserved, but the int $0x80 entry code removed that quite some time ago. Make it correct again. - A set of fixes for the Global Bit work which went into 4.17 and caused a bunch of interesting regressions: - Triggering a BUG in the page attribute code due to a missing check for early boot stage - Warnings in the page attribute code about holes in the kernel text mapping which are caused by the freeing of the init code. Handle such holes gracefully. - Reduce the amount of kernel memory which is set global to the actual text and do not incidentally overlap with data. - Disable the global bit when RANDSTRUCT is enabled as it partially defeats the hardening. - Make the page protection setup correct for vma->page_prot population again. The adjustment of the protections fell through the crack during the Global bit rework and triggers warnings on machines which do not support certain features, e.g. NX" * 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/entry/64/compat: Preserve r8-r11 in int $0x80 x86/pti: Filter at vma->vm_page_prot population x86/pti: Disallow global kernel text with RANDSTRUCT x86/pti: Reduce amount of kernel text allowed to be Global x86/pti: Fix boot warning from Global-bit setting x86/pti: Fix boot problems from Global-bit setting
2 parents 810fb07 + 8bb2610 commit 65f4d6d

File tree

7 files changed

+99
-34
lines changed

7 files changed

+99
-34
lines changed

arch/x86/Kconfig

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,7 @@ config X86
5252
select ARCH_HAS_DEVMEM_IS_ALLOWED
5353
select ARCH_HAS_ELF_RANDOMIZE
5454
select ARCH_HAS_FAST_MULTIPLIER
55+
select ARCH_HAS_FILTER_PGPROT
5556
select ARCH_HAS_FORTIFY_SOURCE
5657
select ARCH_HAS_GCOV_PROFILE_ALL
5758
select ARCH_HAS_KCOV if X86_64
@@ -273,6 +274,9 @@ config ARCH_HAS_CPU_RELAX
273274
config ARCH_HAS_CACHE_LINE_SIZE
274275
def_bool y
275276

277+
config ARCH_HAS_FILTER_PGPROT
278+
def_bool y
279+
276280
config HAVE_SETUP_PER_CPU_AREA
277281
def_bool y
278282

arch/x86/entry/entry_64_compat.S

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -84,13 +84,13 @@ ENTRY(entry_SYSENTER_compat)
8484
pushq %rdx /* pt_regs->dx */
8585
pushq %rcx /* pt_regs->cx */
8686
pushq $-ENOSYS /* pt_regs->ax */
87-
pushq $0 /* pt_regs->r8 = 0 */
87+
pushq %r8 /* pt_regs->r8 */
8888
xorl %r8d, %r8d /* nospec r8 */
89-
pushq $0 /* pt_regs->r9 = 0 */
89+
pushq %r9 /* pt_regs->r9 */
9090
xorl %r9d, %r9d /* nospec r9 */
91-
pushq $0 /* pt_regs->r10 = 0 */
91+
pushq %r10 /* pt_regs->r10 */
9292
xorl %r10d, %r10d /* nospec r10 */
93-
pushq $0 /* pt_regs->r11 = 0 */
93+
pushq %r11 /* pt_regs->r11 */
9494
xorl %r11d, %r11d /* nospec r11 */
9595
pushq %rbx /* pt_regs->rbx */
9696
xorl %ebx, %ebx /* nospec rbx */

arch/x86/include/asm/pgtable.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -601,6 +601,11 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
601601

602602
#define canon_pgprot(p) __pgprot(massage_pgprot(p))
603603

604+
static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
605+
{
606+
return canon_pgprot(prot);
607+
}
608+
604609
static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
605610
enum page_cache_mode pcm,
606611
enum page_cache_mode new_pcm)

arch/x86/mm/pageattr.c

Lines changed: 32 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,18 @@ void arch_report_meminfo(struct seq_file *m)
9393
static inline void split_page_count(int level) { }
9494
#endif
9595

96+
static inline int
97+
within(unsigned long addr, unsigned long start, unsigned long end)
98+
{
99+
return addr >= start && addr < end;
100+
}
101+
102+
static inline int
103+
within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
104+
{
105+
return addr >= start && addr <= end;
106+
}
107+
96108
#ifdef CONFIG_X86_64
97109

98110
static inline unsigned long highmap_start_pfn(void)
@@ -106,20 +118,25 @@ static inline unsigned long highmap_end_pfn(void)
106118
return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
107119
}
108120

109-
#endif
110-
111-
static inline int
112-
within(unsigned long addr, unsigned long start, unsigned long end)
121+
static bool __cpa_pfn_in_highmap(unsigned long pfn)
113122
{
114-
return addr >= start && addr < end;
123+
/*
124+
* Kernel text has an alias mapping at a high address, known
125+
* here as "highmap".
126+
*/
127+
return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
115128
}
116129

117-
static inline int
118-
within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
130+
#else
131+
132+
static bool __cpa_pfn_in_highmap(unsigned long pfn)
119133
{
120-
return addr >= start && addr <= end;
134+
/* There is no highmap on 32-bit */
135+
return false;
121136
}
122137

138+
#endif
139+
123140
/*
124141
* Flushing functions
125142
*/
@@ -172,7 +189,7 @@ static void __cpa_flush_all(void *arg)
172189

173190
static void cpa_flush_all(unsigned long cache)
174191
{
175-
BUG_ON(irqs_disabled());
192+
BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
176193

177194
on_each_cpu(__cpa_flush_all, (void *) cache, 1);
178195
}
@@ -236,7 +253,7 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
236253
unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
237254
#endif
238255

239-
BUG_ON(irqs_disabled());
256+
BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
240257

241258
on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
242259

@@ -1183,6 +1200,10 @@ static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
11831200
cpa->numpages = 1;
11841201
cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
11851202
return 0;
1203+
1204+
} else if (__cpa_pfn_in_highmap(cpa->pfn)) {
1205+
/* Faults in the highmap are OK, so do not warn: */
1206+
return -EFAULT;
11861207
} else {
11871208
WARN(1, KERN_WARNING "CPA: called for zero pte. "
11881209
"vaddr = %lx cpa->vaddr = %lx\n", vaddr,
@@ -1335,8 +1356,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
13351356
* to touch the high mapped kernel as well:
13361357
*/
13371358
if (!within(vaddr, (unsigned long)_text, _brk_end) &&
1338-
within_inclusive(cpa->pfn, highmap_start_pfn(),
1339-
highmap_end_pfn())) {
1359+
__cpa_pfn_in_highmap(cpa->pfn)) {
13401360
unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
13411361
__START_KERNEL_map - phys_base;
13421362
alias_cpa = *cpa;

arch/x86/mm/pti.c

Lines changed: 23 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -421,6 +421,16 @@ static inline bool pti_kernel_image_global_ok(void)
421421
if (boot_cpu_has(X86_FEATURE_K8))
422422
return false;
423423

424+
/*
425+
* RANDSTRUCT derives its hardening benefits from the
426+
* attacker's lack of knowledge about the layout of kernel
427+
* data structures. Keep the kernel image non-global in
428+
* cases where RANDSTRUCT is in use to help keep the layout a
429+
* secret.
430+
*/
431+
if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
432+
return false;
433+
424434
return true;
425435
}
426436

@@ -430,12 +440,24 @@ static inline bool pti_kernel_image_global_ok(void)
430440
*/
431441
void pti_clone_kernel_text(void)
432442
{
443+
/*
444+
* rodata is part of the kernel image and is normally
445+
* readable on the filesystem or on the web. But, do not
446+
* clone the areas past rodata, they might contain secrets.
447+
*/
433448
unsigned long start = PFN_ALIGN(_text);
434-
unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);
449+
unsigned long end = (unsigned long)__end_rodata_hpage_align;
435450

436451
if (!pti_kernel_image_global_ok())
437452
return;
438453

454+
pr_debug("mapping partial kernel image into user address space\n");
455+
456+
/*
457+
* Note that this will undo _some_ of the work that
458+
* pti_set_kernel_image_nonglobal() did to clear the
459+
* global bit.
460+
*/
439461
pti_clone_pmds(start, end, _PAGE_RW);
440462
}
441463

@@ -458,8 +480,6 @@ void pti_set_kernel_image_nonglobal(void)
458480
if (pti_kernel_image_global_ok())
459481
return;
460482

461-
pr_debug("set kernel image non-global\n");
462-
463483
set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
464484
}
465485

mm/mmap.c

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -100,11 +100,20 @@ pgprot_t protection_map[16] __ro_after_init = {
100100
__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
101101
};
102102

103+
#ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
104+
static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
105+
{
106+
return prot;
107+
}
108+
#endif
109+
103110
pgprot_t vm_get_page_prot(unsigned long vm_flags)
104111
{
105-
return __pgprot(pgprot_val(protection_map[vm_flags &
112+
pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
106113
(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
107114
pgprot_val(arch_vm_get_page_prot(vm_flags)));
115+
116+
return arch_filter_pgprot(ret);
108117
}
109118
EXPORT_SYMBOL(vm_get_page_prot);
110119

tools/testing/selftests/x86/test_syscall_vdso.c

Lines changed: 21 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -100,12 +100,19 @@ asm (
100100
" shl $32, %r8\n"
101101
" orq $0x7f7f7f7f, %r8\n"
102102
" movq %r8, %r9\n"
103-
" movq %r8, %r10\n"
104-
" movq %r8, %r11\n"
105-
" movq %r8, %r12\n"
106-
" movq %r8, %r13\n"
107-
" movq %r8, %r14\n"
108-
" movq %r8, %r15\n"
103+
" incq %r9\n"
104+
" movq %r9, %r10\n"
105+
" incq %r10\n"
106+
" movq %r10, %r11\n"
107+
" incq %r11\n"
108+
" movq %r11, %r12\n"
109+
" incq %r12\n"
110+
" movq %r12, %r13\n"
111+
" incq %r13\n"
112+
" movq %r13, %r14\n"
113+
" incq %r14\n"
114+
" movq %r14, %r15\n"
115+
" incq %r15\n"
109116
" ret\n"
110117
" .code32\n"
111118
" .popsection\n"
@@ -128,12 +135,13 @@ int check_regs64(void)
128135
int err = 0;
129136
int num = 8;
130137
uint64_t *r64 = &regs64.r8;
138+
uint64_t expected = 0x7f7f7f7f7f7f7f7fULL;
131139

132140
if (!kernel_is_64bit)
133141
return 0;
134142

135143
do {
136-
if (*r64 == 0x7f7f7f7f7f7f7f7fULL)
144+
if (*r64 == expected++)
137145
continue; /* register did not change */
138146
if (syscall_addr != (long)&int80) {
139147
/*
@@ -147,18 +155,17 @@ int check_regs64(void)
147155
continue;
148156
}
149157
} else {
150-
/* INT80 syscall entrypoint can be used by
158+
/*
159+
* INT80 syscall entrypoint can be used by
151160
* 64-bit programs too, unlike SYSCALL/SYSENTER.
152161
* Therefore it must preserve R12+
153162
* (they are callee-saved registers in 64-bit C ABI).
154163
*
155-
* This was probably historically not intended,
156-
* but R8..11 are clobbered (cleared to 0).
157-
* IOW: they are the only registers which aren't
158-
* preserved across INT80 syscall.
164+
* Starting in Linux 4.17 (and any kernel that
165+
* backports the change), R8..11 are preserved.
166+
* Historically (and probably unintentionally), they
167+
* were clobbered or zeroed.
159168
*/
160-
if (*r64 == 0 && num <= 11)
161-
continue;
162169
}
163170
printf("[FAIL]\tR%d has changed:%016llx\n", num, *r64);
164171
err++;

0 commit comments

Comments
 (0)