
Commit 021182e

Thomas Garnier authored and Ingo Molnar committed
x86/mm: Enable KASLR for physical mapping memory regions
Add the physical mapping to the list of randomized memory regions.

The physical memory mapping holds most allocations from boot and heap
allocators. Knowing the base address and physical memory size, an
attacker can deduce the PDE virtual address for the vDSO memory page.

This attack was demonstrated at CanSecWest 2016, in the following
presentation:

  "Getting Physical: Extreme Abuse of Intel Based Paged Systems"
  https://github.com/n3k/CansecWest2016_Getting_Physical_Extreme_Abuse_of_Intel_Based_Paging_Systems/blob/master/Presentation/CanSec2016_Presentation.pdf

(See the second part of the presentation.)

The exploits used against Linux worked successfully against 4.6+, but
fail with KASLR memory randomization enabled:

  https://github.com/n3k/CansecWest2016_Getting_Physical_Extreme_Abuse_of_Intel_Based_Paging_Systems/tree/master/Demos/Linux/exploits

Similar research was done at Google, leading to this patch proposal.
Variants exist that overwrite /proc or /sys object ACLs, leading to
elevation of privileges. These variants were also tested against 4.6+.

The page offset used by the compressed kernel retains its static value,
since it is not yet randomized during this boot stage.

Signed-off-by: Thomas Garnier <thgarnie@google.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Kuleshov <kuleshovmail@gmail.com>
Cc: Alexander Popov <alpopov@ptsecurity.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bp@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Lv Zheng <lv.zheng@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephen Smalley <sds@tycho.nsa.gov>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: kernel-hardening@lists.openwall.com
Cc: linux-doc@vger.kernel.org
Link: http://lkml.kernel.org/r/1466556426-32664-7-git-send-email-keescook@chromium.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent: 0483e1f · commit: 021182e
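The attack premise described in the commit message can be sketched in a few lines: with a fixed __PAGE_OFFSET, the direct physical mapping turns any known physical address into a predictable kernel virtual address. A minimal userspace illustration (the physical address below is hypothetical, and the arithmetic merely stands in for what an exploit computes):

#include <stdio.h>

int main(void)
{
	/* Fixed pre-KASLR base of the physical mapping on x86-64 */
	unsigned long page_offset = 0xffff880000000000UL;

	/* Hypothetical physical address of a page-table page the
	 * attacker has located (e.g. the one holding the vDSO PDE) */
	unsigned long pte_phys = 0x1d4c5000UL;

	/* phys -> virt in the direct mapping is a constant offset,
	 * so the kernel virtual address is fully predictable */
	printf("predicted vaddr: 0x%lx\n", page_offset + pte_phys);
	return 0;
}

Randomizing page_offset_base removes exactly this predictability.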

File tree: 5 files changed, +31 −5 lines


arch/x86/boot/compressed/pagetable.c

Lines changed: 3 additions & 0 deletions

@@ -20,6 +20,9 @@
 /* These actually do the work of building the kernel identity maps. */
 #include <asm/init.h>
 #include <asm/pgtable.h>
+/* Use the static base for this part of the boot process */
+#undef __PAGE_OFFSET
+#define __PAGE_OFFSET __PAGE_OFFSET_BASE
 #include "../../mm/ident_map.c"

 /* Used by pgtable.h asm code to force instruction serialization. */
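This hunk works because the decompressor includes mm/ident_map.c textually, so whatever __PAGE_OFFSET expands to at that point is what the shared code compiles against. A standalone sketch of the override-before-include pattern (not kernel code; names are illustrative):

#include <stdio.h>

unsigned long page_offset_base;            /* runtime value, set later */

#define PAGE_OFFSET_BASE 0xffff880000000000UL
#define PAGE_OFFSET page_offset_base       /* the normal, randomized view */

/* Pin the macro to the static base, as the decompressor does, before
 * pulling in code that is shared with the main kernel */
#undef  PAGE_OFFSET
#define PAGE_OFFSET PAGE_OFFSET_BASE

/* Stands in for the textually included ident_map.c */
static unsigned long demo_phys_to_virt(unsigned long phys)
{
	return phys + PAGE_OFFSET;         /* expands to the constant */
}

int main(void)
{
	printf("0x%lx\n", demo_phys_to_virt(0x1000UL));
	return 0;
}

At this stage of boot the page_offset_base variable has not been set (or even relocated) yet, so only the compile-time constant is usable.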

arch/x86/include/asm/kaslr.h

Lines changed: 2 additions & 0 deletions

@@ -4,6 +4,8 @@
 unsigned long kaslr_get_random_long(const char *purpose);

 #ifdef CONFIG_RANDOMIZE_MEMORY
+extern unsigned long page_offset_base;
+
 void kernel_randomize_memory(void);
 #else
 static inline void kernel_randomize_memory(void) { }
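page_offset_base has to be an ordinary visible symbol (and, as the kaslr.c hunk below shows, EXPORT_SYMBOLed) because __PAGE_OFFSET is consumed by macros such as __va() that expand at every translation site. A userspace sketch, with simplified stand-ins for the kernel macros:

#include <stdio.h>

unsigned long page_offset_base = 0xffff880000000000UL;

#define PAGE_OFFSET page_offset_base
/* Mirrors the shape of the kernel's __va(): phys + PAGE_OFFSET */
#define demo_va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))

int main(void)
{
	/* Pretend KASLR shifted the base by 1 GB at boot */
	page_offset_base += 0x40000000UL;

	/* Every expansion of demo_va() now reads the moved base */
	printf("virt for phys 0x1000: %p\n", demo_va(0x1000UL));
	return 0;
}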

arch/x86/include/asm/page_64_types.h

Lines changed: 10 additions & 1 deletion

@@ -1,6 +1,10 @@
 #ifndef _ASM_X86_PAGE_64_DEFS_H
 #define _ASM_X86_PAGE_64_DEFS_H

+#ifndef __ASSEMBLY__
+#include <asm/kaslr.h>
+#endif
+
 #ifdef CONFIG_KASAN
 #define KASAN_STACK_ORDER 1
 #else
@@ -32,7 +36,12 @@
  * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
  * what Xen requires.
  */
-#define __PAGE_OFFSET		_AC(0xffff880000000000, UL)
+#define __PAGE_OFFSET_BASE	_AC(0xffff880000000000, UL)
+#ifdef CONFIG_RANDOMIZE_MEMORY
+#define __PAGE_OFFSET		page_offset_base
+#else
+#define __PAGE_OFFSET		__PAGE_OFFSET_BASE
+#endif /* CONFIG_RANDOMIZE_MEMORY */

 #define __START_KERNEL_map	_AC(0xffffffff80000000, UL)
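The #ifndef __ASSEMBLY__ guard matters because, once CONFIG_RANDOMIZE_MEMORY makes __PAGE_OFFSET expand to a variable, it is no longer a constant expression; any context that needs a build-time value must use __PAGE_OFFSET_BASE instead. A small userspace illustration, with assumed DEMO_* macro names:

unsigned long page_offset_base = 0xffff880000000000UL;

#define DEMO_PAGE_OFFSET_BASE 0xffff880000000000UL
#define DEMO_PAGE_OFFSET      page_offset_base

/* static unsigned long bad = DEMO_PAGE_OFFSET;    <- would not compile:
 * a file-scope initializer must be a constant expression */
static unsigned long good = DEMO_PAGE_OFFSET_BASE; /* fine: a literal */

int main(void)
{
	return good == page_offset_base ? 0 : 1;
}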

arch/x86/kernel/head_64.S

Lines changed: 1 addition & 1 deletion

@@ -38,7 +38,7 @@

 #define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

-L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
+L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
 L3_START_KERNEL = pud_index(__START_KERNEL_map)
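head_64.S switches to __PAGE_OFFSET_BASE for the same reason: L4_PAGE_OFFSET must be evaluated while assembling, and the early boot page tables it selects are built before kernel_randomize_memory() runs. The slot it picks can be checked by hand; a small C rendition of the pgd_index() arithmetic, using the standard 4-level x86-64 constants:

#include <stdio.h>

#define PGDIR_SHIFT  39                /* 4-level paging */
#define PTRS_PER_PGD 512
#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

int main(void)
{
	/* The static direct-mapping base always lands in PGD slot 272,
	 * which is what the early boot code can safely hard-code */
	printf("L4_PAGE_OFFSET = %lu\n", pgd_index(0xffff880000000000UL));
	return 0;
}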

arch/x86/mm/kaslr.c

Lines changed: 15 additions & 3 deletions

@@ -43,8 +43,12 @@
  * before. You also need to add a BUILD_BUG_ON in kernel_randomize_memory to
  * ensure that this order is correct and won't be changed.
  */
-static const unsigned long vaddr_start;
-static const unsigned long vaddr_end;
+static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
+static const unsigned long vaddr_end = VMALLOC_START;
+
+/* Default values */
+unsigned long page_offset_base = __PAGE_OFFSET_BASE;
+EXPORT_SYMBOL(page_offset_base);

 /*
  * Memory regions randomized by KASLR (except modules that use a separate logic
@@ -55,6 +59,7 @@ static __initdata struct kaslr_memory_region {
 	unsigned long *base;
 	unsigned long size_tb;
 } kaslr_regions[] = {
+	{ &page_offset_base, 64/* Maximum */ },
 };

 /* Get size in bytes used by the memory region */
@@ -77,13 +82,20 @@ void __init kernel_randomize_memory(void)
 {
 	size_t i;
 	unsigned long vaddr = vaddr_start;
-	unsigned long rand;
+	unsigned long rand, memory_tb;
 	struct rnd_state rand_state;
 	unsigned long remain_entropy;

 	if (!kaslr_memory_enabled())
 		return;

+	BUG_ON(kaslr_regions[0].base != &page_offset_base);
+	memory_tb = ((max_pfn << PAGE_SHIFT) >> TB_SHIFT);
+
+	/* Adapt physical memory region size based on available memory */
+	if (memory_tb < kaslr_regions[0].size_tb)
+		kaslr_regions[0].size_tb = memory_tb;
+
 	/* Calculate entropy available between regions */
 	remain_entropy = vaddr_end - vaddr_start;
 	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
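The sizing and entropy arithmetic in this hunk is easy to verify in isolation. A userspace sketch using the 4-level x86-64 constants this commit relies on; the RAM figure below is made up, and the real code derives memory_tb from max_pfn and draws randomness from a seeded rnd_state:

#include <stdio.h>

#define TB_SHIFT 40

int main(void)
{
	unsigned long vaddr_start = 0xffff880000000000UL; /* __PAGE_OFFSET_BASE */
	unsigned long vaddr_end   = 0xffffc90000000000UL; /* VMALLOC_START */
	unsigned long size_tb     = 64;                   /* region maximum */
	unsigned long memory_tb   = 8;                    /* pretend 8 TB of RAM */

	/* Adapt the region size to the memory actually present */
	if (memory_tb < size_tb)
		size_tb = memory_tb;

	/* Entropy is whatever the region leaves unused in the range */
	unsigned long remain = (vaddr_end - vaddr_start) - (size_tb << TB_SHIFT);

	printf("region: %lu TB, entropy: %lu TB\n", size_tb, remain >> TB_SHIFT);
	return 0;
}

With these numbers the 65 TB range leaves 57 TB of slack, which the loop that follows divides among the randomized regions.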
