Skip to content

Commit 1e6b481

Browse files
committed
ARM: mm: allow non-text sections to be non-executable
Adds CONFIG_ARM_KERNMEM_PERMS to separate the kernel memory regions into section-sized areas that can have different permissions. Performs the NX permission changes during free_initmem, so that init memory can be reclaimed. This uses section size instead of PMD size to reduce memory lost to padding on non-LPAE systems. Based on work by Brad Spengler, Larry Bassel, and Laura Abbott. Signed-off-by: Kees Cook <keescook@chromium.org> Tested-by: Laura Abbott <lauraa@codeaurora.org> Acked-by: Nicolas Pitre <nico@linaro.org>
1 parent 23a4e40 commit 1e6b481

File tree

4 files changed

+133
-2
lines changed

4 files changed

+133
-2
lines changed

arch/arm/kernel/vmlinux.lds.S

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,9 @@
88
#include <asm/thread_info.h>
99
#include <asm/memory.h>
1010
#include <asm/page.h>
11+
#ifdef CONFIG_ARM_KERNMEM_PERMS
12+
#include <asm/pgtable.h>
13+
#endif
1114

1215
#define PROC_INFO \
1316
. = ALIGN(4); \
@@ -90,6 +93,11 @@ SECTIONS
9093
_text = .;
9194
HEAD_TEXT
9295
}
96+
97+
#ifdef CONFIG_ARM_KERNMEM_PERMS
98+
. = ALIGN(1<<SECTION_SHIFT);
99+
#endif
100+
93101
.text : { /* Real text segment */
94102
_stext = .; /* Text and read-only data */
95103
__exception_text_start = .;
@@ -145,7 +153,11 @@ SECTIONS
145153
_etext = .; /* End of text and rodata section */
146154

147155
#ifndef CONFIG_XIP_KERNEL
156+
# ifdef CONFIG_ARM_KERNMEM_PERMS
157+
. = ALIGN(1<<SECTION_SHIFT);
158+
# else
148159
. = ALIGN(PAGE_SIZE);
160+
# endif
149161
__init_begin = .;
150162
#endif
151163
/*
@@ -220,7 +232,11 @@ SECTIONS
220232
. = PAGE_OFFSET + TEXT_OFFSET;
221233
#else
222234
__init_end = .;
235+
#ifdef CONFIG_ARM_KERNMEM_PERMS
236+
. = ALIGN(1<<SECTION_SHIFT);
237+
#else
223238
. = ALIGN(THREAD_SIZE);
239+
#endif
224240
__data_loc = .;
225241
#endif
226242

arch/arm/mm/Kconfig

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1008,3 +1008,12 @@ config ARCH_SUPPORTS_BIG_ENDIAN
10081008
help
10091009
This option specifies the architecture can support big endian
10101010
operation.
1011+
1012+
config ARM_KERNMEM_PERMS
1013+
bool "Restrict kernel memory permissions"
1014+
help
1015+
If this is set, kernel memory other than kernel text (and rodata)
1016+
will be made non-executable. The tradeoff is that each region is
1017+
padded to section-size (1MiB) boundaries (because their permissions
1018+
are different and splitting the 1M pages into 4K ones causes TLB
1019+
performance problems), wasting memory.

arch/arm/mm/init.c

Lines changed: 100 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@
2929
#include <asm/prom.h>
3030
#include <asm/sections.h>
3131
#include <asm/setup.h>
32+
#include <asm/system_info.h>
3233
#include <asm/tlb.h>
3334
#include <asm/fixmap.h>
3435

@@ -615,14 +616,112 @@ void __init mem_init(void)
615616
}
616617
}
617618

618-
void free_initmem(void)
619+
#ifdef CONFIG_ARM_KERNMEM_PERMS
620+
struct section_perm {
621+
unsigned long start;
622+
unsigned long end;
623+
pmdval_t mask;
624+
pmdval_t prot;
625+
};
626+
627+
struct section_perm nx_perms[] = {
628+
/* Make pages tables, etc before _stext RW (set NX). */
629+
{
630+
.start = PAGE_OFFSET,
631+
.end = (unsigned long)_stext,
632+
.mask = ~PMD_SECT_XN,
633+
.prot = PMD_SECT_XN,
634+
},
635+
/* Make init RW (set NX). */
636+
{
637+
.start = (unsigned long)__init_begin,
638+
.end = (unsigned long)_sdata,
639+
.mask = ~PMD_SECT_XN,
640+
.prot = PMD_SECT_XN,
641+
},
642+
};
643+
644+
/*
645+
* Updates section permissions only for the current mm (sections are
646+
* copied into each mm). During startup, this is the init_mm. Is only
647+
* safe to be called with preemption disabled, as under stop_machine().
648+
*/
649+
static inline void section_update(unsigned long addr, pmdval_t mask,
650+
pmdval_t prot)
651+
{
652+
struct mm_struct *mm;
653+
pmd_t *pmd;
654+
655+
mm = current->active_mm;
656+
pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
657+
658+
#ifdef CONFIG_ARM_LPAE
659+
pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
660+
#else
661+
if (addr & SECTION_SIZE)
662+
pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
663+
else
664+
pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
665+
#endif
666+
flush_pmd_entry(pmd);
667+
local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
668+
}
669+
670+
/* Make sure extended page tables are in use. */
671+
static inline bool arch_has_strict_perms(void)
672+
{
673+
if (cpu_architecture() < CPU_ARCH_ARMv6)
674+
return false;
675+
676+
return !!(get_cr() & CR_XP);
677+
}
678+
679+
#define set_section_perms(perms, field) { \
680+
size_t i; \
681+
unsigned long addr; \
682+
\
683+
if (!arch_has_strict_perms()) \
684+
return; \
685+
\
686+
for (i = 0; i < ARRAY_SIZE(perms); i++) { \
687+
if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || \
688+
!IS_ALIGNED(perms[i].end, SECTION_SIZE)) { \
689+
pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
690+
perms[i].start, perms[i].end, \
691+
SECTION_SIZE); \
692+
continue; \
693+
} \
694+
\
695+
for (addr = perms[i].start; \
696+
addr < perms[i].end; \
697+
addr += SECTION_SIZE) \
698+
section_update(addr, perms[i].mask, \
699+
perms[i].field); \
700+
} \
701+
}
702+
703+
static inline void fix_kernmem_perms(void)
704+
{
705+
set_section_perms(nx_perms, prot);
706+
}
707+
#else
708+
static inline void fix_kernmem_perms(void) { }
709+
#endif /* CONFIG_ARM_KERNMEM_PERMS */
710+
711+
void free_tcmmem(void)
619712
{
620713
#ifdef CONFIG_HAVE_TCM
621714
extern char __tcm_start, __tcm_end;
622715

623716
poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
624717
free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
625718
#endif
719+
}
720+
721+
void free_initmem(void)
722+
{
723+
fix_kernmem_perms();
724+
free_tcmmem();
626725

627726
poison_init_mem(__init_begin, __init_end - __init_begin);
628727
if (!machine_is_integrator() && !machine_is_cintegrator())

arch/arm/mm/mmu.c

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1373,12 +1373,19 @@ static void __init map_lowmem(void)
13731373
if (start >= end)
13741374
break;
13751375

1376-
if (end < kernel_x_start || start >= kernel_x_end) {
1376+
if (end < kernel_x_start) {
13771377
map.pfn = __phys_to_pfn(start);
13781378
map.virtual = __phys_to_virt(start);
13791379
map.length = end - start;
13801380
map.type = MT_MEMORY_RWX;
13811381

1382+
create_mapping(&map);
1383+
} else if (start >= kernel_x_end) {
1384+
map.pfn = __phys_to_pfn(start);
1385+
map.virtual = __phys_to_virt(start);
1386+
map.length = end - start;
1387+
map.type = MT_MEMORY_RW;
1388+
13821389
create_mapping(&map);
13831390
} else {
13841391
/* This better cover the entire kernel */

0 commit comments

Comments
 (0)