Skip to content

Commit cab15ce

Browse files
ctmarinas authored and wildea01 committed
arm64: Introduce execute-only page access permissions
The ARMv8 architecture allows execute-only user permissions by clearing the PTE_UXN and PTE_USER bits. However, the kernel running on a CPU implementation without User Access Override (ARMv8.2 onwards) can still access such page, so execute-only page permission does not protect against read(2)/write(2) etc. accesses. Systems requiring such protection must enable features like SECCOMP. This patch changes the arm64 __P100 and __S100 protection_map[] macros to the new __PAGE_EXECONLY attributes. A side effect is that pte_user() no longer triggers for __PAGE_EXECONLY since PTE_USER isn't set. To work around this, the check is done on the PTE_NG bit via the pte_ng() macro. VM_READ is also checked now for page faults. Reviewed-by: Will Deacon <will.deacon@arm.com> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
1 parent 7419333 commit cab15ce

File tree

4 files changed

+15
-10
lines changed

4 files changed

+15
-10
lines changed

arch/arm64/include/asm/pgtable-prot.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -70,12 +70,13 @@
7070
#define PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
7171
#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
7272
#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
73+
#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)
7374

7475
#define __P000 PAGE_NONE
7576
#define __P001 PAGE_READONLY
7677
#define __P010 PAGE_COPY
7778
#define __P011 PAGE_COPY
78-
#define __P100 PAGE_READONLY_EXEC
79+
#define __P100 PAGE_EXECONLY
7980
#define __P101 PAGE_READONLY_EXEC
8081
#define __P110 PAGE_COPY_EXEC
8182
#define __P111 PAGE_COPY_EXEC
@@ -84,7 +85,7 @@
8485
#define __S001 PAGE_READONLY
8586
#define __S010 PAGE_SHARED
8687
#define __S011 PAGE_SHARED
87-
#define __S100 PAGE_READONLY_EXEC
88+
#define __S100 PAGE_EXECONLY
8889
#define __S101 PAGE_READONLY_EXEC
8990
#define __S110 PAGE_SHARED_EXEC
9091
#define __S111 PAGE_SHARED_EXEC

arch/arm64/include/asm/pgtable.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
7373
#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
7474
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
7575
#define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
76-
#define pte_user(pte) (!!(pte_val(pte) & PTE_USER))
76+
#define pte_ng(pte) (!!(pte_val(pte) & PTE_NG))
7777

7878
#ifdef CONFIG_ARM64_HW_AFDBM
7979
#define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
@@ -84,8 +84,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
8484
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
8585

8686
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
87-
#define pte_valid_not_user(pte) \
88-
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
87+
#define pte_valid_global(pte) \
88+
((pte_val(pte) & (PTE_VALID | PTE_NG)) == PTE_VALID)
8989
#define pte_valid_young(pte) \
9090
((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
9191

@@ -168,7 +168,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
168168
* Only if the new pte is valid and kernel, otherwise TLB maintenance
169169
* or update_mmu_cache() have the necessary barriers.
170170
*/
171-
if (pte_valid_not_user(pte)) {
171+
if (pte_valid_global(pte)) {
172172
dsb(ishst);
173173
isb();
174174
}
@@ -202,7 +202,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
202202
pte_val(pte) &= ~PTE_RDONLY;
203203
else
204204
pte_val(pte) |= PTE_RDONLY;
205-
if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
205+
if (pte_ng(pte) && pte_exec(pte) && !pte_special(pte))
206206
__sync_icache_dcache(pte, addr);
207207
}
208208

arch/arm64/mm/fault.c

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -251,8 +251,7 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
251251
good_area:
252252
/*
253253
* Check that the permissions on the VMA allow for the fault which
254-
* occurred. If we encountered a write or exec fault, we must have
255-
* appropriate permissions, otherwise we allow any permission.
254+
* occurred.
256255
*/
257256
if (!(vma->vm_flags & vm_flags)) {
258257
fault = VM_FAULT_BADACCESS;
@@ -288,7 +287,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
288287
struct task_struct *tsk;
289288
struct mm_struct *mm;
290289
int fault, sig, code;
291-
unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
290+
unsigned long vm_flags = VM_READ | VM_WRITE;
292291
unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
293292

294293
if (notify_page_fault(regs, esr))

mm/mmap.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -88,6 +88,11 @@ static void unmap_region(struct mm_struct *mm,
8888
* w: (no) no w: (no) no w: (copy) copy w: (no) no
8989
* x: (no) no x: (no) yes x: (no) yes x: (yes) yes
9090
*
91+
* On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
92+
* MAP_PRIVATE:
93+
* r: (no) no
94+
* w: (no) no
95+
* x: (yes) yes
9196
*/
9297
pgprot_t protection_map[16] = {
9398
__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,

0 commit comments

Comments
 (0)