Skip to content

Commit 57e741b

Browse files
Sai Praneeth Prakhya authored and Ard Biesheuvel committed
x86/mm/pageattr: Introduce helper function to unmap EFI boot services
Ideally, after the kernel assumes control of the platform, firmware shouldn't access EFI boot services code/data regions. But it has been noticed that this is not so true on many x86 platforms. Hence, during boot, the kernel reserves EFI boot services code/data regions [1] and maps [2] them to efi_pgd so that the call to set_virtual_address_map() doesn't fail. After returning from set_virtual_address_map(), the kernel frees the reserved regions [3] but they still remain mapped. Hence, introduce kernel_unmap_pages_in_pgd() which will later be used to unmap EFI boot services code/data regions. While at it, modify kernel_map_pages_in_pgd() by: 1. Adding the __init modifier because it's always used *only* during boot. 2. Adding a warning if it's used after SMP is initialized because it uses __flush_tlb_all(), which flushes mappings only on the current CPU. Unmapping EFI boot services code/data regions will result in clearing the PAGE_PRESENT bit, and it shouldn't bother L1TF cases because that is already handled by protnone_mask() at arch/x86/include/asm/pgtable-invert.h. [1] efi_reserve_boot_services() [2] efi_map_region() -> __map_region() -> kernel_map_pages_in_pgd() [3] efi_free_boot_services() Signed-off-by: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com> Cc: Borislav Petkov <bp@alien8.de> Cc: Ingo Molnar <mingo@kernel.org> Cc: Andy Lutomirski <luto@kernel.org> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Bhupesh Sharma <bhsharma@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org> Reviewed-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
1 parent 971d38b commit 57e741b

File tree

2 files changed

+44
-4
lines changed

2 files changed

+44
-4
lines changed

arch/x86/include/asm/pgtable_types.h

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -564,8 +564,12 @@ extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
564564
unsigned int *level);
565565
extern pmd_t *lookup_pmd_address(unsigned long address);
566566
extern phys_addr_t slow_virt_to_phys(void *__address);
567-
extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
568-
unsigned numpages, unsigned long page_flags);
567+
extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
568+
unsigned long address,
569+
unsigned numpages,
570+
unsigned long page_flags);
571+
extern int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
572+
unsigned long numpages);
569573
#endif /* !__ASSEMBLY__ */
570574

571575
#endif /* _ASM_X86_PGTABLE_DEFS_H */

arch/x86/mm/pageattr.c

Lines changed: 38 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2338,8 +2338,8 @@ bool kernel_page_present(struct page *page)
23382338

23392339
#endif /* CONFIG_DEBUG_PAGEALLOC */
23402340

2341-
int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
2342-
unsigned numpages, unsigned long page_flags)
2341+
int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
2342+
unsigned numpages, unsigned long page_flags)
23432343
{
23442344
int retval = -EINVAL;
23452345

@@ -2353,6 +2353,8 @@ int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
23532353
.flags = 0,
23542354
};
23552355

2356+
WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");
2357+
23562358
if (!(__supported_pte_mask & _PAGE_NX))
23572359
goto out;
23582360

@@ -2374,6 +2376,40 @@ int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
23742376
return retval;
23752377
}
23762378

2379+
/*
2380+
* __flush_tlb_all() flushes mappings only on current CPU and hence this
2381+
* function shouldn't be used in an SMP environment. Presently, it's used only
2382+
* during boot (way before smp_init()) by EFI subsystem and hence is ok.
2383+
*/
2384+
int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
2385+
unsigned long numpages)
2386+
{
2387+
int retval;
2388+
2389+
/*
2390+
* The typical sequence for unmapping is to find a pte through
2391+
* lookup_address_in_pgd() (ideally, it should never return NULL because
2392+
* the address is already mapped) and change it's protections. As pfn is
2393+
* the *target* of a mapping, it's not useful while unmapping.
2394+
*/
2395+
struct cpa_data cpa = {
2396+
.vaddr = &address,
2397+
.pfn = 0,
2398+
.pgd = pgd,
2399+
.numpages = numpages,
2400+
.mask_set = __pgprot(0),
2401+
.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
2402+
.flags = 0,
2403+
};
2404+
2405+
WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");
2406+
2407+
retval = __change_page_attr_set_clr(&cpa, 0);
2408+
__flush_tlb_all();
2409+
2410+
return retval;
2411+
}
2412+
23772413
/*
23782414
* The testcases use internal knowledge of the implementation that shouldn't
23792415
* be exposed to the rest of the kernel. Include these directly here.

0 commit comments

Comments
 (0)