@@ -38,6 +38,7 @@
 
 #include <asm/cpufeature.h>
 #include <asm/hypervisor.h>
+#include <asm/vsyscall.h>
 #include <asm/cmdline.h>
 #include <asm/pti.h>
 #include <asm/pgtable.h>
@@ -223,6 +224,69 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
 	return pmd_offset(pud, address);
 }
 
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
+/*
+ * Walk the shadow copy of the page tables (optionally) trying to allocate
+ * page table pages on the way down. Does not support large pages.
+ *
+ * Note: this is only used when mapping *new* kernel data into the
+ * user/shadow page tables. It is never used for userspace data.
+ *
+ * Returns a pointer to a PTE on success, or NULL on failure.
+ */
+static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+{
+	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+	pmd_t *pmd = pti_user_pagetable_walk_pmd(address);
+	pte_t *pte;
+
+	/* We can't do anything sensible if we hit a large mapping. */
+	if (pmd_large(*pmd)) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	if (pmd_none(*pmd)) {
+		unsigned long new_pte_page = __get_free_page(gfp);
+		if (!new_pte_page)
+			return NULL;
+
+		if (pmd_none(*pmd)) {
+			set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
+			new_pte_page = 0;
+		}
+		if (new_pte_page)
+			free_page(new_pte_page);
+	}
+
+	pte = pte_offset_kernel(pmd, address);
+	if (pte_flags(*pte) & _PAGE_USER) {
+		WARN_ONCE(1, "attempt to walk to user pte\n");
+		return NULL;
+	}
+	return pte;
+}
+
+static void __init pti_setup_vsyscall(void)
+{
+	pte_t *pte, *target_pte;
+	unsigned int level;
+
+	pte = lookup_address(VSYSCALL_ADDR, &level);
+	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
+		return;
+
+	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
+	if (WARN_ON(!target_pte))
+		return;
+
+	*target_pte = *pte;
+	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
+}
+#else
+static void __init pti_setup_vsyscall(void) { }
+#endif
+
 static void __init
 pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
 {
@@ -319,4 +383,5 @@ void __init pti_init(void)
 	pti_clone_user_shared();
 	pti_clone_entry_text();
 	pti_setup_espfix64();
+	pti_setup_vsyscall();
 }
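
As a quick sanity check (not part of this commit), the legacy vsyscall path can be exercised from userspace once PTI is active: with vsyscall emulation enabled, a call into the fixed legacy vsyscall address should still return a valid time rather than crashing. A minimal sketch, assuming the architectural vsyscall base 0xffffffffff600000 and a kernel booted with vsyscall emulation (the default in this era):

#include <stdio.h>
#include <sys/time.h>

/*
 * Legacy vsyscall gettimeofday lives at the start of the fixed vsyscall
 * page. Under emulation the call traps into the kernel and is emulated,
 * so it should still succeed with PTI enabled.
 */
typedef int (*vgtod_t)(struct timeval *tv, struct timezone *tz);

int main(void)
{
	vgtod_t vgtod = (vgtod_t)0xffffffffff600000UL;
	struct timeval tv;

	if (vgtod(&tv, NULL) != 0) {
		fprintf(stderr, "vsyscall gettimeofday failed\n");
		return 1;
	}
	printf("vsyscall gettimeofday: %ld.%06ld\n",
	       (long)tv.tv_sec, (long)tv.tv_usec);
	return 0;
}

With vsyscall=none the call is expected to fault instead; the test is only meaningful in native or emulate mode.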