
Commit f59dbe9

joergroedel authored and KAGA-KOKO committed
x86/pgtable/pae: Use separate kernel PMDs for user page-table
When PTI is enabled, separate kernel PMDs in the user page-table are
required to map the per-process LDT for user-space.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Pavel Machek <pavel@ucw.cz>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: linux-mm@kvack.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Waiman Long <llong@redhat.com>
Cc: "David H . Gutteridge" <dhgutteridge@sympatico.ca>
Cc: joro@8bytes.org
Link: https://lkml.kernel.org/r/1531906876-13451-33-git-send-email-joro@8bytes.org
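Background note (not part of the commit message): with PTI each mm carries two
page-table roots, a kernel one and a user one, and it is the kernel portion of
the *user* root that needs its own PMD pages here. The stand-alone C sketch
below models that twin-root layout. It is illustrative, not kernel code: the
PTRS_PER_PGD and KERNEL_PGD_BOUNDARY values assume 32-bit PAE with the default
3G/1G split, and modelling kernel_to_user_pgdp() as "the next PTRS_PER_PGD
entries" is a simplifying assumption about the real twin-PGD allocation.

/*
 * Stand-alone model of the twin-PGD layout PTI uses (illustrative
 * only). The user root is modeled as sitting right behind the
 * kernel root, approximating what kernel_to_user_pgdp() does on
 * the real two-page PGD allocation.
 */
#include <stdio.h>
#include <stdlib.h>

#define PTRS_PER_PGD		4	/* PAE: four top-level entries */
#define KERNEL_PGD_BOUNDARY	3	/* entries >= this map the kernel */
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

typedef unsigned long pgd_t;

static pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
	return pgdp + PTRS_PER_PGD;	/* model: user root follows kernel root */
}

int main(void)
{
	/* One allocation carries both roots, as with PTI. */
	pgd_t *k_pgd = calloc(2 * PTRS_PER_PGD, sizeof(pgd_t));
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	int i;

	/*
	 * What this patch arranges: the kernel slots of the *user*
	 * root get their own preallocated PMDs (fake values here),
	 * so the per-process LDT can be mapped into the user
	 * page-table.
	 */
	for (i = 0; i < KERNEL_PGD_PTRS; i++)
		u_pgd[KERNEL_PGD_BOUNDARY + i] = 0x1000UL * (i + 1);

	for (i = 0; i < PTRS_PER_PGD; i++)
		printf("user pgd[%d] = %#lx\n", i, u_pgd[i]);

	free(k_pgd);
	return 0;
}

With PAE's four top-level entries and a boundary of 3, KERNEL_PGD_PTRS is 1,
so PREALLOCATED_USER_PMDS costs at most one extra page per process when PTI
is enabled (more with other memory splits).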
1 parent 4e8537e commit f59dbe9


arch/x86/mm/pgtable.c

Lines changed: 81 additions & 19 deletions
@@ -182,6 +182,14 @@ static void pgd_dtor(pgd_t *pgd)
  */
 #define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
 
+/*
+ * We allocate separate PMDs for the kernel part of the user page-table
+ * when PTI is enabled. We need them to map the per-process LDT into the
+ * user-space page-table.
+ */
+#define PREALLOCATED_USER_PMDS	(static_cpu_has(X86_FEATURE_PTI) ? \
+					KERNEL_PGD_PTRS : 0)
+
 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 {
 	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
@@ -202,22 +210,22 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 
 /* No need to prepopulate any pagetable entries in non-PAE modes. */
 #define PREALLOCATED_PMDS	0
-
+#define PREALLOCATED_USER_PMDS	0
 #endif	/* CONFIG_X86_PAE */
 
-static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
+static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
 {
 	int i;
 
-	for(i = 0; i < PREALLOCATED_PMDS; i++)
+	for (i = 0; i < count; i++)
 		if (pmds[i]) {
 			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
 			free_page((unsigned long)pmds[i]);
 			mm_dec_nr_pmds(mm);
 		}
 }
 
-static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
+static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
 {
 	int i;
 	bool failed = false;
@@ -226,7 +234,7 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
 	if (mm == &init_mm)
 		gfp &= ~__GFP_ACCOUNT;
 
-	for(i = 0; i < PREALLOCATED_PMDS; i++) {
+	for (i = 0; i < count; i++) {
 		pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
 		if (!pmd)
 			failed = true;
@@ -241,7 +249,7 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
 	}
 
 	if (failed) {
-		free_pmds(mm, pmds);
+		free_pmds(mm, pmds, count);
 		return -ENOMEM;
 	}
 
@@ -254,23 +262,38 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
  * preallocate which never got a corresponding vma will need to be
  * freed manually.
  */
+static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
+{
+	pgd_t pgd = *pgdp;
+
+	if (pgd_val(pgd) != 0) {
+		pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
+
+		*pgdp = native_make_pgd(0);
+
+		paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
+		pmd_free(mm, pmd);
+		mm_dec_nr_pmds(mm);
+	}
+}
+
 static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
 {
 	int i;
 
-	for(i = 0; i < PREALLOCATED_PMDS; i++) {
-		pgd_t pgd = pgdp[i];
+	for (i = 0; i < PREALLOCATED_PMDS; i++)
+		mop_up_one_pmd(mm, &pgdp[i]);
 
-		if (pgd_val(pgd) != 0) {
-			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
 
-			pgdp[i] = native_make_pgd(0);
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
 
-			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
-			pmd_free(mm, pmd);
-			mm_dec_nr_pmds(mm);
-		}
-	}
+	pgdp = kernel_to_user_pgdp(pgdp);
+
+	for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
+		mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
+#endif
 }
 
 static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
@@ -296,6 +319,38 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
 	}
 }
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
+				     pgd_t *k_pgd, pmd_t *pmds[])
+{
+	pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir);
+	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
+	p4d_t *u_p4d;
+	pud_t *u_pud;
+	int i;
+
+	u_p4d = p4d_offset(u_pgd, 0);
+	u_pud = pud_offset(u_p4d, 0);
+
+	s_pgd += KERNEL_PGD_BOUNDARY;
+	u_pud += KERNEL_PGD_BOUNDARY;
+
+	for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
+		pmd_t *pmd = pmds[i];
+
+		memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd),
+		       sizeof(pmd_t) * PTRS_PER_PMD);
+
+		pud_populate(mm, u_pud, pmd);
+	}
+
+}
+#else
+static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
+				     pgd_t *k_pgd, pmd_t *pmds[])
+{
+}
+#endif
 /*
  * Xen paravirt assumes pgd table should be in one page. 64 bit kernel also
  * assumes that pgd should be in one page.
@@ -376,6 +431,7 @@ static inline void _pgd_free(pgd_t *pgd)
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *pgd;
+	pmd_t *u_pmds[PREALLOCATED_USER_PMDS];
 	pmd_t *pmds[PREALLOCATED_PMDS];
 
 	pgd = _pgd_alloc();
@@ -385,12 +441,15 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 
 	mm->pgd = pgd;
 
-	if (preallocate_pmds(mm, pmds) != 0)
+	if (preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
 		goto out_free_pgd;
 
-	if (paravirt_pgd_alloc(mm) != 0)
+	if (preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
 		goto out_free_pmds;
 
+	if (paravirt_pgd_alloc(mm) != 0)
+		goto out_free_user_pmds;
+
 	/*
 	 * Make sure that pre-populating the pmds is atomic with
 	 * respect to anything walking the pgd_list, so that they
@@ -400,13 +459,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 
 	pgd_ctor(mm, pgd);
 	pgd_prepopulate_pmd(mm, pgd, pmds);
+	pgd_prepopulate_user_pmd(mm, pgd, u_pmds);
 
 	spin_unlock(&pgd_lock);
 
 	return pgd;
 
+out_free_user_pmds:
+	free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
 out_free_pmds:
-	free_pmds(mm, pmds);
+	free_pmds(mm, pmds, PREALLOCATED_PMDS);
 out_free_pgd:
 	_pgd_free(pgd);
 out:
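One detail worth noting in the final pgd_alloc() flow: paravirt_pgd_alloc()
now runs after both preallocations, and the new out_free_user_pmds label
keeps the unwind in strict reverse order of allocation. Below is a
stand-alone sketch of that goto-unwind shape; the helper names are
stand-ins for illustration, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in allocators; flip the return values to exercise the unwind. */
static bool alloc_kernel_pmds(void) { return true; }
static bool alloc_user_pmds(void)   { return true; }
static bool paravirt_setup(void)    { return false; }

static void free_user_pmds(void)    { puts("unwound user PMDs"); }
static void free_kernel_pmds(void)  { puts("unwound kernel PMDs"); }

static int pgd_alloc_model(void)
{
	if (!alloc_kernel_pmds())
		goto out;
	if (!alloc_user_pmds())
		goto out_free_pmds;
	if (!paravirt_setup())
		goto out_free_user_pmds;

	return 0;

	/* Labels release resources in reverse order of acquisition. */
out_free_user_pmds:
	free_user_pmds();
out_free_pmds:
	free_kernel_pmds();
out:
	return -1;
}

int main(void)
{
	return pgd_alloc_model() ? 1 : 0;
}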
