Skip to content

Commit 64edc8e

Browse files
mat-c authored and Ingo Molnar committed
x86: Fix improper large page preservation
This patch fixes a bug in try_preserve_large_page() which may result in improper large page preservation and improper application of page attributes to the memory area outside of the original change request. More specifically, the problem manifests itself when set_memory_*() is called for several pages at the beginning of the large page and try_preserve_large_page() erroneously concludes that the change can be applied to the whole large page.

The fix consists of 3 parts:

1. Addition of "required" protection attributes in static_protections(), so .data and .bss can be guaranteed to stay "RW".
2. static_protections() is now called for every small page within the large page to determine compatibility of new protection attributes (instead of just the small pages within the requested range).
3. A large page can be preserved only if the attribute change is large-page-aligned and covers the whole large page.

-v1: try_preserve_large_page() patch for Linux 2.6.34-rc2
-v2: Replaced pfn check with address check for kernel rw-data

Signed-off-by: Siarhei Liakh <sliakh.lkml@gmail.com>
Signed-off-by: Xuxian Jiang <jiang@cs.ncsu.edu>
Reviewed-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: James Morris <jmorris@namei.org>
Cc: Andi Kleen <ak@muc.de>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Dave Jones <davej@redhat.com>
Cc: Kees Cook <kees.cook@canonical.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
LKML-Reference: <4CE2F7F3.8030809@free.fr>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
1 parent e53beac commit 64edc8e

File tree

1 file changed

+18
-10
lines changed

1 file changed

+18
-10
lines changed

arch/x86/mm/pageattr.c

Lines changed: 18 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -255,6 +255,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
255255
unsigned long pfn)
256256
{
257257
pgprot_t forbidden = __pgprot(0);
258+
pgprot_t required = __pgprot(0);
258259

259260
/*
260261
* The BIOS area between 640k and 1Mb needs to be executable for
@@ -278,6 +279,12 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
278279
if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
279280
__pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
280281
pgprot_val(forbidden) |= _PAGE_RW;
282+
/*
283+
* .data and .bss should always be writable.
284+
*/
285+
if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
286+
within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
287+
pgprot_val(required) |= _PAGE_RW;
281288

282289
#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
283290
/*
@@ -317,6 +324,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
317324
#endif
318325

319326
prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
327+
prot = __pgprot(pgprot_val(prot) | pgprot_val(required));
320328

321329
return prot;
322330
}
@@ -393,7 +401,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
393401
{
394402
unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
395403
pte_t new_pte, old_pte, *tmp;
396-
pgprot_t old_prot, new_prot;
404+
pgprot_t old_prot, new_prot, req_prot;
397405
int i, do_split = 1;
398406
unsigned int level;
399407

@@ -438,10 +446,10 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
438446
* We are safe now. Check whether the new pgprot is the same:
439447
*/
440448
old_pte = *kpte;
441-
old_prot = new_prot = pte_pgprot(old_pte);
449+
old_prot = new_prot = req_prot = pte_pgprot(old_pte);
442450

443-
pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
444-
pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
451+
pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
452+
pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
445453

446454
/*
447455
* old_pte points to the large page base address. So we need
@@ -450,17 +458,17 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
450458
pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
451459
cpa->pfn = pfn;
452460

453-
new_prot = static_protections(new_prot, address, pfn);
461+
new_prot = static_protections(req_prot, address, pfn);
454462

455463
/*
456464
* We need to check the full range, whether
457465
* static_protection() requires a different pgprot for one of
458466
* the pages in the range we try to preserve:
459467
*/
460-
addr = address + PAGE_SIZE;
461-
pfn++;
462-
for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
463-
pgprot_t chk_prot = static_protections(new_prot, addr, pfn);
468+
addr = address & pmask;
469+
pfn = pte_pfn(old_pte);
470+
for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
471+
pgprot_t chk_prot = static_protections(req_prot, addr, pfn);
464472

465473
if (pgprot_val(chk_prot) != pgprot_val(new_prot))
466474
goto out_unlock;
@@ -483,7 +491,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
483491
* that we limited the number of possible pages already to
484492
* the number of pages in the large page.
485493
*/
486-
if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
494+
if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) {
487495
/*
488496
* The address is aligned and the number of pages
489497
* covers the full page.

0 commit comments

Comments
 (0)