
Commit 52a628f

suryasaimadhu authored and Matt Fleming committed
x86/mm/pageattr: Add last levels of error path
We try to free the pagetable pages once we've unmapped our portion.

Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
1 parent 0bb8aee commit 52a628f
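
The pattern behind the new helpers is easy to demonstrate outside the kernel. Below is a minimal user-space sketch of the try_to_free_pte_page() idea: scan every slot of a page-table page and free the backing page only when all entries are clear. The names entry_t, ENTRIES_PER_TABLE, and try_to_free_table() are illustrative stand-ins, not kernel API; calloc()/free() replace the kernel's page allocator.

/*
 * User-space sketch of the try_to_free_pte_page() pattern: free a
 * table only when every entry in it is clear.  entry_t,
 * ENTRIES_PER_TABLE and try_to_free_table() are illustrative
 * stand-ins, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES_PER_TABLE 512		/* PTRS_PER_PTE on x86-64 */

typedef unsigned long entry_t;		/* stand-in for pte_t */

static bool try_to_free_table(entry_t *table)
{
	int i;

	/* Any live entry keeps the whole table allocated. */
	for (i = 0; i < ENTRIES_PER_TABLE; i++)
		if (table[i] != 0)
			return false;

	free(table);			/* free_page() in the kernel */
	return true;
}

int main(void)
{
	entry_t *table = calloc(ENTRIES_PER_TABLE, sizeof(*table));

	table[3] = 0x1000;		/* one live mapping */
	printf("freed: %d\n", try_to_free_table(table));	/* 0 */

	table[3] = 0;			/* last entry cleared */
	printf("freed: %d\n", try_to_free_table(table));	/* 1 */
	return 0;
}

The same scan-then-free shape repeats one level up in try_to_free_pmd_page(), which is what lets an emptied PTE page cascade into freeing its parent PMD page.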


arch/x86/mm/pageattr.c

Lines changed: 93 additions & 1 deletion
@@ -666,7 +666,99 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	return 0;
 }
 
-#define unmap_pmd_range(pud, start, pre_end) do {} while (0)
+static bool try_to_free_pte_page(pte_t *pte)
+{
+	int i;
+
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		if (!pte_none(pte[i]))
+			return false;
+
+	free_page((unsigned long)pte);
+	return true;
+}
+
+static bool try_to_free_pmd_page(pmd_t *pmd)
+{
+	int i;
+
+	for (i = 0; i < PTRS_PER_PMD; i++)
+		if (!pmd_none(pmd[i]))
+			return false;
+
+	free_page((unsigned long)pmd);
+	return true;
+}
+
+static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
+{
+	pte_t *pte = pte_offset_kernel(pmd, start);
+
+	while (start < end) {
+		set_pte(pte, __pte(0));
+
+		start += PAGE_SIZE;
+		pte++;
+	}
+
+	if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
+		pmd_clear(pmd);
+		return true;
+	}
+	return false;
+}
+
+static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
+			      unsigned long start, unsigned long end)
+{
+	if (unmap_pte_range(pmd, start, end))
+		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
+			pud_clear(pud);
+}
+
+static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
+{
+	pmd_t *pmd = pmd_offset(pud, start);
+
+	/*
+	 * Not on a 2MB page boundary?
+	 */
+	if (start & (PMD_SIZE - 1)) {
+		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
+		unsigned long pre_end = min_t(unsigned long, end, next_page);
+
+		__unmap_pmd_range(pud, pmd, start, pre_end);
+
+		start = pre_end;
+		pmd++;
+	}
+
+	/*
+	 * Try to unmap in 2M chunks.
+	 */
+	while (end - start >= PMD_SIZE) {
+		if (pmd_large(*pmd))
+			pmd_clear(pmd);
+		else
+			__unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
+
+		start += PMD_SIZE;
+		pmd++;
+	}
+
+	/*
+	 * 4K leftovers?
+	 */
+	if (start < end)
+		return __unmap_pmd_range(pud, pmd, start, end);
+
+	/*
+	 * Try again to free the PMD page if haven't succeeded above.
+	 */
+	if (!pud_none(*pud))
+		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
+			pud_clear(pud);
+}
 
 static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
 {
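
One detail worth checking is the head-trimming arithmetic in unmap_pmd_range(): (start + PMD_SIZE) & PMD_MASK rounds an unaligned start up to the next 2M boundary, and min_t() caps that at end so a range that sits entirely inside one 2M page is handled as well. A quick user-space check of that arithmetic, assuming x86-64 values (PMD_SIZE = 2 MiB); the addresses are made up for illustration:

/*
 * Quick check of the head-trimming arithmetic in unmap_pmd_range(),
 * assuming x86-64 values (PMD_SIZE = 2 MiB).  Addresses are
 * illustrative only.
 */
#include <stdio.h>

#define PMD_SIZE	(2UL << 20)
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	unsigned long start = 0x203000UL;	/* 12 KiB past a 2M boundary */
	unsigned long end   = 0x600000UL;
	unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
	unsigned long pre_end = end < next_page ? end : next_page;

	/* prints: next_page=0x400000 pre_end=0x400000 */
	printf("next_page=%#lx pre_end=%#lx\n", next_page, pre_end);
	return 0;
}

With these numbers the partial head [0x203000, 0x400000) is torn down 4K-wise via __unmap_pmd_range(), and the 2M loop then handles [0x400000, 0x600000) one PMD at a time.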
