Skip to content

Commit c32c2cb

Browse files
committed
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon: "I thought we were done for 4.5, but then the 64k-page chaps came crawling out of the woodwork. *sigh* The vmemmap fix I sent for -rc7 caused a regression with 64k pages and sparsemem and at some point during the release cycle the new hugetlb code using contiguous ptes started failing the libhugetlbfs tests with 64k pages enabled. So here are a couple of patches that fix the vmemmap alignment and disable the new hugetlb page sizes whilst a proper fix is being developed: - Temporarily disable huge pages built using contiguous ptes - Ensure vmemmap region is sufficiently aligned for sparsemem sections" * tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: arm64: hugetlb: partial revert of 66b3923 arm64: account for sparsemem section alignment when choosing vmemmap offset
2 parents 2da33f9 + ff79258 commit c32c2cb

File tree

2 files changed

+3
-16
lines changed

2 files changed

+3
-16
lines changed

arch/arm64/include/asm/pgtable.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@
  * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
  * fixed mappings and modules
  */
-#define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE)
+#define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
 
 #ifndef CONFIG_KASAN
 #define VMALLOC_START		(VA_START)
@@ -52,7 +52,8 @@
 #define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
 #define VMEMMAP_START		(VMALLOC_END + SZ_64K)
-#define vmemmap		((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
+#define vmemmap		((struct page *)VMEMMAP_START - \
+				SECTION_ALIGN_DOWN(memstart_addr >> PAGE_SHIFT))
 
 #define FIRST_USER_ADDRESS	0UL
 

arch/arm64/mm/hugetlbpage.c

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -306,24 +306,10 @@ static __init int setup_hugepagesz(char *opt)
 		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
 	} else if (ps == PUD_SIZE) {
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
-	} else if (ps == (PAGE_SIZE * CONT_PTES)) {
-		hugetlb_add_hstate(CONT_PTE_SHIFT);
-	} else if (ps == (PMD_SIZE * CONT_PMDS)) {
-		hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
 	} else {
 		pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
 		return 0;
 	}
 	return 1;
 }
 __setup("hugepagesz=", setup_hugepagesz);
-
-#ifdef CONFIG_ARM64_64K_PAGES
-static __init int add_default_hugepagesz(void)
-{
-	if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
-		hugetlb_add_hstate(CONT_PMD_SHIFT);
-	return 0;
-}
-arch_initcall(add_default_hugepagesz);
-#endif

0 commit comments

Comments
 (0)