Skip to content

Commit a77e0c7

Browse files
author
Santosh Shilimkar
committed
ARM: mm: Recreate kernel mappings in early_paging_init()
This patch adds a step in the init sequence, in order to recreate the kernel code/data page table mappings prior to full paging initialization. This is necessary on LPAE systems that run out of a physical address space outside the 4G limit. On these systems, this implementation provides a machine descriptor hook that allows the PHYS_OFFSET to be overridden in a machine specific fashion. Cc: Russell King <linux@arm.linux.org.uk> Acked-by: Nicolas Pitre <nico@linaro.org> Signed-off-by: R Sricharan <r.sricharan@ti.com> Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
1 parent f52bb72 commit a77e0c7

File tree

3 files changed

+87
-0
lines changed

3 files changed

+87
-0
lines changed

arch/arm/include/asm/mach/arch.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,7 @@ struct machine_desc {
4949
bool (*smp_init)(void);
5050
void (*fixup)(struct tag *, char **,
5151
struct meminfo *);
52+
void (*init_meminfo)(void);
5253
void (*reserve)(void);/* reserve mem blocks */
5354
void (*map_io)(void);/* IO mapping function */
5455
void (*init_early)(void);

arch/arm/kernel/setup.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,8 @@ __setup("fpe=", fpe_setup);
7373
#endif
7474

7575
extern void paging_init(const struct machine_desc *desc);
76+
extern void early_paging_init(const struct machine_desc *,
77+
struct proc_info_list *);
7678
extern void sanity_check_meminfo(void);
7779
extern enum reboot_mode reboot_mode;
7880
extern void setup_dma_zone(const struct machine_desc *desc);
@@ -878,6 +880,8 @@ void __init setup_arch(char **cmdline_p)
878880
parse_early_param();
879881

880882
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
883+
884+
early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
881885
sanity_check_meminfo();
882886
arm_memblock_init(&meminfo, mdesc);
883887

arch/arm/mm/mmu.c

Lines changed: 82 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,8 @@
2828
#include <asm/highmem.h>
2929
#include <asm/system_info.h>
3030
#include <asm/traps.h>
31+
#include <asm/procinfo.h>
32+
#include <asm/memory.h>
3133

3234
#include <asm/mach/arch.h>
3335
#include <asm/mach/map.h>
@@ -1315,6 +1317,86 @@ static void __init map_lowmem(void)
13151317
}
13161318
}
13171319

1320+
#ifdef CONFIG_ARM_LPAE
/*
 * early_paging_init() recreates boot time page table setup, allowing machines
 * to switch over to a high (>4G) address space on LPAE systems
 *
 * Called from setup_arch() before sanity_check_meminfo()/paging_init().
 * If the machine descriptor provides an init_meminfo() hook (which may
 * change PHYS_OFFSET), this function:
 *   1. invokes the hook,
 *   2. re-runs the phys/virt patch stub so the __pv_table constants match
 *      the new PHYS_OFFSET,
 *   3. rebuilds the level-1 table and the kernel code/data pmd entries
 *      against the new physical addresses, and
 *   4. switches the live translation tables over to the rebuilt pgd.
 *
 * @mdesc:    machine descriptor; only its init_meminfo() hook is used here
 * @procinfo: CPU proc_info entry; supplies __cpu_mm_mmu_flags used as the
 *            section-mapping protection bits for the kernel pmds
 */
void __init early_paging_init(const struct machine_desc *mdesc,
			      struct proc_info_list *procinfo)
{
	/* Section protection bits for the kernel mapping, per-CPU-type. */
	pmdval_t pmdprot = procinfo->__cpu_mm_mmu_flags;
	unsigned long map_start, map_end;
	pgd_t *pgd0, *pgdk;
	pud_t *pud0, *pudk, *pud_start;
	pmd_t *pmd0, *pmdk;
	phys_addr_t phys;
	int i;

	/* Nothing to do unless the machine overrides the memory layout. */
	if (!(mdesc->init_meminfo))
		return;

	/* remap kernel code and data */
	map_start = init_mm.start_code;
	map_end = init_mm.brk;

	/* get a handle on things... */
	pgd0 = pgd_offset_k(0);
	/*
	 * NOTE(review): pud_start is assigned here but never read again in
	 * this function — confirm whether it is leftover from an earlier
	 * revision.
	 */
	pud_start = pud0 = pud_offset(pgd0, 0);
	pmd0 = pmd_offset(pud0, 0);

	/* Table entries covering the kernel image itself. */
	pgdk = pgd_offset_k(map_start);
	pudk = pud_offset(pgdk, map_start);
	pmdk = pmd_offset(pudk, map_start);

	/* Machine hook may change PHYS_OFFSET from here on. */
	mdesc->init_meminfo();

	/* Run the patch stub to update the constants */
	fixup_pv_table(&__pv_table_begin,
		(&__pv_table_end - &__pv_table_begin) << 2);

	/*
	 * Cache cleaning operations for self-modifying code
	 * We should clean the entries by MVA but running a
	 * for loop over every pv_table entry pointer would
	 * just complicate the code.
	 */
	flush_cache_louis();
	dsb();
	isb();

	/* remap level 1 table */
	for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
		set_pud(pud0,
			__pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
		pmd0 += PTRS_PER_PMD;
	}

	/*
	 * remap pmds for kernel mapping
	 *
	 * __pa() now reflects the (possibly updated) PHYS_OFFSET, so these
	 * section entries point at the kernel's new physical location.
	 */
	phys = __pa(map_start) & PMD_MASK;
	do {
		*pmdk++ = __pmd(phys | pmdprot);
		phys += PMD_SIZE;
	} while (phys < map_end);

	/*
	 * Switch the live translation tables to the rebuilt pgd and
	 * invalidate stale branch-predictor/TLB state.
	 */
	flush_cache_all();
	cpu_switch_mm(pgd0, &init_mm);
	cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
	local_flush_bp_all();
	local_flush_tlb_all();
}

#else

/*
 * Non-LPAE stub: no page-table rebuild is possible (or needed), but the
 * machine's init_meminfo() hook is still honoured so platforms can adjust
 * their memory info early.
 */
void __init early_paging_init(const struct machine_desc *mdesc,
			      struct proc_info_list *procinfo)
{
	if (mdesc->init_meminfo)
		mdesc->init_meminfo();
}

#endif
1399+
13181400
/*
13191401
* paging_init() sets up the page tables, initialises the zone memory
13201402
* maps, and sets up the zero page, bad page and bad page tables.

0 commit comments

Comments
 (0)