Commit b5200ec

rarbab authored and mpe committed
powerpc/mm: refactor radix physical page mapping
Move the page mapping code in radix_init_pgtable() into a separate function that will also be used for memory hotplug.

The current goto loop progressively decreases its mapping size as it covers the tail of a range whose end is unaligned. Change this to a for loop which can do the same for both ends of the range.

Signed-off-by: Reza Arbab <arbab@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
1 parent 023b13a commit b5200ec

File tree

1 file changed: +50 -38 lines


arch/powerpc/mm/pgtable-radix.c

Lines changed: 50 additions & 38 deletions
@@ -108,54 +108,66 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
         return 0;
 }

+static inline void __meminit print_mapping(unsigned long start,
+                                           unsigned long end,
+                                           unsigned long size)
+{
+        if (end <= start)
+                return;
+
+        pr_info("Mapped range 0x%lx - 0x%lx with 0x%lx\n", start, end, size);
+}
+
+static int __meminit create_physical_mapping(unsigned long start,
+                                             unsigned long end)
+{
+        unsigned long addr, mapping_size = 0;
+
+        start = _ALIGN_UP(start, PAGE_SIZE);
+        for (addr = start; addr < end; addr += mapping_size) {
+                unsigned long gap, previous_size;
+                int rc;
+
+                gap = end - addr;
+                previous_size = mapping_size;
+
+                if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
+                    mmu_psize_defs[MMU_PAGE_1G].shift)
+                        mapping_size = PUD_SIZE;
+                else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
+                         mmu_psize_defs[MMU_PAGE_2M].shift)
+                        mapping_size = PMD_SIZE;
+                else
+                        mapping_size = PAGE_SIZE;
+
+                if (mapping_size != previous_size) {
+                        print_mapping(start, addr, previous_size);
+                        start = addr;
+                }
+
+                rc = radix__map_kernel_page((unsigned long)__va(addr), addr,
+                                            PAGE_KERNEL_X, mapping_size);
+                if (rc)
+                        return rc;
+        }
+
+        print_mapping(start, addr, mapping_size);
+        return 0;
+}
+
 static void __init radix_init_pgtable(void)
 {
-        int loop_count;
-        u64 base, end, start_addr;
         unsigned long rts_field;
         struct memblock_region *reg;
-        unsigned long linear_page_size;

         /* We don't support slb for radix */
         mmu_slb_size = 0;
         /*
          * Create the linear mapping, using standard page size for now
          */
-        loop_count = 0;
-        for_each_memblock(memory, reg) {
-
-                start_addr = reg->base;
-
-redo:
-                if (loop_count < 1 && mmu_psize_defs[MMU_PAGE_1G].shift)
-                        linear_page_size = PUD_SIZE;
-                else if (loop_count < 2 && mmu_psize_defs[MMU_PAGE_2M].shift)
-                        linear_page_size = PMD_SIZE;
-                else
-                        linear_page_size = PAGE_SIZE;
-
-                base = _ALIGN_UP(start_addr, linear_page_size);
-                end = _ALIGN_DOWN(reg->base + reg->size, linear_page_size);
-
-                pr_info("Mapping range 0x%lx - 0x%lx with 0x%lx\n",
-                        (unsigned long)base, (unsigned long)end,
-                        linear_page_size);
-
-                while (base < end) {
-                        radix__map_kernel_page((unsigned long)__va(base),
-                                               base, PAGE_KERNEL_X,
-                                               linear_page_size);
-                        base += linear_page_size;
-                }
-                /*
-                 * map the rest using lower page size
-                 */
-                if (end < reg->base + reg->size) {
-                        start_addr = end;
-                        loop_count++;
-                        goto redo;
-                }
-        }
+        for_each_memblock(memory, reg)
+                WARN_ON(create_physical_mapping(reg->base,
+                                                reg->base + reg->size));
         /*
          * Allocate Partition table and process table for the
          * host.
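
To see how the new loop covers both unaligned ends of a range, here is a minimal userspace sketch of create_physical_mapping()'s size-selection logic. It is an illustration under assumptions, not the kernel code: it assumes 64K base pages with fixed 2M/1G huge sizes, and it drops the mmu_psize_defs[] availability checks, the _ALIGN_UP() of the start address, and the actual radix__map_kernel_page() call. The example range 0x10000 - 0x80010000 is arbitrary, chosen so that neither end is 2M-aligned.

/*
 * Userspace sketch of the per-iteration mapping-size choice.
 * Assumed sizes, not taken from the commit: 64K base page, 2M PMD, 1G PUD.
 */
#include <stdio.h>

#define PAGE_SIZE       (64UL << 10)    /* assumed 64K base page */
#define PMD_SIZE        (2UL << 20)     /* 2M */
#define PUD_SIZE        (1UL << 30)     /* 1G */

#define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)

static void print_mapping(unsigned long start, unsigned long end,
                          unsigned long size)
{
        if (end <= start)
                return;
        printf("Mapped range 0x%lx - 0x%lx with 0x%lx\n", start, end, size);
}

int main(void)
{
        /* 64K-aligned but not 2M-aligned start; a 64K tail past 2G. */
        unsigned long start = 0x10000UL;
        unsigned long end = 0x80010000UL;
        unsigned long addr, mapping_size = 0;

        for (addr = start; addr < end; addr += mapping_size) {
                unsigned long gap = end - addr;
                unsigned long previous_size = mapping_size;

                /* Largest size that fits the alignment and remaining gap. */
                if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE)
                        mapping_size = PUD_SIZE;
                else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE)
                        mapping_size = PMD_SIZE;
                else
                        mapping_size = PAGE_SIZE;

                /* Coalesce the log: report a run when the size changes. */
                if (mapping_size != previous_size) {
                        print_mapping(start, addr, previous_size);
                        start = addr;
                }
        }
        print_mapping(start, addr, mapping_size);
        return 0;
}

Compiled and run, the sketch reports 64K pages up to the first 2M boundary, 2M pages up to the first 1G boundary, one 1G page, and 64K pages for the tail:

Mapped range 0x10000 - 0x200000 with 0x10000
Mapped range 0x200000 - 0x40000000 with 0x200000
Mapped range 0x40000000 - 0x80000000 with 0x40000000
Mapped range 0x80000000 - 0x80010000 with 0x10000

The old goto loop could only shrink the page size once it reached the unaligned tail; because the new loop re-evaluates alignment and remaining gap on every iteration, an unaligned head is handled by the same three-way test.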
