
Commit 99cca49

x86-32, NUMA: Add @start and @end to init_alloc_remap()

Instead of dereferencing node_start/end_pfn[] directly, make
init_alloc_remap() take @start and @end and let the caller be
responsible for making sure the range is sane.  This is to prepare for
use from unified NUMA init code.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
1 parent: 38f3e1c
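For context, the caller-side range sanitization this commit asks for can be pictured with a small standalone sketch. This is plain userspace C, not the kernel code itself; the pfn arrays, the max_pfn value, and the stub init_alloc_remap() body below are made-up placeholders, and only the clamp-then-skip pattern mirrors the diff.

/*
 * Userspace sketch (not kernel code): the caller clamps each node's
 * range to max_pfn and skips empty or out-of-range nodes, so the
 * callee can trust @start and @end.  All values here are invented.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define MAX_NUMNODES    4

static unsigned long node_start_pfn[MAX_NUMNODES] = { 0x000, 0x400, 0x900, 0 };
static unsigned long node_end_pfn[MAX_NUMNODES]   = { 0x400, 0x900, 0xc00, 0 };
static unsigned long max_pfn = 0xa00;   /* highest usable pfn (hypothetical) */

static void init_alloc_remap(int nid, uint64_t start, uint64_t end)
{
        /* The callee may now assume start < end <= max_pfn << PAGE_SHIFT. */
        printf("node %d: remap range [%#llx - %#llx)\n",
               nid, (unsigned long long)start, (unsigned long long)end);
}

int main(void)
{
        for (int nid = 0; nid < MAX_NUMNODES; nid++) {
                uint64_t start = (uint64_t)node_start_pfn[nid] << PAGE_SHIFT;
                uint64_t end   = (uint64_t)node_end_pfn[nid] << PAGE_SHIFT;
                uint64_t limit = (uint64_t)max_pfn << PAGE_SHIFT;

                if (end > limit)
                        end = limit;    /* clamp to memory that is present */
                if (start < end)        /* skip empty or out-of-range nodes */
                        init_alloc_remap(nid, start, end);
        }
        return 0;
}

Pushing the clamping to the caller keeps init_alloc_remap() free of node_start/end_pfn[] lookups, which is what allows the unified NUMA init code mentioned in the message to reuse it later.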

1 file changed, 14 insertions(+), 15 deletions(-)

arch/x86/mm/numa_32.c

@@ -265,8 +265,10 @@ void resume_map_numa_kva(pgd_t *pgd_base)
  * opportunistically and the callers will fall back to other memory
  * allocation mechanisms on failure.
  */
-static __init void init_alloc_remap(int nid)
+static __init void init_alloc_remap(int nid, u64 start, u64 end)
 {
+        unsigned long start_pfn = start >> PAGE_SHIFT;
+        unsigned long end_pfn = end >> PAGE_SHIFT;
         unsigned long size, pfn;
         u64 node_pa, remap_pa;
         void *remap_va;
@@ -276,24 +278,15 @@ static __init void init_alloc_remap(int nid)
          * memory could be added but not currently present.
          */
         printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
-               nid, node_start_pfn[nid], node_end_pfn[nid]);
-        if (node_start_pfn[nid] > max_pfn)
-                return;
-        if (!node_end_pfn[nid])
-                return;
-        if (node_end_pfn[nid] > max_pfn)
-                node_end_pfn[nid] = max_pfn;
+               nid, start_pfn, end_pfn);
 
         /* calculate the necessary space aligned to large page size */
-        size = node_memmap_size_bytes(nid, node_start_pfn[nid],
-                                      min(node_end_pfn[nid], max_pfn));
+        size = node_memmap_size_bytes(nid, start_pfn, end_pfn);
         size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
         size = ALIGN(size, LARGE_PAGE_BYTES);
 
         /* allocate node memory and the lowmem remap area */
-        node_pa = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
-                                         (u64)node_end_pfn[nid] << PAGE_SHIFT,
-                                         size, LARGE_PAGE_BYTES);
+        node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
         if (node_pa == MEMBLOCK_ERROR) {
                 pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
                            size, nid);
@@ -391,8 +384,14 @@ void __init initmem_init(void)
         get_memcfg_numa();
         numa_init_array();
 
-        for_each_online_node(nid)
-                init_alloc_remap(nid);
+        for_each_online_node(nid) {
+                u64 start = (u64)node_start_pfn[nid] << PAGE_SHIFT;
+                u64 end = min((u64)node_end_pfn[nid] << PAGE_SHIFT,
+                              (u64)max_pfn << PAGE_SHIFT);
+
+                if (start < end)
+                        init_alloc_remap(nid, start, end);
+        }
 
 #ifdef CONFIG_HIGHMEM
         highstart_pfn = highend_pfn = max_pfn;
