
Commit 24e6d5a

Christoph Hellwig authored and djbw committed
mm: pass the vmem_altmap to arch_add_memory and __add_pages
We can just pass this on instead of having to do a radix tree lookup without proper locking two levels into the call chain.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
1 parent 55ce6e2 commit 24e6d5a
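
For context, here is a condensed view of the interface after this change, assembled from the hunks below (prototypes and representative call sites only; a summary of the diff, not the complete sources):

	/* include/linux/memory_hotplug.h, after this commit */
	struct vmem_altmap;

	extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap, bool want_memblock);
	extern int arch_add_memory(int nid, u64 start, u64 size,
		struct vmem_altmap *altmap, bool want_memblock);

	/*
	 * devm_memremap_pages() now hands its altmap down explicitly:
	 *	error = arch_add_memory(nid, align_start, align_size, altmap, false);
	 * while ordinary memory hotplug passes NULL:
	 *	ret = arch_add_memory(nid, start, size, NULL, true);
	 * so __add_pages() no longer has to re-derive the altmap via the
	 * unlocked to_vmem_altmap() radix tree lookup.
	 */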

File tree

10 files changed, 39 insertions(+), 29 deletions(-)


arch/ia64/mm/init.c

Lines changed: 3 additions & 2 deletions
@@ -647,13 +647,14 @@ mem_init (void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+		bool want_memblock)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
-	ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
+	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
 	if (ret)
 		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
 		       __func__,  ret);

arch/powerpc/mm/mem.c

Lines changed: 3 additions & 2 deletions
@@ -127,7 +127,8 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end)
 	return -ENODEV;
 }
 
-int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+		bool want_memblock)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -144,7 +145,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
 		return -EFAULT;
 	}
 
-	return __add_pages(nid, start_pfn, nr_pages, want_memblock);
+	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE

arch/s390/mm/init.c

Lines changed: 3 additions & 2 deletions
@@ -222,7 +222,8 @@ device_initcall(s390_cma_mem_init);
 
 #endif /* CONFIG_CMA */
 
-int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+		bool want_memblock)
 {
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long size_pages = PFN_DOWN(size);
@@ -232,7 +233,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
 	if (rc)
 		return rc;
 
-	rc = __add_pages(nid, start_pfn, size_pages, want_memblock);
+	rc = __add_pages(nid, start_pfn, size_pages, altmap, want_memblock);
 	if (rc)
 		vmem_remove_mapping(start, size);
 	return rc;

arch/sh/mm/init.c

Lines changed: 3 additions & 2 deletions
@@ -485,14 +485,15 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 #endif
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+		bool want_memblock)
 {
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
 	/* We only have ZONE_NORMAL, so this is easy.. */
-	ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
+	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
 	if (unlikely(ret))
 		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

arch/x86/mm/init_32.c

Lines changed: 3 additions & 2 deletions
@@ -829,12 +829,13 @@ void __init mem_init(void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+		bool want_memblock)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 
-	return __add_pages(nid, start_pfn, nr_pages, want_memblock);
+	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE

arch/x86/mm/init_64.c

Lines changed: 6 additions & 5 deletions
@@ -772,12 +772,12 @@ static void update_end_of_memory_vars(u64 start, u64 size)
 	}
 }
 
-int add_pages(int nid, unsigned long start_pfn,
-	      unsigned long nr_pages, bool want_memblock)
+int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+	      struct vmem_altmap *altmap, bool want_memblock)
 {
 	int ret;
 
-	ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
+	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
 	WARN_ON_ONCE(ret);
 
 	/* update max_pfn, max_low_pfn and high_memory */
@@ -787,14 +787,15 @@ int add_pages(int nid, unsigned long start_pfn,
 	return ret;
 }
 
-int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+		bool want_memblock)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 
 	init_memory_mapping(start, start + size);
 
-	return add_pages(nid, start_pfn, nr_pages, want_memblock);
+	return add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
 }
 
 #define PAGE_INUSE 0xFD

include/linux/memory_hotplug.h

Lines changed: 10 additions & 7 deletions
@@ -13,6 +13,7 @@ struct pglist_data;
 struct mem_section;
 struct memory_block;
 struct resource;
+struct vmem_altmap;
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*
@@ -131,18 +132,19 @@ extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 
 /* reasonably generic interface to expand the physical pages */
-extern int __add_pages(int nid, unsigned long start_pfn,
-	unsigned long nr_pages, bool want_memblock);
+extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+	struct vmem_altmap *altmap, bool want_memblock);
 
 #ifndef CONFIG_ARCH_HAS_ADD_PAGES
 static inline int add_pages(int nid, unsigned long start_pfn,
-		unsigned long nr_pages, bool want_memblock)
+		unsigned long nr_pages, struct vmem_altmap *altmap,
+		bool want_memblock)
 {
-	return __add_pages(nid, start_pfn, nr_pages, want_memblock);
+	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
 }
 #else /* ARCH_HAS_ADD_PAGES */
-int add_pages(int nid, unsigned long start_pfn,
-	unsigned long nr_pages, bool want_memblock);
+int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+	struct vmem_altmap *altmap, bool want_memblock);
 #endif /* ARCH_HAS_ADD_PAGES */
 
 #ifdef CONFIG_NUMA
@@ -318,7 +320,8 @@ extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
 		void *arg, int (*func)(struct memory_block *, void *));
 extern int add_memory(int nid, u64 start, u64 size);
 extern int add_memory_resource(int nid, struct resource *resource, bool online);
-extern int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock);
+extern int arch_add_memory(int nid, u64 start, u64 size,
+		struct vmem_altmap *altmap, bool want_memblock);
 extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 		unsigned long nr_pages);
 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);

kernel/memremap.c

Lines changed: 2 additions & 1 deletion
@@ -382,6 +382,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 	if (altmap) {
 		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
 		pgmap->altmap = &page_map->altmap;
+		altmap = pgmap->altmap;
 	}
 	pgmap->ref = ref;
 	pgmap->res = &page_map->res;
@@ -427,7 +428,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 		goto err_pfn_remap;
 
 	mem_hotplug_begin();
-	error = arch_add_memory(nid, align_start, align_size, false);
+	error = arch_add_memory(nid, align_start, align_size, altmap, false);
 	if (!error)
 		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
 				align_start >> PAGE_SHIFT,

mm/hmm.c

Lines changed: 3 additions & 2 deletions
@@ -931,10 +931,11 @@ static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
 	 * want the linear mapping and thus use arch_add_memory().
 	 */
 	if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC)
-		ret = arch_add_memory(nid, align_start, align_size, false);
+		ret = arch_add_memory(nid, align_start, align_size, NULL,
+				false);
 	else
 		ret = add_pages(nid, align_start >> PAGE_SHIFT,
-			align_size >> PAGE_SHIFT, false);
+				align_size >> PAGE_SHIFT, NULL, false);
 	if (ret) {
 		mem_hotplug_done();
 		goto error_add_memory;

mm/memory_hotplug.c

Lines changed: 3 additions & 4 deletions
@@ -292,18 +292,17 @@ static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
  * add the new pages.
  */
 int __ref __add_pages(int nid, unsigned long phys_start_pfn,
-			unsigned long nr_pages, bool want_memblock)
+		unsigned long nr_pages, struct vmem_altmap *altmap,
+		bool want_memblock)
 {
 	unsigned long i;
 	int err = 0;
 	int start_sec, end_sec;
-	struct vmem_altmap *altmap;
 
 	/* during initialize mem_map, align hot-added range to section */
 	start_sec = pfn_to_section_nr(phys_start_pfn);
 	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
 
-	altmap = to_vmem_altmap((unsigned long) pfn_to_page(phys_start_pfn));
 	if (altmap) {
 		/*
 		 * Validate altmap is within bounds of the total request
@@ -1148,7 +1147,7 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online)
 	}
 
 	/* call arch's memory hotadd */
-	ret = arch_add_memory(nid, start, size, true);
+	ret = arch_add_memory(nid, start, size, NULL, true);
 
 	if (ret < 0)
 		goto error;
