
Commit b584c25

oohal authored and mpe committed
powerpc/vmemmap: Add altmap support
Adds support to powerpc for the altmap feature of ZONE_DEVICE memory. An altmap is a driver-provided region that is used to provide the backing storage for the struct pages of ZONE_DEVICE memory. In situations where a large amount of ZONE_DEVICE memory is being added to the system, the altmap reduces pressure on main system memory by allowing the mm/ metadata to be stored on the device itself rather than in main memory.

Reviewed-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Oliver O'Halloran <oohall@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
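For context, here is a minimal sketch of how a ZONE_DEVICE driver of this era might describe an altmap when mapping device memory. The struct vmem_altmap fields come from include/linux/memremap.h of this kernel version; the example function, the region sizing, and the four-argument devm_memremap_pages() call are illustrative assumptions and not part of this commit.

#include <linux/device.h>
#include <linux/memremap.h>
#include <linux/pfn.h>
#include <linux/sizes.h>

/*
 * Illustrative sketch only: "res" describes the device memory region and
 * "ref" is the driver's percpu refcount; both are assumed to exist already.
 */
static void *example_map_device_memory(struct device *dev, struct resource *res,
				       struct percpu_ref *ref)
{
	struct vmem_altmap altmap = {
		/* first PFN of the device memory region */
		.base_pfn = PHYS_PFN(res->start),
		/* PFNs at the start of the region reserved for driver metadata */
		.reserve  = PHYS_PFN(SZ_8K),
		/* PFNs after the reserve that the vmemmap code may allocate from */
		.free     = PHYS_PFN(SZ_128M),
	};

	/*
	 * With a non-NULL altmap, the struct pages for this region are carved
	 * out of the device memory itself (via the arch vmemmap code patched
	 * in this commit) instead of being allocated from main memory.
	 * devm_memremap_pages() copies the altmap, so a stack local is fine;
	 * the four-argument form shown here matches kernels of this era.
	 */
	return devm_memremap_pages(dev, res, ref, &altmap);
}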
1 parent d7d9b61 commit b584c25

2 files changed: +26, -5 lines


arch/powerpc/mm/init_64.c

Lines changed: 13 additions & 2 deletions
@@ -44,6 +44,7 @@
 #include <linux/slab.h>
 #include <linux/of_fdt.h>
 #include <linux/libfdt.h>
+#include <linux/memremap.h>
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
@@ -192,13 +193,17 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
 
 	for (; start < end; start += page_size) {
+		struct vmem_altmap *altmap;
 		void *p;
 		int rc;
 
 		if (vmemmap_populated(start, page_size))
 			continue;
 
-		p = vmemmap_alloc_block(page_size, node);
+		/* altmap lookups only work at section boundaries */
+		altmap = to_vmem_altmap(SECTION_ALIGN_DOWN(start));
+
+		p = __vmemmap_alloc_block_buf(page_size, node, altmap);
 		if (!p)
 			return -ENOMEM;
 
@@ -263,6 +268,8 @@ void __ref vmemmap_free(unsigned long start, unsigned long end)
 
 	for (; start < end; start += page_size) {
 		unsigned long nr_pages, addr;
+		struct vmem_altmap *altmap;
+		struct page *section_base;
 		struct page *page;
 
 		/*
@@ -278,9 +285,13 @@ void __ref vmemmap_free(unsigned long start, unsigned long end)
 			continue;
 
 		page = pfn_to_page(addr >> PAGE_SHIFT);
+		section_base = pfn_to_page(vmemmap_section_start(start));
 		nr_pages = 1 << page_order;
 
-		if (PageReserved(page)) {
+		altmap = to_vmem_altmap((unsigned long) section_base);
+		if (altmap) {
+			vmem_altmap_free(altmap, nr_pages);
+		} else if (PageReserved(page)) {
 			/* allocated from bootmem */
 			if (page_size < PAGE_SIZE) {
 				/*
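To clarify why passing a NULL altmap above preserves the existing behaviour: around this kernel version the generic helper dispatches roughly as follows (paraphrased from mm/sparse-vmemmap.c; shown here for reference only, not part of this diff).

/* Paraphrased sketch of the generic helper used by the hunk above. */
void * __meminit __vmemmap_alloc_block_buf(unsigned long size, int node,
		struct vmem_altmap *altmap)
{
	if (altmap)
		/* carve the vmemmap backing pages out of the device-provided region */
		return altmap_alloc_block_buf(size, altmap);
	/* no altmap for this section: allocate from main memory as before */
	return vmemmap_alloc_block_buf(size, node);
}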

arch/powerpc/mm/mem.c

Lines changed: 13 additions & 3 deletions
@@ -36,6 +36,7 @@
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/memremap.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -159,11 +160,20 @@ int arch_remove_memory(u64 start, u64 size)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;
+	struct vmem_altmap *altmap;
+	struct page *page;
 	int ret;
 
-	zone = page_zone(pfn_to_page(start_pfn));
-	ret = __remove_pages(zone, start_pfn, nr_pages);
+	/*
+	 * If we have an altmap then we need to skip over any reserved PFNs
+	 * when querying the zone.
+	 */
+	page = pfn_to_page(start_pfn);
+	altmap = to_vmem_altmap((unsigned long) page);
+	if (altmap)
+		page += vmem_altmap_offset(altmap);
+
+	ret = __remove_pages(page_zone(page), start_pfn, nr_pages);
 	if (ret)
 		return ret;
 
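A note on the arch_remove_memory() change above: for an altmap-backed region the first reserve + free PFNs hold the driver metadata and the struct pages themselves rather than ordinary data pages, so the zone has to be looked up past them. Around this kernel version the helper is roughly the following (paraphrased from kernel/memremap.c; shown for reference only).

/* Paraphrased sketch: number of PFNs from base_pfn before ordinary pages begin. */
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	return altmap->reserve + altmap->free;
}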