
Commit 5b8e7d8

jgross1 authored and David Vrabel committed
xen: Delay invalidating extra memory
When the physical memory configuration is initialized, the p2m entries for not yet populated memory pages are set to "invalid". As those pages lie beyond the hypervisor-built p2m list, the p2m tree has to be extended.

This patch delays processing the extra-memory-related p2m entries during the boot process until some more basic memory management functions are callable. This removes the need to create new p2m entries until virtual memory management is available.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
1 parent 97f4533 commit 5b8e7d8
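
After this patch a p2m lookup falls into one of three pfn regimes. A minimal sketch, based on the get_phys_to_machine() hunk below (classify_pfn() and the enum are hypothetical, not part of the patch):

extern unsigned long xen_p2m_size;	/* entries backed by the p2m list */
extern unsigned long xen_max_p2m_pfn;	/* highest RAM pfn incl. extra memory */

/* Hypothetical illustration of the lookup regimes after this patch. */
enum p2m_regime { P2M_LIST, P2M_EXTRA_MEM, P2M_IDENTITY };

static enum p2m_regime classify_pfn(unsigned long pfn)
{
	if (pfn < xen_p2m_size)
		return P2M_LIST;	/* regular p2m lookup */
	if (pfn < xen_max_p2m_pfn)
		return P2M_EXTRA_MEM;	/* decided by xen_chk_extra_mem() */
	return P2M_IDENTITY;		/* treated as 1:1 mapped */
}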

4 files changed (+103, -129 lines)


arch/x86/include/asm/xen/page.h

Lines changed: 3 additions & 0 deletions
@@ -41,6 +41,9 @@ typedef struct xpaddr {
 
 extern unsigned long *machine_to_phys_mapping;
 extern unsigned long machine_to_phys_nr;
+extern unsigned long *xen_p2m_addr;
+extern unsigned long xen_p2m_size;
+extern unsigned long xen_max_p2m_pfn;
 
 extern unsigned long get_phys_to_machine(unsigned long pfn);
 extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
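
In summary, the rest of the patch uses the three new globals as follows (a reading aid, not commit text):

/*
 * xen_p2m_addr    - virtual address of the linear p2m list supplied by Xen
 * xen_p2m_size    - number of p2m entries actually backed by that list
 * xen_max_p2m_pfn - highest pfn of RAM, including not yet populated
 *                   extra memory beyond the list
 */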

arch/x86/xen/p2m.c

Lines changed: 25 additions & 103 deletions
@@ -181,7 +181,12 @@
 
 static void __init m2p_override_init(void);
 
+unsigned long *xen_p2m_addr __read_mostly;
+EXPORT_SYMBOL_GPL(xen_p2m_addr);
+unsigned long xen_p2m_size __read_mostly;
+EXPORT_SYMBOL_GPL(xen_p2m_size);
 unsigned long xen_max_p2m_pfn __read_mostly;
+EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
 
 static unsigned long *p2m_mid_missing_mfn;
 static unsigned long *p2m_top_mfn;
@@ -198,13 +203,6 @@ static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_identity, P2M_MID_PER_PAGE);
 
 RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
 
-/* For each I/O range remapped we may lose up to two leaf pages for the boundary
- * violations and three mid pages to cover up to 3GB. With
- * early_can_reuse_p2m_middle() most of the leaf pages will be reused by the
- * remapped region.
- */
-RESERVE_BRK(p2m_identity_remap, PAGE_SIZE * 2 * 3 * MAX_REMAP_RANGES);
-
 static int use_brk = 1;
 
 static inline unsigned p2m_top_index(unsigned long pfn)
@@ -381,9 +379,11 @@ void __init xen_build_dynamic_phys_to_machine(void)
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return;
 
+	xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list;
 	mfn_list = (unsigned long *)xen_start_info->mfn_list;
 	max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
 	xen_max_p2m_pfn = max_pfn;
+	xen_p2m_size = max_pfn;
 
 	p2m_missing = alloc_p2m_page();
 	p2m_init(p2m_missing);
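
Right after this function runs, the new bookkeeping simply aliases the hypervisor-supplied list. A hypothetical self-check (not in the patch) of the invariants established above:

/* Hypothetical invariants right after the hunk above has executed. */
static void __init check_p2m_bookkeeping(void)
{
	BUG_ON(xen_p2m_addr != (unsigned long *)xen_start_info->mfn_list);
	BUG_ON(xen_p2m_size != xen_max_p2m_pfn);
}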
@@ -499,13 +499,20 @@ unsigned long __init xen_revector_p2m_tree(void)
 		/* This should be the leafs allocated for identity from _brk. */
 	}
 
+	xen_p2m_size = xen_max_p2m_pfn;
+	xen_p2m_addr = mfn_list;
+
+	xen_inv_extra_mem();
+
 	m2p_override_init();
 	return (unsigned long)mfn_list;
 }
 #else
 unsigned long __init xen_revector_p2m_tree(void)
 {
 	use_brk = 0;
+	xen_p2m_size = xen_max_p2m_pfn;
+	xen_inv_extra_mem();
 	m2p_override_init();
 	return 0;
 }
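
These added calls implement the delay itself. The rough boot ordering, sketched from the hunks in this commit (the exact call sites lie outside this diff):

/*
 * 1. xen_build_dynamic_phys_to_machine()
 *       xen_p2m_size = xen_max_p2m_pfn = pages supplied by Xen
 * 2. xen_memory_setup() -> xen_add_extra_mem()
 *       xen_max_p2m_pfn grows past xen_p2m_size; lookups in the gap
 *       are answered on the fly by xen_chk_extra_mem()
 * 3. xen_revector_p2m_tree(), once basic memory management works:
 *       xen_p2m_size = xen_max_p2m_pfn, then xen_inv_extra_mem()
 *       writes the INVALID_P2M_ENTRY values for real
 */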
@@ -514,8 +521,12 @@ unsigned long get_phys_to_machine(unsigned long pfn)
 {
 	unsigned topidx, mididx, idx;
 
-	if (unlikely(pfn >= MAX_P2M_PFN))
+	if (unlikely(pfn >= xen_p2m_size)) {
+		if (pfn < xen_max_p2m_pfn)
+			return xen_chk_extra_mem(pfn);
+
 		return IDENTITY_FRAME(pfn);
+	}
 
 	topidx = p2m_top_index(pfn);
 	mididx = p2m_mid_index(pfn);
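
Callers cannot tell where the answer came from. A hypothetical helper shows the common idiom, which behaves the same before and after the delayed invalidation:

/* Hypothetical helper, not in the patch: true if the page has no
 * machine frame. For a pfn inside an extra-mem region this returns
 * true via xen_chk_extra_mem() even before xen_inv_extra_mem() ran.
 */
static bool pfn_is_unpopulated(unsigned long pfn)
{
	return get_phys_to_machine(pfn) == INVALID_P2M_ENTRY;
}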
@@ -613,78 +624,12 @@ static bool alloc_p2m(unsigned long pfn)
 	return true;
 }
 
-static bool __init early_alloc_p2m(unsigned long pfn, bool check_boundary)
-{
-	unsigned topidx, mididx, idx;
-	unsigned long *p2m;
-
-	topidx = p2m_top_index(pfn);
-	mididx = p2m_mid_index(pfn);
-	idx = p2m_index(pfn);
-
-	/* Pfff.. No boundary cross-over, lets get out. */
-	if (!idx && check_boundary)
-		return false;
-
-	WARN(p2m_top[topidx][mididx] == p2m_identity,
-	     "P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n",
-	     topidx, mididx);
-
-	/*
-	 * Could be done by xen_build_dynamic_phys_to_machine..
-	 */
-	if (p2m_top[topidx][mididx] != p2m_missing)
-		return false;
-
-	/* Boundary cross-over for the edges: */
-	p2m = alloc_p2m_page();
-
-	p2m_init(p2m);
-
-	p2m_top[topidx][mididx] = p2m;
-
-	return true;
-}
-
-static bool __init early_alloc_p2m_middle(unsigned long pfn)
-{
-	unsigned topidx = p2m_top_index(pfn);
-	unsigned long **mid;
-
-	mid = p2m_top[topidx];
-	if (mid == p2m_mid_missing) {
-		mid = alloc_p2m_page();
-
-		p2m_mid_init(mid, p2m_missing);
-
-		p2m_top[topidx] = mid;
-	}
-	return true;
-}
-
-static void __init early_split_p2m(unsigned long pfn)
-{
-	unsigned long mididx, idx;
-
-	mididx = p2m_mid_index(pfn);
-	idx = p2m_index(pfn);
-
-	/*
-	 * Allocate new middle and leaf pages if this pfn lies in the
-	 * middle of one.
-	 */
-	if (mididx || idx)
-		early_alloc_p2m_middle(pfn);
-	if (idx)
-		early_alloc_p2m(pfn, false);
-}
-
 unsigned long __init set_phys_range_identity(unsigned long pfn_s,
 					     unsigned long pfn_e)
 {
 	unsigned long pfn;
 
-	if (unlikely(pfn_s >= MAX_P2M_PFN))
+	if (unlikely(pfn_s >= xen_p2m_size))
 		return 0;
 
 	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
@@ -693,34 +638,11 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
 	if (pfn_s > pfn_e)
 		return 0;
 
-	if (pfn_e > MAX_P2M_PFN)
-		pfn_e = MAX_P2M_PFN;
-
-	early_split_p2m(pfn_s);
-	early_split_p2m(pfn_e);
-
-	for (pfn = pfn_s; pfn < pfn_e;) {
-		unsigned topidx = p2m_top_index(pfn);
-		unsigned mididx = p2m_mid_index(pfn);
-
-		if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
-			break;
-		pfn++;
-
-		/*
-		 * If the PFN was set to a middle or leaf identity
-		 * page the remainder must also be identity, so skip
-		 * ahead to the next middle or leaf entry.
-		 */
-		if (p2m_top[topidx] == p2m_mid_identity)
-			pfn = ALIGN(pfn, P2M_MID_PER_PAGE * P2M_PER_PAGE);
-		else if (p2m_top[topidx][mididx] == p2m_identity)
-			pfn = ALIGN(pfn, P2M_PER_PAGE);
-	}
+	if (pfn_e > xen_p2m_size)
+		pfn_e = xen_p2m_size;
 
-	WARN((pfn - pfn_s) != (pfn_e - pfn_s),
-	     "Identity mapping failed. We are %ld short of 1-1 mappings!\n",
-	     (pfn_e - pfn_s) - (pfn - pfn_s));
+	for (pfn = pfn_s; pfn < pfn_e; pfn++)
+		xen_p2m_addr[pfn] = IDENTITY_FRAME(pfn);
 
 	return pfn - pfn_s;
 }
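
The rewritten loop can store identity entries directly because, at this stage, the p2m list is still the linear array supplied by the hypervisor, and IDENTITY_FRAME() merely tags the pfn with a type bit. For reference, the pre-existing definitions in asm/xen/page.h:

#define IDENTITY_FRAME_BIT	(1UL << (BITS_PER_LONG - 2))
#define IDENTITY_FRAME(m)	((m) | IDENTITY_FRAME_BIT)

This is what made the early_alloc_p2m*()/early_split_p2m() helpers above removable: no tree pages need to be allocated just to record an identity range.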
@@ -734,7 +656,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
 		return true;
 
-	if (unlikely(pfn >= MAX_P2M_PFN)) {
+	if (unlikely(pfn >= xen_p2m_size)) {
 		BUG_ON(mfn != INVALID_P2M_ENTRY);
 		return true;
 	}
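
A consequence of the tightened bound: beyond the list, only stores of "invalid" entries are tolerated. An illustration with a hypothetical call site:

static void __init example_store_beyond_list(void)
{
	/* Accepted: entries past xen_p2m_size are implicitly invalid. */
	__set_phys_to_machine(xen_p2m_size, INVALID_P2M_ENTRY);
	/* Storing any real mfn here would trip the BUG_ON above. */
}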

arch/x86/xen/setup.c

Lines changed: 73 additions & 25 deletions
@@ -76,7 +76,6 @@ static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
 
 static void __init xen_add_extra_mem(u64 start, u64 size)
 {
-	unsigned long pfn;
 	int i;
 
 	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
@@ -96,17 +95,75 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
 		printk(KERN_WARNING "Warning: not enough extra memory regions\n");
 
 	memblock_reserve(start, size);
+}
 
-	xen_max_p2m_pfn = PFN_DOWN(start + size);
-	for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
-		unsigned long mfn = pfn_to_mfn(pfn);
+static void __init xen_del_extra_mem(u64 start, u64 size)
+{
+	int i;
+	u64 start_r, size_r;
 
-		if (WARN_ONCE(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
-			continue;
-		WARN_ONCE(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
-			  pfn, mfn);
+	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+		start_r = xen_extra_mem[i].start;
+		size_r = xen_extra_mem[i].size;
+
+		/* Start of region. */
+		if (start_r == start) {
+			BUG_ON(size > size_r);
+			xen_extra_mem[i].start += size;
+			xen_extra_mem[i].size -= size;
+			break;
+		}
+		/* End of region. */
+		if (start_r + size_r == start + size) {
+			BUG_ON(size > size_r);
+			xen_extra_mem[i].size -= size;
+			break;
+		}
+		/* Mid of region. */
+		if (start > start_r && start < start_r + size_r) {
+			BUG_ON(start + size > start_r + size_r);
+			xen_extra_mem[i].size = start - start_r;
+			/* Calling memblock_reserve() again is okay. */
+			xen_add_extra_mem(start + size, start_r + size_r -
+					  (start + size));
+			break;
+		}
+	}
+	memblock_free(start, size);
+}
+
+/*
+ * Called during boot before the p2m list can take entries beyond the
+ * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
+ * invalid.
+ */
+unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
+{
+	int i;
+	unsigned long addr = PFN_PHYS(pfn);
 
-		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+		if (addr >= xen_extra_mem[i].start &&
+		    addr < xen_extra_mem[i].start + xen_extra_mem[i].size)
+			return INVALID_P2M_ENTRY;
+	}
+
+	return IDENTITY_FRAME(pfn);
+}
+
+/*
+ * Mark all pfns of extra mem as invalid in p2m list.
+ */
+void __init xen_inv_extra_mem(void)
+{
+	unsigned long pfn, pfn_s, pfn_e;
+	int i;
+
+	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+		pfn_s = PFN_DOWN(xen_extra_mem[i].start);
+		pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
+		for (pfn = pfn_s; pfn < pfn_e; pfn++)
+			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 	}
 }
@@ -268,9 +325,6 @@ static void __init xen_do_set_identity_and_remap_chunk(
 
 	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
 
-	/* Don't use memory until remapped */
-	memblock_reserve(PFN_PHYS(remap_pfn), PFN_PHYS(size));
-
 	mfn_save = virt_to_mfn(buf);
 
 	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
@@ -314,7 +368,7 @@
  * pages. In the case of an error the underlying memory is simply released back
  * to Xen and not remapped.
  */
-static unsigned long __init xen_set_identity_and_remap_chunk(
+static unsigned long xen_set_identity_and_remap_chunk(
 	const struct e820entry *list, size_t map_size, unsigned long start_pfn,
 	unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
 	unsigned long *identity, unsigned long *released)
@@ -371,7 +425,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 	return remap_pfn;
 }
 
-static unsigned long __init xen_set_identity_and_remap(
+static void __init xen_set_identity_and_remap(
 	const struct e820entry *list, size_t map_size, unsigned long nr_pages,
 	unsigned long *released)
 {
@@ -415,8 +469,6 @@ static unsigned long __init xen_set_identity_and_remap(
 
 	pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
 	pr_info("Released %ld page(s)\n", num_released);
-
-	return last_pfn;
 }
 
 /*
@@ -456,7 +508,7 @@ void __init xen_remap_memory(void)
 		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
 			len += xen_remap_buf.size;
 		} else {
-			memblock_free(PFN_PHYS(pfn_s), PFN_PHYS(len));
+			xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
 			pfn_s = xen_remap_buf.target_pfn;
 			len = xen_remap_buf.size;
 		}
@@ -466,7 +518,7 @@
 	}
 
 	if (pfn_s != ~0UL && len)
-		memblock_free(PFN_PHYS(pfn_s), PFN_PHYS(len));
+		xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
 
 	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
 
@@ -533,7 +585,6 @@ char * __init xen_memory_setup(void)
 	int rc;
 	struct xen_memory_map memmap;
 	unsigned long max_pages;
-	unsigned long last_pfn = 0;
 	unsigned long extra_pages = 0;
 	int i;
 	int op;
@@ -583,15 +634,11 @@
 	 * Set identity map on non-RAM pages and prepare remapping the
 	 * underlying RAM.
 	 */
-	last_pfn = xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
-					      &xen_released_pages);
+	xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
+				   &xen_released_pages);
 
 	extra_pages += xen_released_pages;
 
-	if (last_pfn > max_pfn) {
-		max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
-		mem_end = PFN_PHYS(max_pfn);
-	}
 	/*
 	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
 	 * factor the base size. On non-highmem systems, the base
@@ -618,6 +665,7 @@ char * __init xen_memory_setup(void)
 			size = min(size, (u64)extra_pages * PAGE_SIZE);
 			extra_pages -= size / PAGE_SIZE;
 			xen_add_extra_mem(addr, size);
+			xen_max_p2m_pfn = PFN_DOWN(addr + size);
 		} else
 			type = E820_UNUSABLE;
 	}
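
Note that xen_max_p2m_pfn now advances here, per added region, instead of inside xen_add_extra_mem(). The rounding helpers used throughout are the standard kernel PFN macros; a sketch of their semantics:

/* Sketch of PFN_DOWN()/PFN_UP() (see include/linux/pfn.h): round a
 * physical address down resp. up to a page frame number. With 4 KiB
 * pages, pfn_down(0x1fff) == 1 and pfn_up(0x1001) == 2.
 */
static unsigned long pfn_down(u64 addr) { return addr >> PAGE_SHIFT; }
static unsigned long pfn_up(u64 addr)
{
	return (addr + PAGE_SIZE - 1) >> PAGE_SHIFT;
}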
