Skip to content

Commit 0e4c12b

Browse files
tlendacky authored and KAGA-KOKO committed
x86/mm, resource: Use PAGE_KERNEL protection for ioremap of memory pages
In order for memory pages to be properly mapped when SEV is active, it's necessary to use the PAGE_KERNEL protection attribute as the base protection. This ensures that memory mapping of, e.g. ACPI tables, receives the proper mapping attributes. Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Borislav Petkov <bp@suse.de> Tested-by: Borislav Petkov <bp@suse.de> Cc: Laura Abbott <labbott@redhat.com> Cc: Kees Cook <keescook@chromium.org> Cc: kvm@vger.kernel.org Cc: Jérôme Glisse <jglisse@redhat.com> Cc: Borislav Petkov <bp@alien8.de> Cc: Andy Lutomirski <luto@kernel.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Dan Williams <dan.j.williams@intel.com> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Link: https://lkml.kernel.org/r/20171020143059.3291-11-brijesh.singh@amd.com
1 parent 1d2e733 commit 0e4c12b

File tree

3 files changed

+89
-12
lines changed

3 files changed

+89
-12
lines changed

arch/x86/mm/ioremap.c

Lines changed: 67 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,11 @@
2727

2828
#include "physaddr.h"
2929

30+
/*
 * Result of scanning the iomem resource tree for a physical range:
 * what kind of memory the range to be ioremap'd overlaps.
 */
struct ioremap_mem_flags {
	bool system_ram;	/* range overlaps unreserved System RAM */
	bool desc_other;	/* range has a desc other than IORES_DESC_NONE */
};
34+
3035
/*
3136
* Fix up the linear direct mapping of the kernel to avoid cache attribute
3237
* conflicts.
@@ -56,17 +61,59 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
5661
return err;
5762
}
5863

59-
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
60-
void *arg)
64+
static bool __ioremap_check_ram(struct resource *res)
6165
{
66+
unsigned long start_pfn, stop_pfn;
6267
unsigned long i;
6368

64-
for (i = 0; i < nr_pages; ++i)
65-
if (pfn_valid(start_pfn + i) &&
66-
!PageReserved(pfn_to_page(start_pfn + i)))
67-
return 1;
69+
if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
70+
return false;
6871

69-
return 0;
72+
start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
73+
stop_pfn = (res->end + 1) >> PAGE_SHIFT;
74+
if (stop_pfn > start_pfn) {
75+
for (i = 0; i < (stop_pfn - start_pfn); ++i)
76+
if (pfn_valid(start_pfn + i) &&
77+
!PageReserved(pfn_to_page(start_pfn + i)))
78+
return true;
79+
}
80+
81+
return false;
82+
}
83+
84+
static int __ioremap_check_desc_other(struct resource *res)
85+
{
86+
return (res->desc != IORES_DESC_NONE);
87+
}
88+
89+
static int __ioremap_res_check(struct resource *res, void *arg)
90+
{
91+
struct ioremap_mem_flags *flags = arg;
92+
93+
if (!flags->system_ram)
94+
flags->system_ram = __ioremap_check_ram(res);
95+
96+
if (!flags->desc_other)
97+
flags->desc_other = __ioremap_check_desc_other(res);
98+
99+
return flags->system_ram && flags->desc_other;
100+
}
101+
102+
/*
103+
* To avoid multiple resource walks, this function walks resources marked as
104+
* IORESOURCE_MEM and IORESOURCE_BUSY and looking for system RAM and/or a
105+
* resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
106+
*/
107+
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
108+
struct ioremap_mem_flags *flags)
109+
{
110+
u64 start, end;
111+
112+
start = (u64)addr;
113+
end = start + size - 1;
114+
memset(flags, 0, sizeof(*flags));
115+
116+
walk_mem_res(start, end, flags, __ioremap_res_check);
70117
}
71118

72119
/*
@@ -87,9 +134,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
87134
unsigned long size, enum page_cache_mode pcm, void *caller)
88135
{
89136
unsigned long offset, vaddr;
90-
resource_size_t pfn, last_pfn, last_addr;
137+
resource_size_t last_addr;
91138
const resource_size_t unaligned_phys_addr = phys_addr;
92139
const unsigned long unaligned_size = size;
140+
struct ioremap_mem_flags mem_flags;
93141
struct vm_struct *area;
94142
enum page_cache_mode new_pcm;
95143
pgprot_t prot;
@@ -108,13 +156,12 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
108156
return NULL;
109157
}
110158

159+
__ioremap_check_mem(phys_addr, size, &mem_flags);
160+
111161
/*
112162
* Don't allow anybody to remap normal RAM that we're using..
113163
*/
114-
pfn = phys_addr >> PAGE_SHIFT;
115-
last_pfn = last_addr >> PAGE_SHIFT;
116-
if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
117-
__ioremap_check_ram) == 1) {
164+
if (mem_flags.system_ram) {
118165
WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
119166
&phys_addr, &last_addr);
120167
return NULL;
@@ -146,7 +193,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
146193
pcm = new_pcm;
147194
}
148195

196+
/*
197+
* If the page being mapped is in memory and SEV is active then
198+
* make sure the memory encryption attribute is enabled in the
199+
* resulting mapping.
200+
*/
149201
prot = PAGE_KERNEL_IO;
202+
if (sev_active() && mem_flags.desc_other)
203+
prot = pgprot_encrypted(prot);
204+
150205
switch (pcm) {
151206
case _PAGE_CACHE_MODE_UC:
152207
default:

include/linux/ioport.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -271,6 +271,9 @@ extern int
271271
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
272272
void *arg, int (*func)(unsigned long, unsigned long, void *));
273273
extern int
274+
walk_mem_res(u64 start, u64 end, void *arg,
275+
int (*func)(struct resource *, void *));
276+
extern int
274277
walk_system_ram_res(u64 start, u64 end, void *arg,
275278
int (*func)(struct resource *, void *));
276279
extern int

kernel/resource.c

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -397,6 +397,8 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
397397
res->start = p->start;
398398
if (res->end > p->end)
399399
res->end = p->end;
400+
res->flags = p->flags;
401+
res->desc = p->desc;
400402
return 0;
401403
}
402404

@@ -467,6 +469,23 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
467469
arg, func);
468470
}
469471

472+
/*
473+
* This function calls the @func callback against all memory ranges, which
474+
* are ranges marked as IORESOURCE_MEM and IORESOUCE_BUSY.
475+
*/
476+
int walk_mem_res(u64 start, u64 end, void *arg,
477+
int (*func)(struct resource *, void *))
478+
{
479+
struct resource res;
480+
481+
res.start = start;
482+
res.end = end;
483+
res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
484+
485+
return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
486+
arg, func);
487+
}
488+
470489
#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
471490

472491
/*

0 commit comments

Comments
 (0)