
Commit b3f0907

codomania authored and KAGA-KOKO committed
x86/mm: Add .bss..decrypted section to hold shared variables
kvmclock defines a few static variables which are shared with the
hypervisor during the kvmclock initialization.

When SEV is active, memory is encrypted with a guest-specific key, and
if the guest OS wants to share a memory region with the hypervisor then
it must clear the C-bit before sharing it.

Currently, we use kernel_physical_mapping_init() to split large pages
before clearing the C-bit on shared pages, but this fails when called
from the kvmclock initialization (mainly because the memblock allocator
is not ready that early during boot).

Add a __bss_decrypted section attribute which can be used when defining
such shared variables. The so-defined variables will be placed in the
.bss..decrypted section. This section will be mapped with C=0 early
during boot.

The .bss..decrypted section contains a large chunk of memory that is
unused when memory encryption is not active; free it in that case.

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: kvm@vger.kernel.org
Link: https://lkml.kernel.org/r/1536932759-12905-2-git-send-email-brijesh.singh@amd.com
1 parent 27c5a77 commit b3f0907
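As a usage illustration (not part of this commit; the kvmclock
conversion itself comes in a follow-up patch), a guest variable that
must stay hypervisor-accessible under SEV might be declared as below.
The name hv_shared_page is hypothetical:

	#include <asm/mem_encrypt.h>	/* __bss_decrypted */

	/*
	 * Hypothetical example: a page of data the guest shares with the
	 * hypervisor.  Placed in .bss..decrypted, it is mapped with C=0
	 * early in boot, so the hypervisor can access it even when SEV
	 * encrypts the rest of guest memory.
	 */
	static u8 hv_shared_page[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);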

File tree

5 files changed: +70 -0 lines changed

arch/x86/include/asm/mem_encrypt.h

Lines changed: 7 additions & 0 deletions
@@ -48,10 +48,13 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
 
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_init(void);
+void __init mem_encrypt_free_decrypted_mem(void);
 
 bool sme_active(void);
 bool sev_active(void);
 
+#define __bss_decrypted __attribute__((__section__(".bss..decrypted")))
+
 #else	/* !CONFIG_AMD_MEM_ENCRYPT */
 
 #define sme_me_mask	0ULL
@@ -77,6 +80,8 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0;
 static inline int __init
 early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
 
+#define __bss_decrypted
+
 #endif	/* CONFIG_AMD_MEM_ENCRYPT */
 
 /*
@@ -88,6 +93,8 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0;
 #define __sme_pa(x)		(__pa(x) | sme_me_mask)
 #define __sme_pa_nodebug(x)	(__pa_nodebug(x) | sme_me_mask)
 
+extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
+
 #endif	/* __ASSEMBLY__ */
 
 #endif	/* __X86_MEM_ENCRYPT_H__ */
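The three bracketing symbols are what head64.c and mem_encrypt.c use
below to walk the section. A minimal sketch of the arithmetic they
enable (the helper name is made up, not from this commit):

	/* Sketch: page count of the reclaimable tail, via the new symbols. */
	static unsigned long bss_decrypted_unused_pages(void)
	{
		return ((unsigned long)__end_bss_decrypted -
			(unsigned long)__start_bss_decrypted_unused) >> PAGE_SHIFT;
	}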

arch/x86/kernel/head64.c

Lines changed: 16 additions & 0 deletions
@@ -112,6 +112,7 @@ static bool __head check_la57_support(unsigned long physaddr)
 unsigned long __head __startup_64(unsigned long physaddr,
 				  struct boot_params *bp)
 {
+	unsigned long vaddr, vaddr_end;
 	unsigned long load_delta, *p;
 	unsigned long pgtable_flags;
 	pgdval_t *pgd;
@@ -234,6 +235,21 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	/* Encrypt the kernel and related (if SME is active) */
 	sme_encrypt_kernel(bp);
 
+	/*
+	 * Clear the memory encryption mask from the .bss..decrypted section.
+	 * The bss section will be memset to zero later in the initialization so
+	 * there is no need to zero it after changing the memory encryption
+	 * attribute.
+	 */
+	if (mem_encrypt_active()) {
+		vaddr = (unsigned long)__start_bss_decrypted;
+		vaddr_end = (unsigned long)__end_bss_decrypted;
+		for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
+			i = pmd_index(vaddr);
+			pmd[i] -= sme_get_me_mask();
+		}
+	}
+
 	/*
 	 * Return the SME encryption mask (if SME is active) to be used as a
 	 * modifier for the initial pgdir entry programmed into CR3.
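Subtracting sme_get_me_mask() works here because the mask is a single
bit (the C-bit) that is known to be set in every PMD entry of this
range at this point, so subtraction and bit-clearing are equivalent. A
small standalone C sketch of that identity (the bit position and entry
value are made up for illustration):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t me_mask = 1ULL << 47;			/* hypothetical C-bit */
		uint64_t pmd_entry = 0x1234000ULL | me_mask;	/* entry with C-bit set */

		/* When the bit is known set, subtracting equals clearing it. */
		assert((pmd_entry - me_mask) == (pmd_entry & ~me_mask));
		return 0;
	}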

arch/x86/kernel/vmlinux.lds.S

Lines changed: 19 additions & 0 deletions
@@ -65,6 +65,23 @@ jiffies_64 = jiffies;
 #define ALIGN_ENTRY_TEXT_BEGIN	. = ALIGN(PMD_SIZE);
 #define ALIGN_ENTRY_TEXT_END	. = ALIGN(PMD_SIZE);
 
+/*
+ * This section contains data which will be mapped as decrypted. Memory
+ * encryption operates on a page basis. Make this section PMD-aligned
+ * to avoid splitting the pages while mapping the section early.
+ *
+ * Note: We use a separate section so that only this section gets
+ * decrypted to avoid exposing more than we wish.
+ */
+#define BSS_DECRYPTED						\
+	. = ALIGN(PMD_SIZE);					\
+	__start_bss_decrypted = .;				\
+	*(.bss..decrypted);					\
+	. = ALIGN(PAGE_SIZE);					\
+	__start_bss_decrypted_unused = .;			\
+	. = ALIGN(PMD_SIZE);					\
+	__end_bss_decrypted = .;				\
+
 #else
 
 #define X86_ALIGN_RODATA_BEGIN
@@ -74,6 +91,7 @@ jiffies_64 = jiffies;
 
 #define ALIGN_ENTRY_TEXT_BEGIN
 #define ALIGN_ENTRY_TEXT_END
+#define BSS_DECRYPTED
 
 #endif
 
@@ -355,6 +373,7 @@ SECTIONS
 		__bss_start = .;
 		*(.bss..page_aligned)
 		*(.bss)
+		BSS_DECRYPTED
 		. = ALIGN(PAGE_SIZE);
 		__bss_stop = .;
 	}
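Because the used data ends at a PAGE_SIZE boundary
(__start_bss_decrypted_unused) while the section is padded out to the
next PMD boundary (__end_bss_decrypted), up to PMD_SIZE - PAGE_SIZE
bytes of the section can be pure padding. A standalone sketch of the
worst case, assuming the usual x86_64 values of 4 KiB pages and 2 MiB
PMDs:

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_size = 4096UL;		/* 4 KiB page */
		unsigned long pmd_size  = 2UL * 1024 * 1024;	/* 2 MiB PMD */

		/* Worst-case padding between the two ALIGN()s in BSS_DECRYPTED. */
		unsigned long max_unused = pmd_size - page_size;

		printf("up to %lu KiB (%lu pages) reclaimable\n",
		       max_unused / 1024, max_unused / page_size);
		return 0;
	}

This prints "up to 2044 KiB (511 pages) reclaimable", which is the
memory mem_encrypt_free_decrypted_mem() gives back when encryption is
not active.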

arch/x86/mm/init.c

Lines changed: 4 additions & 0 deletions
@@ -815,10 +815,14 @@ void free_kernel_image_pages(void *begin, void *end)
 	set_memory_np_noalias(begin_ul, len_pages);
 }
 
+void __weak mem_encrypt_free_decrypted_mem(void) { }
+
 void __ref free_initmem(void)
 {
 	e820__reallocate_tables();
 
+	mem_encrypt_free_decrypted_mem();
+
 	free_kernel_image_pages(&__init_begin, &__init_end);
 }
 
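The __weak stub lets free_initmem() call the hook unconditionally; when
CONFIG_AMD_MEM_ENCRYPT is built in, the strong definition in
arch/x86/mm/mem_encrypt.c overrides it at link time. A standalone
illustration of that pattern (function names are illustrative only;
works with GCC/Clang on ELF targets):

	#include <stdio.h>

	/* Default no-op, used when no strong definition is linked in. */
	void __attribute__((weak)) free_decrypted_hook(void)
	{
		puts("weak default: nothing to do");
	}

	/* A strong definition elsewhere would silently replace the weak one:
	 * void free_decrypted_hook(void) { puts("strong override"); }
	 */

	int main(void)
	{
		free_decrypted_hook();
		return 0;
	}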

arch/x86/mm/mem_encrypt.c

Lines changed: 24 additions & 0 deletions
@@ -348,6 +348,30 @@ bool sev_active(void)
 EXPORT_SYMBOL(sev_active);
 
 /* Architecture __weak replacement functions */
+void __init mem_encrypt_free_decrypted_mem(void)
+{
+	unsigned long vaddr, vaddr_end, npages;
+	int r;
+
+	vaddr = (unsigned long)__start_bss_decrypted_unused;
+	vaddr_end = (unsigned long)__end_bss_decrypted;
+	npages = (vaddr_end - vaddr) >> PAGE_SHIFT;
+
+	/*
+	 * The unused memory range was mapped decrypted, change the encryption
+	 * attribute from decrypted to encrypted before freeing it.
+	 */
+	if (mem_encrypt_active()) {
+		r = set_memory_encrypted(vaddr, npages);
+		if (r) {
+			pr_warn("failed to free unused decrypted pages\n");
+			return;
+		}
+	}
+
+	free_init_pages("unused decrypted", vaddr, vaddr_end);
+}
+
 void __init mem_encrypt_init(void)
 {
 	if (!sme_me_mask)
