
Commit 1958b5f

tlendacky authored and KAGA-KOKO committed
x86/boot: Add early boot support when running with SEV active
Early in the boot process, add checks to determine whether the kernel is running with Secure Encrypted Virtualization (SEV) active.

Checking for SEV requires checking that the kernel is running under a hypervisor (CPUID 0x00000001, bit 31), that the SEV feature is available (CPUID 0x8000001f, bit 1) and then checking a non-interceptable SEV MSR (0xc0010131, bit 0).

This check is required so that, during early compressed kernel booting, the pagetables (both the boot pagetables and the KASLR pagetables, if enabled) are updated to include the encryption mask, so that when the kernel is decompressed into encrypted memory it can boot properly.

After the kernel is decompressed and continues booting, the same logic is used to check whether SEV is active and to set a flag indicating so. This makes it possible to distinguish between SME and SEV, each of which differs in how certain things are handled: e.g. DMA (always bounce buffered with SEV) or EFI tables (always accessed decrypted with SME).

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Tested-by: Borislav Petkov <bp@suse.de>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: kvm@vger.kernel.org
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Link: https://lkml.kernel.org/r/20171020143059.3291-13-brijesh.singh@amd.com
1 parent d7b417f commit 1958b5f
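The detection order described in the commit message can be sketched in C. This is only an illustrative sketch by the editor, not code from this commit: the cpuid wrapper is hand-rolled, and reading MSR 0xc0010131 requires CPL 0 (in the kernel this is done via __rdmsr()), so the function mirrors the early-boot logic rather than being a drop-in userspace test.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative wrapper around the CPUID instruction (subleaf 0). */
static void cpuid(uint32_t leaf, uint32_t *eax, uint32_t *ebx,
                  uint32_t *ecx, uint32_t *edx)
{
        __asm__ volatile("cpuid"
                         : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx)
                         : "a"(leaf), "c"(0));
}

/* Sketch of the SEV check order: hypervisor bit, SEV feature bit, SEV MSR. */
static bool sev_is_active(void)
{
        uint32_t eax, ebx, ecx, edx, lo, hi;

        /* 1. Running under a hypervisor? CPUID 0x00000001, ECX bit 31 */
        cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
        if (!(ecx & (1u << 31)))
                return false;

        /* 2. Leaf 0x8000001f available and SEV supported (EAX bit 1)? */
        cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
        if (eax < 0x8000001f)
                return false;
        cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
        if (!(eax & (1u << 1)))
                return false;

        /* 3. SEV enabled for this guest? MSR 0xc0010131, bit 0.
         *    RDMSR faults outside ring 0, so this only works in the kernel.
         */
        __asm__ volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(0xc0010131u));
        (void)hi;
        return lo & 1u;
}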

File tree

8 files changed (+186 −15 lines changed)

arch/x86/boot/compressed/Makefile

Lines changed: 1 addition & 0 deletions
@@ -78,6 +78,7 @@ vmlinux-objs-$(CONFIG_EARLY_PRINTK) += $(obj)/early_serial_console.o
 vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o
 ifdef CONFIG_X86_64
         vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/pagetable.o
+        vmlinux-objs-y += $(obj)/mem_encrypt.o
 endif

 $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone

arch/x86/boot/compressed/head_64.S

Lines changed: 16 additions & 0 deletions
@@ -131,6 +131,19 @@ ENTRY(startup_32)
         /*
          * Build early 4G boot pagetable
          */
+        /*
+         * If SEV is active then set the encryption mask in the page tables.
+         * This will insure that when the kernel is copied and decompressed
+         * it will be done so encrypted.
+         */
+        call    get_sev_encryption_bit
+        xorl    %edx, %edx
+        testl   %eax, %eax
+        jz      1f
+        subl    $32, %eax       /* Encryption bit is always above bit 31 */
+        bts     %eax, %edx      /* Set encryption mask for page tables */
+1:
+
         /* Initialize Page tables to 0 */
         leal    pgtable(%ebx), %edi
         xorl    %eax, %eax
@@ -141,12 +154,14 @@ ENTRY(startup_32)
         leal    pgtable + 0(%ebx), %edi
         leal    0x1007 (%edi), %eax
         movl    %eax, 0(%edi)
+        addl    %edx, 4(%edi)

         /* Build Level 3 */
         leal    pgtable + 0x1000(%ebx), %edi
         leal    0x1007(%edi), %eax
         movl    $4, %ecx
 1:      movl    %eax, 0x00(%edi)
+        addl    %edx, 0x04(%edi)
         addl    $0x00001000, %eax
         addl    $8, %edi
         decl    %ecx
@@ -157,6 +172,7 @@ ENTRY(startup_32)
         movl    $0x00000183, %eax
         movl    $2048, %ecx
 1:      movl    %eax, 0(%edi)
+        addl    %edx, 4(%edi)
         addl    $0x00200000, %eax
         addl    $8, %edi
         decl    %ecx
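A note on the mask arithmetic above, with a small C equivalent (editor's sketch, not part of the commit; the bit position 47 is just an example of what CPUID Fn8000_001F[EBX][5:0] might report): the encryption bit is always above bit 31, so after subtracting 32 it indexes into the upper 32 bits of each 64-bit page-table entry, which is why %edx is added to the entries' high dwords.

#include <stdint.h>

/* Mirrors "subl $32, %eax; bts %eax, %edx": convert the encryption bit
 * position (always > 31, or 0 when SEV is not active) into the value the
 * assembly adds to the high dword of each 64-bit page-table entry. */
static uint32_t pte_high_mask(unsigned int enc_bit)
{
        return enc_bit ? (1u << (enc_bit - 32)) : 0;
}

/* Example: pte_high_mask(47) == 0x8000, i.e. the full 64-bit mask is
 * (uint64_t)0x8000 << 32 == 1ULL << 47. */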
arch/x86/boot/compressed/mem_encrypt.S

Lines changed: 120 additions & 0 deletions
@@ -0,0 +1,120 @@
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/processor-flags.h>
+#include <asm/msr.h>
+#include <asm/asm-offsets.h>
+
+        .text
+        .code32
+ENTRY(get_sev_encryption_bit)
+        xor     %eax, %eax
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+        push    %ebx
+        push    %ecx
+        push    %edx
+        push    %edi
+
+        /*
+         * RIP-relative addressing is needed to access the encryption bit
+         * variable. Since we are running in 32-bit mode we need this call/pop
+         * sequence to get the proper relative addressing.
+         */
+        call    1f
+1:      popl    %edi
+        subl    $1b, %edi
+
+        movl    enc_bit(%edi), %eax
+        cmpl    $0, %eax
+        jge     .Lsev_exit
+
+        /* Check if running under a hypervisor */
+        movl    $1, %eax
+        cpuid
+        bt      $31, %ecx               /* Check the hypervisor bit */
+        jnc     .Lno_sev
+
+        movl    $0x80000000, %eax       /* CPUID to check the highest leaf */
+        cpuid
+        cmpl    $0x8000001f, %eax       /* See if 0x8000001f is available */
+        jb      .Lno_sev
+
+        /*
+         * Check for the SEV feature:
+         *   CPUID Fn8000_001F[EAX] - Bit 1
+         *   CPUID Fn8000_001F[EBX] - Bits 5:0
+         *     Pagetable bit position used to indicate encryption
+         */
+        movl    $0x8000001f, %eax
+        cpuid
+        bt      $1, %eax                /* Check if SEV is available */
+        jnc     .Lno_sev
+
+        movl    $MSR_AMD64_SEV, %ecx    /* Read the SEV MSR */
+        rdmsr
+        bt      $MSR_AMD64_SEV_ENABLED_BIT, %eax  /* Check if SEV is active */
+        jnc     .Lno_sev
+
+        movl    %ebx, %eax
+        andl    $0x3f, %eax             /* Return the encryption bit location */
+        movl    %eax, enc_bit(%edi)
+        jmp     .Lsev_exit
+
+.Lno_sev:
+        xor     %eax, %eax
+        movl    %eax, enc_bit(%edi)
+
+.Lsev_exit:
+        pop     %edi
+        pop     %edx
+        pop     %ecx
+        pop     %ebx
+
+#endif  /* CONFIG_AMD_MEM_ENCRYPT */
+
+        ret
+ENDPROC(get_sev_encryption_bit)
+
+        .code64
+ENTRY(get_sev_encryption_mask)
+        xor     %rax, %rax
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+        push    %rbp
+        push    %rdx
+
+        movq    %rsp, %rbp              /* Save current stack pointer */
+
+        call    get_sev_encryption_bit  /* Get the encryption bit position */
+        testl   %eax, %eax
+        jz      .Lno_sev_mask
+
+        xor     %rdx, %rdx
+        bts     %rax, %rdx              /* Create the encryption mask */
+        mov     %rdx, %rax              /* ... and return it */
+
+.Lno_sev_mask:
+        movq    %rbp, %rsp              /* Restore original stack pointer */
+
+        pop     %rdx
+        pop     %rbp
+#endif
+
+        ret
+ENDPROC(get_sev_encryption_mask)
+
+        .data
+enc_bit:
+        .int    0xffffffff

arch/x86/boot/compressed/misc.h

Lines changed: 2 additions & 0 deletions
@@ -109,4 +109,6 @@ static inline void console_init(void)
 { }
 #endif

+unsigned long get_sev_encryption_mask(void);
+
 #endif

arch/x86/boot/compressed/pagetable.c

Lines changed: 5 additions & 3 deletions
@@ -77,16 +77,18 @@ static unsigned long top_level_pgt;
  * Mapping information structure passed to kernel_ident_mapping_init().
  * Due to relocation, pointers must be assigned at run time not build time.
  */
-static struct x86_mapping_info mapping_info = {
-        .page_flag      = __PAGE_KERNEL_LARGE_EXEC,
-};
+static struct x86_mapping_info mapping_info;

 /* Locates and clears a region for a new top level page table. */
 void initialize_identity_maps(void)
 {
+        unsigned long sev_me_mask = get_sev_encryption_mask();
+
         /* Init mapping_info with run-time function/buffer pointers. */
         mapping_info.alloc_pgt_page = alloc_pgt_page;
         mapping_info.context = &pgt_data;
+        mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sev_me_mask;
+        mapping_info.kernpg_flag = _KERNPG_TABLE | sev_me_mask;

         /*
          * It should be impossible for this not to already be true,

arch/x86/include/asm/msr-index.h

Lines changed: 3 additions & 0 deletions
@@ -324,6 +324,9 @@
 #define MSR_AMD64_IBSBRTARGET           0xc001103b
 #define MSR_AMD64_IBSOPDATA4            0xc001103d
 #define MSR_AMD64_IBS_REG_COUNT_MAX     8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_SEV                   0xc0010131
+#define MSR_AMD64_SEV_ENABLED_BIT       0
+#define MSR_AMD64_SEV_ENABLED           BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)

 /* Fam 17h MSRs */
 #define MSR_F17H_IRPERF                 0xc00000e9

arch/x86/include/uapi/asm/kvm_para.h

Lines changed: 0 additions & 1 deletion
@@ -110,5 +110,4 @@ struct kvm_vcpu_pv_apf_data {
 #define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
 #define KVM_PV_EOI_DISABLED 0x0

-
 #endif /* _UAPI_ASM_X86_KVM_PARA_H */

arch/x86/mm/mem_encrypt.c

Lines changed: 39 additions & 11 deletions
@@ -313,7 +313,9 @@ void __init mem_encrypt_init(void)
         if (sev_active())
                 dma_ops = &sev_dma_ops;

-        pr_info("AMD Secure Memory Encryption (SME) active\n");
+        pr_info("AMD %s active\n",
+                sev_active() ? "Secure Encrypted Virtualization (SEV)"
+                             : "Secure Memory Encryption (SME)");
 }

 void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
@@ -641,37 +643,63 @@ void __init __nostackprotector sme_enable(struct boot_params *bp)
 {
         const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
         unsigned int eax, ebx, ecx, edx;
+        unsigned long feature_mask;
         bool active_by_default;
         unsigned long me_mask;
         char buffer[16];
         u64 msr;

-        /* Check for the SME support leaf */
+        /* Check for the SME/SEV support leaf */
         eax = 0x80000000;
         ecx = 0;
         native_cpuid(&eax, &ebx, &ecx, &edx);
         if (eax < 0x8000001f)
                 return;

+#define AMD_SME_BIT     BIT(0)
+#define AMD_SEV_BIT     BIT(1)
         /*
-         * Check for the SME feature:
-         *   CPUID Fn8000_001F[EAX] - Bit 0
-         *     Secure Memory Encryption support
-         *   CPUID Fn8000_001F[EBX] - Bits 5:0
-         *     Pagetable bit position used to indicate encryption
+         * Set the feature mask (SME or SEV) based on whether we are
+         * running under a hypervisor.
+         */
+        eax = 1;
+        ecx = 0;
+        native_cpuid(&eax, &ebx, &ecx, &edx);
+        feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;
+
+        /*
+         * Check for the SME/SEV feature:
+         *   CPUID Fn8000_001F[EAX]
+         *   - Bit 0 - Secure Memory Encryption support
+         *   - Bit 1 - Secure Encrypted Virtualization support
+         *   CPUID Fn8000_001F[EBX]
+         *   - Bits 5:0 - Pagetable bit position used to indicate encryption
          */
         eax = 0x8000001f;
         ecx = 0;
         native_cpuid(&eax, &ebx, &ecx, &edx);
-        if (!(eax & 1))
+        if (!(eax & feature_mask))
                 return;

         me_mask = 1UL << (ebx & 0x3f);

-        /* Check if SME is enabled */
-        msr = __rdmsr(MSR_K8_SYSCFG);
-        if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+        /* Check if memory encryption is enabled */
+        if (feature_mask == AMD_SME_BIT) {
+                /* For SME, check the SYSCFG MSR */
+                msr = __rdmsr(MSR_K8_SYSCFG);
+                if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+                        return;
+        } else {
+                /* For SEV, check the SEV MSR */
+                msr = __rdmsr(MSR_AMD64_SEV);
+                if (!(msr & MSR_AMD64_SEV_ENABLED))
+                        return;
+
+                /* SEV state cannot be controlled by a command line option */
+                sme_me_mask = me_mask;
+                sev_enabled = true;
                 return;
+        }

         /*
          * Fixups have not been applied to phys_base yet and we're running