
Commit 288d152

nicstange authored and KAGA-KOKO committed
x86/KVM/VMX: Initialize the vmx_l1d_flush_pages' content
The slow path in vmx_l1d_flush() reads from vmx_l1d_flush_pages in order
to evict the L1d cache. However, these pages are never cleared and, in
theory, their data could be leaked.

More importantly, KSM could merge a nested hypervisor's
vmx_l1d_flush_pages to fewer than 1 << L1D_CACHE_ORDER host physical
pages and this would break the L1d flushing algorithm: L1D on x86_64 is
tagged by physical addresses.

Fix this by initializing the individual vmx_l1d_flush_pages with a
different pattern each.

Rename the "empty_zp" asm constraint identifier in vmx_l1d_flush() to
"flush_pages" to reflect this change.

Fixes: a47dd5f ("x86/KVM/VMX: Add L1D flush algorithm")
Signed-off-by: Nicolai Stange <nstange@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 parent 6c26fcd commit 288d152
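
For illustration only (this sketch is not part of the commit), a minimal
user-space analogue of the idea behind the fix: writing a distinct byte
pattern into each of the 1 << L1D_CACHE_ORDER flush pages guarantees that
no two pages have identical contents, so KSM has nothing to merge and
every page keeps its own host physical address. The PAGE_SIZE and
L1D_CACHE_ORDER values below are assumptions matching the x86 defaults
(4 KiB pages, order 4 = 16 pages = 64 KiB).

/*
 * Hypothetical user-space analogue of the fix; PAGE_SIZE and
 * L1D_CACHE_ORDER are assumed values, not taken from kernel headers.
 */
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE	4096
#define L1D_CACHE_ORDER	4

static void *alloc_flush_pages(void)
{
	size_t nr_pages = 1u << L1D_CACHE_ORDER;
	unsigned char *pages;
	size_t i;

	pages = aligned_alloc(PAGE_SIZE, nr_pages * PAGE_SIZE);
	if (!pages)
		return NULL;

	/* A different pattern per page: no two pages compare equal. */
	for (i = 0; i < nr_pages; ++i)
		memset(pages + i * PAGE_SIZE, (int)(i + 1), PAGE_SIZE);

	return pages;
}

With all-zero pages, KSM could in principle collapse all sixteen pages
into a single host physical page, and the flush loop would then only
touch one page's worth of physically-tagged cache lines.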

File tree

1 file changed (+14 −3 lines)


arch/x86/kvm/vmx.c

Lines changed: 14 additions & 3 deletions
@@ -211,6 +211,7 @@ static void *vmx_l1d_flush_pages;
 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 {
 	struct page *page;
+	unsigned int i;
 
 	if (!enable_ept) {
 		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
@@ -243,6 +244,16 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 		if (!page)
 			return -ENOMEM;
 		vmx_l1d_flush_pages = page_address(page);
+
+		/*
+		 * Initialize each page with a different pattern in
+		 * order to protect against KSM in the nested
+		 * virtualization case.
+		 */
+		for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
+			memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
+			       PAGE_SIZE);
+		}
 	}
 
 	l1tf_vmx_mitigation = l1tf;
@@ -9701,7 +9712,7 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 		/* First ensure the pages are in the TLB */
 		"xorl	%%eax, %%eax\n"
 		".Lpopulate_tlb:\n\t"
-		"movzbl	(%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
+		"movzbl	(%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
 		"addl	$4096, %%eax\n\t"
 		"cmpl	%%eax, %[size]\n\t"
 		"jne	.Lpopulate_tlb\n\t"
@@ -9710,12 +9721,12 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 		/* Now fill the cache */
 		"xorl	%%eax, %%eax\n"
 		".Lfill_cache:\n"
-		"movzbl	(%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
+		"movzbl	(%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
 		"addl	$64, %%eax\n\t"
 		"cmpl	%%eax, %[size]\n\t"
 		"jne	.Lfill_cache\n\t"
 		"lfence\n"
-		:: [empty_zp] "r" (vmx_l1d_flush_pages),
+		:: [flush_pages] "r" (vmx_l1d_flush_pages),
 		   [size] "r" (size)
 		: "eax", "ebx", "ecx", "edx");
 }
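
As a reading aid (this C sketch is not in the commit), the two asm loops
above are structurally equivalent to the following: the first pass reads
one byte per 4096-byte page so every flush page becomes resident in the
TLB, and the second pass reads one byte per 64-byte cache line so the
reads displace the entire L1D.

/*
 * Structural C sketch of the asm loops in vmx_l1d_flush(), for
 * illustration only; the real code stays in asm so the compiler cannot
 * insert memory accesses of its own between the two passes.
 * size = (1 << L1D_CACHE_ORDER) * PAGE_SIZE.
 */
static void l1d_flush_sketch(const volatile unsigned char *flush_pages,
			     unsigned int size)
{
	unsigned int offset;

	/* First ensure the pages are in the TLB */
	for (offset = 0; offset < size; offset += 4096)
		(void)flush_pages[offset];

	/* Now fill the cache */
	for (offset = 0; offset < size; offset += 64)
		(void)flush_pages[offset];
}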
