
Commit 3fa045b

bonzini authored and KAGA-KOKO committed
x86/KVM/VMX: Add L1D MSR based flush
336996-Speculative-Execution-Side-Channel-Mitigations.pdf defines a new MSR
(IA32_FLUSH_CMD aka 0x10B) which has similar write-only semantics to other
MSRs defined in the document.

The semantics of this MSR is to allow "finer granularity invalidation of
caching structures than existing mechanisms like WBINVD. It will writeback
and invalidate the L1 data cache, including all cachelines brought in by
preceding instructions, without invalidating all caches (eg. L2 or LLC).
Some processors may also invalidate the first level level instruction cache
on a L1D_FLUSH command. The L1 data and instruction caches may be shared
across the logical processors of a core."

Use it instead of the loop based L1 flush algorithm.

A copy of this document is available at
https://bugzilla.kernel.org/show_bug.cgi?id=199511

[ tglx: Avoid allocating pages when the MSR is available ]

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 parent a47dd5f commit 3fa045b
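
For quick orientation before the diff: a minimal sketch of the two-level flush
pattern this commit introduces. The helper name and the exact header list are
illustrative, not part of the patch; the constants and kernel helpers
(static_cpu_has(), wrmsrl()) are the ones used in the hunks below.

    /*
     * Sketch only: prefer the architectural L1D flush MSR when the CPU
     * advertises it, otherwise fall back to the software fill loop.
     */
    #include <asm/cpufeatures.h>   /* X86_FEATURE_FLUSH_L1D */
    #include <asm/cpufeature.h>    /* static_cpu_has() */
    #include <asm/msr-index.h>     /* MSR_IA32_FLUSH_CMD, L1D_FLUSH */
    #include <asm/msr.h>           /* wrmsrl() */

    static void l1d_flush_sketch(void)    /* hypothetical helper name */
    {
            if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
                    /* Write-only command MSR: bit 0 (L1D_FLUSH) writes back
                     * and invalidates the L1 data cache. */
                    wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
                    return;
            }

            /* No MSR: fall back to reading a dedicated buffer large enough
             * to displace the L1D (the existing loop-based algorithm). */
    }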

File tree

2 files changed: +17 −4 lines changed


arch/x86/include/asm/msr-index.h

Lines changed: 6 additions & 0 deletions
@@ -76,6 +76,12 @@
                                                     * control required.
                                                     */
 
+#define MSR_IA32_FLUSH_CMD              0x0000010b
+#define L1D_FLUSH                       (1 << 0)   /*
+                                                    * Writeback and invalidate the
+                                                    * L1 data cache.
+                                                    */
+
 #define MSR_IA32_BBL_CR_CTL             0x00000119
 #define MSR_IA32_BBL_CR_CTL3            0x0000011e
 

arch/x86/kvm/vmx.c

Lines changed: 11 additions & 4 deletions
@@ -9580,6 +9580,11 @@ static void __maybe_unused vmx_l1d_flush(void)
 {
         int size = PAGE_SIZE << L1D_CACHE_ORDER;
 
+        if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+                wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+                return;
+        }
+
         asm volatile(
                 /* First ensure the pages are in the TLB */
                 "xorl %%eax, %%eax\n"
@@ -13158,11 +13163,13 @@ static int __init vmx_setup_l1d_flush(void)
             !boot_cpu_has_bug(X86_BUG_L1TF))
                 return 0;
 
-        page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
-        if (!page)
-                return -ENOMEM;
+        if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+                page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+                if (!page)
+                        return -ENOMEM;
+                vmx_l1d_flush_pages = page_address(page);
+        }
 
-        vmx_l1d_flush_pages = page_address(page);
         static_branch_enable(&vmx_l1d_should_flush);
         return 0;
 }
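
Putting the second hunk back into context, a sketch of the setup path after
the change: the flush buffer is only allocated when the MSR is absent, per the
tglx note in the commit message. The early-return guard is abbreviated here;
the real function's condition is only partially visible in the hunk, and the
surrounding declarations are assumed from vmx.c.

    /* Sketch of vmx_setup_l1d_flush() after this commit. */
    static void *vmx_l1d_flush_pages;

    static int __init vmx_setup_l1d_flush(void)
    {
            struct page *page;

            /* Abbreviated guard: nothing to do without the L1TF bug. */
            if (!boot_cpu_has_bug(X86_BUG_L1TF))
                    return 0;

            /* The MSR path needs no memory; allocate the fill buffer only
             * for the software fallback. */
            if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
                    page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
                    if (!page)
                            return -ENOMEM;
                    vmx_l1d_flush_pages = page_address(page);
            }

            static_branch_enable(&vmx_l1d_should_flush);
            return 0;
    }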
