
Commit 28a1f3a

Junaid Shahid authored and bonzini (Paolo Bonzini) committed
kvm: x86: Set highest physical address bits in non-present/reserved SPTEs
Always set the 5 upper-most supported physical address bits to 1 for SPTEs that are marked as non-present or reserved, to make them unusable for L1TF attacks from the guest. Currently, this just applies to MMIO SPTEs. (We do not need to mark PTEs that are completely 0, as physical page 0 is already reserved.)

This allows mitigation of L1TF without disabling hyper-threading, by using shadow paging mode instead of EPT.

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent fd8ca6d commit 28a1f3a
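Note (illustration, not part of the commit): the mask described above is simply the top 5 supported physical address bits. The new kvm_mmu_reset_all_pte_masks() in the diff below derives it from boot_cpu_data.x86_phys_bits; the standalone sketch here mirrors that calculation for a hypothetical CPU with 46 physical address bits, so the constants and helper are illustrative only.

/* Standalone sketch; constants and helpers here are illustrative only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NONPRESENT_OR_RSVD_MASK_LEN 5	/* number of high-order 1 bits */

/* Build a mask with bits [lo, hi] set, in the spirit of the kernel's rsvd_bits(). */
static uint64_t rsvd_bits(int lo, int hi)
{
	return ((1ULL << (hi - lo + 1)) - 1) << lo;
}

int main(void)
{
	int phys_bits = 46;		/* hypothetical MAXPHYADDR */
	uint64_t nonpresent_or_rsvd_mask = 0;

	/* Only CPUs with MAXPHYADDR <= 46 leave 5 spare bits below bit 52. */
	if (phys_bits < 52 - NONPRESENT_OR_RSVD_MASK_LEN)
		nonpresent_or_rsvd_mask =
			rsvd_bits(phys_bits - NONPRESENT_OR_RSVD_MASK_LEN,
				  phys_bits - 1);

	/* bits 41..45 set -> 0x00003e0000000000 */
	printf("mask = 0x%016llx\n",
	       (unsigned long long)nonpresent_or_rsvd_mask);
	assert(nonpresent_or_rsvd_mask == 0x3e0000000000ULL);
	return 0;
}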

File tree

2 files changed: +44, -7 lines


arch/x86/kvm/mmu.c

Lines changed: 38 additions & 5 deletions
@@ -238,6 +238,17 @@ static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
 						    PT64_EPT_EXECUTABLE_MASK;
 static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
 
+/*
+ * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
+ * to guard against L1TF attacks.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
+
+/*
+ * The number of high-order 1 bits to use in the mask above.
+ */
+static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
+
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
@@ -327,9 +338,13 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
 {
 	unsigned int gen = kvm_current_mmio_generation(vcpu);
 	u64 mask = generation_mmio_spte_mask(gen);
+	u64 gpa = gfn << PAGE_SHIFT;
 
 	access &= ACC_WRITE_MASK | ACC_USER_MASK;
-	mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
+	mask |= shadow_mmio_value | access;
+	mask |= gpa | shadow_nonpresent_or_rsvd_mask;
+	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
+		<< shadow_nonpresent_or_rsvd_mask_len;
 
 	trace_mark_mmio_spte(sptep, gfn, access, gen);
 	mmu_spte_set(sptep, mask);
@@ -342,8 +357,14 @@ static bool is_mmio_spte(u64 spte)
 
 static gfn_t get_mmio_spte_gfn(u64 spte)
 {
-	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
-	return (spte & ~mask) >> PAGE_SHIFT;
+	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
+		   shadow_nonpresent_or_rsvd_mask;
+	u64 gpa = spte & ~mask;
+
+	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
+	       & shadow_nonpresent_or_rsvd_mask;
+
+	return gpa >> PAGE_SHIFT;
 }
 
 static unsigned get_mmio_spte_access(u64 spte)
@@ -400,7 +421,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
-static void kvm_mmu_clear_all_pte_masks(void)
+static void kvm_mmu_reset_all_pte_masks(void)
 {
 	shadow_user_mask = 0;
 	shadow_accessed_mask = 0;
@@ -410,6 +431,18 @@ static void kvm_mmu_clear_all_pte_masks(void)
 	shadow_mmio_mask = 0;
 	shadow_present_mask = 0;
 	shadow_acc_track_mask = 0;
+
+	/*
+	 * If the CPU has 46 or less physical address bits, then set an
+	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
+	 * assumed that the CPU is not vulnerable to L1TF.
+	 */
+	if (boot_cpu_data.x86_phys_bits <
+	    52 - shadow_nonpresent_or_rsvd_mask_len)
+		shadow_nonpresent_or_rsvd_mask =
+			rsvd_bits(boot_cpu_data.x86_phys_bits -
+				  shadow_nonpresent_or_rsvd_mask_len,
+				  boot_cpu_data.x86_phys_bits - 1);
 }
 
 static int is_cpuid_PSE36(void)
@@ -5819,7 +5852,7 @@ int kvm_mmu_module_init(void)
 {
 	int ret = -ENOMEM;
 
-	kvm_mmu_clear_all_pte_masks();
+	kvm_mmu_reset_all_pte_masks();
 
 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
 						sizeof(struct pte_list_desc),
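
Note (illustration, not part of the commit): the MMIO-SPTE hunks above rely on a bit-stashing trick. Since the always-set mask clobbers the top 5 GPA bits, mark_mmio_spte() parks a copy of those bits 5 positions higher in the SPTE, and get_mmio_spte_gfn() merges them back when decoding. The minimal sketch below shows that round trip for a hypothetical 46-bit MAXPHYADDR with generation and MMIO metadata bits omitted; its decode also clears the parked copy for clarity, so it is not the kernel function verbatim.

/* Illustrative sketch only; masks are hypothetical, not the kernel's. */
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define MASK_LEN	5
/* Top 5 supported physical address bits of a 46-bit MAXPHYADDR: bits 41..45. */
#define NP_RSVD_MASK	0x00003e0000000000ULL

/* Encode, in the spirit of mark_mmio_spte(): force the mask bits on, stash the originals. */
static uint64_t encode_mmio_gpa(uint64_t gfn)
{
	uint64_t gpa = gfn << PAGE_SHIFT;
	uint64_t spte = gpa | NP_RSVD_MASK;

	spte |= (gpa & NP_RSVD_MASK) << MASK_LEN;	/* saved copy at bits 46..50 */
	return spte;
}

/* Decode: drop the forced-on bits and the saved copy, then restore the copy. */
static uint64_t decode_mmio_gfn(uint64_t spte)
{
	uint64_t gpa = spte & ~(NP_RSVD_MASK | (NP_RSVD_MASK << MASK_LEN));

	gpa |= (spte >> MASK_LEN) & NP_RSVD_MASK;
	return gpa >> PAGE_SHIFT;
}

int main(void)
{
	uint64_t gfn = 0x200001234ULL;	/* a GFN whose GPA has bit 45 set */

	assert(decode_mmio_gfn(encode_mmio_gpa(gfn)) == gfn);
	return 0;
}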

arch/x86/kvm/x86.c

Lines changed: 6 additions & 2 deletions
@@ -6536,8 +6536,12 @@ static void kvm_set_mmio_spte_mask(void)
 	 * Set the reserved bits and the present bit of an paging-structure
 	 * entry to generate page fault with PFER.RSV = 1.
 	 */
-	/* Mask the reserved physical address bits. */
-	mask = rsvd_bits(maxphyaddr, 51);
+
+	/*
+	 * Mask the uppermost physical address bit, which would be reserved as
+	 * long as the supported physical address width is less than 52.
+	 */
+	mask = 1ull << 51;
 
 	/* Set the present bit. */
 	mask |= 1ull;
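
Note (illustration, not part of the commit): after this hunk, every MMIO SPTE carries bit 51 together with the present bit. Bit 51 is a reserved physical address bit on any CPU whose MAXPHYADDR is below 52, so a guest access through such an entry faults with PFER.RSV = 1. A trivial sketch of the resulting mask value, not the kernel code itself:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Sketch of the value built in kvm_set_mmio_spte_mask() after this change. */
	uint64_t mask = 1ULL << 51;	/* reserved while MAXPHYADDR < 52 */

	mask |= 1ULL;			/* present bit */
	assert(mask == 0x0008000000000001ULL);
	return 0;
}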
