4 files changed, +48 -3 lines
Documentation/admin-guide
@@ -546,6 +546,27 @@ available:
   EPT can be disabled in the hypervisor via the 'kvm-intel.ept'
   parameter.
 
+3.4. Nested virtual machines
+""""""""""""""""""""""""""""
+
+When nested virtualization is in use, three operating systems are involved:
+the bare metal hypervisor, the nested hypervisor and the nested virtual
+machine.  VMENTER operations from the nested hypervisor into the nested
+guest will always be processed by the bare metal hypervisor. If KVM is the
+bare metal hypervisor it will:
+
+ - Flush the L1D cache on every switch from the nested hypervisor to the
+   nested virtual machine, so that the nested hypervisor's secrets are not
+   exposed to the nested virtual machine;
+
+ - Flush the L1D cache on every switch from the nested virtual machine to
+   the nested hypervisor; this is a complex operation, and flushing the L1D
+   cache keeps the bare metal hypervisor's secrets from being exposed to
+   the nested virtual machine;
+
+ - Instruct the nested hypervisor not to perform any L1D cache flush. This
+   is an optimization to avoid double L1D flushing.
+
 
 .. _default_mitigations:
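The third bullet above is the hint this patch implements: the bare metal
hypervisor sets a "skip the VMENTER L1D flush" bit in
MSR_IA32_ARCH_CAPABILITIES. As a rough sketch of the consumer side (not
part of this commit; the bit position mirrors the series'
ARCH_CAP_SKIP_VMENTRY_L1DFLUSH definition and the helper names are
illustrative), a nested hypervisor could decide whether it still needs
its own flush like this:

#include <stdbool.h>
#include <stdint.h>

#define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
/* Bit 3, as ARCH_CAP_SKIP_VMENTRY_L1DFLUSH is defined in this series. */
#define SKIP_VMENTRY_L1DFLUSH		(1ULL << 3)

/* Ring-0 only: RDMSR faults in user mode. */
static inline uint64_t rdmsr64(uint32_t msr)
{
	uint32_t lo, hi;

	__asm__ volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
	return ((uint64_t)hi << 32) | lo;
}

/*
 * True if this (nested) hypervisor must still flush the L1D cache on
 * VMENTER itself.  A full implementation would first check that
 * CPUID.(EAX=7,ECX=0):EDX[29] advertises the ARCH_CAPABILITIES MSR.
 */
static bool need_own_l1d_flush(void)
{
	return !(rdmsr64(MSR_IA32_ARCH_CAPABILITIES) &
		 SKIP_VMENTRY_L1DFLUSH);
}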
@@ -1418,6 +1418,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
 
+u64 kvm_get_arch_capabilities(void);
 void kvm_define_shared_msr(unsigned index, u32 msr);
 int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
 
@@ -6465,8 +6465,7 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
 		++vmx->nmsrs;
 	}
 
-	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
-		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, vmx->arch_capabilities);
+	vmx->arch_capabilities = kvm_get_arch_capabilities();
 
 	vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
@@ -1103,11 +1103,35 @@ static u32 msr_based_features[] = {
 
 static unsigned int num_msr_based_features;
 
+u64 kvm_get_arch_capabilities(void)
+{
+	u64 data;
+
+	rdmsrl_safe(MSR_IA32_ARCH_CAPABILITIES, &data);
+
+	/*
+	 * If we're doing cache flushes (either "always" or "cond")
+	 * we will do one whenever the guest does a vmlaunch/vmresume.
+	 * If an outer hypervisor is doing the cache flush for us
+	 * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that
+	 * capability to the guest too, and if EPT is disabled we're not
+	 * vulnerable.  Overall, only VMENTER_L1D_FLUSH_NEVER will
+	 * require a nested hypervisor to do a flush of its own.
+	 */
+	if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
+		data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
+
+	return data;
+}
+EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities);
+
 static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
 {
 	switch (msr->index) {
-	case MSR_IA32_UCODE_REV:
 	case MSR_IA32_ARCH_CAPABILITIES:
+		msr->data = kvm_get_arch_capabilities();
+		break;
+	case MSR_IA32_UCODE_REV:
 		rdmsrl_safe(msr->index, &msr->data);
 		break;
 	default:
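With kvm_get_arch_capabilities() plugged into kvm_get_msr_feature(), the
synthesized value also becomes visible to userspace through KVM's
feature-MSR interface (KVM_GET_MSRS issued on the /dev/kvm system fd).
Note that rdmsrl_safe() leaves data as zero when the RDMSR faults, which
is why the helper no longer needs the boot_cpu_has() guard that vmx.c
used to carry. A small probe, assuming a kernel with this patch and
KVM_CAP_GET_MSR_FEATURES support, and reusing the bit-3 assumption from
the sketch above:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a

int main(void)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} req;
	int kvm;

	kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* A robust probe would check KVM_CAP_GET_MSR_FEATURES first. */
	memset(&req, 0, sizeof(req));
	req.hdr.nmsrs = 1;
	req.entry.index = MSR_IA32_ARCH_CAPABILITIES;

	/* On the system fd, KVM_GET_MSRS reads feature MSRs. */
	if (ioctl(kvm, KVM_GET_MSRS, &req) != 1) {
		perror("KVM_GET_MSRS");
		return 1;
	}

	printf("ARCH_CAPABILITIES = %#llx\n",
	       (unsigned long long)req.entry.data);
	printf("skip VMENTER L1D flush hint: %s\n",
	       (req.entry.data & (1ULL << 3)) ? "yes" : "no");
	return 0;
}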