
Commit f2c1242

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
 "A few simple fixes for ARM, x86, PPC and generic code.  The x86 MMU
  fix is a bit larger because the surrounding code needed a cleanup,
  but nothing worrisome"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: MMU: fix reserved bit check for ept=0/CR0.WP=0/CR4.SMEP=1/EFER.NX=0
  KVM: MMU: fix ept=0/pte.u=1/pte.w=0/CR0.WP=0/CR4.SMEP=1/EFER.NX=0 combo
  kvm: cap halt polling at exactly halt_poll_ns
  KVM: s390: correct fprs on SIGP (STOP AND) STORE STATUS
  KVM: VMX: disable PEBS before a guest entry
  KVM: PPC: Book3S HV: Sanitize special-purpose register values on guest exit
2 parents: c32c2cb + 5f0b819

File tree: 6 files changed (+53, -16 lines)

Documentation/virtual/kvm/mmu.txt

Lines changed: 2 additions & 1 deletion
@@ -358,7 +358,8 @@ In the first case there are two additional complications:
 - if CR4.SMEP is enabled: since we've turned the page into a kernel page,
   the kernel may now execute it.  We handle this by also setting spte.nx.
   If we get a user fetch or read fault, we'll change spte.u=1 and
-  spte.nx=gpte.nx back.
+  spte.nx=gpte.nx back.  For this to work, KVM forces EFER.NX to 1 when
+  shadow paging is in use.
 - if CR4.SMAP is disabled: since the page has been changed to a kernel
   page, it can not be reused when CR4.SMAP is enabled.  We set
   CR4.SMAP && !CR0.WP into shadow page's role to avoid this case.  Note,
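
Note: the CR0.WP=0/CR4.SMEP=1 case documented above is the one the x86 MMU and VMX fixes in this pull depend on. A minimal sketch of the spte adjustment it describes, with illustrative names (not KVM's actual structures):

#include <stdbool.h>

/* Illustrative names, not KVM's: how the shadow PTE bits are chosen
 * when emulating a CR0.WP=0 write to a read-only user page. */
struct spte_bits { bool user, write, nx; };

static struct spte_bits make_wp0_spte(bool cr4_smep, bool gpte_nx)
{
	struct spte_bits s;

	s.user  = false;	/* spte.u=0: page becomes a kernel page   */
	s.write = true;		/* spte.w=1: the emulated write succeeds  */
	s.nx    = cr4_smep ? true : gpte_nx;	/* SMEP: set spte.nx too */
	return s;
}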

arch/powerpc/kvm/book3s_hv_rmhandlers.S

Lines changed: 14 additions & 0 deletions
@@ -1370,6 +1370,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	std	r6, VCPU_ACOP(r9)
 	stw	r7, VCPU_GUEST_PID(r9)
 	std	r8, VCPU_WORT(r9)
+	/*
+	 * Restore various registers to 0, where non-zero values
+	 * set by the guest could disrupt the host.
+	 */
+	li	r0, 0
+	mtspr	SPRN_IAMR, r0
+	mtspr	SPRN_CIABR, r0
+	mtspr	SPRN_DAWRX, r0
+	mtspr	SPRN_TCSCR, r0
+	mtspr	SPRN_WORT, r0
+	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
+	li	r0, 1
+	sldi	r0, r0, 31
+	mtspr	SPRN_MMCRS, r0
 8:
 
 	/* Save and reset AMR and UAMOR before turning on the MMU */
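
Note: for readers who don't read Power assembly, the added block is roughly the following C, assuming the powerpc kernel's mtspr() macro; the real code stays in assembly because it runs in the HV real-mode exit path:

#include <asm/reg.h>	/* mtspr(), SPRN_* -- powerpc kernel build assumed */

/* Illustration only: C rendering of the added assembly above. */
static void sanitize_guest_sprs(void)
{
	mtspr(SPRN_IAMR, 0);		/* authority mask */
	mtspr(SPRN_CIABR, 0);		/* instruction address breakpoint */
	mtspr(SPRN_DAWRX, 0);		/* data watchpoint extension */
	mtspr(SPRN_TCSCR, 0);
	mtspr(SPRN_WORT, 0);
	mtspr(SPRN_MMCRS, 1ul << 31);	/* freeze/disable SPMC counters */
}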

arch/s390/kvm/kvm-s390.c

Lines changed: 1 addition & 1 deletion
@@ -2381,7 +2381,7 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 
 	/* manually convert vector registers if necessary */
 	if (MACHINE_HAS_VX) {
-		convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
+		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
 				     fprs, 128);
 	} else {
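
Note: the bug was reading the host task's FPU state (current->thread.fpu.vxrs) instead of the vcpu's guest registers. The conversion itself is simple; a sketch of what convert_vx_to_fp() does, assuming the s390 kernel's freg_t and __vector128 types:

/* Sketch: FP register i is the leftmost 64 bits of vector register i. */
static void convert_vx_to_fp_sketch(freg_t *fprs, __vector128 *vxrs)
{
	int i;

	for (i = 0; i < 16; i++)	/* 16 floating-point registers */
		fprs[i] = *(freg_t *)(vxrs + i);
}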

arch/x86/kvm/mmu.c

Lines changed: 3 additions & 1 deletion
@@ -3721,13 +3721,15 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 {
+	bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
+
 	/*
 	 * Passing "true" to the last argument is okay; it adds a check
 	 * on bit 8 of the SPTEs which KVM doesn't use anyway.
 	 */
 	__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
 				boot_cpu_data.x86_phys_bits,
-				context->shadow_root_level, context->nx,
+				context->shadow_root_level, uses_nx,
 				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
 				true);
 }
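
Note: with ept=0, CR0.WP=0, CR4.SMEP=1 and guest EFER.NX=0, context->nx is false, yet the workaround documented in mmu.txt above makes KVM set spte.nx itself; marking bit 63 reserved would then trip the reserved-bit check on KVM's own SPTEs. A toy standalone illustration of the corrected condition (not KVM code):

#include <stdbool.h>
#include <stdint.h>

/* Toy model: the NX bit (63) may be treated as reserved only if neither
 * the guest (context->nx) nor KVM's smep_andnot_wp workaround ever sets
 * it in an SPTE.  Real masks also cover other bit ranges. */
static uint64_t shadow_zero_bits(bool guest_nx, bool smep_andnot_wp,
				 int phys_bits)
{
	bool uses_nx = guest_nx || smep_andnot_wp;
	/* bits above the physical address width are reserved... */
	uint64_t mask = ~((1ull << phys_bits) - 1) & ~(1ull << 63);

	if (!uses_nx)		/* ...and NX only when it is never used */
		mask |= 1ull << 63;
	return mask;
}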

arch/x86/kvm/vmx.c

Lines changed: 30 additions & 13 deletions
@@ -1813,6 +1813,13 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 			return;
 		}
 		break;
+	case MSR_IA32_PEBS_ENABLE:
+		/* PEBS needs a quiescent period after being disabled (to write
+		 * a record).  Disabling PEBS through VMX MSR swapping doesn't
+		 * provide that period, so a CPU could write host's record into
+		 * guest's memory.
+		 */
+		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
 	}
 
 	for (i = 0; i < m->nr; ++i)
@@ -1850,26 +1857,31 @@ static void reload_tss(void)
 
 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 {
-	u64 guest_efer;
-	u64 ignore_bits;
+	u64 guest_efer = vmx->vcpu.arch.efer;
+	u64 ignore_bits = 0;
 
-	guest_efer = vmx->vcpu.arch.efer;
+	if (!enable_ept) {
+		/*
+		 * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
+		 * host CPUID is more efficient than testing guest CPUID
+		 * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
+		 */
+		if (boot_cpu_has(X86_FEATURE_SMEP))
+			guest_efer |= EFER_NX;
+		else if (!(guest_efer & EFER_NX))
+			ignore_bits |= EFER_NX;
+	}
 
 	/*
-	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
-	 * outside long mode
+	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
 	 */
-	ignore_bits = EFER_NX | EFER_SCE;
+	ignore_bits |= EFER_SCE;
 #ifdef CONFIG_X86_64
 	ignore_bits |= EFER_LMA | EFER_LME;
 	/* SCE is meaningful only in long mode on Intel */
 	if (guest_efer & EFER_LMA)
 		ignore_bits &= ~(u64)EFER_SCE;
 #endif
-	guest_efer &= ~ignore_bits;
-	guest_efer |= host_efer & ignore_bits;
-	vmx->guest_msrs[efer_offset].data = guest_efer;
-	vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
 
 	clear_atomic_switch_msr(vmx, MSR_EFER);
 
@@ -1880,16 +1892,21 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 	 */
 	if (cpu_has_load_ia32_efer ||
 	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
-		guest_efer = vmx->vcpu.arch.efer;
 		if (!(guest_efer & EFER_LMA))
 			guest_efer &= ~EFER_LME;
 		if (guest_efer != host_efer)
 			add_atomic_switch_msr(vmx, MSR_EFER,
 					      guest_efer, host_efer);
 		return false;
-	}
+	} else {
+		guest_efer &= ~ignore_bits;
+		guest_efer |= host_efer & ignore_bits;
 
-	return true;
+		vmx->guest_msrs[efer_offset].data = guest_efer;
+		vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
+
+		return true;
+	}
 }
 
 static unsigned long segment_base(u16 selector)
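
Note: on the slow (user-return MSR) path, the guest/host EFER merge is a pure function of the three values; bits in ignore_bits are copied from the host so that a mismatch there never forces an MSR write. A condensed sketch:

#include <stdint.h>

/* Sketch of the slow-path EFER value: ignore_bits are filled from the
 * host, so guest/host disagreement in those bits costs no MSR write. */
static uint64_t transition_efer(uint64_t guest_efer, uint64_t host_efer,
				uint64_t ignore_bits)
{
	return (guest_efer & ~ignore_bits) | (host_efer & ignore_bits);
}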

virt/kvm/kvm_main.c

Lines changed: 3 additions & 0 deletions
@@ -1952,6 +1952,9 @@ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
 	else
 		val *= halt_poll_ns_grow;
 
+	if (val > halt_poll_ns)
+		val = halt_poll_ns;
+
 	vcpu->halt_poll_ns = val;
 	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
 }
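
Note: without the clamp, one more multiply by halt_poll_ns_grow could push the per-vcpu poll window past the halt_poll_ns module parameter. A standalone sketch of the capped growth step (the grow-from-zero start value is elsewhere in the function and not shown here):

#include <stdint.h>

/* Sketch: multiplicative growth of the poll window, clamped to the
 * halt_poll_ns ceiling ("cap" here), as the fix does. */
static uint64_t grow_capped(uint64_t val, uint64_t grow, uint64_t cap)
{
	val *= grow;		/* pre-existing growth step */
	if (val > cap)		/* the fix: never exceed the ceiling */
		val = cap;
	return val;
}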
