Commit 541d8f4

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "Miscellaneous bugfixes. The ARM and s390 fixes are for new regressions
  from the merge window, others are usual stable material"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  compiler-gcc: disable -ftracer for __noclone functions
  kvm: x86: make lapic hrtimer pinned
  s390/mm/kvm: fix mis-merge in gmap handling
  kvm: set page dirty only if page has been writable
  KVM: x86: reduce default value of halt_poll_ns parameter
  KVM: Hyper-V: do not do hypercall userspace exits if SynIC is disabled
  KVM: x86: Inject pending interrupt even if pending nmi exist
  arm64: KVM: Register CPU notifiers when the kernel runs at HYP
  arm64: kvm: 4.6-rc1: Fix VTCR_EL2 VS setting

2 parents 5003bc6 + 95272c2 commit 541d8f4

11 files changed: 76 additions & 42 deletions

arch/arm/kvm/arm.c

Lines changed: 33 additions & 19 deletions
@@ -1061,15 +1061,27 @@ static void cpu_init_hyp_mode(void *dummy)
 	kvm_arm_init_debug();
 }
 
+static void cpu_hyp_reinit(void)
+{
+	if (is_kernel_in_hyp_mode()) {
+		/*
+		 * cpu_init_stage2() is safe to call even if the PM
+		 * event was cancelled before the CPU was reset.
+		 */
+		cpu_init_stage2(NULL);
+	} else {
+		if (__hyp_get_vectors() == hyp_default_vectors)
+			cpu_init_hyp_mode(NULL);
+	}
+}
+
 static int hyp_init_cpu_notify(struct notifier_block *self,
 			       unsigned long action, void *cpu)
 {
 	switch (action) {
 	case CPU_STARTING:
 	case CPU_STARTING_FROZEN:
-		if (__hyp_get_vectors() == hyp_default_vectors)
-			cpu_init_hyp_mode(NULL);
-		break;
+		cpu_hyp_reinit();
 	}
 
 	return NOTIFY_OK;
@@ -1084,9 +1096,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
 				    unsigned long cmd,
 				    void *v)
 {
-	if (cmd == CPU_PM_EXIT &&
-	    __hyp_get_vectors() == hyp_default_vectors) {
-		cpu_init_hyp_mode(NULL);
+	if (cmd == CPU_PM_EXIT) {
+		cpu_hyp_reinit();
 		return NOTIFY_OK;
 	}
 
@@ -1127,6 +1138,22 @@ static int init_subsystems(void)
 {
 	int err;
 
+	/*
+	 * Register CPU Hotplug notifier
+	 */
+	cpu_notifier_register_begin();
+	err = __register_cpu_notifier(&hyp_init_cpu_nb);
+	cpu_notifier_register_done();
+	if (err) {
+		kvm_err("Cannot register KVM init CPU notifier (%d)\n", err);
+		return err;
+	}
+
+	/*
+	 * Register CPU lower-power notifier
+	 */
+	hyp_cpu_pm_init();
+
 	/*
 	 * Init HYP view of VGIC
 	 */
@@ -1270,19 +1297,6 @@ static int init_hyp_mode(void)
 	free_boot_hyp_pgd();
 #endif
 
-	cpu_notifier_register_begin();
-
-	err = __register_cpu_notifier(&hyp_init_cpu_nb);
-
-	cpu_notifier_register_done();
-
-	if (err) {
-		kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
-		goto out_err;
-	}
-
-	hyp_cpu_pm_init();
-
 	/* set size of VMID supported by CPU */
 	kvm_vmid_bits = kvm_get_vmid_bits();
 	kvm_info("%d-bit VMID\n", kvm_vmid_bits);

arch/arm64/include/asm/kvm_arm.h

Lines changed: 3 additions & 1 deletion
@@ -124,7 +124,9 @@
 #define VTCR_EL2_SL0_LVL1	(1 << 6)
 #define VTCR_EL2_T0SZ_MASK	0x3f
 #define VTCR_EL2_T0SZ_40B	24
-#define VTCR_EL2_VS		19
+#define VTCR_EL2_VS_SHIFT	19
+#define VTCR_EL2_VS_8BIT	(0 << VTCR_EL2_VS_SHIFT)
+#define VTCR_EL2_VS_16BIT	(1 << VTCR_EL2_VS_SHIFT)
 
 /*
  * We configure the Stage-2 page tables to always restrict the IPA space to be

arch/arm64/include/asm/sysreg.h

Lines changed: 3 additions & 0 deletions
@@ -141,6 +141,9 @@
 #define ID_AA64MMFR1_VMIDBITS_SHIFT	4
 #define ID_AA64MMFR1_HADBS_SHIFT	0
 
+#define ID_AA64MMFR1_VMIDBITS_8		0
+#define ID_AA64MMFR1_VMIDBITS_16	2
+
 /* id_aa64mmfr2 */
 #define ID_AA64MMFR2_UAO_SHIFT		4

arch/arm64/kvm/hyp/s2-setup.c

Lines changed: 4 additions & 2 deletions
@@ -36,8 +36,10 @@ void __hyp_text __init_stage2_translation(void)
 	 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
 	 * bit in VTCR_EL2.
 	 */
-	tmp = (read_sysreg(id_aa64mmfr1_el1) >> 4) & 0xf;
-	val |= (tmp == 2) ? VTCR_EL2_VS : 0;
+	tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_VMIDBITS_SHIFT) & 0xf;
+	val |= (tmp == ID_AA64MMFR1_VMIDBITS_16) ?
+			VTCR_EL2_VS_16BIT :
+			VTCR_EL2_VS_8BIT;
 
 	write_sysreg(val, vtcr_el2);
 }
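
The bug this pair of arm64 hunks fixes is visible from the kvm_arm.h change above: VTCR_EL2_VS was defined as 19, a bit position, so the old "val |= (tmp == 2) ? VTCR_EL2_VS : 0" OR'ed the literal value 19 (bits 0, 1 and 4) into VTCR_EL2 instead of setting bit 19. A minimal standalone sketch of the before/after behaviour, using a hypothetical main() rather than the kernel code:

#include <stdint.h>
#include <stdio.h>

#define VTCR_EL2_VS_SHIFT	19
#define VTCR_EL2_VS_16BIT	(UINT64_C(1) << VTCR_EL2_VS_SHIFT)

int main(void)
{
	uint64_t val_old = 0, val_new = 0;
	int vmidbits = 2;	/* ID_AA64MMFR1_EL1.VMIDBits == 2 means 16-bit VMIDs */

	val_old |= (vmidbits == 2) ? 19 : 0;			/* old code: ORs in 19, i.e. bits 0, 1, 4 */
	val_new |= (vmidbits == 2) ? VTCR_EL2_VS_16BIT : 0;	/* fixed code: sets bit 19 */

	printf("old: %#llx, new: %#llx\n",
	       (unsigned long long)val_old, (unsigned long long)val_new);
	return 0;	/* prints old: 0x13, new: 0x80000 */
}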

arch/s390/mm/gmap.c

Lines changed: 2 additions & 2 deletions
@@ -23,7 +23,7 @@
 /**
  * gmap_alloc - allocate a guest address space
  * @mm: pointer to the parent mm_struct
- * @limit: maximum size of the gmap address space
+ * @limit: maximum address of the gmap address space
  *
  * Returns a guest address space structure.
  */
@@ -292,7 +292,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 	if ((from | to | len) & (PMD_SIZE - 1))
 		return -EINVAL;
 	if (len == 0 || from + len < from || to + len < to ||
-	    from + len > TASK_MAX_SIZE || to + len > gmap->asce_end)
+	    from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
 		return -EINVAL;
 
 	flush = 0;
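
The second gmap hunk adjusts the bounds check to treat TASK_MAX_SIZE and gmap->asce_end as maximum addresses (inclusive), matching the updated @limit comment. A rough illustration of the boundary difference, assuming an inclusive limit; fits_old() and fits_new() are hypothetical helpers, not the kernel code:

#include <stdbool.h>

/* Old check: rejects a segment whose last byte lands exactly on the limit. */
static bool fits_old(unsigned long to, unsigned long len, unsigned long limit)
{
	return !(to + len > limit);
}

/* New check: the last byte (to + len - 1) may sit on the limit address. */
static bool fits_new(unsigned long to, unsigned long len, unsigned long limit)
{
	return !(to + len - 1 > limit);
}

/* e.g. with limit == 0xffff, to == 0xf000, len == 0x1000:
 * fits_old() returns false, fits_new() returns true. */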

arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 1 deletion
@@ -43,7 +43,7 @@
 
 #define KVM_PIO_PAGE_OFFSET 1
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 2
-#define KVM_HALT_POLL_NS_DEFAULT 500000
+#define KVM_HALT_POLL_NS_DEFAULT 400000
 
 #define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

arch/x86/kvm/hyperv.c

Lines changed: 5 additions & 0 deletions
@@ -1116,6 +1116,11 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 		break;
 	case HVCALL_POST_MESSAGE:
 	case HVCALL_SIGNAL_EVENT:
+		/* don't bother userspace if it has no way to handle it */
+		if (!vcpu_to_synic(vcpu)->active) {
+			res = HV_STATUS_INVALID_HYPERCALL_CODE;
+			break;
+		}
 		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
 		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
 		vcpu->run->hyperv.u.hcall.input = param;

arch/x86/kvm/lapic.c

Lines changed: 4 additions & 4 deletions
@@ -1369,7 +1369,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
 
 		hrtimer_start(&apic->lapic_timer.timer,
 			      ktime_add_ns(now, apic->lapic_timer.period),
-			      HRTIMER_MODE_ABS);
+			      HRTIMER_MODE_ABS_PINNED);
 
 		apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
 			   PRIx64 ", "
@@ -1402,7 +1402,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
 			expire = ktime_add_ns(now, ns);
 			expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
 			hrtimer_start(&apic->lapic_timer.timer,
-				      expire, HRTIMER_MODE_ABS);
+				      expire, HRTIMER_MODE_ABS_PINNED);
 		} else
 			apic_timer_expired(apic);
 
@@ -1868,7 +1868,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 	apic->vcpu = vcpu;
 
 	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
-		     HRTIMER_MODE_ABS);
+		     HRTIMER_MODE_ABS_PINNED);
 	apic->lapic_timer.timer.function = apic_timer_fn;
 
 	/*
@@ -2003,7 +2003,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
 
 	timer = &vcpu->arch.apic->lapic_timer.timer;
 	if (hrtimer_cancel(timer))
-		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
 }
 
 /*

arch/x86/kvm/mmu.c

Lines changed: 10 additions & 2 deletions
@@ -557,8 +557,15 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 	    !is_writable_pte(new_spte))
 		ret = true;
 
-	if (!shadow_accessed_mask)
+	if (!shadow_accessed_mask) {
+		/*
+		 * We don't set page dirty when dropping non-writable spte.
+		 * So do it now if the new spte is becoming non-writable.
+		 */
+		if (ret)
+			kvm_set_pfn_dirty(spte_to_pfn(old_spte));
 		return ret;
+	}
 
 	/*
 	 * Flush TLB when accessed/dirty bits are changed in the page tables,
@@ -605,7 +612,8 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
 
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
-	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
+	if (old_spte & (shadow_dirty_mask ? shadow_dirty_mask :
+			PT_WRITABLE_MASK))
 		kvm_set_pfn_dirty(pfn);
 	return 1;
 }
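
Both mmu.c hunks apply the same rule: when the hardware provides no dirty bit (shadow_dirty_mask == 0, e.g. EPT without access/dirty bits), a page can only have been dirtied through a writable SPTE, so the code falls back to the writable bit rather than marking every dropped page dirty. A simplified standalone sketch of that decision; spte_implies_dirty() is a hypothetical helper, the real checks live in mmu_spte_update() and mmu_spte_clear_track_bits():

#include <stdbool.h>
#include <stdint.h>

#define PT_WRITABLE_MASK (UINT64_C(1) << 1)	/* bit 1 of an x86 PTE: writable */

/* If dirty tracking exists, trust the dirty bit; otherwise only an SPTE
 * that was writable can have made the page dirty. */
static bool spte_implies_dirty(uint64_t old_spte, uint64_t shadow_dirty_mask)
{
	uint64_t mask = shadow_dirty_mask ? shadow_dirty_mask : PT_WRITABLE_MASK;

	return (old_spte & mask) != 0;
}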

arch/x86/kvm/x86.c

Lines changed: 10 additions & 10 deletions
@@ -6095,12 +6095,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 	}
 
 	/* try to inject new event if pending */
-	if (vcpu->arch.nmi_pending) {
-		if (kvm_x86_ops->nmi_allowed(vcpu)) {
-			--vcpu->arch.nmi_pending;
-			vcpu->arch.nmi_injected = true;
-			kvm_x86_ops->set_nmi(vcpu);
-		}
+	if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
+		--vcpu->arch.nmi_pending;
+		vcpu->arch.nmi_injected = true;
+		kvm_x86_ops->set_nmi(vcpu);
 	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
 		/*
 		 * Because interrupts can be injected asynchronously, we are
@@ -6569,10 +6567,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		if (inject_pending_event(vcpu, req_int_win) != 0)
 			req_immediate_exit = true;
 		/* enable NMI/IRQ window open exits if needed */
-		else if (vcpu->arch.nmi_pending)
-			kvm_x86_ops->enable_nmi_window(vcpu);
-		else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
-			kvm_x86_ops->enable_irq_window(vcpu);
+		else {
+			if (vcpu->arch.nmi_pending)
+				kvm_x86_ops->enable_nmi_window(vcpu);
+			if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
+				kvm_x86_ops->enable_irq_window(vcpu);
+		}
 
 		if (kvm_lapic_enabled(vcpu)) {
 			update_cr8_intercept(vcpu);

include/linux/compiler-gcc.h

Lines changed: 1 addition & 1 deletion
@@ -199,7 +199,7 @@
 #define unreachable() __builtin_unreachable()
 
 /* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__))
+#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
 
 #endif /* GCC_VERSION >= 40500 */
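
__noclone is used on functions whose inline assembly depends on the exact surrounding function body; gcc's -ftracer can duplicate such asm through tail duplication, so the fix also disables that pass per function with the "no-tracer" optimize attribute. A hedged sketch of the usage pattern, with a hypothetical function that is not from this commit:

/* GCC >= 4.5 definition from the hunk above. */
#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))

/* Hypothetical example: an asm statement that must appear exactly once in
 * exactly this function body, so neither cloning nor tail duplication may
 * copy it. */
static int __noclone run_once_exactly(int x)
{
	asm volatile("" : "+r" (x) : : "memory");
	return x;
}

int main(void)
{
	return run_once_exactly(0);
}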
