Commit 2e53d63

matosatti authored and avikivity committed
KVM: MMU: ignore zapped root pagetables
Mark zapped root pagetables as invalid and ignore such pages during lookup.

This is a problem with the cr3-target feature, where a zapped root table fools the faulting code into creating a read-only mapping. The result is a lockup if the instruction can't be emulated.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
1 parent 847f0ad commit 2e53d63
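
Before the per-file diffs, here is a minimal, self-contained userspace sketch of the lifecycle the commit message describes. The names (sim_mmu_page, sim_lookup, sim_zap, sim_drop_root) are illustrative stand-ins, not the kernel structures touched by the patch; the sketch only models the invalid-bit idea: a root page that is zapped while still referenced is marked invalid instead of freed, lookups skip it, and it is zapped for real once its last root reference is dropped.

/*
 * Illustrative userspace sketch only; simplified stand-in types, not the
 * kernel's kvm_mmu_page or its hash-bucket lookup.
 */
#include <stdio.h>
#include <stdbool.h>

struct sim_mmu_page {
	unsigned long gfn;
	int root_count;     /* how many vcpus still use this page as a root */
	bool invalid;       /* set when zapped while still referenced */
	bool freed;
};

/* Lookup skips invalid pages, mirroring the kvm_mmu_lookup_page change. */
struct sim_mmu_page *sim_lookup(struct sim_mmu_page *pages, int n,
				unsigned long gfn)
{
	for (int i = 0; i < n; i++)
		if (pages[i].gfn == gfn && !pages[i].invalid && !pages[i].freed)
			return &pages[i];
	return NULL;
}

/* Zap frees an unreferenced page; a referenced root is only marked invalid. */
void sim_zap(struct sim_mmu_page *sp)
{
	if (!sp->root_count)
		sp->freed = true;
	else
		sp->invalid = true;  /* real patch also requests remote MMU reloads */
}

/* Dropping the last root reference of an invalid page zaps it for real. */
void sim_drop_root(struct sim_mmu_page *sp)
{
	--sp->root_count;
	if (!sp->root_count && sp->invalid)
		sim_zap(sp);
}

int main(void)
{
	struct sim_mmu_page pages[1] = { { .gfn = 0x1234, .root_count = 1 } };

	sim_zap(&pages[0]);                       /* still in use: marked invalid */
	printf("lookup after zap: %p\n",
	       (void *)sim_lookup(pages, 1, 0x1234));   /* NULL: page is ignored */
	sim_drop_root(&pages[0]);                 /* last reference gone: freed */
	printf("freed: %d\n", pages[0].freed);
	return 0;
}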

5 files changed: 48 additions and 2 deletions


arch/x86/kvm/mmu.c

Lines changed: 10 additions & 2 deletions
@@ -667,7 +667,8 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
-		if (sp->gfn == gfn && !sp->role.metaphysical) {
+		if (sp->gfn == gfn && !sp->role.metaphysical
+		    && !sp->role.invalid) {
 			pgprintk("%s: found role %x\n",
 				 __FUNCTION__, sp->role.word);
 			return sp;
@@ -792,8 +793,11 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	if (!sp->root_count) {
 		hlist_del(&sp->hash_link);
 		kvm_mmu_free_page(kvm, sp);
-	} else
+	} else {
 		list_move(&sp->link, &kvm->arch.active_mmu_pages);
+		sp->role.invalid = 1;
+		kvm_reload_remote_mmus(kvm);
+	}
 	kvm_mmu_reset_last_pte_updated(kvm);
 }
 
@@ -1073,6 +1077,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 
 		sp = page_header(root);
 		--sp->root_count;
+		if (!sp->root_count && sp->role.invalid)
+			kvm_mmu_zap_page(vcpu->kvm, sp);
 		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 		spin_unlock(&vcpu->kvm->mmu_lock);
 		return;
@@ -1085,6 +1091,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 			root &= PT64_BASE_ADDR_MASK;
 			sp = page_header(root);
 			--sp->root_count;
+			if (!sp->root_count && sp->role.invalid)
+				kvm_mmu_zap_page(vcpu->kvm, sp);
 		}
 		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
 	}

arch/x86/kvm/x86.c

Lines changed: 12 additions & 0 deletions
@@ -2658,6 +2658,10 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		kvm_x86_ops->guest_debug_pre(vcpu);
 
 again:
+	if (vcpu->requests)
+		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+			kvm_mmu_unload(vcpu);
+
 	r = kvm_mmu_reload(vcpu);
 	if (unlikely(r))
 		goto out;
@@ -2689,6 +2693,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		goto out;
 	}
 
+	if (vcpu->requests)
+		if (test_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) {
+			local_irq_enable();
+			preempt_enable();
+			r = 1;
+			goto out;
+		}
+
 	if (signal_pending(current)) {
 		local_irq_enable();
 		preempt_enable();

include/asm-x86/kvm_host.h

Lines changed: 1 addition & 0 deletions
@@ -141,6 +141,7 @@ union kvm_mmu_page_role {
 		unsigned pad_for_nice_hex_output:6;
 		unsigned metaphysical:1;
 		unsigned access:3;
+		unsigned invalid:1;
 	};
 };

include/linux/kvm_host.h

Lines changed: 2 additions & 0 deletions
@@ -37,6 +37,7 @@
 #define KVM_REQ_TLB_FLUSH 0
 #define KVM_REQ_MIGRATE_TIMER 1
 #define KVM_REQ_REPORT_TPR_ACCESS 2
+#define KVM_REQ_MMU_RELOAD 3
 
 struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
@@ -190,6 +191,7 @@ void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_flush_remote_tlbs(struct kvm *kvm);
+void kvm_reload_remote_mmus(struct kvm *kvm);
 
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg);

virt/kvm/kvm_main.c

Lines changed: 23 additions & 0 deletions
@@ -119,6 +119,29 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 	smp_call_function_mask(cpus, ack_flush, NULL, 1);
 }
 
+void kvm_reload_remote_mmus(struct kvm *kvm)
+{
+	int i, cpu;
+	cpumask_t cpus;
+	struct kvm_vcpu *vcpu;
+
+	cpus_clear(cpus);
+	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+		vcpu = kvm->vcpus[i];
+		if (!vcpu)
+			continue;
+		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+			continue;
+		cpu = vcpu->cpu;
+		if (cpu != -1 && cpu != raw_smp_processor_id())
+			cpu_set(cpu, cpus);
+	}
+	if (cpus_empty(cpus))
+		return;
+	smp_call_function_mask(cpus, ack_flush, NULL, 1);
+}
+
+
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 {
 	struct page *page;
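
The x86.c and kvm_main.c hunks above cooperate through the new per-vcpu request bit KVM_REQ_MMU_RELOAD: kvm_reload_remote_mmus() sets the bit for every vcpu (and IPIs those currently running), and __vcpu_run() consumes it before guest entry by unloading the MMU roots so they are rebuilt. Below is a hedged, single-threaded userspace sketch of that request-bit pattern; sim_vcpu, sim_request_mmu_reload and sim_vcpu_run_once are made-up illustrative names, and the real code uses atomic test_and_set_bit/test_and_clear_bit plus cross-CPU calls rather than these plain bit operations.

/* Illustrative sketch of a per-vcpu request bit, not the kernel's API. */
#include <stdio.h>

#define SIM_REQ_MMU_RELOAD 3

struct sim_vcpu {
	unsigned long requests;   /* one bit per pending request */
};

/* Requester side: mark the vcpu as needing an MMU reload. */
static void sim_request_mmu_reload(struct sim_vcpu *vcpu)
{
	vcpu->requests |= 1UL << SIM_REQ_MMU_RELOAD;
	/* the real code then IPIs vcpus that are currently in guest mode */
}

/* Vcpu entry path: consume the request, drop stale roots, then reload. */
static void sim_vcpu_run_once(struct sim_vcpu *vcpu)
{
	if (vcpu->requests & (1UL << SIM_REQ_MMU_RELOAD)) {
		vcpu->requests &= ~(1UL << SIM_REQ_MMU_RELOAD);
		printf("unloading mmu roots before guest entry\n");
	}
	printf("reloading mmu and entering guest\n");
}

int main(void)
{
	struct sim_vcpu vcpu = { 0 };

	sim_request_mmu_reload(&vcpu);
	sim_vcpu_run_once(&vcpu);
	return 0;
}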
