Skip to content

Commit 697d389

Browse files
paulusmack authored and avikivity committed
KVM: PPC: Implement MMIO emulation support for Book3S HV guests
This provides the low-level support for MMIO emulation in Book3S HV guests. When the guest tries to map a page which is not covered by any memslot, that page is taken to be an MMIO emulation page. Instead of inserting a valid HPTE, we insert an HPTE that has the valid bit clear but another hypervisor software-use bit set, which we call HPTE_V_ABSENT, to indicate that this is an absent page. An absent page is treated much like a valid page as far as guest hcalls (H_ENTER, H_REMOVE, H_READ etc.) are concerned, except of course that an absent HPTE doesn't need to be invalidated with tlbie since it was never valid as far as the hardware is concerned. When the guest accesses a page for which there is an absent HPTE, it will take a hypervisor data storage interrupt (HDSI) since we now set the VPM1 bit in the LPCR. Our HDSI handler for HPTE-not-present faults looks up the hash table and if it finds an absent HPTE mapping the requested virtual address, will switch to kernel mode and handle the fault in kvmppc_book3s_hv_page_fault(), which at present just calls kvmppc_hv_emulate_mmio() to set up the MMIO emulation. This is based on an earlier patch by Benjamin Herrenschmidt, but since heavily reworked. Signed-off-by: Paul Mackerras <paulus@samba.org> Signed-off-by: Alexander Graf <agraf@suse.de> Signed-off-by: Avi Kivity <avi@redhat.com>
1 parent 06ce2c6 commit 697d389

File tree

12 files changed

+607
-83
lines changed

12 files changed

+607
-83
lines changed

arch/powerpc/include/asm/kvm_book3s.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -119,6 +119,11 @@ extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
119119
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
120120
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
121121
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
122+
extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
123+
struct kvm_vcpu *vcpu, unsigned long addr,
124+
unsigned long status);
125+
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
126+
unsigned long slb_v, unsigned long valid);
122127

123128
extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
124129
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);

arch/powerpc/include/asm/kvm_book3s_64.h

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,12 +43,15 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
4343
#define HPT_HASH_MASK (HPT_NPTEG - 1)
4444
#endif
4545

46+
#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
47+
4648
/*
4749
* We use a lock bit in HPTE dword 0 to synchronize updates and
4850
* accesses to each HPTE, and another bit to indicate non-present
4951
* HPTEs.
5052
*/
5153
#define HPTE_V_HVLOCK 0x40UL
54+
#define HPTE_V_ABSENT 0x20UL
5255

5356
static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
5457
{
@@ -144,6 +147,29 @@ static inline unsigned long hpte_cache_bits(unsigned long pte_val)
144147
#endif
145148
}
146149

150+
static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
151+
{
152+
if (key)
153+
return PP_RWRX <= pp && pp <= PP_RXRX;
154+
return 1;
155+
}
156+
157+
static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
158+
{
159+
if (key)
160+
return pp == PP_RWRW;
161+
return pp <= PP_RWRW;
162+
}
163+
164+
static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
165+
{
166+
unsigned long skey;
167+
168+
skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
169+
((hpte_r & HPTE_R_KEY_LO) >> 9);
170+
return (amr >> (62 - 2 * skey)) & 3;
171+
}
172+
147173
static inline void lock_rmap(unsigned long *rmap)
148174
{
149175
do {

arch/powerpc/include/asm/kvm_host.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -210,6 +210,7 @@ struct kvm_arch {
210210
unsigned long lpcr;
211211
unsigned long rmor;
212212
struct kvmppc_rma_info *rma;
213+
unsigned long vrma_slb_v;
213214
int rma_setup_done;
214215
struct list_head spapr_tce_tables;
215216
spinlock_t slot_phys_lock;
@@ -452,6 +453,10 @@ struct kvm_vcpu_arch {
452453
#ifdef CONFIG_KVM_BOOK3S_64_HV
453454
struct kvm_vcpu_arch_shared shregs;
454455

456+
unsigned long pgfault_addr;
457+
long pgfault_index;
458+
unsigned long pgfault_hpte[2];
459+
455460
struct list_head run_list;
456461
struct task_struct *run_task;
457462
struct kvm_run *kvm_run;

arch/powerpc/include/asm/mmu-hash64.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -108,11 +108,11 @@ extern char initial_stab[];
108108
#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)
109109

110110
/* Values for PP (assumes Ks=0, Kp=1) */
111-
/* pp0 will always be 0 for linux */
112111
#define PP_RWXX 0 /* Supervisor read/write, User none */
113112
#define PP_RWRX 1 /* Supervisor read/write, User read */
114113
#define PP_RWRW 2 /* Supervisor read/write, User read/write */
115114
#define PP_RXRX 3 /* Supervisor read, User read */
115+
#define PP_RXXX (HPTE_R_PP0 | 2) /* Supervisor read, user none */
116116

117117
#ifndef __ASSEMBLY__
118118

arch/powerpc/include/asm/ppc-opcode.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,7 @@
4545
#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
4646
#define PPC_INST_MTSPR_DSCR 0x7c1103a6
4747
#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
48+
#define PPC_INST_SLBFEE 0x7c0007a7
4849

4950
#define PPC_INST_STRING 0x7c00042a
5051
#define PPC_INST_STRING_MASK 0xfc0007fe
@@ -183,7 +184,8 @@
183184
__PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b))
184185
#define PPC_ERATSX_DOT(t, a, w) stringify_in_c(.long PPC_INST_ERATSX_DOT | \
185186
__PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b))
186-
187+
#define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \
188+
__PPC_RT(t) | __PPC_RB(b))
187189

188190
/*
189191
* Define what the VSX XX1 form instructions will look like, then add

arch/powerpc/include/asm/reg.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -216,6 +216,7 @@
216216
#define DSISR_ISSTORE 0x02000000 /* access was a store */
217217
#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
218218
#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */
219+
#define DSISR_KEYFAULT 0x00200000 /* Key fault */
219220
#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
220221
#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
221222
#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */

arch/powerpc/kernel/asm-offsets.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -455,6 +455,7 @@ int main(void)
455455
DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu));
456456
DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
457457
DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
458+
DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
458459
DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
459460
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
460461
#endif

arch/powerpc/kernel/exceptions-64s.S

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -100,14 +100,14 @@ data_access_not_stab:
100100
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
101101
#endif
102102
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
103-
KVMTEST_PR, 0x300)
103+
KVMTEST, 0x300)
104104

105105
. = 0x380
106106
.globl data_access_slb_pSeries
107107
data_access_slb_pSeries:
108108
HMT_MEDIUM
109109
SET_SCRATCH0(r13)
110-
EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
110+
EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
111111
std r3,PACA_EXSLB+EX_R3(r13)
112112
mfspr r3,SPRN_DAR
113113
#ifdef __DISABLED__
@@ -329,8 +329,8 @@ do_stab_bolted_pSeries:
329329
EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
330330
#endif /* CONFIG_POWER4_ONLY */
331331

332-
KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x300)
333-
KVM_HANDLER_PR_SKIP(PACA_EXSLB, EXC_STD, 0x380)
332+
KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
333+
KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
334334
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
335335
KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
336336
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)

0 commit comments

Comments (0)