Skip to content

Commit f85279b

Browse files
committed
arm64: KVM: Save/restore the host SPE state when entering/leaving a VM
The SPE buffer is virtually addressed, using the page tables of the CPU MMU. Unusually, this means that the EL0/1 page table may be live whilst we're executing at EL2 on non-VHE configurations.

When VHE is in use, we can use the same property to profile the guest behind its back.

This patch adds the relevant disabling and flushing code to KVM so that the host can make use of SPE without corrupting guest memory, and so that any attempt by a guest to use SPE will result in a trap.

Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: Alex Bennée <alex.bennee@linaro.org>
Cc: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
1 parent 3d29a9a commit f85279b

File tree

5 files changed

+95
-4
lines changed

5 files changed

+95
-4
lines changed

arch/arm64/include/asm/kvm_arm.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -188,6 +188,9 @@
188188
#define CPTR_EL2_DEFAULT 0x000033ff
189189

190190
/* Hyp Debug Configuration Register bits */
191+
#define MDCR_EL2_TPMS (1 << 14)
192+
#define MDCR_EL2_E2PB_MASK (UL(0x3))
193+
#define MDCR_EL2_E2PB_SHIFT (UL(12))
191194
#define MDCR_EL2_TDRA (1 << 11)
192195
#define MDCR_EL2_TDOSA (1 << 10)
193196
#define MDCR_EL2_TDA (1 << 9)

arch/arm64/include/asm/kvm_host.h

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -229,7 +229,12 @@ struct kvm_vcpu_arch {
229229

230230
/* Pointer to host CPU context */
231231
kvm_cpu_context_t *host_cpu_context;
232-
struct kvm_guest_debug_arch host_debug_state;
232+
struct {
233+
/* {Break,watch}point registers */
234+
struct kvm_guest_debug_arch regs;
235+
/* Statistical profiling extension */
236+
u64 pmscr_el1;
237+
} host_debug_state;
233238

234239
/* VGIC state */
235240
struct vgic_cpu vgic_cpu;

arch/arm64/kvm/debug.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
9595
* - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
9696
* - Debug ROM Address (MDCR_EL2_TDRA)
9797
* - OS related registers (MDCR_EL2_TDOSA)
98+
* - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
9899
*
99100
* Additionally, KVM only traps guest accesses to the debug registers if
100101
* the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
@@ -110,8 +111,13 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
110111

111112
trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
112113

114+
/*
115+
* This also clears MDCR_EL2_E2PB_MASK to disable guest access
116+
* to the profiling buffer.
117+
*/
113118
vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
114119
vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
120+
MDCR_EL2_TPMS |
115121
MDCR_EL2_TPMCR |
116122
MDCR_EL2_TDRA |
117123
MDCR_EL2_TDOSA);

arch/arm64/kvm/hyp/debug-sr.c

Lines changed: 64 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,66 @@
6565
default: write_debug(ptr[0], reg, 0); \
6666
}
6767

68+
/*
 * Statistical Profiling Extension (SPE) system registers, encoded
 * numerically via sys_reg() so the file builds with assemblers/compilers
 * that do not know these registers by name.
 */
#define PMSCR_EL1			sys_reg(3, 0, 9, 9, 0)

#define PMBLIMITR_EL1			sys_reg(3, 0, 9, 10, 0)
#define PMBLIMITR_EL1_E			BIT(0)	/* profiling buffer enable */

#define PMBIDR_EL1			sys_reg(3, 0, 9, 10, 7)
#define PMBIDR_EL1_P			BIT(4)	/* buffer owned by a higher EL */

/*
 * "psb csync": drain any profiling data buffered in the CPU to the
 * memory-mapped profiling buffer.  Encoded as a hint instruction
 * ("hint #17") so that toolchains without SPE support still assemble it;
 * hints execute as NOPs on CPUs without the extension.
 */
#define psb_csync()			asm volatile("hint #17")
77+
78+
/*
 * VHE variant: nothing to save.  Per the commit description, with VHE the
 * host can keep profiling (even the guest, behind its back) because the
 * relevant page tables remain usable while the guest runs, so SPE is left
 * enabled and *pmscr_el1 is deliberately left untouched (zero), which also
 * makes the restore path a no-op.
 */
static void __hyp_text __debug_save_spe_vhe(u64 *pmscr_el1)
{
	/* The vcpu can run, but it can't hide. */
}
82+
83+
/*
 * Non-VHE variant: the SPE buffer is written through the EL0/1 page
 * tables, which are about to be replaced by the guest's, so host
 * profiling must be stopped and the buffer drained before entering the
 * guest.  Saves the host's PMSCR_EL1 into *pmscr_el1 (left untouched,
 * i.e. zero, if SPE is absent, owned by EL3, or simply not in use —
 * the restore path treats zero as "nothing to do").
 */
static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
{
	u64 reg;

	/* SPE present on this CPU? */
	if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
						  ID_AA64DFR0_PMSVER_SHIFT))
		return;

	/* Yes; is it owned by EL3? If so, it is not ours to touch. */
	reg = read_sysreg_s(PMBIDR_EL1);
	if (reg & PMBIDR_EL1_P)
		return;

	/* No; is the host actually using the thing? */
	reg = read_sysreg_s(PMBLIMITR_EL1);
	if (!(reg & PMBLIMITR_EL1_E))
		return;

	/*
	 * Yes; save the control register and disable data generation.
	 * The isb() makes sure the write to PMSCR_EL1 has taken effect
	 * before we drain, so no new records are generated afterwards.
	 */
	*pmscr_el1 = read_sysreg_s(PMSCR_EL1);
	write_sysreg_s(0, PMSCR_EL1);
	isb();

	/*
	 * Now drain all buffered data to memory: psb_csync() pushes any
	 * pending records out of the CPU, dsb(nsh) makes the memory
	 * writes visible before the stage-1 tables are switched.
	 */
	psb_csync();
	dsb(nsh);
}
111+
112+
/*
 * Patch in the right save routine at boot via the runtime-alternatives
 * framework: the VHE flavour when ARM64_HAS_VIRT_HOST_EXTN is set, the
 * non-VHE flavour otherwise.  Invoked as __debug_save_spe()(&...).
 */
static hyp_alternate_select(__debug_save_spe,
			    __debug_save_spe_nvhe, __debug_save_spe_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);
115+
116+
/*
 * Re-enable host SPE profiling on guest exit, using the PMSCR_EL1 value
 * stashed by __debug_save_spe_nvhe().  A zero value means nothing was
 * saved (SPE absent, EL3-owned, disabled, or the VHE path), so there is
 * nothing to restore.  No draining is needed here — profiling was off
 * while the guest ran, so the buffer holds only host data.
 */
static void __hyp_text __debug_restore_spe(u64 pmscr_el1)
{
	if (!pmscr_el1)
		return;

	/*
	 * The host page table is installed, but not yet synchronised:
	 * the isb() ensures the TTBR switch is seen before data
	 * generation restarts, so records land in host-mapped memory.
	 */
	isb();

	/* Re-enable data generation */
	write_sysreg_s(pmscr_el1, PMSCR_EL1);
}
127+
68128
void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
69129
struct kvm_guest_debug_arch *dbg,
70130
struct kvm_cpu_context *ctxt)
@@ -118,13 +178,15 @@ void __hyp_text __debug_cond_save_host_state(struct kvm_vcpu *vcpu)
118178
(vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_MDE))
119179
vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
120180

121-
__debug_save_state(vcpu, &vcpu->arch.host_debug_state,
181+
__debug_save_state(vcpu, &vcpu->arch.host_debug_state.regs,
122182
kern_hyp_va(vcpu->arch.host_cpu_context));
183+
__debug_save_spe()(&vcpu->arch.host_debug_state.pmscr_el1);
123184
}
124185

125186
void __hyp_text __debug_cond_restore_host_state(struct kvm_vcpu *vcpu)
126187
{
127-
__debug_restore_state(vcpu, &vcpu->arch.host_debug_state,
188+
__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
189+
__debug_restore_state(vcpu, &vcpu->arch.host_debug_state.regs,
128190
kern_hyp_va(vcpu->arch.host_cpu_context));
129191

130192
if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)

arch/arm64/kvm/hyp/switch.c

Lines changed: 16 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -103,14 +103,26 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
103103
/*
 * Undo the guest trap configuration on return to a VHE host: restore the
 * host's hcr_el2/cpacr_el1/vbar_el1 and trim mdcr_el2 down to the bits
 * the host is allowed to keep.
 */
static void __hyp_text __deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	/*
	 * Keep only HPMN, the SPE trap bit (TPMS) and the profiling-buffer
	 * ownership field (E2PB); everything else is cleared.  Preserving
	 * E2PB/TPMS here leaves the host's SPE configuration intact on VHE
	 * (the save path deliberately did not disable it).
	 */
	mdcr_el2 &= MDCR_EL2_HPMN_MASK |
		    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
		    MDCR_EL2_TPMS;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}
111117

112118
/*
 * Undo the guest trap configuration on return to a non-VHE host:
 * restore hcr_el2/cptr_el2 and rebuild mdcr_el2 for host use.
 */
static void __hyp_text __deactivate_traps_nvhe(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	/*
	 * Keep only HPMN, then set E2PB so the profiling buffer is owned
	 * by EL1 again — kvm_arm_setup_debug cleared it on guest entry to
	 * keep the guest away from the buffer; here the host gets it back.
	 */
	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_RW, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}
@@ -132,7 +144,6 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
132144

133145
__deactivate_traps_arch()();
134146
write_sysreg(0, hstr_el2);
135-
write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
136147
write_sysreg(0, pmuserenr_el0);
137148
}
138149

@@ -357,6 +368,10 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
357368
}
358369

359370
__debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
371+
/*
372+
* This must come after restoring the host sysregs, since a non-VHE
373+
* system may enable SPE here and make use of the TTBRs.
374+
*/
360375
__debug_cond_restore_host_state(vcpu);
361376

362377
return exit_code;

0 commit comments

Comments
 (0)