
Commit 2ec5be3

pfedin authored and Marc Zyngier committed
arm64: KVM: Correctly handle zero register in system register accesses
System register accesses also use the zero register for Rt == 31, so indexing the GPR array directly would again return the SP value instead. This patch converts them to the new accessors introduced by the previous patch. Since the register value is no longer directly associated with storage inside the vCPU context structure, we introduce dedicated storage for it in struct sys_reg_params. This refactor also gets rid of the "massive hack" in kvm_handle_cp_64().

Signed-off-by: Pavel Fedin <p.fedin@samsung.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
1 parent 3fec037 commit 2ec5be3
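
For reference, the accessors this patch switches to (added to arch/arm64/include/asm/kvm_emulate.h by the parent commit) treat register number 31 as the zero register rather than as an index into the saved GPR array. A rough sketch of their behaviour, not a verbatim copy of the upstream definitions:

static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
                                         u8 reg_num)
{
        /* Rt == 31 encodes XZR/WZR: reads yield 0 instead of the SP slot. */
        return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
                                unsigned long val)
{
        /* Writes to the zero register are discarded. */
        if (reg_num != 31)
                vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}

With this change the trap backends below never touch the GPR file themselves: the top-level handlers do the vcpu_get_reg()/vcpu_set_reg() round trip once, and the backends only read and write params.regval.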

File tree

3 files changed: +45 -48 lines changed


arch/arm64/kvm/sys_regs.c

Lines changed: 42 additions & 45 deletions
@@ -97,18 +97,16 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
 {
-        unsigned long val;
         bool was_enabled = vcpu_has_cache_enabled(vcpu);
 
         BUG_ON(!p->is_write);
 
-        val = *vcpu_reg(vcpu, p->Rt);
         if (!p->is_aarch32) {
-                vcpu_sys_reg(vcpu, r->reg) = val;
+                vcpu_sys_reg(vcpu, r->reg) = p->regval;
         } else {
                 if (!p->is_32bit)
-                        vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
-                vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
+                        vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
+                vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
         }
 
         kvm_toggle_cache(vcpu, was_enabled);
@@ -125,13 +123,10 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
                            struct sys_reg_params *p,
                            const struct sys_reg_desc *r)
 {
-        u64 val;
-
         if (!p->is_write)
                 return read_from_write_only(vcpu, p);
 
-        val = *vcpu_reg(vcpu, p->Rt);
-        vgic_v3_dispatch_sgi(vcpu, val);
+        vgic_v3_dispatch_sgi(vcpu, p->regval);
 
         return true;
 }
@@ -153,7 +148,7 @@ static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
         if (p->is_write) {
                 return ignore_write(vcpu, p);
         } else {
-                *vcpu_reg(vcpu, p->Rt) = (1 << 3);
+                p->regval = (1 << 3);
                 return true;
         }
 }
@@ -167,7 +162,7 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
         } else {
                 u32 val;
                 asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
-                *vcpu_reg(vcpu, p->Rt) = val;
+                p->regval = val;
                 return true;
         }
 }
@@ -204,13 +199,13 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
                             const struct sys_reg_desc *r)
 {
         if (p->is_write) {
-                vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+                vcpu_sys_reg(vcpu, r->reg) = p->regval;
                 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
         } else {
-                *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
+                p->regval = vcpu_sys_reg(vcpu, r->reg);
         }
 
-        trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt));
+        trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
 
         return true;
 }
@@ -228,7 +223,7 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
                               struct sys_reg_params *p,
                               u64 *dbg_reg)
 {
-        u64 val = *vcpu_reg(vcpu, p->Rt);
+        u64 val = p->regval;
 
         if (p->is_32bit) {
                 val &= 0xffffffffUL;
@@ -243,12 +238,9 @@ static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
                               struct sys_reg_params *p,
                               u64 *dbg_reg)
 {
-        u64 val = *dbg_reg;
-
+        p->regval = *dbg_reg;
         if (p->is_32bit)
-                val &= 0xffffffffUL;
-
-        *vcpu_reg(vcpu, p->Rt) = val;
+                p->regval &= 0xffffffffUL;
 }
 
 static inline bool trap_bvr(struct kvm_vcpu *vcpu,
@@ -697,10 +689,10 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
                 u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
                 u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT);
 
-                *vcpu_reg(vcpu, p->Rt) = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
-                                          (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
-                                          (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) |
-                                          (6 << 16) | (el3 << 14) | (el3 << 12));
+                p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
+                             (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
+                             (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
+                             | (6 << 16) | (el3 << 14) | (el3 << 12));
                 return true;
         }
 }
@@ -710,10 +702,10 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
                          const struct sys_reg_desc *r)
 {
         if (p->is_write) {
-                vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+                vcpu_cp14(vcpu, r->reg) = p->regval;
                 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
         } else {
-                *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
+                p->regval = vcpu_cp14(vcpu, r->reg);
         }
 
         return true;
@@ -740,12 +732,12 @@ static inline bool trap_xvr(struct kvm_vcpu *vcpu,
                 u64 val = *dbg_reg;
 
                 val &= 0xffffffffUL;
-                val |= *vcpu_reg(vcpu, p->Rt) << 32;
+                val |= p->regval << 32;
                 *dbg_reg = val;
 
                 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
         } else {
-                *vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32;
+                p->regval = *dbg_reg >> 32;
         }
 
         trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
@@ -1062,12 +1054,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 {
         struct sys_reg_params params;
         u32 hsr = kvm_vcpu_get_hsr(vcpu);
+        int Rt = (hsr >> 5) & 0xf;
         int Rt2 = (hsr >> 10) & 0xf;
 
         params.is_aarch32 = true;
         params.is_32bit = false;
         params.CRm = (hsr >> 1) & 0xf;
-        params.Rt = (hsr >> 5) & 0xf;
         params.is_write = ((hsr & 1) == 0);
 
         params.Op0 = 0;
@@ -1076,15 +1068,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
         params.CRn = 0;
 
         /*
-         * Massive hack here. Store Rt2 in the top 32bits so we only
-         * have one register to deal with. As we use the same trap
+         * Make a 64-bit value out of Rt and Rt2. As we use the same trap
          * backends between AArch32 and AArch64, we get away with it.
          */
         if (params.is_write) {
-                u64 val = *vcpu_reg(vcpu, params.Rt);
-                val &= 0xffffffff;
-                val |= *vcpu_reg(vcpu, Rt2) << 32;
-                *vcpu_reg(vcpu, params.Rt) = val;
+                params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
+                params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
         }
 
         if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
@@ -1095,11 +1084,10 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
         unhandled_cp_access(vcpu, &params);
 
 out:
-        /* Do the opposite hack for the read side */
+        /* Split up the value between registers for the read side */
         if (!params.is_write) {
-                u64 val = *vcpu_reg(vcpu, params.Rt);
-                val >>= 32;
-                *vcpu_reg(vcpu, Rt2) = val;
+                vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
+                vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
         }
 
         return 1;
@@ -1118,21 +1106,24 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 {
         struct sys_reg_params params;
         u32 hsr = kvm_vcpu_get_hsr(vcpu);
+        int Rt = (hsr >> 5) & 0xf;
 
         params.is_aarch32 = true;
         params.is_32bit = true;
         params.CRm = (hsr >> 1) & 0xf;
-        params.Rt = (hsr >> 5) & 0xf;
+        params.regval = vcpu_get_reg(vcpu, Rt);
         params.is_write = ((hsr & 1) == 0);
         params.CRn = (hsr >> 10) & 0xf;
         params.Op0 = 0;
         params.Op1 = (hsr >> 14) & 0x7;
         params.Op2 = (hsr >> 17) & 0x7;
 
-        if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
-                return 1;
-        if (!emulate_cp(vcpu, &params, global, nr_global))
+        if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
+            !emulate_cp(vcpu, &params, global, nr_global)) {
+                if (!params.is_write)
+                        vcpu_set_reg(vcpu, Rt, params.regval);
                 return 1;
+        }
 
         unhandled_cp_access(vcpu, &params);
         return 1;
@@ -1230,6 +1221,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
         struct sys_reg_params params;
         unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+        int Rt = (esr >> 5) & 0x1f;
+        int ret;
 
         trace_kvm_handle_sys_reg(esr);
 
@@ -1240,10 +1233,14 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
         params.CRn = (esr >> 10) & 0xf;
         params.CRm = (esr >> 1) & 0xf;
         params.Op2 = (esr >> 17) & 0x7;
-        params.Rt = (esr >> 5) & 0x1f;
+        params.regval = vcpu_get_reg(vcpu, Rt);
         params.is_write = !(esr & 1);
 
-        return emulate_sys_reg(vcpu, &params);
+        ret = emulate_sys_reg(vcpu, &params);
+
+        if (!params.is_write)
+                vcpu_set_reg(vcpu, Rt, params.regval);
+        return ret;
 }
 
 /******************************************************************************

arch/arm64/kvm/sys_regs.h

Lines changed: 2 additions & 2 deletions
@@ -28,7 +28,7 @@ struct sys_reg_params {
         u8      CRn;
         u8      CRm;
         u8      Op2;
-        u8      Rt;
+        u64     regval;
         bool    is_write;
         bool    is_aarch32;
         bool    is_32bit;       /* Only valid if is_aarch32 is true */
@@ -79,7 +79,7 @@ static inline bool ignore_write(struct kvm_vcpu *vcpu,
 static inline bool read_zero(struct kvm_vcpu *vcpu,
                              struct sys_reg_params *p)
 {
-        *vcpu_reg(vcpu, p->Rt) = 0;
+        p->regval = 0;
         return true;
 }
 
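Since struct sys_reg_params now carries the value itself, a trap backend no longer needs the Rt index at all. A hypothetical handler (trap_example is illustrative only, not part of this commit) showing the pattern the converted handlers follow:

static bool trap_example(struct kvm_vcpu *vcpu,
                         struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        if (p->is_write)
                /* the guest's value was already loaded into p->regval */
                vcpu_sys_reg(vcpu, r->reg) = p->regval;
        else
                /* the caller copies p->regval back to Rt via vcpu_set_reg() */
                p->regval = vcpu_sys_reg(vcpu, r->reg);

        return true;
}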
arch/arm64/kvm/sys_regs_generic_v8.c

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@ static bool access_actlr(struct kvm_vcpu *vcpu,
         if (p->is_write)
                 return ignore_write(vcpu, p);
 
-        *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1);
+        p->regval = vcpu_sys_reg(vcpu, ACTLR_EL1);
         return true;
 }
 
