@@ -97,18 +97,16 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
-	unsigned long val;
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

-	val = *vcpu_reg(vcpu, p->Rt);
	if (!p->is_aarch32) {
-		vcpu_sys_reg(vcpu, r->reg) = val;
+		vcpu_sys_reg(vcpu, r->reg) = p->regval;
	} else {
		if (!p->is_32bit)
-			vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
-		vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
+			vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
+		vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
	}

	kvm_toggle_cache(vcpu, was_enabled);
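
For reference, upper_32_bits() and lower_32_bits() are the generic helpers from include/linux/kernel.h that this hunk switches to. A minimal sketch of how they are conventionally defined (the split shift in upper_32_bits() keeps the shift defined even when the argument is only 32 bits wide):

	/* Sketch of the generic helpers (include/linux/kernel.h). */
	#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))	/* two 16-bit shifts */
	#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
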
@@ -125,13 +123,10 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
-	u64 val;
-
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

-	val = *vcpu_reg(vcpu, p->Rt);
-	vgic_v3_dispatch_sgi(vcpu, val);
+	vgic_v3_dispatch_sgi(vcpu, p->regval);

	return true;
}
@@ -153,7 +148,7 @@ static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
-		*vcpu_reg(vcpu, p->Rt) = (1 << 3);
+		p->regval = (1 << 3);
		return true;
	}
}
@@ -167,7 +162,7 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
	} else {
		u32 val;
		asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
-		*vcpu_reg(vcpu, p->Rt) = val;
+		p->regval = val;
		return true;
	}
}
@@ -204,13 +199,13 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
-		vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+		vcpu_sys_reg(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
-		*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
+		p->regval = vcpu_sys_reg(vcpu, r->reg);
	}

-	trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt));
+	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}
@@ -228,7 +223,7 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      u64 *dbg_reg)
{
-	u64 val = *vcpu_reg(vcpu, p->Rt);
+	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
@@ -243,12 +238,9 @@ static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      u64 *dbg_reg)
{
-	u64 val = *dbg_reg;
-
+	p->regval = *dbg_reg;
	if (p->is_32bit)
-		val &= 0xffffffffUL;
-
-	*vcpu_reg(vcpu, p->Rt) = val;
+		p->regval &= 0xffffffffUL;
}

static inline bool trap_bvr(struct kvm_vcpu *vcpu,
@@ -697,10 +689,10 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
		u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT);

-		*vcpu_reg(vcpu, p->Rt) = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
-			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
-			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) |
-			     (6 << 16) | (el3 << 14) | (el3 << 12));
+		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
+			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
+			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
+			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}
@@ -710,10 +702,10 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
-		vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
-		*vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
+		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
@@ -740,12 +732,12 @@ static inline bool trap_xvr(struct kvm_vcpu *vcpu,
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
-		val |= *vcpu_reg(vcpu, p->Rt) << 32;
+		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
-		*vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32;
+		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
@@ -1062,12 +1054,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	int Rt = (hsr >> 5) & 0xf;
	int Rt2 = (hsr >> 10) & 0xf;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
-	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
@@ -1076,15 +1068,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
	params.CRn = 0;

	/*
-	 * Massive hack here. Store Rt2 in the top 32bits so we only
-	 * have one register to deal with. As we use the same trap
+	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
-		u64 val = *vcpu_reg(vcpu, params.Rt);
-		val &= 0xffffffff;
-		val |= *vcpu_reg(vcpu, Rt2) << 32;
-		*vcpu_reg(vcpu, params.Rt) = val;
+		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
+		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
@@ -1095,11 +1084,10 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
	unhandled_cp_access(vcpu, &params);

out:
-	/* Do the opposite hack for the read side */
+	/* Split up the value between registers for the read side */
	if (!params.is_write) {
-		u64 val = *vcpu_reg(vcpu, params.Rt);
-		val >>= 32;
-		*vcpu_reg(vcpu, Rt2) = val;
+		vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
+		vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
	}

	return 1;
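
To illustrate the comment in the hunk above: a 64-bit CP15 access supplies its value in two AArch32 GPRs, and kvm_handle_cp_64() folds them into the single params.regval so the AArch32 and AArch64 paths can share trap backends. A standalone sketch of that round-trip (the demo_* helpers are hypothetical, not part of the patch):

	#include <stdint.h>

	/* Fold two 32-bit guest registers (Rt = low half, Rt2 = high half)
	 * into one 64-bit value, as the write path above does. */
	static uint64_t demo_pack(uint32_t rt, uint32_t rt2)
	{
		return ((uint64_t)rt2 << 32) | rt;
	}

	/* Split the 64-bit result back into the two guest registers,
	 * mirroring the lower_32_bits()/upper_32_bits() read path. */
	static void demo_unpack(uint64_t val, uint32_t *rt, uint32_t *rt2)
	{
		*rt  = (uint32_t)(val & 0xffffffff);
		*rt2 = (uint32_t)(val >> 32);
	}
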
@@ -1118,21 +1106,24 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	int Rt = (hsr >> 5) & 0xf;

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
-	params.Rt = (hsr >> 5) & 0xf;
+	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

-	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
-		return 1;
-	if (!emulate_cp(vcpu, &params, global, nr_global))
+	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
+	    !emulate_cp(vcpu, &params, global, nr_global)) {
+		if (!params.is_write)
+			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
+	}

	unhandled_cp_access(vcpu, &params);
	return 1;
@@ -1230,6 +1221,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+	int Rt = (esr >> 5) & 0x1f;
+	int ret;

	trace_kvm_handle_sys_reg(esr);
@@ -1240,10 +1233,14 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
-	params.Rt = (esr >> 5) & 0x1f;
+	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

-	return emulate_sys_reg(vcpu, &params);
+	ret = emulate_sys_reg(vcpu, &params);
+
+	if (!params.is_write)
+		vcpu_set_reg(vcpu, Rt, params.regval);
+	return ret;
}

/******************************************************************************
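
The whole patch rests on the vcpu_get_reg()/vcpu_set_reg() accessors introduced earlier in this series. Unlike dereferencing *vcpu_reg(), they treat register number 31 as the zero register (XZR/WZR) rather than aliasing the saved stack-pointer slot: reads yield 0 and writes are discarded. A sketch along the lines of arch/arm64/include/asm/kvm_emulate.h (exact wording in the tree may differ):

	/* Sketch: register 31 encodes XZR/WZR in these trapped instructions,
	 * so it must read as zero and swallow writes instead of touching
	 * the saved SP as *vcpu_reg() would. */
	static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
	{
		return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
	}

	static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, unsigned long val)
	{
		if (reg_num != 31)
			vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
	}
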