@@ -839,7 +839,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
 	 * the page we just got a reference to gets unmapped before we have a
 	 * chance to grab the mmu_lock, which ensure that if the page gets
-	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
+	 * unmapped afterwards, the call to kvm_unmap_gfn will take it away
 	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
 	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
 	 */
@@ -1064,123 +1064,72 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
-static int handle_hva_to_gpa(struct kvm *kvm,
-			     unsigned long start,
-			     unsigned long end,
-			     int (*handler)(struct kvm *kvm,
-					    gpa_t gpa, u64 size,
-					    void *data),
-			     void *data)
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_info *info)
 {
-	struct kvm_memslots *slots;
-	struct kvm_memory_slot *memslot;
-	int ret = 0;
-
-	slots = kvm_memslots(kvm);
-
-	/* we only care about the pages that the guest sees */
-	kvm_for_each_memslot(memslot, slots) {
-		unsigned long hva_start, hva_end;
-		gfn_t gpa;
-
-		hva_start = max(start, memslot->userspace_addr);
-		hva_end = min(end, memslot->userspace_addr +
-					(memslot->npages << PAGE_SHIFT));
-		if (hva_start >= hva_end)
-			continue;
-
-		gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
-		ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
-	}
-
-	return ret;
-}
+	if (!kvm->arch.mmu.pgt)
+		return 0;
 
-static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
-	unsigned flags = *(unsigned *)data;
-	bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
+	__unmap_stage2_range(&kvm->arch.mmu, info->start << PAGE_SHIFT,
+			     (info->end - info->start) << PAGE_SHIFT,
+			     info->may_block);
 
-	__unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
 	return 0;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end, unsigned flags)
+bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_info *info)
 {
+	kvm_pfn_t pfn = pte_pfn(info->pte);
+
 	if (!kvm->arch.mmu.pgt)
 		return 0;
 
-	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
-	return 0;
-}
-
-static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
-	kvm_pfn_t *pfn = (kvm_pfn_t *)data;
+	WARN_ON(info->end - info->start != 1);
 
-	WARN_ON(size != PAGE_SIZE);
+	/*
+	 * We've moved a page around, probably through CoW, so let's treat it
+	 * just like a translation fault and clean the cache to the PoC.
+	 */
+	clean_dcache_guest_page(pfn, PAGE_SIZE);
 
 	/*
 	 * The MMU notifiers will have unmapped a huge PMD before calling
-	 * ->change_pte() (which in turn calls kvm_set_spte_hva()) and
+	 * ->change_pte() (which in turn calls kvm_set_spte_gfn()) and
 	 * therefore we never need to clear out a huge PMD through this
 	 * calling path and a memcache is not required.
 	 */
-	kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, gpa, PAGE_SIZE,
-			       __pfn_to_phys(*pfn), KVM_PGTABLE_PROT_R, NULL);
+	kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, info->start << PAGE_SHIFT,
+			       PAGE_SIZE, __pfn_to_phys(pfn),
+			       KVM_PGTABLE_PROT_R, NULL);
+
 	return 0;
 }
 
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_info *info)
 {
-	unsigned long end = hva + PAGE_SIZE;
-	kvm_pfn_t pfn = pte_pfn(pte);
+	u64 size = (info->end - info->start) << PAGE_SHIFT;
+	kvm_pte_t kpte;
+	pte_t pte;
 
 	if (!kvm->arch.mmu.pgt)
 		return 0;
 
-	/*
-	 * We've moved a page around, probably through CoW, so let's treat it
-	 * just like a translation fault and clean the cache to the PoC.
-	 */
-	clean_dcache_guest_page(pfn, PAGE_SIZE);
-	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pfn);
-	return 0;
-}
-
-static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
-	pte_t pte;
-	kvm_pte_t kpte;
-
 	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
-	kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt, gpa);
+
+	kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt,
+					info->start << PAGE_SHIFT);
 	pte = __pte(kpte);
 	return pte_valid(pte) && pte_young(pte);
 }
 
-static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
-	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
-	return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt, gpa);
-}
-
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
-{
-	if (!kvm->arch.mmu.pgt)
-		return 0;
-
-	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
-}
-
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_info *info)
 {
 	if (!kvm->arch.mmu.pgt)
 		return 0;
 
-	return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
-				 kvm_test_age_hva_handler, NULL);
+	return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
+					   info->start << PAGE_SHIFT);
 }
 
 phys_addr_t kvm_mmu_get_httbr(void)
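
For reference, every converted callback above takes the same per-range descriptor instead of raw host virtual addresses. Below is a minimal sketch of its shape, inferred only from the accesses in this diff (info->start, info->end, info->pte, info->may_block); the authoritative definition lives in the generic KVM headers and may name or order the fields differently.

/*
 * Hypothetical sketch of the gfn-based notifier argument as used above.
 * Field names are taken from the usage in the diff, not from the real
 * generic KVM definition, which may differ.
 */
struct kvm_gfn_info {
	gfn_t	start;		/* first guest frame number in the range */
	gfn_t	end;		/* exclusive end gfn, so (end - start) pages */
	pte_t	pte;		/* new host PTE, only meaningful for ->change_pte() */
	bool	may_block;	/* whether the handler is allowed to sleep */
};

With the range already expressed in gfns, each handler recovers the IPA with info->start << PAGE_SHIFT and calls straight into the stage-2 page-table code, which is what makes the per-memslot walk in handle_hva_to_gpa() redundant.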