@@ -400,22 +400,16 @@ static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
 	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
 	int i;
-	u64 *pdpt;
 	int ret;
-	struct page *page;
 	u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
 
 	mutex_lock(&vcpu->kvm->lock);
-	page = gfn_to_page(vcpu->kvm, pdpt_gfn);
-	if (!page) {
+	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
+				  offset * sizeof(u64), sizeof(pdpte));
+	if (ret < 0) {
 		ret = 0;
 		goto out;
 	}
-
-	pdpt = kmap_atomic(page, KM_USER0);
-	memcpy(pdpte, pdpt+offset, sizeof(pdpte));
-	kunmap_atomic(pdpt, KM_USER0);
-
 	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
 		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
 			ret = 0;
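
In the load_pdptrs() hunk above, `offset` is an index in u64 entries (the old code added it to a `u64 *pdpt`), so the replacement call converts it to the byte offset that kvm_read_guest_page() expects via `offset * sizeof(u64)`. A minimal user-space sketch of that arithmetic; the CR3 value is made up for illustration and PAGE_SIZE is assumed to be the usual 4096:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* Hypothetical PAE CR3: the PDPT is 32-byte aligned within its page. */
	uint64_t cr3 = 0x12345000UL | 0x0e0;	/* illustration only */

	/* Index of the first PDPTE in u64 units, as computed in load_pdptrs(). */
	unsigned offset = ((cr3 & (PAGE_SIZE - 1)) >> 5) << 2;

	/* Byte offset handed to kvm_read_guest_page() by the new code. */
	unsigned byte_offset = offset * sizeof(uint64_t);

	/* Both forms point at the same 32-byte-aligned PDPT inside the page. */
	assert(byte_offset == (cr3 & (PAGE_SIZE - 1) & ~31UL));

	printf("u64 index %u -> byte offset %u within the page\n",
	       offset, byte_offset);
	return 0;
}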
@@ -962,6 +956,127 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
+static int next_segment(unsigned long len, int offset)
+{
+	if (len > PAGE_SIZE - offset)
+		return PAGE_SIZE - offset;
+	else
+		return len;
+}
+
+int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
+			int len)
+{
+	void *page_virt;
+	struct page *page;
+
+	page = gfn_to_page(kvm, gfn);
+	if (!page)
+		return -EFAULT;
+	page_virt = kmap_atomic(page, KM_USER0);
+
+	memcpy(data, page_virt + offset, len);
+
+	kunmap_atomic(page_virt, KM_USER0);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_read_guest_page);
+
+int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
+{
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	int seg;
+	int offset = offset_in_page(gpa);
+	int ret;
+
+	while ((seg = next_segment(len, offset)) != 0) {
+		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
+		if (ret < 0)
+			return ret;
+		offset = 0;
+		len -= seg;
+		data += seg;
+		++gfn;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_read_guest);
+
+int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
+			 int offset, int len)
+{
+	void *page_virt;
+	struct page *page;
+
+	page = gfn_to_page(kvm, gfn);
+	if (!page)
+		return -EFAULT;
+	page_virt = kmap_atomic(page, KM_USER0);
+
+	memcpy(page_virt + offset, data, len);
+
+	kunmap_atomic(page_virt, KM_USER0);
+	mark_page_dirty(kvm, gfn);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_write_guest_page);
+
+int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
+		    unsigned long len)
+{
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	int seg;
+	int offset = offset_in_page(gpa);
+	int ret;
+
+	while ((seg = next_segment(len, offset)) != 0) {
+		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
+		if (ret < 0)
+			return ret;
+		offset = 0;
+		len -= seg;
+		data += seg;
+		++gfn;
+	}
+	return 0;
+}
+
+int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
+{
+	void *page_virt;
+	struct page *page;
+
+	page = gfn_to_page(kvm, gfn);
+	if (!page)
+		return -EFAULT;
+	page_virt = kmap_atomic(page, KM_USER0);
+
+	memset(page_virt + offset, 0, len);
+
+	kunmap_atomic(page_virt, KM_USER0);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
+
+int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
+{
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	int seg;
+	int offset = offset_in_page(gpa);
+	int ret;
+
+	while ((seg = next_segment(len, offset)) != 0) {
+		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
+		if (ret < 0)
+			return ret;
+		offset = 0;
+		len -= seg;
+		++gfn;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_clear_guest);
+
 /* WARNING: Does not work on aliased pages. */
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 {
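
All of the gpa-based helpers added above share one pattern: next_segment() clamps each chunk so it never crosses a page boundary, and the loop then advances the gfn and resets the offset until len is exhausted. A minimal user-space sketch of that splitting loop; PAGE_SIZE/PAGE_SHIFT and the sample address below are stand-ins for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_SHIFT 12
#define offset_in_page(p) ((unsigned long)(p) & (PAGE_SIZE - 1))

/* Same clamping as the kernel helper: never cross a page boundary. */
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int main(void)
{
	/* Hypothetical guest-physical address and length spanning several pages. */
	unsigned long gpa = 0x1ff0, len = 0x2050;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);
	int seg;

	/* Mirrors the loop in kvm_read_guest()/kvm_write_guest(). */
	while ((seg = next_segment(len, offset)) != 0) {
		printf("access gfn %#lx, offset %#x, len %#x\n", gfn, offset, seg);
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}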
@@ -988,21 +1103,13 @@ int emulator_read_std(unsigned long addr,
 	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
 	unsigned offset = addr & (PAGE_SIZE-1);
 	unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
-	unsigned long pfn;
-	struct page *page;
-	void *page_virt;
+	int ret;
 
 	if (gpa == UNMAPPED_GVA)
 		return X86EMUL_PROPAGATE_FAULT;
-	pfn = gpa >> PAGE_SHIFT;
-	page = gfn_to_page(vcpu->kvm, pfn);
-	if (!page)
+	ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
+	if (ret < 0)
 		return X86EMUL_UNHANDLEABLE;
-	page_virt = kmap_atomic(page, KM_USER0);
-
-	memcpy(data, page_virt + offset, tocopy);
-
-	kunmap_atomic(page_virt, KM_USER0);
 
 	bytes -= tocopy;
 	data += tocopy;
@@ -1095,19 +1202,12 @@ static int emulator_read_emulated(unsigned long addr,
 static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 			       const void *val, int bytes)
 {
-	struct page *page;
-	void *virt;
+	int ret;
 
-	if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
-		return 0;
-	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-	if (!page)
+	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
+	if (ret < 0)
 		return 0;
-	mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
-	virt = kmap_atomic(page, KM_USER0);
 	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
-	memcpy(virt + offset_in_page(gpa), val, bytes);
-	kunmap_atomic(virt, KM_USER0);
 	return 1;
 }
 
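
One consequence of the emulator_write_phys() hunk above: the removed check rejected writes that crossed a page boundary, whereas kvm_write_guest() splits such a write into per-page writes. A minimal user-space mock of that behaviour; the two-page guest_mem array and the write_guest* helpers below are illustrations modelled on the patch, not KVM code:

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define PAGE_SHIFT 12
#define offset_in_page(p) ((unsigned long)(p) & (PAGE_SIZE - 1))

/* Two pages of pretend guest memory (illustration only). */
static unsigned char guest_mem[2 * PAGE_SIZE];

static int next_segment(unsigned long len, int offset)
{
	return len > PAGE_SIZE - offset ? PAGE_SIZE - offset : len;
}

/* Stand-in for kvm_write_guest_page(): copy into one page of the mock. */
static int write_guest_page(unsigned long gfn, const void *data, int offset, int len)
{
	if (gfn >= 2)
		return -1;	/* unmapped gfn, analogous to the -EFAULT path */
	memcpy(guest_mem + gfn * PAGE_SIZE + offset, data, len);
	return 0;
}

/* Same structure as kvm_write_guest(): split at page boundaries. */
static int write_guest(unsigned long gpa, const void *data, unsigned long len)
{
	unsigned long gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);
	int seg, ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = write_guest_page(gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data = (const char *)data + seg;
		++gfn;
	}
	return 0;
}

int main(void)
{
	const char val[8] = "ABCDEFG";	/* 7 characters plus NUL */

	/* A write straddling the first page boundary now succeeds. */
	assert(write_guest(PAGE_SIZE - 4, val, sizeof(val)) == 0);
	assert(memcmp(guest_mem + PAGE_SIZE - 4, "ABCD", 4) == 0);
	assert(memcmp(guest_mem + PAGE_SIZE, "EFG", 4) == 0);
	printf("cross-page write split into two page writes\n");
	return 0;
}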