@@ -95,6 +95,11 @@ struct vring_virtqueue {
 	/* How to notify other side. FIXME: commonalize hcalls! */
 	bool (*notify)(struct virtqueue *vq);
 
+	/* DMA, allocation, and size information */
+	bool we_own_ring;
+	size_t queue_size_in_bytes;
+	dma_addr_t queue_dma_addr;
+
 #ifdef DEBUG
 	/* They're supposed to lock for us. */
 	unsigned int in_use;
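
These fields record a ring that the core allocated on the device's behalf. Note that queue_dma_addr is a dma_addr_t rather than a phys_addr_t: under the DMA API the device-visible address need not equal the CPU physical address. A minimal sketch of that distinction, with illustrative variable names not taken from the patch:

	/* Sketch only: 'dma' (device-visible) and virt_to_phys(ring)
	 * (CPU physical) may differ, e.g. behind an IOMMU. */
	dma_addr_t dma;
	void *ring = dma_alloc_coherent(vdev->dev.parent, size, &dma,
					GFP_KERNEL);
	/* the CPU dereferences 'ring'; only 'dma' is handed to the device */
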
@@ -878,36 +883,31 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
 }
 EXPORT_SYMBOL_GPL(vring_interrupt);
 
-struct virtqueue *vring_new_virtqueue(unsigned int index,
-				      unsigned int num,
-				      unsigned int vring_align,
-				      struct virtio_device *vdev,
-				      bool weak_barriers,
-				      void *pages,
-				      bool (*notify)(struct virtqueue *),
-				      void (*callback)(struct virtqueue *),
-				      const char *name)
+struct virtqueue *__vring_new_virtqueue(unsigned int index,
+					struct vring vring,
+					struct virtio_device *vdev,
+					bool weak_barriers,
+					bool (*notify)(struct virtqueue *),
+					void (*callback)(struct virtqueue *),
+					const char *name)
 {
-	struct vring_virtqueue *vq;
 	unsigned int i;
+	struct vring_virtqueue *vq;
 
-	/* We assume num is a power of 2. */
-	if (num & (num - 1)) {
-		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
-		return NULL;
-	}
-
-	vq = kmalloc(sizeof(*vq) + num * sizeof(struct vring_desc_state),
+	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
 		     GFP_KERNEL);
 	if (!vq)
 		return NULL;
 
-	vring_init(&vq->vring, num, pages, vring_align);
+	vq->vring = vring;
 	vq->vq.callback = callback;
 	vq->vq.vdev = vdev;
 	vq->vq.name = name;
-	vq->vq.num_free = num;
+	vq->vq.num_free = vring.num;
 	vq->vq.index = index;
+	vq->we_own_ring = false;
+	vq->queue_dma_addr = 0;
+	vq->queue_size_in_bytes = 0;
 	vq->notify = notify;
 	vq->weak_barriers = weak_barriers;
 	vq->broken = false;
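
Since __vring_new_virtqueue() now takes a prebuilt struct vring, the memory layout is whatever vring_init() (include/uapi/linux/virtio_ring.h) produces: one contiguous allocation holding the descriptor table, then the avail ring, then the used ring on the next alignment boundary. A sketch, with illustrative names:

	struct vring vr;

	vring_init(&vr, num, queue, vring_align);
	/* vr.desc  == queue                                        */
	/* vr.avail == queue + num * sizeof(struct vring_desc)      */
	/* vr.used  == end of avail ring, rounded up to vring_align */

This contiguity is what lets the virtqueue_get_*_addr() helpers added below derive the avail and used DMA addresses as byte offsets from vring.desc.
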
@@ -932,18 +932,145 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
 
 	/* Put everything in free lists. */
 	vq->free_head = 0;
-	for (i = 0; i < num - 1; i++)
+	for (i = 0; i < vring.num - 1; i++)
 		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
-	memset(vq->desc_state, 0, num * sizeof(struct vring_desc_state));
+	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));
 
 	return &vq->vq;
 }
+EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
+
+static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
+			       dma_addr_t *dma_handle, gfp_t flag)
+{
+	if (vring_use_dma_api(vdev)) {
+		return dma_alloc_coherent(vdev->dev.parent, size,
+					  dma_handle, flag);
+	} else {
+		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
+		if (queue) {
+			phys_addr_t phys_addr = virt_to_phys(queue);
+			*dma_handle = (dma_addr_t)phys_addr;
+
+			/*
+			 * Sanity check: make sure we didn't truncate
+			 * the address.  The only arches I can find that
+			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
+			 * are certain non-highmem MIPS and x86
+			 * configurations, but these configurations
+			 * should never allocate physical pages above 32
+			 * bits, so this is fine.  Just in case, throw a
+			 * warning and abort if we end up with an
+			 * unrepresentable address.
+			 */
+			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
+				free_pages_exact(queue, PAGE_ALIGN(size));
+				return NULL;
+			}
+		}
+		return queue;
+	}
+}
+
+static void vring_free_queue(struct virtio_device *vdev, size_t size,
+			     void *queue, dma_addr_t dma_handle)
+{
+	if (vring_use_dma_api(vdev)) {
+		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
+	} else {
+		free_pages_exact(queue, PAGE_ALIGN(size));
+	}
+}
+
+struct virtqueue *vring_create_virtqueue(
+	unsigned int index,
+	unsigned int num,
+	unsigned int vring_align,
+	struct virtio_device *vdev,
+	bool weak_barriers,
+	bool may_reduce_num,
+	bool (*notify)(struct virtqueue *),
+	void (*callback)(struct virtqueue *),
+	const char *name)
+{
+	struct virtqueue *vq;
+	void *queue;
+	dma_addr_t dma_addr;
+	size_t queue_size_in_bytes;
+	struct vring vring;
+
+	/* We assume num is a power of 2. */
+	if (num & (num - 1)) {
+		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
+		return NULL;
+	}
+
+	/* TODO: allocate each queue chunk individually */
+	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
+		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
+					  &dma_addr,
+					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+		if (queue)
+			break;
+	}
+
+	if (!num)
+		return NULL;
+
+	if (!queue) {
+		/* Try to get a single page. You are my only hope! */
+		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
+					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
+	}
+	if (!queue)
+		return NULL;
+
+	queue_size_in_bytes = vring_size(num, vring_align);
+	vring_init(&vring, num, queue, vring_align);
+
+	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers,
+				   notify, callback, name);
+	if (!vq) {
+		vring_free_queue(vdev, queue_size_in_bytes, queue,
+				 dma_addr);
+		return NULL;
+	}
+
+	to_vvq(vq)->queue_dma_addr = dma_addr;
+	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
+	to_vvq(vq)->we_own_ring = true;
+
+	return vq;
+}
+EXPORT_SYMBOL_GPL(vring_create_virtqueue);
+
+struct virtqueue *vring_new_virtqueue(unsigned int index,
+				      unsigned int num,
+				      unsigned int vring_align,
+				      struct virtio_device *vdev,
+				      bool weak_barriers,
+				      void *pages,
+				      bool (*notify)(struct virtqueue *vq),
+				      void (*callback)(struct virtqueue *vq),
+				      const char *name)
+{
+	struct vring vring;
+	vring_init(&vring, num, pages, vring_align);
+	return __vring_new_virtqueue(index, vring, vdev, weak_barriers,
+				     notify, callback, name);
+}
 EXPORT_SYMBOL_GPL(vring_new_virtqueue);
 
-void vring_del_virtqueue(struct virtqueue *vq)
+void vring_del_virtqueue(struct virtqueue *_vq)
 {
-	list_del(&vq->list);
-	kfree(to_vvq(vq));
+	struct vring_virtqueue *vq = to_vvq(_vq);
+
+	if (vq->we_own_ring) {
+		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
+				 vq->vring.desc, vq->queue_dma_addr);
+	}
+	list_del(&_vq->list);
+	kfree(vq);
 }
 EXPORT_SYMBOL_GPL(vring_del_virtqueue);
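
To illustrate the new entry point: a transport might create a queue like the sketch below, relying on the size fallback and reading the result back through the getters added further down. The callback names and error handling here are hypothetical, not from this patch:

	struct virtqueue *vq;

	vq = vring_create_virtqueue(0, 256, PAGE_SIZE, vdev,
				    true,   /* weak_barriers */
				    true,   /* may_reduce_num: OK to shrink */
				    mydev_notify,    /* hypothetical */
				    mydev_callback,  /* hypothetical */
				    "requests");
	if (!vq)
		return -ENOMEM;

	/* The ring may be smaller than requested; read back the real size. */
	dev_info(&vdev->dev, "ring of %u entries at DMA 0x%llx\n",
		 virtqueue_get_vring(vq)->num,
		 (unsigned long long)virtqueue_get_desc_addr(vq));
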
@@ -1007,20 +1134,42 @@ void virtio_break_device(struct virtio_device *dev)
 }
 EXPORT_SYMBOL_GPL(virtio_break_device);
 
-void *virtqueue_get_avail(struct virtqueue *_vq)
+dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
-	return vq->vring.avail;
+	BUG_ON(!vq->we_own_ring);
+
+	return vq->queue_dma_addr;
 }
-EXPORT_SYMBOL_GPL(virtqueue_get_avail);
+EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
 
-void *virtqueue_get_used(struct virtqueue *_vq)
+dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
-	return vq->vring.used;
+	BUG_ON(!vq->we_own_ring);
+
+	return vq->queue_dma_addr +
+	       ((char *)vq->vring.avail - (char *)vq->vring.desc);
+}
+EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
+
+dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+
+	BUG_ON(!vq->we_own_ring);
+
+	return vq->queue_dma_addr +
+	       ((char *)vq->vring.used - (char *)vq->vring.desc);
+}
+EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
+
+const struct vring *virtqueue_get_vring(struct virtqueue *vq)
+{
+	return &to_vvq(vq)->vring;
 }
-EXPORT_SYMBOL_GPL(virtqueue_get_used);
+EXPORT_SYMBOL_GPL(virtqueue_get_vring);
 
 MODULE_LICENSE("GPL");
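
As a usage sketch, a transport that let the core own the ring can fetch the device-visible addresses directly; the register offsets and single 64-bit writeq() below are hypothetical (real transports such as virtio-pci modern split these into 32-bit lo/hi register pairs):

	/* base is a hypothetical void __iomem * mapping of device registers */
	writeq((u64)virtqueue_get_desc_addr(vq),  base + MYDEV_Q_DESC);
	writeq((u64)virtqueue_get_avail_addr(vq), base + MYDEV_Q_AVAIL);
	writeq((u64)virtqueue_get_used_addr(vq),  base + MYDEV_Q_USED);
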