@@ -115,11 +115,17 @@ struct event_lpi_map {
115
115
u16 * col_map ;
116
116
irq_hw_number_t lpi_base ;
117
117
int nr_lpis ;
118
+ struct mutex vlpi_lock ;
119
+ struct its_vm * vm ;
120
+ struct its_vlpi_map * vlpi_maps ;
121
+ int nr_vlpis ;
118
122
};
119
123
120
124
/*
121
- * The ITS view of a device - belongs to an ITS, a collection, owns an
122
- * interrupt translation table, and a list of interrupts.
125
+ * The ITS view of a device - belongs to an ITS, owns an interrupt
126
+ * translation table, and a list of interrupts. If some of its
127
+ * LPIs are injected into a guest (GICv4), the event_map.vm field
128
+ * indicates which one.
123
129
*/
124
130
struct its_device {
125
131
struct list_head entry ;
@@ -205,6 +211,21 @@ struct its_cmd_desc {
205
211
struct {
206
212
struct its_collection * col ;
207
213
} its_invall_cmd ;
214
+
215
+ struct {
216
+ struct its_vpe * vpe ;
217
+ struct its_device * dev ;
218
+ u32 virt_id ;
219
+ u32 event_id ;
220
+ bool db_enabled ;
221
+ } its_vmapti_cmd ;
222
+
223
+ struct {
224
+ struct its_vpe * vpe ;
225
+ struct its_device * dev ;
226
+ u32 event_id ;
227
+ bool db_enabled ;
228
+ } its_vmovi_cmd ;
208
229
};
209
230
};
210
231
@@ -221,6 +242,9 @@ struct its_cmd_block {
221
242
typedef struct its_collection * (* its_cmd_builder_t )(struct its_cmd_block * ,
222
243
struct its_cmd_desc * );
223
244
245
/*
 * GICv4 (VLPI) command builder: mirrors its_cmd_builder_t, but returns
 * the vPE the command targets (so the follow-up VSYNC knows where to
 * go) instead of a collection.
 */
typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *,
					      struct its_cmd_desc *);
224
248
static void its_mask_encode (u64 * raw_cmd , u64 val , int h , int l )
225
249
{
226
250
u64 mask = GENMASK_ULL (h , l );
@@ -273,6 +297,26 @@ static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
273
297
its_mask_encode (& cmd -> raw_cmd [2 ], col , 15 , 0 );
274
298
}
275
299
300
/* Encode the target vPE ID into bits [47:32] of command word 1. */
static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}
304
+
305
/* Encode the guest-visible (virtual) INTID into bits [31:0] of command word 2. */
static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}
309
+
310
/* Encode the physical doorbell INTID into bits [63:32] of command word 2. */
static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}
314
+
315
/* Encode the doorbell-valid flag into bit 0 of command word 2. */
static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}
319
+
276
320
static inline void its_fixup_cmd (struct its_cmd_block * cmd )
277
321
{
278
322
/* Let's fixup BE commands */
@@ -431,6 +475,50 @@ static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
431
475
return NULL ;
432
476
}
433
477
478
+ static struct its_vpe * its_build_vmapti_cmd (struct its_cmd_block * cmd ,
479
+ struct its_cmd_desc * desc )
480
+ {
481
+ u32 db ;
482
+
483
+ if (desc -> its_vmapti_cmd .db_enabled )
484
+ db = desc -> its_vmapti_cmd .vpe -> vpe_db_lpi ;
485
+ else
486
+ db = 1023 ;
487
+
488
+ its_encode_cmd (cmd , GITS_CMD_VMAPTI );
489
+ its_encode_devid (cmd , desc -> its_vmapti_cmd .dev -> device_id );
490
+ its_encode_vpeid (cmd , desc -> its_vmapti_cmd .vpe -> vpe_id );
491
+ its_encode_event_id (cmd , desc -> its_vmapti_cmd .event_id );
492
+ its_encode_db_phys_id (cmd , db );
493
+ its_encode_virt_id (cmd , desc -> its_vmapti_cmd .virt_id );
494
+
495
+ its_fixup_cmd (cmd );
496
+
497
+ return desc -> its_vmapti_cmd .vpe ;
498
+ }
499
+
500
+ static struct its_vpe * its_build_vmovi_cmd (struct its_cmd_block * cmd ,
501
+ struct its_cmd_desc * desc )
502
+ {
503
+ u32 db ;
504
+
505
+ if (desc -> its_vmovi_cmd .db_enabled )
506
+ db = desc -> its_vmovi_cmd .vpe -> vpe_db_lpi ;
507
+ else
508
+ db = 1023 ;
509
+
510
+ its_encode_cmd (cmd , GITS_CMD_VMOVI );
511
+ its_encode_devid (cmd , desc -> its_vmovi_cmd .dev -> device_id );
512
+ its_encode_vpeid (cmd , desc -> its_vmovi_cmd .vpe -> vpe_id );
513
+ its_encode_event_id (cmd , desc -> its_vmovi_cmd .event_id );
514
+ its_encode_db_phys_id (cmd , db );
515
+ its_encode_db_valid (cmd , true);
516
+
517
+ its_fixup_cmd (cmd );
518
+
519
+ return desc -> its_vmovi_cmd .vpe ;
520
+ }
521
+
434
522
static u64 its_cmd_ptr_to_offset (struct its_node * its ,
435
523
struct its_cmd_block * ptr )
436
524
{
@@ -582,6 +670,18 @@ static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
582
670
static BUILD_SINGLE_CMD_FUNC (its_send_single_command , its_cmd_builder_t ,
583
671
struct its_collection , its_build_sync_cmd )
584
672
673
+ static void its_build_vsync_cmd (struct its_cmd_block * sync_cmd ,
674
+ struct its_vpe * sync_vpe )
675
+ {
676
+ its_encode_cmd (sync_cmd , GITS_CMD_VSYNC );
677
+ its_encode_vpeid (sync_cmd , sync_vpe -> vpe_id );
678
+
679
+ its_fixup_cmd (sync_cmd );
680
+ }
681
+
682
/*
 * Instantiate its_send_single_vcommand(): the GICv4 counterpart of
 * its_send_single_command(), queueing the built command and then a
 * VSYNC targeting the vPE returned by the builder.
 */
static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
			     struct its_vpe, its_build_vsync_cmd)
684
+
585
685
static void its_send_int (struct its_device * dev , u32 event_id )
586
686
{
587
687
struct its_cmd_desc desc ;
@@ -675,6 +775,33 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)
675
775
its_send_single_command (its , its_build_invall_cmd , & desc );
676
776
}
677
777
778
+ static void its_send_vmapti (struct its_device * dev , u32 id )
779
+ {
780
+ struct its_vlpi_map * map = & dev -> event_map .vlpi_maps [id ];
781
+ struct its_cmd_desc desc ;
782
+
783
+ desc .its_vmapti_cmd .vpe = map -> vpe ;
784
+ desc .its_vmapti_cmd .dev = dev ;
785
+ desc .its_vmapti_cmd .virt_id = map -> vintid ;
786
+ desc .its_vmapti_cmd .event_id = id ;
787
+ desc .its_vmapti_cmd .db_enabled = map -> db_enabled ;
788
+
789
+ its_send_single_vcommand (dev -> its , its_build_vmapti_cmd , & desc );
790
+ }
791
+
792
+ static void its_send_vmovi (struct its_device * dev , u32 id )
793
+ {
794
+ struct its_vlpi_map * map = & dev -> event_map .vlpi_maps [id ];
795
+ struct its_cmd_desc desc ;
796
+
797
+ desc .its_vmovi_cmd .vpe = map -> vpe ;
798
+ desc .its_vmovi_cmd .dev = dev ;
799
+ desc .its_vmovi_cmd .event_id = id ;
800
+ desc .its_vmovi_cmd .db_enabled = map -> db_enabled ;
801
+
802
+ its_send_single_vcommand (dev -> its , its_build_vmovi_cmd , & desc );
803
+ }
804
+
678
805
/*
679
806
* irqchip functions - assumes MSI, mostly.
680
807
*/
@@ -787,19 +914,135 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
787
914
return 0 ;
788
915
}
789
916
917
+ static int its_vlpi_map (struct irq_data * d , struct its_cmd_info * info )
918
+ {
919
+ struct its_device * its_dev = irq_data_get_irq_chip_data (d );
920
+ u32 event = its_get_event_id (d );
921
+ int ret = 0 ;
922
+
923
+ if (!info -> map )
924
+ return - EINVAL ;
925
+
926
+ mutex_lock (& its_dev -> event_map .vlpi_lock );
927
+
928
+ if (!its_dev -> event_map .vm ) {
929
+ struct its_vlpi_map * maps ;
930
+
931
+ maps = kzalloc (sizeof (* maps ) * its_dev -> event_map .nr_lpis ,
932
+ GFP_KERNEL );
933
+ if (!maps ) {
934
+ ret = - ENOMEM ;
935
+ goto out ;
936
+ }
937
+
938
+ its_dev -> event_map .vm = info -> map -> vm ;
939
+ its_dev -> event_map .vlpi_maps = maps ;
940
+ } else if (its_dev -> event_map .vm != info -> map -> vm ) {
941
+ ret = - EINVAL ;
942
+ goto out ;
943
+ }
944
+
945
+ /* Get our private copy of the mapping information */
946
+ its_dev -> event_map .vlpi_maps [event ] = * info -> map ;
947
+
948
+ if (irqd_is_forwarded_to_vcpu (d )) {
949
+ /* Already mapped, move it around */
950
+ its_send_vmovi (its_dev , event );
951
+ } else {
952
+ /* Drop the physical mapping */
953
+ its_send_discard (its_dev , event );
954
+
955
+ /* and install the virtual one */
956
+ its_send_vmapti (its_dev , event );
957
+ irqd_set_forwarded_to_vcpu (d );
958
+
959
+ /* Increment the number of VLPIs */
960
+ its_dev -> event_map .nr_vlpis ++ ;
961
+ }
962
+
963
+ out :
964
+ mutex_unlock (& its_dev -> event_map .vlpi_lock );
965
+ return ret ;
966
+ }
967
+
968
+ static int its_vlpi_get (struct irq_data * d , struct its_cmd_info * info )
969
+ {
970
+ struct its_device * its_dev = irq_data_get_irq_chip_data (d );
971
+ u32 event = its_get_event_id (d );
972
+ int ret = 0 ;
973
+
974
+ mutex_lock (& its_dev -> event_map .vlpi_lock );
975
+
976
+ if (!its_dev -> event_map .vm ||
977
+ !its_dev -> event_map .vlpi_maps [event ].vm ) {
978
+ ret = - EINVAL ;
979
+ goto out ;
980
+ }
981
+
982
+ /* Copy our mapping information to the incoming request */
983
+ * info -> map = its_dev -> event_map .vlpi_maps [event ];
984
+
985
+ out :
986
+ mutex_unlock (& its_dev -> event_map .vlpi_lock );
987
+ return ret ;
988
+ }
989
+
990
+ static int its_vlpi_unmap (struct irq_data * d )
991
+ {
992
+ struct its_device * its_dev = irq_data_get_irq_chip_data (d );
993
+ u32 event = its_get_event_id (d );
994
+ int ret = 0 ;
995
+
996
+ mutex_lock (& its_dev -> event_map .vlpi_lock );
997
+
998
+ if (!its_dev -> event_map .vm || !irqd_is_forwarded_to_vcpu (d )) {
999
+ ret = - EINVAL ;
1000
+ goto out ;
1001
+ }
1002
+
1003
+ /* Drop the virtual mapping */
1004
+ its_send_discard (its_dev , event );
1005
+
1006
+ /* and restore the physical one */
1007
+ irqd_clr_forwarded_to_vcpu (d );
1008
+ its_send_mapti (its_dev , d -> hwirq , event );
1009
+ lpi_update_config (d , 0xff , (LPI_PROP_DEFAULT_PRIO |
1010
+ LPI_PROP_ENABLED |
1011
+ LPI_PROP_GROUP1 ));
1012
+
1013
+ /*
1014
+ * Drop the refcount and make the device available again if
1015
+ * this was the last VLPI.
1016
+ */
1017
+ if (!-- its_dev -> event_map .nr_vlpis ) {
1018
+ its_dev -> event_map .vm = NULL ;
1019
+ kfree (its_dev -> event_map .vlpi_maps );
1020
+ }
1021
+
1022
+ out :
1023
+ mutex_unlock (& its_dev -> event_map .vlpi_lock );
1024
+ return ret ;
1025
+ }
1026
+
790
1027
static int its_irq_set_vcpu_affinity (struct irq_data * d , void * vcpu_info )
791
1028
{
792
1029
struct its_device * its_dev = irq_data_get_irq_chip_data (d );
793
1030
struct its_cmd_info * info = vcpu_info ;
794
1031
795
1032
/* Need a v4 ITS */
796
- if (!its_dev -> its -> is_v4 || ! info )
1033
+ if (!its_dev -> its -> is_v4 )
797
1034
return - EINVAL ;
798
1035
1036
+ /* Unmap request? */
1037
+ if (!info )
1038
+ return its_vlpi_unmap (d );
1039
+
799
1040
switch (info -> cmd_type ) {
800
1041
case MAP_VLPI :
1042
+ return its_vlpi_map (d , info );
801
1043
802
1044
case GET_VLPI :
1045
+ return its_vlpi_get (d , info );
803
1046
804
1047
case PROP_UPDATE_VLPI :
805
1048
case PROP_UPDATE_AND_INV_VLPI :
@@ -1518,6 +1761,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1518
1761
dev -> event_map .col_map = col_map ;
1519
1762
dev -> event_map .lpi_base = lpi_base ;
1520
1763
dev -> event_map .nr_lpis = nr_lpis ;
1764
+ mutex_init (& dev -> event_map .vlpi_lock );
1521
1765
dev -> device_id = dev_id ;
1522
1766
INIT_LIST_HEAD (& dev -> entry );
1523
1767
0 commit comments