@@ -57,6 +57,8 @@ static const struct intel_gvt_ops *intel_gvt_ops;
 #define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
 #define VFIO_PCI_OFFSET_MASK	(((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
 
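+/*
+ * Layout of the VFIO EDID region: the control registers
+ * (struct vfio_region_gfx_edid) sit at the start of the region and the
+ * EDID blob itself is exposed at EDID_BLOB_OFFSET.
+ */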
+#define EDID_BLOB_OFFSET (PAGE_SIZE/2)
+
 #define OPREGION_SIGNATURE "IntelGraphicsMem"
 
 struct vfio_region;
@@ -76,6 +78,11 @@ struct vfio_region {
 	void *data;
 };
 
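+/* State backing an emulated VFIO EDID region for one vGPU display port. */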
+struct vfio_edid_region {
+	struct vfio_region_gfx_edid vfio_edid_regs;
+	void *edid_blob;
+};
+
 struct kvmgt_pgfn {
 	gfn_t gfn;
 	struct hlist_node hnode;
@@ -427,6 +434,111 @@ static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
 	.release = intel_vgpu_reg_release_opregion,
 };
 
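+/*
+ * Handle accesses to the EDID control registers at the start of the
+ * region. Only 4-byte accesses are accepted, and only link_state and
+ * edid_size may be written; bringing the link up validates the blob
+ * before triggering hotplug emulation.
+ */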
+static int handle_edid_regs(struct intel_vgpu *vgpu,
+			struct vfio_edid_region *region, char *buf,
+			size_t count, u16 offset, bool is_write)
+{
+	struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs;
+	unsigned int data;
+
+	if (offset + count > sizeof(*regs))
+		return -EINVAL;
+
+	if (count != 4)
+		return -EINVAL;
+
+	if (is_write) {
+		data = *((unsigned int *)buf);
+		switch (offset) {
+		case offsetof(struct vfio_region_gfx_edid, link_state):
+			if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
+				if (!drm_edid_block_valid(
+					(u8 *)region->edid_blob,
+					0,
+					true,
+					NULL)) {
+					gvt_vgpu_err("invalid EDID blob\n");
+					return -EINVAL;
+				}
+				intel_gvt_ops->emulate_hotplug(vgpu, true);
+			} else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
+				intel_gvt_ops->emulate_hotplug(vgpu, false);
+			else {
+				gvt_vgpu_err("invalid EDID link state %d\n",
+					regs->link_state);
+				return -EINVAL;
+			}
+			regs->link_state = data;
+			break;
+		case offsetof(struct vfio_region_gfx_edid, edid_size):
+			if (data > regs->edid_max_size) {
+				gvt_vgpu_err("EDID size is bigger than %d!\n",
+					regs->edid_max_size);
+				return -EINVAL;
+			}
+			regs->edid_size = data;
+			break;
+		default:
+			/* read-only regs */
+			gvt_vgpu_err("write read-only EDID region at offset %d\n",
+				offset);
+			return -EPERM;
+		}
+	} else {
+		memcpy(buf, (char *)regs + offset, count);
+	}
+
+	return count;
+}
+
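+/* Copy data between the caller's buffer and the EDID blob, bounded by edid_size. */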
+static int handle_edid_blob(struct vfio_edid_region *region, char *buf,
+			size_t count, u16 offset, bool is_write)
+{
+	if (offset + count > region->vfio_edid_regs.edid_size)
+		return -EINVAL;
+
+	if (is_write)
+		memcpy(region->edid_blob + offset, buf, count);
+	else
+		memcpy(buf, region->edid_blob + offset, count);
+
+	return count;
+}
+
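+/*
+ * rw handler for the EDID region: offsets below edid_offset access the
+ * control registers, everything else is routed to the EDID blob at
+ * EDID_BLOB_OFFSET.
+ */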
+static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
+		size_t count, loff_t *ppos, bool iswrite)
+{
+	int ret;
+	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
+			VFIO_PCI_NUM_REGIONS;
+	struct vfio_edid_region *region =
+		(struct vfio_edid_region *)vgpu->vdev.region[i].data;
+	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+	if (pos < region->vfio_edid_regs.edid_offset) {
+		ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
+	} else {
+		pos -= EDID_BLOB_OFFSET;
+		ret = handle_edid_blob(region, buf, count, pos, iswrite);
+	}
+
+	if (ret < 0)
+		gvt_vgpu_err("failed to access EDID region\n");
+
+	return ret;
+}
+
+static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
+		struct vfio_region *region)
+{
+	kfree(region->data);
+}
+
+static const struct intel_vgpu_regops intel_vgpu_regops_edid = {
+	.rw = intel_vgpu_reg_rw_edid,
+	.release = intel_vgpu_reg_release_edid,
+};
+
 static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
 		unsigned int type, unsigned int subtype,
 		const struct intel_vgpu_regops *ops,
@@ -493,6 +605,36 @@ static int kvmgt_set_opregion(void *p_vgpu)
 	return ret;
 }
 
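+/*
+ * Allocate the vfio_edid_region for a vGPU port and register it as a
+ * VFIO_REGION_SUBTYPE_GFX_EDID device region so userspace can provide
+ * the EDID and drive the link state.
+ */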
+static int kvmgt_set_edid(void *p_vgpu, int port_num)
+{
+	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+	struct vfio_edid_region *base;
+	int ret;
+
+	base = kzalloc(sizeof(*base), GFP_KERNEL);
+	if (!base)
+		return -ENOMEM;
+
+	/* TODO: Add multi-port and EDID extension block support */
+	base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET;
+	base->vfio_edid_regs.edid_max_size = EDID_SIZE;
+	base->vfio_edid_regs.edid_size = EDID_SIZE;
+	base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id);
+	base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id);
+	base->edid_blob = port->edid->edid_block;
+
+	ret = intel_vgpu_register_reg(vgpu,
+			VFIO_REGION_TYPE_GFX,
+			VFIO_REGION_SUBTYPE_GFX_EDID,
+			&intel_vgpu_regops_edid, EDID_SIZE,
+			VFIO_REGION_INFO_FLAG_READ |
+			VFIO_REGION_INFO_FLAG_WRITE |
+			VFIO_REGION_INFO_FLAG_CAPS, base);
+
+	return ret;
+}
+
 static void kvmgt_put_vfio_device(void *vgpu)
 {
 	if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
@@ -1874,6 +2016,7 @@ static struct intel_gvt_mpt kvmgt_mpt = {
 	.dma_map_guest_page = kvmgt_dma_map_guest_page,
 	.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
 	.set_opregion = kvmgt_set_opregion,
+	.set_edid = kvmgt_set_edid,
 	.get_vfio_device = kvmgt_get_vfio_device,
 	.put_vfio_device = kvmgt_put_vfio_device,
 	.is_valid_gfn = kvmgt_is_valid_gfn,