@@ -353,3 +353,128 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
 }
 }
 
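+/*
+ * This function reads some important data from the IOMMU PCI space and
+ * initializes the driver data structure with it. It reads the hardware
+ * capabilities and the first/last device entries.
+ */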
+static void __init init_iommu_from_pci(struct amd_iommu *iommu)
+{
+	int bus = PCI_BUS(iommu->devid);
+	int dev = PCI_SLOT(iommu->devid);
+	int fn  = PCI_FUNC(iommu->devid);
+	int cap_ptr = iommu->cap_ptr;
+	u32 range;
+
+	iommu->cap = read_pci_config(bus, dev, fn, cap_ptr + MMIO_CAP_HDR_OFFSET);
+
+	range = read_pci_config(bus, dev, fn, cap_ptr + MMIO_RANGE_OFFSET);
+	iommu->first_device = DEVID(MMIO_GET_BUS(range), MMIO_GET_FD(range));
+	iommu->last_device = DEVID(MMIO_GET_BUS(range), MMIO_GET_LD(range));
+}
+
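+/*
+ * Takes a pointer to an AMD IOMMU entry in the ACPI table and
+ * initializes the hardware and the driver data structures with it.
+ */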
+static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
+					struct ivhd_header *h)
+{
+	u8 *p = (u8 *)h;
+	u8 *end = p, flags = 0;
+	u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
+	u32 ext_flags = 0;
+	bool alias = false;
+	struct ivhd_entry *e;
+
+	/*
+	 * First set the recommended feature enable bits from ACPI
+	 * into the IOMMU control registers
+	 */
+	h->flags & IVHD_FLAG_HT_TUN_EN ?
+		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
+		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
+
+	h->flags & IVHD_FLAG_PASSPW_EN ?
+		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
+		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
+
+	h->flags & IVHD_FLAG_RESPASSPW_EN ?
+		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
+		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
+
+	h->flags & IVHD_FLAG_ISOC_EN ?
+		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
+		iommu_feature_disable(iommu, CONTROL_ISOC_EN);
+
+	/*
+	 * make IOMMU memory accesses cache coherent
+	 */
+	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+
+	/*
+	 * Done. Now parse the device entries
+	 */
+	p += sizeof(struct ivhd_header);
+	end += h->length;
+
+	while (p < end) {
+		e = (struct ivhd_entry *)p;
+		switch (e->type) {
+		case IVHD_DEV_ALL:
+			for (dev_i = iommu->first_device;
+			     dev_i <= iommu->last_device; ++dev_i)
+				set_dev_entry_from_acpi(dev_i, e->flags, 0);
+			break;
+		case IVHD_DEV_SELECT:
+			devid = e->devid;
+			set_dev_entry_from_acpi(devid, e->flags, 0);
+			break;
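+		/*
+		 * Range entries only record their start parameters here;
+		 * the device table entries are written when the matching
+		 * IVHD_DEV_RANGE_END entry is parsed below.
+		 */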
+		case IVHD_DEV_SELECT_RANGE_START:
+			devid_start = e->devid;
+			flags = e->flags;
+			ext_flags = 0;
+			alias = false;
+			break;
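+		/*
+		 * An alias entry means requests from this device id are
+		 * seen by the IOMMU under a different id, so the mapping
+		 * is recorded in the alias table as well.
+		 */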
+		case IVHD_DEV_ALIAS:
+			devid = e->devid;
+			devid_to = e->ext >> 8;
+			set_dev_entry_from_acpi(devid, e->flags, 0);
+			amd_iommu_alias_table[devid] = devid_to;
+			break;
+		case IVHD_DEV_ALIAS_RANGE:
+			devid_start = e->devid;
+			flags = e->flags;
+			devid_to = e->ext >> 8;
+			ext_flags = 0;
+			alias = true;
+			break;
+		case IVHD_DEV_EXT_SELECT:
+			devid = e->devid;
+			set_dev_entry_from_acpi(devid, e->flags, e->ext);
+			break;
+		case IVHD_DEV_EXT_SELECT_RANGE:
+			devid_start = e->devid;
+			flags = e->flags;
+			ext_flags = e->ext;
+			alias = false;
+			break;
+		case IVHD_DEV_RANGE_END:
+			devid = e->devid;
+			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
+				if (alias)
+					amd_iommu_alias_table[dev_i] = devid_to;
+				set_dev_entry_from_acpi(
+						amd_iommu_alias_table[dev_i],
+						flags, ext_flags);
+			}
+			break;
+		default:
+			break;
+		}
+
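+		/*
+		 * Bits 7:6 of the entry type encode its length: 4 bytes
+		 * shifted left by that value, so 4, 8, 16 or 32 byte
+		 * entries are all stepped over correctly.
+		 */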
+		p += 0x04 << (e->type >> 6);
+	}
+}
+
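+/*
+ * Makes every device id in the range served by this IOMMU point back
+ * to it in the driver's lookup structures.
+ */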
+static int __init init_iommu_devices(struct amd_iommu *iommu)
+{
+	u16 i;
+
+	for (i = iommu->first_device; i <= iommu->last_device; ++i)
+		set_iommu_for_device(iommu, i);
+
+	return 0;
+}
+