@@ -488,22 +488,6 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	return 0;
 }
 
-static void iommu_flush_complete(struct protection_domain *domain)
-{
-	int i;
-
-	for (i = 0; i < amd_iommus_present; ++i) {
-		if (!domain->dev_iommu[i])
-			continue;
-
-		/*
-		 * Devices of this domain are behind this IOMMU
-		 * We need to wait for completion of all commands.
-		 */
-		iommu_completion_wait(amd_iommus[i]);
-	}
-}
-
 /*
  * Command send function for invalidating a device table entry
  */
@@ -526,8 +510,8 @@ static int iommu_flush_device(struct device *dev)
  * It invalidates a single PTE if the range to flush is within a single
  * page. Otherwise it flushes the whole TLB of the IOMMU.
  */
-static void __iommu_flush_pages(struct protection_domain *domain,
-				u64 address, size_t size, int pde)
+static void __domain_flush_pages(struct protection_domain *domain,
+				 u64 address, size_t size, int pde)
 {
 	struct iommu_cmd cmd;
 	int ret = 0, i;
@@ -548,29 +532,45 @@ static void __iommu_flush_pages(struct protection_domain *domain,
 	WARN_ON(ret);
 }
 
-static void iommu_flush_pages(struct protection_domain *domain,
-			      u64 address, size_t size)
+static void domain_flush_pages(struct protection_domain *domain,
+			       u64 address, size_t size)
 {
-	__iommu_flush_pages(domain, address, size, 0);
+	__domain_flush_pages(domain, address, size, 0);
 }
 
 /* Flush the whole IO/TLB for a given protection domain */
-static void iommu_flush_tlb(struct protection_domain *domain)
+static void domain_flush_tlb(struct protection_domain *domain)
 {
-	__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
+	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
 }
 
 /* Flush the whole IO/TLB for a given protection domain - including PDE */
-static void iommu_flush_tlb_pde(struct protection_domain *domain)
+static void domain_flush_tlb_pde(struct protection_domain *domain)
 {
-	__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
+	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
+}
+
+static void domain_flush_complete(struct protection_domain *domain)
+{
+	int i;
+
+	for (i = 0; i < amd_iommus_present; ++i) {
+		if (!domain->dev_iommu[i])
+			continue;
+
+		/*
+		 * Devices of this domain are behind this IOMMU
+		 * We need to wait for completion of all commands.
+		 */
+		iommu_completion_wait(amd_iommus[i]);
+	}
 }
 
 
 /*
  * This function flushes the DTEs for all devices in domain
  */
-static void iommu_flush_domain_devices(struct protection_domain *domain)
+static void domain_flush_devices(struct protection_domain *domain)
 {
 	struct iommu_dev_data *dev_data;
 	unsigned long flags;
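For reference, the waiting logic that the relocated domain_flush_complete() implements can be modelled in a few lines of plain C: only IOMMUs that actually have devices of the domain behind them (a non-zero dev_iommu[i] count) are waited on. This is a minimal user-space sketch; the toy_* names and wait_on_iommu() are illustrative stand-ins, not the real driver structures or API.

/* Simplified model of domain_flush_complete(): wait only on IOMMUs
 * that have devices of this domain behind them. The types below are
 * illustrative stand-ins, not the real amd_iommu structures. */
#include <stdio.h>

#define MAX_IOMMUS 32

struct toy_domain {
	/* per-IOMMU device count for this domain (dev_iommu[] analogue) */
	int dev_iommu[MAX_IOMMUS];
};

static int toy_iommus_present = 4;	/* amd_iommus_present analogue */

static void wait_on_iommu(int idx)	/* stands in for iommu_completion_wait() */
{
	printf("waiting for command completion on IOMMU %d\n", idx);
}

static void toy_domain_flush_complete(struct toy_domain *domain)
{
	int i;

	for (i = 0; i < toy_iommus_present; ++i) {
		if (!domain->dev_iommu[i])
			continue;	/* no devices of this domain behind IOMMU i */
		wait_on_iommu(i);
	}
}

int main(void)
{
	struct toy_domain d = { .dev_iommu = { [1] = 2, [3] = 1 } };

	toy_domain_flush_complete(&d);	/* waits on IOMMUs 1 and 3 only */
	return 0;
}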
@@ -591,8 +591,8 @@ static void iommu_flush_all_domain_devices(void)
 	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
 
 	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
-		iommu_flush_domain_devices(domain);
-		iommu_flush_complete(domain);
+		domain_flush_devices(domain);
+		domain_flush_complete(domain);
 	}
 
 	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
@@ -616,8 +616,8 @@ void amd_iommu_flush_all_domains(void)
 
 	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
 		spin_lock(&domain->lock);
-		iommu_flush_tlb_pde(domain);
-		iommu_flush_complete(domain);
+		domain_flush_tlb_pde(domain);
+		domain_flush_complete(domain);
 		spin_unlock(&domain->lock);
 	}
 
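The two hunks above also show the calling convention the rename keeps intact: queue the invalidation first, then wait for completion, with the domain lock held across both steps. A hedged user-space sketch of that pattern follows; the toy_* helpers are placeholders for the real command-queueing functions, not driver API.

#include <pthread.h>
#include <stdio.h>

/* Sketch of the flush-then-wait pattern used by amd_iommu_flush_all_domains():
 * queue the invalidation, then wait for completion, both under the lock. */
struct toy_domain {
	pthread_mutex_t lock;
};

static void toy_flush_tlb_pde(struct toy_domain *d)
{
	(void)d;
	printf("queue INVALIDATE_IOMMU_PAGES (all pages, PDE=1)\n");
}

static void toy_flush_complete(struct toy_domain *d)
{
	(void)d;
	printf("queue COMPLETION_WAIT and poll until it finishes\n");
}

static void toy_flush_domain(struct toy_domain *d)
{
	pthread_mutex_lock(&d->lock);
	toy_flush_tlb_pde(d);	/* invalidate cached translations and PDEs */
	toy_flush_complete(d);	/* don't return until the IOMMU is done */
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	struct toy_domain d = { .lock = PTHREAD_MUTEX_INITIALIZER };

	toy_flush_domain(&d);
	return 0;
}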
@@ -1480,7 +1480,7 @@ static int attach_device(struct device *dev,
 	 * left the caches in the IOMMU dirty. So we have to flush
 	 * here to evict all dirty stuff.
 	 */
-	iommu_flush_tlb_pde(domain);
+	domain_flush_tlb_pde(domain);
 
 	return ret;
 }
@@ -1693,8 +1693,9 @@ static void update_domain(struct protection_domain *domain)
 		return;
 
 	update_device_table(domain);
-	iommu_flush_domain_devices(domain);
-	iommu_flush_tlb_pde(domain);
+
+	domain_flush_devices(domain);
+	domain_flush_tlb_pde(domain);
 
 	domain->updated = false;
 }
@@ -1853,10 +1854,10 @@ static dma_addr_t __map_single(struct device *dev,
 	ADD_STATS_COUNTER(alloced_io_mem, size);
 
 	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
-		iommu_flush_tlb(&dma_dom->domain);
+		domain_flush_tlb(&dma_dom->domain);
 		dma_dom->need_flush = false;
 	} else if (unlikely(amd_iommu_np_cache))
-		iommu_flush_pages(&dma_dom->domain, address, size);
+		domain_flush_pages(&dma_dom->domain, address, size);
 
 out:
 	return address;
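The __map_single() hunk above preserves the existing lazy-flush policy under the new names: a pending need_flush with per-unmap flushing disabled triggers one whole-TLB flush, while IOMMUs that cache non-present entries (amd_iommu_np_cache) get a targeted flush of just the newly mapped range. Below is a compact restatement of that decision as a sketch, with the driver globals reduced to plain booleans passed in as parameters.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative restatement of the flush decision in __map_single();
 * the booleans stand in for dma_dom->need_flush, amd_iommu_unmap_flush
 * and amd_iommu_np_cache. */
enum flush_action { FLUSH_NONE, FLUSH_WHOLE_TLB, FLUSH_RANGE };

static enum flush_action map_flush_policy(bool need_flush,
					  bool unmap_flush,
					  bool np_cache)
{
	if (need_flush && !unmap_flush)
		return FLUSH_WHOLE_TLB;	/* domain_flush_tlb()   */
	if (np_cache)
		return FLUSH_RANGE;	/* domain_flush_pages() */
	return FLUSH_NONE;
}

int main(void)
{
	printf("%d\n", map_flush_policy(true, false, false));	/* whole TLB  */
	printf("%d\n", map_flush_policy(false, false, true));	/* range only */
	return 0;
}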
@@ -1905,7 +1906,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
 	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
-		iommu_flush_pages(&dma_dom->domain, flush_addr, size);
+		domain_flush_pages(&dma_dom->domain, flush_addr, size);
 		dma_dom->need_flush = false;
 	}
 }
@@ -1941,7 +1942,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 	if (addr == DMA_ERROR_CODE)
 		goto out;
 
-	iommu_flush_complete(domain);
+	domain_flush_complete(domain);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1968,7 +1969,7 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	__unmap_single(domain->priv, dma_addr, size, dir);
 
-	iommu_flush_complete(domain);
+	domain_flush_complete(domain);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -2033,7 +2034,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 			goto unmap;
 	}
 
-	iommu_flush_complete(domain);
+	domain_flush_complete(domain);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -2079,7 +2080,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		s->dma_address = s->dma_length = 0;
 	}
 
-	iommu_flush_complete(domain);
+	domain_flush_complete(domain);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -2129,7 +2130,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 		goto out_free;
 	}
 
-	iommu_flush_complete(domain);
+	domain_flush_complete(domain);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 
@@ -2161,7 +2162,7 @@ static void free_coherent(struct device *dev, size_t size,
 
 	__unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
-	iommu_flush_complete(domain);
+	domain_flush_complete(domain);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 
@@ -2471,7 +2472,7 @@ static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 	unmap_size = iommu_unmap_page(domain, iova, page_size);
 	mutex_unlock(&domain->api_lock);
 
-	iommu_flush_tlb_pde(domain);
+	domain_flush_tlb_pde(domain);
 
 	return get_order(unmap_size);
 }