@@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
 	phys_addr_t		phys;
 };
 
+enum iommu_dma_cookie_type {
+	IOMMU_DMA_IOVA_COOKIE,
+	IOMMU_DMA_MSI_COOKIE,
+};
+
 struct iommu_dma_cookie {
-	struct iova_domain	iovad;
-	struct list_head	msi_page_list;
-	spinlock_t		msi_lock;
+	enum iommu_dma_cookie_type	type;
+	union {
+		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
+		struct iova_domain	iovad;
+		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
+		dma_addr_t		msi_iova;
+	};
+	struct list_head		msi_page_list;
+	spinlock_t			msi_lock;
 };
 
+static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
+{
+	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
+		return cookie->iovad.granule;
+	return PAGE_SIZE;
+}
+
 static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
 {
-	return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+
+	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
+		return &cookie->iovad;
+	return NULL;
+}
+
+static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
+{
+	struct iommu_dma_cookie *cookie;
+
+	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	if (cookie) {
+		spin_lock_init(&cookie->msi_lock);
+		INIT_LIST_HEAD(&cookie->msi_page_list);
+		cookie->type = type;
+	}
+	return cookie;
 }
 
 int iommu_dma_init(void)
@@ -61,26 +96,54 @@ int iommu_dma_init(void)
  * callback when domain->type == IOMMU_DOMAIN_DMA.
  */
 int iommu_get_dma_cookie(struct iommu_domain *domain)
+{
+	if (domain->iova_cookie)
+		return -EEXIST;
+
+	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
+	if (!domain->iova_cookie)
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL(iommu_get_dma_cookie);
+
+/**
+ * iommu_get_msi_cookie - Acquire just MSI remapping resources
+ * @domain: IOMMU domain to prepare
+ * @base: Start address of IOVA region for MSI mappings
+ *
+ * Users who manage their own IOVA allocation and do not want DMA API support,
+ * but would still like to take advantage of automatic MSI remapping, can use
+ * this to initialise their own domain appropriately. Users should reserve a
+ * contiguous IOVA region, starting at @base, large enough to accommodate the
+ * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
+ * used by the devices attached to @domain.
+ */
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
 {
 	struct iommu_dma_cookie *cookie;
 
+	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+		return -EINVAL;
+
 	if (domain->iova_cookie)
 		return -EEXIST;
 
-	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
 	if (!cookie)
 		return -ENOMEM;
 
-	spin_lock_init(&cookie->msi_lock);
-	INIT_LIST_HEAD(&cookie->msi_page_list);
+	cookie->msi_iova = base;
 	domain->iova_cookie = cookie;
 	return 0;
 }
-EXPORT_SYMBOL(iommu_get_dma_cookie);
+EXPORT_SYMBOL(iommu_get_msi_cookie);
 
 /**
  * iommu_put_dma_cookie - Release a domain's DMA mapping resources
- * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
+ * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
+ *          iommu_get_msi_cookie()
  *
  * IOMMU drivers should normally call this from their domain_free callback.
  */
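
[Aside, not part of the patch: a minimal caller-side sketch of the new entry point, under the assumptions the kernel-doc above spells out. The example_* functions and EXAMPLE_MSI_IOVA_* constants are invented for illustration; a real driver would carve the window out of whatever IOVA space it already manages.]

    /* Illustrative only; names are hypothetical. */
    #include <linux/dma-iommu.h>
    #include <linux/iommu.h>

    #define EXAMPLE_MSI_IOVA_BASE	0x8000000UL
    #define EXAMPLE_MSI_IOVA_LENGTH	0x100000UL	/* 256 PAGE_SIZE slots at 4K */

    static int example_prepare_msi_domain(struct iommu_domain *domain)
    {
    	/* The new helper only accepts caller-managed domains. */
    	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
    		return -EINVAL;

    	/*
    	 * The caller must have reserved [base, base + length) in its own
    	 * IOVA allocator; the cookie then hands out PAGE_SIZE slots from
    	 * that window, one per distinct MSI doorbell.
    	 */
    	return iommu_get_msi_cookie(domain, EXAMPLE_MSI_IOVA_BASE);
    }

    static void example_free_msi_domain(struct iommu_domain *domain)
    {
    	/* Tears down the cookie and any doorbell mappings it created. */
    	iommu_put_dma_cookie(domain);
    }
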
@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
 	if (!cookie)
 		return;
 
-	if (cookie->iovad.granule)
+	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
 		put_iova_domain(&cookie->iovad);
 
 	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
@@ -137,11 +200,12 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
 int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 		u64 size, struct device *dev)
 {
-	struct iova_domain *iovad = cookie_iovad(domain);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
 	unsigned long order, base_pfn, end_pfn;
 
-	if (!iovad)
-		return -ENODEV;
+	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
+		return -EINVAL;
 
 	/* Use the smallest supported page size for IOVA granularity */
 	order = __ffs(domain->pgsize_bitmap);
@@ -662,11 +726,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 {
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iommu_dma_msi_page *msi_page;
-	struct iova_domain *iovad = &cookie->iovad;
+	struct iova_domain *iovad = cookie_iovad(domain);
 	struct iova *iova;
 	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+	size_t size = cookie_msi_granule(cookie);
 
-	msi_addr &= ~(phys_addr_t)iova_mask(iovad);
+	msi_addr &= ~(phys_addr_t)(size - 1);
 	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
 		if (msi_page->phys == msi_addr)
 			return msi_page;
@@ -675,21 +740,29 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	if (!msi_page)
 		return NULL;
 
-	iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
-	if (!iova)
-		goto out_free_page;
-
 	msi_page->phys = msi_addr;
-	msi_page->iova = iova_dma_addr(iovad, iova);
-	if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
+	if (iovad) {
+		iova = __alloc_iova(domain, size, dma_get_mask(dev));
+		if (!iova)
+			goto out_free_page;
+		msi_page->iova = iova_dma_addr(iovad, iova);
+	} else {
+		msi_page->iova = cookie->msi_iova;
+		cookie->msi_iova += size;
+	}
+
+	if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
 		goto out_free_iova;
 
 	INIT_LIST_HEAD(&msi_page->list);
 	list_add(&msi_page->list, &cookie->msi_page_list);
 	return msi_page;
 
 out_free_iova:
-	__free_iova(iovad, iova);
+	if (iovad)
+		__free_iova(iovad, iova);
+	else
+		cookie->msi_iova -= size;
 out_free_page:
 	kfree(msi_page);
 	return NULL;
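
[Aside, not part of the patch: the else-branch above is a plain bump allocator — each new doorbell consumes one granule-sized slot starting at the base passed to iommu_get_msi_cookie(), and out_free_iova simply rewinds the pointer when iommu_map() fails. A standalone toy model of the same arithmetic, with invented names, in userspace C:]

    /* Toy model of the MSI-cookie linear allocator; illustrative only. */
    #include <stdint.h>
    #include <stdio.h>

    #define GRANULE 0x1000ULL		/* PAGE_SIZE for an MSI-only cookie */

    struct toy_cookie {
    	uint64_t msi_iova;		/* next free slot, like cookie->msi_iova */
    };

    static uint64_t toy_alloc(struct toy_cookie *c)
    {
    	uint64_t iova = c->msi_iova;

    	c->msi_iova += GRANULE;		/* slots are handed out linearly... */
    	return iova;
    }

    static void toy_rewind(struct toy_cookie *c)
    {
    	c->msi_iova -= GRANULE;		/* ...and only the newest can be undone */
    }

    int main(void)
    {
    	struct toy_cookie c = { .msi_iova = 0x8000000ULL };

    	printf("doorbell 0 -> 0x%llx\n", (unsigned long long)toy_alloc(&c));
    	printf("doorbell 1 -> 0x%llx\n", (unsigned long long)toy_alloc(&c));
    	toy_rewind(&c);			/* as out_free_iova does on map failure */
    	printf("next slot  -> 0x%llx\n", (unsigned long long)c.msi_iova);
    	return 0;
    }
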
@@ -730,7 +803,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
 		msg->data = ~0U;
 	} else {
 		msg->address_hi = upper_32_bits(msi_page->iova);
-		msg->address_lo &= iova_mask(&cookie->iovad);
+		msg->address_lo &= cookie_msi_granule(cookie) - 1;
 		msg->address_lo += lower_32_bits(msi_page->iova);
 	}
 }
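
[Aside, not part of the patch: a worked example of the address rewrite in the hunk above, with invented values. For an MSI cookie the granule is PAGE_SIZE, so with 4K pages a doorbell whose low address word is 0x12345abc, remapped to IOVA 0x8000000, becomes (0x12345abc & 0xfff) + 0x8000000 = 0x8000abc: the byte offset within the page survives while the page itself is translated.]

    /* Worked example of the msg->address_lo rewrite; values invented. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t granule = 0x1000;		/* cookie_msi_granule(): PAGE_SIZE */
    	uint32_t address_lo = 0x12345abc;	/* doorbell physical, low word */
    	uint64_t iova = 0x8000000ULL;		/* msi_page->iova */

    	address_lo &= granule - 1;		/* keep page offset: 0xabc */
    	address_lo += (uint32_t)iova;		/* add remapped page base */
    	printf("0x%x\n", address_lo);		/* prints 0x8000abc */
    	return 0;
    }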