@@ -23,10 +23,16 @@
 #include <asm/octeon/octeon.h>
 
 #ifdef CONFIG_PCI
+#include <linux/pci.h>
 #include <asm/octeon/pci-octeon.h>
 #include <asm/octeon/cvmx-npi-defs.h>
 #include <asm/octeon/cvmx-pci-defs.h>
 
+struct octeon_dma_map_ops {
+	dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
+	phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
+};
+
 static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
 {
 	if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
@@ -60,6 +66,11 @@ static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
 	return daddr;
 }
 
+static const struct octeon_dma_map_ops octeon_gen1_ops = {
+	.phys_to_dma = octeon_gen1_phys_to_dma,
+	.dma_to_phys = octeon_gen1_dma_to_phys,
+};
+
 static dma_addr_t octeon_gen2_phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
 	return octeon_hole_phys_to_dma(paddr);
@@ -70,6 +81,11 @@ static phys_addr_t octeon_gen2_dma_to_phys(struct device *dev, dma_addr_t daddr)
 	return octeon_hole_dma_to_phys(daddr);
 }
 
+static const struct octeon_dma_map_ops octeon_gen2_ops = {
+	.phys_to_dma = octeon_gen2_phys_to_dma,
+	.dma_to_phys = octeon_gen2_dma_to_phys,
+};
+
 static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
 	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
@@ -92,6 +108,11 @@ static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
 	return daddr;
 }
 
+static const struct octeon_dma_map_ops octeon_big_ops = {
+	.phys_to_dma = octeon_big_phys_to_dma,
+	.dma_to_phys = octeon_big_dma_to_phys,
+};
+
 static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
 					   phys_addr_t paddr)
 {
@@ -120,6 +141,32 @@ static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
 	return daddr;
 }
 
+static const struct octeon_dma_map_ops octeon_small_ops = {
+	.phys_to_dma = octeon_small_phys_to_dma,
+	.dma_to_phys = octeon_small_dma_to_phys,
+};
+
+static const struct octeon_dma_map_ops *octeon_pci_dma_ops;
+
+void __init octeon_pci_dma_init(void)
+{
+	switch (octeon_dma_bar_type) {
+	case OCTEON_DMA_BAR_TYPE_PCIE:
+		octeon_pci_dma_ops = &octeon_gen1_ops;
+		break;
+	case OCTEON_DMA_BAR_TYPE_PCIE2:
+		octeon_pci_dma_ops = &octeon_gen2_ops;
+		break;
+	case OCTEON_DMA_BAR_TYPE_BIG:
+		octeon_pci_dma_ops = &octeon_big_ops;
+		break;
+	case OCTEON_DMA_BAR_TYPE_SMALL:
+		octeon_pci_dma_ops = &octeon_small_ops;
+		break;
+	default:
+		BUG();
+	}
+}
 #endif /* CONFIG_PCI */
 
 static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
@@ -165,57 +212,37 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
 	return ret;
 }
 
-static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
-	return paddr;
-}
-
-static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
-	return daddr;
-}
-
-struct octeon_dma_map_ops {
-	const struct dma_map_ops dma_map_ops;
-	dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
-	phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
-};
-
 dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
-	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
-						      struct octeon_dma_map_ops,
-						      dma_map_ops);
-
-	return ops->phys_to_dma(dev, paddr);
+#ifdef CONFIG_PCI
+	if (dev && dev_is_pci(dev))
+		return octeon_pci_dma_ops->phys_to_dma(dev, paddr);
+#endif
+	return paddr;
 }
 
 phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
-	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
-						      struct octeon_dma_map_ops,
-						      dma_map_ops);
-
-	return ops->dma_to_phys(dev, daddr);
+#ifdef CONFIG_PCI
+	if (dev && dev_is_pci(dev))
+		return octeon_pci_dma_ops->dma_to_phys(dev, daddr);
+#endif
+	return daddr;
 }
 
-static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
-	.dma_map_ops = {
-		.alloc = octeon_dma_alloc_coherent,
-		.free = swiotlb_free,
-		.map_page = octeon_dma_map_page,
-		.unmap_page = swiotlb_unmap_page,
-		.map_sg = octeon_dma_map_sg,
-		.unmap_sg = swiotlb_unmap_sg_attrs,
-		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
-		.sync_single_for_device = octeon_dma_sync_single_for_device,
-		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
-		.sync_sg_for_device = octeon_dma_sync_sg_for_device,
-		.mapping_error = swiotlb_dma_mapping_error,
-		.dma_supported = swiotlb_dma_supported
-	},
-	.phys_to_dma = octeon_unity_phys_to_dma,
-	.dma_to_phys = octeon_unity_dma_to_phys
+static const struct dma_map_ops octeon_swiotlb_ops = {
+	.alloc = octeon_dma_alloc_coherent,
+	.free = swiotlb_free,
+	.map_page = octeon_dma_map_page,
+	.unmap_page = swiotlb_unmap_page,
+	.map_sg = octeon_dma_map_sg,
+	.unmap_sg = swiotlb_unmap_sg_attrs,
+	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+	.sync_single_for_device = octeon_dma_sync_single_for_device,
+	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device = octeon_dma_sync_sg_for_device,
+	.mapping_error = swiotlb_dma_mapping_error,
+	.dma_supported = swiotlb_dma_supported
 };
 
 char *octeon_swiotlb;
@@ -281,51 +308,5 @@ void __init plat_swiotlb_setup(void)
 	if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
 		panic("Cannot allocate SWIOTLB buffer");
 
-	mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
+	mips_dma_map_ops = &octeon_swiotlb_ops;
 }
-
-#ifdef CONFIG_PCI
-static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
-	.dma_map_ops = {
-		.alloc = octeon_dma_alloc_coherent,
-		.free = swiotlb_free,
-		.map_page = octeon_dma_map_page,
-		.unmap_page = swiotlb_unmap_page,
-		.map_sg = octeon_dma_map_sg,
-		.unmap_sg = swiotlb_unmap_sg_attrs,
-		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
-		.sync_single_for_device = octeon_dma_sync_single_for_device,
-		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
-		.sync_sg_for_device = octeon_dma_sync_sg_for_device,
-		.mapping_error = swiotlb_dma_mapping_error,
-		.dma_supported = swiotlb_dma_supported
-	},
-};
-
-const struct dma_map_ops *octeon_pci_dma_map_ops;
-
-void __init octeon_pci_dma_init(void)
-{
-	switch (octeon_dma_bar_type) {
-	case OCTEON_DMA_BAR_TYPE_PCIE2:
-		_octeon_pci_dma_map_ops.phys_to_dma = octeon_gen2_phys_to_dma;
-		_octeon_pci_dma_map_ops.dma_to_phys = octeon_gen2_dma_to_phys;
-		break;
-	case OCTEON_DMA_BAR_TYPE_PCIE:
-		_octeon_pci_dma_map_ops.phys_to_dma = octeon_gen1_phys_to_dma;
-		_octeon_pci_dma_map_ops.dma_to_phys = octeon_gen1_dma_to_phys;
-		break;
-	case OCTEON_DMA_BAR_TYPE_BIG:
-		_octeon_pci_dma_map_ops.phys_to_dma = octeon_big_phys_to_dma;
-		_octeon_pci_dma_map_ops.dma_to_phys = octeon_big_dma_to_phys;
-		break;
-	case OCTEON_DMA_BAR_TYPE_SMALL:
-		_octeon_pci_dma_map_ops.phys_to_dma = octeon_small_phys_to_dma;
-		_octeon_pci_dma_map_ops.dma_to_phys = octeon_small_dma_to_phys;
-		break;
-	default:
-		BUG();
-	}
-	octeon_pci_dma_map_ops = &_octeon_pci_dma_map_ops.dma_map_ops;
-}
-#endif /* CONFIG_PCI */
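
Note: the net effect of this diff is to drop the container_of() trick, which recovered the octeon_dma_map_ops wrapper from the device's installed dma_map_ops, in favour of const per-BAR-type ops tables selected once in octeon_pci_dma_init(), with __phys_to_dma()/__dma_to_phys() dispatching through that pointer for PCI devices and falling back to an identity mapping otherwise. Below is a minimal standalone sketch of that dispatch pattern; the demo_* names and the example offset are hypothetical illustrations, not part of the kernel code.

/*
 * Sketch of the pattern: one const ops table per address-translation
 * flavour, a pointer chosen once at init, and translation helpers with
 * an identity fallback. Compiles as a plain userspace C program.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
typedef uint64_t dma_addr_t;

struct demo_dma_map_ops {
	dma_addr_t (*phys_to_dma)(phys_addr_t paddr);
	phys_addr_t (*dma_to_phys)(dma_addr_t daddr);
};

/* One flavour: shift addresses by a fixed window offset (the offset is
 * invented for the demo; the real code handles hardware BAR windows). */
static dma_addr_t demo_gen2_phys_to_dma(phys_addr_t paddr)
{
	return paddr + 0x10000000ull;
}

static phys_addr_t demo_gen2_dma_to_phys(dma_addr_t daddr)
{
	return daddr - 0x10000000ull;
}

static const struct demo_dma_map_ops demo_gen2_ops = {
	.phys_to_dma = demo_gen2_phys_to_dma,
	.dma_to_phys = demo_gen2_dma_to_phys,
};

/* Selected once at "boot"; NULL means no PCI-specific translation. */
static const struct demo_dma_map_ops *demo_pci_dma_ops;

static void demo_pci_dma_init(void)
{
	/* The kernel code switches on octeon_dma_bar_type here. */
	demo_pci_dma_ops = &demo_gen2_ops;
}

static dma_addr_t demo_phys_to_dma(phys_addr_t paddr)
{
	if (demo_pci_dma_ops)
		return demo_pci_dma_ops->phys_to_dma(paddr);
	return paddr;	/* identity fallback, as for non-PCI devices */
}

int main(void)
{
	demo_pci_dma_init();
	dma_addr_t d = demo_phys_to_dma(0x1000);
	assert(demo_gen2_dma_to_phys(d) == 0x1000);	/* round-trip holds */
	printf("phys 0x1000 -> dma 0x%llx\n", (unsigned long long)d);
	return 0;
}

Pointing at const tables also removes the need to patch a writable structure at boot, which is exactly what the deleted _octeon_pci_dma_map_ops code had to do.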