@@ -23,7 +23,6 @@
 
 #include <linux/io.h>
 #include <linux/iommu.h>
-#include <linux/list.h>
 #include <linux/moduleparam.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -46,42 +45,27 @@
 #define GART_PAGE_MASK						\
 	(~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)
 
-struct gart_client {
-	struct device		*dev;
-	struct list_head	list;
-};
-
 struct gart_device {
 	void __iomem		*regs;
 	u32			*savedata;
 	u32			page_count;	/* total remappable size */
 	dma_addr_t		iovmm_base;	/* offset to vmm_area */
 	spinlock_t		pte_lock;	/* for pagetable */
-	struct list_head	client;
-	spinlock_t		client_lock;	/* for client list */
+	spinlock_t		dom_lock;	/* for active domain */
+	unsigned int		active_devices;	/* number of active devices */
 	struct iommu_domain	*active_domain;	/* current active domain */
 	struct device		*dev;
 
 	struct iommu_device	iommu;		/* IOMMU Core handle */
 };
 
-struct gart_domain {
-	struct iommu_domain domain;		/* generic domain handle */
-	struct gart_device *gart;		/* link to gart device */
-};
-
 static struct gart_device *gart_handle; /* unique for a system */
 
 static bool gart_debug;
 
 #define GART_PTE(_pfn)						\
 	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
 
-static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
-{
-	return container_of(dom, struct gart_domain, domain);
-}
-
 /*
  * Any interaction between any block on PPSB and a block on APB or AHB
  * must have these read-back to ensure the APB/AHB bus transaction is
@@ -170,125 +154,70 @@ static inline bool gart_iova_range_valid(struct gart_device *gart,
 static int gart_iommu_attach_dev(struct iommu_domain *domain,
 				 struct device *dev)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
 	struct gart_device *gart = gart_handle;
-	struct gart_client *client, *c;
-	int err = 0;
-
-	client = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!client)
-		return -ENOMEM;
-	client->dev = dev;
-
-	spin_lock(&gart->client_lock);
-	list_for_each_entry(c, &gart->client, list) {
-		if (c->dev == dev) {
-			dev_err(gart->dev,
-				"%s is already attached\n", dev_name(dev));
-			err = -EINVAL;
-			goto fail;
-		}
-	}
-	if (gart->active_domain && gart->active_domain != domain) {
-		dev_err(gart->dev, "Only one domain can be active at a time\n");
-		err = -EINVAL;
-		goto fail;
-	}
-	gart->active_domain = domain;
-	gart_domain->gart = gart;
-	list_add(&client->list, &gart->client);
-	spin_unlock(&gart->client_lock);
-	dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
-	return 0;
+	int ret = 0;
 
-fail:
-	kfree(client);
-	spin_unlock(&gart->client_lock);
-	return err;
-}
+	spin_lock(&gart->dom_lock);
 
-static void __gart_iommu_detach_dev(struct iommu_domain *domain,
-				    struct device *dev)
-{
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
-	struct gart_client *c;
-
-	list_for_each_entry(c, &gart->client, list) {
-		if (c->dev == dev) {
-			list_del(&c->list);
-			kfree(c);
-			if (list_empty(&gart->client)) {
-				gart->active_domain = NULL;
-				gart_domain->gart = NULL;
-			}
-			dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
-			return;
-		}
+	if (gart->active_domain && gart->active_domain != domain) {
+		ret = -EBUSY;
+	} else if (dev->archdata.iommu != domain) {
+		dev->archdata.iommu = domain;
+		gart->active_domain = domain;
+		gart->active_devices++;
 	}
 
-	dev_err(gart->dev, "Couldn't find %s to detach\n", dev_name(dev));
+	spin_unlock(&gart->dom_lock);
+
+	return ret;
 }
 
 static void gart_iommu_detach_dev(struct iommu_domain *domain,
 				  struct device *dev)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
+	struct gart_device *gart = gart_handle;
+
+	spin_lock(&gart->dom_lock);
 
-	spin_lock(&gart->client_lock);
-	__gart_iommu_detach_dev(domain, dev);
-	spin_unlock(&gart->client_lock);
+	if (dev->archdata.iommu == domain) {
+		dev->archdata.iommu = NULL;
+
+		if (--gart->active_devices == 0)
+			gart->active_domain = NULL;
+	}
+
+	spin_unlock(&gart->dom_lock);
 }
 
 static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
 {
-	struct gart_domain *gart_domain;
-	struct gart_device *gart;
+	struct gart_device *gart = gart_handle;
+	struct iommu_domain *domain;
 
 	if (type != IOMMU_DOMAIN_UNMANAGED)
 		return NULL;
 
-	gart = gart_handle;
-	if (!gart)
-		return NULL;
-
-	gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
-	if (!gart_domain)
-		return NULL;
-
-	gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
-	gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	if (domain) {
+		domain->geometry.aperture_start = gart->iovmm_base;
+		domain->geometry.aperture_end = gart->iovmm_base +
 					gart->page_count * GART_PAGE_SIZE - 1;
-	gart_domain->domain.geometry.force_aperture = true;
+		domain->geometry.force_aperture = true;
+	}
 
-	return &gart_domain->domain;
+	return domain;
 }
 
 static void gart_iommu_domain_free(struct iommu_domain *domain)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
-
-	if (gart) {
-		spin_lock(&gart->client_lock);
-		if (!list_empty(&gart->client)) {
-			struct gart_client *c, *tmp;
-
-			list_for_each_entry_safe(c, tmp, &gart->client, list)
-				__gart_iommu_detach_dev(domain, c->dev);
-		}
-		spin_unlock(&gart->client_lock);
-	}
-
-	kfree(gart_domain);
+	WARN_ON(gart_handle->active_domain == domain);
+	kfree(domain);
 }
 
 static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
 			  phys_addr_t pa, size_t bytes, int prot)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
+	struct gart_device *gart = gart_handle;
 	unsigned long flags;
 	unsigned long pfn;
 	unsigned long pte;
@@ -319,8 +248,7 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
 static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 			       size_t bytes)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
+	struct gart_device *gart = gart_handle;
 	unsigned long flags;
 
 	if (!gart_iova_range_valid(gart, iova, bytes))
@@ -335,8 +263,7 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
 					   dma_addr_t iova)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
+	struct gart_device *gart = gart_handle;
 	unsigned long pte;
 	phys_addr_t pa;
 	unsigned long flags;
@@ -395,8 +322,7 @@ static int gart_iommu_of_xlate(struct device *dev,
 
 static void gart_iommu_sync(struct iommu_domain *domain)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
+	struct gart_device *gart = gart_handle;
 
 	FLUSH_GART_REGS(gart);
 }
@@ -483,8 +409,7 @@ struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
 	gart->dev = dev;
 	gart_regs = mc->regs + GART_REG_BASE;
 	spin_lock_init(&gart->pte_lock);
-	spin_lock_init(&gart->client_lock);
-	INIT_LIST_HEAD(&gart->client);
+	spin_lock_init(&gart->dom_lock);
 	gart->regs = gart_regs;
 	gart->iovmm_base = (dma_addr_t)res_remap->start;
 	gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);
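
Note on the attach/detach rework: the patch drops the gart_client list in favor of two fields guarded by dom_lock — active_domain pins the single domain the GART hardware can serve, and active_devices counts attached devices so the domain is released when the last one detaches. Below is a minimal caller-side sketch of that invariant, written against the generic IOMMU API (iommu_domain_alloc/iommu_attach_device); the two devices dev_a and dev_b and the demo function are hypothetical, not part of the patch.

#include <linux/iommu.h>
#include <linux/platform_device.h>

/* Illustrative sketch: exercises the single-active-domain rule that
 * gart_iommu_attach_dev() enforces after this change. */
static int gart_single_domain_demo(struct device *dev_a, struct device *dev_b)
{
	struct iommu_domain *dom1, *dom2;
	int err;

	/* GART only supports IOMMU_DOMAIN_UNMANAGED, which is what
	 * bus-based iommu_domain_alloc() creates. */
	dom1 = iommu_domain_alloc(&platform_bus_type);
	dom2 = iommu_domain_alloc(&platform_bus_type);
	if (!dom1 || !dom2)
		return -ENOMEM;

	/* The first attach makes dom1 the GART's active_domain and
	 * bumps active_devices to 1. */
	err = iommu_attach_device(dom1, dev_a);
	if (err)
		return err;

	/* A different domain cannot attach while dom1 has users; the
	 * driver now fails this with -EBUSY (it used to log an error
	 * and return -EINVAL). */
	WARN_ON(iommu_attach_device(dom2, dev_b) != -EBUSY);

	/* Detaching the last device drops active_devices to 0 and
	 * clears active_domain... */
	iommu_detach_device(dom1, dev_a);

	/* ...so another domain may take over. */
	return iommu_attach_device(dom2, dev_b);
}

A second behavioral change worth noting: re-attaching a device to the domain it is already in is tolerated silently — the dev->archdata.iommu != domain test simply skips the bookkeeping, where the old list walk reported "%s is already attached" and failed.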