@@ -36,6 +36,8 @@ struct mm_iommu_table_group_mem_t {
 	u64 ua;			/* userspace address */
 	u64 entries;		/* number of entries in hpas[] */
 	u64 *hpas;		/* vmalloc'ed */
+#define MM_IOMMU_TABLE_INVALID_HPA	((uint64_t)-1)
+	u64 dev_hpa;		/* Device memory base address */
 };
 
 static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
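With this change a preregistered region comes in two flavours: ordinary userspace memory, where hpas[] holds the pinned host pages, and device memory (for example GPU RAM mapped into the host physical address space), which is described only by its base address in dev_hpa. Pinned-memory regions keep dev_hpa at MM_IOMMU_TABLE_INVALID_HPA. A minimal sketch of telling the two apart (the helper name is hypothetical, not part of the patch):

static bool mem_region_is_devmem(struct mm_iommu_table_group_mem_t *mem)
{
	/* pinned userspace memory leaves dev_hpa at the invalid marker */
	return mem->dev_hpa != MM_IOMMU_TABLE_INVALID_HPA;
}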
@@ -126,7 +128,8 @@ static int mm_iommu_move_page_from_cma(struct page *page)
 	return 0;
 }
 
-long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
+		unsigned long entries, unsigned long dev_hpa,
 		struct mm_iommu_table_group_mem_t **pmem)
 {
 	struct mm_iommu_table_group_mem_t *mem;
@@ -150,18 +153,27 @@ long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 
 	}
 
-	ret = mm_iommu_adjust_locked_vm(mm, entries, true);
-	if (ret)
-		goto unlock_exit;
+	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
+		ret = mm_iommu_adjust_locked_vm(mm, entries, true);
+		if (ret)
+			goto unlock_exit;
 
-	locked_entries = entries;
+		locked_entries = entries;
+	}
 
 	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
 	if (!mem) {
 		ret = -ENOMEM;
 		goto unlock_exit;
 	}
 
+	if (dev_hpa != MM_IOMMU_TABLE_INVALID_HPA) {
+		mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));
+		mem->dev_hpa = dev_hpa;
+		goto good_exit;
+	}
+	mem->dev_hpa = MM_IOMMU_TABLE_INVALID_HPA;
+
 	/*
 	 * For a starting point for a maximum page size calculation
 	 * we use @ua and @entries natural alignment to allow IOMMU pages
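For device memory there is nothing to pin, so the maximum IOMMU page size is taken straight from the natural alignment of the region: __ffs() of (base address | size in bytes) returns the position of the lowest set bit, i.e. the largest power of two dividing both. A worked sketch with made-up values:

	unsigned long dev_hpa = 0x100000000UL;	/* base, 4 GiB aligned (bit 32) */
	unsigned long bytes = 0x40000000UL;	/* entries << PAGE_SHIFT, 1 GiB (bit 30) */
	unsigned int pageshift = __ffs(dev_hpa | bytes);	/* 30, so IOMMU pages up to 1 GiB */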
@@ -230,6 +242,7 @@ long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
 	}
 
+good_exit:
 	atomic64_set(&mem->mapped, 1);
 	mem->used = 1;
 	mem->ua = ua;
@@ -246,13 +259,31 @@ long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 
 	return ret;
 }
+
+long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+		struct mm_iommu_table_group_mem_t **pmem)
+{
+	return mm_iommu_do_alloc(mm, ua, entries, MM_IOMMU_TABLE_INVALID_HPA,
+			pmem);
+}
 EXPORT_SYMBOL_GPL(mm_iommu_new);
 
+long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
+		unsigned long entries, unsigned long dev_hpa,
+		struct mm_iommu_table_group_mem_t **pmem)
+{
+	return mm_iommu_do_alloc(mm, ua, entries, dev_hpa, pmem);
+}
+EXPORT_SYMBOL_GPL(mm_iommu_newdev);
+
 static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
 {
 	long i;
 	struct page *page = NULL;
 
+	if (!mem->hpas)
+		return;
+
 	for (i = 0; i < mem->entries; ++i) {
 		if (!mem->hpas[i])
 			continue;
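mm_iommu_new() keeps its old behaviour through the shared helper, while the new mm_iommu_newdev() records a device-memory range without pinning pages or charging locked_vm. A hedged usage sketch; ua, bytes and dev_hpa are placeholders, not values from this patch:

	struct mm_iommu_table_group_mem_t *mem;
	long ret;

	/* ordinary userspace memory: pages are pinned and counted in locked_vm */
	ret = mm_iommu_new(current->mm, ua, bytes >> PAGE_SHIFT, &mem);

	/* device memory: only the ua -> dev_hpa mapping is recorded, nothing is pinned */
	ret = mm_iommu_newdev(current->mm, ua, bytes >> PAGE_SHIFT, dev_hpa, &mem);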
@@ -294,6 +325,7 @@ static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
 long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 {
 	long ret = 0;
+	unsigned long entries, dev_hpa;
 
 	mutex_lock(&mem_list_mutex);
 
@@ -315,9 +347,12 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 	}
 
 	/* @mapped became 0 so now mappings are disabled, release the region */
+	entries = mem->entries;
+	dev_hpa = mem->dev_hpa;
 	mm_iommu_release(mem);
 
-	mm_iommu_adjust_locked_vm(mm, mem->entries, false);
+	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
+		mm_iommu_adjust_locked_vm(mm, entries, false);
 
 unlock_exit:
 	mutex_unlock(&mem_list_mutex);
@@ -387,14 +422,20 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
 {
 	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
-	u64 *va = &mem->hpas[entry];
+	u64 *va;
 
 	if (entry >= mem->entries)
 		return -EFAULT;
 
 	if (pageshift > mem->pageshift)
 		return -EFAULT;
 
+	if (!mem->hpas) {
+		*hpa = mem->dev_hpa + (ua - mem->ua);
+		return 0;
+	}
+
+	va = &mem->hpas[entry];
 	*hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);
 
 	return 0;
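Because a device-memory region has no hpas[] array, the translation degenerates to offset arithmetic from the region base. A small worked example with hypothetical addresses:

	/* region preregistered at ua = 0x7f0000000000, device memory at 0x100000000 */
	unsigned long ua  = 0x7f0000001000UL;	/* one 4K page into the region */
	unsigned long hpa = 0x100000000UL + (ua - 0x7f0000000000UL);	/* = 0x100001000 */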
@@ -405,7 +446,6 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
 		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
 {
 	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
-	void *va = &mem->hpas[entry];
 	unsigned long *pa;
 
 	if (entry >= mem->entries)
@@ -414,7 +454,12 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
 	if (pageshift > mem->pageshift)
 		return -EFAULT;
 
-	pa = (void *) vmalloc_to_phys(va);
+	if (!mem->hpas) {
+		*hpa = mem->dev_hpa + (ua - mem->ua);
+		return 0;
+	}
+
+	pa = (void *) vmalloc_to_phys(&mem->hpas[entry]);
 	if (!pa)
 		return -EFAULT;
 
@@ -434,6 +479,9 @@ extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
 	if (!mem)
 		return;
 
+	if (mem->dev_hpa != MM_IOMMU_TABLE_INVALID_HPA)
+		return;
+
 	entry = (ua - mem->ua) >> PAGE_SHIFT;
 	va = &mem->hpas[entry];
 
@@ -444,6 +492,33 @@ extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
 	*pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
 }
 
+bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
+		unsigned int pageshift, unsigned long *size)
+{
+	struct mm_iommu_table_group_mem_t *mem;
+	unsigned long end;
+
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
+		if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
+			continue;
+
+		end = mem->dev_hpa + (mem->entries << PAGE_SHIFT);
+		if ((mem->dev_hpa <= hpa) && (hpa < end)) {
+			/*
+			 * Since the IOMMU page size might be bigger than
+			 * PAGE_SIZE, the amount of preregistered memory
+			 * starting from @hpa might be smaller than 1<<pageshift
+			 * and the caller needs to distinguish this situation.
+			 */
+			*size = min(1UL << pageshift, end - hpa);
+			return true;
+		}
+	}
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(mm_iommu_is_devmem);
+
 long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
 {
 	if (atomic64_inc_not_zero(&mem->mapped))
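mm_iommu_is_devmem() lets callers recognise that a host physical address falls inside preregistered device memory, where there is no struct page to reference-count; *size matters because a single IOMMU page may extend past the preregistered range. A hedged sketch of a possible caller (mm, hpa and pageshift are placeholders):

	unsigned long size = 0;

	if (mm_iommu_is_devmem(mm, hpa, pageshift, &size) &&
			size == (1UL << pageshift)) {
		/* the whole IOMMU page is device memory: skip get_page()/put_page() */
	} else {
		/* ordinary RAM, or only partially covered: take a page reference */
	}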