@@ -307,101 +307,93 @@ static struct device *to_vmd_dev(struct device *dev)
 	return &vmd->dev->dev;
 }
 
-static const struct dma_map_ops *vmd_dma_ops(struct device *dev)
-{
-	return get_dma_ops(to_vmd_dev(dev));
-}
-
 static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
 		       gfp_t flag, unsigned long attrs)
 {
-	return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
-				       attrs);
+	return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs);
 }
 
 static void vmd_free(struct device *dev, size_t size, void *vaddr,
 		     dma_addr_t addr, unsigned long attrs)
 {
-	return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
-				      attrs);
+	return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs);
 }
 
 static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
 		    void *cpu_addr, dma_addr_t addr, size_t size,
 		    unsigned long attrs)
 {
-	return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
-				      size, attrs);
+	return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size,
+			      attrs);
 }
 
 static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
 			   void *cpu_addr, dma_addr_t addr, size_t size,
 			   unsigned long attrs)
 {
-	return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
-					     addr, size, attrs);
+	return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size,
+				     attrs);
 }
 
 static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
 			       unsigned long offset, size_t size,
 			       enum dma_data_direction dir,
 			       unsigned long attrs)
 {
-	return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
-					  dir, attrs);
+	return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir,
+				  attrs);
 }
 
 static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
 			   enum dma_data_direction dir, unsigned long attrs)
 {
-	vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
+	dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs);
 }
 
 static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		      enum dma_data_direction dir, unsigned long attrs)
 {
-	return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
+	return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
 }
 
 static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 			 enum dma_data_direction dir, unsigned long attrs)
 {
-	vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
+	dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
 }
 
 static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
 				    size_t size, enum dma_data_direction dir)
 {
-	vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
+	dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
 }
 
 static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
 				       size_t size, enum dma_data_direction dir)
 {
-	vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size,
-						 dir);
+	dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir);
 }
 
 static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 				int nents, enum dma_data_direction dir)
 {
-	vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
+	dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
 }
 
 static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 				   int nents, enum dma_data_direction dir)
 {
-	vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
+	dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
 }
 
 static int vmd_dma_supported(struct device *dev, u64 mask)
 {
-	return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
+	return dma_supported(to_vmd_dev(dev), mask);
 }
 
 static u64 vmd_get_required_mask(struct device *dev)
 {
-	return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
+	return dma_get_required_mask(to_vmd_dev(dev));
 }
 
 static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
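For context on why the conversion is equivalent: the generic dma_*_attrs() helpers already look up the target device's dma_map_ops via get_dma_ops() and call the matching method, which is exactly the indirection the removed vmd_dma_ops() wrapper performed by hand. Below is a minimal sketch of that dispatch for the alloc path, assuming a simplified view of the helper in include/linux/dma-mapping.h (the real one also handles the per-device coherent pool and arch hooks); the sketch_ name is illustrative, not a kernel symbol.

	/* Illustrative only: simplified version of what dma_alloc_attrs() does. */
	static inline void *sketch_dma_alloc_attrs(struct device *dev, size_t size,
						   dma_addr_t *dma_handle, gfp_t flag,
						   unsigned long attrs)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev); /* ops of the passed-in device */

		if (!ops || !ops->alloc)
			return NULL;
		return ops->alloc(dev, size, dma_handle, flag, attrs);
	}

So dma_alloc_attrs(to_vmd_dev(dev), ...) ends up in the parent device's ->alloc method, the same call the removed vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), ...) made directly, just without the open-coded get_dma_ops() lookup in this driver.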