Skip to content

Commit 190d4e5

Browse files
author
Christoph Hellwig
committed
vmd: use the proper dma_* APIs instead of direct methods calls
With the bypass support for the direct mapping we might not always have methods to call, so use the proper APIs instead. The only downside is that we will create two dma-debug entries for each mapping if CONFIG_DMA_DEBUG is enabled.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Tony Luck <tony.luck@intel.com>
1 parent 55897af commit 190d4e5

File tree

1 file changed

+17
-25
lines changed
  • drivers/pci/controller

1 file changed

+17
-25
lines changed

drivers/pci/controller/vmd.c

Lines changed: 17 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -307,101 +307,93 @@ static struct device *to_vmd_dev(struct device *dev)
307307
return &vmd->dev->dev;
308308
}
309309

310-
static const struct dma_map_ops *vmd_dma_ops(struct device *dev)
311-
{
312-
return get_dma_ops(to_vmd_dev(dev));
313-
}
314-
315310
static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
316311
gfp_t flag, unsigned long attrs)
317312
{
318-
return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
319-
attrs);
313+
return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs);
320314
}
321315

322316
static void vmd_free(struct device *dev, size_t size, void *vaddr,
323317
dma_addr_t addr, unsigned long attrs)
324318
{
325-
return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
326-
attrs);
319+
return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs);
327320
}
328321

329322
static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
330323
void *cpu_addr, dma_addr_t addr, size_t size,
331324
unsigned long attrs)
332325
{
333-
return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
334-
size, attrs);
326+
return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size,
327+
attrs);
335328
}
336329

337330
static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
338331
void *cpu_addr, dma_addr_t addr, size_t size,
339332
unsigned long attrs)
340333
{
341-
return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
342-
addr, size, attrs);
334+
return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size,
335+
attrs);
343336
}
344337

345338
static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
346339
unsigned long offset, size_t size,
347340
enum dma_data_direction dir,
348341
unsigned long attrs)
349342
{
350-
return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
351-
dir, attrs);
343+
return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir,
344+
attrs);
352345
}
353346

354347
static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
355348
enum dma_data_direction dir, unsigned long attrs)
356349
{
357-
vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
350+
dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs);
358351
}
359352

360353
static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
361354
enum dma_data_direction dir, unsigned long attrs)
362355
{
363-
return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
356+
return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
364357
}
365358

366359
static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
367360
enum dma_data_direction dir, unsigned long attrs)
368361
{
369-
vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
362+
dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
370363
}
371364

372365
static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
373366
size_t size, enum dma_data_direction dir)
374367
{
375-
vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
368+
dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
376369
}
377370

378371
static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
379372
size_t size, enum dma_data_direction dir)
380373
{
381-
vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size,
382-
dir);
374+
dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir);
383375
}
384376

385377
static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
386378
int nents, enum dma_data_direction dir)
387379
{
388-
vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
380+
dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
389381
}
390382

391383
static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
392384
int nents, enum dma_data_direction dir)
393385
{
394-
vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
386+
dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
395387
}
396388

397389
static int vmd_dma_supported(struct device *dev, u64 mask)
398390
{
399-
return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
391+
return dma_supported(to_vmd_dev(dev), mask);
400392
}
401393

402394
static u64 vmd_get_required_mask(struct device *dev)
403395
{
404-
return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
396+
return dma_get_required_mask(to_vmd_dev(dev));
405397
}
406398

407399
static void vmd_teardown_dma_ops(struct vmd_dev *vmd)

0 commit comments

Comments
 (0)