Skip to content

Commit c4dae36

Browse files
author
Christoph Hellwig
committed
swiotlb: refactor swiotlb_map_page
Remove the somewhat useless map_single function, and replace it with a swiotlb_bounce_page handler that handles everything related to actually bouncing a page. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Robin Murphy <robin.murphy@arm.com> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
1 parent 4803b44 commit c4dae36

File tree

1 file changed

+30
-37
lines changed

1 file changed

+30
-37
lines changed

kernel/dma/swiotlb.c

Lines changed: 30 additions & 37 deletions
Original file line number | Diff line number | Diff line change
@@ -543,26 +543,6 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
543543
return tlb_addr;
544544
}
545545

546-
/*
547-
* Allocates bounce buffer and returns its physical address.
548-
*/
549-
static phys_addr_t
550-
map_single(struct device *hwdev, phys_addr_t phys, size_t size,
551-
enum dma_data_direction dir, unsigned long attrs)
552-
{
553-
dma_addr_t start_dma_addr;
554-
555-
if (swiotlb_force == SWIOTLB_NO_FORCE) {
556-
dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
557-
&phys);
558-
return SWIOTLB_MAP_ERROR;
559-
}
560-
561-
start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
562-
return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
563-
dir, attrs);
564-
}
565-
566546
/*
567547
* tlb_addr is the physical address of the bounce buffer to unmap.
568548
*/
@@ -714,6 +694,34 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size,
714694
return true;
715695
}
716696

697+
static dma_addr_t swiotlb_bounce_page(struct device *dev, phys_addr_t *phys,
698+
size_t size, enum dma_data_direction dir, unsigned long attrs)
699+
{
700+
dma_addr_t dma_addr;
701+
702+
if (unlikely(swiotlb_force == SWIOTLB_NO_FORCE)) {
703+
dev_warn_ratelimited(dev,
704+
"Cannot do DMA to address %pa\n", phys);
705+
return DIRECT_MAPPING_ERROR;
706+
}
707+
708+
/* Oh well, have to allocate and map a bounce buffer. */
709+
*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
710+
*phys, size, dir, attrs);
711+
if (*phys == SWIOTLB_MAP_ERROR)
712+
return DIRECT_MAPPING_ERROR;
713+
714+
/* Ensure that the address returned is DMA'ble */
715+
dma_addr = __phys_to_dma(dev, *phys);
716+
if (unlikely(!dma_capable(dev, dma_addr, size))) {
717+
swiotlb_tbl_unmap_single(dev, *phys, size, dir,
718+
attrs | DMA_ATTR_SKIP_CPU_SYNC);
719+
return DIRECT_MAPPING_ERROR;
720+
}
721+
722+
return dma_addr;
723+
}
724+
717725
/*
718726
* Map a single buffer of the indicated size for DMA in streaming mode. The
719727
* physical address to use is returned.
@@ -726,7 +734,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
726734
enum dma_data_direction dir,
727735
unsigned long attrs)
728736
{
729-
phys_addr_t map, phys = page_to_phys(page) + offset;
737+
phys_addr_t phys = page_to_phys(page) + offset;
730738
dma_addr_t dev_addr = phys_to_dma(dev, phys);
731739

732740
BUG_ON(dir == DMA_NONE);
@@ -739,22 +747,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
739747
return dev_addr;
740748

741749
trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
742-
743-
/* Oh well, have to allocate and map a bounce buffer. */
744-
map = map_single(dev, phys, size, dir, attrs);
745-
if (map == SWIOTLB_MAP_ERROR)
746-
return DIRECT_MAPPING_ERROR;
747-
748-
dev_addr = __phys_to_dma(dev, map);
749-
750-
/* Ensure that the address returned is DMA'ble */
751-
if (dma_capable(dev, dev_addr, size))
752-
return dev_addr;
753-
754-
attrs |= DMA_ATTR_SKIP_CPU_SYNC;
755-
swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
756-
757-
return DIRECT_MAPPING_ERROR;
750+
return swiotlb_bounce_page(dev, &phys, size, dir, attrs);
758751
}
759752

760753
/*

0 commit comments

Comments
 (0)