@@ -21,13 +21,12 @@
 #include <linux/init.h>
 #include <linux/gfp.h>
 #include <linux/mm.h>
-#include <linux/pci.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/string.h>
 #include <linux/types.h>
-#include <linux/scatterlist.h>
-#include <linux/export.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 
 #include <asm/cacheflush.h>
 #include <asm/dma.h>		/* for DMA_CHUNK_SIZE */
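
For context, these are the hooks that the generic dma-noncoherent code calls into and that this patch now provides. Their prototypes, taken from the new definitions below (and presumably declared in <linux/dma-noncoherent.h> at this point in the tree), are roughly:

void *arch_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs);
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir);
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir);
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction);

The generic code supplies the map_page/map_sg plumbing, so the architecture only has to implement allocation and cache maintenance.
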
@@ -437,7 +436,7 @@ static void *pcx_dma_alloc(struct device *dev, size_t size,
 	return addr;
 }
 
-static void *pa11_dma_alloc(struct device *dev, size_t size,
+void *arch_dma_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 
@@ -447,7 +446,7 @@ static void *pa11_dma_alloc(struct device *dev, size_t size,
 	return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs);
 }
 
-static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, unsigned long attrs)
 {
 	int order = get_order(size);
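
Driver-facing allocation is unchanged by the rename above: callers keep using the generic DMA API, which on a dma-noncoherent configuration dispatches to arch_dma_alloc() and arch_dma_free(). A minimal sketch of such a caller (hypothetical driver code; `dev` is assumed to be a valid, DMA-capable struct device):

#include <linux/dma-mapping.h>

static int example_alloc_coherent(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	/* Ends up in arch_dma_alloc() on a dma-noncoherent config. */
	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... program `handle` into the device, use `buf` from the CPU ... */

	/* Ends up in arch_dma_free(). */
	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
	return 0;
}
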
@@ -462,142 +461,20 @@ static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
 	free_pages((unsigned long)vaddr, get_order(size));
 }
 
-static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction, unsigned long attrs)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
 {
-	void *addr = page_address(page) + offset;
-	BUG_ON(direction == DMA_NONE);
-
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		flush_kernel_dcache_range((unsigned long) addr, size);
-
-	return virt_to_phys(addr);
-}
-
-static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
-		size_t size, enum dma_data_direction direction,
-		unsigned long attrs)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-		return;
-
-	if (direction == DMA_TO_DEVICE)
-		return;
-
-	/*
-	 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
-	 * simple map/unmap case. However, it IS necessary if
-	 * pci_dma_sync_single_* has been called and the buffer reused.
-	 */
-
-	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
+	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
 }
 
-static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
-		int nents, enum dma_data_direction direction,
-		unsigned long attrs)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
 {
-	int i;
-	struct scatterlist *sg;
-
-	BUG_ON(direction == DMA_NONE);
-
-	for_each_sg(sglist, sg, nents, i) {
-		unsigned long vaddr = (unsigned long)sg_virt(sg);
-
-		sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
-		sg_dma_len(sg) = sg->length;
-
-		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-			continue;
-
-		flush_kernel_dcache_range(vaddr, sg->length);
-	}
-	return nents;
+	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
 }
 
-static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
-		int nents, enum dma_data_direction direction,
-		unsigned long attrs)
-{
-	int i;
-	struct scatterlist *sg;
-
-	BUG_ON(direction == DMA_NONE);
-
-	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-		return;
-
-	if (direction == DMA_TO_DEVICE)
-		return;
-
-	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
-
-	for_each_sg(sglist, sg, nents, i)
-		flush_kernel_dcache_range(sg_virt(sg), sg->length);
-}
-
-static void pa11_dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
-			size);
-}
-
-static void pa11_dma_sync_single_for_device(struct device *dev,
-		dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
-			size);
-}
-
-static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
-{
-	int i;
-	struct scatterlist *sg;
-
-	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
-
-	for_each_sg(sglist, sg, nents, i)
-		flush_kernel_dcache_range(sg_virt(sg), sg->length);
-}
-
-static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
-{
-	int i;
-	struct scatterlist *sg;
-
-	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
-
-	for_each_sg(sglist, sg, nents, i)
-		flush_kernel_dcache_range(sg_virt(sg), sg->length);
-}
-
-static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
 	flush_kernel_dcache_range((unsigned long)vaddr, size);
 }
-
-const struct dma_map_ops pa11_dma_ops = {
-	.alloc =			pa11_dma_alloc,
-	.free =				pa11_dma_free,
-	.map_page =			pa11_dma_map_page,
-	.unmap_page =			pa11_dma_unmap_page,
-	.map_sg =			pa11_dma_map_sg,
-	.unmap_sg =			pa11_dma_unmap_sg,
-	.sync_single_for_cpu =		pa11_dma_sync_single_for_cpu,
-	.sync_single_for_device =	pa11_dma_sync_single_for_device,
-	.sync_sg_for_cpu =		pa11_dma_sync_sg_for_cpu,
-	.sync_sg_for_device =		pa11_dma_sync_sg_for_device,
-	.cache_sync =			pa11_dma_cache_sync,
-};
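
The net effect of the removals above: the BUG_ON() checks, DMA_ATTR_SKIP_CPU_SYNC handling, and scatterlist iteration all move into common code, and the architecture is reduced to the two sync hooks, each a plain data-cache flush over the buffer. A sketch of the now-generic streaming path as seen from a driver (hypothetical code; on this configuration dma_map_single() should end up calling arch_sync_dma_for_device(), and dma_unmap_single() should call arch_sync_dma_for_cpu() for transfers from the device):

#include <linux/dma-mapping.h>

static int example_streaming_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Generic code flushes the CPU cache via arch_sync_dma_for_device(). */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;

	/* ... let the device DMA into the buffer ... */

	/* Generic code invokes arch_sync_dma_for_cpu() before the CPU reads. */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}
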