@@ -504,7 +504,6 @@ void ib_umem_odp_release(struct ib_umem *umem)
 static int ib_umem_odp_map_dma_single_page(
 		struct ib_umem *umem,
 		int page_index,
-		u64 base_virt_addr,
 		struct page *page,
 		u64 access_mask,
 		unsigned long current_seq)
@@ -527,7 +526,7 @@ static int ib_umem_odp_map_dma_single_page(
 	if (!(umem->odp_data->dma_list[page_index])) {
 		dma_addr = ib_dma_map_page(dev,
 					   page,
-					   0, PAGE_SIZE,
+					   0, BIT(umem->page_shift),
 					   DMA_BIDIRECTIONAL);
 		if (ib_dma_mapping_error(dev, dma_addr)) {
 			ret = -EFAULT;
@@ -555,8 +554,9 @@ static int ib_umem_odp_map_dma_single_page(
 	if (remove_existing_mapping && umem->context->invalidate_range) {
 		invalidate_page_trampoline(
			umem,
-			base_virt_addr + (page_index * PAGE_SIZE),
-			base_virt_addr + ((page_index + 1) * PAGE_SIZE),
+			ib_umem_start(umem) + (page_index << umem->page_shift),
+			ib_umem_start(umem) + ((page_index + 1) <<
					       umem->page_shift),
 			NULL);
 		ret = -EAGAIN;
 	}
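
For reference, the invalidation bounds above turn a page index back into a byte range by shifting left by the umem's own page shift, where the old code multiplied by the fixed PAGE_SIZE. A minimal user-space sketch of that arithmetic (not kernel code; BIT() is redefined locally, and the mapping start and 2 MiB page shift are hypothetical illustration values):

/*
 * Sketch: page index -> virtual byte range under a variable page shift.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1ULL << (n))

int main(void)
{
	uint64_t umem_start = 0x7f0000000000ULL; /* hypothetical ib_umem_start() */
	unsigned int page_shift = 21;            /* assume a 2 MiB umem page */
	int page_index = 3;

	/* Invalidation bounds for one umem page: shift left, not right. */
	uint64_t start = umem_start + ((uint64_t)page_index << page_shift);
	uint64_t end   = umem_start + ((uint64_t)(page_index + 1) << page_shift);

	printf("invalidate [%#llx, %#llx), %llu bytes\n",
	       (unsigned long long)start, (unsigned long long)end,
	       (unsigned long long)(end - start)); /* length == BIT(page_shift) */
	return 0;
}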
@@ -595,10 +595,10 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 	struct task_struct *owning_process  = NULL;
 	struct mm_struct   *owning_mm       = NULL;
 	struct page       **local_page_list = NULL;
-	u64 off;
-	int j, k, ret = 0, start_idx, npages = 0;
-	u64 base_virt_addr;
+	u64 page_mask, off;
+	int j, k, ret = 0, start_idx, npages = 0, page_shift;
 	unsigned int flags = 0;
+	phys_addr_t p = 0;

 	if (access_mask == 0)
 		return -EINVAL;
@@ -611,9 +611,10 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 	if (!local_page_list)
 		return -ENOMEM;

-	off = user_virt & (~PAGE_MASK);
-	user_virt = user_virt & PAGE_MASK;
-	base_virt_addr = user_virt;
+	page_shift = umem->page_shift;
+	page_mask = ~(BIT(page_shift) - 1);
+	off = user_virt & (~page_mask);
+	user_virt = user_virt & page_mask;
 	bcnt += off; /* Charge for the first page offset as well. */

 	owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
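
A small user-space sketch of the page_mask arithmetic introduced above: for any page_shift, ~(BIT(page_shift) - 1) clears the in-page bits, so an address splits cleanly into an aligned base and an in-page offset. The 64 KiB shift and the sample address are arbitrary illustration values:

/*
 * Sketch: split an address into aligned base + offset for any page_shift.
 */
#include <assert.h>
#include <stdint.h>

#define BIT(n) (1ULL << (n))

int main(void)
{
	unsigned int page_shift = 16;       /* assume a 64 KiB umem page */
	uint64_t page_mask = ~(BIT(page_shift) - 1);
	uint64_t user_virt = 0x12345678ULL; /* arbitrary example address */

	uint64_t off  = user_virt & ~page_mask; /* offset within the page */
	uint64_t base = user_virt &  page_mask; /* page-aligned start */

	assert(base + off == user_virt);
	assert(off < BIT(page_shift));
	return 0;
}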
@@ -631,13 +632,13 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 	if (access_mask & ODP_WRITE_ALLOWED_BIT)
 		flags |= FOLL_WRITE;

-	start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
+	start_idx = (user_virt - ib_umem_start(umem)) >> page_shift;
 	k = start_idx;

 	while (bcnt > 0) {
-		const size_t gup_num_pages =
-			min_t(size_t, ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
-			      PAGE_SIZE / sizeof(struct page *));
+		const size_t gup_num_pages = min_t(size_t,
+				(bcnt + BIT(page_shift) - 1) >> page_shift,
+				PAGE_SIZE / sizeof(struct page *));

 		down_read(&owning_mm->mmap_sem);
 		/*
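
The new gup_num_pages expression rounds bcnt up to whole umem pages without the PAGE_SIZE-specific ALIGN() pair: (bcnt + BIT(page_shift) - 1) >> page_shift is the usual round-up-then-divide idiom for a power-of-two page size. A user-space check of that idiom (all values are arbitrary):

#include <assert.h>
#include <stdint.h>

#define BIT(n) (1ULL << (n))

/* Round-up division by a power of two: add (divisor - 1), then shift. */
static uint64_t pages_needed(uint64_t bcnt, unsigned int page_shift)
{
	return (bcnt + BIT(page_shift) - 1) >> page_shift;
}

int main(void)
{
	/* 4 KiB pages: 1 byte still needs 1 page, 4097 bytes need 2. */
	assert(pages_needed(1, 12) == 1);
	assert(pages_needed(4096, 12) == 1);
	assert(pages_needed(4097, 12) == 2);
	/* 2 MiB pages: 3 MiB of work spans 2 pages. */
	assert(pages_needed(3ULL << 20, 21) == 2);
	return 0;
}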
@@ -656,14 +657,25 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 			break;

 		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
-		user_virt += npages << PAGE_SHIFT;
 		mutex_lock(&umem->odp_data->umem_mutex);
-		for (j = 0; j < npages; ++j) {
+		for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
+			if (user_virt & ~page_mask) {
+				p += PAGE_SIZE;
+				if (page_to_phys(local_page_list[j]) != p) {
+					ret = -EFAULT;
+					break;
+				}
+				put_page(local_page_list[j]);
+				continue;
+			}
+
 			ret = ib_umem_odp_map_dma_single_page(
-					umem, k, base_virt_addr, local_page_list[j],
-					access_mask, current_seq);
+					umem, k, local_page_list[j],
+					access_mask, current_seq);
 			if (ret < 0)
 				break;
+
+			p = page_to_phys(local_page_list[j]);
 			k++;
 		}
 		mutex_unlock(&umem->odp_data->umem_mutex);
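
The new branch in the loop above enforces contiguity: inside one large umem page, every 4 KiB system page returned by get_user_pages() must be physically adjacent to the one before it, because the whole umem page was DMA-mapped in a single call; a break in the run invalidates that mapping, hence -EFAULT. A toy user-space sketch of that rule (check_contiguous is a hypothetical helper, not kernel API):

#include <stdint.h>
#include <stdio.h>

#define SYS_PAGE_SIZE 4096ULL

/* Returns 0 if the physical addresses form one contiguous run starting
 * at phys[0], or -1 at the first discontinuity (mirroring -EFAULT). */
static int check_contiguous(const uint64_t *phys, int npages)
{
	uint64_t p = phys[0];

	for (int j = 1; j < npages; j++) {
		p += SYS_PAGE_SIZE;
		if (phys[j] != p)
			return -1;
	}
	return 0;
}

int main(void)
{
	uint64_t ok[]  = { 0x100000, 0x101000, 0x102000, 0x103000 };
	uint64_t bad[] = { 0x100000, 0x101000, 0x900000, 0x103000 };

	printf("ok:  %d\n", check_contiguous(ok, 4));  /* 0  */
	printf("bad: %d\n", check_contiguous(bad, 4)); /* -1 */
	return 0;
}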
@@ -708,7 +720,7 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 	 * once. */
 	mutex_lock(&umem->odp_data->umem_mutex);
 	for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
-		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
+		idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
 		if (umem->odp_data->page_list[idx]) {
 			struct page *page = umem->odp_data->page_list[idx];
 			dma_addr_t dma = umem->odp_data->dma_list[idx];
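
Finally, the unmap path derives its page_list/dma_list index with the same variable shift; dividing by the 4 KiB PAGE_SIZE would over-count whenever the umem uses larger pages. A user-space sketch with a hypothetical 2 MiB umem page:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1ULL << (n))

int main(void)
{
	unsigned int page_shift = 21; /* assume 2 MiB umem pages */
	uint64_t umem_start = 0;
	uint64_t addr = umem_start + 2 * BIT(page_shift); /* third umem page */

	uint64_t idx_right = (addr - umem_start) >> page_shift; /* 2    */
	uint64_t idx_wrong = (addr - umem_start) / 4096;        /* 1024 */

	printf("correct idx %llu, PAGE_SIZE-based idx %llu\n",
	       (unsigned long long)idx_right,
	       (unsigned long long)idx_wrong);
	return 0;
}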