@@ -888,22 +888,22 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 		.flags = PVMW_SYNC,
 	};
 	int *cleaned = arg;
+	bool invalidation_needed = false;
 
 	while (page_vma_mapped_walk(&pvmw)) {
 		int ret = 0;
-		address = pvmw.address;
 		if (pvmw.pte) {
 			pte_t entry;
 			pte_t *pte = pvmw.pte;
 
 			if (!pte_dirty(*pte) && !pte_write(*pte))
 				continue;
 
-			flush_cache_page(vma, address, pte_pfn(*pte));
-			entry = ptep_clear_flush(vma, address, pte);
+			flush_cache_page(vma, pvmw.address, pte_pfn(*pte));
+			entry = ptep_clear_flush(vma, pvmw.address, pte);
 			entry = pte_wrprotect(entry);
 			entry = pte_mkclean(entry);
-			set_pte_at(vma->vm_mm, address, pte, entry);
+			set_pte_at(vma->vm_mm, pvmw.address, pte, entry);
 			ret = 1;
 		} else {
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
@@ -913,11 +913,11 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
 				continue;
 
-			flush_cache_page(vma, address, page_to_pfn(page));
-			entry = pmdp_huge_clear_flush(vma, address, pmd);
+			flush_cache_page(vma, pvmw.address, page_to_pfn(page));
+			entry = pmdp_huge_clear_flush(vma, pvmw.address, pmd);
 			entry = pmd_wrprotect(entry);
 			entry = pmd_mkclean(entry);
-			set_pmd_at(vma->vm_mm, address, pmd, entry);
+			set_pmd_at(vma->vm_mm, pvmw.address, pmd, entry);
 			ret = 1;
 #else
 			/* unexpected pmd-mapped page? */
@@ -926,11 +926,16 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 		}
 
 		if (ret) {
-			mmu_notifier_invalidate_page(vma->vm_mm, address);
 			(*cleaned)++;
+			invalidation_needed = true;
 		}
 	}
 
+	if (invalidation_needed) {
+		mmu_notifier_invalidate_range(vma->vm_mm, address,
+				address + (1UL << compound_order(page)));
+	}
+
 	return true;
 }
 
@@ -1323,7 +1328,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	};
 	pte_t pteval;
 	struct page *subpage;
-	bool ret = true;
+	bool ret = true, invalidation_needed = false;
 	enum ttu_flags flags = (enum ttu_flags)arg;
 
 	/* munlock has nothing to gain from examining un-locked vmas */
@@ -1363,11 +1368,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		VM_BUG_ON_PAGE(!pvmw.pte, page);
 
 		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
-		address = pvmw.address;
-
 
 		if (!(flags & TTU_IGNORE_ACCESS)) {
-			if (ptep_clear_flush_young_notify(vma, address,
+			if (ptep_clear_flush_young_notify(vma, pvmw.address,
 						pvmw.pte)) {
 				ret = false;
 				page_vma_mapped_walk_done(&pvmw);
@@ -1376,7 +1379,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		}
 
 		/* Nuke the page table entry. */
-		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
+		flush_cache_page(vma, pvmw.address, pte_pfn(*pvmw.pte));
 		if (should_defer_flush(mm, flags)) {
 			/*
 			 * We clear the PTE but do not flush so potentially
@@ -1386,11 +1389,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			 * transition on a cached TLB entry is written through
 			 * and traps if the PTE is unmapped.
 			 */
-			pteval = ptep_get_and_clear(mm, address, pvmw.pte);
+			pteval = ptep_get_and_clear(mm, pvmw.address,
+						    pvmw.pte);
 
 			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
 		} else {
-			pteval = ptep_clear_flush(vma, address, pvmw.pte);
+			pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
 		}
 
 		/* Move the dirty bit to the page. Now the pte is gone. */
@@ -1405,12 +1409,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			if (PageHuge(page)) {
 				int nr = 1 << compound_order(page);
 				hugetlb_count_sub(nr, mm);
-				set_huge_swap_pte_at(mm, address,
+				set_huge_swap_pte_at(mm, pvmw.address,
 						     pvmw.pte, pteval,
 						     vma_mmu_pagesize(vma));
 			} else {
 				dec_mm_counter(mm, mm_counter(page));
-				set_pte_at(mm, address, pvmw.pte, pteval);
+				set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
 			}
 
 		} else if (pte_unused(pteval)) {
@@ -1434,7 +1438,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			swp_pte = swp_entry_to_pte(entry);
 			if (pte_soft_dirty(pteval))
 				swp_pte = pte_swp_mksoft_dirty(swp_pte);
-			set_pte_at(mm, address, pvmw.pte, swp_pte);
+			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
 		} else if (PageAnon(page)) {
 			swp_entry_t entry = { .val = page_private(subpage) };
 			pte_t swp_pte;
@@ -1460,15 +1464,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 				 * If the page was redirtied, it cannot be
 				 * discarded. Remap the page to page table.
 				 */
-				set_pte_at(mm, address, pvmw.pte, pteval);
+				set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
 				SetPageSwapBacked(page);
 				ret = false;
 				page_vma_mapped_walk_done(&pvmw);
 				break;
 			}
 
 			if (swap_duplicate(entry) < 0) {
-				set_pte_at(mm, address, pvmw.pte, pteval);
+				set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
 				ret = false;
 				page_vma_mapped_walk_done(&pvmw);
 				break;
@@ -1484,14 +1488,18 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			swp_pte = swp_entry_to_pte(entry);
 			if (pte_soft_dirty(pteval))
 				swp_pte = pte_swp_mksoft_dirty(swp_pte);
-			set_pte_at(mm, address, pvmw.pte, swp_pte);
+			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
 		} else
 			dec_mm_counter(mm, mm_counter_file(page));
 discard:
 		page_remove_rmap(subpage, PageHuge(page));
 		put_page(page);
-		mmu_notifier_invalidate_page(mm, address);
+		invalidation_needed = true;
 	}
+
+	if (invalidation_needed)
+		mmu_notifier_invalidate_range(mm, address,
+				address + (1UL << compound_order(page)));
 	return ret;
 }
 
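Note (added for illustration, not part of the commit): in both page_mkclean_one() and try_to_unmap_one(), the patch drops the per-mapping mmu_notifier_invalidate_page() call from inside the page_vma_mapped_walk() loop, records that an invalidation is pending in invalidation_needed, and issues one mmu_notifier_invalidate_range() over the page's span after the loop. A minimal userspace sketch of that batching pattern follows; clean_one_entry(), invalidate_range() and clean_page_mappings() are hypothetical stand-ins, not kernel APIs.

/*
 * Illustrative sketch of the deferred-invalidation pattern only --
 * not kernel code.  clean_one_entry() stands in for the PTE/PMD
 * write-protect logic, invalidate_range() for
 * mmu_notifier_invalidate_range().
 */
#include <stdbool.h>
#include <stdio.h>

#define NMAPPINGS 3

static bool clean_one_entry(unsigned long address)
{
	printf("write-protect and clean the entry mapping %#lx\n", address);
	return true;				/* pretend the entry was dirty */
}

static void invalidate_range(unsigned long start, unsigned long end)
{
	printf("notify once for the range [%#lx, %#lx)\n", start, end);
}

static void clean_page_mappings(const unsigned long *mappings, int n,
				unsigned long page_start,
				unsigned long page_size)
{
	bool invalidation_needed = false;
	int i;

	for (i = 0; i < n; i++) {
		/* Before the patch the notifier ran here, once per mapping. */
		if (clean_one_entry(mappings[i]))
			invalidation_needed = true;	/* defer it instead */
	}

	/* After the patch: a single range notification covering the page. */
	if (invalidation_needed)
		invalidate_range(page_start, page_start + page_size);
}

int main(void)
{
	unsigned long mappings[NMAPPINGS] = { 0x1000, 0x2000, 0x3000 };

	clean_page_mappings(mappings, NMAPPINGS, 0x1000, 4096);
	return 0;
}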