@@ -1307,6 +1307,47 @@ static void unmap_page_range(struct mmu_gather *tlb,
 	mem_cgroup_uncharge_end();
 }
 
+
+static void unmap_single_vma(struct mmu_gather *tlb,
+		struct vm_area_struct *vma, unsigned long start_addr,
+		unsigned long end_addr, unsigned long *nr_accounted,
+		struct zap_details *details)
+{
+	unsigned long start = max(vma->vm_start, start_addr);
+	unsigned long end;
+
+	if (start >= vma->vm_end)
+		return;
+	end = min(vma->vm_end, end_addr);
+	if (end <= vma->vm_start)
+		return;
+
+	if (vma->vm_flags & VM_ACCOUNT)
+		*nr_accounted += (end - start) >> PAGE_SHIFT;
+
+	if (unlikely(is_pfn_mapping(vma)))
+		untrack_pfn_vma(vma, 0, 0);
+
+	if (start != end) {
+		if (unlikely(is_vm_hugetlb_page(vma))) {
+			/*
+			 * It is undesirable to test vma->vm_file as it
+			 * should be non-null for valid hugetlb area.
+			 * However, vm_file will be NULL in the error
+			 * cleanup path of do_mmap_pgoff. When
+			 * hugetlbfs ->mmap method fails,
+			 * do_mmap_pgoff() nullifies vma->vm_file
+			 * before calling this function to clean up.
+			 * Since no pte has actually been setup, it is
+			 * safe to do nothing in this case.
+			 */
+			if (vma->vm_file)
+				unmap_hugepage_range(vma, start, end, NULL);
+		} else
+			unmap_page_range(tlb, vma, start, end, details);
+	}
+}
+
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
  * @tlb: address of the caller's struct mmu_gather
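
The clamping at the top of unmap_single_vma() is the heart of the helper: start is raised to vma->vm_start, end is lowered to vma->vm_end, and the two early returns reject ranges that miss the VMA entirely. A standalone userspace sketch of that arithmetic, with hypothetical addresses and local stand-ins for the kernel's max()/min() macros:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages, as on x86 */

static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }
static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }

int main(void)
{
	/* hypothetical VMA covering [0x2000, 0x6000) */
	unsigned long vm_start = 0x2000, vm_end = 0x6000;
	/* zap request [0x1000, 0x4000): only partly overlaps the VMA */
	unsigned long start_addr = 0x1000, end_addr = 0x4000;

	unsigned long start = max_ul(vm_start, start_addr);	/* 0x2000 */
	unsigned long end = min_ul(vm_end, end_addr);		/* 0x4000 */

	if (start >= vm_end || end <= vm_start)
		return 0;	/* no overlap: nothing to unmap */

	/* prints: unmap [0x2000, 0x4000), 2 pages */
	printf("unmap [%#lx, %#lx), %lu pages\n",
	       start, end, (end - start) >> PAGE_SHIFT);
	return 0;
}
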
@@ -1332,46 +1373,12 @@ void unmap_vmas(struct mmu_gather *tlb,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *details)
 {
-	unsigned long start = start_addr;
 	struct mm_struct *mm = vma->vm_mm;
 
 	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
-	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
-		unsigned long end;
-
-		start = max(vma->vm_start, start_addr);
-		if (start >= vma->vm_end)
-			continue;
-		end = min(vma->vm_end, end_addr);
-		if (end <= vma->vm_start)
-			continue;
-
-		if (vma->vm_flags & VM_ACCOUNT)
-			*nr_accounted += (end - start) >> PAGE_SHIFT;
-
-		if (unlikely(is_pfn_mapping(vma)))
-			untrack_pfn_vma(vma, 0, 0);
-
-		if (start != end) {
-			if (unlikely(is_vm_hugetlb_page(vma))) {
-				/*
-				 * It is undesirable to test vma->vm_file as it
-				 * should be non-null for valid hugetlb area.
-				 * However, vm_file will be NULL in the error
-				 * cleanup path of do_mmap_pgoff. When
-				 * hugetlbfs ->mmap method fails,
-				 * do_mmap_pgoff() nullifies vma->vm_file
-				 * before calling this function to clean up.
-				 * Since no pte has actually been setup, it is
-				 * safe to do nothing in this case.
-				 */
-				if (vma->vm_file)
-					unmap_hugepage_range(vma, start, end, NULL);
-			} else
-				unmap_page_range(tlb, vma, start, end, details);
-		}
-	}
-
+	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
+		unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted,
+				 details);
 	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 }
 
@@ -1381,6 +1388,8 @@ void unmap_vmas(struct mmu_gather *tlb,
  * @address: starting address of pages to zap
  * @size: number of bytes to zap
  * @details: details of nonlinear truncation or shared cache invalidation
+ *
+ * Caller must protect the VMA list
  */
 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *details)
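
The "Caller must protect the VMA list" note added above is there because unmap_vmas() walks vma->vm_next while zap_page_range() itself takes no lock. A minimal sketch of a conforming caller, assuming the mmap_sem API of this kernel generation (madvise(MADV_DONTNEED) does essentially this):

	down_read(&mm->mmap_sem);	/* keep the VMA list stable */
	vma = find_vma(mm, address);
	if (vma)
		zap_page_range(vma, address, size, NULL);
	up_read(&mm->mmap_sem);
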
@@ -1397,6 +1406,32 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 	tlb_finish_mmu(&tlb, address, end);
 }
 
+/**
+ * zap_page_range_single - remove user pages in a given range
+ * @vma: vm_area_struct holding the applicable pages
+ * @address: starting address of pages to zap
+ * @size: number of bytes to zap
+ * @details: details of nonlinear truncation or shared cache invalidation
+ *
+ * The range must fit into one VMA.
+ */
+static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+		unsigned long size, struct zap_details *details)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	struct mmu_gather tlb;
+	unsigned long end = address + size;
+	unsigned long nr_accounted = 0;
+
+	lru_add_drain();
+	tlb_gather_mmu(&tlb, mm, 0);
+	update_hiwater_rss(mm);
+	mmu_notifier_invalidate_range_start(mm, address, end);
+	unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details);
+	mmu_notifier_invalidate_range_end(mm, address, end);
+	tlb_finish_mmu(&tlb, address, end);
+}
+
 /**
  * zap_vma_ptes - remove ptes mapping the vma
  * @vma: vm_area_struct holding ptes to be zapped
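
Note that zap_page_range_single() issues the mmu_notifier range calls itself: it bypasses unmap_vmas(), which (per the hunk above) owns that bracketing in the multi-VMA path. For contrast, the multi-VMA zap_page_range() in the same file reads roughly as follows after this patch (a paraphrase for orientation, not part of the diff):

void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;
	unsigned long end = address + size;
	unsigned long nr_accounted = 0;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, 0);
	update_hiwater_rss(mm);
	/* unmap_vmas() issues the mmu_notifier range calls internally */
	unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
	tlb_finish_mmu(&tlb, address, end);
}
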
@@ -1415,7 +1450,7 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 	if (address < vma->vm_start || address + size > vma->vm_end ||
 			!(vma->vm_flags & VM_PFNMAP))
 		return -1;
-	zap_page_range(vma, address, size, NULL);
+	zap_page_range_single(vma, address, size, NULL);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
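
zap_vma_ptes() is the exported entry point for drivers tearing down PTEs in a VM_PFNMAP mapping they own, and such a range lies within a single VMA by construction, so it is a natural first user of zap_page_range_single(). A sketch of a typical caller (driver and function names are hypothetical):

/*
 * Hypothetical driver helper: drop the PTE covering one page of a
 * VM_PFNMAP mapping previously populated with remap_pfn_range().
 */
static void mydrv_zap_one_page(struct vm_area_struct *vma, unsigned long addr)
{
	if (zap_vma_ptes(vma, addr, PAGE_SIZE))
		pr_warn("mydrv: %#lx is outside the PFN mapping\n", addr);
}
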
@@ -2762,7 +2797,7 @@ static void unmap_mapping_range_vma(struct vm_area_struct *vma,
 		unsigned long start_addr, unsigned long end_addr,
 		struct zap_details *details)
 {
-	zap_page_range(vma, start_addr, end_addr - start_addr, details);
+	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
 }
 
 static inline void unmap_mapping_range_tree(struct prio_tree_root *root,