Skip to content

Commit f5cc4ee

Browse files
author
Al Viro
committed
VM: make zap_page_range() callers that act on a single VMA use separate helper
... and not rely on ->vm_next being there for them... Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
1 parent 6e8bb01 commit f5cc4ee

File tree

1 file changed

+74
-39
lines changed

1 file changed

+74
-39
lines changed

mm/memory.c

Lines changed: 74 additions & 39 deletions
Original file line number | Diff line number | Diff line change
@@ -1307,6 +1307,47 @@ static void unmap_page_range(struct mmu_gather *tlb,
13071307
mem_cgroup_uncharge_end();
13081308
}
13091309

1310+
1311+
static void unmap_single_vma(struct mmu_gather *tlb,
1312+
struct vm_area_struct *vma, unsigned long start_addr,
1313+
unsigned long end_addr, unsigned long *nr_accounted,
1314+
struct zap_details *details)
1315+
{
1316+
unsigned long start = max(vma->vm_start, start_addr);
1317+
unsigned long end;
1318+
1319+
if (start >= vma->vm_end)
1320+
return;
1321+
end = min(vma->vm_end, end_addr);
1322+
if (end <= vma->vm_start)
1323+
return;
1324+
1325+
if (vma->vm_flags & VM_ACCOUNT)
1326+
*nr_accounted += (end - start) >> PAGE_SHIFT;
1327+
1328+
if (unlikely(is_pfn_mapping(vma)))
1329+
untrack_pfn_vma(vma, 0, 0);
1330+
1331+
if (start != end) {
1332+
if (unlikely(is_vm_hugetlb_page(vma))) {
1333+
/*
1334+
* It is undesirable to test vma->vm_file as it
1335+
* should be non-null for valid hugetlb area.
1336+
* However, vm_file will be NULL in the error
1337+
* cleanup path of do_mmap_pgoff. When
1338+
* hugetlbfs ->mmap method fails,
1339+
* do_mmap_pgoff() nullifies vma->vm_file
1340+
* before calling this function to clean up.
1341+
* Since no pte has actually been setup, it is
1342+
* safe to do nothing in this case.
1343+
*/
1344+
if (vma->vm_file)
1345+
unmap_hugepage_range(vma, start, end, NULL);
1346+
} else
1347+
unmap_page_range(tlb, vma, start, end, details);
1348+
}
1349+
}
1350+
13101351
/**
13111352
* unmap_vmas - unmap a range of memory covered by a list of vma's
13121353
* @tlb: address of the caller's struct mmu_gather
@@ -1332,46 +1373,12 @@ void unmap_vmas(struct mmu_gather *tlb,
13321373
unsigned long end_addr, unsigned long *nr_accounted,
13331374
struct zap_details *details)
13341375
{
1335-
unsigned long start = start_addr;
13361376
struct mm_struct *mm = vma->vm_mm;
13371377

13381378
mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
1339-
for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
1340-
unsigned long end;
1341-
1342-
start = max(vma->vm_start, start_addr);
1343-
if (start >= vma->vm_end)
1344-
continue;
1345-
end = min(vma->vm_end, end_addr);
1346-
if (end <= vma->vm_start)
1347-
continue;
1348-
1349-
if (vma->vm_flags & VM_ACCOUNT)
1350-
*nr_accounted += (end - start) >> PAGE_SHIFT;
1351-
1352-
if (unlikely(is_pfn_mapping(vma)))
1353-
untrack_pfn_vma(vma, 0, 0);
1354-
1355-
if (start != end) {
1356-
if (unlikely(is_vm_hugetlb_page(vma))) {
1357-
/*
1358-
* It is undesirable to test vma->vm_file as it
1359-
* should be non-null for valid hugetlb area.
1360-
* However, vm_file will be NULL in the error
1361-
* cleanup path of do_mmap_pgoff. When
1362-
* hugetlbfs ->mmap method fails,
1363-
* do_mmap_pgoff() nullifies vma->vm_file
1364-
* before calling this function to clean up.
1365-
* Since no pte has actually been setup, it is
1366-
* safe to do nothing in this case.
1367-
*/
1368-
if (vma->vm_file)
1369-
unmap_hugepage_range(vma, start, end, NULL);
1370-
} else
1371-
unmap_page_range(tlb, vma, start, end, details);
1372-
}
1373-
}
1374-
1379+
for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
1380+
unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted,
1381+
details);
13751382
mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
13761383
}
13771384

@@ -1381,6 +1388,8 @@ void unmap_vmas(struct mmu_gather *tlb,
13811388
* @address: starting address of pages to zap
13821389
* @size: number of bytes to zap
13831390
* @details: details of nonlinear truncation or shared cache invalidation
1391+
*
1392+
* Caller must protect the VMA list
13841393
*/
13851394
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
13861395
unsigned long size, struct zap_details *details)
@@ -1397,6 +1406,32 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address,
13971406
tlb_finish_mmu(&tlb, address, end);
13981407
}
13991408

1409+
/**
1410+
* zap_page_range_single - remove user pages in a given range
1411+
* @vma: vm_area_struct holding the applicable pages
1412+
* @address: starting address of pages to zap
1413+
* @size: number of bytes to zap
1414+
* @details: details of nonlinear truncation or shared cache invalidation
1415+
*
1416+
* The range must fit into one VMA.
1417+
*/
1418+
static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1419+
unsigned long size, struct zap_details *details)
1420+
{
1421+
struct mm_struct *mm = vma->vm_mm;
1422+
struct mmu_gather tlb;
1423+
unsigned long end = address + size;
1424+
unsigned long nr_accounted = 0;
1425+
1426+
lru_add_drain();
1427+
tlb_gather_mmu(&tlb, mm, 0);
1428+
update_hiwater_rss(mm);
1429+
mmu_notifier_invalidate_range_start(mm, address, end);
1430+
unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details);
1431+
mmu_notifier_invalidate_range_end(mm, address, end);
1432+
tlb_finish_mmu(&tlb, address, end);
1433+
}
1434+
14001435
/**
14011436
* zap_vma_ptes - remove ptes mapping the vma
14021437
* @vma: vm_area_struct holding ptes to be zapped
@@ -1415,7 +1450,7 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
14151450
if (address < vma->vm_start || address + size > vma->vm_end ||
14161451
!(vma->vm_flags & VM_PFNMAP))
14171452
return -1;
1418-
zap_page_range(vma, address, size, NULL);
1453+
zap_page_range_single(vma, address, size, NULL);
14191454
return 0;
14201455
}
14211456
EXPORT_SYMBOL_GPL(zap_vma_ptes);
/*
 * Zap [start_addr, end_addr) in @vma.  The caller guarantees the range
 * fits in this one VMA, so use the single-VMA helper rather than
 * zap_page_range(), which walks the VMA list via ->vm_next.
 */
static void unmap_mapping_range_vma(struct vm_area_struct *vma,
		unsigned long start_addr, unsigned long end_addr,
		struct zap_details *details)
{
	unsigned long nbytes = end_addr - start_addr;

	zap_page_range_single(vma, start_addr, nbytes, details);
}
27672802

27682803
static inline void unmap_mapping_range_tree(struct prio_tree_root *root,

0 commit comments

Comments
 (0)