
Commit a888f1f

Hugh Dickins authored and Linus Torvalds committed
[PATCH] mm: vma_adjust insert file earlier
For those arches (arm and parisc) which use the i_mmap tree to implement flush_dcache_page, during split_vma there's a small window in vma_adjust when flush_dcache_mmap_lock is dropped, and pages in the split-off part of the vma might for an instant be invisible to __flush_dcache_page. Though we're more solid there than ever before, I guess it's a bad idea to leave that window: so (with regret, it was structurally nicer before) take __vma_link_file (and vma_prio_tree_init) out of __vma_link.

vma_prio_tree_init (which NULLs a few fields) is actually only needed when copying a vma, not when a new one has just been memset to 0.

__insert_vm_struct is used by nothing but vma_adjust's split_vma case: comment it accordingly, remove its mark_mm_hugetlb (it can never create a new kind of vma) and its validate_mm (another follows immediately).

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
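The diff below tells the full story; as a condensed, hand-written sketch of the resulting ordering (signatures abbreviated and locking paraphrased, not the literal mm/mmap.c source), the change amounts to this:

/* Sketch of the ordering after this patch (abbreviated, not literal source) */

__vma_link(...)                         /* now links list + rbtree + anon_vma only */
{
        __vma_link_list(mm, vma, prev, rb_parent);
        __vma_link_rb(mm, vma, rb_link, rb_parent);
        __anon_vma_link(vma);
}

vma_link(...)                           /* ordinary mmap path */
{
        /* i_mmap_lock and anon_vma lock held around both calls */
        __vma_link(mm, vma, prev, rb_link, rb_parent);
        __vma_link_file(vma);           /* file/prio_tree link moved out here */
        /* unlock */
}

vma_adjust(...)                         /* split_vma insert case */
{
        /* under i_mmap_lock, before vm_start/vm_end are adjusted: */
        __vma_link_file(insert);        /* prio_tree sees the new vma at once, so
                                           arm/parisc __flush_dcache_page never
                                           loses sight of its pages */
        /* ... vm_start/vm_end updated under flush_dcache_mmap_lock ... */
        __insert_vm_struct(mm, insert); /* list + rbtree + anon_vma link, later */
}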
1 parent e495dd3 commit a888f1f

File tree

1 file changed

+17 -7 lines changed


mm/mmap.c

Lines changed: 17 additions & 7 deletions
@@ -293,10 +293,8 @@ __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
         struct vm_area_struct *prev, struct rb_node **rb_link,
         struct rb_node *rb_parent)
 {
-        vma_prio_tree_init(vma);
         __vma_link_list(mm, vma, prev, rb_parent);
         __vma_link_rb(mm, vma, rb_link, rb_parent);
-        __vma_link_file(vma);
         __anon_vma_link(vma);
 }
 
@@ -312,7 +310,10 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
         if (mapping)
                 spin_lock(&mapping->i_mmap_lock);
         anon_vma_lock(vma);
+
         __vma_link(mm, vma, prev, rb_link, rb_parent);
+        __vma_link_file(vma);
+
         anon_vma_unlock(vma);
         if (mapping)
                 spin_unlock(&mapping->i_mmap_lock);
@@ -323,9 +324,9 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 /*
- * Insert vm structure into process list sorted by address and into the
- * inode's i_mmap tree. The caller should hold mm->mmap_sem and
- * ->f_mappping->i_mmap_lock if vm_file is non-NULL.
+ * Helper for vma_adjust in the split_vma insert case:
+ * insert vm structure into list and rbtree and anon_vma,
+ * but it has already been inserted into prio_tree earlier.
  */
 static void
 __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
@@ -337,9 +338,7 @@ __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
         if (__vma && __vma->vm_start < vma->vm_end)
                 BUG();
         __vma_link(mm, vma, prev, rb_link, rb_parent);
-        mark_mm_hugetlb(mm, vma);
         mm->map_count++;
-        validate_mm(mm);
 }
 
 static inline void
@@ -403,6 +402,15 @@ again: remove_next = 1 + (end > next->vm_end);
                 if (!(vma->vm_flags & VM_NONLINEAR))
                         root = &mapping->i_mmap;
                 spin_lock(&mapping->i_mmap_lock);
+                if (insert) {
+                        /*
+                         * Put into prio_tree now, so instantiated pages
+                         * are visible to arm/parisc __flush_dcache_page
+                         * throughout; but we cannot insert into address
+                         * space until vma start or end is updated.
+                         */
+                        __vma_link_file(insert);
+                }
         }
 
         /*
@@ -1463,6 +1471,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 
         /* most fields are the same, copy all, and then fixup */
         *new = *vma;
+        vma_prio_tree_init(new);
 
         if (new_below)
                 new->vm_end = addr;
@@ -1775,6 +1784,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
         new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
         if (new_vma) {
                 *new_vma = *vma;
+                vma_prio_tree_init(new_vma);
                 pol = mpol_copy(vma_policy(vma));
                 if (IS_ERR(pol)) {
                         kmem_cache_free(vm_area_cachep, new_vma);
