Commit ddeaab3

mjkravetz authored and torvalds committed
hugetlbfs: revert "use i_mmap_rwsem for more pmd sharing synchronization"
This reverts b43a999. The reverted commit caused issues with migration and
poisoning of anon huge pages. Running the LTP move_pages12 test results in an
"unable to handle kernel NULL pointer" BUG with a stack similar to:

RIP: 0010:down_write+0x1b/0x40
Call Trace:
 migrate_pages+0x81f/0xb90
 __ia32_compat_sys_migrate_pages+0x190/0x190
 do_move_pages_to_node.isra.53.part.54+0x2a/0x50
 kernel_move_pages+0x566/0x7b0
 __x64_sys_move_pages+0x24/0x30
 do_syscall_64+0x5b/0x180
 entry_SYSCALL_64_after_hwframe+0x44/0xa9

The purpose of the reverted patch was to fix some long-standing races with
huge pmd sharing. It used i_mmap_rwsem for this purpose, with the idea that
the same lock could also be used to address truncate/page fault races in
another patch. Further analysis has determined that i_mmap_rwsem cannot be
used to address all of these hugetlbfs synchronization issues. Therefore,
revert this patch while working on another approach to the underlying
problems.

Link: http://lkml.kernel.org/r/20190103235452.29335-2-mike.kravetz@oracle.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reported-by: Jan Stancek <jstancek@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Prakash Sangappa <prakash.sangappa@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent e7c5809 commit ddeaab3
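For readers unfamiliar with huge pmd sharing, the race behind this revert is a
lifetime problem: huge_pte_offset() can return a pointer into a pmd page that
is shared with other mappings, and a concurrent huge_pmd_unshare() can drop the
last reference and free that page while the pointer is still in use. The
following user-space sketch models that hazard with pthreads; all names here
(shared_pmd_page, lookup_entry, unshare) are invented for illustration and
none of this is kernel code.

/* Minimal single-threaded demo of the lifetime rule: a reader must hold
 * the "i_mmap_rwsem" while dereferencing the shared pmd page, because the
 * unshare path may free it under the write side. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct shared_pmd_page {
        int refcount;           /* analogue of page_count */
        long entries[512];      /* the shared page-table entries */
};

static pthread_rwlock_t i_mmap_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static struct shared_pmd_page *pmd_page;

/* Analogue of huge_pte_offset(): holding the read side keeps the page
 * from being freed out from under us. */
static long lookup_entry(int idx)
{
        long val;

        pthread_rwlock_rdlock(&i_mmap_rwsem);
        val = pmd_page ? pmd_page->entries[idx] : -1;
        pthread_rwlock_unlock(&i_mmap_rwsem);
        return val;
}

/* Analogue of huge_pmd_unshare(): drop a reference and free the page when
 * the last sharer goes away.  Taking the write side means it cannot run
 * while a reader is still inside lookup_entry(). */
static void unshare(void)
{
        pthread_rwlock_wrlock(&i_mmap_rwsem);
        if (pmd_page && --pmd_page->refcount == 0) {
                free(pmd_page);
                pmd_page = NULL;
        }
        pthread_rwlock_unlock(&i_mmap_rwsem);
}

int main(void)
{
        pmd_page = calloc(1, sizeof(*pmd_page));
        pmd_page->refcount = 1;
        printf("entry 0 = %ld\n", lookup_entry(0));
        unshare();
        printf("entry 0 after unshare = %ld\n", lookup_entry(0));
        return 0;
}

The reverted patch held i_mmap_rwsem across such lookups, which is the role
the rwlock plays in this model; the revert trades that back for the
pre-existing behavior while a different fix is developed.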

5 files changed: 20 additions & 88 deletions

mm/hugetlb.c

Lines changed: 15 additions & 49 deletions
@@ -3238,7 +3238,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
         struct page *ptepage;
         unsigned long addr;
         int cow;
-        struct address_space *mapping = vma->vm_file->f_mapping;
         struct hstate *h = hstate_vma(vma);
         unsigned long sz = huge_page_size(h);
         struct mmu_notifier_range range;
@@ -3250,23 +3249,13 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                 mmu_notifier_range_init(&range, src, vma->vm_start,
                                         vma->vm_end);
                 mmu_notifier_invalidate_range_start(&range);
-        } else {
-                /*
-                 * For shared mappings i_mmap_rwsem must be held to call
-                 * huge_pte_alloc, otherwise the returned ptep could go
-                 * away if part of a shared pmd and another thread calls
-                 * huge_pmd_unshare.
-                 */
-                i_mmap_lock_read(mapping);
         }

         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
                 spinlock_t *src_ptl, *dst_ptl;
-
                 src_pte = huge_pte_offset(src, addr, sz);
                 if (!src_pte)
                         continue;
-
                 dst_pte = huge_pte_alloc(dst, addr, sz);
                 if (!dst_pte) {
                         ret = -ENOMEM;
@@ -3337,8 +3326,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,

         if (cow)
                 mmu_notifier_invalidate_range_end(&range);
-        else
-                i_mmap_unlock_read(mapping);

         return ret;
 }
@@ -3784,18 +3771,14 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
                         };

                         /*
-                         * hugetlb_fault_mutex and i_mmap_rwsem must be
-                         * dropped before handling userfault.  Reacquire
-                         * after handling fault to make calling code simpler.
+                         * hugetlb_fault_mutex must be dropped before
+                         * handling userfault.  Reacquire after handling
+                         * fault to make calling code simpler.
                          */
                         hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
                                                         idx, haddr);
                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
-                        i_mmap_unlock_read(mapping);
-
                         ret = handle_userfault(&vmf, VM_UFFD_MISSING);
-
-                        i_mmap_lock_read(mapping);
                         mutex_lock(&hugetlb_fault_mutex_table[hash]);
                         goto out;
                 }
@@ -3943,43 +3926,27 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,

         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
         if (ptep) {
-                /*
-                 * Since we hold no locks, ptep could be stale.  That is
-                 * OK as we are only making decisions based on content and
-                 * not actually modifying content here.
-                 */
                 entry = huge_ptep_get(ptep);
                 if (unlikely(is_hugetlb_entry_migration(entry))) {
                         migration_entry_wait_huge(vma, mm, ptep);
                         return 0;
                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
                         return VM_FAULT_HWPOISON_LARGE |
                                 VM_FAULT_SET_HINDEX(hstate_index(h));
+        } else {
+                ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
+                if (!ptep)
+                        return VM_FAULT_OOM;
         }

-        /*
-         * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
-         * until finished with ptep.  This prevents huge_pmd_unshare from
-         * being called elsewhere and making the ptep no longer valid.
-         *
-         * ptep could have already be assigned via huge_pte_offset.  That
-         * is OK, as huge_pte_alloc will return the same value unless
-         * something changed.
-         */
         mapping = vma->vm_file->f_mapping;
-        i_mmap_lock_read(mapping);
-        ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
-        if (!ptep) {
-                i_mmap_unlock_read(mapping);
-                return VM_FAULT_OOM;
-        }
+        idx = vma_hugecache_offset(h, vma, haddr);

         /*
          * Serialize hugepage allocation and instantiation, so that we don't
          * get spurious allocation failures if two CPUs race to instantiate
          * the same page in the page cache.
          */
-        idx = vma_hugecache_offset(h, vma, haddr);
         hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
         mutex_lock(&hugetlb_fault_mutex_table[hash]);

@@ -4067,7 +4034,6 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         }
 out_mutex:
         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
-        i_mmap_unlock_read(mapping);
         /*
          * Generally it's safe to hold refcount during waiting page lock. But
          * here we just wait to defer the next page fault to avoid busy loop and
@@ -4672,12 +4638,10 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
  * and returns the corresponding pte. While this is not necessary for the
  * !shared pmd case because we can allocate the pmd later as well, it makes the
- * code much cleaner.
- *
- * This routine must be called with i_mmap_rwsem held in at least read mode.
- * For hugetlbfs, this prevents removal of any page table entries associated
- * with the address space.  This is important as we are setting up sharing
- * based on existing page table entries (mappings).
+ * code much cleaner. pmd allocation is essential for the shared case because
+ * pud has to be populated inside the same i_mmap_rwsem section - otherwise
+ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
+ * bad pmd for sharing.
  */
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 {
@@ -4694,6 +4658,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
         if (!vma_shareable(vma, addr))
                 return (pte_t *)pmd_alloc(mm, pud, addr);

+        i_mmap_lock_write(mapping);
         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
                 if (svma == vma)
                         continue;
@@ -4723,6 +4688,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
         spin_unlock(ptl);
 out:
         pte = (pte_t *)pmd_alloc(mm, pud, addr);
+        i_mmap_unlock_write(mapping);
         return pte;
 }

@@ -4733,7 +4699,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
  * indicated by page_count > 1, unmap is achieved by clearing pud and
  * decrementing the ref count. If count == 1, the pte page is not shared.
  *
- * Called with page table lock held and i_mmap_rwsem held in write mode.
+ * called with page table lock held.
  *
  * returns: 1 successfully unmapped a shared pte page
  *          0 the underlying pte page is not shared, or it is the last user
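The restored comment above huge_pmd_share() makes a narrower claim than the
reverted one: the interval-tree search for a sibling's pmd page and the
population of the pud must happen inside one i_mmap_rwsem write section, or
two racing faults could each install a private pmd and miss the sharing. A
minimal user-space model of that find-or-create-under-one-lock shape follows;
names are invented and pthreads stand in for the kernel rwsem.

/* Sketch: because search and install happen in one write section, two
 * callers always end up sharing a single page. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_rwlock_t i_mmap_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static struct pmd_page { int refcount; } *shared;

/* Analogue of huge_pmd_share(): find a sibling's page or create one. */
static struct pmd_page *pmd_share(void)
{
        struct pmd_page *p;

        pthread_rwlock_wrlock(&i_mmap_rwsem);
        if (shared) {
                shared->refcount++;     /* found a sibling's page: share it */
        } else {
                shared = calloc(1, sizeof(*shared));
                shared->refcount = 1;   /* first sharer: populate the "pud" */
        }
        p = shared;
        pthread_rwlock_unlock(&i_mmap_rwsem);
        return p;
}

int main(void)
{
        struct pmd_page *a = pmd_share();
        struct pmd_page *b = pmd_share();

        printf("same page: %d, refcount: %d\n", a == b, b->refcount);
        return 0;
}

If the lock were dropped between the search and the install, a racing task
could miss the page just created by another and allocate its own, which is
exactly the "miss the sharing or select a bad pmd" failure the comment names.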

mm/memory-failure.c

Lines changed: 2 additions & 14 deletions
@@ -966,7 +966,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
         enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
         struct address_space *mapping;
         LIST_HEAD(tokill);
-        bool unmap_success = true;
+        bool unmap_success;
         int kill = 1, forcekill;
         struct page *hpage = *hpagep;
         bool mlocked = PageMlocked(hpage);
@@ -1028,19 +1028,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
         if (kill)
                 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);

-        if (!PageHuge(hpage)) {
-                unmap_success = try_to_unmap(hpage, ttu);
-        } else if (mapping) {
-                /*
-                 * For hugetlb pages, try_to_unmap could potentially call
-                 * huge_pmd_unshare.  Because of this, take semaphore in
-                 * write mode here and set TTU_RMAP_LOCKED to indicate we
-                 * have taken the lock at this higer level.
-                 */
-                i_mmap_lock_write(mapping);
-                unmap_success = try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
-                i_mmap_unlock_write(mapping);
-        }
+        unmap_success = try_to_unmap(hpage, ttu);
         if (!unmap_success)
                 pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
                        pfn, page_mapcount(hpage));

mm/migrate.c

Lines changed: 1 addition & 12 deletions
@@ -1324,19 +1324,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
                 goto put_anon;

         if (page_mapped(hpage)) {
-                struct address_space *mapping = page_mapping(hpage);
-
-                /*
-                 * try_to_unmap could potentially call huge_pmd_unshare.
-                 * Because of this, take semaphore in write mode here and
-                 * set TTU_RMAP_LOCKED to let lower levels know we have
-                 * taken the lock.
-                 */
-                i_mmap_lock_write(mapping);
                 try_to_unmap(hpage,
-                        TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
-                        TTU_RMAP_LOCKED);
-                i_mmap_unlock_write(mapping);
+                        TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
                 page_was_mapped = 1;
         }
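Both this hunk and the memory-failure one drop the TTU_RMAP_LOCKED convention,
under which a caller that already holds i_mmap_rwsem passes a flag so
try_to_unmap() skips taking the lock again; after the revert the callee always
takes it itself. A hedged sketch of that caller-holds-lock flag pattern in
general form (the function names and the flag value here are invented, not
the kernel's):

/* Sketch of the "caller already holds the lock" flag convention. */
#include <pthread.h>
#include <stdio.h>

#define TTU_RMAP_LOCKED (1u << 0)       /* invented value for illustration */

static pthread_mutex_t rmap_lock = PTHREAD_MUTEX_INITIALIZER;

static void unmap_one(unsigned int flags)
{
        /* Only take the lock if the caller has not already done so. */
        if (!(flags & TTU_RMAP_LOCKED))
                pthread_mutex_lock(&rmap_lock);

        puts("unmapping under rmap lock");

        if (!(flags & TTU_RMAP_LOCKED))
                pthread_mutex_unlock(&rmap_lock);
}

int main(void)
{
        /* After the revert: the callee manages the lock itself. */
        unmap_one(0);

        /* The reverted pattern: caller held the lock and passed the flag. */
        pthread_mutex_lock(&rmap_lock);
        unmap_one(TTU_RMAP_LOCKED);
        pthread_mutex_unlock(&rmap_lock);
        return 0;
}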

mm/rmap.c

Lines changed: 0 additions & 4 deletions
@@ -25,7 +25,6 @@
  *   page->flags PG_locked (lock_page)
  *     hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
  *       mapping->i_mmap_rwsem
- *         hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
  *           anon_vma->rwsem
  *             mm->page_table_lock or pte_lock
  *               zone_lru_lock (in mark_page_accessed, isolate_lru_page)
@@ -1379,9 +1378,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                 /*
                  * If sharing is possible, start and end will be adjusted
                  * accordingly.
-                 *
-                 * If called for a huge page, caller must hold i_mmap_rwsem
-                 * in write mode as it is possible to call huge_pmd_unshare.
                  */
                 adjust_range_if_pmd_sharing_possible(vma, &range.start,
                                                      &range.end);

mm/userfaultfd.c

Lines changed: 2 additions & 9 deletions
@@ -267,14 +267,10 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
                 VM_BUG_ON(dst_addr & ~huge_page_mask(h));

                 /*
-                 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
-                 * i_mmap_rwsem ensures the dst_pte remains valid even
-                 * in the case of shared pmds.  fault mutex prevents
-                 * races with other faulting threads.
+                 * Serialize via hugetlb_fault_mutex
                  */
-                mapping = dst_vma->vm_file->f_mapping;
-                i_mmap_lock_read(mapping);
                 idx = linear_page_index(dst_vma, dst_addr);
+                mapping = dst_vma->vm_file->f_mapping;
                 hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
                                                 idx, dst_addr);
                 mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -283,23 +279,20 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
                 dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
                 if (!dst_pte) {
                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
-                        i_mmap_unlock_read(mapping);
                         goto out_unlock;
                 }

                 err = -EEXIST;
                 dst_pteval = huge_ptep_get(dst_pte);
                 if (!huge_pte_none(dst_pteval)) {
                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
-                        i_mmap_unlock_read(mapping);
                         goto out_unlock;
                 }

                 err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
                                                dst_addr, src_addr, &page);

                 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
-                i_mmap_unlock_read(mapping);
                 vm_alloc_shared = vm_shared;

                 cond_resched();
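After the revert, hugetlb_fault_mutex_table is the only serialization left in
this path: faults on the same (mapping, index) pair hash to the same mutex and
are serialized, while faults on different pages usually proceed in parallel.
A rough user-space model of that hash-bucketed mutex pattern (the table size,
hash mix, and names are all invented for illustration):

/* Sketch of a fault-mutex hash table: one mutex per slot, chosen by
 * hashing the (mapping, index) pair. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define FAULT_MUTEX_SLOTS 256

static pthread_mutex_t fault_mutex_table[FAULT_MUTEX_SLOTS];

/* Analogue of hugetlb_fault_mutex_hash(): mix the mapping pointer and
 * the page index down to a table slot. */
static unsigned int fault_mutex_hash(const void *mapping, uint64_t idx)
{
        uint64_t h = (uintptr_t)mapping ^ (idx * 0x9e3779b97f4a7c15ULL);

        return (unsigned int)(h % FAULT_MUTEX_SLOTS);
}

int main(void)
{
        int dummy_mapping;
        unsigned int i, hash;

        for (i = 0; i < FAULT_MUTEX_SLOTS; i++)
                pthread_mutex_init(&fault_mutex_table[i], NULL);

        hash = fault_mutex_hash(&dummy_mapping, 42);
        pthread_mutex_lock(&fault_mutex_table[hash]);
        /* ...allocate and install the page for (mapping, 42) here... */
        printf("serialized on slot %u\n", hash);
        pthread_mutex_unlock(&fault_mutex_table[hash]);
        return 0;
}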
