
Commit b43a999

mjkravetz authored and torvalds committed
hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization
While looking at BUGs associated with invalid huge page map counts, it was discovered that a huge pte pointer could become 'invalid' and point to another task's page table. Consider the following:

A task takes a page fault on a shared hugetlbfs file and calls huge_pte_alloc to get a ptep. Suppose the returned ptep points to a shared pmd.

Now, another task truncates the hugetlbfs file. As part of truncation, it unmaps everyone who has the file mapped. If the range being truncated is covered by a shared pmd, huge_pmd_unshare will be called. For all but the last user of the shared pmd, huge_pmd_unshare will clear the pud pointing to the pmd. If the task in the middle of the page fault is not the last user, the ptep returned by huge_pte_alloc now points to another task's page table or worse. This leads to bad things such as incorrect page map/reference counts or invalid memory references.

To fix, expand the use of i_mmap_rwsem as follows:

- i_mmap_rwsem is held in read mode whenever huge_pmd_share is called. huge_pmd_share is only called via huge_pte_alloc, so callers of huge_pte_alloc take i_mmap_rwsem before calling. In addition, callers of huge_pte_alloc continue to hold the semaphore until finished with the ptep.
- i_mmap_rwsem is held in write mode whenever huge_pmd_unshare is called.

[mike.kravetz@oracle.com: add explicit check for mapping != null]
Link: http://lkml.kernel.org/r/20181218223557.5202-2-mike.kravetz@oracle.com
Fixes: 39dde65 ("shared page table for hugetlb page")
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Prakash Sangappa <prakash.sangappa@oracle.com>
Cc: Colin Ian King <colin.king@canonical.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 1ecc07f commit b43a999
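
As a reading aid, here is a minimal sketch (not part of the patch) of the read-mode half of the locking rule described in the commit message, assuming a simplified fault path; example_fault_path() and use_hugetlb_pte() are hypothetical stand-ins for real fault-handling code:

/*
 * Illustrative sketch only -- not code from this commit.  Callers of
 * huge_pte_alloc take i_mmap_rwsem in read mode and hold it until they
 * are finished with the returned ptep, so a shared pmd cannot be
 * unshared underneath them.  use_hugetlb_pte() is hypothetical.
 */
static vm_fault_t example_fault_path(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long sz = huge_page_size(hstate_vma(vma));
	pte_t *ptep;

	i_mmap_lock_read(mapping);		/* before huge_pte_alloc */
	ptep = huge_pte_alloc(mm, addr, sz);
	if (!ptep) {
		i_mmap_unlock_read(mapping);
		return VM_FAULT_OOM;
	}

	use_hugetlb_pte(ptep);			/* work with ptep while locked */

	i_mmap_unlock_read(mapping);		/* drop only when done with ptep */
	return 0;
}

The write-mode half of the rule is visible in the mm/memory-failure.c and mm/migrate.c hunks below, where i_mmap_lock_write() is taken around try_to_unmap() and TTU_RMAP_LOCKED tells the rmap walk that the lock is already held.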

5 files changed: +88 −20 lines changed


mm/hugetlb.c

Lines changed: 49 additions & 15 deletions
@@ -3238,6 +3238,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	struct page *ptepage;
 	unsigned long addr;
 	int cow;
+	struct address_space *mapping = vma->vm_file->f_mapping;
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
 	struct mmu_notifier_range range;
@@ -3249,13 +3250,23 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		mmu_notifier_range_init(&range, src, vma->vm_start,
 					vma->vm_end);
 		mmu_notifier_invalidate_range_start(&range);
+	} else {
+		/*
+		 * For shared mappings i_mmap_rwsem must be held to call
+		 * huge_pte_alloc, otherwise the returned ptep could go
+		 * away if part of a shared pmd and another thread calls
+		 * huge_pmd_unshare.
+		 */
+		i_mmap_lock_read(mapping);
 	}
 
 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
 		spinlock_t *src_ptl, *dst_ptl;
+
 		src_pte = huge_pte_offset(src, addr, sz);
 		if (!src_pte)
 			continue;
+
 		dst_pte = huge_pte_alloc(dst, addr, sz);
 		if (!dst_pte) {
 			ret = -ENOMEM;
@@ -3326,6 +3337,8 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 
 	if (cow)
 		mmu_notifier_invalidate_range_end(&range);
+	else
+		i_mmap_unlock_read(mapping);
 
 	return ret;
 }
@@ -3771,14 +3784,18 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 			};
 
 			/*
-			 * hugetlb_fault_mutex must be dropped before
-			 * handling userfault. Reacquire after handling
-			 * fault to make calling code simpler.
+			 * hugetlb_fault_mutex and i_mmap_rwsem must be
+			 * dropped before handling userfault. Reacquire
+			 * after handling fault to make calling code simpler.
 			 */
 			hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
 							idx, haddr);
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+			i_mmap_unlock_read(mapping);
+
 			ret = handle_userfault(&vmf, VM_UFFD_MISSING);
+
+			i_mmap_lock_read(mapping);
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
 			goto out;
 		}
@@ -3926,27 +3943,43 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 	if (ptep) {
+		/*
+		 * Since we hold no locks, ptep could be stale. That is
+		 * OK as we are only making decisions based on content and
+		 * not actually modifying content here.
+		 */
 		entry = huge_ptep_get(ptep);
 		if (unlikely(is_hugetlb_entry_migration(entry))) {
 			migration_entry_wait_huge(vma, mm, ptep);
 			return 0;
 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
 			return VM_FAULT_HWPOISON_LARGE |
 				VM_FAULT_SET_HINDEX(hstate_index(h));
-	} else {
-		ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
-		if (!ptep)
-			return VM_FAULT_OOM;
 	}
 
+	/*
+	 * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
+	 * until finished with ptep. This prevents huge_pmd_unshare from
+	 * being called elsewhere and making the ptep no longer valid.
+	 *
+	 * ptep could have already be assigned via huge_pte_offset. That
+	 * is OK, as huge_pte_alloc will return the same value unless
+	 * something changed.
+	 */
 	mapping = vma->vm_file->f_mapping;
-	idx = vma_hugecache_offset(h, vma, haddr);
+	i_mmap_lock_read(mapping);
+	ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
+	if (!ptep) {
+		i_mmap_unlock_read(mapping);
+		return VM_FAULT_OOM;
+	}
 
 	/*
 	 * Serialize hugepage allocation and instantiation, so that we don't
 	 * get spurious allocation failures if two CPUs race to instantiate
 	 * the same page in the page cache.
 	 */
+	idx = vma_hugecache_offset(h, vma, haddr);
 	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
@@ -4034,6 +4067,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 out_mutex:
 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+	i_mmap_unlock_read(mapping);
 	/*
 	 * Generally it's safe to hold refcount during waiting page lock. But
 	 * here we just wait to defer the next page fault to avoid busy loop and
@@ -4638,10 +4672,12 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
  * and returns the corresponding pte. While this is not necessary for the
  * !shared pmd case because we can allocate the pmd later as well, it makes the
- * code much cleaner. pmd allocation is essential for the shared case because
- * pud has to be populated inside the same i_mmap_rwsem section - otherwise
- * racing tasks could either miss the sharing (see huge_pte_offset) or select a
- * bad pmd for sharing.
+ * code much cleaner.
+ *
+ * This routine must be called with i_mmap_rwsem held in at least read mode.
+ * For hugetlbfs, this prevents removal of any page table entries associated
+ * with the address space. This is important as we are setting up sharing
+ * based on existing page table entries (mappings).
  */
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 {
@@ -4658,7 +4694,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 	if (!vma_shareable(vma, addr))
 		return (pte_t *)pmd_alloc(mm, pud, addr);
 
-	i_mmap_lock_write(mapping);
 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
 		if (svma == vma)
 			continue;
@@ -4688,7 +4723,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 	spin_unlock(ptl);
 out:
 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
-	i_mmap_unlock_write(mapping);
 	return pte;
 }
 
@@ -4699,7 +4733,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
  * indicated by page_count > 1, unmap is achieved by clearing pud and
  * decrementing the ref count. If count == 1, the pte page is not shared.
  *
- * called with page table lock held.
+ * Called with page table lock held and i_mmap_rwsem held in write mode.
  *
  * returns: 1 successfully unmapped a shared pte page
  *	    0 the underlying pte page is not shared, or it is the last user

mm/memory-failure.c

Lines changed: 14 additions & 2 deletions
@@ -966,7 +966,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
 	struct address_space *mapping;
 	LIST_HEAD(tokill);
-	bool unmap_success;
+	bool unmap_success = true;
 	int kill = 1, forcekill;
 	struct page *hpage = *hpagep;
 	bool mlocked = PageMlocked(hpage);
@@ -1028,7 +1028,19 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	if (kill)
 		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
 
-	unmap_success = try_to_unmap(hpage, ttu);
+	if (!PageHuge(hpage)) {
+		unmap_success = try_to_unmap(hpage, ttu);
+	} else if (mapping) {
+		/*
+		 * For hugetlb pages, try_to_unmap could potentially call
+		 * huge_pmd_unshare. Because of this, take semaphore in
+		 * write mode here and set TTU_RMAP_LOCKED to indicate we
+		 * have taken the lock at this higer level.
+		 */
+		i_mmap_lock_write(mapping);
+		unmap_success = try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
+		i_mmap_unlock_write(mapping);
+	}
 	if (!unmap_success)
 		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
 		       pfn, page_mapcount(hpage));

mm/migrate.c

Lines changed: 12 additions & 1 deletion
@@ -1324,8 +1324,19 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		goto put_anon;
 
 	if (page_mapped(hpage)) {
+		struct address_space *mapping = page_mapping(hpage);
+
+		/*
+		 * try_to_unmap could potentially call huge_pmd_unshare.
+		 * Because of this, take semaphore in write mode here and
+		 * set TTU_RMAP_LOCKED to let lower levels know we have
+		 * taken the lock.
+		 */
+		i_mmap_lock_write(mapping);
 		try_to_unmap(hpage,
-			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
+			TTU_RMAP_LOCKED);
+		i_mmap_unlock_write(mapping);
 		page_was_mapped = 1;
 	}

mm/rmap.c

Lines changed: 4 additions & 0 deletions
@@ -25,6 +25,7 @@
  *     page->flags PG_locked (lock_page)
  *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
  *         mapping->i_mmap_rwsem
+ *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
  *           anon_vma->rwsem
  *             mm->page_table_lock or pte_lock
  *               zone_lru_lock (in mark_page_accessed, isolate_lru_page)
@@ -1378,6 +1379,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		/*
 		 * If sharing is possible, start and end will be adjusted
 		 * accordingly.
+		 *
+		 * If called for a huge page, caller must hold i_mmap_rwsem
+		 * in write mode as it is possible to call huge_pmd_unshare.
 		 */
 		adjust_range_if_pmd_sharing_possible(vma, &range.start,
 						     &range.end);

mm/userfaultfd.c

Lines changed: 9 additions & 2 deletions
@@ -267,10 +267,14 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 		VM_BUG_ON(dst_addr & ~huge_page_mask(h));
 
 		/*
-		 * Serialize via hugetlb_fault_mutex
+		 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
+		 * i_mmap_rwsem ensures the dst_pte remains valid even
+		 * in the case of shared pmds. fault mutex prevents
+		 * races with other faulting threads.
 		 */
-		idx = linear_page_index(dst_vma, dst_addr);
 		mapping = dst_vma->vm_file->f_mapping;
+		i_mmap_lock_read(mapping);
+		idx = linear_page_index(dst_vma, dst_addr);
 		hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
 						idx, dst_addr);
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -279,20 +283,23 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
 		if (!dst_pte) {
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+			i_mmap_unlock_read(mapping);
 			goto out_unlock;
 		}
 
 		err = -EEXIST;
 		dst_pteval = huge_ptep_get(dst_pte);
 		if (!huge_pte_none(dst_pteval)) {
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+			i_mmap_unlock_read(mapping);
 			goto out_unlock;
 		}
 
 		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
 					       dst_addr, src_addr, &page);
 
 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+		i_mmap_unlock_read(mapping);
 		vm_alloc_shared = vm_shared;
 
 		cond_resched();
