
Commit de466bd

Mel Gorman authored and torvalds committed
mm: numa: avoid unnecessary disruption of NUMA hinting during migration
do_huge_pmd_numa_page() handles the case where there is a parallel THP migration. However, by the time that case is checked, the NUMA hinting information has already been disrupted. This patch adds an earlier check, with some helpers.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Alex Thorlton <athorlton@sgi.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 1667918 commit de466bd

File tree

3 files changed (+37, -6 lines)
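Before the per-file diffs, a rough illustration of the idea behind the new helpers. This is not the kernel code itself: struct mock_page, the spin-wait, and every mock_* name are stand-ins invented here. A THP is treated as "under migration" while the migration path holds the page lock, so the NUMA hinting fault backs off and waits instead of rewriting the PMD:

/*
 * Simplified user-space mock of the idea behind this commit. All names
 * prefixed mock_ are invented for illustration; the kernel versions in
 * mm/migrate.c operate on struct page and use wait_on_page_locked().
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_page {
	bool locked;	/* held by the migration path while it copies the THP */
};

/* A THP is considered "migrating" while its page is locked. */
static bool mock_pmd_trans_migrating(const struct mock_page *page)
{
	return page->locked;
}

/* Wait for the in-flight migration rather than disturbing NUMA hints. */
static void mock_wait_migrate_huge_page(const struct mock_page *page)
{
	while (page->locked)
		;	/* the kernel sleeps in wait_on_page_locked() instead */
}

static void mock_huge_pmd_numa_fault(const struct mock_page *page)
{
	if (mock_pmd_trans_migrating(page)) {
		/* Early bail-out added by this patch: wait, then retry the fault. */
		mock_wait_migrate_huge_page(page);
		return;
	}
	/* ...normal NUMA hinting fault handling would continue here... */
	printf("handled NUMA hinting fault\n");
}

int main(void)
{
	struct mock_page page = { .locked = false };

	mock_huge_pmd_numa_fault(&page);
	return 0;
}

The point of checking before anything is invalidated is that, if the fault loses the race with a migration, the NUMA hinting information is left intact for the retry.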

include/linux/migrate.h

Lines changed: 9 additions & 0 deletions
@@ -90,10 +90,19 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 #endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_NUMA_BALANCING
+extern bool pmd_trans_migrating(pmd_t pmd);
+extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
 extern int migrate_misplaced_page(struct page *page,
 				  struct vm_area_struct *vma, int node);
 extern bool migrate_ratelimited(int node);
 #else
+static inline bool pmd_trans_migrating(pmd_t pmd)
+{
+	return false;
+}
+static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+}
 static inline int migrate_misplaced_page(struct page *page,
 				  struct vm_area_struct *vma, int node)
 {

mm/huge_memory.c

Lines changed: 16 additions & 6 deletions
@@ -882,6 +882,10 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		ret = 0;
 		goto out_unlock;
 	}
+
+	/* mmap_sem prevents this happening but warn if that changes */
+	WARN_ON(pmd_trans_migrating(pmd));
+
 	if (unlikely(pmd_trans_splitting(pmd))) {
 		/* split huge page running from under us */
 		spin_unlock(src_ptl);
@@ -1299,6 +1303,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (unlikely(!pmd_same(pmd, *pmdp)))
 		goto out_unlock;
 
+	/*
+	 * If there are potential migrations, wait for completion and retry
+	 * without disrupting NUMA hinting information. Do not relock and
+	 * check_same as the page may no longer be mapped.
+	 */
+	if (unlikely(pmd_trans_migrating(*pmdp))) {
+		spin_unlock(ptl);
+		wait_migrate_huge_page(vma->anon_vma, pmdp);
+		goto out;
+	}
+
 	page = pmd_page(pmd);
 	BUG_ON(is_huge_zero_page(page));
 	page_nid = page_to_nid(page);
@@ -1329,12 +1344,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto clear_pmdnuma;
 	}
 
-	/*
-	 * If there are potential migrations, wait for completion and retry. We
-	 * do not relock and check_same as the page may no longer be mapped.
-	 * Furtermore, even if the page is currently misplaced, there is no
-	 * guarantee it is still misplaced after the migration completes.
-	 */
+	/* Migration could have started since the pmd_trans_migrating check */
 	if (!page_locked) {
 		spin_unlock(ptl);
 		wait_on_page_locked(page);

mm/migrate.c

Lines changed: 12 additions & 0 deletions
@@ -1655,6 +1655,18 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 	return 1;
 }
 
+bool pmd_trans_migrating(pmd_t pmd)
+{
+	struct page *page = pmd_page(pmd);
+	return PageLocked(page);
+}
+
+void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+	struct page *page = pmd_page(*pmd);
+	wait_on_page_locked(page);
+}
+
 /*
  * Attempt to migrate a misplaced page to the specified destination
  * node. Caller is expected to have an elevated reference count on
