
Commit af0fb9d

Michal Hocko authored and torvalds committed
mm, hugetlb: unify core page allocation accounting and initialization
Patch series "mm, hugetlb: allocation API and migration improvements".

Motivation: this is a follow-up to [3] for the allocation API and to [4] for the hugetlb migration. It wasn't really easy to split those into two separate patch series as they share some code.

My primary motivation for touching this code is to make gigantic page migration work. The giga page allocation code is just too fragile and hacked into the hugetlb code right now. This series tries to move giga pages closer to being a first-class citizen. We are not there yet, but having 5 patches is quite a lot already, and they will already make the code much easier to follow. I will come with other changes on top after this sees some review.

The first two patches should be trivial to review. The third patch changes the way we migrate huge pages. Newly allocated pages are subject to the overcommit check and participate in surplus accounting, which is quite unfortunate, as the changelog explains. This patch doesn't change anything wrt. giga pages.

Patch #4 removes the surplus accounting hack from __alloc_surplus_huge_page. I hope I didn't miss anything there, and a deeper review is really due.

Patch #5 finally unifies the allocation paths, so giga pages shouldn't be special anymore. There is also some renaming going on.

This patch (of 6):

The hugetlb allocator has two entry points to the page allocator:
 - alloc_fresh_huge_page_node
 - __hugetlb_alloc_buddy_huge_page

The two differ very subtly in two aspects. The first one doesn't care about the HTLB_BUDDY_* stats and it doesn't initialize the huge page. prep_new_huge_page is not used because, besides initializing hugetlb-specific state, it also calls put_page and releases the page to the hugetlb pool, which is not what is required in some contexts. This makes things more complicated than necessary.

Simplify things by a) removing the page allocator entry point duplication and keeping only __hugetlb_alloc_buddy_huge_page, and b) making prep_new_huge_page more reusable by removing the put_page which moves the page to the allocator pool. All current callers are updated to call put_page explicitly. Later patches will add new callers which won't need it.

This patch shouldn't introduce any functional change.

Link: http://lkml.kernel.org/r/20180103093213.26329-2-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andrea Reale <ar@linux.vnet.ibm.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <zi.yan@cs.rutgers.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
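
As a condensed reading aid (a sketch reconstructed from the hunks in this commit, a fragment of mm/hugetlb.c rather than a standalone or authoritative listing), this is roughly how alloc_fresh_huge_page() reads after the patch: the single __hugetlb_alloc_buddy_huge_page() entry point performs the buddy allocation and the HTLB_BUDDY_* accounting, while the caller runs prep_new_huge_page() and then an explicit put_page() to release the page into the hugetlb pool.

static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
        struct page *page;
        int nr_nodes, node;
        gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

        /* walk the allowed nodes in the interleaved allocation order */
        for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
                page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask,
                                node, nodes_allowed);
                if (page)
                        break;
        }

        if (!page)
                return 0;

        /* initialize hugetlb metadata; prep_new_huge_page() no longer releases the page */
        prep_new_huge_page(h, page, page_to_nid(page));
        put_page(page); /* free it into the hugepage allocator */

        return 1;
}

The same pairing of prep_new_huge_page() with an explicit put_page() is applied to the gigantic page and bootmem preallocation paths in the diff below.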
1 parent 1ab5c05 commit af0fb9d

File tree

1 file changed: +29, -32 lines


mm/hugetlb.c

Lines changed: 29 additions & 32 deletions
@@ -1157,6 +1157,7 @@ static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
 	if (page) {
 		prep_compound_gigantic_page(page, huge_page_order(h));
 		prep_new_huge_page(h, page, nid);
+		put_page(page); /* free it into the hugepage allocator */
 	}
 
 	return page;
@@ -1304,7 +1305,6 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 	h->nr_huge_pages++;
 	h->nr_huge_pages_node[nid]++;
 	spin_unlock(&hugetlb_lock);
-	put_page(page); /* free it into the hugepage allocator */
 }
 
 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
@@ -1381,41 +1381,49 @@ pgoff_t __basepage_index(struct page *page)
 	return (index << compound_order(page_head)) + compound_idx;
 }
 
-static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
+static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
+		gfp_t gfp_mask, int nid, nodemask_t *nmask)
 {
+	int order = huge_page_order(h);
 	struct page *page;
 
-	page = __alloc_pages_node(nid,
-		htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
-				__GFP_RETRY_MAYFAIL|__GFP_NOWARN,
-		huge_page_order(h));
-	if (page) {
-		prep_new_huge_page(h, page, nid);
-	}
+	gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
+	if (nid == NUMA_NO_NODE)
+		nid = numa_mem_id();
+	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
+	if (page)
+		__count_vm_event(HTLB_BUDDY_PGALLOC);
+	else
+		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
 
 	return page;
 }
 
+/*
+ * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
+ * manner.
+ */
 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
 {
 	struct page *page;
 	int nr_nodes, node;
-	int ret = 0;
+	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
 
 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
-		page = alloc_fresh_huge_page_node(h, node);
-		if (page) {
-			ret = 1;
+		page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask,
+				node, nodes_allowed);
+		if (page)
 			break;
-		}
+
 	}
 
-	if (ret)
-		count_vm_event(HTLB_BUDDY_PGALLOC);
-	else
-		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
+	if (!page)
+		return 0;
 
-	return ret;
+	prep_new_huge_page(h, page, page_to_nid(page));
+	put_page(page); /* free it into the hugepage allocator */
+
+	return 1;
 }
 
 /*
@@ -1523,17 +1531,6 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 	return rc;
 }
 
-static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
-		gfp_t gfp_mask, int nid, nodemask_t *nmask)
-{
-	int order = huge_page_order(h);
-
-	gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
-	if (nid == NUMA_NO_NODE)
-		nid = numa_mem_id();
-	return __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
-}
-
 static struct page *__alloc_buddy_huge_page(struct hstate *h, gfp_t gfp_mask,
 		int nid, nodemask_t *nmask)
 {
@@ -1589,11 +1586,9 @@ static struct page *__alloc_buddy_huge_page(struct hstate *h, gfp_t gfp_mask,
 		 */
 		h->nr_huge_pages_node[r_nid]++;
 		h->surplus_huge_pages_node[r_nid]++;
-		__count_vm_event(HTLB_BUDDY_PGALLOC);
 	} else {
 		h->nr_huge_pages--;
 		h->surplus_huge_pages--;
-		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
 	}
 	spin_unlock(&hugetlb_lock);
 
@@ -2148,6 +2143,8 @@ static void __init gather_bootmem_prealloc(void)
 		prep_compound_huge_page(page, h->order);
 		WARN_ON(PageReserved(page));
 		prep_new_huge_page(h, page, page_to_nid(page));
+		put_page(page); /* free it into the hugepage allocator */
+
 		/*
 		 * If we had gigantic hugepages allocated at boot time, we need
 		 * to restore the 'stolen' pages to totalram_pages in order to

0 commit comments
