
Commit d9cc948

Michal Hocko authored and torvalds committed
mm, hugetlb: integrate giga hugetlb more naturally to the allocation path
Gigantic hugetlb pages were grown into the hugetlb code as an alien species, with a lot of special casing. The allocation path is no exception, and unnecessarily so: the underlying allocator is different, but that is an implementation detail.

This patch unifies the hugetlb allocation path that prepares fresh pool pages. alloc_fresh_gigantic_page basically copies the alloc_fresh_huge_page logic, so we can move everything there. This simplifies set_max_huge_pages, which no longer has to care about which kind of huge page is being allocated.

Link: http://lkml.kernel.org/r/20180103093213.26329-3-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andrea Reale <ar@linux.vnet.ibm.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <zi.yan@cs.rutgers.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
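For orientation, here is a minimal standalone C sketch of the shape this gives the allocation path. It is not the kernel code (the real change is the diff below): struct hstate as written here, the order threshold, and the alloc_buddy()/alloc_gigantic()/alloc_fresh() helpers are simplified stand-ins that only mirror how alloc_fresh_huge_page() dispatches after this patch.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* toy pool descriptor; the real struct hstate carries much more state */
struct hstate { unsigned int order; };

/* arbitrary threshold for this sketch, not the kernel's definition */
static bool hstate_is_gigantic(const struct hstate *h)
{
        return h->order >= 10;
}

/* stand-in for __hugetlb_alloc_buddy_huge_page() */
static void *alloc_buddy(const struct hstate *h)
{
        return malloc((size_t)1 << h->order);
}

/* stand-in for alloc_gigantic_page() */
static void *alloc_gigantic(const struct hstate *h)
{
        return malloc((size_t)1 << h->order);
}

/*
 * One fresh-page routine: the gigantic case is just a different allocator
 * plus one extra preparation step, mirroring alloc_fresh_huge_page() after
 * this patch.
 */
static void *alloc_fresh(const struct hstate *h)
{
        void *page;

        if (hstate_is_gigantic(h))
                page = alloc_gigantic(h);
        else
                page = alloc_buddy(h);

        if (page && hstate_is_gigantic(h))
                printf("prep compound gigantic page, order %u\n", h->order);

        return page;
}

int main(void)
{
        struct hstate normal = { .order = 9 };
        struct hstate giga = { .order = 18 };

        /* the caller (set_max_huge_pages() in the kernel) no longer branches */
        free(alloc_fresh(&normal));
        free(alloc_fresh(&giga));
        return 0;
}

The point mirrored from the diff: callers request a fresh pool page from one routine, and only that routine knows which underlying allocator to use.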
1 parent af0fb9d commit d9cc948


mm/hugetlb.c

Lines changed: 14 additions & 41 deletions
@@ -1106,19 +1106,18 @@ static bool zone_spans_last_pfn(const struct zone *zone,
         return zone_spans_pfn(zone, last_pfn);
 }
 
-static struct page *alloc_gigantic_page(int nid, struct hstate *h)
+static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+                int nid, nodemask_t *nodemask)
 {
         unsigned int order = huge_page_order(h);
         unsigned long nr_pages = 1 << order;
         unsigned long ret, pfn, flags;
         struct zonelist *zonelist;
         struct zone *zone;
         struct zoneref *z;
-        gfp_t gfp_mask;
 
-        gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
         zonelist = node_zonelist(nid, gfp_mask);
-        for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), NULL) {
+        for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
                 spin_lock_irqsave(&zone->lock, flags);
 
                 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
@@ -1149,42 +1148,13 @@ static struct page *alloc_gigantic_page(int nid, struct hstate *h)
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
 
-static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
-{
-        struct page *page;
-
-        page = alloc_gigantic_page(nid, h);
-        if (page) {
-                prep_compound_gigantic_page(page, huge_page_order(h));
-                prep_new_huge_page(h, page, nid);
-                put_page(page); /* free it into the hugepage allocator */
-        }
-
-        return page;
-}
-
-static int alloc_fresh_gigantic_page(struct hstate *h,
-                                nodemask_t *nodes_allowed)
-{
-        struct page *page = NULL;
-        int nr_nodes, node;
-
-        for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
-                page = alloc_fresh_gigantic_page_node(h, node);
-                if (page)
-                        return 1;
-        }
-
-        return 0;
-}
-
 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
 static inline bool gigantic_page_supported(void) { return false; }
+static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+                int nid, nodemask_t *nodemask) { return NULL; }
 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
 static inline void destroy_compound_gigantic_page(struct page *page,
                                                 unsigned int order) { }
-static inline int alloc_fresh_gigantic_page(struct hstate *h,
-                                        nodemask_t *nodes_allowed) { return 0; }
 #endif
 
 static void update_and_free_page(struct hstate *h, struct page *page)
@@ -1410,8 +1380,12 @@ static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
         gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
 
         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
-                page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask,
-                                node, nodes_allowed);
+                if (hstate_is_gigantic(h))
+                        page = alloc_gigantic_page(h, gfp_mask,
+                                        node, nodes_allowed);
+                else
+                        page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask,
+                                        node, nodes_allowed);
                 if (page)
                         break;
 
@@ -1420,6 +1394,8 @@ static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
         if (!page)
                 return 0;
 
+        if (hstate_is_gigantic(h))
+                prep_compound_gigantic_page(page, huge_page_order(h));
         prep_new_huge_page(h, page, page_to_nid(page));
         put_page(page); /* free it into the hugepage allocator */
 
@@ -2307,10 +2283,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
                 /* yield cpu to avoid soft lockup */
                 cond_resched();
 
-                if (hstate_is_gigantic(h))
-                        ret = alloc_fresh_gigantic_page(h, nodes_allowed);
-                else
-                        ret = alloc_fresh_huge_page(h, nodes_allowed);
+                ret = alloc_fresh_huge_page(h, nodes_allowed);
                 spin_lock(&hugetlb_lock);
                 if (!ret)
                         goto out;
