Skip to content

Commit 2845426

Browse files
hnaz authored and torvalds committed
mm: memcontrol: implement lruvec stat functions on top of each other
The implementation of the lruvec stat functions and their variants for accounting through a page, or accounting from a preemptible context, are mostly identical and needlessly repetitive. Implement the lruvec_page functions by looking up the page's lruvec and then using the lruvec function. Implement the functions for preemptible contexts by disabling preemption before calling the atomic context functions. Link: http://lkml.kernel.org/r/20171103153336.24044-2-hannes@cmpxchg.org Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com> Cc: Michal Hocko <mhocko@suse.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent c9019e9 commit 2845426

File tree

1 file changed

+22
-22
lines changed

1 file changed

+22
-22
lines changed

include/linux/memcontrol.h

Lines changed: 22 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -569,51 +569,51 @@ static inline void __mod_lruvec_state(struct lruvec *lruvec,
569569
{
570570
struct mem_cgroup_per_node *pn;
571571

572+
/* Update node */
572573
__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
574+
573575
if (mem_cgroup_disabled())
574576
return;
577+
575578
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
579+
580+
/* Update memcg */
576581
__mod_memcg_state(pn->memcg, idx, val);
582+
583+
/* Update lruvec */
577584
__this_cpu_add(pn->lruvec_stat->count[idx], val);
578585
}
579586

580587
static inline void mod_lruvec_state(struct lruvec *lruvec,
581588
enum node_stat_item idx, int val)
582589
{
583-
struct mem_cgroup_per_node *pn;
584-
585-
mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
586-
if (mem_cgroup_disabled())
587-
return;
588-
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
589-
mod_memcg_state(pn->memcg, idx, val);
590-
this_cpu_add(pn->lruvec_stat->count[idx], val);
590+
preempt_disable();
591+
__mod_lruvec_state(lruvec, idx, val);
592+
preempt_enable();
591593
}
592594

593595
static inline void __mod_lruvec_page_state(struct page *page,
594596
enum node_stat_item idx, int val)
595597
{
596-
struct mem_cgroup_per_node *pn;
598+
pg_data_t *pgdat = page_pgdat(page);
599+
struct lruvec *lruvec;
597600

598-
__mod_node_page_state(page_pgdat(page), idx, val);
599-
if (mem_cgroup_disabled() || !page->mem_cgroup)
601+
/* Untracked pages have no memcg, no lruvec. Update only the node */
602+
if (!page->mem_cgroup) {
603+
__mod_node_page_state(pgdat, idx, val);
600604
return;
601-
__mod_memcg_state(page->mem_cgroup, idx, val);
602-
pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
603-
__this_cpu_add(pn->lruvec_stat->count[idx], val);
605+
}
606+
607+
lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
608+
__mod_lruvec_state(lruvec, idx, val);
604609
}
605610

606611
static inline void mod_lruvec_page_state(struct page *page,
607612
enum node_stat_item idx, int val)
608613
{
609-
struct mem_cgroup_per_node *pn;
610-
611-
mod_node_page_state(page_pgdat(page), idx, val);
612-
if (mem_cgroup_disabled() || !page->mem_cgroup)
613-
return;
614-
mod_memcg_state(page->mem_cgroup, idx, val);
615-
pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
616-
this_cpu_add(pn->lruvec_stat->count[idx], val);
614+
preempt_disable();
615+
__mod_lruvec_page_state(page, idx, val);
616+
preempt_enable();
617617
}
618618

619619
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,

0 commit comments

Comments
 (0)