mm: memcontrol: decouple reference counting from page accounting

The reference counting of a memcg is currently coupled directly to how
many 4k pages are charged to it.  This doesn't work well with Roman's new
slab controller, which maintains pools of objects and doesn't want to keep
an extra balance sheet for the pages backing those objects.

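Purely as an illustration (not part of this patch, and with invented names), the old scheme boils down to something like this compilable userspace sketch, where the reference count simply mirrors the number of charged pages:

#include <stdio.h>

/* Stand-ins for the css refcount and the page counter of one memcg. */
struct memcg_sketch {
	long refcnt;
	long nr_pages;
};

/* Old model: every charged page pins one extra reference. */
static void charge(struct memcg_sketch *m, long nr_pages)
{
	m->nr_pages += nr_pages;
	m->refcnt += nr_pages;		/* css_get_many(&memcg->css, nr_pages) */
}

static void uncharge(struct memcg_sketch *m, long nr_pages)
{
	m->nr_pages -= nr_pages;
	m->refcnt -= nr_pages;		/* css_put_many(&memcg->css, nr_pages) */
}

int main(void)
{
	struct memcg_sketch m = { .refcnt = 1, .nr_pages = 0 };

	charge(&m, 512);		/* e.g. one 2MB THP worth of 4k pages */
	printf("refcnt=%ld nr_pages=%ld\n", m.refcnt, m.nr_pages);
	uncharge(&m, 512);
	printf("refcnt=%ld nr_pages=%ld\n", m.refcnt, m.nr_pages);
	return 0;
}
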
This unusual refcounting design (reference counts usually track pointers
to an object) is only for historical reasons: memcg used to not take any
css references and simply stalled offlining until all charges had been
reparented and the page counters had dropped to zero.  When we got rid of
the reparenting requirement, the simple mechanical translation was to take
a reference for every charge.

More historical context can be found in commit e8ea14c ("mm:
memcontrol: take a css reference for each charged page"), commit
64f2199 ("mm: memcontrol: remove obsolete kmemcg pinning tricks") and
commit b205256 ("mm: memcontrol: continue cache reclaim from offlined
groups").

The new slab controller exposes the limitations in this scheme, so let's
switch it to a more idiomatic reference counting model based on actual
kernel pointers to the memcg (a small sketch of this model follows the list below):

- The per-cpu stock holds a reference to the memcg it's caching.

- User pages hold a reference for their page->mem_cgroup. Transparent
  huge pages will no longer acquire tail references in advance; we'll
  acquire them if needed during the split.

- Kernel pages hold a reference for their page->mem_cgroup

- Pages allocated in the root cgroup will acquire and release css
  references for simplicity. css_get() and css_put() optimize that.

- The current memcg_charge_slab() already hacked around the per-charge
  references; this change gets rid of that as well.

- TCP accounting will handle its reference in mem_cgroup_sk_{alloc,free}()

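For contrast, and again only as an invented, compilable userspace sketch rather than code from this patch, the new model takes exactly one reference per pointer to the memcg when the pointer is installed, and drops it when the pointer is cleared:

#include <assert.h>
#include <stddef.h>

/* Stand-in for the css refcount of one memcg. */
struct memcg_sketch {
	long refcnt;
};

static void css_get_sketch(struct memcg_sketch *m) { m->refcnt++; }
static void css_put_sketch(struct memcg_sketch *m) { m->refcnt--; }

/* Stand-in for page->mem_cgroup. */
struct page_sketch {
	struct memcg_sketch *mem_cgroup;
};

/* Installing the pointer takes one reference, no matter the page order. */
static void commit_charge(struct page_sketch *page, struct memcg_sketch *m)
{
	css_get_sketch(m);
	page->mem_cgroup = m;
}

/* Clearing the pointer drops that same single reference. */
static void uncharge(struct page_sketch *page)
{
	struct memcg_sketch *m = page->mem_cgroup;

	page->mem_cgroup = NULL;
	css_put_sketch(m);
}

int main(void)
{
	struct memcg_sketch m = { .refcnt = 1 };
	struct page_sketch head = { NULL };

	commit_charge(&head, &m);	/* one pointer, one reference */
	assert(m.refcnt == 2);
	uncharge(&head);
	assert(m.refcnt == 1);
	return 0;
}
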
Roman:
1) Rebased on top of the current mm tree: added css_get() in
   mem_cgroup_charge(), dropped mem_cgroup_try_charge() part
2) I've reformatted commit references in the commit log to make
   checkpatch.pl happy.

[hughd@google.com: remove css_put_many() from __mem_cgroup_clear_mc()]
  Link: http://lkml.kernel.org/r/alpine.LSU.2.11.2007302011450.2347@eggly.anvils

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Roman Gushchin <guro@fb.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Link: http://lkml.kernel.org/r/20200623174037.3951353-6-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
hnaz authored and torvalds committed Aug 7, 2020
1 parent 4138fdf commit 1a3e1f4
Showing 2 changed files with 21 additions and 20 deletions.
39 changes: 21 additions & 18 deletions mm/memcontrol.c
@@ -2094,13 +2094,17 @@ static void drain_stock(struct memcg_stock_pcp *stock)
 {
 	struct mem_cgroup *old = stock->cached;
 
+	if (!old)
+		return;
+
 	if (stock->nr_pages) {
 		page_counter_uncharge(&old->memory, stock->nr_pages);
 		if (do_memsw_account())
 			page_counter_uncharge(&old->memsw, stock->nr_pages);
-		css_put_many(&old->css, stock->nr_pages);
 		stock->nr_pages = 0;
 	}
+
+	css_put(&old->css);
 	stock->cached = NULL;
 }

@@ -2136,6 +2140,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached != memcg) { /* reset if necessary */
 		drain_stock(stock);
+		css_get(&memcg->css);
 		stock->cached = memcg;
 	}
 	stock->nr_pages += nr_pages;
@@ -2594,12 +2599,10 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	page_counter_charge(&memcg->memory, nr_pages);
 	if (do_memsw_account())
 		page_counter_charge(&memcg->memsw, nr_pages);
-	css_get_many(&memcg->css, nr_pages);
 
 	return 0;
 
 done_restock:
-	css_get_many(&memcg->css, batch);
 	if (batch > nr_pages)
 		refill_stock(memcg, batch - nr_pages);

@@ -2657,8 +2660,6 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
 	page_counter_uncharge(&memcg->memory, nr_pages);
 	if (do_memsw_account())
 		page_counter_uncharge(&memcg->memsw, nr_pages);
-
-	css_put_many(&memcg->css, nr_pages);
 }
 #endif

@@ -2966,6 +2967,7 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
 		if (!ret) {
 			page->mem_cgroup = memcg;
 			__SetPageKmemcg(page);
+			return 0;
 		}
 	}
 	css_put(&memcg->css);
@@ -2988,12 +2990,11 @@ void __memcg_kmem_uncharge_page(struct page *page, int order)
 	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
 	__memcg_kmem_uncharge(memcg, nr_pages);
 	page->mem_cgroup = NULL;
+	css_put(&memcg->css);
 
 	/* slab pages do not have PageKmemcg flag set */
 	if (PageKmemcg(page))
 		__ClearPageKmemcg(page);
-
-	css_put_many(&memcg->css, nr_pages);
 }
 #endif /* CONFIG_MEMCG_KMEM */

@@ -3005,13 +3006,16 @@ void __memcg_kmem_uncharge_page(struct page *page, int order)
  */
 void mem_cgroup_split_huge_fixup(struct page *head)
 {
+	struct mem_cgroup *memcg = head->mem_cgroup;
 	int i;
 
 	if (mem_cgroup_disabled())
 		return;
 
-	for (i = 1; i < HPAGE_PMD_NR; i++)
-		head[i].mem_cgroup = head->mem_cgroup;
+	for (i = 1; i < HPAGE_PMD_NR; i++) {
+		css_get(&memcg->css);
+		head[i].mem_cgroup = memcg;
+	}
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

@@ -5452,7 +5456,10 @@ static int mem_cgroup_move_account(struct page *page,
 	 */
 	smp_mb();
 
-	page->mem_cgroup = to; 	/* caller should have done css_get */
+	css_get(&to->css);
+	css_put(&from->css);
+
+	page->mem_cgroup = to;
 
 	__unlock_page_memcg(from);

@@ -5673,8 +5680,6 @@ static void __mem_cgroup_clear_mc(void)
 		if (!mem_cgroup_is_root(mc.to))
 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
 
-		css_put_many(&mc.to->css, mc.moved_swap);
-
 		mc.moved_swap = 0;
 	}
 	memcg_oom_recover(from);
@@ -6502,6 +6507,7 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 	if (ret)
 		goto out_put;
 
+	css_get(&memcg->css);
 	commit_charge(page, memcg);
 
 	local_irq_disable();
@@ -6556,9 +6562,6 @@ static void uncharge_batch(const struct uncharge_gather *ug)
 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
 	memcg_check_events(ug->memcg, ug->dummy_page);
 	local_irq_restore(flags);
-
-	if (!mem_cgroup_is_root(ug->memcg))
-		css_put_many(&ug->memcg->css, ug->nr_pages);
 }
 
 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
@@ -6596,6 +6599,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
 
 	ug->dummy_page = page;
 	page->mem_cgroup = NULL;
+	css_put(&ug->memcg->css);
 }
 
 static void uncharge_list(struct list_head *page_list)
@@ -6701,8 +6705,8 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 	page_counter_charge(&memcg->memory, nr_pages);
 	if (do_memsw_account())
 		page_counter_charge(&memcg->memsw, nr_pages);
-	css_get_many(&memcg->css, nr_pages);
 
+	css_get(&memcg->css);
 	commit_charge(newpage, memcg);
 
 	local_irq_save(flags);
@@ -6939,8 +6943,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
 	memcg_check_events(memcg, page);
 
-	if (!mem_cgroup_is_root(memcg))
-		css_put_many(&memcg->css, nr_entries);
+	css_put(&memcg->css);
 }
 
 /**
2 changes: 0 additions & 2 deletions mm/slab.h
@@ -402,9 +402,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
 	lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
 	mod_lruvec_state(lruvec, cache_vmstat_idx(s), nr_pages << PAGE_SHIFT);
 
-	/* transer try_charge() page references to kmem_cache */
 	percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
-	css_put_many(&memcg->css, nr_pages);
 out:
 	css_put(&memcg->css);
 	return ret;