
Commit 41ea28d

yosrym93 authored and gregkh committed
mm: memcg: optimize parent iteration in memcg_rstat_updated()
commit 9cee7e8 upstream.

In memcg_rstat_updated(), we iterate the memcg being updated and its parents to update memcg->vmstats_percpu->stats_updates in the fast path (i.e. no atomic updates). According to my math, this is 3 memory loads (and potentially 3 cache misses) per memcg:
- Load the address of memcg->vmstats_percpu.
- Load vmstats_percpu->stats_updates (based on some percpu calculation).
- Load the address of the parent memcg.

Avoid most of the cache misses by caching a pointer from each struct memcg_vmstats_percpu to its parent on the corresponding CPU. In this case, for the first memcg we have 2 memory loads (same as above):
- Load the address of memcg->vmstats_percpu.
- Load vmstats_percpu->stats_updates (based on some percpu calculation).

Then for each additional memcg, we need a single load to get the parent's stats_updates directly. This reduces the number of loads from O(3N) to O(2+N) -- where N is the number of memcgs we need to iterate.

Additionally, stash a pointer to memcg->vmstats in each struct memcg_vmstats_percpu such that we can access the atomic counter that all CPUs fold into, memcg->vmstats->stats_updates. memcg_should_flush_stats() is changed to memcg_vmstats_needs_flush() to accept a struct memcg_vmstats pointer accordingly.

In struct memcg_vmstats_percpu, make sure both pointers together with stats_updates live on the same cacheline. Finally, update mem_cgroup_alloc() to take in a parent pointer and initialize the new cache pointers on each CPU. The percpu loop in mem_cgroup_alloc() may look concerning, but there are multiple similar loops in the cgroup creation path (e.g. cgroup_rstat_init()), most of which are hidden within alloc_percpu().

According to Oliver's testing [1], this fixes multiple 30-38% regressions in vm-scalability, will-it-scale-tlb_flush2, and will-it-scale-fallocate1. This comes at a cost of 2 more pointers per CPU (<2KB on a machine with 128 CPUs).

[1] https://lore.kernel.org/lkml/ZbDJsfsZt2ITyo61@xsang-OptiPlex-9020/

[yosryahmed@google.com: fix struct memcg_vmstats_percpu size and alignment]
Link: https://lkml.kernel.org/r/20240203044612.1234216-1-yosryahmed@google.com
Link: https://lkml.kernel.org/r/20240124100023.660032-1-yosryahmed@google.com
Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Fixes: 8d59d22 ("mm: memcg: make stats flushing threshold per-memcg")
Tested-by: kernel test robot <oliver.sang@intel.com>
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202401221624.cb53a8ca-oliver.sang@intel.com
Acked-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
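To make the load-count argument above concrete, here is a minimal userspace sketch of the old walk versus the cached-pointer walk. It is not the kernel code (the real change is in the diff below): the struct names, the single hard-coded "CPU", the plain integer counter standing in for the atomic, and the BATCH constant are all simplifications for illustration.

/*
 * Minimal userspace sketch (not kernel code): one CPU, plain integers
 * instead of per-CPU variables and atomics, stand-in names throughout.
 */
#include <stdio.h>
#include <stdlib.h>

#define BATCH 64				/* stand-in for MEMCG_CHARGE_BATCH */

struct vmstats { long long stats_updates; };	/* shared counter all CPUs fold into */

struct vmstats_percpu {
	unsigned int stats_updates;		/* pending updates on this CPU */
	struct vmstats_percpu *parent;		/* cached: parent memcg's per-CPU stats */
	struct vmstats *vmstats;		/* cached: owning memcg's shared stats */
};

struct memcg {
	struct memcg *parent;
	struct vmstats vmstats;
	struct vmstats_percpu pcpu;		/* a single CPU, for illustration */
};

/*
 * Old scheme: walk the memcg hierarchy via parent pointers; in the kernel
 * each level loads the vmstats_percpu address, stats_updates, and the
 * parent memcg (about 3 loads per level).
 */
static void rstat_updated_old(struct memcg *memcg, int val)
{
	for (; memcg; memcg = memcg->parent) {
		memcg->pcpu.stats_updates += abs(val);
		if (memcg->pcpu.stats_updates < BATCH)
			continue;
		memcg->vmstats.stats_updates += memcg->pcpu.stats_updates;
		memcg->pcpu.stats_updates = 0;
	}
}

/*
 * New scheme: resolve the per-CPU struct once, then follow the cached
 * parent pointers (a single load per additional level).
 */
static void rstat_updated_new(struct memcg *memcg, int val)
{
	struct vmstats_percpu *statc = &memcg->pcpu;

	for (; statc; statc = statc->parent) {
		statc->stats_updates += abs(val);
		if (statc->stats_updates < BATCH)
			continue;
		statc->vmstats->stats_updates += statc->stats_updates;
		statc->stats_updates = 0;
	}
}

int main(void)
{
	struct memcg root = { .pcpu = { .vmstats = &root.vmstats } };
	struct memcg child = {
		.parent = &root,
		.pcpu = { .parent = &root.pcpu, .vmstats = &child.vmstats },
	};

	rstat_updated_old(&child, 100);
	rstat_updated_new(&child, 100);
	printf("child folded: %lld, root folded: %lld\n",
	       child.vmstats.stats_updates, root.vmstats.stats_updates);
	return 0;
}

Both helpers fold the same totals into the shared counters; the difference is purely how many pointer dereferences each level of the hierarchy costs on the hot path.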
1 parent 6b97ad9 commit 41ea28d


mm/memcontrol.c

Lines changed: 35 additions & 21 deletions
@@ -617,6 +617,15 @@ static inline int memcg_events_index(enum vm_event_item idx)
 }
 
 struct memcg_vmstats_percpu {
+	/* Stats updates since the last flush */
+	unsigned int	stats_updates;
+
+	/* Cached pointers for fast iteration in memcg_rstat_updated() */
+	struct memcg_vmstats_percpu	*parent;
+	struct memcg_vmstats		*vmstats;
+
+	/* The above should fit a single cacheline for memcg_rstat_updated() */
+
 	/* Local (CPU and cgroup) page state & events */
 	long		state[MEMCG_NR_STAT];
 	unsigned long	events[NR_MEMCG_EVENTS];
@@ -628,10 +637,7 @@ struct memcg_vmstats_percpu {
 	/* Cgroup1: threshold notifications & softlimit tree updates */
 	unsigned long	nr_page_events;
 	unsigned long	targets[MEM_CGROUP_NTARGETS];
-
-	/* Stats updates since the last flush */
-	unsigned int	stats_updates;
-};
+} ____cacheline_aligned;
 
 struct memcg_vmstats {
 	/* Aggregated (CPU and subtree) page state & events */
@@ -694,36 +700,35 @@ static void memcg_stats_unlock(void)
 }
 
 
-static bool memcg_should_flush_stats(struct mem_cgroup *memcg)
+static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
 {
-	return atomic64_read(&memcg->vmstats->stats_updates) >
+	return atomic64_read(&vmstats->stats_updates) >
 		MEMCG_CHARGE_BATCH * num_online_cpus();
 }
 
 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
 {
+	struct memcg_vmstats_percpu *statc;
 	int cpu = smp_processor_id();
-	unsigned int x;
 
 	if (!val)
 		return;
 
 	cgroup_rstat_updated(memcg->css.cgroup, cpu);
-
-	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
-		x = __this_cpu_add_return(memcg->vmstats_percpu->stats_updates,
-					  abs(val));
-
-		if (x < MEMCG_CHARGE_BATCH)
+	statc = this_cpu_ptr(memcg->vmstats_percpu);
+	for (; statc; statc = statc->parent) {
+		statc->stats_updates += abs(val);
+		if (statc->stats_updates < MEMCG_CHARGE_BATCH)
 			continue;
 
 		/*
 		 * If @memcg is already flush-able, increasing stats_updates is
 		 * redundant. Avoid the overhead of the atomic update.
 		 */
-		if (!memcg_should_flush_stats(memcg))
-			atomic64_add(x, &memcg->vmstats->stats_updates);
-		__this_cpu_write(memcg->vmstats_percpu->stats_updates, 0);
+		if (!memcg_vmstats_needs_flush(statc->vmstats))
+			atomic64_add(statc->stats_updates,
+				     &statc->vmstats->stats_updates);
+		statc->stats_updates = 0;
 	}
 }
 
@@ -752,7 +757,7 @@ void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
 	if (!memcg)
 		memcg = root_mem_cgroup;
 
-	if (memcg_should_flush_stats(memcg))
+	if (memcg_vmstats_needs_flush(memcg->vmstats))
 		do_flush_stats(memcg);
 }
 
@@ -766,7 +771,7 @@ void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
 static void flush_memcg_stats_dwork(struct work_struct *w)
 {
 	/*
-	 * Deliberately ignore memcg_should_flush_stats() here so that flushing
+	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
 	 * in latency-sensitive paths is as cheap as possible.
 	 */
 	do_flush_stats(root_mem_cgroup);
@@ -5328,10 +5333,11 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)
 	__mem_cgroup_free(memcg);
 }
 
-static struct mem_cgroup *mem_cgroup_alloc(void)
+static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
 {
+	struct memcg_vmstats_percpu *statc, *pstatc;
 	struct mem_cgroup *memcg;
-	int node;
+	int node, cpu;
 	int __maybe_unused i;
 	long error = -ENOMEM;
 
@@ -5354,6 +5360,14 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
 	if (!memcg->vmstats_percpu)
 		goto fail;
 
+	for_each_possible_cpu(cpu) {
+		if (parent)
+			pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
+		statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
+		statc->parent = parent ? pstatc : NULL;
+		statc->vmstats = memcg->vmstats;
+	}
+
 	for_each_node(node)
 		if (alloc_mem_cgroup_per_node_info(memcg, node))
 			goto fail;
@@ -5399,7 +5413,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 	struct mem_cgroup *memcg, *old_memcg;
 
 	old_memcg = set_active_memcg(parent);
-	memcg = mem_cgroup_alloc();
+	memcg = mem_cgroup_alloc(parent);
 	set_active_memcg(old_memcg);
 	if (IS_ERR(memcg))
 		return ERR_CAST(memcg);
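
The [yosryahmed@google.com: fix struct memcg_vmstats_percpu size and alignment] note and the ____cacheline_aligned hunk above are about keeping stats_updates and the two cached pointers on a single cacheline. A rough userspace way to express that constraint is sketched below; the 64-byte line size, the GCC/Clang aligned attribute, and the _sketch struct are assumptions, only the hot-field ordering mirrors the patch.

/*
 * Userspace illustration of the layout constraint: hot fields first,
 * and they must not spill past the first (assumed 64-byte) cacheline.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define CACHELINE 64	/* assumed L1 data cacheline size */

struct memcg_vmstats;	/* opaque here: only its address is cached per CPU */

struct memcg_vmstats_percpu_sketch {
	unsigned int stats_updates;			/* hot: bumped on every stat update */
	struct memcg_vmstats_percpu_sketch *parent;	/* hot: next level up, same CPU */
	struct memcg_vmstats *vmstats;			/* hot: shared counter to fold into */
	long cold[64];					/* stands in for state[]/events[]/... */
} __attribute__((aligned(CACHELINE)));			/* analogous to ____cacheline_aligned */

/* The three hot fields must end within the first cacheline. */
static_assert(offsetof(struct memcg_vmstats_percpu_sketch, vmstats) +
	      sizeof(struct memcg_vmstats *) <= CACHELINE,
	      "hot fields spill out of the first cacheline");

int main(void)
{
	printf("hot fields occupy bytes 0..%zu of a %d-byte line\n",
	       offsetof(struct memcg_vmstats_percpu_sketch, vmstats) +
	       sizeof(struct memcg_vmstats *) - 1, CACHELINE);
	return 0;
}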
