
Commit 2b1d554

Xiu Jianfeng authored and akpm00 committed
memcg: factor out mem_cgroup_stat_aggregate()
Currently mem_cgroup_css_rstat_flush() is used to flush the per-CPU statistics from a specified CPU into the global statistics of the memcg. It processes three kinds of data in three for-loops using exactly the same method, so the loop can be factored out, which makes the code cleaner.

Link: https://lkml.kernel.org/r/20241026093407.310955-1-xiujianfeng@huaweicloud.com
Signed-off-by: Xiu Jianfeng <xiujianfeng@huawei.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Wang Weiyang <wangweiyang2@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent e8c1a29 commit 2b1d554

File tree

1 file changed (+70, -59 lines)


mm/memcontrol.c

Lines changed: 70 additions & 59 deletions
@@ -3730,68 +3730,90 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
         memcg_wb_domain_size_changed(memcg);
 }
 
-static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
+struct aggregate_control {
+        /* pointer to the aggregated (CPU and subtree aggregated) counters */
+        long *aggregate;
+        /* pointer to the non-hierarchichal (CPU aggregated) counters */
+        long *local;
+        /* pointer to the pending child counters during tree propagation */
+        long *pending;
+        /* pointer to the parent's pending counters, could be NULL */
+        long *ppending;
+        /* pointer to the percpu counters to be aggregated */
+        long *cstat;
+        /* pointer to the percpu counters of the last aggregation */
+        long *cstat_prev;
+        /* size of the above counters */
+        int size;
+};
+
+static void mem_cgroup_stat_aggregate(struct aggregate_control *ac)
 {
-        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
-        struct mem_cgroup *parent = parent_mem_cgroup(memcg);
-        struct memcg_vmstats_percpu *statc;
+        int i;
         long delta, delta_cpu, v;
-        int i, nid;
-
-        statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
 
-        for (i = 0; i < MEMCG_VMSTAT_SIZE; i++) {
+        for (i = 0; i < ac->size; i++) {
                 /*
                  * Collect the aggregated propagation counts of groups
                  * below us. We're in a per-cpu loop here and this is
                  * a global counter, so the first cycle will get them.
                  */
-                delta = memcg->vmstats->state_pending[i];
+                delta = ac->pending[i];
                 if (delta)
-                        memcg->vmstats->state_pending[i] = 0;
+                        ac->pending[i] = 0;
 
                 /* Add CPU changes on this level since the last flush */
                 delta_cpu = 0;
-                v = READ_ONCE(statc->state[i]);
-                if (v != statc->state_prev[i]) {
-                        delta_cpu = v - statc->state_prev[i];
+                v = READ_ONCE(ac->cstat[i]);
+                if (v != ac->cstat_prev[i]) {
+                        delta_cpu = v - ac->cstat_prev[i];
                         delta += delta_cpu;
-                        statc->state_prev[i] = v;
+                        ac->cstat_prev[i] = v;
                 }
 
                 /* Aggregate counts on this level and propagate upwards */
                 if (delta_cpu)
-                        memcg->vmstats->state_local[i] += delta_cpu;
+                        ac->local[i] += delta_cpu;
 
                 if (delta) {
-                        memcg->vmstats->state[i] += delta;
-                        if (parent)
-                                parent->vmstats->state_pending[i] += delta;
+                        ac->aggregate[i] += delta;
+                        if (ac->ppending)
+                                ac->ppending[i] += delta;
                 }
         }
+}
 
-        for (i = 0; i < NR_MEMCG_EVENTS; i++) {
-                delta = memcg->vmstats->events_pending[i];
-                if (delta)
-                        memcg->vmstats->events_pending[i] = 0;
-
-                delta_cpu = 0;
-                v = READ_ONCE(statc->events[i]);
-                if (v != statc->events_prev[i]) {
-                        delta_cpu = v - statc->events_prev[i];
-                        delta += delta_cpu;
-                        statc->events_prev[i] = v;
-                }
+static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
+{
+        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+        struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+        struct memcg_vmstats_percpu *statc;
+        struct aggregate_control ac;
+        int nid;
 
-                if (delta_cpu)
-                        memcg->vmstats->events_local[i] += delta_cpu;
+        statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
 
-                if (delta) {
-                        memcg->vmstats->events[i] += delta;
-                        if (parent)
-                                parent->vmstats->events_pending[i] += delta;
-                }
-        }
+        ac = (struct aggregate_control) {
+                .aggregate = memcg->vmstats->state,
+                .local = memcg->vmstats->state_local,
+                .pending = memcg->vmstats->state_pending,
+                .ppending = parent ? parent->vmstats->state_pending : NULL,
+                .cstat = statc->state,
+                .cstat_prev = statc->state_prev,
+                .size = MEMCG_VMSTAT_SIZE,
+        };
+        mem_cgroup_stat_aggregate(&ac);
+
+        ac = (struct aggregate_control) {
+                .aggregate = memcg->vmstats->events,
+                .local = memcg->vmstats->events_local,
+                .pending = memcg->vmstats->events_pending,
+                .ppending = parent ? parent->vmstats->events_pending : NULL,
+                .cstat = statc->events,
+                .cstat_prev = statc->events_prev,
+                .size = NR_MEMCG_EVENTS,
+        };
+        mem_cgroup_stat_aggregate(&ac);
 
         for_each_node_state(nid, N_MEMORY) {
                 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
@@ -3804,28 +3826,17 @@ static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
 
                 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
 
-                for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; i++) {
-                        delta = lstats->state_pending[i];
-                        if (delta)
-                                lstats->state_pending[i] = 0;
-
-                        delta_cpu = 0;
-                        v = READ_ONCE(lstatc->state[i]);
-                        if (v != lstatc->state_prev[i]) {
-                                delta_cpu = v - lstatc->state_prev[i];
-                                delta += delta_cpu;
-                                lstatc->state_prev[i] = v;
-                        }
-
-                        if (delta_cpu)
-                                lstats->state_local[i] += delta_cpu;
+                ac = (struct aggregate_control) {
+                        .aggregate = lstats->state,
+                        .local = lstats->state_local,
+                        .pending = lstats->state_pending,
+                        .ppending = plstats ? plstats->state_pending : NULL,
+                        .cstat = lstatc->state,
+                        .cstat_prev = lstatc->state_prev,
+                        .size = NR_MEMCG_NODE_STAT_ITEMS,
+                };
+                mem_cgroup_stat_aggregate(&ac);
 
-                        if (delta) {
-                                lstats->state[i] += delta;
-                                if (plstats)
-                                        plstats->state_pending[i] += delta;
-                        }
-                }
         }
         WRITE_ONCE(statc->stats_updates, 0);
         /* We are in a per-cpu loop here, only do the atomic write once */
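
For illustration only, here is a minimal, hypothetical userspace sketch of the same pattern. The names struct agg_ctl, stat_aggregate() and the sample values are invented for this example; the committed kernel code above uses struct aggregate_control and mem_cgroup_stat_aggregate(), plus READ_ONCE() and the per-CPU plumbing that are omitted here. The sketch shows how one helper driven by a small control block replaces several identical aggregation loops: deltas queued by children and the delta accumulated since the last flush are folded into this level's counters and then queued for the parent.

#include <stdio.h>

/* Control block describing one family of counters; it mirrors the role
 * of struct aggregate_control in the patch, in simplified form. */
struct agg_ctl {
        long *aggregate;        /* subtree-aggregated counters at this level */
        long *local;            /* this level's own (non-hierarchical) counters */
        long *pending;          /* deltas queued by children, consumed here */
        long *ppending;         /* parent's pending array, may be NULL */
        long *cstat;            /* "per-CPU" counters being flushed */
        long *cstat_prev;       /* values seen at the previous flush */
        int size;               /* number of counters in each array */
};

/* One helper replaces the repeated for-loops: fold the pending child
 * deltas and the delta since the last flush into this level, then
 * queue the total for the parent. */
static void stat_aggregate(struct agg_ctl *ac)
{
        int i;
        long delta, delta_cpu;

        for (i = 0; i < ac->size; i++) {
                delta = ac->pending[i];
                ac->pending[i] = 0;

                delta_cpu = ac->cstat[i] - ac->cstat_prev[i];
                ac->cstat_prev[i] = ac->cstat[i];
                delta += delta_cpu;

                ac->local[i] += delta_cpu;
                ac->aggregate[i] += delta;
                if (ac->ppending)
                        ac->ppending[i] += delta;
        }
}

int main(void)
{
        enum { N = 2 };
        long parent_pending[N] = { 0, 0 };
        long aggregate[N] = { 0, 0 }, local[N] = { 0, 0 };
        long pending[N] = { 5, 0 };             /* queued by a child earlier */
        long cstat[N] = { 3, 7 }, cstat_prev[N] = { 0, 0 };

        struct agg_ctl ac = {
                .aggregate = aggregate,
                .local = local,
                .pending = pending,
                .ppending = parent_pending,
                .cstat = cstat,
                .cstat_prev = cstat_prev,
                .size = N,
        };

        stat_aggregate(&ac);

        printf("aggregate:      %ld %ld\n", aggregate[0], aggregate[1]);            /* 8 7 */
        printf("parent pending: %ld %ld\n", parent_pending[0], parent_pending[1]);  /* 8 7 */
        return 0;
}

Each call site only fills in the control block with the right counter arrays (memcg state, events, or per-node lruvec stats in the patch) and the same loop body serves all of them, which is exactly what the three designated-initializer blocks in the diff do.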
