Skip to content

Commit 35822fd

Browse files
yosrym93 authored and akpm00 committed
memcg: remove mem_cgroup_flush_stats_atomic()
Previous patches removed all callers of mem_cgroup_flush_stats_atomic(). Remove the function and simplify the code. Link: https://lkml.kernel.org/r/20230421174020.2994750-5-yosryahmed@google.com Signed-off-by: Yosry Ahmed <yosryahmed@google.com> Acked-by: Shakeel Butt <shakeelb@google.com> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: Christian Brauner <brauner@kernel.org> Cc: Jan Kara <jack@suse.cz> Cc: Jens Axboe <axboe@kernel.dk> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Michal Hocko <mhocko@kernel.org> Cc: Michal Koutný <mkoutny@suse.com> Cc: Muchun Song <songmuchun@bytedance.com> Cc: Roman Gushchin <roman.gushchin@linux.dev> Cc: Tejun Heo <tj@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent f82a7a8 commit 35822fd

File tree

2 files changed: +5 / -24 lines changed

include/linux/memcontrol.h

Lines changed: 0 additions & 5 deletions
@@ -1038,7 +1038,6 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 }
 
 void mem_cgroup_flush_stats(void);
-void mem_cgroup_flush_stats_atomic(void);
 void mem_cgroup_flush_stats_ratelimited(void);
 
 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
@@ -1537,10 +1536,6 @@ static inline void mem_cgroup_flush_stats(void)
 {
 }
 
-static inline void mem_cgroup_flush_stats_atomic(void)
-{
-}
-
 static inline void mem_cgroup_flush_stats_ratelimited(void)
 {
 }

mm/memcontrol.c

Lines changed: 5 additions & 19 deletions
@@ -639,7 +639,7 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
 	}
 }
 
-static void do_flush_stats(bool atomic)
+static void do_flush_stats(void)
 {
 	/*
 	 * We always flush the entire tree, so concurrent flushers can just
@@ -652,30 +652,16 @@ static void do_flush_stats(bool atomic)
 
 	WRITE_ONCE(flush_next_time, jiffies_64 + 2*FLUSH_TIME);
 
-	if (atomic)
-		cgroup_rstat_flush_atomic(root_mem_cgroup->css.cgroup);
-	else
-		cgroup_rstat_flush(root_mem_cgroup->css.cgroup);
+	cgroup_rstat_flush(root_mem_cgroup->css.cgroup);
 
 	atomic_set(&stats_flush_threshold, 0);
 	atomic_set(&stats_flush_ongoing, 0);
 }
 
-static bool should_flush_stats(void)
-{
-	return atomic_read(&stats_flush_threshold) > num_online_cpus();
-}
-
 void mem_cgroup_flush_stats(void)
 {
-	if (should_flush_stats())
-		do_flush_stats(false);
-}
-
-void mem_cgroup_flush_stats_atomic(void)
-{
-	if (should_flush_stats())
-		do_flush_stats(true);
+	if (atomic_read(&stats_flush_threshold) > num_online_cpus())
+		do_flush_stats();
 }
 
 void mem_cgroup_flush_stats_ratelimited(void)
@@ -690,7 +676,7 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
 	 * Always flush here so that flushing in latency-sensitive paths is
 	 * as cheap as possible.
 	 */
-	do_flush_stats(false);
+	do_flush_stats();
 	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
 }

0 commit comments

Comments
 (0)