Commit 3523dd7

shakeelbakpm00 authored and committed
memcg: separate local_trylock for memcg and obj
The per-cpu stock_lock protects the cached memcg and cached objcg and their respective fields. However, there is no dependency between these fields, and it is better to have fine-grained separate locks for the cached memcg and the cached objcg. This decoupling of the locks allows the memcg charge cache and the objcg charge cache to be made nmi-safe independently. At the moment, the memcg charge cache is already nmi-safe, and this decoupling will allow the memcg charge cache to work without disabling irqs.

Link: https://lkml.kernel.org/r/20250506225533.2580386-3-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
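As context for the diff below, here is a condensed sketch of the layout the patch moves to: two independent local_trylock_t locks in the per-cpu stock, each guarding only its own cache. This is assembled from the diff itself, with fields not relevant to the locking elided:

/*
 * Condensed from struct memcg_stock_pcp after this patch;
 * see the full diff below for the exact layout.
 */
struct memcg_stock_pcp {
	local_trylock_t memcg_lock;	/* guards the memcg charge cache */
	uint8_t nr_pages[NR_MEMCG_STOCK];
	struct mem_cgroup *cached[NR_MEMCG_STOCK];

	local_trylock_t obj_lock;	/* guards the objcg charge cache */
	unsigned int nr_bytes;
	struct obj_cgroup *cached_objcg;
	/* ... remaining fields unchanged ... */
};

With the split, consume_stock() and refill_stock() contend only on memcg_lock, while the obj stock paths take only obj_lock; as before, a failed trylock on the memcg fast path falls back to charging the memcg directly instead of touching the cache.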
1 parent 2fba596 commit 3523dd7

1 file changed: +26 −23 lines changed

mm/memcontrol.c

Lines changed: 26 additions & 23 deletions
@@ -1779,13 +1779,14 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
  */
 #define NR_MEMCG_STOCK 7
 struct memcg_stock_pcp {
-	local_trylock_t stock_lock;
+	local_trylock_t memcg_lock;
 	uint8_t nr_pages[NR_MEMCG_STOCK];
 	struct mem_cgroup *cached[NR_MEMCG_STOCK];
 
+	local_trylock_t obj_lock;
+	unsigned int nr_bytes;
 	struct obj_cgroup *cached_objcg;
 	struct pglist_data *cached_pgdat;
-	unsigned int nr_bytes;
 	int nr_slab_reclaimable_b;
 	int nr_slab_unreclaimable_b;
 
@@ -1794,7 +1795,8 @@ struct memcg_stock_pcp {
 #define FLUSHING_CACHED_CHARGE	0
 };
 static DEFINE_PER_CPU_ALIGNED(struct memcg_stock_pcp, memcg_stock) = {
-	.stock_lock = INIT_LOCAL_TRYLOCK(stock_lock),
+	.memcg_lock = INIT_LOCAL_TRYLOCK(memcg_lock),
+	.obj_lock = INIT_LOCAL_TRYLOCK(obj_lock),
 };
 static DEFINE_MUTEX(percpu_charge_mutex);
 
@@ -1822,7 +1824,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	int i;
 
 	if (nr_pages > MEMCG_CHARGE_BATCH ||
-	    !local_trylock_irqsave(&memcg_stock.stock_lock, flags))
+	    !local_trylock_irqsave(&memcg_stock.memcg_lock, flags))
 		return ret;
 
 	stock = this_cpu_ptr(&memcg_stock);
@@ -1839,7 +1841,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 		break;
 	}
 
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.memcg_lock, flags);
 
 	return ret;
 }
@@ -1885,19 +1887,19 @@ static void drain_local_stock(struct work_struct *dummy)
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
-	/*
-	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
-	 * drain_stock races is that we always operate on local CPU stock
-	 * here with IRQ disabled
-	 */
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	if (WARN_ONCE(!in_task(), "drain in non-task context"))
+		return;
 
+	local_lock_irqsave(&memcg_stock.obj_lock, flags);
 	stock = this_cpu_ptr(&memcg_stock);
 	drain_obj_stock(stock);
+	local_unlock_irqrestore(&memcg_stock.obj_lock, flags);
+
+	local_lock_irqsave(&memcg_stock.memcg_lock, flags);
+	stock = this_cpu_ptr(&memcg_stock);
 	drain_stock_fully(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
-
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.memcg_lock, flags);
 }
 
 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
@@ -1920,10 +1922,10 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));
 
 	if (nr_pages > MEMCG_CHARGE_BATCH ||
-	    !local_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
+	    !local_trylock_irqsave(&memcg_stock.memcg_lock, flags)) {
 		/*
 		 * In case of larger than batch refill or unlikely failure to
-		 * lock the percpu stock_lock, uncharge memcg directly.
+		 * lock the percpu memcg_lock, uncharge memcg directly.
 		 */
 		memcg_uncharge(memcg, nr_pages);
 		return;
@@ -1955,7 +1957,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 		WRITE_ONCE(stock->nr_pages[i], nr_pages);
 	}
 
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.memcg_lock, flags);
 }
 
 static bool is_drain_needed(struct memcg_stock_pcp *stock,
@@ -2030,11 +2032,12 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
 
 	stock = &per_cpu(memcg_stock, cpu);
 
-	/* drain_obj_stock requires stock_lock */
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	/* drain_obj_stock requires obj_lock */
+	local_lock_irqsave(&memcg_stock.obj_lock, flags);
 	drain_obj_stock(stock);
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.obj_lock, flags);
 
+	/* no need for the local lock */
 	drain_stock_fully(stock);
 
 	return 0;
@@ -2887,7 +2890,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 	unsigned long flags;
 	bool ret = false;
 
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	local_lock_irqsave(&memcg_stock.obj_lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
@@ -2898,7 +2901,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 		__account_obj_stock(objcg, stock, nr_bytes, pgdat, idx);
 	}
 
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.obj_lock, flags);
 
 	return ret;
 }
@@ -2987,7 +2990,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 	unsigned long flags;
 	unsigned int nr_pages = 0;
 
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	local_lock_irqsave(&memcg_stock.obj_lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
@@ -3009,7 +3012,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 		stock->nr_bytes &= (PAGE_SIZE - 1);
 	}
 
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.obj_lock, flags);
 
 	if (nr_pages)
 		obj_cgroup_uncharge_pages(objcg, nr_pages);
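For readability, here is drain_local_stock() reassembled from the hunk above; note (an observation, not from the commit message) that the two locks are taken strictly one after the other, never nested, so the split introduces no new lock-ordering constraint:

static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	if (WARN_ONCE(!in_task(), "drain in non-task context"))
		return;

	/* Drain the objcg cache under obj_lock alone. */
	local_lock_irqsave(&memcg_stock.obj_lock, flags);
	stock = this_cpu_ptr(&memcg_stock);
	drain_obj_stock(stock);
	local_unlock_irqrestore(&memcg_stock.obj_lock, flags);

	/* Then drain the memcg cache under memcg_lock alone. */
	local_lock_irqsave(&memcg_stock.memcg_lock, flags);
	stock = this_cpu_ptr(&memcg_stock);
	drain_stock_fully(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
	local_unlock_irqrestore(&memcg_stock.memcg_lock, flags);
}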
