
Commit 9e619cd

shakeelb authored and akpm00 committed
memcg: no irq disable for memcg stock lock

There is no need to disable irqs to use the memcg per-cpu stock, so let's just not do that. One consequence of this change: if the kernel, while in task context, holds the memcg stock lock and that cpu gets interrupted, then memcg charges on that cpu from irq context will take the slow path of memcg charging. However, that should be super rare and should be fine in general.

Link: https://lkml.kernel.org/r/20250506225533.2580386-5-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
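To make the trylock-or-slow-path pattern behind this change concrete, here is a minimal user-space sketch of the same idea. It is an illustrative analogue only, not kernel code: pthread_mutex_trylock() stands in for the kernel's local_trylock(), and stock_lock, cached_pages and charge_slow_path() are hypothetical names invented for this sketch.

#include <pthread.h>
#include <stdbool.h>

/*
 * User-space analogue of the memcg per-cpu stock after this commit:
 * try to take the lock without blocking or disabling anything; if it
 * is already held (in the kernel: e.g. an irq arriving while task
 * context holds the per-cpu lock), fall back to the slow path.
 */
static pthread_mutex_t stock_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int cached_pages;

/* Stand-in for charging the page counters directly. */
static void charge_slow_path(unsigned int nr_pages)
{
        (void)nr_pages;
}

static bool consume_stock(unsigned int nr_pages)
{
        bool ret = false;

        /* Mirrors local_trylock(&memcg_stock.lock): fail instead of waiting. */
        if (pthread_mutex_trylock(&stock_lock) != 0)
                return false;

        if (cached_pages >= nr_pages) {
                cached_pages -= nr_pages;
                ret = true;
        }

        pthread_mutex_unlock(&stock_lock);
        return ret;
}

static void charge(unsigned int nr_pages)
{
        if (!consume_stock(nr_pages))
                charge_slow_path(nr_pages);     /* rare contended case */
}

int main(void)
{
        cached_pages = 64;
        charge(16);     /* served from the cached stock */
        charge(128);    /* stock too small: slow path */
        return 0;
}

As in the commit, the cost of never disabling irqs is only that a contender fails the trylock and charges directly, which the message argues is a rare case.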
1 parent c80509e commit 9e619cd

File tree

1 file changed: +6 -9 lines changed


mm/memcontrol.c

Lines changed: 6 additions & 9 deletions
@@ -1829,12 +1829,11 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
         struct memcg_stock_pcp *stock;
         uint8_t stock_pages;
-        unsigned long flags;
         bool ret = false;
         int i;
 
         if (nr_pages > MEMCG_CHARGE_BATCH ||
-            !local_trylock_irqsave(&memcg_stock.lock, flags))
+            !local_trylock(&memcg_stock.lock))
                 return ret;
 
         stock = this_cpu_ptr(&memcg_stock);
@@ -1851,7 +1850,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
                 break;
         }
 
-        local_unlock_irqrestore(&memcg_stock.lock, flags);
+        local_unlock(&memcg_stock.lock);
 
         return ret;
 }
@@ -1895,18 +1894,17 @@ static void drain_stock_fully(struct memcg_stock_pcp *stock)
 static void drain_local_memcg_stock(struct work_struct *dummy)
 {
         struct memcg_stock_pcp *stock;
-        unsigned long flags;
 
         if (WARN_ONCE(!in_task(), "drain in non-task context"))
                 return;
 
-        local_lock_irqsave(&memcg_stock.lock, flags);
+        local_lock(&memcg_stock.lock);
 
         stock = this_cpu_ptr(&memcg_stock);
         drain_stock_fully(stock);
         clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-        local_unlock_irqrestore(&memcg_stock.lock, flags);
+        local_unlock(&memcg_stock.lock);
 }
 
 static void drain_local_obj_stock(struct work_struct *dummy)
@@ -1931,7 +1929,6 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
         struct memcg_stock_pcp *stock;
         struct mem_cgroup *cached;
         uint8_t stock_pages;
-        unsigned long flags;
         bool success = false;
         int empty_slot = -1;
         int i;
@@ -1946,7 +1943,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
         VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));
 
         if (nr_pages > MEMCG_CHARGE_BATCH ||
-            !local_trylock_irqsave(&memcg_stock.lock, flags)) {
+            !local_trylock(&memcg_stock.lock)) {
                 /*
                  * In case of larger than batch refill or unlikely failure to
                  * lock the percpu memcg_stock.lock, uncharge memcg directly.
@@ -1981,7 +1978,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
                 WRITE_ONCE(stock->nr_pages[i], nr_pages);
         }
 
-        local_unlock_irqrestore(&memcg_stock.lock, flags);
+        local_unlock(&memcg_stock.lock);
 }
 
 static bool is_memcg_drain_needed(struct memcg_stock_pcp *stock,
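A note for context, summarizing the local_lock API this diff relies on rather than anything stated in the commit itself: on non-PREEMPT_RT kernels, local_lock() only disables preemption, while local_lock_irqsave() additionally disables interrupts. It is the trylock that keeps the irq-interrupts-the-holder case safe here: a charge attempted from irq context simply fails local_trylock() and falls back to the slow path, rather than deadlocking on the per-cpu lock.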
