Skip to content

Commit 1109208

Browse files
hannes authored and torvalds committed
mm: memcontrol: move socket code for unified hierarchy accounting
The unified hierarchy memory controller will account socket memory. Move the infrastructure functions accordingly. Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Michal Hocko <mhocko@suse.com> Reviewed-by: Vladimir Davydov <vdavydov@virtuozzo.com> Acked-by: David S. Miller <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 7941d21 commit 1109208

File tree

1 file changed

+74
-74
lines changed

1 file changed

+74
-74
lines changed

mm/memcontrol.c

Lines changed: 74 additions & 74 deletions
Original file line numberDiff line numberDiff line change
@@ -294,80 +294,6 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
294294
return mem_cgroup_from_css(css);
295295
}
296296

297-
/* Writing them here to avoid exposing memcg's inner layout */
298-
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
299-
300-
struct static_key memcg_sockets_enabled_key;
301-
EXPORT_SYMBOL(memcg_sockets_enabled_key);
302-
303-
void sock_update_memcg(struct sock *sk)
304-
{
305-
struct mem_cgroup *memcg;
306-
307-
/* Socket cloning can throw us here with sk_cgrp already
308-
* filled. It won't however, necessarily happen from
309-
* process context. So the test for root memcg given
310-
* the current task's memcg won't help us in this case.
311-
*
312-
* Respecting the original socket's memcg is a better
313-
* decision in this case.
314-
*/
315-
if (sk->sk_memcg) {
316-
BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
317-
css_get(&sk->sk_memcg->css);
318-
return;
319-
}
320-
321-
rcu_read_lock();
322-
memcg = mem_cgroup_from_task(current);
323-
if (memcg != root_mem_cgroup &&
324-
memcg->tcp_mem.active &&
325-
css_tryget_online(&memcg->css))
326-
sk->sk_memcg = memcg;
327-
rcu_read_unlock();
328-
}
329-
EXPORT_SYMBOL(sock_update_memcg);
330-
331-
void sock_release_memcg(struct sock *sk)
332-
{
333-
WARN_ON(!sk->sk_memcg);
334-
css_put(&sk->sk_memcg->css);
335-
}
336-
337-
/**
338-
* mem_cgroup_charge_skmem - charge socket memory
339-
* @memcg: memcg to charge
340-
* @nr_pages: number of pages to charge
341-
*
342-
* Charges @nr_pages to @memcg. Returns %true if the charge fit within
343-
* @memcg's configured limit, %false if the charge had to be forced.
344-
*/
345-
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
346-
{
347-
struct page_counter *counter;
348-
349-
if (page_counter_try_charge(&memcg->tcp_mem.memory_allocated,
350-
nr_pages, &counter)) {
351-
memcg->tcp_mem.memory_pressure = 0;
352-
return true;
353-
}
354-
page_counter_charge(&memcg->tcp_mem.memory_allocated, nr_pages);
355-
memcg->tcp_mem.memory_pressure = 1;
356-
return false;
357-
}
358-
359-
/**
360-
* mem_cgroup_uncharge_skmem - uncharge socket memory
361-
* @memcg - memcg to uncharge
362-
* @nr_pages - number of pages to uncharge
363-
*/
364-
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
365-
{
366-
page_counter_uncharge(&memcg->tcp_mem.memory_allocated, nr_pages);
367-
}
368-
369-
#endif
370-
371297
#ifdef CONFIG_MEMCG_KMEM
372298
/*
373299
* This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
@@ -5607,6 +5533,80 @@ void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage)
56075533
commit_charge(newpage, memcg, true);
56085534
}
56095535

5536+
/* Writing them here to avoid exposing memcg's inner layout */
5537+
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
5538+
5539+
struct static_key memcg_sockets_enabled_key;
5540+
EXPORT_SYMBOL(memcg_sockets_enabled_key);
5541+
5542+
void sock_update_memcg(struct sock *sk)
5543+
{
5544+
struct mem_cgroup *memcg;
5545+
5546+
/* Socket cloning can throw us here with sk_cgrp already
5547+
* filled. It won't however, necessarily happen from
5548+
* process context. So the test for root memcg given
5549+
* the current task's memcg won't help us in this case.
5550+
*
5551+
* Respecting the original socket's memcg is a better
5552+
* decision in this case.
5553+
*/
5554+
if (sk->sk_memcg) {
5555+
BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
5556+
css_get(&sk->sk_memcg->css);
5557+
return;
5558+
}
5559+
5560+
rcu_read_lock();
5561+
memcg = mem_cgroup_from_task(current);
5562+
if (memcg != root_mem_cgroup &&
5563+
memcg->tcp_mem.active &&
5564+
css_tryget_online(&memcg->css))
5565+
sk->sk_memcg = memcg;
5566+
rcu_read_unlock();
5567+
}
5568+
EXPORT_SYMBOL(sock_update_memcg);
5569+
5570+
void sock_release_memcg(struct sock *sk)
5571+
{
5572+
WARN_ON(!sk->sk_memcg);
5573+
css_put(&sk->sk_memcg->css);
5574+
}
5575+
5576+
/**
5577+
* mem_cgroup_charge_skmem - charge socket memory
5578+
* @memcg: memcg to charge
5579+
* @nr_pages: number of pages to charge
5580+
*
5581+
* Charges @nr_pages to @memcg. Returns %true if the charge fit within
5582+
* @memcg's configured limit, %false if the charge had to be forced.
5583+
*/
5584+
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5585+
{
5586+
struct page_counter *counter;
5587+
5588+
if (page_counter_try_charge(&memcg->tcp_mem.memory_allocated,
5589+
nr_pages, &counter)) {
5590+
memcg->tcp_mem.memory_pressure = 0;
5591+
return true;
5592+
}
5593+
page_counter_charge(&memcg->tcp_mem.memory_allocated, nr_pages);
5594+
memcg->tcp_mem.memory_pressure = 1;
5595+
return false;
5596+
}
5597+
5598+
/**
5599+
* mem_cgroup_uncharge_skmem - uncharge socket memory
5600+
* @memcg - memcg to uncharge
5601+
* @nr_pages - number of pages to uncharge
5602+
*/
5603+
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5604+
{
5605+
page_counter_uncharge(&memcg->tcp_mem.memory_allocated, nr_pages);
5606+
}
5607+
5608+
#endif
5609+
56105610
/*
56115611
* subsys_initcall() for memory controller.
56125612
*

0 commit comments

Comments (0)