mm/memcg: Optimize user context object stock access
Most kmem_cache_alloc() calls are from user context. With instrumentation
enabled, the measured fraction of kmem_cache_alloc() calls made from
non-task context was only about 0.01% of the total.

The irq disable/enable sequence used in this case to access the object
stock is slow.  To optimize for user context access, there are now two
object stocks, one for task context and one for interrupt context.

The task context object stock can be accessed after disabling preemption
only, which is cheap in a non-preempt kernel. The interrupt context object
stock can only be accessed after disabling interrupts. Task context code
can access the interrupt object stock, but not vice versa.
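
In code form, the scheme above looks roughly like the following. This is a
minimal, compilable userspace sketch of the get/put pair added by this patch,
not the patch itself: in_task(), preempt_disable()/preempt_enable() and
local_irq_save()/local_irq_restore() are kernel primitives and are modeled
here by stand-ins.

#include <stdbool.h>
#include <stdio.h>

struct obj_stock { unsigned int nr_bytes; };

static struct obj_stock task_obj;	/* accessed with preemption disabled */
static struct obj_stock irq_obj;	/* accessed with interrupts disabled */
static bool in_task_ctx = true;		/* stand-in for the kernel's in_task() */

static void preempt_disable(void) { }				/* cheap */
static void preempt_enable(void) { }
static void local_irq_save(unsigned long *f) { *f = 0; }	/* slower */
static void local_irq_restore(unsigned long f) { (void)f; }

/* Pick the stock for the current context; disable only what it requires. */
static struct obj_stock *get_obj_stock(unsigned long *flags)
{
	if (in_task_ctx) {
		preempt_disable();
		*flags = -1UL;		/* sentinel: task-context path taken */
		return &task_obj;
	}
	local_irq_save(flags);
	return &irq_obj;
}

/* Undo what get_obj_stock() did; the flags sentinel picks the undo path. */
static void put_obj_stock(unsigned long flags)
{
	if (flags == -1UL)
		preempt_enable();
	else
		local_irq_restore(flags);
}

int main(void)
{
	unsigned long flags;
	struct obj_stock *stock = get_obj_stock(&flags);

	stock->nr_bytes += 64;	/* charge stays cached in the local stock */
	put_obj_stock(flags);
	printf("task stock caches %u bytes\n", task_obj.nr_bytes);
	return 0;
}

The -1 sentinel lets put_obj_stock() choose the correct undo operation from
the flags word alone, without re-checking the execution context.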

The mod_objcg_state() function is also modified to make sure that memcg
and lruvec stat updates are done with interrupts disabled, by calling the
irq-safe mod_memcg_lruvec_state() instead of __mod_memcg_lruvec_state().
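
The non-underscored mod_memcg_lruvec_state() is assumed here (based on the
hunk below) to be the irq-safe wrapper around __mod_memcg_lruvec_state(). The
wrapper idiom, sketched in the same stand-in style as above:

#include <stdio.h>

static void local_irq_save(unsigned long *f) { *f = 0; }	/* stand-in */
static void local_irq_restore(unsigned long f) { (void)f; }	/* stand-in */

static long counter;	/* stand-in for a memcg/lruvec vmstat counter */

/* Double-underscore helper: caller must have interrupts disabled. */
static void __mod_state(int nr)
{
	counter += nr;
}

/* Plain variant: disables interrupts itself around the helper. */
static void mod_state(int nr)
{
	unsigned long flags;

	local_irq_save(&flags);
	__mod_state(nr);
	local_irq_restore(flags);
}

int main(void)
{
	mod_state(8);
	printf("counter = %ld\n", counter);
	return 0;
}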

The downside of this change is that more data can be held in the local
object stocks without being reflected in the charge counter and the vmstat
arrays.  However, this is a small price to pay for better performance.

Signed-off-by: Waiman Long <longman@redhat.com>
Waiman Long authored and intel-lab-lkp committed Apr 9, 2021
1 parent 6b724b1 commit 1f4e22f
Showing 1 changed file with 57 additions and 14 deletions.
71 changes: 57 additions & 14 deletions mm/memcontrol.c
@@ -2229,7 +2229,8 @@ struct obj_stock {
 struct memcg_stock_pcp {
 	struct mem_cgroup *cached; /* this never be root cgroup */
 	unsigned int nr_pages;
-	struct obj_stock obj;
+	struct obj_stock task_obj;
+	struct obj_stock irq_obj;
 
 	struct work_struct work;
 	unsigned long flags;
@@ -2254,11 +2255,46 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
 }
 #endif
 
+/*
+ * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
+ * sequence used in this case to access the object stock is slow. To optimize
+ * for user context access, there are now two object stocks, one for task
+ * context and one for interrupt context.
+ *
+ * The task context object stock can be accessed by disabling preemption only,
+ * which is cheap in a non-preempt kernel. The interrupt context object stock
+ * can only be accessed after disabling interrupts. Task context code can
+ * access the interrupt object stock, but not vice versa.
+ */
 static inline struct obj_stock *current_obj_stock(void)
 {
 	struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
 
-	return &stock->obj;
+	return in_task() ? &stock->task_obj : &stock->irq_obj;
 }
 
+#define get_obj_stock(flags)				\
+({							\
+	struct memcg_stock_pcp *stock;			\
+	struct obj_stock *obj_stock;			\
+							\
+	if (in_task()) {				\
+		preempt_disable();			\
+		(flags) = -1L;				\
+		stock = this_cpu_ptr(&memcg_stock);	\
+		obj_stock = &stock->task_obj;		\
+	} else {					\
+		local_irq_save(flags);			\
+		stock = this_cpu_ptr(&memcg_stock);	\
+		obj_stock = &stock->irq_obj;		\
+	}						\
+	obj_stock;					\
+})
+
+static inline void put_obj_stock(unsigned long flags)
+{
+	if (flags == -1L)
+		preempt_enable();
+	else
+		local_irq_restore(flags);
+}
+
 /**
@@ -2327,7 +2363,9 @@ static void drain_local_stock(struct work_struct *dummy)
 	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
-	drain_obj_stock(&stock->obj);
+	drain_obj_stock(&stock->irq_obj);
+	if (in_task())
+		drain_obj_stock(&stock->task_obj);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
@@ -3183,7 +3221,7 @@ static inline void mod_objcg_state(struct obj_cgroup *objcg,
 	memcg = obj_cgroup_memcg(objcg);
 	if (pgdat)
 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
-	__mod_memcg_lruvec_state(memcg, lruvec, idx, nr);
+	mod_memcg_lruvec_state(memcg, lruvec, idx, nr);
 	rcu_read_unlock();
 }
 
@@ -3193,15 +3231,15 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
 	unsigned long flags;
 	bool ret = false;
 
-	local_irq_save(flags);
+	stock = get_obj_stock(flags);
 
-	stock = current_obj_stock();
 	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
 		stock->nr_bytes -= nr_bytes;
 		ret = true;
 	}
 
-	local_irq_restore(flags);
+	put_obj_stock(flags);
 
 	return ret;
 }
@@ -3254,8 +3292,13 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
 {
 	struct mem_cgroup *memcg;
 
-	if (stock->obj.cached_objcg) {
-		memcg = obj_cgroup_memcg(stock->obj.cached_objcg);
+	if (in_task() && stock->task_obj.cached_objcg) {
+		memcg = obj_cgroup_memcg(stock->task_obj.cached_objcg);
+		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
+			return true;
+	}
+	if (stock->irq_obj.cached_objcg) {
+		memcg = obj_cgroup_memcg(stock->irq_obj.cached_objcg);
 		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
 			return true;
 	}
@@ -3283,9 +3326,9 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	get_obj_stock(flags);
 	__refill_obj_stock(objcg, nr_bytes);
-	local_irq_restore(flags);
+	put_obj_stock(flags);
 }
 
 static void __mod_obj_stock_state(struct obj_cgroup *objcg,
@@ -3325,9 +3368,9 @@ void mod_obj_stock_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	get_obj_stock(flags);
 	__mod_obj_stock_state(objcg, pgdat, idx, nr);
-	local_irq_restore(flags);
+	put_obj_stock(flags);
 }
 
 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
@@ -3380,10 +3423,10 @@ void obj_cgroup_uncharge_mod_state(struct obj_cgroup *objcg, size_t size,
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	get_obj_stock(flags);
 	__refill_obj_stock(objcg, size);
 	__mod_obj_stock_state(objcg, pgdat, idx, -(int)size);
-	local_irq_restore(flags);
+	put_obj_stock(flags);
 }
 
 #endif /* CONFIG_MEMCG_KMEM */
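
As a usage illustration of the flush side after this patch: drain_local_stock()
now drains the irq stock unconditionally but the task stock only when running
in task context, and obj_stock_flush_required() has to inspect both stocks.
The following compilable userspace mock mirrors that structure; all names are
stand-ins, and the kernel's memcg-descendant test is reduced to a pointer
comparison for brevity.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct obj_cgroup { const char *name; };

struct obj_stock {
	struct obj_cgroup *cached_objcg;
	unsigned int nr_bytes;
};

struct memcg_stock_pcp {
	struct obj_stock task_obj;	/* task-context stock */
	struct obj_stock irq_obj;	/* irq-context stock */
};

static struct memcg_stock_pcp stock;	/* one CPU's stock, for the demo */
static bool in_task_ctx = true;		/* stand-in for in_task() */

static void drain_obj_stock(struct obj_stock *s)
{
	s->cached_objcg = NULL;
	s->nr_bytes = 0;
}

/* Mirrors drain_local_stock(): irq_obj always, task_obj only in task context. */
static void drain_local_stock(void)
{
	drain_obj_stock(&stock.irq_obj);
	if (in_task_ctx)
		drain_obj_stock(&stock.task_obj);
}

/* Mirrors obj_stock_flush_required(): either stock may cache the objcg. */
static bool obj_stock_flush_required(struct obj_cgroup *objcg)
{
	if (in_task_ctx && stock.task_obj.cached_objcg == objcg)
		return true;
	return stock.irq_obj.cached_objcg == objcg;
}

int main(void)
{
	struct obj_cgroup cg = { "demo" };

	stock.task_obj.cached_objcg = &cg;
	stock.task_obj.nr_bytes = 512;

	printf("flush required: %d\n", obj_stock_flush_required(&cg));
	drain_local_stock();
	printf("flush required after drain: %d\n", obj_stock_flush_required(&cg));
	return 0;
}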
