mm: memcontrol: make lruvec lock safe when LRU pages are reparented
The diagram below shows how to make the folio lruvec lock safe when LRU
pages are reparented.

folio_lruvec_lock(folio)
    retry:
        lruvec = folio_lruvec(folio);

        // The folio may be reparented at this point.
        spin_lock(&lruvec->lru_lock);

        if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio)))
            // We acquired the wrong lruvec lock and need to retry,
            // because the folio is now on the parent memcg's lruvec list.
            goto retry;

        // If we reach here, folio_memcg(folio) is stable.

memcg_reparent_objcgs(memcg)
    // lruvec belongs to memcg and lruvec_parent belongs to parent memcg.
    spin_lock(&lruvec->lru_lock);
    spin_lock(&lruvec_parent->lru_lock);

    // Move all the pages from the lruvec list to the parent lruvec list.

    spin_unlock(&lruvec_parent->lru_lock);
    spin_unlock(&lruvec->lru_lock);

After we acquire the lruvec lock, we need to check whether the folio has
been reparented. If so, we need to reacquire the new lruvec lock. The LRU
page reparenting path will also acquire the lruvec lock (to be implemented
in a later patch), so folio_memcg() cannot change while we hold the lruvec
lock.

Since lruvec_memcg(lruvec) is always equal to folio_memcg(folio) once we
hold the lruvec lock, the lruvec_memcg_debug() check is pointless, so
remove it.

This is a preparation for reparenting the LRU pages.
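
For illustration, a minimal caller-side sketch of the guarantee described
above (the helper name is hypothetical and not part of this patch):

    #include <linux/memcontrol.h>
    #include <linux/mm.h>

    /* Hypothetical caller, for illustration only. */
    static void isolate_folio_sketch(struct folio *folio)
    {
    	struct lruvec *lruvec;

    	/* Retries internally until lruvec_memcg(lruvec) == folio_memcg(folio). */
    	lruvec = folio_lruvec_lock(folio);

    	/*
    	 * folio_memcg(folio) cannot change here: reparenting also takes
    	 * lruvec->lru_lock, so the folio stays on this lruvec's lists
    	 * until the lock is dropped.
    	 */

    	/* ... manipulate the folio's LRU state here ... */

    	spin_unlock(&lruvec->lru_lock);
    }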

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Roman Gushchin <guro@fb.com>
Muchun Song authored and intel-lab-lkp committed Aug 14, 2021
1 parent 7b561df commit 85e68f3
Showing 4 changed files with 55 additions and 39 deletions.
18 changes: 3 additions & 15 deletions include/linux/memcontrol.h
@@ -769,7 +769,9 @@ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
* folio_lruvec - return lruvec for isolating/putting an LRU folio
* @folio: Pointer to the folio.
*
* This function relies on folio->mem_cgroup being stable.
* The lruvec can change to its parent lruvec when the page is reparented.
* The caller needs to recheck if it cares about this change (just like
* folio_lruvec_lock() does).
*/
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
@@ -788,15 +790,6 @@ struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
return css ? container_of(css, struct mem_cgroup, css) : NULL;
@@ -1243,11 +1236,6 @@ static inline struct lruvec *folio_lruvec(struct folio *folio)
return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
return NULL;
10 changes: 9 additions & 1 deletion mm/compaction.c
@@ -515,6 +515,8 @@ compact_folio_lruvec_lock_irqsave(struct folio *folio, unsigned long *flags,
{
struct lruvec *lruvec;

rcu_read_lock();
retry:
lruvec = folio_lruvec(folio);

/* Track if the lock is contended in async mode */
@@ -527,7 +529,13 @@ compact_folio_lruvec_lock_irqsave(struct folio *folio, unsigned long *flags,

spin_lock_irqsave(&lruvec->lru_lock, *flags);
out:
lruvec_memcg_debug(lruvec, folio);
if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
spin_unlock_irqrestore(&lruvec->lru_lock, *flags);
goto retry;
}

/* See the comments in folio_lruvec_lock(). */
rcu_read_unlock();

return lruvec;
}
62 changes: 39 additions & 23 deletions mm/memcontrol.c
@@ -1146,23 +1146,6 @@ int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
return ret;
}

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
struct mem_cgroup *memcg;

if (mem_cgroup_disabled())
return;

memcg = folio_memcg(folio);

if (!memcg)
VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != root_mem_cgroup, folio);
else
VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
}
#endif

/**
* folio_lruvec_lock - lock and return lruvec for a given folio.
* @folio: Pointer to the folio.
@@ -1175,31 +1158,64 @@ void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
*/
struct lruvec *folio_lruvec_lock(struct folio *folio)
{
struct lruvec *lruvec = folio_lruvec(folio);
struct lruvec *lruvec;

rcu_read_lock();
retry:
lruvec = folio_lruvec(folio);
spin_lock(&lruvec->lru_lock);
lruvec_memcg_debug(lruvec, folio);

if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
spin_unlock(&lruvec->lru_lock);
goto retry;
}

/*
* Preemption is disabled inside spin_lock(), which itself serves as an
* RCU read-side critical section, so it is safe to release the explicit
* RCU read lock here.
*/
rcu_read_unlock();

return lruvec;
}

struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
struct lruvec *lruvec = folio_lruvec(folio);
struct lruvec *lruvec;

rcu_read_lock();
retry:
lruvec = folio_lruvec(folio);
spin_lock_irq(&lruvec->lru_lock);
lruvec_memcg_debug(lruvec, folio);

if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
spin_unlock_irq(&lruvec->lru_lock);
goto retry;
}

/* See the comments in folio_lruvec_lock(). */
rcu_read_unlock();

return lruvec;
}

struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
unsigned long *flags)
{
struct lruvec *lruvec = folio_lruvec(folio);
struct lruvec *lruvec;

rcu_read_lock();
retry:
lruvec = folio_lruvec(folio);
spin_lock_irqsave(&lruvec->lru_lock, *flags);
lruvec_memcg_debug(lruvec, folio);

if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
spin_unlock_irqrestore(&lruvec->lru_lock, *flags);
goto retry;
}

/* See the comments in folio_lruvec_lock(). */
rcu_read_unlock();

return lruvec;
}
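
For context, a rough sketch of the reparenting side these rechecks pair with
(memcg_reparent_objcgs() itself changes in a later patch of this series; the
function below is an illustration only, with lockdep nesting annotations
omitted):

    #include <linux/memcontrol.h>
    #include <linux/mmzone.h>

    /* Illustration only; not part of this patch. */
    static void reparent_lru_sketch(struct mem_cgroup *memcg,
    				    struct mem_cgroup *parent, int nid)
    {
    	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
    	struct lruvec *lruvec_parent = mem_cgroup_lruvec(parent, NODE_DATA(nid));

    	/*
    	 * Holding both lru_locks means folio_lruvec_lock() either sees the
    	 * folio on the old lruvec (its recheck fails and it retries) or on
    	 * the parent lruvec (its recheck succeeds). Real code would need
    	 * spin_lock_nested() to keep lockdep happy.
    	 */
    	spin_lock(&lruvec->lru_lock);
    	spin_lock(&lruvec_parent->lru_lock);

    	/* ... move all folios from lruvec's LRU lists to lruvec_parent's ... */

    	spin_unlock(&lruvec_parent->lru_lock);
    	spin_unlock(&lruvec->lru_lock);
    }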
4 changes: 4 additions & 0 deletions mm/swap.c
@@ -295,6 +295,10 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)

void lru_note_cost_folio(struct folio *folio)
{
/*
* The RCU read lock is held by the caller, so we do not need to worry
* about the lruvec returned by folio_lruvec() being freed.
*/
lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio),
folio_nr_pages(folio));
}
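
As a usage sketch of the comment above (the caller name is hypothetical and
only illustrates the assumed calling convention):

    #include <linux/rcupdate.h>
    #include <linux/swap.h>

    /* Hypothetical caller, for illustration only. */
    static void note_refault_cost(struct folio *folio)
    {
    	/*
    	 * The RCU read lock keeps the lruvec returned by folio_lruvec()
    	 * alive even if the folio is reparented concurrently.
    	 */
    	rcu_read_lock();
    	lru_note_cost_folio(folio);
    	rcu_read_unlock();
    }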
