mm: vmscan: rework move_pages_to_lru()
In a later patch, we will reparent the LRU pages. A page being moved to its
appropriate LRU list can be reparented while move_pages_to_lru() is in
progress, so it is wrong for the caller to hold a single lruvec lock across
the whole operation. Use the more general interface
folio_lruvec_relock_irq() to acquire the correct lruvec lock for each folio
instead.

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
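
For reference, the rework depends on folio_lruvec_relock_irq() keeping the
lru_lock held across consecutive folios that belong to the same lruvec and
only dropping/retaking it on a mismatch. A sketch of the helper's semantics
(paraphrased from the kernel's memcontrol helpers; treat it as illustrative
rather than the exact upstream code):

static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
                                                     struct lruvec *locked_lruvec)
{
        if (locked_lruvec) {
                /* Fast path: the folio still belongs to the lruvec we hold. */
                if (folio_matches_lruvec(folio, locked_lruvec))
                        return locked_lruvec;

                unlock_page_lruvec_irq(locked_lruvec);
        }

        /* Look up the folio's lruvec and take its lru_lock with IRQs off. */
        return folio_lruvec_lock_irq(folio);
}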
Muchun Song authored and intel-lab-lkp committed Aug 14, 2021
1 parent 85e68f3 commit 5bfd087
Showing 2 changed files with 26 additions and 24 deletions.
1 change: 1 addition & 0 deletions include/linux/mm.h
@@ -227,6 +227,7 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
 #define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
 
 #define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
+#define lru_to_folio(head) (list_entry((head)->prev, struct folio, lru))
 
 void setup_initial_init_mm(void *start_code, void *end_code,
                            void *end_data, void *brk);
49 changes: 25 additions & 24 deletions mm/vmscan.c
@@ -2153,23 +2153,28 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
  * move_pages_to_lru() moves pages from private @list to appropriate LRU list.
  * On return, @list is reused as a list of pages to be freed by the caller.
  *
- * Returns the number of pages moved to the given lruvec.
+ * Returns the number of pages moved to the appropriate LRU list.
+ *
+ * Note: The caller must not hold any lruvec lock.
  */
-static unsigned int move_pages_to_lru(struct lruvec *lruvec,
-                                      struct list_head *list)
+static unsigned int move_pages_to_lru(struct list_head *list)
 {
-        int nr_pages, nr_moved = 0;
+        int nr_moved = 0;
+        struct lruvec *lruvec = NULL;
         LIST_HEAD(pages_to_free);
-        struct page *page;
 
         while (!list_empty(list)) {
-                page = lru_to_page(list);
+                int nr_pages;
+                struct folio *folio = lru_to_folio(list);
+                struct page *page = &folio->page;
+
+                lruvec = folio_lruvec_relock_irq(folio, lruvec);
                 VM_BUG_ON_PAGE(PageLRU(page), page);
                 list_del(&page->lru);
                 if (unlikely(!page_evictable(page))) {
-                        spin_unlock_irq(&lruvec->lru_lock);
+                        unlock_page_lruvec_irq(lruvec);
                         putback_lru_page(page);
-                        spin_lock_irq(&lruvec->lru_lock);
+                        lruvec = NULL;
                         continue;
                 }
 
@@ -2190,27 +2195,25 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
                 __clear_page_lru_flags(page);
 
                 if (unlikely(PageCompound(page))) {
-                        spin_unlock_irq(&lruvec->lru_lock);
+                        unlock_page_lruvec_irq(lruvec);
                         destroy_compound_page(page);
-                        spin_lock_irq(&lruvec->lru_lock);
+                        lruvec = NULL;
                 } else
                         list_add(&page->lru, &pages_to_free);
 
                 continue;
         }
 
-        /*
-         * All pages were isolated from the same lruvec (and isolation
-         * inhibits memcg migration).
-         */
-        VM_BUG_ON_PAGE(!folio_matches_lruvec(page_folio(page), lruvec), page);
+        VM_BUG_ON_PAGE(!folio_matches_lruvec(folio, lruvec), page);
         add_page_to_lru_list(page, lruvec);
         nr_pages = thp_nr_pages(page);
         nr_moved += nr_pages;
         if (PageActive(page))
                 workingset_age_nonresident(lruvec, nr_pages);
 }
 
+        if (lruvec)
+                unlock_page_lruvec_irq(lruvec);
         /*
          * To save our caller's stack, now use input list for pages to free.
          */
@@ -2284,16 +2287,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 
         nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);
 
-        spin_lock_irq(&lruvec->lru_lock);
-        move_pages_to_lru(lruvec, &page_list);
+        move_pages_to_lru(&page_list);
 
+        local_irq_disable();
         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
         item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
         if (!cgroup_reclaim(sc))
                 __count_vm_events(item, nr_reclaimed);
         __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
         __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
-        spin_unlock_irq(&lruvec->lru_lock);
+        local_irq_enable();
 
         lru_note_cost(lruvec, file, stat.nr_pageout);
         mem_cgroup_uncharge_list(&page_list);
@@ -2420,18 +2423,16 @@ static void shrink_active_list(unsigned long nr_to_scan,
         /*
          * Move pages back to the lru list.
          */
-        spin_lock_irq(&lruvec->lru_lock);
-
-        nr_activate = move_pages_to_lru(lruvec, &l_active);
-        nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
+        nr_activate = move_pages_to_lru(&l_active);
+        nr_deactivate = move_pages_to_lru(&l_inactive);
         /* Keep all free pages in l_active list */
         list_splice(&l_inactive, &l_active);
 
+        local_irq_disable();
         __count_vm_events(PGDEACTIVATE, nr_deactivate);
         __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
-
         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
-        spin_unlock_irq(&lruvec->lru_lock);
+        local_irq_enable();
 
         mem_cgroup_uncharge_list(&l_active);
         free_unref_page_list(&l_active);
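
Net effect for callers: lruvec->lru_lock is no longer taken around the call,
since move_pages_to_lru() now acquires and releases the correct lock
internally; callers only disable interrupts around the per-CPU counter
updates. A condensed before/after sketch of the caller-side pattern
(abridged from the shrink_inactive_list() hunk above, not a complete
function):

/* Before: the caller pinned one lruvec lock across the whole move. */
spin_lock_irq(&lruvec->lru_lock);
move_pages_to_lru(lruvec, &page_list);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
spin_unlock_irq(&lruvec->lru_lock);

/*
 * After: move_pages_to_lru() relocks the right lruvec per folio, so the
 * caller only needs IRQs disabled for the per-CPU statistics updates.
 */
move_pages_to_lru(&page_list);
local_irq_disable();
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
local_irq_enable();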
