Skip to content

Commit

Permalink
mm: don't pass "enum lru_list" to del_page_from_lru_list()
Browse files Browse the repository at this point in the history
The parameter is redundant in the sense that it can be potentially
extracted from the "struct page" parameter by page_lru(). We need to
make sure that existing PageActive() or PageUnevictable() remains
until the function returns. A few places don't conform, and simple
reordering fixes them.

This patch may have left page_off_lru() seemingly odd, and we'll take
care of it in the next patch.

Link: https://lore.kernel.org/linux-mm/20201207220949.830352-6-yuzhao@google.com/
Signed-off-by: Yu Zhao <yuzhao@google.com>
  • Loading branch information
yuzhaogoogle authored and xanmod committed Mar 17, 2021
1 parent ad505c5 commit 6bec42c
Show file tree
Hide file tree
Showing 5 changed files with 17 additions and 23 deletions.
5 changes: 3 additions & 2 deletions include/linux/mm_inline.h
Expand Up @@ -124,9 +124,10 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
}

 static __always_inline void del_page_from_lru_list(struct page *page,
-				struct lruvec *lruvec, enum lru_list lru)
+				struct lruvec *lruvec)
 {
 	list_del(&page->lru);
-	update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page));
+	update_lru_size(lruvec, page_lru(page), page_zonenum(page),
+			-thp_nr_pages(page));
 }
#endif
2 changes: 1 addition & 1 deletion mm/compaction.c
Expand Up @@ -1034,7 +1034,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
low_pfn += compound_nr(page) - 1;

/* Successfully isolated */
-		del_page_from_lru_list(page, lruvec, page_lru(page));
+		del_page_from_lru_list(page, lruvec);
mod_node_page_state(page_pgdat(page),
NR_ISOLATED_ANON + page_is_file_lru(page),
thp_nr_pages(page));
Expand Down
3 changes: 1 addition & 2 deletions mm/mlock.c
Expand Up @@ -278,8 +278,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
*/
if (TestClearPageLRU(page)) {
lruvec = relock_page_lruvec_irq(page, lruvec);
-			del_page_from_lru_list(page, lruvec,
-					page_lru(page));
+			del_page_from_lru_list(page, lruvec);
continue;
} else
__munlock_isolation_failed(page);
Expand Down
26 changes: 10 additions & 16 deletions mm/swap.c
Expand Up @@ -85,7 +85,8 @@ static void __page_cache_release(struct page *page)
lruvec = lock_page_lruvec_irqsave(page, &flags);
VM_BUG_ON_PAGE(!PageLRU(page), page);
__ClearPageLRU(page);
-		del_page_from_lru_list(page, lruvec, page_off_lru(page));
+		del_page_from_lru_list(page, lruvec);
+		page_off_lru(page);
unlock_page_lruvec_irqrestore(lruvec, flags);
}
__ClearPageWaiters(page);
Expand Down Expand Up @@ -229,7 +230,7 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
{
if (!PageUnevictable(page)) {
-		del_page_from_lru_list(page, lruvec, page_lru(page));
+		del_page_from_lru_list(page, lruvec);
ClearPageActive(page);
add_page_to_lru_list_tail(page, lruvec);
__count_vm_events(PGROTATED, thp_nr_pages(page));
Expand Down Expand Up @@ -308,10 +309,9 @@ void lru_note_cost_page(struct page *page)
static void __activate_page(struct page *page, struct lruvec *lruvec)
{
if (!PageActive(page) && !PageUnevictable(page)) {
-		int lru = page_lru_base_type(page);
 		int nr_pages = thp_nr_pages(page);

-		del_page_from_lru_list(page, lruvec, lru);
+		del_page_from_lru_list(page, lruvec);
SetPageActive(page);
add_page_to_lru_list(page, lruvec);
trace_mm_lru_activate(page);
Expand Down Expand Up @@ -518,8 +518,7 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
*/
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
{
-	int lru;
-	bool active;
+	bool active = PageActive(page);
int nr_pages = thp_nr_pages(page);

if (PageUnevictable(page))
Expand All @@ -529,10 +528,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
if (page_mapped(page))
return;

-	active = PageActive(page);
-	lru = page_lru_base_type(page);
-
-	del_page_from_lru_list(page, lruvec, lru + active);
+	del_page_from_lru_list(page, lruvec);
ClearPageActive(page);
ClearPageReferenced(page);

Expand Down Expand Up @@ -563,10 +559,9 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
{
if (PageActive(page) && !PageUnevictable(page)) {
-		int lru = page_lru_base_type(page);
 		int nr_pages = thp_nr_pages(page);

-		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
+		del_page_from_lru_list(page, lruvec);
ClearPageActive(page);
ClearPageReferenced(page);
add_page_to_lru_list(page, lruvec);
Expand All @@ -581,11 +576,9 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
{
if (PageAnon(page) && PageSwapBacked(page) &&
!PageSwapCache(page) && !PageUnevictable(page)) {
-		bool active = PageActive(page);
 		int nr_pages = thp_nr_pages(page);

-		del_page_from_lru_list(page, lruvec,
-					LRU_INACTIVE_ANON + active);
+		del_page_from_lru_list(page, lruvec);
ClearPageActive(page);
ClearPageReferenced(page);
/*
Expand Down Expand Up @@ -919,7 +912,8 @@ void release_pages(struct page **pages, int nr)

VM_BUG_ON_PAGE(!PageLRU(page), page);
__ClearPageLRU(page);
-		del_page_from_lru_list(page, lruvec, page_off_lru(page));
+		del_page_from_lru_list(page, lruvec);
+		page_off_lru(page);
}

__ClearPageWaiters(page);
Expand Down
4 changes: 2 additions & 2 deletions mm/vmscan.c
Expand Up @@ -1772,7 +1772,7 @@ int isolate_lru_page(struct page *page)

get_page(page);
lruvec = lock_page_lruvec_irq(page);
-		del_page_from_lru_list(page, lruvec, page_lru(page));
+		del_page_from_lru_list(page, lruvec);
unlock_page_lruvec_irq(lruvec);
ret = 0;
}
Expand Down Expand Up @@ -4294,8 +4294,8 @@ void check_move_unevictable_pages(struct pagevec *pvec)
lruvec = relock_page_lruvec_irq(page, lruvec);
if (page_evictable(page) && PageUnevictable(page)) {
VM_BUG_ON_PAGE(PageActive(page), page);
-			ClearPageUnevictable(page);
-			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
+			del_page_from_lru_list(page, lruvec);
+			ClearPageUnevictable(page);
add_page_to_lru_list(page, lruvec);
pgrescued += nr_pages;
}
Expand Down

0 comments on commit 6bec42c

Please sign in to comment.