@@ -290,7 +290,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
return 0;
batch = tlb->active;
}
VM_BUG_ON(batch->nr > batch->max);
VM_BUG_ON_PAGE(batch->nr > batch->max, page);

return batch->max - batch->nr;
}
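
For reference, the VM_BUG_ON_PAGE()/VM_BUG_ON_VMA()/VM_BUG_ON_MM() variants used throughout these hunks come from include/linux/mmdebug.h. A rough sketch of their shape under CONFIG_DEBUG_VM (the in-tree macros may differ in detail, e.g. in exactly what they print alongside the dump):

#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond)		BUG_ON(cond)
/* On failure, dump the object the assertion is about, then BUG(). */
#define VM_BUG_ON_PAGE(cond, page)					\
	do { if (unlikely(cond)) { dump_page(page); BUG(); } } while (0)
#define VM_BUG_ON_VMA(cond, vma)					\
	do { if (unlikely(cond)) { dump_vma(vma); BUG(); } } while (0)
#define VM_BUG_ON_MM(cond, mm)						\
	do { if (unlikely(cond)) { dump_mm(mm); BUG(); } } while (0)
#else
/* Without CONFIG_DEBUG_VM the checks compile away (condition still type-checked). */
#define VM_BUG_ON(cond)			BUILD_BUG_ON_INVALID(cond)
#define VM_BUG_ON_PAGE(cond, page)	VM_BUG_ON(cond)
#define VM_BUG_ON_VMA(cond, vma)	VM_BUG_ON(cond)
#define VM_BUG_ON_MM(cond, mm)		VM_BUG_ON(cond)
#endif
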
@@ -2722,7 +2722,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto unwritable_page;
}
} else
VM_BUG_ON(!PageLocked(old_page));
VM_BUG_ON_PAGE(!PageLocked(old_page), old_page);

/*
* Since we dropped the lock we need to revalidate
@@ -3010,7 +3010,10 @@ EXPORT_SYMBOL(unmap_mapping_range);
/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
* We return with pte unmapped and unlocked.
*
* We return with the mmap_sem locked or unlocked in the same cases
* as does filemap_fault().
*/
static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
@@ -3370,7 +3373,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(!(ret & VM_FAULT_LOCKED)))
lock_page(vmf.page);
else
VM_BUG_ON(!PageLocked(vmf.page));
VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);

/*
* Should we do an early C-O-W break?
@@ -3407,7 +3410,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
goto unwritable_page;
}
} else
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON_PAGE(!PageLocked(page), page);
page_mkwrite = 1;
}
}
@@ -3535,6 +3538,12 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
update_mmu_cache(vma, address, pte);
}

/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults).
* The mmap_sem may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
*/
static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags, pte_t orig_pte)
@@ -3556,7 +3565,9 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
* We return with pte unmapped and unlocked.
* The mmap_sem may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
*/
static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
@@ -3746,7 +3757,10 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
* We return with pte unmapped and unlocked.
*
* The mmap_sem may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
*/
static int handle_pte_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
@@ -3815,6 +3829,9 @@ static int handle_pte_fault(struct mm_struct *mm,

/*
* By the time we get here, we already hold the mm semaphore
*
* The mmap_sem may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
*/
static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
@@ -3910,6 +3927,12 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}

/*
* By the time we get here, we already hold the mm semaphore
*
* The mmap_sem may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
*/
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
{
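
The "mmap_sem may have been released" notes added above are a contract with handle_mm_fault() callers: when __lock_page_or_retry() drops the semaphore, the fault returns VM_FAULT_RETRY and the caller must not touch the vma or blindly call up_read(). A simplified, architecture-neutral sketch of the usual caller pattern (not any specific arch's fault handler):

	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int fault;

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	/* ... access checks elided ... */
	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
		/*
		 * mmap_sem has already been dropped inside the fault path
		 * (see __lock_page_or_retry()); retry exactly once without
		 * FAULT_FLAG_ALLOW_RETRY so the second attempt cannot loop.
		 */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	up_read(&mm->mmap_sem);
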
@@ -484,7 +484,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
if (PageUptodate(page))
SetPageUptodate(newpage);
if (TestClearPageActive(page)) {
VM_BUG_ON(PageUnevictable(page));
VM_BUG_ON_PAGE(PageUnevictable(page), page);
SetPageActive(newpage);
} else if (TestClearPageUnevictable(page))
SetPageUnevictable(newpage);
@@ -843,7 +843,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
* free the metadata, so the page can be freed.
*/
if (!page->mapping) {
VM_BUG_ON(PageAnon(page));
VM_BUG_ON_PAGE(PageAnon(page), page);
if (page_has_private(page)) {
try_to_free_buffers(page);
goto out_unlock;
@@ -91,6 +91,26 @@ void mlock_vma_page(struct page *page)
}
}

/*
* Isolate a page from LRU with optional get_page() pin.
* Assumes lru_lock already held and page already pinned.
*/
static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
{
if (PageLRU(page)) {
struct lruvec *lruvec;

lruvec = mem_cgroup_page_lruvec(page, page_zone(page));
if (getpage)
get_page(page);
ClearPageLRU(page);
del_page_from_lru_list(page, lruvec, page_lru(page));
return true;
}

return false;
}

/*
* Finish munlock after successful page isolation
*
@@ -127,14 +147,17 @@ static void __munlock_isolated_page(struct page *page)
static void __munlock_isolation_failed(struct page *page)
{
if (PageUnevictable(page))
count_vm_event(UNEVICTABLE_PGSTRANDED);
__count_vm_event(UNEVICTABLE_PGSTRANDED);
else
count_vm_event(UNEVICTABLE_PGMUNLOCKED);
__count_vm_event(UNEVICTABLE_PGMUNLOCKED);
}

/**
* munlock_vma_page - munlock a vma page
* @page - page to be unlocked
* @page - page to be unlocked, either a normal page or THP page head
*
* returns the size of the page as a page mask (0 for normal page,
* HPAGE_PMD_NR - 1 for THP head page)
*
* called from munlock()/munmap() path with page supposedly on the LRU.
* When we munlock a page, because the vma where we found the page is being
@@ -149,34 +172,57 @@ static void __munlock_isolation_failed(struct page *page)
*/
unsigned int munlock_vma_page(struct page *page)
{
unsigned int page_mask = 0;
unsigned int nr_pages;
struct zone *zone = page_zone(page);

/* For try_to_munlock() and to serialize with page migration */
BUG_ON(!PageLocked(page));

if (TestClearPageMlocked(page)) {
unsigned int nr_pages = hpage_nr_pages(page);
mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
page_mask = nr_pages - 1;
if (!isolate_lru_page(page))
__munlock_isolated_page(page);
else
__munlock_isolation_failed(page);
/*
* Serialize with any parallel __split_huge_page_refcount() which
* might otherwise copy PageMlocked to part of the tail pages before
* we clear it in the head page. It also stabilizes hpage_nr_pages().
*/
spin_lock_irq(&zone->lru_lock);

nr_pages = hpage_nr_pages(page);
if (!TestClearPageMlocked(page))
goto unlock_out;

__mod_zone_page_state(zone, NR_MLOCK, -nr_pages);

if (__munlock_isolate_lru_page(page, true)) {
spin_unlock_irq(&zone->lru_lock);
__munlock_isolated_page(page);
goto out;
}
__munlock_isolation_failed(page);

return page_mask;
unlock_out:
spin_unlock_irq(&zone->lru_lock);

out:
return nr_pages - 1;
}

/**
* __mlock_vma_pages_range() - mlock a range of pages in the vma.
* @vma: target vma
* @start: start address
* @end: end address
* @nonblocking:
*
* This takes care of making the pages present too.
*
* return 0 on success, negative error code on error.
*
* vma->vm_mm->mmap_sem must be held for at least read.
* vma->vm_mm->mmap_sem must be held.
*
* If @nonblocking is NULL, it may be held for read or write and will
* be unperturbed.
*
* If @nonblocking is non-NULL, it must be held for read only and may be
* released. If it's released, *@nonblocking will be set to 0.
*/
long __mlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end, int *nonblocking)
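
The @nonblocking contract above is easiest to read from the caller's side. A simplified sketch of a populate-style caller, loosely modeled on __mm_populate() (vma validation and error handling elided):

	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long nstart, nend;
	int locked = 0;
	long ret;

	for (nstart = start; nstart < end; nstart = nend) {
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		/* ... check vma, clamp nend to min(end, vma->vm_end) ... */
		ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
		if (ret < 0)
			break;
		/* ret pages were faulted in; locked == 0 means mmap_sem was dropped */
		nend = nstart + ret * PAGE_SIZE;
	}
	if (locked)
		up_read(&mm->mmap_sem);
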
@@ -187,9 +233,9 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,

VM_BUG_ON(start & ~PAGE_MASK);
VM_BUG_ON(end & ~PAGE_MASK);
VM_BUG_ON(start < vma->vm_start);
VM_BUG_ON(end > vma->vm_end);
VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
VM_BUG_ON_VMA(start < vma->vm_start, vma);
VM_BUG_ON_VMA(end > vma->vm_end, vma);
VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);

gup_flags = FOLL_TOUCH | FOLL_MLOCK;
/*
@@ -242,8 +288,8 @@ static int __mlock_posix_error_return(long retval)
static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
int *pgrescued)
{
VM_BUG_ON(PageLRU(page));
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(!PageLocked(page), page);

if (page_mapcount(page) <= 1 && page_evictable(page)) {
pagevec_add(pvec, page);
@@ -287,50 +333,45 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
int i;
int nr = pagevec_count(pvec);
int delta_munlocked = -nr;
int delta_munlocked;
struct pagevec pvec_putback;
int pgrescued = 0;

pagevec_init(&pvec_putback, 0);

/* Phase 1: page isolation */
spin_lock_irq(&zone->lru_lock);
for (i = 0; i < nr; i++) {
struct page *page = pvec->pages[i];

if (TestClearPageMlocked(page)) {
struct lruvec *lruvec;
int lru;

if (PageLRU(page)) {
lruvec = mem_cgroup_page_lruvec(page, zone);
lru = page_lru(page);
/*
* We already have a pin from follow_page_mask()
* so we can spare the get_page() here.
*/
ClearPageLRU(page);
del_page_from_lru_list(page, lruvec, lru);
} else {
__munlock_isolation_failed(page);
goto skip_munlock;
}

} else {
skip_munlock:
/*
* We won't be munlocking this page in the next phase
* but we still need to release the follow_page_mask()
* pin.
* We already have a pin from follow_page_mask()
* so we can spare the get_page() here.
*/
pvec->pages[i] = NULL;
put_page(page);
delta_munlocked++;
if (__munlock_isolate_lru_page(page, false))
continue;
else
__munlock_isolation_failed(page);
}

/*
* We won't be munlocking this page in the next phase
* but we still need to release the follow_page_mask()
* pin. We cannot do it under lru_lock however. If it's
* the last pin, __page_cache_release() would deadlock.
*/
pagevec_add(&pvec_putback, pvec->pages[i]);
pvec->pages[i] = NULL;
}
delta_munlocked = -nr + pagevec_count(&pvec_putback);
__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
spin_unlock_irq(&zone->lru_lock);

/* Now we can release pins of pages that we are not munlocking */
pagevec_release(&pvec_putback);

/* Phase 2: page munlock */
pagevec_init(&pvec_putback, 0);
for (i = 0; i < nr; i++) {
struct page *page = pvec->pages[i];

@@ -490,11 +531,11 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
zoneid, start, end);
__munlock_pagevec(&pvec, zone);
goto next;
if (pagevec_add(&pvec, page) == 0)
__munlock_pagevec(&pvec, zone);
}
}
page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
/* It's a bug to munlock in the middle of a THP page */
VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
page_increm = 1 + page_mask;
start += page_increm * PAGE_SIZE;
next:
cond_resched_rcu_qs();
@@ -751,7 +792,7 @@ static int do_mlockall(int flags)

/* Ignore errors */
mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
cond_resched();
cond_resched_rcu_qs();
}
out:
return 0;
@@ -407,8 +407,9 @@ static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
struct vm_area_struct *vma;
vma = rb_entry(nd, struct vm_area_struct, vm_rb);
BUG_ON(vma != ignore &&
vma->rb_subtree_gap != vma_compute_subtree_gap(vma));
VM_BUG_ON_VMA(vma != ignore &&
vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
vma);
}
}

@@ -440,7 +441,7 @@ static void validate_mm(struct mm_struct *mm)
pr_emerg("map_count %d rb %d\n", mm->map_count, i);
bug = 1;
}
BUG_ON(bug);
VM_BUG_ON_MM(bug, mm);
}
#else
#define validate_mm_rb(root, ignore) do { } while (0)
@@ -2806,7 +2807,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
* safe. It is only safe to keep the vm_pgoff
* linear if there are no pages mapped yet.
*/
VM_BUG_ON(faulted_in_anon_vma);
VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
*vmap = vma = new_vma;
}
*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
@@ -191,7 +191,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
if (pmd_trans_huge(*old_pmd)) {
int err = 0;
if (extent == HPAGE_PMD_SIZE) {
VM_BUG_ON(vma->vm_file || !vma->anon_vma);
VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma,
vma);
/* See comment in move_ptes() */
if (need_rmap_locks)
anon_vma_lock_write(vma->anon_vma);
@@ -535,12 +535,12 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
return 0;

if (page_is_guard(buddy) && page_order(buddy) == order) {
VM_BUG_ON(page_count(buddy) != 0);
VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
return 1;
}

if (PageBuddy(buddy) && page_order(buddy) == order) {
VM_BUG_ON(page_count(buddy) != 0);
VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
return 1;
}
return 0;
@@ -592,8 +592,8 @@ static inline void __free_one_page(struct page *page,

page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

VM_BUG_ON(page_idx & ((1 << order) - 1));
VM_BUG_ON(bad_range(zone, page));
VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
VM_BUG_ON_PAGE(bad_range(zone, page), page);

while (order < MAX_ORDER-1) {
buddy_idx = __find_buddy_index(page_idx, order);
@@ -859,7 +859,7 @@ static inline void expand(struct zone *zone, struct page *page,
area--;
high--;
size >>= 1;
VM_BUG_ON(bad_range(zone, &page[size]));
VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

#ifdef CONFIG_DEBUG_PAGEALLOC
if (high < debug_guardpage_minorder()) {
@@ -1006,7 +1006,7 @@ int move_freepages(struct zone *zone,

for (page = start_page; page <= end_page;) {
/* Make sure we are not inadvertently changing nodes */
VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

if (!pfn_valid_within(page_to_pfn(page))) {
page++;
@@ -1496,8 +1496,8 @@ void split_page(struct page *page, unsigned int order)
{
int i;

VM_BUG_ON(PageCompound(page));
VM_BUG_ON(!page_count(page));
VM_BUG_ON_PAGE(PageCompound(page), page);
VM_BUG_ON_PAGE(!page_count(page), page);

#ifdef CONFIG_KMEMCHECK
/*
@@ -1648,7 +1648,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
zone_statistics(preferred_zone, zone, gfp_flags);
local_irq_restore(flags);

VM_BUG_ON(bad_range(zone, page));
VM_BUG_ON_PAGE(bad_range(zone, page), page);
if (prep_new_page(page, order, gfp_flags))
goto again;
return page;
@@ -6207,7 +6207,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
pfn = page_to_pfn(page);
bitmap = get_pageblock_bitmap(zone, pfn);
bitidx = pfn_to_bitidx(zone, pfn);
VM_BUG_ON(!zone_spans_pfn(zone, pfn));
VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);

for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
if (flags & value)
@@ -6725,3 +6725,4 @@ void dump_page(struct page *page)
dump_page_flags(page->flags);
mem_cgroup_print_bad_page(page);
}
EXPORT_SYMBOL_GPL(dump_page);
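
Exporting dump_page() makes it callable from modular code, so VM_BUG_ON_PAGE() can be used in modules as well. A trivial, hypothetical example of such a module-side check (check_page_sanity() is not an in-tree function):

/* Hypothetical module helper: assert an LRU invariant on a page. */
static void check_page_sanity(struct page *page)
{
	/* A page must not be both active and unevictable (cf. lru_cache_add()). */
	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
}
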
@@ -177,7 +177,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
if (!walk->mm)
return -EINVAL;

VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);

pgd = pgd_offset(walk->mm, addr);
do {
@@ -531,7 +531,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
unsigned long address = __vma_address(page, vma);

/* page should be within @vma mapping range */
VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);

return address;
}
@@ -966,9 +966,9 @@ void page_move_anon_rmap(struct page *page,
{
struct anon_vma *anon_vma = vma->anon_vma;

VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(!anon_vma);
VM_BUG_ON(page->index != linear_page_index(vma, address));
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_VMA(!anon_vma, vma);
VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);

anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
@@ -1067,7 +1067,7 @@ void do_page_add_anon_rmap(struct page *page,
if (unlikely(PageKsm(page)))
return;

VM_BUG_ON(!PageLocked(page));
VM_BUG_ON_PAGE(!PageLocked(page), page);
/* address might be in next vma when migration races vma_adjust */
if (first)
__page_set_anon_rmap(page, vma, address, exclusive);
@@ -1088,7 +1088,7 @@ void do_page_add_anon_rmap(struct page *page,
void page_add_new_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
SetPageSwapBacked(page);
atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
if (PageTransHuge(page))
@@ -295,8 +295,8 @@ static int shmem_add_to_page_cache(struct page *page,
{
int error;

VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(!PageSwapBacked(page));
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

page_cache_get(page);
page->mapping = mapping;
@@ -1567,7 +1567,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
if (mode)
new.inuse = page->objects;

VM_BUG_ON(new.frozen);
VM_BUG_ON_PAGE(new.frozen, &new);
new.frozen = 1;

if (!__cmpxchg_double_slab(s, page,
@@ -1820,7 +1820,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
set_freepointer(s, freelist, prior);
new.counters = counters;
new.inuse--;
VM_BUG_ON(!new.frozen);
VM_BUG_ON_PAGE(!new.frozen, &new);

} while (!__cmpxchg_double_slab(s, page,
prior, counters,
@@ -1848,7 +1848,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,

old.freelist = page->freelist;
old.counters = page->counters;
VM_BUG_ON(!old.frozen);
VM_BUG_ON_PAGE(!old.frozen, &old);

/* Determine target state of the slab */
new.counters = old.counters;
@@ -1960,7 +1960,7 @@ static void unfreeze_partials(struct kmem_cache *s,

old.freelist = page->freelist;
old.counters = page->counters;
VM_BUG_ON(!old.frozen);
VM_BUG_ON_PAGE(!old.frozen, &old);

new.counters = old.counters;
new.freelist = old.freelist;
@@ -2256,7 +2256,7 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
counters = page->counters;

new.counters = counters;
VM_BUG_ON(!new.frozen);
VM_BUG_ON_PAGE(!new.frozen, &new);

new.inuse = page->objects;
new.frozen = freelist != NULL;
@@ -2350,7 +2350,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
* page is pointing to the page from which the objects are obtained.
* That page must be frozen for per cpu allocations to work.
*/
VM_BUG_ON(!c->page->frozen);
VM_BUG_ON_PAGE(!c->page->frozen, c->page);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
local_irq_restore(flags);
@@ -58,7 +58,7 @@ static void __page_cache_release(struct page *page)

spin_lock_irqsave(&zone->lru_lock, flags);
lruvec = mem_cgroup_page_lruvec(page, zone);
VM_BUG_ON(!PageLRU(page));
VM_BUG_ON_PAGE(!PageLRU(page), page);
__ClearPageLRU(page);
del_page_from_lru_list(page, lruvec, page_off_lru(page));
spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -594,8 +594,8 @@ EXPORT_SYMBOL(__lru_cache_add);
*/
void lru_cache_add(struct page *page)
{
VM_BUG_ON(PageActive(page) && PageUnevictable(page));
VM_BUG_ON(PageLRU(page));
VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
VM_BUG_ON_PAGE(PageLRU(page), page);
__lru_cache_add(page);
}

@@ -836,7 +836,7 @@ void release_pages(struct page **pages, int nr, int cold)
}

lruvec = mem_cgroup_page_lruvec(page, zone);
VM_BUG_ON(!PageLRU(page));
VM_BUG_ON_PAGE(!PageLRU(page), page);
__ClearPageLRU(page);
del_page_from_lru_list(page, lruvec, page_off_lru(page));
}
@@ -879,9 +879,9 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
{
const int file = 0;

VM_BUG_ON(!PageHead(page));
VM_BUG_ON(PageCompound(page_tail));
VM_BUG_ON(PageLRU(page_tail));
VM_BUG_ON_PAGE(!PageHead(page), page);
VM_BUG_ON_PAGE(PageCompound(page_tail), page);
VM_BUG_ON_PAGE(PageLRU(page_tail), page);
VM_BUG_ON(NR_CPUS != 1 &&
!spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

@@ -920,7 +920,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
int active = PageActive(page);
enum lru_list lru = page_lru(page);

VM_BUG_ON(PageLRU(page));
VM_BUG_ON_PAGE(PageLRU(page), page);

SetPageLRU(page);
add_page_to_lru_list(page, lruvec, lru);
@@ -87,9 +87,9 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
int error;
struct address_space *address_space;

VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(PageSwapCache(page));
VM_BUG_ON(!PageSwapBacked(page));
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageSwapCache(page), page);
VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

page_cache_get(page);
SetPageSwapCache(page);
@@ -143,9 +143,9 @@ void __delete_from_swap_cache(struct page *page)
swp_entry_t entry;
struct address_space *address_space;

VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(!PageSwapCache(page));
VM_BUG_ON(PageWriteback(page));
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageSwapCache(page), page);
VM_BUG_ON_PAGE(PageWriteback(page), page);

entry.val = page_private(page);
address_space = swap_address_space(entry);
@@ -169,8 +169,8 @@ int add_to_swap(struct page *page, struct list_head *list)
swp_entry_t entry;
int err;

VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(!PageUptodate(page));
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageUptodate(page), page);

entry = get_swap_page();
if (!entry.val)
@@ -887,7 +887,7 @@ int reuse_swap_page(struct page *page)
{
int count;

VM_BUG_ON(!PageLocked(page));
VM_BUG_ON_PAGE(!PageLocked(page), page);
if (unlikely(PageKsm(page)))
return 0;
count = page_mapcount(page);
@@ -907,7 +907,7 @@ int reuse_swap_page(struct page *page)
*/
int try_to_free_swap(struct page *page)
{
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON_PAGE(!PageLocked(page), page);

if (!PageSwapCache(page))
return 0;
@@ -2814,15 +2814,15 @@ struct swap_info_struct *page_swap_info(struct page *page)
*/
struct address_space *__page_file_mapping(struct page *page)
{
VM_BUG_ON(!PageSwapCache(page));
VM_BUG_ON_PAGE(!PageSwapCache(page), page);
return page_swap_info(page)->swap_file->f_mapping;
}
EXPORT_SYMBOL_GPL(__page_file_mapping);

pgoff_t __page_file_index(struct page *page)
{
swp_entry_t swap = { .val = page_private(page) };
VM_BUG_ON(!PageSwapCache(page));
VM_BUG_ON_PAGE(!PageSwapCache(page), page);
return swp_offset(swap);
}
EXPORT_SYMBOL_GPL(__page_file_index);
@@ -644,7 +644,7 @@ void putback_lru_page(struct page *page)
bool is_unevictable;
int was_unevictable = PageUnevictable(page);

VM_BUG_ON(PageLRU(page));
VM_BUG_ON_PAGE(PageLRU(page), page);

redo:
ClearPageUnevictable(page);
@@ -834,8 +834,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (!trylock_page(page))
goto keep;

VM_BUG_ON(PageActive(page));
VM_BUG_ON(page_zone(page) != zone);
VM_BUG_ON_PAGE(PageActive(page), page);
VM_BUG_ON_PAGE(page_zone(page) != zone, page);

sc->nr_scanned++;

@@ -1119,14 +1119,14 @@ static unsigned long shrink_page_list(struct list_head *page_list,
/* Not a candidate for swapping, so reclaim swap space. */
if (PageSwapCache(page) && vm_swap_full())
try_to_free_swap(page);
VM_BUG_ON(PageActive(page));
VM_BUG_ON_PAGE(PageActive(page), page);
SetPageActive(page);
pgactivate++;
keep_locked:
unlock_page(page);
keep:
list_add(&page->lru, &ret_pages);
VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
}

mem_cgroup_uncharge_list(&free_pages);
@@ -1317,7 +1317,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
page = lru_to_page(src);
prefetchw_prev_lru_page(page, src, flags);

VM_BUG_ON(!PageLRU(page));
VM_BUG_ON_PAGE(!PageLRU(page), page);

switch (__isolate_lru_page(page, mode)) {
case 0:
@@ -1372,7 +1372,7 @@ int isolate_lru_page(struct page *page)
{
int ret = -EBUSY;

VM_BUG_ON(!page_count(page));
VM_BUG_ON_PAGE(!page_count(page), page);

if (PageLRU(page)) {
struct zone *zone = page_zone(page);
@@ -1443,7 +1443,7 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
struct page *page = lru_to_page(page_list);
int lru;

VM_BUG_ON(PageLRU(page));
VM_BUG_ON_PAGE(PageLRU(page), page);
list_del(&page->lru);
if (unlikely(!page_evictable(page))) {
spin_unlock_irq(&zone->lru_lock);
@@ -1665,7 +1665,7 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
page = lru_to_page(list);
lruvec = mem_cgroup_page_lruvec(page, zone);

VM_BUG_ON(PageLRU(page));
VM_BUG_ON_PAGE(PageLRU(page), page);
SetPageLRU(page);

nr_pages = hpage_nr_pages(page);
@@ -3840,7 +3840,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
if (page_evictable(page)) {
enum lru_list lru = page_lru_base_type(page);

VM_BUG_ON(PageActive(page));
VM_BUG_ON_PAGE(PageActive(page), page);
ClearPageUnevictable(page);
del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
add_page_to_lru_list(page, lruvec, lru);