mm/memcg: Convert mem_cgroup_charge() to take a folio
Convert all callers of mem_cgroup_charge() to call page_folio() on the
page they're currently passing in.  Many of them will be converted to
use folios themselves soon.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Matthew Wilcox (Oracle) authored and intel-lab-lkp committed Jul 12, 2021
1 parent 500bbee commit 3aa23c53058c0abac2b7fd5d8c80f9b458a2665f
Showing 11 changed files with 32 additions and 29 deletions.
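The change is purely a calling-convention conversion: every caller that holds a struct page * now wraps it in page_folio() before charging. As a rough illustration, the following is a minimal stand-alone C sketch of that pattern; the struct definitions, the page_folio() stub, and the mem_cgroup_charge() body are simplified stand-ins for illustration, not the real kernel implementations.

/* Stand-alone sketch of the call-site conversion (stand-in types, not the
 * kernel's definitions; page_folio() and mem_cgroup_charge() are stubbed
 * purely to show the before/after calling convention). */
#include <stdio.h>

typedef unsigned int gfp_t;
#define GFP_KERNEL 0

struct folio { unsigned long nr_pages; };	/* stand-in */
struct page { struct folio *folio; };		/* stand-in */
struct mm_struct { int unused; };		/* stand-in */

/* Stand-in for the kernel's page_folio() helper. */
static struct folio *page_folio(struct page *page)
{
	return page->folio;
}

/* New signature: the charge API now takes a folio rather than a page. */
static int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
			     gfp_t gfp)
{
	(void)mm;
	(void)gfp;
	printf("charging %lu page(s)\n", folio->nr_pages);
	return 0;
}

int main(void)
{
	struct folio folio = { .nr_pages = 1 };
	struct page page = { .folio = &folio };
	struct mm_struct mm = { 0 };

	/* Old call: mem_cgroup_charge(&page, &mm, GFP_KERNEL);      */
	/* New call: wrap the page in page_folio() at the call site. */
	return mem_cgroup_charge(page_folio(&page), &mm, GFP_KERNEL);
}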
@@ -704,7 +704,7 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
 		page_counter_read(&memcg->memory);
 }
 
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
+int mem_cgroup_charge(struct folio *, struct mm_struct *, gfp_t);
 int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
 				  gfp_t gfp, swp_entry_t entry);
 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
@@ -1190,8 +1190,8 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
 	return false;
 }
 
-static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
-				    gfp_t gfp_mask)
+static inline int mem_cgroup_charge(struct folio *folio,
+				    struct mm_struct *mm, gfp_t gfp)
 {
 	return 0;
 }
@@ -167,7 +167,8 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 				addr + PAGE_SIZE);
 
 	if (new_page) {
-		err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL);
+		err = mem_cgroup_charge(page_folio(new_page), vma->vm_mm,
+					GFP_KERNEL);
 		if (err)
 			return err;
 	}
@@ -872,7 +872,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
 	page->index = offset;
 
 	if (!huge) {
-		error = mem_cgroup_charge(page, NULL, gfp);
+		error = mem_cgroup_charge(page_folio(page), NULL, gfp);
 		if (error)
 			goto error;
 		charged = true;
@@ -603,7 +603,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,

 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
-	if (mem_cgroup_charge(page, vma->vm_mm, gfp)) {
+	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, gfp)) {
 		put_page(page);
 		count_vm_event(THP_FAULT_FALLBACK);
 		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
@@ -1087,7 +1087,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 		goto out_nolock;
 	}
 
-	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
+	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
 		result = SCAN_CGROUP_CHARGE_FAIL;
 		goto out_nolock;
 	}
@@ -1658,7 +1658,7 @@ static void collapse_file(struct mm_struct *mm,
 		goto out;
 	}
 
-	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
+	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
 		result = SCAN_CGROUP_CHARGE_FAIL;
 		goto out;
 	}
@@ -2580,7 +2580,8 @@ struct page *ksm_might_need_to_copy(struct page *page,
 		return page;	/* let do_swap_page report the error */
 
 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
-	if (new_page && mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL)) {
+	if (new_page &&
+	    mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) {
 		put_page(new_page);
 		new_page = NULL;
 	}
@@ -6681,10 +6681,9 @@ void mem_cgroup_calculate_protection(struct mem_cgroup *root,
 			atomic_long_read(&parent->memory.children_low_usage)));
 }
 
-static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
+static int __mem_cgroup_charge(struct folio *folio, struct mem_cgroup *memcg,
 			       gfp_t gfp)
 {
-	struct folio *folio = page_folio(page);
 	unsigned int nr_pages = folio_nr_pages(folio);
 	int ret;
 
@@ -6697,27 +6696,27 @@ static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,

 	local_irq_disable();
 	mem_cgroup_charge_statistics(memcg, nr_pages);
-	memcg_check_events(memcg, page_to_nid(page));
+	memcg_check_events(memcg, folio_nid(folio));
 	local_irq_enable();
 out:
 	return ret;
 }
 
 /**
- * mem_cgroup_charge - charge a newly allocated page to a cgroup
- * @page: page to charge
- * @mm: mm context of the victim
- * @gfp_mask: reclaim mode
+ * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
+ * @folio: Folio to charge.
+ * @mm: mm context of the allocating task.
+ * @gfp: reclaim mode
  *
- * Try to charge @page to the memcg that @mm belongs to, reclaiming
- * pages according to @gfp_mask if necessary. if @mm is NULL, try to
+ * Try to charge @folio to the memcg that @mm belongs to, reclaiming
+ * pages according to @gfp if necessary. If @mm is NULL, try to
  * charge to the active memcg.
  *
- * Do not use this for pages allocated for swapin.
+ * Do not use this for folios allocated for swapin.
  *
  * Returns 0 on success. Otherwise, an error code is returned.
  */
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
+int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
 {
 	struct mem_cgroup *memcg;
 	int ret;
@@ -6726,7 +6725,7 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 		return 0;
 
 	memcg = get_mem_cgroup_from_mm(mm);
-	ret = __mem_cgroup_charge(page, memcg, gfp_mask);
+	ret = __mem_cgroup_charge(folio, memcg, gfp);
 	css_put(&memcg->css);
 
 	return ret;
@@ -6747,6 +6746,7 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
 				  gfp_t gfp, swp_entry_t entry)
 {
+	struct folio *folio = page_folio(page);
 	struct mem_cgroup *memcg;
 	unsigned short id;
 	int ret;
@@ -6761,7 +6761,7 @@ int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
 	memcg = get_mem_cgroup_from_mm(mm);
 	rcu_read_unlock();
 
-	ret = __mem_cgroup_charge(page, memcg, gfp);
+	ret = __mem_cgroup_charge(folio, memcg, gfp);
 
 	css_put(&memcg->css);
 	return ret;
@@ -990,7 +990,7 @@ page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
 	if (!new_page)
 		return NULL;
 
-	if (mem_cgroup_charge(new_page, src_mm, GFP_KERNEL)) {
+	if (mem_cgroup_charge(page_folio(new_page), src_mm, GFP_KERNEL)) {
 		put_page(new_page);
 		return NULL;
 	}
@@ -3019,7 +3019,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		}
 	}
 
-	if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
+	if (mem_cgroup_charge(page_folio(new_page), mm, GFP_KERNEL))
 		goto oom_free_new;
 	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
 
@@ -3768,7 +3768,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	if (!page)
 		goto oom;
 
-	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
+	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
 		goto oom_free_page;
 	cgroup_throttle_swaprate(page, GFP_KERNEL);
 
@@ -4183,7 +4183,8 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
 	if (!vmf->cow_page)
 		return VM_FAULT_OOM;
 
-	if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) {
+	if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm,
+			      GFP_KERNEL)) {
 		put_page(vmf->cow_page);
 		return VM_FAULT_OOM;
 	}
@@ -2811,7 +2811,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,

 	if (unlikely(anon_vma_prepare(vma)))
 		goto abort;
-	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
+	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
 		goto abort;
 
 	/*
@@ -685,7 +685,7 @@ static int shmem_add_to_page_cache(struct page *page,
 	page->index = index;
 
 	if (!PageSwapCache(page)) {
-		error = mem_cgroup_charge(page, charge_mm, gfp);
+		error = mem_cgroup_charge(page_folio(page), charge_mm, gfp);
 		if (error) {
 			if (PageTransHuge(page)) {
 				count_vm_event(THP_FILE_FALLBACK);
@@ -164,7 +164,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 	__SetPageUptodate(page);
 
 	ret = -ENOMEM;
-	if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL))
+	if (mem_cgroup_charge(page_folio(page), dst_mm, GFP_KERNEL))
 		goto out_release;
 
 	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
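Per the updated kernel-doc above, the charge path accounts folio_nr_pages(folio) pages per call, so a THP-sized folio is charged in one operation. Below is a hedged stand-alone sketch of that accounting idea; folio_nr_pages() and the counter are simplified stand-ins, not the kernel's implementations.

/* Stand-alone sketch of per-folio charge accounting (stand-in types and
 * helpers, not the kernel's; a folio of order N covers 2^N base pages). */
#include <stdio.h>

struct folio { unsigned int order; };	/* stand-in */

/* Stand-in for the kernel's folio_nr_pages() helper. */
static unsigned long folio_nr_pages(const struct folio *folio)
{
	return 1UL << folio->order;
}

static unsigned long charged_pages;	/* stand-in for the memcg page counter */

static int __mem_cgroup_charge(struct folio *folio)
{
	/* One call charges every base page covered by the folio. */
	charged_pages += folio_nr_pages(folio);
	return 0;
}

int main(void)
{
	struct folio small = { .order = 0 };	/* one base page */
	struct folio thp = { .order = 9 };	/* 512 pages, a 2MiB THP */

	__mem_cgroup_charge(&small);
	__mem_cgroup_charge(&thp);
	printf("charged %lu pages\n", charged_pages);	/* prints 513 */
	return 0;
}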
