mm: thp: introduce folio_split_queue_lock{_irqsave}()
We should make the THP deferred split queue lock safe when LRU pages
are reparented. Similar to folio_lruvec_lock{_irqsave, _irq}(),
introduce folio_split_queue_lock{_irqsave}(), which looks up a folio's
deferred split queue and takes its lock in a single step, so that the
lock becomes easier to reparent.

In the next patch, the same approach used for the lruvec lock will make
the THP deferred split queue lock safe when LRU pages are reparented.

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Muchun Song authored and intel-lab-lkp committed Aug 14, 2021
1 parent 5bfd087 commit 3460bcf13b968edf6f4621c0e0dcde46500957e5
Showing 1 changed file with 69 additions and 24 deletions.
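The hunks below replace open-coded spin_lock()/spin_unlock() pairs on
ds_queue->split_queue_lock with helpers that look up the queue from a
folio and lock it in one step. As a minimal sketch of the calling
pattern the new API encourages, consider the following hypothetical
caller (written only to show the lock/unlock pairing; it is not part
of the patch):

static void example_del_from_split_queue(struct folio *folio)
{
        struct deferred_split *ds_queue;
        unsigned long flags;

        /* Resolve the folio's queue (memcg or per-node) and lock it. */
        ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
        if (!list_empty(page_deferred_list(&folio->page))) {
                ds_queue->split_queue_len--;
                list_del(page_deferred_list(&folio->page));
        }
        /* Always unlock the queue object returned by the lock helper. */
        split_queue_unlock_irqrestore(ds_queue, flags);
}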
@@ -499,25 +499,70 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 }

 #ifdef CONFIG_MEMCG
-static inline struct deferred_split *get_deferred_split_queue(struct page *page)
+static inline struct mem_cgroup *split_queue_memcg(struct deferred_split *queue)
 {
-        struct mem_cgroup *memcg = page_memcg(compound_head(page));
-        struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
+        if (mem_cgroup_disabled())
+                return NULL;
+        return container_of(queue, struct mem_cgroup, deferred_split_queue);
+}

-        if (memcg)
-                return &memcg->deferred_split_queue;
-        else
-                return &pgdat->deferred_split_queue;
+static inline struct deferred_split *folio_memcg_split_queue(struct folio *folio)
+{
+        struct mem_cgroup *memcg = folio_memcg(folio);
+
+        return memcg ? &memcg->deferred_split_queue : NULL;
 }
 #else
-static inline struct deferred_split *get_deferred_split_queue(struct page *page)
+static inline struct mem_cgroup *split_queue_memcg(struct deferred_split *queue)
 {
-        struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
+        return NULL;
+}

-        return &pgdat->deferred_split_queue;
+static inline struct deferred_split *folio_memcg_split_queue(struct folio *folio)
+{
+        return NULL;
 }
 #endif

+static struct deferred_split *folio_split_queue(struct folio *folio)
+{
+        struct deferred_split *queue = folio_memcg_split_queue(folio);
+
+        return queue ? : &NODE_DATA(folio_nid(folio))->deferred_split_queue;
+}
+
+static struct deferred_split *folio_split_queue_lock(struct folio *folio)
+{
+        struct deferred_split *queue;
+
+        queue = folio_split_queue(folio);
+        spin_lock(&queue->split_queue_lock);
+
+        return queue;
+}
+
+static struct deferred_split *
+folio_split_queue_lock_irqsave(struct folio *folio, unsigned long *flags)
+{
+        struct deferred_split *queue;
+
+        queue = folio_split_queue(folio);
+        spin_lock_irqsave(&queue->split_queue_lock, *flags);
+
+        return queue;
+}
+
+static inline void split_queue_unlock(struct deferred_split *queue)
+{
+        spin_unlock(&queue->split_queue_lock);
+}
+
+static inline void split_queue_unlock_irqrestore(struct deferred_split *queue,
+                                                 unsigned long flags)
+{
+        spin_unlock_irqrestore(&queue->split_queue_lock, flags);
+}
+
 void prep_transhuge_page(struct page *page)
 {
         /*
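Two idioms in the helpers above are worth noting. folio_split_queue()
uses GCC's `?:` extension: `a ? : b` yields `a` when it is non-NULL,
evaluating it only once, and falls back to the per-node queue
otherwise. And split_queue_memcg() maps the embedded queue back to its
owning memcg with container_of(), which subtracts the member's offset
from the member pointer. Here is a standalone userspace illustration
of the container_of() idiom, with stand-in struct definitions and a
simplified macro (the kernel's version adds type checking); it is not
part of the patch:

#include <stddef.h>
#include <stdio.h>

/* Same idiom as the kernel's container_of(): member pointer -> enclosing struct. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct deferred_split { int split_queue_len; };
struct mem_cgroup { long id; struct deferred_split deferred_split_queue; };

int main(void)
{
        struct mem_cgroup memcg = { .id = 42 };
        struct deferred_split *queue = &memcg.deferred_split_queue;

        /* Recover the owning mem_cgroup from the embedded queue pointer. */
        struct mem_cgroup *owner = container_of(queue, struct mem_cgroup,
                                                deferred_split_queue);
        printf("owner id = %ld\n", owner->id);  /* prints 42 */
        return 0;
}

Because the lock helpers return the queue they locked, a caller can
recover the memcg from that same queue while still holding the lock,
which is exactly what deferred_split_huge_page() does below via
split_queue_memcg().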
@@ -2610,8 +2655,9 @@ bool can_split_huge_page(struct page *page, int *pextra_pins)
  */
 int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
-        struct page *head = compound_head(page);
-        struct deferred_split *ds_queue = get_deferred_split_queue(head);
+        struct folio *folio = page_folio(page);
+        struct page *head = &folio->page;
+        struct deferred_split *ds_queue;
         struct anon_vma *anon_vma = NULL;
         struct address_space *mapping = NULL;
         int extra_pins, ret;
@@ -2689,13 +2735,13 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
         }

         /* Prevent deferred_split_scan() touching ->_refcount */
-        spin_lock(&ds_queue->split_queue_lock);
+        ds_queue = folio_split_queue_lock(folio);
         if (page_ref_freeze(head, 1 + extra_pins)) {
                 if (!list_empty(page_deferred_list(head))) {
                         ds_queue->split_queue_len--;
                         list_del(page_deferred_list(head));
                 }
-                spin_unlock(&ds_queue->split_queue_lock);
+                split_queue_unlock(ds_queue);
                 if (mapping) {
                         int nr = thp_nr_pages(head);

@@ -2710,7 +2756,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                 __split_huge_page(page, list, end);
                 ret = 0;
         } else {
-                spin_unlock(&ds_queue->split_queue_lock);
+                split_queue_unlock(ds_queue);
 fail:
                 if (mapping)
                         xa_unlock(&mapping->i_pages);
@@ -2733,24 +2779,22 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)

 void free_transhuge_page(struct page *page)
 {
-        struct deferred_split *ds_queue = get_deferred_split_queue(page);
+        struct deferred_split *ds_queue;
         unsigned long flags;

-        spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+        ds_queue = folio_split_queue_lock_irqsave(page_folio(page), &flags);
         if (!list_empty(page_deferred_list(page))) {
                 ds_queue->split_queue_len--;
                 list_del(page_deferred_list(page));
         }
-        spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+        split_queue_unlock_irqrestore(ds_queue, flags);
         free_compound_page(page);
 }

 void deferred_split_huge_page(struct page *page)
 {
-        struct deferred_split *ds_queue = get_deferred_split_queue(page);
-#ifdef CONFIG_MEMCG
-        struct mem_cgroup *memcg = page_memcg(compound_head(page));
-#endif
+        struct deferred_split *ds_queue;
+        struct mem_cgroup *memcg;
         unsigned long flags;

         VM_BUG_ON_PAGE(!PageTransHuge(page), page);
@@ -2768,7 +2812,8 @@ void deferred_split_huge_page(struct page *page)
         if (PageSwapCache(page))
                 return;

-        spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+        ds_queue = folio_split_queue_lock_irqsave(page_folio(page), &flags);
+        memcg = split_queue_memcg(ds_queue);
         if (list_empty(page_deferred_list(page))) {
                 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
                 list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
@@ -2779,7 +2824,7 @@ void deferred_split_huge_page(struct page *page)
                                          deferred_split_shrinker.id);
 #endif
         }
-        spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+        split_queue_unlock_irqrestore(ds_queue, flags);
 }

 static unsigned long deferred_split_count(struct shrinker *shrink,
