Skip to content
Permalink
Browse files
ANDROID: GKI: cma: redirect page allocation to CMA
CMA pages are designed to be used as fallback for movable allocations
and cannot be used for non-movable allocations. If CMA pages are
utilized poorly, non-movable allocations may end up getting starved if
all regular movable pages are allocated and the only pages left are
CMA. Always using CMA pages first creates unacceptable performance
problems. As a midway alternative, use CMA pages for certain
userspace allocations. The userspace pages can be migrated or dropped
quickly, which gives decent utilization.

Change-Id: I6165dda01b705309eebabc6dfa67146b7a95c174
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Heesub Shin <heesub.shin@samsung.com>
[lauraa@codeaurora.org: Missing CONFIG_CMA guards, add commit text]
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
[lmark@codeaurora.org: resolve conflicts relating to MIGRATE_HIGHATOMIC]
Signed-off-by: Liam Mark <lmark@codeaurora.org>
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
[swatsrid@codeaurora.org: Fix merge conflicts]
Signed-off-by: Swathi Sridhar <swatsrid@codeaurora.org>
(cherry picked from commit 46f8fca539686ce8493ff82206f9de2d07c9d72c)
Signed-off-by: Mark Salyzyn <salyzyn@google.com>
Bug: 150378964
Bug: 142290962
[tkjos@google.com: ANDROID: Fix kernelci build-break on !CONFIG_CMA builds]
Signed-off-by: Todd Kjos <tkjos@google.com>
  • Loading branch information
Mark Salyzyn authored and toddkjos committed Apr 3, 2020
1 parent 2a30495 commit b9d3d8f1e991052edb89b0537b8f2e8b2aa941ac
Show file tree
Hide file tree
Showing 4 changed files with 70 additions and 16 deletions.
@@ -44,6 +44,7 @@ struct vm_area_struct;
#else
#define ___GFP_NOLOCKDEP 0
#endif
#define ___GFP_CMA 0x1000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */

/*
@@ -57,6 +58,7 @@ struct vm_area_struct;
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
#define __GFP_CMA ((__force gfp_t)___GFP_CMA)
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)

/**
@@ -217,8 +219,13 @@ struct vm_area_struct;
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)

/* Room for N __GFP_FOO bits */
#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_SHIFT (25)
#ifdef CONFIG_LOCKDEP
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
#else
#define __GFP_BITS_MASK (((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) & \
~0x800000u)
#endif

/**
* DOC: Useful GFP flag combinations
@@ -205,7 +205,12 @@ static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
unsigned long vaddr)
{
#ifndef CONFIG_CMA
return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
#else
return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
vaddr);
#endif
}

static inline void clear_highpage(struct page *page)
@@ -444,6 +444,10 @@ struct zone {
struct pglist_data *zone_pgdat;
struct per_cpu_pageset __percpu *pageset;

#ifdef CONFIG_CMA
bool cma_alloc;
#endif

#ifndef CONFIG_SPARSEMEM
/*
* Flags for a pageblock_nr_pages block. See pageblock-flags.h.
@@ -2725,14 +2725,32 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,

retry:
page = __rmqueue_smallest(zone, order, migratetype);
if (unlikely(!page)) {
if (migratetype == MIGRATE_MOVABLE)
page = __rmqueue_cma_fallback(zone, order);

if (!page && __rmqueue_fallback(zone, order, migratetype,
alloc_flags))
goto retry;
}
if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype,
alloc_flags))
goto retry;

trace_mm_page_alloc_zone_locked(page, order, migratetype);
return page;
}

static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
int migratetype,
unsigned int alloc_flags)
{
struct page *page = 0;

retry:
#ifdef CONFIG_CMA
if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc)
page = __rmqueue_cma_fallback(zone, order);
else
#endif
page = __rmqueue_smallest(zone, order, migratetype);

if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype,
alloc_flags))
goto retry;

trace_mm_page_alloc_zone_locked(page, order, migratetype);
return page;
@@ -2745,14 +2763,20 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
*/
static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long count, struct list_head *list,
int migratetype, unsigned int alloc_flags)
int migratetype, unsigned int alloc_flags, int cma)
{
int i, alloced = 0;

spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
struct page *page = __rmqueue(zone, order, migratetype,
alloc_flags);
struct page *page;

if (cma)
page = __rmqueue_cma(zone, order, migratetype,
alloc_flags);
else
page = __rmqueue(zone, order, migratetype, alloc_flags);

if (unlikely(page == NULL))
break;

@@ -3216,15 +3240,16 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
unsigned int alloc_flags,
struct per_cpu_pages *pcp,
struct list_head *list)
struct list_head *list, gfp_t gfp_flags)
{
struct page *page;

do {
if (list_empty(list)) {
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, list,
migratetype, alloc_flags);
migratetype, alloc_flags,
gfp_flags & __GFP_CMA);
if (unlikely(list_empty(list)))
return NULL;
}
@@ -3250,7 +3275,8 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
local_irq_save(flags);
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list,
gfp_flags);
if (page) {
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
zone_statistics(preferred_zone, zone);
@@ -3291,8 +3317,14 @@ struct page *rmqueue(struct zone *preferred_zone,
if (page)
trace_mm_page_alloc_zone_locked(page, order, migratetype);
}
if (!page)
page = __rmqueue(zone, order, migratetype, alloc_flags);
if (!page) {
if (gfp_flags & __GFP_CMA)
page = __rmqueue_cma(zone, order, migratetype,
alloc_flags);
else
page = __rmqueue(zone, order, migratetype,
alloc_flags);
}
} while (page && check_new_pages(page, order));
spin_unlock(&zone->lock);
if (!page)
@@ -8442,6 +8474,9 @@ int alloc_contig_range(unsigned long start, unsigned long end,
if (ret < 0)
return ret;

#ifdef CONFIG_CMA
cc.zone->cma_alloc = 1;
#endif
/*
* In case of -EBUSY, we'd like to know which page causes problem.
* So, just fall through. test_pages_isolated() has a tracepoint
@@ -8523,6 +8558,9 @@ int alloc_contig_range(unsigned long start, unsigned long end,
done:
undo_isolate_page_range(pfn_max_align_down(start),
pfn_max_align_up(end), migratetype);
#ifdef CONFIG_CMA
cc.zone->cma_alloc = 0;
#endif
return ret;
}
#endif /* CONFIG_CONTIG_ALLOC */

0 comments on commit b9d3d8f

Please sign in to comment.