Skip to content
Permalink
Browse files
mm/compaction: make proactive compaction order configurable
Currently the proactive compaction order is fixed to
COMPACTION_HPAGE_ORDER(9), it's OK in most machines with lots of
normal 4KB memory, but it's too high for the machines with small
normal memory, for example the machines with most memory configured
as 1GB hugetlbfs huge pages. In these machines the max order of
free pages is often below 9, and it's always below 9 even with hard
compaction. This will lead to proactive compaction being triggered very
frequently. In these machines we only care about orders of 3 or 4.
This patch exports the order to proc and makes it configurable
by the user; the default value is still COMPACTION_HPAGE_ORDER.

Signed-off-by: chukaiping <chukaiping@baidu.com>
  • Loading branch information
chukaiping authored and intel-lab-lkp committed Apr 12, 2021
1 parent 9d843e8 commit a203321bf356e9514ca678c96119df72d6bfa803
Show file tree
Hide file tree
Showing 3 changed files with 15 additions and 3 deletions.
@@ -83,6 +83,7 @@ static inline unsigned long compact_gap(unsigned int order)
#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern unsigned int sysctl_compaction_proactiveness;
extern unsigned int sysctl_compaction_order;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
@@ -114,6 +114,7 @@ static int sixty = 60;
static int __maybe_unused neg_one = -1;
static int __maybe_unused two = 2;
static int __maybe_unused four = 4;
static int __maybe_unused ten = 10;
static unsigned long zero_ul;
static unsigned long one_ul = 1;
static unsigned long long_max = LONG_MAX;
@@ -2870,6 +2871,15 @@ static struct ctl_table vm_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = &one_hundred,
},
{
.procname = "compaction_order",
.data = &sysctl_compaction_order,
.maxlen = sizeof(sysctl_compaction_order),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = &ten,
},
{
.procname = "extfrag_threshold",
.data = &sysctl_extfrag_threshold,
@@ -1925,16 +1925,16 @@ static bool kswapd_is_running(pg_data_t *pgdat)

/*
 * A zone's fragmentation score is the external fragmentation wrt to the
 * sysctl_compaction_order. It returns a value in the range [0, 100].
 */
static unsigned int fragmentation_score_zone(struct zone *zone)
{
	/*
	 * Score the zone against the user-configurable order
	 * (sysctl_compaction_order, default COMPACTION_HPAGE_ORDER)
	 * rather than the fixed COMPACTION_HPAGE_ORDER, so machines
	 * whose maximum free-page order never reaches 9 do not trigger
	 * proactive compaction needlessly.
	 */
	return extfrag_for_order(zone, sysctl_compaction_order);
}

/*
 * A weighted zone's fragmentation score is the external fragmentation
 * wrt to the sysctl_compaction_order scaled by the zone's size. It
 * returns a value in the range [0, 100].
*
* The scaling factor ensures that proactive compaction focuses on larger
@@ -2666,6 +2666,7 @@ int sysctl_compact_memory;
* background. It takes values in the range [0, 100].
*/
unsigned int __read_mostly sysctl_compaction_proactiveness = 20;
unsigned int __read_mostly sysctl_compaction_order = COMPACTION_HPAGE_ORDER;

/*
* This is the entry point for compacting all nodes via

0 comments on commit a203321

Please sign in to comment.