Skip to content
Permalink
Browse files
mm: reimplement si_mem_available()
Reimplement si_mem_available() by reusing si_mem_available_node().

Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
  • Loading branch information
Qi Zheng authored and intel-lab-lkp committed Dec 16, 2021
1 parent 138b37f commit 7980664f23d619d15a3931fe1ab7d1dbafad7c88
Showing 1 changed file with 7 additions and 38 deletions.
@@ -5775,46 +5775,15 @@ long si_mem_available_node(struct sysinfo *val, int nid)

/*
 * si_mem_available - estimate how much memory is available for new
 * userspace allocations without pushing the system into swap.
 *
 * Per this commit, the global estimate is now simply the sum of the
 * per-node estimates produced by si_mem_available_node(); the previous
 * inline heuristic (free pages minus reserves, plus reclaimable page
 * cache and slab, each capped against the low watermark) has moved
 * into the per-node helper.
 *
 * Return: number of available pages, clamped to a minimum of 0.
 */
long si_mem_available(void)
{
	long available = 0;
	struct sysinfo i;	/* scratch buffer refilled for each node */
	int nid;

	/*
	 * Accumulate the availability estimate of every online node.
	 * si_meminfo_node() populates @i with the node's memory counters,
	 * which si_mem_available_node() then turns into an estimate.
	 */
	for_each_online_node(nid) {
		si_meminfo_node(&i, nid);
		available += si_mem_available_node(&i, nid);
	}

	/* Per-node rounding can drive the sum negative; report 0 instead. */
	if (available < 0)
		available = 0;
	return available;
}
EXPORT_SYMBOL_GPL(si_mem_available);

0 comments on commit 7980664

Please sign in to comment.