from: https://github.com/constantoverride/qubes-linux-kernel/blob/bbecb78ff38770367b0f1d18a1c860db401322c7/patches.addon/le9d.patch#L1

Warning: you need to run `echo 1 | sudo tee /proc/sys/vm/drop_caches` periodically, or else you can run out of memory in situations where you wouldn't have run out without this patch. But hey, at least it doesn't freakin' freeze/crash your system!
So you can expect Xorg and xfwm4 to get killed when filesystem caching fills up memory, e.g. during `sha256sum -c`, or during a firefox compilation with its output in /tmp (tmpfs) filling 12-13G out of the computer's 16G of RAM.
I'm personally running this on xfce startup: xfce4-terminal -x freepagecachepages_automatically 1000000
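For illustration only, a minimal sketch of a periodic page-cache dropper along those lines; the MemAvailable threshold and the 60-second interval are assumptions made up for this example, not what `freepagecachepages_automatically` actually does (that script isn't shown here):

  #!/bin/sh
  # assumed sketch: drop clean caches whenever MemAvailable falls below ~1 GiB
  while true; do
          avail_kb=$(awk '/^MemAvailable:/ {print $2}' /proc/meminfo)
          if [ "$avail_kb" -lt 1048576 ]; then
                  echo 1 > /proc/sys/vm/drop_caches    # needs root
          fi
          sleep 60
  done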
TODO: use this vmstat number (which is very different from the /proc/meminfo one! e.g. Active(file): 123524 kB):
$ grep nr_active_file /proc/vmstat
nr_active_file 29803
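Note: /proc/vmstat counters are in pages while /proc/meminfo reports kB, so much of the apparent difference is likely just units; a quick sketch of the conversion, using `getconf PAGESIZE` for the page size:

  pages=$(awk '/^nr_active_file / {print $2}' /proc/vmstat)
  echo "Active(file) equivalent: $(( pages * $(getconf PAGESIZE) / 1024 )) kB"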
revision 3
preliminary patch to avoid disk thrashing (constant reading) under memory pressure before the OOM killer triggers
more info: https://gist.github.com/constantoverride/84eba764f487049ed642eb2111a20830
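For reference, a diff with a/ b/ prefixes like the one below is normally applied from the top of the kernel source tree with something like:

  $ patch -p1 < le9d.patch
  (or equivalently: git apply le9d.patch)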
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 32699b2..7636498 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -208,7 +208,7 @@ enum lru_list {

 #define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

-#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
+#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_INACTIVE_FILE; lru++)

 static inline int is_file_lru(enum lru_list lru)
 {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 03822f8..1f3ffb5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2086,9 +2086,9 @@ static unsigned long shrink_list(enum lr
                                  struct scan_control *sc)
 {
         if (is_active_lru(lru)) {
-                if (inactive_list_is_low(lruvec, is_file_lru(lru),
-                                         memcg, sc, true))
-                        shrink_active_list(nr_to_scan, lruvec, sc, lru);
+                //if (inactive_list_is_low(lruvec, is_file_lru(lru),
+                //                         memcg, sc, true))
+                //        shrink_active_list(nr_to_scan, lruvec, sc, lru);
                 return 0;
         }

@@ -2234,7 +2234,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,

         anon  = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
                 lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
-        file  = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) +
+        file  = //lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) +
                 lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES);

         spin_lock_irq(&pgdat->lru_lock);
@@ -2345,7 +2345,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
                          sc->priority == DEF_PRIORITY);

         blk_start_plug(&plug);
-        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
+        while (nr[LRU_INACTIVE_ANON] || //nr[LRU_ACTIVE_FILE] ||
                                         nr[LRU_INACTIVE_FILE]) {
                 unsigned long nr_anon, nr_file, percentage;
                 unsigned long nr_scanned;
@@ -2372,7 +2372,8 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
                  * stop reclaiming one LRU and reduce the amount scanning
                  * proportional to the original scan target.
                  */
-                nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
+                nr_file = nr[LRU_INACTIVE_FILE] //+ nr[LRU_ACTIVE_FILE]
+                        ;
                 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];

                 /*
@@ -2391,7 +2392,8 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
                         percentage = nr_anon * 100 / scan_target;
                 } else {
                         unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
-                                                targets[LRU_ACTIVE_FILE] + 1;
+                                                //targets[LRU_ACTIVE_FILE] +
+                                                1;
                         lru = LRU_FILE;
                         percentage = nr_file * 100 / scan_target;
                 }
@@ -2409,10 +2411,12 @@ static void shrink_node_memcg(struct pgl
                 nr[lru] = targets[lru] * (100 - percentage) / 100;
                 nr[lru] -= min(nr[lru], nr_scanned);

+                if (LRU_FILE != lru) { //avoid this block for LRU_ACTIVE_FILE
                 lru += LRU_ACTIVE;
                 nr_scanned = targets[lru] - nr[lru];
                 nr[lru] = targets[lru] * (100 - percentage) / 100;
                 nr[lru] -= min(nr[lru], nr_scanned);
+                }

                 scan_adjusted = true;
         }