| @@ -0,0 +1,38 @@ | ||
| Set the effective priority of idleprio tasks to that of nice 19 tasks when | ||
| modifying vm reclaim behaviour. | ||
|
|
||
| -ck | ||
|
|
||
| --- | ||
| include/linux/sched.h | 2 +- | ||
| mm/vmscan.c | 2 ++ | ||
| 2 files changed, 3 insertions(+), 1 deletion(-) | ||
|
|
||
| Index: linux-3.2-ck1/include/linux/sched.h | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/include/linux/sched.h 2012-01-16 10:07:31.338097029 +1100 | ||
| +++ linux-3.2-ck1/include/linux/sched.h 2012-01-16 10:07:32.577096941 +1100 | ||
| @@ -39,9 +39,9 @@ | ||
| #define SCHED_BATCH 3 | ||
| /* SCHED_ISO: Implemented on BFS only */ | ||
| #define SCHED_IDLE 5 | ||
| +#define SCHED_IDLEPRIO SCHED_IDLE | ||
| #ifdef CONFIG_SCHED_BFS | ||
| #define SCHED_ISO 4 | ||
| -#define SCHED_IDLEPRIO SCHED_IDLE | ||
| #define SCHED_MAX (SCHED_IDLEPRIO) | ||
| #define SCHED_RANGE(policy) ((policy) <= SCHED_MAX) | ||
| #endif | ||
| Index: linux-3.2-ck1/mm/vmscan.c | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/mm/vmscan.c 2012-01-16 10:07:32.406096951 +1100 | ||
| +++ linux-3.2-ck1/mm/vmscan.c 2012-01-16 10:07:32.578096942 +1100 | ||
| @@ -2103,6 +2103,8 @@ static inline int effective_sc_prio(stru | ||
| if (likely(p->mm)) { | ||
| if (rt_task(p)) | ||
| return -20; | ||
| + if (p->policy == SCHED_IDLEPRIO) | ||
| + return 19; | ||
| return task_nice(p); | ||
| } | ||
| return 0; |
| @@ -0,0 +1,277 @@ | ||
| When reading from large files through the generic file read functions into | ||
| page cache we can detect when a file is so large that it is unlikely to be | ||
| fully cached in RAM. If that happens we can put it on the tail end of the | ||
| inactive lru list so it can be the first thing evicted next time we need RAM. | ||
|
|
||
| Do lots of funny buggers with underscores to preserve most of the existing | ||
| APIs. | ||
|
|
||
| -ck | ||
|
|
||
| --- | ||
| include/linux/mm_inline.h | 15 ++++++++++++--- | ||
| include/linux/pagemap.h | 2 ++ | ||
| include/linux/swap.h | 8 +++++++- | ||
| mm/filemap.c | 12 +++++++++--- | ||
| mm/readahead.c | 32 ++++++++++++++++++++++++++++---- | ||
| mm/swap.c | 30 ++++++++++++++++++++++++------ | ||
| 6 files changed, 82 insertions(+), 17 deletions(-) | ||
|
|
||
| Index: linux-3.2-ck1/include/linux/mm_inline.h | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/include/linux/mm_inline.h 2012-01-16 10:07:27.614097289 +1100 | ||
| +++ linux-3.2-ck1/include/linux/mm_inline.h 2012-01-16 10:07:32.751096930 +1100 | ||
| @@ -23,9 +23,12 @@ static inline int page_is_file_cache(str | ||
|
|
||
| static inline void | ||
| __add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l, | ||
| - struct list_head *head) | ||
| + struct list_head *head, bool tail) | ||
| { | ||
| - list_add(&page->lru, head); | ||
| + if (tail) | ||
| + list_add_tail(&page->lru, head); | ||
| + else | ||
| + list_add(&page->lru, head); | ||
| __mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page)); | ||
| mem_cgroup_add_lru_list(page, l); | ||
| } | ||
| @@ -33,7 +36,13 @@ __add_page_to_lru_list(struct zone *zone | ||
| static inline void | ||
| add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l) | ||
| { | ||
| - __add_page_to_lru_list(zone, page, l, &zone->lru[l].list); | ||
| + __add_page_to_lru_list(zone, page, l, &zone->lru[l].list, false); | ||
| +} | ||
| + | ||
| +static inline void | ||
| +add_page_to_lru_list_tail(struct zone *zone, struct page *page, enum lru_list l) | ||
| +{ | ||
| + __add_page_to_lru_list(zone, page, l, &zone->lru[l].list, 1); | ||
| } | ||
|
|
||
| static inline void | ||
| Index: linux-3.2-ck1/include/linux/swap.h | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/include/linux/swap.h 2012-01-16 10:07:32.052096979 +1100 | ||
| +++ linux-3.2-ck1/include/linux/swap.h 2012-01-16 10:07:32.751096930 +1100 | ||
| @@ -215,6 +215,7 @@ extern unsigned int nr_free_pagecache_pa | ||
|
|
||
|
|
||
| /* linux/mm/swap.c */ | ||
| +extern void ____lru_cache_add(struct page *, enum lru_list lru, bool tail); | ||
| extern void __lru_cache_add(struct page *, enum lru_list lru); | ||
| extern void lru_cache_add_lru(struct page *, enum lru_list lru); | ||
| extern void lru_add_page_tail(struct zone* zone, | ||
| @@ -238,9 +239,14 @@ static inline void lru_cache_add_anon(st | ||
| __lru_cache_add(page, LRU_INACTIVE_ANON); | ||
| } | ||
|
|
||
| +static inline void lru_cache_add_file_tail(struct page *page, bool tail) | ||
| +{ | ||
| + ____lru_cache_add(page, LRU_INACTIVE_FILE, tail); | ||
| +} | ||
| + | ||
| static inline void lru_cache_add_file(struct page *page) | ||
| { | ||
| - __lru_cache_add(page, LRU_INACTIVE_FILE); | ||
| + ____lru_cache_add(page, LRU_INACTIVE_FILE, false); | ||
| } | ||
|
|
||
| /* linux/mm/vmscan.c */ | ||
| Index: linux-3.2-ck1/mm/filemap.c | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/mm/filemap.c 2012-01-16 10:07:27.615097289 +1100 | ||
| +++ linux-3.2-ck1/mm/filemap.c 2012-01-16 10:07:32.752096930 +1100 | ||
| @@ -495,16 +495,22 @@ out: | ||
| } | ||
| EXPORT_SYMBOL(add_to_page_cache_locked); | ||
|
|
||
| -int add_to_page_cache_lru(struct page *page, struct address_space *mapping, | ||
| - pgoff_t offset, gfp_t gfp_mask) | ||
| +int __add_to_page_cache_lru(struct page *page, struct address_space *mapping, | ||
| + pgoff_t offset, gfp_t gfp_mask, bool tail) | ||
| { | ||
| int ret; | ||
|
|
||
| ret = add_to_page_cache(page, mapping, offset, gfp_mask); | ||
| if (ret == 0) | ||
| - lru_cache_add_file(page); | ||
| + lru_cache_add_file_tail(page, tail); | ||
| return ret; | ||
| } | ||
| + | ||
| +int add_to_page_cache_lru(struct page *page, struct address_space *mapping, | ||
| + pgoff_t offset, gfp_t gfp_mask) | ||
| +{ | ||
| + return __add_to_page_cache_lru(page, mapping, offset, gfp_mask, false); | ||
| +} | ||
| EXPORT_SYMBOL_GPL(add_to_page_cache_lru); | ||
|
|
||
| #ifdef CONFIG_NUMA | ||
| Index: linux-3.2-ck1/mm/swap.c | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/mm/swap.c 2012-01-16 10:07:27.615097289 +1100 | ||
| +++ linux-3.2-ck1/mm/swap.c 2012-01-16 10:07:32.753096930 +1100 | ||
| @@ -371,15 +371,23 @@ void mark_page_accessed(struct page *pag | ||
|
|
||
| EXPORT_SYMBOL(mark_page_accessed); | ||
|
|
||
| -void __lru_cache_add(struct page *page, enum lru_list lru) | ||
| +void ______pagevec_lru_add(struct pagevec *pvec, enum lru_list lru, bool tail); | ||
| + | ||
| +void ____lru_cache_add(struct page *page, enum lru_list lru, bool tail) | ||
| { | ||
| struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru]; | ||
|
|
||
| page_cache_get(page); | ||
| if (!pagevec_add(pvec, page)) | ||
| - ____pagevec_lru_add(pvec, lru); | ||
| + ______pagevec_lru_add(pvec, lru, tail); | ||
| put_cpu_var(lru_add_pvecs); | ||
| } | ||
| +EXPORT_SYMBOL(____lru_cache_add); | ||
| + | ||
| +void __lru_cache_add(struct page *page, enum lru_list lru) | ||
| +{ | ||
| + ____lru_cache_add(page, lru, false); | ||
| +} | ||
| EXPORT_SYMBOL(__lru_cache_add); | ||
|
|
||
| /** | ||
| @@ -387,7 +395,7 @@ EXPORT_SYMBOL(__lru_cache_add); | ||
| * @page: the page to be added to the LRU. | ||
| * @lru: the LRU list to which the page is added. | ||
| */ | ||
| -void lru_cache_add_lru(struct page *page, enum lru_list lru) | ||
| +void __lru_cache_add_lru(struct page *page, enum lru_list lru, bool tail) | ||
| { | ||
| if (PageActive(page)) { | ||
| VM_BUG_ON(PageUnevictable(page)); | ||
| @@ -398,7 +406,12 @@ void lru_cache_add_lru(struct page *page | ||
| } | ||
|
|
||
| VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page)); | ||
| - __lru_cache_add(page, lru); | ||
| + ____lru_cache_add(page, lru, tail); | ||
| +} | ||
| + | ||
| +void lru_cache_add_lru(struct page *page, enum lru_list lru) | ||
| +{ | ||
| + __lru_cache_add_lru(page, lru, false); | ||
| } | ||
|
|
||
| /** | ||
| @@ -685,7 +698,7 @@ void lru_add_page_tail(struct zone* zone | ||
| head = page->lru.prev; | ||
| else | ||
| head = &zone->lru[lru].list; | ||
| - __add_page_to_lru_list(zone, page_tail, lru, head); | ||
| + __add_page_to_lru_list(zone, page_tail, lru, head, false); | ||
| } else { | ||
| SetPageUnevictable(page_tail); | ||
| add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE); | ||
| @@ -714,13 +727,18 @@ static void ____pagevec_lru_add_fn(struc | ||
| * Add the passed pages to the LRU, then drop the caller's refcount | ||
| * on them. Reinitialises the caller's pagevec. | ||
| */ | ||
| -void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) | ||
| +void ______pagevec_lru_add(struct pagevec *pvec, enum lru_list lru, bool tail) | ||
| { | ||
| VM_BUG_ON(is_unevictable_lru(lru)); | ||
|
|
||
| pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru); | ||
| } | ||
|
|
||
| +void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) | ||
| +{ | ||
| + ______pagevec_lru_add(pvec, lru, false); | ||
| +} | ||
| + | ||
| EXPORT_SYMBOL(____pagevec_lru_add); | ||
|
|
||
| /* | ||
| Index: linux-3.2-ck1/mm/readahead.c | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/mm/readahead.c 2012-01-16 10:07:27.615097289 +1100 | ||
| +++ linux-3.2-ck1/mm/readahead.c 2012-01-16 10:07:32.753096930 +1100 | ||
| @@ -17,6 +17,7 @@ | ||
| #include <linux/task_io_accounting_ops.h> | ||
| #include <linux/pagevec.h> | ||
| #include <linux/pagemap.h> | ||
| +#include <linux/swap.h> | ||
|
|
||
| /* | ||
| * Initialise a struct file's readahead state. Assumes that the caller has | ||
| @@ -107,7 +108,7 @@ int read_cache_pages(struct address_spac | ||
| EXPORT_SYMBOL(read_cache_pages); | ||
|
|
||
| static int read_pages(struct address_space *mapping, struct file *filp, | ||
| - struct list_head *pages, unsigned nr_pages) | ||
| + struct list_head *pages, unsigned nr_pages, bool tail) | ||
| { | ||
| struct blk_plug plug; | ||
| unsigned page_idx; | ||
| @@ -125,8 +126,8 @@ static int read_pages(struct address_spa | ||
| for (page_idx = 0; page_idx < nr_pages; page_idx++) { | ||
| struct page *page = list_to_page(pages); | ||
| list_del(&page->lru); | ||
| - if (!add_to_page_cache_lru(page, mapping, | ||
| - page->index, GFP_KERNEL)) { | ||
| + if (!__add_to_page_cache_lru(page, mapping, | ||
| + page->index, GFP_KERNEL, tail)) { | ||
| mapping->a_ops->readpage(filp, page); | ||
| } | ||
| page_cache_release(page); | ||
| @@ -139,6 +140,28 @@ out: | ||
| return ret; | ||
| } | ||
|
|
||
| +static inline int nr_mapped(void) | ||
| +{ | ||
| + return global_page_state(NR_FILE_MAPPED) + | ||
| + global_page_state(NR_ANON_PAGES); | ||
| +} | ||
| + | ||
| +/* | ||
| + * This examines how large in pages a file size is and returns 1 if it is | ||
| + * more than half the unmapped ram. Avoid doing read_page_state which is | ||
| + * expensive unless we already know it is likely to be large enough. | ||
| + */ | ||
| +static int large_isize(unsigned long nr_pages) | ||
| +{ | ||
| + if (nr_pages * 6 > vm_total_pages) { | ||
| + unsigned long unmapped_ram = vm_total_pages - nr_mapped(); | ||
| + | ||
| + if (nr_pages * 2 > unmapped_ram) | ||
| + return 1; | ||
| + } | ||
| + return 0; | ||
| +} | ||
| + | ||
| /* | ||
| * __do_page_cache_readahead() actually reads a chunk of disk. It allocates all | ||
| * the pages first, then submits them all for I/O. This avoids the very bad | ||
| @@ -196,7 +219,8 @@ __do_page_cache_readahead(struct address | ||
| * will then handle the error. | ||
| */ | ||
| if (ret) | ||
| - read_pages(mapping, filp, &page_pool, ret); | ||
| + read_pages(mapping, filp, &page_pool, ret, | ||
| + large_isize(end_index)); | ||
| BUG_ON(!list_empty(&page_pool)); | ||
| out: | ||
| return ret; | ||
| Index: linux-3.2-ck1/include/linux/pagemap.h | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/include/linux/pagemap.h 2012-01-16 10:07:27.615097289 +1100 | ||
| +++ linux-3.2-ck1/include/linux/pagemap.h 2012-01-16 10:07:32.754096930 +1100 | ||
| @@ -456,6 +456,8 @@ int add_to_page_cache_locked(struct page | ||
| pgoff_t index, gfp_t gfp_mask); | ||
| int add_to_page_cache_lru(struct page *page, struct address_space *mapping, | ||
| pgoff_t index, gfp_t gfp_mask); | ||
| +int __add_to_page_cache_lru(struct page *page, struct address_space *mapping, | ||
| + pgoff_t offset, gfp_t gfp_mask, bool tail); | ||
| extern void delete_from_page_cache(struct page *page); | ||
| extern void __delete_from_page_cache(struct page *page); | ||
| int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); |
| @@ -0,0 +1,45 @@ | ||
| The default dirty ratio is chosen to be a compromise between throughput and | ||
| overall system latency. On a desktop, if an application writes to disk a lot, | ||
| that application should be the one to slow down rather than the desktop as a | ||
| whole. At higher dirty ratio settings, an application could write a lot to | ||
| disk and then happily use lots of CPU time after that while the rest of the | ||
| system is busy waiting on that naughty application's disk writes to complete | ||
| before anything else happening. | ||
|
|
||
| Lower ratios mean that the applications that do a lot of disk writes end up | ||
| being responsible for their own actions and they're the ones that slow down | ||
| rather than the system in general. | ||
|
|
||
| This does decrease overall write throughput, but to the benefit of the latency | ||
| of the system as a whole with disk writes. | ||
|
|
||
| Other checks may well round this up to 10 still at startup. | ||
|
|
||
| -ck | ||
|
|
||
| --- | ||
| mm/page-writeback.c | 4 ++-- | ||
| 1 file changed, 2 insertions(+), 2 deletions(-) | ||
|
|
||
| Index: linux-3.2-ck1/mm/page-writeback.c | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/mm/page-writeback.c 2012-01-16 10:07:27.594097290 +1100 | ||
| +++ linux-3.2-ck1/mm/page-writeback.c 2012-01-16 10:07:32.967096915 +1100 | ||
| @@ -59,7 +59,7 @@ static long ratelimit_pages = 32; | ||
| /* | ||
| * Start background writeback (via writeback threads) at this percentage | ||
| */ | ||
| -int dirty_background_ratio = 10; | ||
| +int dirty_background_ratio = 1; | ||
|
|
||
| /* | ||
| * dirty_background_bytes starts at 0 (disabled) so that it is a function of | ||
| @@ -76,7 +76,7 @@ int vm_highmem_is_dirtyable; | ||
| /* | ||
| * The generator of dirty data starts writeback at this percentage | ||
| */ | ||
| -int vm_dirty_ratio = 20; | ||
| +int vm_dirty_ratio = 1; | ||
|
|
||
| /* | ||
| * vm_dirty_bytes starts at 0 (disabled) so that it is a function of |
| @@ -0,0 +1,46 @@ | ||
| The options to alter the vmsplit to enable more lowmem are hidden behind the | ||
| expert option. Make it more exposed for -ck users and make the help menu | ||
| more explicit about what each option means. | ||
|
|
||
| -ck | ||
|
|
||
| --- | ||
| arch/x86/Kconfig | 12 ++++++------ | ||
| 1 file changed, 6 insertions(+), 6 deletions(-) | ||
|
|
||
| Index: linux-3.2-ck1/arch/x86/Kconfig | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/arch/x86/Kconfig 2012-01-16 10:07:27.563097292 +1100 | ||
| +++ linux-3.2-ck1/arch/x86/Kconfig 2012-01-16 10:07:33.128096904 +1100 | ||
| @@ -1076,7 +1076,7 @@ endchoice | ||
|
|
||
| choice | ||
| depends on EXPERIMENTAL | ||
| - prompt "Memory split" if EXPERT | ||
| + prompt "Memory split" | ||
| default VMSPLIT_3G | ||
| depends on X86_32 | ||
| ---help--- | ||
| @@ -1096,17 +1096,17 @@ choice | ||
| option alone! | ||
|
|
||
| config VMSPLIT_3G | ||
| - bool "3G/1G user/kernel split" | ||
| + bool "Default 896MB lowmem (3G/1G user/kernel split)" | ||
| config VMSPLIT_3G_OPT | ||
| depends on !X86_PAE | ||
| - bool "3G/1G user/kernel split (for full 1G low memory)" | ||
| + bool "1GB lowmem (3G/1G user/kernel split)" | ||
| config VMSPLIT_2G | ||
| - bool "2G/2G user/kernel split" | ||
| + bool "2GB lowmem (2G/2G user/kernel split)" | ||
| config VMSPLIT_2G_OPT | ||
| depends on !X86_PAE | ||
| - bool "2G/2G user/kernel split (for full 2G low memory)" | ||
| + bool "2GB lowmem (2G/2G user/kernel split)" | ||
| config VMSPLIT_1G | ||
| - bool "1G/3G user/kernel split" | ||
| + bool "3GB lowmem (1G/3G user/kernel split)" | ||
| endchoice | ||
|
|
||
| config PAGE_OFFSET |
| @@ -0,0 +1,21 @@ | ||
| Set default HZ to 1000 which is what most desktop users should still be using. | ||
|
|
||
| -ck | ||
|
|
||
| --- | ||
| kernel/Kconfig.hz | 2 +- | ||
| 1 file changed, 1 insertion(+), 1 deletion(-) | ||
|
|
||
| Index: linux-3.2-ck1/kernel/Kconfig.hz | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/kernel/Kconfig.hz 2012-01-16 10:07:27.544097294 +1100 | ||
| +++ linux-3.2-ck1/kernel/Kconfig.hz 2012-01-16 10:07:33.286096892 +1100 | ||
| @@ -4,7 +4,7 @@ | ||
|
|
||
| choice | ||
| prompt "Timer frequency" | ||
| - default HZ_250 | ||
| + default HZ_1000 | ||
| help | ||
| Allows the configuration of the timer frequency. It is customary | ||
| to have the timer interrupt run at 1000 Hz but 100 Hz may be more |
| @@ -0,0 +1,53 @@ | ||
| Make 250HZ not be the default to discourage desktop users from choosing this | ||
| option since 1000 will provide better latencies with only minuscule amounts | ||
| of extra overhead and power consumption. | ||
|
|
||
| -ck | ||
|
|
||
| --- | ||
| kernel/Kconfig.hz | 17 ++++++++++------- | ||
| 1 file changed, 10 insertions(+), 7 deletions(-) | ||
|
|
||
| Index: linux-3.2-ck1/kernel/Kconfig.hz | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/kernel/Kconfig.hz 2012-01-16 10:07:33.286096892 +1100 | ||
| +++ linux-3.2-ck1/kernel/Kconfig.hz 2012-01-16 10:07:33.453096881 +1100 | ||
| @@ -23,13 +23,14 @@ choice | ||
| with lots of processors that may show reduced performance if | ||
| too many timer interrupts are occurring. | ||
|
|
||
| - config HZ_250 | ||
| + config HZ_250_NODEFAULT | ||
| bool "250 HZ" | ||
| help | ||
| - 250 Hz is a good compromise choice allowing server performance | ||
| - while also showing good interactive responsiveness even | ||
| - on SMP and NUMA systems. If you are going to be using NTSC video | ||
| - or multimedia, selected 300Hz instead. | ||
| + 250 HZ is a lousy compromise choice allowing server interactivity | ||
| + while also showing desktop throughput and no extra power saving on | ||
| + laptops. No good for anything. | ||
| + | ||
| + Recommend 100 or 1000 instead. | ||
|
|
||
| config HZ_300 | ||
| bool "300 HZ" | ||
| @@ -43,14 +44,16 @@ choice | ||
| bool "1000 HZ" | ||
| help | ||
| 1000 Hz is the preferred choice for desktop systems and other | ||
| - systems requiring fast interactive responses to events. | ||
| + systems requiring fast interactive responses to events. Laptops | ||
| + can also benefit from this choice without sacrificing battery life | ||
| + if dynticks is also enabled. | ||
|
|
||
| endchoice | ||
|
|
||
| config HZ | ||
| int | ||
| default 100 if HZ_100 | ||
| - default 250 if HZ_250 | ||
| + default 250 if HZ_250_NODEFAULT | ||
| default 300 if HZ_300 | ||
| default 1000 if HZ_1000 | ||
|
|
| @@ -0,0 +1,181 @@ | ||
| There's some really badly broken software out there that is entirely | ||
| dependent on HZ for its maximum performance. Raise the maximum HZ value | ||
| to higher, slightly unreasonable values, and up to some completely | ||
| obscene values. | ||
|
|
||
| -ck | ||
|
|
||
| --- | ||
| arch/x86/kernel/cpu/proc.c | 2 - | ||
| arch/x86/kernel/smpboot.c | 2 - | ||
| include/linux/nfsd/stats.h | 4 +- | ||
| include/net/inet_timewait_sock.h | 10 ++++-- | ||
| init/calibrate.c | 2 - | ||
| kernel/Kconfig.hz | 64 +++++++++++++++++++++++++++++++++++++++ | ||
| 6 files changed, 76 insertions(+), 8 deletions(-) | ||
|
|
||
| Index: linux-3.2-ck1/arch/x86/kernel/cpu/proc.c | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/arch/x86/kernel/cpu/proc.c 2012-01-16 10:07:27.477097298 +1100 | ||
| +++ linux-3.2-ck1/arch/x86/kernel/cpu/proc.c 2012-01-16 10:07:33.618096869 +1100 | ||
| @@ -111,7 +111,7 @@ static int show_cpuinfo(struct seq_file | ||
|
|
||
| seq_printf(m, "\nbogomips\t: %lu.%02lu\n", | ||
| c->loops_per_jiffy/(500000/HZ), | ||
| - (c->loops_per_jiffy/(5000/HZ)) % 100); | ||
| + (c->loops_per_jiffy * 10 /(50000/HZ)) % 100); | ||
|
|
||
| #ifdef CONFIG_X86_64 | ||
| if (c->x86_tlbsize > 0) | ||
| Index: linux-3.2-ck1/arch/x86/kernel/smpboot.c | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/arch/x86/kernel/smpboot.c 2012-01-16 10:07:27.477097298 +1100 | ||
| +++ linux-3.2-ck1/arch/x86/kernel/smpboot.c 2012-01-16 10:07:33.619096868 +1100 | ||
| @@ -430,7 +430,7 @@ static void impress_friends(void) | ||
| "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", | ||
| num_online_cpus(), | ||
| bogosum/(500000/HZ), | ||
| - (bogosum/(5000/HZ))%100); | ||
| + (bogosum * 10/(50000/HZ))%100); | ||
|
|
||
| pr_debug("Before bogocount - setting activated=1.\n"); | ||
| } | ||
| Index: linux-3.2-ck1/include/linux/nfsd/stats.h | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/include/linux/nfsd/stats.h 2012-01-16 10:07:27.477097298 +1100 | ||
| +++ linux-3.2-ck1/include/linux/nfsd/stats.h 2012-01-16 10:07:33.619096868 +1100 | ||
| @@ -11,8 +11,8 @@ | ||
|
|
||
| #include <linux/nfs4.h> | ||
|
|
||
| -/* thread usage wraps very million seconds (approx one fortnight) */ | ||
| -#define NFSD_USAGE_WRAP (HZ*1000000) | ||
| +/* thread usage wraps every one hundred thousand seconds (approx one day) */ | ||
| +#define NFSD_USAGE_WRAP (HZ*100000) | ||
|
|
||
| #ifdef __KERNEL__ | ||
|
|
||
| Index: linux-3.2-ck1/include/net/inet_timewait_sock.h | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/include/net/inet_timewait_sock.h 2012-01-16 10:07:27.477097298 +1100 | ||
| +++ linux-3.2-ck1/include/net/inet_timewait_sock.h 2012-01-16 10:07:33.619096868 +1100 | ||
| @@ -38,8 +38,8 @@ struct inet_hashinfo; | ||
| * If time > 4sec, it is "slow" path, no recycling is required, | ||
| * so that we select tick to get range about 4 seconds. | ||
| */ | ||
| -#if HZ <= 16 || HZ > 4096 | ||
| -# error Unsupported: HZ <= 16 or HZ > 4096 | ||
| +#if HZ <= 16 || HZ > 16384 | ||
| +# error Unsupported: HZ <= 16 or HZ > 16384 | ||
| #elif HZ <= 32 | ||
| # define INET_TWDR_RECYCLE_TICK (5 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) | ||
| #elif HZ <= 64 | ||
| @@ -54,8 +54,12 @@ struct inet_hashinfo; | ||
| # define INET_TWDR_RECYCLE_TICK (10 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) | ||
| #elif HZ <= 2048 | ||
| # define INET_TWDR_RECYCLE_TICK (11 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) | ||
| -#else | ||
| +#elif HZ <= 4096 | ||
| # define INET_TWDR_RECYCLE_TICK (12 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) | ||
| +#elif HZ <= 8192 | ||
| +# define INET_TWDR_RECYCLE_TICK (13 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) | ||
| +#else | ||
| +# define INET_TWDR_RECYCLE_TICK (14 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) | ||
| #endif | ||
|
|
||
| /* TIME_WAIT reaping mechanism. */ | ||
| Index: linux-3.2-ck1/init/calibrate.c | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/init/calibrate.c 2012-01-16 10:07:27.477097298 +1100 | ||
| +++ linux-3.2-ck1/init/calibrate.c 2012-01-16 10:07:33.619096868 +1100 | ||
| @@ -278,7 +278,7 @@ void __cpuinit calibrate_delay(void) | ||
| if (!printed) | ||
| pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n", | ||
| lpj/(500000/HZ), | ||
| - (lpj/(5000/HZ)) % 100, lpj); | ||
| + (lpj * 10 /(50000 / HZ)) % 100, lpj); | ||
|
|
||
| loops_per_jiffy = lpj; | ||
| printed = true; | ||
| Index: linux-3.2-ck1/kernel/Kconfig.hz | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/kernel/Kconfig.hz 2012-01-16 10:07:33.453096881 +1100 | ||
| +++ linux-3.2-ck1/kernel/Kconfig.hz 2012-01-16 10:07:33.619096868 +1100 | ||
| @@ -48,6 +48,63 @@ choice | ||
| can also benefit from this choice without sacrificing battery life | ||
| if dynticks is also enabled. | ||
|
|
||
| + config HZ_1500 | ||
| + bool "1500 HZ" | ||
| + help | ||
| + 1500 Hz is an insane value to use to run broken software that is Hz | ||
| + limited. | ||
| + | ||
| + Being over 1000, driver breakage is likely. | ||
| + | ||
| + config HZ_2000 | ||
| + bool "2000 HZ" | ||
| + help | ||
| + 2000 Hz is an insane value to use to run broken software that is Hz | ||
| + limited. | ||
| + | ||
| + Being over 1000, driver breakage is likely. | ||
| + | ||
| + config HZ_3000 | ||
| + bool "3000 HZ" | ||
| + help | ||
| + 3000 Hz is an insane value to use to run broken software that is Hz | ||
| + limited. | ||
| + | ||
| + Being over 1000, driver breakage is likely. | ||
| + | ||
| + config HZ_4000 | ||
| + bool "4000 HZ" | ||
| + help | ||
| + 4000 Hz is an insane value to use to run broken software that is Hz | ||
| + limited. | ||
| + | ||
| + Being over 1000, driver breakage is likely. | ||
| + | ||
| + config HZ_5000 | ||
| + bool "5000 HZ" | ||
| + help | ||
| + 5000 Hz is an obscene value to use to run broken software that is Hz | ||
| + limited. | ||
| + | ||
| + Being over 1000, driver breakage is likely. | ||
| + | ||
| + config HZ_7500 | ||
| + bool "7500 HZ" | ||
| + help | ||
| + 7500 Hz is an obscene value to use to run broken software that is Hz | ||
| + limited. | ||
| + | ||
| + Being over 1000, driver breakage is likely. | ||
| + | ||
| + config HZ_10000 | ||
| + bool "10000 HZ" | ||
| + help | ||
| + 10000 Hz is an obscene value to use to run broken software that is Hz | ||
| + limited. | ||
| + | ||
| + Being over 1000, driver breakage is likely. | ||
| + | ||
| + | ||
| endchoice | ||
|
|
||
| config HZ | ||
| @@ -56,6 +113,13 @@ config HZ | ||
| default 250 if HZ_250_NODEFAULT | ||
| default 300 if HZ_300 | ||
| default 1000 if HZ_1000 | ||
| + default 1500 if HZ_1500 | ||
| + default 2000 if HZ_2000 | ||
| + default 3000 if HZ_3000 | ||
| + default 4000 if HZ_4000 | ||
| + default 5000 if HZ_5000 | ||
| + default 7500 if HZ_7500 | ||
| + default 10000 if HZ_10000 | ||
|
|
||
| config SCHED_HRTICK | ||
| def_bool HIGH_RES_TIMERS && (!SMP || USE_GENERIC_SMP_HELPERS) |
| @@ -0,0 +1,40 @@ | ||
| Enable preempt by default and make people steer away from voluntary. | ||
|
|
||
| -ck | ||
|
|
||
| --- | ||
| kernel/Kconfig.preempt | 7 ++++--- | ||
| 1 file changed, 4 insertions(+), 3 deletions(-) | ||
|
|
||
| Index: linux-3.2-ck1/kernel/Kconfig.preempt | ||
| =================================================================== | ||
| --- linux-3.2-ck1.orig/kernel/Kconfig.preempt 2012-01-16 10:07:27.461097300 +1100 | ||
| +++ linux-3.2-ck1/kernel/Kconfig.preempt 2012-01-16 10:07:33.836096853 +1100 | ||
| @@ -1,7 +1,7 @@ | ||
|
|
||
| choice | ||
| prompt "Preemption Model" | ||
| - default PREEMPT_NONE | ||
| + default PREEMPT | ||
|
|
||
| config PREEMPT_NONE | ||
| bool "No Forced Preemption (Server)" | ||
| @@ -17,7 +17,7 @@ config PREEMPT_NONE | ||
| latencies. | ||
|
|
||
| config PREEMPT_VOLUNTARY | ||
| - bool "Voluntary Kernel Preemption (Desktop)" | ||
| + bool "Voluntary Kernel Preemption (Nothing)" | ||
| help | ||
| This option reduces the latency of the kernel by adding more | ||
| "explicit preemption points" to the kernel code. These new | ||
| @@ -31,7 +31,8 @@ config PREEMPT_VOLUNTARY | ||
| applications to run more 'smoothly' even when the system is | ||
| under load. | ||
|
|
||
| - Select this if you are building a kernel for a desktop system. | ||
| + Select this for no system in particular (choose Preemptible | ||
| + instead on a desktop if you know what's good for you). | ||
|
|
||
| config PREEMPT | ||
| bool "Preemptible Kernel (Low-Latency Desktop)" |