
Implement ck1 patchset

1 parent 8167fe4 · commit 7d890b1f48f183c202b402925f26ecada456ce6f · Ziggy committed on Sep 4, 2012
@@ -10,6 +10,10 @@ NAME = Sneaky Weasel
# Comments in this file are targeted only to the developer, do not
# expect to learn how to build the kernel reading this file.
+CKVERSION = -ck1
+CKNAME = BFS Powered
+EXTRAVERSION := $(EXTRAVERSION)$(CKVERSION)
+
# Do not:
# o use make's built-in rules and variables
# (this increases performance and avoids hard-to-debug behaviour);
@@ -64,11 +64,6 @@ static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;
/*
- * Priority of a normal, non-rt, non-niced'd process (aka nice level 0).
- */
-#define NORMAL_PRIO 120
-
-/*
* Frequency of the spu scheduler tick. By default we do one SPU scheduler
* tick for every 10 CPU scheduler ticks.
*/
@@ -1072,7 +1072,7 @@ endchoice
choice
depends on EXPERIMENTAL
- prompt "Memory split" if EXPERT
+ prompt "Memory split"
default VMSPLIT_3G
depends on X86_32
---help---
@@ -1092,17 +1092,17 @@ choice
option alone!
config VMSPLIT_3G
- bool "3G/1G user/kernel split"
+ bool "Default 896MB lowmem (3G/1G user/kernel split)"
config VMSPLIT_3G_OPT
depends on !X86_PAE
- bool "3G/1G user/kernel split (for full 1G low memory)"
+ bool "1GB lowmem (3G/1G user/kernel split)"
config VMSPLIT_2G
- bool "2G/2G user/kernel split"
+ bool "2GB lowmem (2G/2G user/kernel split)"
config VMSPLIT_2G_OPT
depends on !X86_PAE
- bool "2G/2G user/kernel split (for full 2G low memory)"
+ bool "2GB lowmem (2G/2G user/kernel split)"
config VMSPLIT_1G
- bool "1G/3G user/kernel split"
+ bool "3GB lowmem (1G/3G user/kernel split)"
endchoice
config PAGE_OFFSET
@@ -107,7 +107,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
c->loops_per_jiffy/(500000/HZ),
- (c->loops_per_jiffy/(5000/HZ)) % 100);
+ (c->loops_per_jiffy * 10 /(50000/HZ)) % 100);
#ifdef CONFIG_X86_64
if (c->x86_tlbsize > 0)
@@ -430,7 +430,7 @@ static void impress_friends(void)
"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
num_online_cpus(),
bogosum/(500000/HZ),
- (bogosum/(5000/HZ))%100);
+ (bogosum * 10/(50000/HZ))%100);
pr_debug("Before bogocount - setting activated=1.\n");
}
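
Editor's note: these two BogoMIPS hunks change only the fractional-part arithmetic. 5000/HZ is an integer division that grows lossy as HZ rises and reaches zero for HZ above 5000, while 50000/HZ stays usable up to HZ=50000; this matters because the ck patches make much higher tick rates selectable. A minimal userspace sketch of the difference, with a made-up loops_per_jiffy value:

#include <stdio.h>

/* Sketch: compare the old and new fractional BogoMIPS formulas at a
 * high tick rate. lpj is a made-up loops_per_jiffy; HZ 1500 stands in
 * for the higher tick rates the ck patches offer. */
int main(void)
{
	unsigned long lpj = 2994525;	/* hypothetical loops_per_jiffy */
	unsigned long hz = 1500;

	/* old: 5000/1500 truncates to 3 (from 3.33), skewing the result */
	unsigned long old_frac = (lpj / (5000 / hz)) % 100;
	/* new: 50000/1500 truncates to 33, a much smaller relative error */
	unsigned long new_frac = (lpj * 10 / (50000 / hz)) % 100;

	printf("old %lu.%02lu vs new %lu.%02lu BogoMIPS\n",
	       lpj / (500000 / hz), old_frac,
	       lpj / (500000 / hz), new_frac);
	return 0;
}
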
@@ -28,6 +28,7 @@
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>
+#include <linux/sched.h>
#include <mach/perflock.h>
#include <linux/syscore_ops.h>
@@ -1530,6 +1531,12 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
target_freq, relation);
if (cpu_online(policy->cpu) && cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation);
+ if (likely(retval != -EINVAL)) {
+ if (target_freq == policy->max)
+ cpu_nonscaling(policy->cpu);
+ else
+ cpu_scaling(policy->cpu);
+ }
return retval;
}
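
Editor's note: after any transition that did not fail with -EINVAL, the driver now tells the scheduler whether the CPU landed on its maximum frequency. On a BFS build these hooks are meant to flag the runqueue so task placement can prefer CPUs already running at full speed; the CFS build gets empty stubs (see the sched.h hunk further down). A hypothetical non-stub implementation, with invented storage, might look like:

#include <stdbool.h>

/* Hypothetical non-stub implementation; names and the flat array are
 * invented for illustration (BFS keeps an equivalent flag per runqueue). */
#define NR_CPUS 8

static bool cpu_freq_capped[NR_CPUS];	/* true: CPU already at max freq */

static void cpu_nonscaling(int cpu)
{
	cpu_freq_capped[cpu] = true;	/* at policy->max: no headroom left */
}

static void cpu_scaling(int cpu)
{
	cpu_freq_capped[cpu] = false;	/* below max: governor may ramp up */
}

int main(void)
{
	cpu_nonscaling(0);	/* cpufreq hit policy->max on CPU 0 */
	cpu_scaling(1);		/* CPU 1 still has frequency headroom */
	return (cpu_freq_capped[0] && !cpu_freq_capped[1]) ? 0 : 1;
}
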
@@ -29,8 +29,8 @@
* It helps to keep variable names smaller, simpler
*/
-#define DEF_FREQUENCY_UP_THRESHOLD (80)
-#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
+#define DEF_FREQUENCY_UP_THRESHOLD (63)
+#define DEF_FREQUENCY_DOWN_THRESHOLD (26)
/*
* The polling frequency of this governor depends on the capability of
@@ -31,8 +31,8 @@
* It helps to keep variable names smaller, simpler
*/
-#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
-#define DEF_FREQUENCY_UP_THRESHOLD (90)
+#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (26)
+#define DEF_FREQUENCY_UP_THRESHOLD (63)
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3)
@@ -576,10 +576,10 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
/*
* Every sampling_rate, we check, if current idle time is less
- * than 20% (default), then we try to increase frequency
+ * than 37% (default), then we try to increase frequency
* Every sampling_rate, we look for a the lowest
* frequency which can sustain the load while keeping idle time over
- * 30%. If such a frequency exist, we try to decrease to this frequency.
+ * 63%. If such a frequency exist, we try to decrease to this frequency.
*
* Any frequency increase takes it to the maximum frequency.
* Frequency reduction happens at minimum steps of
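
Editor's note: the retuned thresholds make both governors ramp up earlier and come down later. With an up threshold of 63, frequency rises once idle time drops below 37%, and with ondemand's down differential widened to 26, it only steps down to a frequency that keeps the load under 63 - 26 = 37%, i.e. idle above 63%, which is exactly what the rewritten comment states. A quick re-derivation:

#include <stdio.h>

/* Re-derive the governor comment's numbers from the new thresholds. */
int main(void)
{
	int up_threshold = 63;		/* was 80 */
	int down_differential = 26;	/* was 10 (ondemand) */

	/* ramp up when busy time exceeds 63%, i.e. idle below 37% */
	printf("ramp up when idle < %d%%\n", 100 - up_threshold);
	/* step down only to a frequency keeping busy time below
	 * 63 - 26 = 37%, i.e. idle above 63% */
	printf("ramp down when idle > %d%%\n",
	       100 - (up_threshold - down_differential));
	return 0;
}
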
@@ -418,7 +418,7 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
static int proc_pid_schedstat(struct task_struct *task, char *buffer)
{
return sprintf(buffer, "%llu %llu %lu\n",
- (unsigned long long)task->se.sum_exec_runtime,
+ (unsigned long long)tsk_seruntime(task),
(unsigned long long)task->sched_info.run_delay,
task->sched_info.pcount);
}
@@ -64,6 +64,8 @@ static inline int task_ioprio_class(struct io_context *ioc)
static inline int task_nice_ioprio(struct task_struct *task)
{
+ if (iso_task(task))
+ return 0;
return (task_nice(task) + 20) / 5;
}
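
Editor's note: (task_nice(task) + 20) / 5 folds the nice range -20..19 onto the eight best-effort I/O priority levels 0..7; the new early return gives SCHED_ISO tasks the best level regardless of nice. iso_task() is one of the stubs added to sched.h below and always returns false on a CFS build. The worked mapping:

#include <stdio.h>

/* Worked mapping: nice -20..19 folds onto best-effort ioprio 0..7. */
int main(void)
{
	int nice;

	for (nice = -20; nice <= 19; nice += 5)
		printf("nice %3d -> ioprio %d\n", nice, (nice + 20) / 5);
	/* A SCHED_ISO task would now get ioprio 0 regardless of its nice. */
	return 0;
}
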
@@ -164,7 +164,7 @@ static inline u64 get_jiffies_64(void)
* Have the 32 bit jiffies value wrap 5 minutes after boot
* so jiffies wrap bugs show up earlier.
*/
-#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-10*HZ))
/*
* Change timeval to jiffies, trying to avoid the
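
Editor's note: INITIAL_JIFFIES deliberately starts the counter just short of 32-bit wraparound; moving it from -300*HZ to -10*HZ means wrap bugs now surface ten seconds after boot instead of five minutes. A small demonstration of the unsigned arithmetic (HZ value assumed):

#include <stdio.h>

/* Demo: starting jiffies at (unsigned)(-10*HZ) wraps the 32-bit counter
 * 10 seconds after boot instead of 5 minutes. HZ=1000 assumed here. */
int main(void)
{
	unsigned int hz = 1000;
	unsigned int jiffies = (unsigned int)(-10 * (int)hz);

	printf("start: %u, wraps after %u ticks (%u s)\n",
	       jiffies, 0u - jiffies, (0u - jiffies) / hz);
	return 0;
}
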
@@ -23,17 +23,26 @@ static inline int page_is_file_cache(struct page *page)
static inline void
__add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l,
- struct list_head *head)
+ struct list_head *head, int tail)
{
- list_add(&page->lru, head);
+ if (tail)
+ list_add_tail(&page->lru, head);
+ else
+ list_add(&page->lru, head);
__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
mem_cgroup_add_lru_list(page, l);
}
static inline void
add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
{
- __add_page_to_lru_list(zone, page, l, &zone->lru[l].list);
+ __add_page_to_lru_list(zone, page, l, &zone->lru[l].list, 0);
+}
+
+static inline void
+add_page_to_lru_list_tail(struct zone *zone, struct page *page, enum lru_list l)
+{
+ __add_page_to_lru_list(zone, page, l, &zone->lru[l].list, 1);
}
static inline void
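
Editor's note: the new tail flag selects between list_add() and list_add_tail(). Since reclaim scans the LRU from its tail, head insertion protects a page longest while tail insertion makes it the first eviction candidate; add_page_to_lru_list_tail() lets the ck code queue pages it expects to be used only once behind the established working set. A toy model of that ordering, not actual mm code:

#include <stdio.h>
#include <string.h>

/* Toy model of LRU ordering: reclaim scans from index 0 (the list tail),
 * so head insertion protects a page longest while tail insertion makes
 * it the first eviction candidate. Pure illustration. */
#define LRU_MAX 8

static const char *lru[LRU_MAX];
static int n;

static void add_head(const char *page)	/* like list_add(): hot end */
{
	lru[n++] = page;
}

static void add_tail(const char *page)	/* like list_add_tail(): cold end */
{
	memmove(&lru[1], &lru[0], n++ * sizeof(lru[0]));
	lru[0] = page;
}

int main(void)
{
	add_head("working-set-A");
	add_head("working-set-B");
	add_tail("readahead-C");	/* evicted first under pressure */
	printf("first reclaim candidate: %s\n", lru[0]);
	return 0;
}
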
@@ -15,6 +15,7 @@
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
+#include <linux/timer.h>
#include <generated/bounds.h>
#include <asm/atomic.h>
#include <asm/page.h>
@@ -184,12 +185,14 @@ enum zone_watermarks {
WMARK_MIN,
WMARK_LOW,
WMARK_HIGH,
+ WMARK_LOTS,
NR_WMARK
};
#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
+#define lots_wmark_pages(z) (z->watermark[WMARK_LOTS])
struct per_cpu_pages {
int count; /* number of pages in the list */
@@ -361,7 +364,7 @@ struct zone {
ZONE_PADDING(_pad1_)
/* Fields commonly accessed by the page reclaim scanner */
- spinlock_t lru_lock;
+ spinlock_t lru_lock;
struct zone_lru {
struct list_head list;
} lru[NR_LRU_LISTS];
@@ -663,6 +666,7 @@ typedef struct pglist_data {
wait_queue_head_t kswapd_wait;
struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */
int kswapd_max_order;
+ struct timer_list watermark_timer;
enum zone_type classzone_idx;
} pg_data_t;
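
Editor's note: WMARK_LOTS adds a fourth watermark above WMARK_HIGH, and each node gains a watermark_timer; the idea in -ck, as I read it, is to wake kswapd periodically so zones are replenished toward the higher mark in the background instead of only when an allocation hits the low watermark. A hypothetical check in that spirit (zone_page_state() and the *_wmark_pages() helpers are existing kernel APIs; the function itself is invented):

/* Hypothetical background-reclaim check: keep kswapd working while free
 * pages sit below the new, higher "lots" watermark. Illustration only. */
static int zone_wants_background_reclaim(struct zone *zone)
{
	return zone_page_state(zone, NR_FREE_PAGES) < lots_wmark_pages(zone);
}
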
@@ -11,8 +11,8 @@
#include <linux/nfs4.h>
-/* thread usage wraps very million seconds (approx one fortnight) */
-#define NFSD_USAGE_WRAP (HZ*1000000)
+/* thread usage wraps every one hundred thousand seconds (approx one day) */
+#define NFSD_USAGE_WRAP (HZ*100000)
#ifdef __KERNEL__
@@ -458,6 +458,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
+int __add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+ pgoff_t offset, gfp_t gfp_mask, int tail);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
@@ -39,6 +39,8 @@
#define SCHED_BATCH 3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE 5
+#define SCHED_IDLEPRIO SCHED_IDLE
+
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK 0x40000000
@@ -269,8 +271,6 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);
-extern int runqueue_is_locked(int cpu);
-
extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern void select_nohz_load_balancer(int stop_tick);
@@ -1227,9 +1227,12 @@ struct task_struct {
#ifdef CONFIG_SMP
struct task_struct *wake_entry;
- int on_cpu;
#endif
- int on_rq;
+#if defined(CONFIG_SMP)
+ bool on_cpu;
+#endif
+ bool on_rq;
int prio, static_prio, normal_prio;
unsigned int rt_priority;
@@ -1576,6 +1579,42 @@ struct task_struct {
#endif
};
+extern int runqueue_is_locked(int cpu);
+static inline void cpu_scaling(int cpu)
+{
+}
+
+static inline void cpu_nonscaling(int cpu)
+{
+}
+#define tsk_seruntime(t) ((t)->se.sum_exec_runtime)
+#define tsk_rttimeout(t) ((t)->rt.timeout)
+
+static inline void tsk_cpus_current(struct task_struct *p)
+{
+ p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
+}
+
+static inline void print_scheduler_version(void)
+{
+ printk(KERN_INFO"CFS CPU scheduler.\n");
+}
+
+static inline bool iso_task(struct task_struct *p)
+{
+ return false;
+}
+
+static inline void remove_cpu(int cpu)
+{
+}
+
+/* Anyone feel like implementing this? */
+static inline int above_background_load(void)
+{
+ return 1;
+}
+
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
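
Editor's note: this block is the CFS half of the ck interface. Every hook the patched call sites use (cpu_scaling()/cpu_nonscaling() in cpufreq, tsk_seruntime() in the /proc schedstat code, iso_task() in the ioprio mapping) gets a no-op or pass-through stub here, so one call site compiles against either scheduler. A hypothetical caller illustrating the point:

/* Hypothetical caller, compiling unchanged on CFS or BFS builds: the
 * accessor hides which runtime field the scheduler actually keeps. */
static void report_task(struct task_struct *p)
{
	printk(KERN_INFO "pid %d ran %llu ns (iso=%d)\n",
	       p->pid, (unsigned long long)tsk_seruntime(p), iso_task(p));
}
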
@@ -1593,10 +1632,11 @@ struct task_struct {
*/
#define MAX_USER_RT_PRIO 100
-#define MAX_RT_PRIO MAX_USER_RT_PRIO
+#define MAX_RT_PRIO (MAX_USER_RT_PRIO + 1)
+#define DEFAULT_PRIO (MAX_RT_PRIO + 20)
#define MAX_PRIO (MAX_RT_PRIO + 40)
-#define DEFAULT_PRIO (MAX_RT_PRIO + 20)
+#define NORMAL_PRIO DEFAULT_PRIO
static inline int rt_prio(int prio)
{
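
Editor's note: the RT priority space grows by one level, 100 user RT priorities plus an extra slot that BFS uses for SCHED_ISO. That shifts nice 0 from the old 120 to 121 and MAX_PRIO from 140 to 141, and it is also why the spufs hunk near the top drops its private "#define NORMAL_PRIO 120": the value is now defined here and is no longer the constant 120. Re-deriving the layout:

#include <assert.h>

/* Re-derive the new priority layout; the SCHED_ISO remark above is an
 * assumption about BFS's use of the extra slot on this CFS-stub build. */
int main(void)
{
	int max_user_rt_prio = 100;		/* user RT prios 0..99 */
	int max_rt_prio = max_user_rt_prio + 1;	/* 101: one extra level */
	int default_prio = max_rt_prio + 20;	/* 121 (nice 0), was 120 */
	int max_prio = max_rt_prio + 40;	/* 141, was 140 */

	assert(default_prio == 121 && max_prio == 141);
	return 0;
}
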
@@ -1949,7 +1989,7 @@ extern unsigned long long
task_sched_runtime(struct task_struct *task);
/* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP)
extern void sched_exec(void);
#else
#define sched_exec() {}
@@ -2578,7 +2618,7 @@ extern void signal_wake_up(struct task_struct *t, int resume_stopped);
*/
#ifdef CONFIG_SMP
-static inline unsigned int task_cpu(const struct task_struct *p)
+static inline int task_cpu(const struct task_struct *p)
{
return task_thread_info(p)->cpu;
}
@@ -2587,12 +2627,12 @@ extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
#else
-static inline unsigned int task_cpu(const struct task_struct *p)
+static inline int task_cpu(const struct task_struct *p)
{
return 0;
}
-static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
+static inline void set_task_cpu(struct task_struct *p, int cpu)
{
}
@@ -2706,5 +2746,3 @@ static inline unsigned long rlimit_max(unsigned int limit)
}
#endif /* __KERNEL__ */
-
-#endif