@@ -34,7 +34,7 @@
static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer);
static DEFINE_PER_CPU(int, hrtimer_status);
/* menu hrtimer mode */
enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT};
enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};

/*
* Concepts and ideas behind the menu governor
@@ -116,6 +116,13 @@ enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT};
*
*/

/*
* The C-state residency is so long that it is worthwhile to exit
* from the shallow C-state and re-enter into a deeper C-state.
*/
static unsigned int perfect_cstate_ms __read_mostly = 30;
module_param(perfect_cstate_ms, uint, 0000);

struct menu_device {
int last_state_idx;
int needs_update;
@@ -204,6 +211,16 @@ EXPORT_SYMBOL_GPL(menu_hrtimer_cancel);
static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer)
{
int cpu = smp_processor_id();
struct menu_device *data = &per_cpu(menu_devices, cpu);

/* In general case, the expected residency is much larger than
* deepest C-state target residency, but prediction logic still
* predicts a small predicted residency, so the prediction
* history is totally broken if the timer is triggered.
* So reset the correction factor.
*/
if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL)
data->correction_factor[data->bucket] = RESOLUTION * DECAY;

per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;

@@ -364,6 +381,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
/* not deepest C-state chosen for low predicted residency */
if (low_predicted) {
unsigned int timer_us = 0;
unsigned int perfect_us = 0;

/*
* Set a timer to detect whether this sleep is much
@@ -374,13 +392,28 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
*/
timer_us = 2 * (data->predicted_us + MAX_DEVIATION);

perfect_us = perfect_cstate_ms * 1000;

if (repeat && (4 * timer_us < data->expected_us)) {
RCU_NONIDLE(hrtimer_start(hrtmr,
ns_to_ktime(1000 * timer_us),
HRTIMER_MODE_REL_PINNED));
/* In repeat case, menu hrtimer is started */
per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
} else if (perfect_us < data->expected_us) {
/*
* The next timer is long. This could be because
* we did not make a useful prediction.
* In that case, it makes sense to re-enter
* into a deeper C-state after some time.
*/
RCU_NONIDLE(hrtimer_start(hrtmr,
ns_to_ktime(1000 * timer_us),
HRTIMER_MODE_REL_PINNED));
/* In general case, menu hrtimer is started */
per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
}

}

return data->last_state_idx;
@@ -94,7 +94,6 @@ static int lcdc_suspend(struct msm_panel_data *fb_panel)
clk_disable_unprepare(lcdc->pad_pclk);
clk_disable_unprepare(lcdc->pclk);
clk_disable_unprepare(lcdc->mdp_clk);
printk(KERN_INFO "RobPanelHookTestOff\n");

return 0;
}
@@ -109,7 +108,6 @@ static int lcdc_resume(struct msm_panel_data *fb_panel)
clk_prepare_enable(lcdc->pclk);
clk_prepare_enable(lcdc->pad_pclk);
mdp_writel(lcdc->mdp, 1, MDP_LCDC_EN);
printk(KERN_INFO "RobPanelHookTestOn\n");

return 0;
}
@@ -607,7 +607,7 @@ extern struct cpufreq_governor cpufreq_gov_cafactive;
#define CPUFREQ_TABLE_END ~1

struct cpufreq_frequency_table {
unsigned int driver_data; /* driver specific data, not used by core */
unsigned int index; /* any */
unsigned int frequency; /* kHz - doesn't need to be in ascending
* order */
};
@@ -0,0 +1,64 @@
/*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* Minsung Kim <ms925.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/

#ifndef __LINUX_CPUFREQ_LIMIT_H__
#define __LINUX_CPUFREQ_LIMIT_H__

struct cpufreq_limit_handle;

#ifdef CONFIG_CPU_FREQ_LIMIT

#define MAX_FREQ_LIMIT 1890000
#define MIN_FREQ_LIMIT 384000

struct cpufreq_limit_handle *cpufreq_limit_get(unsigned long min_freq,
unsigned long max_freq, char *label);
int cpufreq_limit_put(struct cpufreq_limit_handle *handle);

static inline
struct cpufreq_limit_handle *cpufreq_limit_min_freq(unsigned long min_freq,
char *label)
{
return cpufreq_limit_get(min_freq, 0, label);
}

static inline
struct cpufreq_limit_handle *cpufreq_limit_max_freq(unsigned long max_freq,
char *label)
{
return cpufreq_limit_get(0, max_freq, label);
}
#else
static inline
struct cpufreq_limit_handle *cpufreq_limit_get(unsigned long min_freq,
unsigned long max_freq char *label)
{
return NULL;
}

int cpufreq_limit_put(struct cpufreq_limit_handle *handle)
{
return 0;
}

static inline
struct cpufreq_limit_handle *cpufreq_limit_min_freq(unsigned long min_freq,
char *label)
{
return NULL;
}

static inline
struct cpufreq_limit_handle *cpufreq_limit_max_freq(unsigned long max_freq,
char *label)
{
return NULL;
}
#endif
#endif /* __LINUX_CPUFREQ_LIMIT_H__ */
@@ -343,6 +343,19 @@ static inline void lockup_detector_init(void)
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_check_count;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched __attribute__((__section__(".sched.text")))

@@ -2624,16 +2637,7 @@ static inline void thread_group_cputime_init(struct signal_struct *sig)
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

/* Set TIF_SIGPENDING on @t and wake it; implemented in kernel/signal.c. */
extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

/*
 * Wake @t for signal delivery.  When @resume is true the task is also
 * woken out of TASK_WAKEKILL (stopped/traced/killable) sleeps so it can
 * handle a fatal signal.
 */
static inline void signal_wake_up(struct task_struct *t, bool resume)
{
signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
/*
 * Ptrace variant: when @resume is true, wake the tracee specifically out
 * of __TASK_TRACED (pairs with the ptrace freeze/unfreeze logic, which
 * parks tracees in __TASK_TRACED).
 */
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}
extern void signal_wake_up(struct task_struct *t, int resume_stopped);

/*
* Wrappers for p->thread_info->cpu access. No-op on UP.
@@ -117,45 +117,11 @@ void __ptrace_unlink(struct task_struct *child)
* TASK_KILLABLE sleeps.
*/
if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
ptrace_signal_wake_up(child, true);
signal_wake_up(child, task_is_traced(child));

spin_unlock(&child->sighand->siglock);
}

/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
bool ret = false;

/* Lockless, nobody but us can set this flag */
if (task->jobctl & JOBCTL_LISTENING)
return ret;

spin_lock_irq(&task->sighand->siglock);
/*
 * Only freeze a task that is actually in TASK_TRACED and has no fatal
 * signal pending; moving it to __TASK_TRACED (no TASK_WAKEKILL bit)
 * makes it unwakeable by signals until ptrace_unfreeze_traced().
 */
if (task_is_traced(task) && !__fatal_signal_pending(task)) {
task->state = __TASK_TRACED;
ret = true;
}
spin_unlock_irq(&task->sighand->siglock);

/* true = caller may operate on the tracee and must unfreeze it later */
return ret;
}

/*
 * Undo ptrace_freeze_traced(): restore the tracee to the normal
 * (killable) TASK_TRACED state, or wake it if SIGKILL arrived while
 * it was frozen.
 */
static void ptrace_unfreeze_traced(struct task_struct *task)
{
/* Not frozen by us (or already woken) — nothing to undo. */
if (task->state != __TASK_TRACED)
return;

WARN_ON(!task->ptrace || task->parent != current);

spin_lock_irq(&task->sighand->siglock);
/*
 * A fatal signal arrived while frozen: wake the task so it can die;
 * otherwise drop it back to killable TASK_TRACED.
 */
if (__fatal_signal_pending(task))
wake_up_state(task, __TASK_TRACED);
else
task->state = TASK_TRACED;
spin_unlock_irq(&task->sighand->siglock);
}

/**
* ptrace_check_attach - check whether ptracee is ready for ptrace operation
* @child: ptracee to check for
@@ -185,29 +151,24 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
* be changed by us so it's not changing right after this.
*/
read_lock(&tasklist_lock);
if (child->ptrace && child->parent == current) {
WARN_ON(child->state == __TASK_TRACED);
if ((child->ptrace & PT_PTRACED) && child->parent == current) {
/*
* child->sighand can't be NULL, release_task()
* does ptrace_unlink() before __exit_signal().
*/
if (ignore_state || ptrace_freeze_traced(child))
spin_lock_irq(&child->sighand->siglock);
WARN_ON_ONCE(task_is_stopped(child));
if (ignore_state || (task_is_traced(child) &&
!(child->jobctl & JOBCTL_LISTENING)))
ret = 0;
spin_unlock_irq(&child->sighand->siglock);
}
read_unlock(&tasklist_lock);

if (!ret && !ignore_state) {
if (!wait_task_inactive(child, __TASK_TRACED)) {
/*
* This can only happen if may_ptrace_stop() fails and
* ptrace_stop() changes ->state back to TASK_RUNNING,
* so we should not worry about leaking __TASK_TRACED.
*/
WARN_ON(child->state == __TASK_TRACED);
ret = -ESRCH;
}
}
if (!ret && !ignore_state)
ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

/* All systems go.. */
return ret;
}

@@ -351,7 +312,7 @@ static int ptrace_attach(struct task_struct *task, long request,
*/
if (task_is_stopped(task) &&
task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
signal_wake_up_state(task, __TASK_STOPPED);
signal_wake_up(task, 1);

spin_unlock(&task->sighand->siglock);

@@ -788,7 +749,7 @@ int ptrace_request(struct task_struct *child, long request,
* tracee into STOP.
*/
if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

unlock_task_sighand(child, &flags);
ret = 0;
@@ -814,7 +775,7 @@ int ptrace_request(struct task_struct *child, long request,
* start of this trap and now. Trigger re-trap.
*/
if (child->jobctl & JOBCTL_TRAP_NOTIFY)
ptrace_signal_wake_up(child, true);
signal_wake_up(child, true);
ret = 0;
}
unlock_task_sighand(child, &flags);
@@ -951,8 +912,6 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
goto out_put_task_struct;

ret = arch_ptrace(child, request, addr, data);
if (ret || request != PTRACE_DETACH)
ptrace_unfreeze_traced(child);

out_put_task_struct:
put_task_struct(child);
@@ -1092,11 +1051,8 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,

ret = ptrace_check_attach(child, request == PTRACE_KILL ||
request == PTRACE_INTERRUPT);
if (!ret) {
if (!ret)
ret = compat_arch_ptrace(child, request, addr, data);
if (ret || request != PTRACE_DETACH)
ptrace_unfreeze_traced(child);
}

out_put_task_struct:
put_task_struct(child);
@@ -680,17 +680,23 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
* No need to set need_resched since signal event passing
* goes through ->blocked
*/
void signal_wake_up_state(struct task_struct *t, unsigned int state)
void signal_wake_up(struct task_struct *t, int resume)
{
unsigned int mask;

set_tsk_thread_flag(t, TIF_SIGPENDING);

/*
* TASK_WAKEKILL also means wake it up in the stopped/traced/killable
* For SIGKILL, we want to wake it up in the stopped/traced/killable
* case. We don't check t->state here because there is a race with it
* executing another processor and just now entering stopped state.
* By using wake_up_state, we ensure the process will wake up and
* handle its death signal.
*/
if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
mask = TASK_INTERRUPTIBLE;
if (resume)
mask |= TASK_WAKEKILL;
if (!wake_up_state(t, mask))
kick_process(t);
}

@@ -839,7 +845,7 @@ static void ptrace_trap_notify(struct task_struct *t)
assert_spin_locked(&t->sighand->siglock);

task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
@@ -1804,10 +1810,6 @@ static inline int may_ptrace_stop(void)
* If SIGKILL was already sent before the caller unlocked
* ->siglock we must see ->core_state != NULL. Otherwise it
* is safe to enter schedule().
*
* This is almost outdated, a task with the pending SIGKILL can't
* block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
* after SIGKILL was already dequeued.
*/
if (unlikely(current->mm->core_state) &&
unlikely(current->mm == current->parent->mm))
@@ -1933,7 +1935,6 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
if (gstop_done)
do_notify_parent_cldstop(current, false, why);

/* tasklist protects us from ptrace_freeze_traced() */
__set_current_state(TASK_RUNNING);
if (clear_code)
current->exit_code = 0;