fast_dormancy acer
cleaton committed Feb 10, 2011
1 parent 7f01970 commit f8d575d
Showing 12 changed files with 1,086 additions and 12 deletions.
6 changes: 6 additions & 0 deletions .config
@@ -438,6 +438,9 @@ CONFIG_CONSOLE_EARLYSUSPEND=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
CONFIG_NET=y




#
# Networking options
#
@@ -481,6 +484,9 @@ CONFIG_ANDROID_PARANOID_NETWORK=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
CONFIG_NETFILTER_ADVANCED=y
CONFIG_IP_FILTER=y
CONFIG_FAST_DORMANCY=y


#
# Core Netfilter Configuration
177 changes: 177 additions & 0 deletions fs/fs_struct.c
@@ -0,0 +1,177 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/fs_struct.h>

/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_root(struct fs_struct *fs, struct path *path)
{
        struct path old_root;

        write_lock(&fs->lock);
        old_root = fs->root;
        fs->root = *path;
        path_get(path);
        write_unlock(&fs->lock);
        if (old_root.dentry)
                path_put(&old_root);
}

/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_pwd(struct fs_struct *fs, struct path *path)
{
        struct path old_pwd;

        write_lock(&fs->lock);
        old_pwd = fs->pwd;
        fs->pwd = *path;
        path_get(path);
        write_unlock(&fs->lock);

        if (old_pwd.dentry)
                path_put(&old_pwd);
}

void chroot_fs_refs(struct path *old_root, struct path *new_root)
{
        struct task_struct *g, *p;
        struct fs_struct *fs;
        int count = 0;

        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                task_lock(p);
                fs = p->fs;
                if (fs) {
                        write_lock(&fs->lock);
                        if (fs->root.dentry == old_root->dentry
                            && fs->root.mnt == old_root->mnt) {
                                path_get(new_root);
                                fs->root = *new_root;
                                count++;
                        }
                        if (fs->pwd.dentry == old_root->dentry
                            && fs->pwd.mnt == old_root->mnt) {
                                path_get(new_root);
                                fs->pwd = *new_root;
                                count++;
                        }
                        write_unlock(&fs->lock);
                }
                task_unlock(p);
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);
        while (count--)
                path_put(old_root);
}

void free_fs_struct(struct fs_struct *fs)
{
        path_put(&fs->root);
        path_put(&fs->pwd);
        kmem_cache_free(fs_cachep, fs);
}

void exit_fs(struct task_struct *tsk)
{
        struct fs_struct *fs = tsk->fs;

        if (fs) {
                int kill;
                task_lock(tsk);
                write_lock(&fs->lock);
                tsk->fs = NULL;
                kill = !--fs->users;
                write_unlock(&fs->lock);
                task_unlock(tsk);
                if (kill)
                        free_fs_struct(fs);
        }
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
        struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
        /* We don't need to lock fs - think why ;-) */
        if (fs) {
                fs->users = 1;
                fs->in_exec = 0;
                rwlock_init(&fs->lock);
                fs->umask = old->umask;
                read_lock(&old->lock);
                fs->root = old->root;
                path_get(&old->root);
                fs->pwd = old->pwd;
                path_get(&old->pwd);
                read_unlock(&old->lock);
        }
        return fs;
}

int unshare_fs_struct(void)
{
        struct fs_struct *fs = current->fs;
        struct fs_struct *new_fs = copy_fs_struct(fs);
        int kill;

        if (!new_fs)
                return -ENOMEM;

        task_lock(current);
        write_lock(&fs->lock);
        kill = !--fs->users;
        current->fs = new_fs;
        write_unlock(&fs->lock);
        task_unlock(current);

        if (kill)
                free_fs_struct(fs);

        return 0;
}
EXPORT_SYMBOL_GPL(unshare_fs_struct);

int current_umask(void)
{
        return current->fs->umask;
}
EXPORT_SYMBOL(current_umask);

/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
        .users = 1,
        .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
        .umask = 0022,
};

void daemonize_fs_struct(void)
{
        struct fs_struct *fs = current->fs;

        if (fs) {
                int kill;

                task_lock(current);

                write_lock(&init_fs.lock);
                init_fs.users++;
                write_unlock(&init_fs.lock);

                write_lock(&fs->lock);
                current->fs = &init_fs;
                kill = !--fs->users;
                write_unlock(&fs->lock);

                task_unlock(current);
                if (kill)
                        free_fs_struct(fs);
        }
}
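
For context only (not part of this commit): the helpers above are consumed by VFS syscalls elsewhere in the tree. A hedged sketch of a chroot(2)-style caller of set_fs_root(), with permission checks trimmed; the exact names and checks are assumptions about kernels of this vintage, not this commit's code:

/* Illustrative sketch only -- not from this commit. */
SYSCALL_DEFINE1(chroot, const char __user *, filename)
{
        struct path path;
        int error;

        error = user_path_dir(filename, &path);        /* resolve the requested new root */
        if (error)
                return error;

        error = -EPERM;
        if (capable(CAP_SYS_CHROOT)) {
                set_fs_root(current->fs, &path);        /* takes its own reference, drops the old root */
                error = 0;
        }

        path_put(&path);        /* release the lookup reference */
        return error;
}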
9 changes: 9 additions & 0 deletions kernel/power/Kconfig
@@ -19,6 +19,15 @@ config PM
          will issue the hlt instruction if nothing is to be done, thereby
          sending the processor to sleep and saving power.

config IP_FILTER
        bool "Enable IP filter support"
        depends on PM

config FAST_DORMANCY
        bool "Enable FAST DORMANCY support"
        depends on PM


config PM_DEBUG
        bool "Power Management Debug Support"
        depends on PM
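
For illustration (not part of the diff): once enabled, a bool symbol such as CONFIG_FAST_DORMANCY is visible to the preprocessor, so feature code can be compiled in conditionally. A minimal sketch; fast_dormancy_trigger() is a hypothetical helper, not something defined by this commit:

#ifdef CONFIG_FAST_DORMANCY
        fast_dormancy_trigger();        /* hypothetical helper, built only when the option is set */
#else
        /* feature compiled out: nothing to do */
#endif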
2 changes: 2 additions & 0 deletions kernel/power/Makefile
@@ -17,3 +17,5 @@ obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
obj-$(CONFIG_NPA) += npa.o npa_pm_qos.o
obj-$(CONFIG_NPA_REMOTE) += npa_remote.o

obj-$(CONFIG_IP_FILTER) += ip_filter.o
obj-$(CONFIG_FAST_DORMANCY) += fast_dormancy.o
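
The matching source files (kernel/power/ip_filter.c and kernel/power/fast_dormancy.c) are among the 12 changed files but are not shown in this excerpt. As a hedged sketch only, a built-in object selected by a bool Kconfig symbol usually amounts to little more than an initcall; everything below is assumed and is not the commit's actual code:

/* Hypothetical skeleton -- the real kernel/power/fast_dormancy.c is not shown here. */
#include <linux/kernel.h>
#include <linux/init.h>

static int __init fast_dormancy_init(void)
{
        pr_info("fast_dormancy: support enabled\n");        /* placeholder init work */
        return 0;
}
late_initcall(fast_dormancy_init);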
77 changes: 77 additions & 0 deletions kernel/power/earlysuspend.c
@@ -20,14 +20,19 @@
#include <linux/syscalls.h> /* sys_sync */
#include <linux/wakelock.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

#include "power.h"

enum {
        DEBUG_USER_STATE = 1U << 0,
        DEBUG_SUSPEND = 1U << 2,
};
#ifdef CONFIG_MACH_ACER_A1
static int debug_mask = DEBUG_USER_STATE | DEBUG_SUSPEND;
#else
static int debug_mask = DEBUG_USER_STATE;
#endif
module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);

static DEFINE_MUTEX(early_suspend_lock);
@@ -44,6 +49,35 @@ enum {
};
static int state;

// Create a work queue that monitors the "suspend" thread while the DUT enters suspend.
#ifdef CONFIG_MACH_ACER_A1
static int early_suspend_is_working = 0;

struct task_struct *suspend_thread_task = NULL;
EXPORT_SYMBOL(suspend_thread_task);

static void monitor_suspend_work_queue(struct work_struct *work);
static DECLARE_WORK(Monitor_Work_Queue, monitor_suspend_work_queue);
struct workqueue_struct *suspend_work_queue_monitored = NULL;

int is_suspend_mode(void)
{
        return !!(state & SUSPENDED);
}
EXPORT_SYMBOL(is_suspend_mode);

static void monitor_suspend_work_queue(struct work_struct *work)
{
        while (early_suspend_is_working) {
                msleep(5000);
                if (suspend_thread_task && !has_wake_lock(WAKE_LOCK_SUSPEND)) {
                        pr_info("Suspend Thread is blocked.\r\n");
                        sched_show_task(suspend_thread_task);
                }
        }
}
#endif // CONFIG_MACH_ACER_A1
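
For illustration (not part of the diff): is_suspend_mode() is exported so that other kernel code can check whether early suspend has completed. A hedged sketch of a possible caller; the extern declaration stands in for whatever header actually carries the prototype:

/* Hypothetical caller of the exported helper -- not from this commit. */
extern int is_suspend_mode(void);        /* prototype location is an assumption */

static void example_periodic_work(void)
{
        if (is_suspend_mode())
                return;        /* device is suspended: skip the periodic work */
        /* ... normal processing ... */
}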

void register_early_suspend(struct early_suspend *handler)
{
        struct list_head *pos;
@@ -93,10 +127,20 @@ static void early_suspend(struct work_struct *work)

        if (debug_mask & DEBUG_SUSPEND)
                pr_info("early_suspend: call handlers\n");
#ifdef CONFIG_MACH_ACER_A1
        list_for_each_entry(pos, &early_suspend_handlers, link) {
                if (pos->suspend != NULL) {
                        if (debug_mask & DEBUG_SUSPEND)
                                pr_info("[SUSPEND_DEBUG] early suspend ... [0x%8x]\r\n", (unsigned int) pos->suspend);
                        pos->suspend(pos);
                }
        }
#else // CONFIG_MACH_ACER_A1
        list_for_each_entry(pos, &early_suspend_handlers, link) {
                if (pos->suspend != NULL)
                        pos->suspend(pos);
        }
#endif // CONFIG_MACH_ACER_A1
        mutex_unlock(&early_suspend_lock);

        if (debug_mask & DEBUG_SUSPEND)
@@ -131,11 +175,25 @@ static void late_resume(struct work_struct *work)
        }
        if (debug_mask & DEBUG_SUSPEND)
                pr_info("late_resume: call handlers\n");
#ifdef CONFIG_MACH_ACER_A1
        list_for_each_entry_reverse(pos, &early_suspend_handlers, link) {
                if (pos->resume != NULL) {
                        if (debug_mask & DEBUG_SUSPEND)
                                pr_info("[SUSPEND_DEBUG] late resume ... [0x%8x]\r\n", (unsigned int) pos->resume);
                        pos->resume(pos);
                }
        }
#else // CONFIG_MACH_ACER_A1
        list_for_each_entry_reverse(pos, &early_suspend_handlers, link)
                if (pos->resume != NULL)
                        pos->resume(pos);
#endif // CONFIG_MACH_ACER_A1
        if (debug_mask & DEBUG_SUSPEND)
                pr_info("late_resume: done\n");

#ifdef CONFIG_MACH_ACER_A1
        early_suspend_is_working = 0;
#endif
abort:
        mutex_unlock(&early_suspend_lock);
}
@@ -145,23 +203,42 @@ void request_suspend_state(suspend_state_t new_state)
        unsigned long irqflags;
        int old_sleep;

#ifdef CONFIG_MACH_ACER_A1
        if (suspend_work_queue_monitored == NULL)
                suspend_work_queue_monitored = create_singlethread_workqueue("monitor_suspend");
#endif

        spin_lock_irqsave(&state_lock, irqflags);
        old_sleep = state & SUSPEND_REQUESTED;
        if (debug_mask & DEBUG_USER_STATE) {
                struct timespec ts;
                struct rtc_time tm;
                getnstimeofday(&ts);
                rtc_time_to_tm(ts.tv_sec, &tm);
#ifdef CONFIG_MACH_ACER_A1
                pr_info("request_suspend_state: %s (%d->%d) at %lld "
                        "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC) pid=%d\n",
                        new_state != PM_SUSPEND_ON ? "sleep" : "wakeup",
                        requested_suspend_state, new_state,
                        ktime_to_ns(ktime_get()),
                        tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                        tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec, current->pid);
#else
                pr_info("request_suspend_state: %s (%d->%d) at %lld "
                        "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n",
                        new_state != PM_SUSPEND_ON ? "sleep" : "wakeup",
                        requested_suspend_state, new_state,
                        ktime_to_ns(ktime_get()),
                        tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                        tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
#endif
        }
        if (!old_sleep && new_state != PM_SUSPEND_ON) {
                state |= SUSPEND_REQUESTED;
#ifdef CONFIG_MACH_ACER_A1
                early_suspend_is_working = 1;
                queue_work(suspend_work_queue_monitored, &Monitor_Work_Queue);
#endif
                queue_work(suspend_work_queue, &early_suspend_work);
        } else if (old_sleep && new_state == PM_SUSPEND_ON) {
                state &= ~SUSPEND_REQUESTED;
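
For context (not part of the diff): on Android kernels of this era, request_suspend_state() is reached when userspace writes "mem" or "on" to /sys/power/state. A minimal native-side sketch, assuming that sysfs interface:

/* Hypothetical userspace snippet -- not part of this commit. */
#include <fcntl.h>
#include <unistd.h>

static int request_sleep(void)
{
        int fd = open("/sys/power/state", O_WRONLY);
        if (fd < 0)
                return -1;
        (void) write(fd, "mem", 3);        /* kernel side ends up in request_suspend_state() */
        close(fd);
        return 0;
}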
