This file was deleted.

This file was deleted.

@@ -34,7 +34,7 @@ obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_SNAPPY_COMPRESS) += snappy/
obj-$(CONFIG_SNAPPY_DECOMPRESS) += snappy/
obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_ZCACHE) += zcache/
obj-$(CONFIG_QCACHE) += qcache/
obj-$(CONFIG_ZSMALLOC) += zsmalloc/
@@ -30,7 +30,6 @@
*
*/

#define REALLY_WANT_TRACEPOINTS
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -43,8 +42,8 @@
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/ratelimit.h>
#if defined(CONFIG_RUNTIME_COMPCACHE) || defined(CONFIG_ZSWAP)
#include <linux/swap.h>
#if defined (CONFIG_SWAP) && (defined (CONFIG_ZSWAP) || defined (CONFIG_ZRAM))
#include <linux/fs.h>
#endif /* CONFIG_RUNTIME_COMPCACHE || CONFIG_ZSWAP */

@@ -96,21 +95,39 @@ static unsigned long lowmem_deathpending_timeout;
printk(x); \
} while (0)

static int test_task_flag(struct task_struct *p, int flag)
#if defined(CONFIG_SEC_DEBUG_LMK_MEMINFO_VERBOSE)
static void dump_tasks_info(void)
{
struct task_struct *t = p;
struct task_struct *p;
struct task_struct *task;

for_each_thread(p,t) {
task_lock(t);
if (test_tsk_thread_flag(t, flag)) {
task_unlock(t);
return 1;
pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n");
for_each_process(p) {
/* check unkillable tasks */
if (is_global_init(p))
continue;
if (p->flags & PF_KTHREAD)
continue;

task = find_lock_task_mm(p);
if (!task) {
/*
* This is a kthread or all of p's threads have already
* detached their mm's. There's no need to report
* them; they can't be oom killed anyway.
*/
continue;
}
task_unlock(t);
}

return 0;
pr_info("[%5d] %5d %5d %8lu %8lu %3u %3d %5d %s\n",
task->pid, task_uid(task), task->tgid,
task->mm->total_vm, get_mm_rss(task->mm),
task_cpu(task), task->signal->oom_adj,
task->signal->oom_score_adj, task->comm);
task_unlock(task);
}
}
#endif

int can_use_cma_pages(gfp_t gfp_mask)
{
@@ -151,6 +168,13 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
struct task_struct *selected[LOWMEM_DEATHPENDING_DEPTH] = {NULL,};
#else
struct task_struct *selected = NULL;
#endif
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO_VERBOSE
static DEFINE_RATELIMIT_STATE(lmk_rs, DEFAULT_RATELIMIT_INTERVAL, 0);
#else
static DEFINE_RATELIMIT_STATE(lmk_rs, 6*DEFAULT_RATELIMIT_INTERVAL, 0);
#endif
#endif
int rem = 0;
int tasksize;
@@ -170,14 +194,17 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
#endif
int array_size = ARRAY_SIZE(lowmem_adj);
unsigned long nr_to_scan = sc->nr_to_scan;
#if defined (CONFIG_CMA)
struct reclaim_state *reclaim_state = current->reclaim_state;
#ifndef CONFIG_CMA
int other_free = global_page_state(NR_FREE_PAGES);
#else
int other_free = global_page_state(NR_FREE_PAGES) -
global_page_state(NR_FREE_CMA_PAGES);
#else
int other_free = global_page_state(NR_FREE_PAGES);
#endif
int other_file = global_page_state(NR_FILE_PAGES) - global_page_state(NR_SHMEM);

#if defined(CONFIG_RUNTIME_COMPCACHE) || defined(CONFIG_ZSWAP)
other_file -= total_swapcache_pages;
#endif /* CONFIG_RUNTIME_COMPCACHE || CONFIG_ZSWAP */
if (lowmem_adj_size < array_size)
array_size = lowmem_adj_size;
if (lowmem_minfree_size < array_size)
@@ -190,11 +217,16 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
}
}
if (nr_to_scan > 0)
lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
nr_to_scan, sc->gfp_mask, other_free,
other_file, min_score_adj);
rem = global_page_state(NR_ACTIVE_ANON) +
global_page_state(NR_ACTIVE_FILE) +
global_page_state(NR_INACTIVE_ANON) +
global_page_state(NR_INACTIVE_FILE);
if (nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
nr_to_scan, sc->gfp_mask, rem);

if (nr_to_scan > 0)
mutex_unlock(&scan_mutex);
@@ -222,24 +254,20 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
if (tsk->flags & PF_KTHREAD)
continue;

/* if task no longer has any memory ignore it */
if (test_task_flag(tsk, TIF_MM_RELEASED))
p = find_lock_task_mm(tsk);
if (!p)
continue;

if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
if (test_task_flag(tsk, TIF_MEMDIE)) {
if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
time_before_eq(jiffies, lowmem_deathpending_timeout)) {
task_unlock(p);
rcu_read_unlock();
/* give the system time to free up the memory */
msleep_interruptible(20);
mutex_unlock(&scan_mutex);
return 0;
}
}

p = find_lock_task_mm(tsk);
if (!p)
continue;


oom_score_adj = p->signal->oom_score_adj;
if (oom_score_adj < min_score_adj) {
task_unlock(p);
@@ -248,8 +276,10 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
tasksize = get_mm_rss(p->mm);
#if defined(CONFIG_ZSWAP)
if (atomic_read(&zswap_stored_pages)) {
lowmem_print(3, "shown tasksize : %d\n", tasksize);
tasksize += atomic_read(&zswap_pool_pages) * get_mm_counter(p->mm, MM_SWAPENTS)
/ atomic_read(&zswap_stored_pages);
lowmem_print(3, "real tasksize : %d\n", tasksize);
}
#endif

@@ -292,6 +322,10 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
max_selected_oom_idx = i;
}
}

lowmem_print(2, "select %d (%s), adj %d, \
size %d, to kill\n",
p->pid, p->comm, oom_score_adj, tasksize);
}
#else
if (selected) {
@@ -318,25 +352,67 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
#ifdef CONFIG_SAMP_HOTNESS
selected_hotness_adj = hotness_adj;
#endif
lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
p->pid, p->comm, oom_score_adj, tasksize);
#endif
}
#ifdef ENHANCED_LMK_ROUTINE
for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
if (selected[i]) {
#ifdef CONFIG_SAMP_HOTNESS
lowmem_print(1, "send sigkill to %d (%s), adj %d,\
size %d, free memory = %d, reclaimable memory = %d ,hotness %d\n",
selected[i]->pid, selected[i]->comm,
selected_oom_score_adj[i],
selected_tasksize[i],
other_free, other_file,
selected_hotness_adj);
#else
lowmem_print(1, "send sigkill to %d (%s), adj %d,\
size %d, free memory = %d, reclaimable memory = %d\n",
selected[i]->pid, selected[i]->comm,
selected_oom_score_adj[i],
selected_tasksize[i],
other_free, other_file);
#endif
lowmem_deathpending_timeout = jiffies + HZ;
send_sig(SIGKILL, selected[i], 0);
set_tsk_thread_flag(selected[i], TIF_MEMDIE);
rem -= selected_tasksize[i];
if(reclaim_state)
reclaim_state->reclaimed_slab += selected_tasksize[i];
#ifdef LMK_COUNT_READ
lmk_count++;
#endif
}
}
#else
if (selected) {
#ifdef CONFIG_SAMP_HOTNESS
lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d ,hotness %d\n",
selected->pid, selected->comm,
selected_oom_score_adj, selected_tasksize,selected_hotness_adj);
#else
lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
selected->pid, selected->comm,
selected_oom_score_adj, selected_tasksize);
#endif
rcu_read_unlock();
lowmem_deathpending_timeout = jiffies + HZ;
send_sig(SIGKILL, selected, 0);
set_tsk_thread_flag(selected, TIF_MEMDIE);
rem -= selected_tasksize;
msleep_interruptible(20);
} else
if(reclaim_state)
reclaim_state->reclaimed_slab = selected_tasksize;
#ifdef LMK_COUNT_READ
lmk_count++;
#endif
} else
rcu_read_unlock();
}
#endif

lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
nr_to_scan, sc->gfp_mask, rem);
mutex_unlock(&scan_mutex);
return rem;
}
@@ -374,9 +450,24 @@ static int android_oom_handler(struct notifier_block *nb,
int selected_tasksize = 0;
int selected_oom_score_adj;
#endif
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO_VERBOSE
static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL/5, 1);
#endif

unsigned long *freed = data;

/* show status */
pr_warning("%s invoked Android-oom-killer: "
"oom_adj=%d, oom_score_adj=%d\n",
current->comm, current->signal->oom_adj,
current->signal->oom_score_adj);
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO_VERBOSE
dump_stack();
show_mem(SHOW_MEM_FILTER_NODES);
if (__ratelimit(&oom_rs))
dump_tasks_info();
#endif

min_score_adj = 0;
#ifdef MULTIPLE_OOM_KILLER
for (i = 0; i < OOM_DEPTH; i++)
@@ -405,15 +496,13 @@ static int android_oom_handler(struct notifier_block *nb,
task_unlock(p);
continue;
}
if (fatal_signal_pending(p)) {
task_unlock(p);
continue;
}
tasksize = get_mm_rss(p->mm);
task_unlock(p);
if (tasksize <= 0)
continue;

lowmem_print(2, "oom: ------ %d (%s), adj %d, size %d\n",
p->pid, p->comm, oom_score_adj, tasksize);
#ifdef MULTIPLE_OOM_KILLER
if (all_selected_oom < OOM_DEPTH) {
for (i = 0; i < OOM_DEPTH; i++) {
@@ -446,6 +535,10 @@ static int android_oom_handler(struct notifier_block *nb,
max_selected_oom_idx = i;
}
}

lowmem_print(2, "oom: max_selected_oom_idx(%d) select %d (%s), adj %d, \
size %d, to kill\n",
max_selected_oom_idx, p->pid, p->comm, oom_score_adj, tasksize);
}
#else
if (selected) {
@@ -458,11 +551,18 @@ static int android_oom_handler(struct notifier_block *nb,
selected = p;
selected_tasksize = tasksize;
selected_oom_score_adj = oom_score_adj;
lowmem_print(2, "oom: select %d (%s), adj %d, size %d, to kill\n",
p->pid, p->comm, oom_score_adj, tasksize);
#endif
}
#ifdef MULTIPLE_OOM_KILLER
for (i = 0; i < OOM_DEPTH; i++) {
if (selected[i]) {
lowmem_print(1, "oom: send sigkill to %d (%s), adj %d,\
size %d\n",
selected[i]->pid, selected[i]->comm,
selected_oom_score_adj[i],
selected_tasksize[i]);
send_sig(SIGKILL, selected[i], 0);
rem -= selected_tasksize[i];
*freed += (unsigned long)selected_tasksize[i];
@@ -474,6 +574,9 @@ static int android_oom_handler(struct notifier_block *nb,
}
#else
if (selected) {
lowmem_print(1, "oom: send sigkill to %d (%s), adj %d, size %d\n",
selected->pid, selected->comm,
selected_oom_score_adj, selected_tasksize);
send_sig(SIGKILL, selected, 0);
set_tsk_thread_flag(selected, TIF_MEMDIE);
rem -= selected_tasksize;
@@ -485,6 +588,7 @@ static int android_oom_handler(struct notifier_block *nb,
#endif
read_unlock(&tasklist_lock);

lowmem_print(2, "oom: get memory %lu", *freed);
return rem;
}

@@ -543,10 +647,13 @@ static void lowmem_autodetect_oom_adj_values(void)
if (oom_score_adj <= OOM_ADJUST_MAX)
return;

lowmem_print(1, "lowmem_shrink: convert oom_adj to oom_score_adj:\n");
for (i = 0; i < array_size; i++) {
oom_adj = lowmem_adj[i];
oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
lowmem_adj[i] = oom_score_adj;
lowmem_print(1, "oom_adj %d => oom_score_adj %d\n",
oom_adj, oom_score_adj);
}
}

@@ -604,6 +711,8 @@ module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
#ifdef LMK_COUNT_READ
module_param_named(lmkcount, lmk_count, uint, S_IRUGO);
#endif


#ifdef OOM_COUNT_READ
module_param_named(oomcount, oom_count, uint, S_IRUGO);
#endif
@@ -0,0 +1,54 @@
config USB_DWC2
tristate "DesignWare USB2 DRD Core Support"
depends on USB
depends on VIRT_TO_BUS
help
Say Y or M here if your system has a Dual Role HighSpeed
USB controller based on the DesignWare HSOTG IP Core.

If you choose to build this driver as dynamically linked
modules, the core module will be called dwc2.ko, the
PCI bus interface module (if you have a PCI bus system)
will be called dwc2_pci.ko and the platform interface module
(for controllers directly connected to the CPU) will be called
dwc2_platform.ko.

NOTE: This driver at present only implements the Host mode
of the controller. The existing s3c-hsotg driver supports
Peripheral mode, but only for the Samsung S3C platforms.
There are plans to merge the s3c-hsotg driver with this
driver in the near future to create a dual-role driver.

if USB_DWC2

config USB_DWC2_DEBUG
bool "Enable Debugging Messages"
help
Say Y here to enable debugging messages in the DWC2 Driver.

config USB_DWC2_VERBOSE
bool "Enable Verbose Debugging Messages"
depends on USB_DWC2_DEBUG
help
Say Y here to enable verbose debugging messages in the DWC2 Driver.
WARNING: Enabling this will quickly fill your message log.
If in doubt, say N.

config USB_DWC2_TRACK_MISSED_SOFS
bool "Enable Missed SOF Tracking"
help
Say Y here to enable logging of missed SOF events to the dmesg log.
WARNING: This feature is still experimental.
If in doubt, say N.

config USB_DWC2_DEBUG_PERIODIC
bool "Enable Debugging Messages For Periodic Transfers"
depends on USB_DWC2_DEBUG || USB_DWC2_VERBOSE
default y
help
Say N here to prevent (verbose) debugging messages from being
logged for periodic transfers. This allows better debugging of
non-periodic transfers, but of course the debug logs will be
incomplete. Note that this also disables some debug messages
for which the transfer type cannot be deduced.
endif
@@ -0,0 +1,25 @@
ccflags-$(CONFIG_USB_DWC2_DEBUG) += -DDEBUG
ccflags-$(CONFIG_USB_DWC2_VERBOSE) += -DVERBOSE_DEBUG

obj-$(CONFIG_USB_DWC2) += dwc2.o

dwc2-y += core.o core_intr.o

# NOTE: This driver at present only implements the Host mode
# of the controller. The existing s3c-hsotg driver supports
# Peripheral mode, but only for the Samsung S3C platforms.
# There are plans to merge the s3c-hsotg driver with this
# driver in the near future to create a dual-role driver. Once
# that is done, Host mode will become an optional feature that
# is selected with a config option.

dwc2-y += hcd.o hcd_intr.o
dwc2-y += hcd_queue.o hcd_ddma.o

ifneq ($(CONFIG_PCI),)
obj-$(CONFIG_USB_DWC2) += dwc2_pci.o
endif
obj-$(CONFIG_USB_DWC2) += dwc2_platform.o

dwc2_pci-y += pci.o
dwc2_platform-y += platform.o

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

@@ -0,0 +1,180 @@
/*
* pci.c - DesignWare HS OTG Controller PCI driver
*
* Copyright (C) 2004-2013 Synopsys, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The names of the above-listed copyright holders may not be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation; either version 2 of the License, or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
* Provides the initialization and cleanup entry points for the DWC_otg PCI
* driver
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
#define PCI_PRODUCT_ID_HAPS_HSOTG 0xabc0

static const char dwc2_driver_name[] = "dwc2";

/*
 * Core parameters used when probing the DWC2 core over PCI.
 *
 * A value of -1 appears to mean "autodetect from the hardware config
 * registers" (platform.c initializes every field to -1 via
 * dwc2_set_all_params() with the comment "Default all params to
 * autodetect" -- confirm against dwc2_set_all_params()). The fixed
 * values below are explicit settings for the Synopsys HAPS HSOTG
 * PCI reference hardware.
 */
static struct dwc2_core_params dwc2_module_params = {
	.otg_cap = -1,
	.otg_ver = -1,
	.dma_enable = -1,
	.dma_desc_enable = 0,		/* descriptor DMA explicitly off */
	.speed = -1,
	.enable_dynamic_fifo = -1,
	.en_multiple_tx_fifo = -1,
	.host_rx_fifo_size = 1024,
	.host_nperio_tx_fifo_size = 256,
	.host_perio_tx_fifo_size = 1024,
	.max_transfer_size = 65535,
	.max_packet_count = 511,
	.host_channels = -1,
	.phy_type = -1,
	.phy_utmi_width = 16,		/* 16 bits - NOT DETECTABLE */
	.phy_ulpi_ddr = -1,
	.phy_ulpi_ext_vbus = -1,
	.i2c_enable = -1,
	.ulpi_fs_ls = -1,
	.host_support_fs_ls_low_power = -1,
	.host_ls_low_power_phy_clk = -1,
	.ts_dline = -1,
	.reload_ctl = -1,
	.ahb_single = -1,
};

/**
* dwc2_driver_remove() - Called when the DWC_otg core is unregistered with the
* DWC_otg driver
*
* @dev: Bus device
*
* This routine is called, for example, when the rmmod command is executed. The
* device may or may not be electrically present. If it is present, the driver
* stops device processing. Any resources used on behalf of this device are
* freed.
*/
static void dwc2_driver_remove(struct pci_dev *dev)
{
	dev_dbg(&dev->dev, "%s(%p)\n", __func__, dev);

	/* Tear down the HCD first, then power down the PCI function. */
	dwc2_hcd_remove(pci_get_drvdata(dev));
	pci_disable_device(dev);
}

/**
* dwc2_driver_probe() - Called when the DWC_otg core is bound to the DWC_otg
* driver
*
* @dev: Bus device
*
* This routine creates the driver components required to control the device
* (core, HCD, and PCD) and initializes the device. The driver components are
* stored in a dwc2_hsotg structure. A reference to the dwc2_hsotg is saved
* in the device private data. This allows the driver to access the dwc2_hsotg
* structure on subsequent calls to driver methods for this device.
*/
static int dwc2_driver_probe(struct pci_dev *dev,
			     const struct pci_device_id *id)
{
	struct dwc2_hsotg *hsotg;
	int retval;

	dev_dbg(&dev->dev, "%s(%p)\n", __func__, dev);

	/* devm allocation: freed automatically on probe failure or unbind */
	hsotg = devm_kzalloc(&dev->dev, sizeof(*hsotg), GFP_KERNEL);
	if (!hsotg)
		return -ENOMEM;

	/* Make sure the function is fully powered before touching it */
	pci_set_power_state(dev, PCI_D0);

	hsotg->dev = &dev->dev;
	/*
	 * NOTE(review): BAR 0 is remapped here, before pci_enable_device()
	 * below. Most PCI drivers enable the device first so the BARs are
	 * guaranteed to be assigned and decoded -- confirm this ordering
	 * works on all targets.
	 */
	hsotg->regs = devm_request_and_ioremap(&dev->dev, &dev->resource[0]);
	if (!hsotg->regs)
		return -ENOMEM;

	dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n",
		(unsigned long)pci_resource_start(dev, 0), hsotg->regs);

	if (pci_enable_device(dev) < 0)
		return -ENODEV;

	/* Enable bus mastering so the core can DMA */
	pci_set_master(dev);

	retval = dwc2_hcd_init(hsotg, dev->irq, &dwc2_module_params);
	if (retval) {
		/* Undo pci_enable_device(); devm frees the rest */
		pci_disable_device(dev);
		return retval;
	}

	/* Save hsotg so remove() and other callbacks can retrieve it */
	pci_set_drvdata(dev, hsotg);
	dev_dbg(&dev->dev, "hsotg=%p\n", hsotg);

	return retval;
}

/*
 * PCI IDs this driver binds to: the Synopsys HAPS HSOTG reference
 * platform (vendor/product defined at the top of this file).
 */
static DEFINE_PCI_DEVICE_TABLE(dwc2_pci_ids) = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, PCI_PRODUCT_ID_HAPS_HSOTG),
	},
	{ /* end: all zeroes */ }
};
/* Export the ID table so udev/modprobe can autoload the module */
MODULE_DEVICE_TABLE(pci, dwc2_pci_ids);

static struct pci_driver dwc2_pci_driver = {
	.name = dwc2_driver_name,
	.id_table = dwc2_pci_ids,
	.probe = dwc2_driver_probe,
	.remove = dwc2_driver_remove,
};

/* Generates module init/exit that register/unregister the PCI driver */
module_pci_driver(dwc2_pci_driver);

MODULE_DESCRIPTION("DESIGNWARE HS OTG PCI Bus Glue");
MODULE_AUTHOR("Synopsys, Inc.");
MODULE_LICENSE("Dual BSD/GPL");
@@ -0,0 +1,148 @@
/*
* platform.c - DesignWare HS OTG Controller platform driver
*
* Copyright (C) Matthijs Kooijman <matthijs@stdin.nl>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The names of the above-listed copyright holders may not be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation; either version 2 of the License, or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

#include "core.h"
#include "hcd.h"

static const char dwc2_driver_name[] = "dwc2";

/**
* dwc2_driver_remove() - Called when the DWC_otg core is unregistered with the
* DWC_otg driver
*
* @dev: Platform device
*
* This routine is called, for example, when the rmmod command is executed. The
* device may or may not be electrically present. If it is present, the driver
* stops device processing. Any resources used on behalf of this device are
* freed.
*/
static int dwc2_driver_remove(struct platform_device *dev)
{
	/* Tear down the HCD state that probe() created for this device */
	dwc2_hcd_remove(platform_get_drvdata(dev));

	return 0;
}

/**
* dwc2_driver_probe() - Called when the DWC_otg core is bound to the DWC_otg
* driver
*
* @dev: Platform device
*
* This routine creates the driver components required to control the device
* (core, HCD, and PCD) and initializes the device. The driver components are
* stored in a dwc2_hsotg structure. A reference to the dwc2_hsotg is saved
* in the device private data. This allows the driver to access the dwc2_hsotg
* structure on subsequent calls to driver methods for this device.
*/
static int dwc2_driver_probe(struct platform_device *dev)
{
	struct dwc2_hsotg *hsotg;
	struct resource *res;
	int retval;
	int irq;
	struct dwc2_core_params params;

	/* Default all params to autodetect */
	dwc2_set_all_params(&params, -1);

	/* devm allocation: freed automatically on probe failure or unbind */
	hsotg = devm_kzalloc(&dev->dev, sizeof(*hsotg), GFP_KERNEL);
	if (!hsotg)
		return -ENOMEM;

	hsotg->dev = &dev->dev;

	/*
	 * Use reasonable defaults so platforms don't have to provide these.
	 */
	if (!dev->dev.dma_mask)
		dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
	if (!dev->dev.coherent_dma_mask)
		dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	/* IRQ is mandatory; bail out before touching MMIO if it is absent */
	irq = platform_get_irq(dev, 0);
	if (irq < 0) {
		dev_err(&dev->dev, "missing IRQ resource\n");
		return -EINVAL;
	}

	/* Map the controller registers; devm_ioremap_resource checks res */
	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	hsotg->regs = devm_ioremap_resource(&dev->dev, res);
	if (IS_ERR(hsotg->regs))
		return PTR_ERR(hsotg->regs);

	dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n",
		(unsigned long)res->start, hsotg->regs);

	retval = dwc2_hcd_init(hsotg, irq, &params);
	if (retval)
		return retval;

	/* Save hsotg so remove() and other callbacks can retrieve it */
	platform_set_drvdata(dev, hsotg);

	return retval;
}

/* Device-tree match table: binds to nodes with compatible "snps,dwc2" */
static const struct of_device_id dwc2_of_match_table[] = {
	{ .compatible = "snps,dwc2" },
	{},	/* sentinel */
};
/* Export the table so udev/modprobe can autoload the module */
MODULE_DEVICE_TABLE(of, dwc2_of_match_table);

static struct platform_driver dwc2_platform_driver = {
	.driver = {
		/*
		 * No cast needed: device_driver.name is const char *, so
		 * casting away the const of dwc2_driver_name[] only hid a
		 * const-correctness wart.
		 */
		.name = dwc2_driver_name,
		.of_match_table = dwc2_of_match_table,
	},
	.probe = dwc2_driver_probe,
	.remove = dwc2_driver_remove,
};

/* Generates module init/exit that register/unregister the driver */
module_platform_driver(dwc2_platform_driver);

MODULE_DESCRIPTION("DESIGNWARE HS OTG Platform Glue");
MODULE_AUTHOR("Matthijs Kooijman <matthijs@stdin.nl>");
MODULE_LICENSE("Dual BSD/GPL");
@@ -1,6 +1,8 @@
config ZCACHE
bool "Dynamic compression of swap pages and clean pagecache pages"
depends on (CLEANCACHE || FRONTSWAP) && CRYPTO=y && ZSMALLOC=y
# X86 dependency is because zsmalloc uses non-portable pte/tlb
# functions
depends on (CLEANCACHE || FRONTSWAP) && CRYPTO=y
select CRYPTO_LZO
default n
help
@@ -73,32 +73,47 @@ void tmem_register_pamops(struct tmem_pamops *m)
*/

/* searches for object==oid in pool, returns locked object if found */
static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
struct tmem_oid *oidp)
static struct tmem_obj
*__tmem_obj_find(struct tmem_hashbucket*hb, struct tmem_oid *oidp,
struct rb_node **parent, struct rb_node ***link)
{
struct rb_node *rbnode;
struct tmem_obj *obj;

rbnode = hb->obj_rb_root.rb_node;
while (rbnode) {
BUG_ON(RB_EMPTY_NODE(rbnode));
obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
struct rb_node *_parent = NULL, **rbnode;
struct tmem_obj *obj = NULL;

rbnode = &hb->obj_rb_root.rb_node;
while (*rbnode) {
BUG_ON(RB_EMPTY_NODE(*rbnode));
_parent = *rbnode;
obj = rb_entry(*rbnode, struct tmem_obj,
rb_tree_node);
switch (tmem_oid_compare(oidp, &obj->oid)) {
case 0: /* equal */
case 0:
goto out;
case -1:
rbnode = rbnode->rb_left;
rbnode = &(*rbnode)->rb_left;
break;
case 1:
rbnode = rbnode->rb_right;
rbnode = &(*rbnode)->rb_right;
break;
}
}

if (parent)
*parent = _parent;
if (link)
*link = rbnode;
obj = NULL;
out:
return obj;
}

/* searches for object==oid in pool, returns locked object if found */
static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
struct tmem_oid *oidp)
{
return __tmem_obj_find(hb, oidp, NULL, NULL);
}

static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *);

/* free an object that has no more pampds in it */
@@ -131,8 +146,7 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
struct tmem_oid *oidp)
{
struct rb_root *root = &hb->obj_rb_root;
struct rb_node **new = &(root->rb_node), *parent = NULL;
struct tmem_obj *this;
struct rb_node **new = NULL, *parent = NULL;

BUG_ON(pool == NULL);
atomic_inc(&pool->obj_count);
@@ -144,22 +158,10 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
obj->pampd_count = 0;
(*tmem_pamops.new_obj)(obj);
SET_SENTINEL(obj, OBJ);
while (*new) {
BUG_ON(RB_EMPTY_NODE(*new));
this = rb_entry(*new, struct tmem_obj, rb_tree_node);
parent = *new;
switch (tmem_oid_compare(oidp, &this->oid)) {
case 0:
BUG(); /* already present; should never happen! */
break;
case -1:
new = &(*new)->rb_left;
break;
case 1:
new = &(*new)->rb_right;
break;
}
}

if (__tmem_obj_find(hb, oidp, &parent, &new))
BUG();

rb_link_node(&obj->rb_tree_node, parent, new);
rb_insert_color(&obj->rb_tree_node, root);
}
0 drivers/staging/zcache/tmem.h 100644 → 100755
Empty file.

Large diffs are not rendered by default.

17 drivers/staging/zram/Kconfig 100644 → 100755
@@ -1,6 +1,12 @@
config ZRAM
tristate "Compressed RAM block device support"
default y
# X86 dependency is because zsmalloc uses non-portable pte/tlb
# functions
depends on BLOCK && SYSFS && X86
select ZSMALLOC
select LZO_COMPRESS
select LZO_DECOMPRESS
default n
help
Creates virtual block devices called /dev/zramX (X = 0, 1, ...).
Pages written to these disks are compressed and stored in memory
@@ -11,7 +17,7 @@ config ZRAM
disks and maybe many more.

See zram.txt for more information.
Project home: <https://compcache.googlecode.com/>
Project home: http://compcache.googlecode.com/

config ZRAM_DEBUG
bool "Compressed RAM block device debug support"
@@ -20,3 +26,10 @@ config ZRAM_DEBUG
help
This option adds additional debugging code to the compressed
RAM block device driver.

config ZRAM_FOR_ANDROID
bool "Optimize zram behavior for android"
depends on ZRAM && ANDROID
default n
help
This option enables modified zram behavior optimized for android
0 drivers/staging/zram/Makefile 100644 → 100755
Empty file.
0 drivers/staging/zram/zram.txt 100644 → 100755
Empty file.

Large diffs are not rendered by default.

@@ -26,6 +26,18 @@
*/
static const unsigned max_num_devices = 32;

/*
* Stored at beginning of each compressed object.
*
* It stores back-reference to table entry which points to this
* object. This is required to support memory defragmentation.
*/
struct zobj_header {
#if 0
u32 table_idx;
#endif
};

/*-- Configurable parameters */

/* Default zram disk size: 25% of total RAM */
@@ -39,8 +51,8 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;

/*
* NOTE: max_zpage_size must be less than or equal to:
* ZS_MAX_ALLOC_SIZE. Otherwise, zs_malloc() would
* always return failure.
* ZS_MAX_ALLOC_SIZE - sizeof(struct zobj_header)
* otherwise, xv_malloc() would always return failure.
*/

/*-- End of configurable params */
@@ -56,6 +68,9 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;

/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
/* Page is stored uncompressed */
ZRAM_UNCOMPRESSED,

/* Page consists entirely of zeros */
ZRAM_ZERO,

@@ -66,11 +81,11 @@ enum zram_pageflags {

/* Allocated for each disk page */
struct table {
unsigned long handle;
void *handle;
u16 size; /* object size (excluding header) */
u8 count; /* object ref count (not yet used) */
u8 flags;
} __aligned(4);
} __attribute__((aligned(4)));

struct zram_stats {
u64 compr_size; /* compressed size of pages stored */
@@ -83,7 +98,12 @@ struct zram_stats {
u32 pages_zero; /* no. of zero filled pages */
u32 pages_stored; /* no. of pages currently stored */
u32 good_compress; /* % of pages with compression ratio<=50% */
u32 bad_compress; /* % of pages with compression ratio>=75% */
u32 pages_expand; /* % of incompressible pages */
};

struct zram_slot_free {
unsigned long index;
struct zram_slot_free *next;
};

struct zram {
@@ -92,8 +112,9 @@ struct zram {
void *compress_buffer;
struct table *table;
spinlock_t stat64_lock; /* protect 64-bit stats */
struct rw_semaphore lock; /* protect compression buffers and table
* against concurrent read and writes */
struct rw_semaphore lock; /* protect compression buffers, table,
* 32bit stat counters against concurrent
* notifications, reads and writes */
struct request_queue *queue;
struct gendisk *disk;
int init_done;
@@ -104,6 +125,7 @@ struct zram {
* we can store in a disk.
*/
u64 disksize; /* bytes */
spinlock_t slot_free_lock;

struct zram_stats stats;
};
@@ -15,7 +15,6 @@
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/mm.h>
#include <linux/kernel.h>

#include "zram_drv.h"

@@ -46,20 +45,21 @@ static ssize_t disksize_show(struct device *dev,
static ssize_t disksize_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
int ret;
u64 disksize;
struct zram *zram = dev_to_zram(dev);

disksize = memparse(buf, NULL);
if (!disksize)
return -EINVAL;
ret = kstrtoull(buf, 10, &disksize);
if (ret)
return ret;

down_write(&zram->init_lock);
if (zram->init_done) {
up_write(&zram->init_lock);
pr_info("Cannot change disksize for initialized device\n");
return -EBUSY;
}
#ifdef CONFIG_ZRAM
#ifdef CONFIG_ZRAM_FOR_ANDROID
if (!disksize) {
disksize = default_disksize_perc_ram *
((totalram_pages << PAGE_SHIFT) / 100);
@@ -91,27 +91,38 @@ static ssize_t reset_store(struct device *dev,
zram = dev_to_zram(dev);
bdev = bdget_disk(zram->disk, 0);

if (!bdev)
return -ENOMEM;

/* Do not reset an active device! */
if (bdev->bd_holders)
return -EBUSY;
if (bdev->bd_holders) {
ret = -EBUSY;
goto out;
}

ret = kstrtou16(buf, 10, &do_reset);
if (ret)
return ret;
goto out;

if (!do_reset)
return -EINVAL;
if (!do_reset) {
ret = -EINVAL;
goto out;
}

/* Make sure all pending I/O is finished */
if (bdev)
fsync_bdev(bdev);
fsync_bdev(bdev);
bdput(bdev);

down_write(&zram->init_lock);
if (zram->init_done)
__zram_reset_device(zram);
up_write(&zram->init_lock);

return len;

out:
bdput(bdev);
return ret;
}

static ssize_t num_reads_show(struct device *dev,
3 drivers/staging/zsmalloc/Kconfig 100644 → 100755
@@ -1,5 +1,6 @@
config ZSMALLOC
bool "Memory allocator for compressed pages"
tristate "Memory allocator for compressed pages"
depends on MMU
default n
help
zsmalloc is a slab-based memory allocator designed to store
0 drivers/staging/zsmalloc/Makefile 100644 → 100755
Empty file.

Large diffs are not rendered by default.

@@ -18,17 +18,24 @@
/*
* zsmalloc mapping modes
*
* NOTE: These only make a difference when a mapped object spans pages
* NOTE: These only make a difference when a mapped object spans pages.
* They also have no effect when PGTABLE_MAPPING is selected.
*/
enum zs_mapmode {
ZS_MM_RW, /* normal read-write mapping */
ZS_MM_RO, /* read-only (no copy-out at unmap time) */
ZS_MM_WO /* write-only (no copy-in at map time) */
};
/*
* NOTE: ZS_MM_WO should only be used for initializing new
* (uninitialized) allocations. Partial writes to already
* initialized allocations should use ZS_MM_RW to preserve the
* existing data.
*/

struct zs_pool;

struct zs_pool *zs_create_pool(gfp_t flags);
struct zs_pool *zs_create_pool(const char *name, gfp_t flags);
void zs_destroy_pool(struct zs_pool *pool);

unsigned long zs_malloc(struct zs_pool *pool, size_t size);
@@ -0,0 +1,155 @@
/*
* zsmalloc memory allocator
*
* Copyright (C) 2011 Nitin Gupta
*
* This code is released using a dual license strategy: BSD/GPL
* You can choose the license that better fits your requirements.
*
* Released under the terms of 3-clause BSD License
* Released under the terms of GNU General Public License Version 2.0
*/

#ifndef _ZS_MALLOC_INT_H_
#define _ZS_MALLOC_INT_H_

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/*
* This must be power of 2 and greater than of equal to sizeof(link_free).
* These two conditions ensure that any 'struct link_free' itself doesn't
* span more than 1 page which avoids complex case of mapping 2 pages simply
* to restore link_free pointer values.
*/
#define ZS_ALIGN 8

/*
* A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
* pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
*/
#define ZS_MAX_ZSPAGE_ORDER 2
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)

/*
* Object location (<PFN>, <obj_idx>) is encoded as
* as single (void *) handle value.
*
* Note that object index <obj_idx> is relative to system
* page <PFN> it is stored in, so for each sub-page belonging
* to a zspage, obj_idx starts with 0.
*
* This is made more complicated by various memory models and PAE.
*/

#ifndef MAX_PHYSMEM_BITS
#ifdef CONFIG_HIGHMEM64G
#define MAX_PHYSMEM_BITS 36
#else /* !CONFIG_HIGHMEM64G */
/*
* If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
* be PAGE_SHIFT
*/
#define MAX_PHYSMEM_BITS BITS_PER_LONG
#endif
#endif
#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS)
#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)

#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
#define ZS_MAX_ALLOC_SIZE PAGE_SIZE

/*
* On systems with 4K page size, this gives 254 size classes! There is a
* trader-off here:
* - Large number of size classes is potentially wasteful as free page are
* spread across these classes
* - Small number of size classes causes large internal fragmentation
* - Probably its better to use specific size classes (empirically
* determined). NOTE: all those class sizes must be set as multiple of
* ZS_ALIGN to make sure link_free itself never has to span 2 pages.
*
* ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
* (reason above)
*/
#define ZS_SIZE_CLASS_DELTA 16
#define ZS_SIZE_CLASSES ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
ZS_SIZE_CLASS_DELTA + 1)

/*
* We do not maintain any list for completely empty or full pages
*/
enum fullness_group {
ZS_ALMOST_FULL,
ZS_ALMOST_EMPTY,
_ZS_NR_FULLNESS_GROUPS,

ZS_EMPTY,
ZS_FULL
};

/*
* We assign a page to ZS_ALMOST_EMPTY fullness group when:
* n <= N / f, where
* n = number of allocated objects
* N = total number of objects zspage can store
* f = 1/fullness_threshold_frac
*
* Similarly, we assign zspage to:
* ZS_ALMOST_FULL when n > N / f
* ZS_EMPTY when n == 0
* ZS_FULL when n == N
*
* (see: fix_fullness_group())
*/
static const int fullness_threshold_frac = 4;

struct mapping_area {
char *vm_buf; /* copy buffer for objects that span pages */
char *vm_addr; /* address of kmap_atomic()'ed pages */
enum zs_mapmode vm_mm; /* mapping mode */
};

struct size_class {
/*
* Size of objects stored in this class. Must be multiple
* of ZS_ALIGN.
*/
int size;
unsigned int index;

/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
int pages_per_zspage;

spinlock_t lock;

/* stats */
u64 pages_allocated;

struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
};

/*
* Placed within free objects to form a singly linked list.
* For every zspage, first_page->freelist gives head of this list.
*
* This must be power of 2 and less than or equal to ZS_ALIGN
*/
struct link_free {
/* Handle of next free chunk (encodes <PFN, obj_idx>) */
void *next;
};

struct zs_pool {
struct size_class size_class[ZS_SIZE_CLASSES];

gfp_t flags; /* allocation flags used when growing pool */
const char *name;
};

#endif
@@ -302,7 +302,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
/* only warn once */
if (__this_cpu_read(soft_watchdog_warn) == true)
return HRTIMER_RESTART;
#ifdef CONFIG_MACH_MSM8960_MMI
#ifdef CONFIG_MACH_JFDT
touch_hw_watchdog();
#endif
printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
@@ -18,9 +18,9 @@
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/aio.h>
#include <linux/blkdev.h>
#include <linux/frontswap.h>
#include <asm/pgtable.h>

static struct bio *get_swap_bio(gfp_t gfp_flags,
@@ -85,6 +85,7 @@
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/types.h>

@@ -79,12 +79,16 @@ static u64 zswap_duplicate_entry;
**********************************/
/* Enable/disable zswap (enabled by default, fixed at boot for now) */
static bool zswap_enabled = 1;
module_param_named(enabled, zswap_enabled, bool, 0);
module_param_named(enabled, zswap_enabled, bool, 0644);

/* Compressor to be used by zswap (fixed at boot for now) */
#ifdef CONFIG_CRYPTO_LZ4
#define ZSWAP_COMPRESSOR_DEFAULT "lz4"
#else
#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
#endif
static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
module_param_named(compressor, zswap_compressor, charp, 0);
module_param_named(compressor, zswap_compressor, charp, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
@@ -371,18 +375,18 @@ static int zswap_cpu_init(void)
{
unsigned long cpu;

cpu_notifier_register_begin();
get_online_cpus();
for_each_online_cpu(cpu)
if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK)
goto cleanup;
__register_cpu_notifier(&zswap_cpu_notifier_block);
cpu_notifier_register_done();
register_cpu_notifier(&zswap_cpu_notifier_block);
put_online_cpus();
return 0;

cleanup:
for_each_online_cpu(cpu)
__zswap_cpu_notifier(CPU_UP_CANCELED, cpu);
cpu_notifier_register_done();
put_online_cpus();
return -ENOMEM;
}

@@ -887,16 +891,6 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
/* update stats */
atomic_inc(&zswap_stored_pages);

/* Debugging code for zswap kernel panic */
{
/* check whether page is file page */
if (!PageAnon(page) && !PageSwapCache(page)) {
struct address_space *mapping = page_file_mapping(page);
printk(KERN_ALERT
"BUG: file page is swapped out (mapping = %p)\n", mapping);
}
}

return 0;

freepage: