
msm: pmem: Squashed update to version from msm 2.6.35 kernel

Conflicts:

	drivers/misc/pmem.c
commit 825d8f6f184cfa5b996adcbc420a0571c95c0cf6 (1 parent: 894da50)
Authored by @arco
arch/arm/include/asm/mach/map.h (8 changed lines)
@@ -32,7 +32,13 @@ struct map_desc {
#ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int);
-struct mem_type;
+struct mem_type {
+ unsigned int prot_pte;
+ unsigned int prot_l1;
+ unsigned int prot_sect;
+ unsigned int domain;
+};
+
extern const struct mem_type *get_mem_type(unsigned int type);
/*
* external interface to remap single page with appropriate type
arch/arm/mm/mm.h (8 changed lines)
@@ -15,13 +15,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
return pmd_off(pgd_offset_k(virt), virt);
}
-struct mem_type {
- unsigned int prot_pte;
- unsigned int prot_l1;
- unsigned int prot_sect;
- unsigned int domain;
-};
-
+struct mem_type;
const struct mem_type *get_mem_type(unsigned int type);
extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
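
Note: the two arch/arm hunks above move the definition of struct mem_type out of arch/arm/mm/mm.h and into asm/mach/map.h, so code outside arch/arm/mm (such as the pmem driver below) can look up the page-protection bits of a named memory type via get_mem_type() and hand them to ioremap_page_range(). A minimal sketch of that pattern, assuming a freshly reserved VM_IOREMAP area; the helper name and error handling are illustrative only, not part of this commit:

#include <linux/io.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/mach/map.h>

/* Hypothetical helper: map 'size' bytes at physical 'base' into a reserved
 * vmalloc-space area using the protection bits of a named memory type,
 * mirroring what ioremap_pmem() does in the pmem driver below. */
static void *map_with_mem_type(unsigned long base, unsigned long size,
			       int cached)
{
	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
	const struct mem_type *type =
		get_mem_type(cached ? MT_DEVICE_CACHED : MT_DEVICE);
	unsigned long addr;

	if (!area || !type)
		return NULL;

	addr = (unsigned long)area->addr;
	if (ioremap_page_range(addr, addr + size, base,
			       __pgprot(type->prot_pte)))
		return NULL;

	/* flush so later user-space remappings don't alias stale lines */
	flush_cache_vmap(addr, addr + size);
	return area->addr;
}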
drivers/misc/pmem.c (197 changed lines)
@@ -1,7 +1,7 @@
/* drivers/android/pmem.c
*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -19,11 +19,15 @@
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
+#include <linux/mm_types.h>
#include <linux/list.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/android_pmem.h>
#include <linux/mempolicy.h>
#include <linux/kobject.h>
+#include <linux/pm_runtime.h>
#ifdef CONFIG_MEMORY_HOTPLUG
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
@@ -32,7 +36,8 @@
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
-#include <linux/pm_runtime.h>
+#include <asm/mach/map.h>
+#include <asm/page.h>
#define PMEM_MAX_USER_SPACE_DEVICES (10)
#define PMEM_MAX_KERNEL_SPACE_DEVICES (2)
@@ -168,6 +173,8 @@ struct pmem_info {
/* index of the garbage page in the pmem space */
int garbage_index;
+ /* reserved virtual address range */
+ struct vm_struct *area;
enum pmem_allocator_type allocator_type;
@@ -237,9 +244,33 @@ struct pmem_info {
long (*ioctl)(struct file *, unsigned int, unsigned long);
int (*release)(struct inode *, struct file *);
+ /* reference count of allocations */
+ atomic_t allocation_cnt;
+ /*
+ * request function for a region when the allocation count goes
+ * from 0 -> 1
+ */
+ void (*mem_request)(void *);
+ /*
+ * release function for a region when the allocation count goes
+ * from 1 -> 0
+ */
+ void (*mem_release)(void *);
+ /*
+ * private data for the request/release callback
+ */
+ void *region_data;
+ /*
+ * map and unmap as needed
+ */
+ int map_on_demand;
};
#define to_pmem_info_id(a) (container_of(a, struct pmem_info, kobj)->id)
+static void ioremap_pmem(int id);
+static void pmem_put_region(int id);
+static int pmem_get_region(int id);
+
static struct pmem_info pmem[PMEM_MAX_DEVICES];
static int id_count;
static struct {
@@ -251,7 +282,7 @@ static struct {
#ifdef CONFIG_KERNEL_PMEM_SMI_REGION
{ PMEM_KERNEL_SMI_DATA_NAME,
PMEM_MEMTYPE_SMI,
- PMEM_MEMTYPE_EBI1, /* Fall back to EBI1 automatically */
+ PMEM_INVALID_MEMTYPE, /* MUST be set invalid if no fallback */
-1 },
#endif
{ PMEM_KERNEL_EBI1_DATA_NAME,
@@ -560,6 +591,72 @@ static struct kobj_type pmem_system_ktype = {
.default_attrs = pmem_system_attrs,
};
+static int pmem_allocate_from_id(const int id, const unsigned long size,
+ const unsigned int align)
+{
+ int ret;
+ ret = pmem_get_region(id);
+
+ if (ret)
+ return -1;
+
+ ret = pmem[id].allocate(id, size, align);
+
+ if (ret < 0)
+ pmem_put_region(id);
+
+ return ret;
+}
+
+static int pmem_free_from_id(const int id, const int index)
+{
+ pmem_put_region(id);
+ return pmem[id].free(id, index);
+
+}
+
+static int pmem_get_region(int id)
+{
+ /* Must be called with arena mutex locked */
+ atomic_inc(&pmem[id].allocation_cnt);
+ if (!pmem[id].vbase) {
+ DLOG("PMEMDEBUG: mapping for %s", pmem[id].name);
+ if (pmem[id].mem_request)
+ pmem[id].mem_request(pmem[id].region_data);
+ ioremap_pmem(id);
+
+ }
+
+ if (pmem[id].vbase)
+ return 0;
+ else {
+ if (pmem[id].mem_release)
+ pmem[id].mem_release(pmem[id].region_data);
+ atomic_dec(&pmem[id].allocation_cnt);
+ return 1;
+ }
+}
+
+static void pmem_put_region(int id)
+{
+ /* Must be called with arena mutex locked */
+ if (atomic_dec_and_test(&pmem[id].allocation_cnt)) {
+ DLOG("PMEMDEBUG: unmapping for %s", pmem[id].name);
+ BUG_ON(!pmem[id].vbase);
+ if (pmem[id].map_on_demand) {
+ /* unmap_kernel_range() flushes the caches
+ * and removes the page table entries
+ */
+ unmap_kernel_range((unsigned long)pmem[id].vbase,
+ pmem[id].size);
+ pmem[id].vbase = NULL;
+ if (pmem[id].mem_release)
+ pmem[id].mem_release(pmem[id].region_data);
+
+ }
+ }
+}
+
static int get_id(struct file *file)
{
return MINOR(file->f_dentry->d_inode->i_rdev);
@@ -878,7 +975,7 @@ static int pmem_release(struct inode *inode, struct file *file)
/* if it is not a connected file and it has an allocation, free it */
if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
mutex_lock(&pmem[id].arena_mutex);
- ret = pmem[id].free(id, data->index);
+ ret = pmem_free_from_id(id, data->index);
mutex_unlock(&pmem[id].arena_mutex);
}
@@ -1556,7 +1653,7 @@ static struct vm_operations_struct vm_ops = {
static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
{
struct pmem_data *data = file->private_data;
- int index;
+ int index = -1;
unsigned long vma_size = vma->vm_end - vma->vm_start;
int ret = 0, id = get_id(file);
@@ -1593,7 +1690,7 @@ static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
/* if file->private_data == unalloced, alloc*/
if (data->index == -1) {
mutex_lock(&pmem[id].arena_mutex);
- index = pmem[id].allocate(id,
+ index = pmem_allocate_from_id(id,
vma->vm_end - vma->vm_start,
SZ_4K);
mutex_unlock(&pmem[id].arena_mutex);
@@ -2531,7 +2628,7 @@ static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
mutex_lock(&pmem[id].arena_mutex);
- data->index = pmem[id].allocate(id,
+ data->index = pmem_allocate_from_id(id,
arg,
SZ_4K);
mutex_unlock(&pmem[id].arena_mutex);
@@ -2578,9 +2675,9 @@ static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
mutex_lock(&pmem[id].arena_mutex);
- data->index = pmem[id].allocate(id,
- alloc.size,
- alloc.align);
+ data->index = pmem_allocate_from_id(id,
+ alloc.size,
+ alloc.align);
mutex_unlock(&pmem[id].arena_mutex);
ret = data->index == -1 ? -ENOMEM :
data->index;
@@ -2614,15 +2711,40 @@ static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static void ioremap_pmem(int id)
{
- if (pmem[id].cached)
- pmem[id].vbase = ioremap_cached(pmem[id].base, pmem[id].size);
-#ifdef ioremap_ext_buffered
- else if (pmem[id].buffered)
- pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
- pmem[id].size);
-#endif
- else
- pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);
+ unsigned long addr;
+ const struct mem_type *type;
+
+ DLOG("PMEMDEBUG: ioremaping for %s\n", pmem[id].name);
+ if (pmem[id].map_on_demand) {
+ addr = (unsigned long)pmem[id].area->addr;
+ if (pmem[id].cached)
+ type = get_mem_type(MT_DEVICE_CACHED);
+ else
+ type = get_mem_type(MT_DEVICE);
+ DLOG("PMEMDEBUG: Remap phys %lx to virt %lx on %s\n",
+ pmem[id].base, addr, pmem[id].name);
+ if (ioremap_page_range(addr, addr + pmem[id].size,
+ pmem[id].base, __pgprot(type->prot_pte))) {
+ pr_err("pmem: Failed to map pages\n");
+ BUG();
+ }
+ pmem[id].vbase = pmem[id].area->addr;
+ /* Flush the cache after installing page table entries to avoid
+ * aliasing when these pages are remapped to user space.
+ */
+ flush_cache_vmap(addr, addr + pmem[id].size);
+ } else {
+ if (pmem[id].cached)
+ pmem[id].vbase = ioremap_cached(pmem[id].base,
+ pmem[id].size);
+ #ifdef ioremap_ext_buffered
+ else if (pmem[id].buffered)
+ pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
+ pmem[id].size);
+ #endif
+ else
+ pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);
+ }
}
#ifdef CONFIG_MEMORY_HOTPLUG
@@ -2797,6 +2919,7 @@ int pmem_setup(struct android_pmem_platform_data *pdata,
int (*release)(struct inode *, struct file *))
{
int i, index = 0, kapi_memtype_idx = -1, id, is_kernel_memtype = 0;
+ struct vm_struct *pmem_vma = NULL;
if (id_count >= PMEM_MAX_DEVICES) {
pr_alert("pmem: %s: unable to register driver(%s) - no more "
@@ -3051,23 +3174,35 @@ int pmem_setup(struct android_pmem_platform_data *pdata,
if (pmem[id].memory_state == MEMORY_UNSTABLE_NO_MEMORY_ALLOCATED)
return 0;
- if ((!is_kernel_memtype) &&
- (pmem[id].allocator_type != PMEM_ALLOCATORTYPE_SYSTEM)) {
- ioremap_pmem(id);
- if (pmem[id].vbase == 0) {
- pr_err("pmem: ioremap failed for device %s\n",
- pmem[id].name);
- goto error_cant_remap;
+ pmem[id].map_on_demand = pdata->map_on_demand;
+ if (pmem[id].map_on_demand) {
+ pmem_vma = get_vm_area(pmem[id].size, VM_IOREMAP);
+ if (!pmem_vma) {
+ pr_err("pmem: Failed to allocate virtual space for "
+ "%s\n", pdata->name);
+ goto out_put_kobj;
}
- }
-
+ pr_err("pmem: Reserving virtual address range %lx - %lx for"
+ " %s\n", (unsigned long) pmem_vma->addr,
+ (unsigned long) pmem_vma->addr + pmem[id].size,
+ pdata->name);
+ pmem[id].area = pmem_vma;
+ } else
+ pmem[id].area = NULL;
pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));
+ atomic_set(&pmem[id].allocation_cnt, 0);
+
+ if (pdata->setup_region)
+ pmem[id].region_data = pdata->setup_region();
+
+ if (pdata->request_region)
+ pmem[id].mem_request = pdata->request_region;
+
+ if (pdata->release_region)
+ pmem[id].mem_release = pdata->release_region;
return 0;
-error_cant_remap:
- if (!is_kernel_memtype)
- misc_deregister(&pmem[id].dev);
err_cant_register_device:
out_put_kobj:
kobject_put(&pmem[id].kobj);
include/linux/android_pmem.h (18 changed lines)
@@ -156,6 +156,24 @@ struct android_pmem_platform_data
unsigned buffered;
/* This PMEM is on memory that may be powered off */
unsigned unstable;
+ /*
+ * function to be called when the number of allocations goes from
+ * 0 -> 1
+ */
+ void (*request_region)(void *);
+ /*
+ * function to be called when the number of allocations goes from
+ * 1 -> 0
+ */
+ void (*release_region)(void *);
+ /*
+ * function to be called upon pmem registration
+ */
+ void *(*setup_region)(void);
+ /*
+ * indicates that this region should be mapped/unmaped as needed
+ */
+ int map_on_demand;
};
int pmem_setup(struct android_pmem_platform_data *pdata,
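
The new platform-data fields above are consumed by pmem_setup(). A hedged sketch of how a board file might wire them up; the device name, the callback bodies, and all "example_" identifiers are hypothetical and not taken from this commit:

#include <linux/android_pmem.h>

static void *example_setup_region(void)
{
	/* Called once at pmem_setup() time; the return value is passed
	 * back as the argument of the two callbacks below. */
	return NULL;
}

static void example_request_region(void *data)
{
	/* Allocation count went 0 -> 1: power up / claim the backing
	 * memory before the region is mapped into the kernel. */
}

static void example_release_region(void *data)
{
	/* Allocation count went 1 -> 0: the region has been unmapped,
	 * so the backing memory may be powered down again. */
}

static struct android_pmem_platform_data example_pmem_pdata = {
	.name		= "pmem_example",
	.map_on_demand	= 1,
	.setup_region	= example_setup_region,
	.request_region	= example_request_region,
	.release_region	= example_release_region,
};

With map_on_demand set, pmem_setup() only reserves a VM_IOREMAP virtual range at registration time; the physical region is not mapped into the kernel until the first allocation arrives, and it is unmapped again (with release_region called) once the last allocation is freed.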