Revert lowmem and vmalloc patches
Revert "mm: make is_vmalloc_addr lockless"

This reverts commit 675539b2812b0db12490210ed079185f933a1b3b.

Revert "Kconfig: Add menu choice option to reclaim virtual memory"

This reverts commit a447197635ef394d8e9270bbcc7049f8beb20e75.

Revert "Kconfig: Add config option to support vmalloc savings"

This reverts commit f5130adb4c3abd7cbe977f7304bf6fb3ced84fc7.

Revert "mm: Update is_vmalloc_addr to account for vmalloc savings"

This reverts commit 62da64efd769329b19ca1602d5bb5c4ff564a682.

Revert "msm: Increase the kernel virtual area to include lowmem"

This reverts commit 274df3aaf5836c671823318d20ccfac962b4e411.

Revert "msm: Allow lowmem to be non contiguous and mixed."

This reverts commit 258fe02b4797883aef9823a8d1c336ada71a7516.
arco authored and Christopher83 committed Jan 18, 2014
1 parent 56bf48f commit 072719f
Showing 5 changed files with 13 additions and 168 deletions.
30 changes: 1 addition & 29 deletions arch/arm/Kconfig
@@ -1908,37 +1908,9 @@ config ARCH_MEMORY_REMOVE
 config ENABLE_DMM
 	def_bool n
 
-choice
-	prompt "Virtual Memory Reclaim"
-	default NO_VM_RECLAIM
-	help
-	  Select the method of reclaiming virtual memory
-
 config DONT_MAP_HOLE_AFTER_MEMBANK0
 	def_bool n
 	depends on SPARSEMEM
-	bool "Map around the largest hole"
-	help
-	  Do not map the memory belonging to the largest hole
-	  into the virtual space. This results in more lowmem.
-	  If multiple holes are present, only the largest hole
-	  in the first 256MB of memory is not mapped.
-
-config ENABLE_VMALLOC_SAVING
-	bool "Reclaim memory for each subsystem"
-	help
-	  Enable this config to reclaim the virtual space belonging
-	  to any subsystem which is expected to have a lifetime of
-	  the entire system. This feature allows lowmem to be non-
-	  contiguous.
-
-config NO_VM_RECLAIM
-	bool "Do not reclaim memory"
-	help
-	  Do not reclaim any memory. This might result in less lowmem
-	  and wasting virtual memory space which could otherwise be
-	  reclaimed by using any of the other two config options.
-
-endchoice
+
 config HOLES_IN_ZONE
 	def_bool n
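A note on the construct being removed: a Kconfig "choice" block makes its entries mutually exclusive, so exactly one of the three symbols above was defined per build. A hedged sketch of how such symbols are typically consumed from C (the real guards appear in the mm/vmalloc.c hunks below; the comments only paraphrase the help texts):

    #if defined(CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0)
    /* leave the largest hole below 256MB unmapped to gain lowmem */
    #elif defined(CONFIG_ENABLE_VMALLOC_SAVING)
    /* reclaim per-subsystem regions; lowmem may become non-contiguous */
    #else
    /* CONFIG_NO_VM_RECLAIM: reclaim nothing */
    #endif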
50 changes: 5 additions & 45 deletions arch/arm/mm/mmu.c
@@ -808,7 +808,6 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 {
 	struct map_desc *md;
 	struct vm_struct *vm;
-	int rc = 0;
 
 	if (!nr)
 		return;
@@ -819,13 +818,11 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(md);
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
-		vm->phys_addr = __pfn_to_phys(md->pfn);
-		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+		vm->phys_addr = __pfn_to_phys(md->pfn);
+		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
-		rc = vm_area_check_early(vm);
-		if (!rc)
-			vm_area_add_early(vm++);
+		vm_area_add_early(vm++);
 	}
 }
 
@@ -1334,21 +1331,12 @@ EXPORT_SYMBOL(mem_text_write_kernel_word);
 static void __init map_lowmem(void)
 {
 	struct memblock_region *reg;
-	struct vm_struct *vm;
-	phys_addr_t start;
-	phys_addr_t end;
-	unsigned long vaddr;
-	unsigned long pfn;
-	unsigned long length;
-	unsigned int type;
-	int nr = 0;
 
 	/* Map all the lowmem memory banks. */
 	for_each_memblock(memory, reg) {
+		phys_addr_t start = reg->base;
+		phys_addr_t end = start + reg->size;
 		struct map_desc map;
-		nr++;
-		start = reg->base;
-		end = start + reg->size;
 
 		if (end > arm_lowmem_limit)
 			end = arm_lowmem_limit;
@@ -1401,34 +1389,6 @@ static void __init map_lowmem(void)
 
 		create_mapping(&map);
 	}
-
-	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
-
-	for_each_memblock(memory, reg) {
-
-		start = reg->base;
-		end = start + reg->size;
-
-		if (end > arm_lowmem_limit)
-			end = arm_lowmem_limit;
-		if (start >= end)
-			break;
-
-		pfn = __phys_to_pfn(start);
-		vaddr = __phys_to_virt(start);
-		length = end - start;
-		type = MT_MEMORY;
-
-		vm->addr = (void *)(vaddr & PAGE_MASK);
-		vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
-		vm->phys_addr = __pfn_to_phys(pfn);
-		vm->flags = VM_LOWMEM | VM_ARM_STATIC_MAPPING;
-		vm->flags |= VM_ARM_MTYPE(type);
-		vm->caller = map_lowmem;
-		vm_area_add_early(vm);
-		mark_vmalloc_reserved_area(vm->addr, vm->size);
-		vm++;
-	}
 }
 
 /*
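The second for_each_memblock() loop deleted above registered every lowmem block with vm_area_add_early() using the ARM linear-map relation between a physical address and its fixed kernel virtual alias. A minimal sketch of that arithmetic, assuming illustrative PHYS_OFFSET/PAGE_OFFSET values rather than anything from this tree:

    /* Sketch only: the offsets are typical ARM examples, not from this tree. */
    #define PHYS_OFFSET 0x80000000UL
    #define PAGE_OFFSET 0xC0000000UL
    #define PAGE_SHIFT  12

    /* lowmem virtual alias of a physical address, as used via __phys_to_virt() */
    #define __phys_to_virt(p)  ((unsigned long)(p) - PHYS_OFFSET + PAGE_OFFSET)
    /* page frame number of a physical address, as used via __phys_to_pfn() */
    #define __phys_to_pfn(p)   ((unsigned long)(p) >> PAGE_SHIFT)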
12 changes: 6 additions & 6 deletions include/linux/mm.h
@@ -302,16 +302,16 @@ unsigned long vmalloc_to_pfn(const void *addr);
  * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
  * is no special casing required.
  */
-
-#ifdef CONFIG_MMU
-extern int is_vmalloc_addr(const void *x);
-#else
 static inline int is_vmalloc_addr(const void *x)
 {
+#ifdef CONFIG_MMU
+	unsigned long addr = (unsigned long)x;
+
+	return addr >= VMALLOC_START && addr < VMALLOC_END;
+#else
 	return 0;
-}
 #endif
-
+}
 #ifdef CONFIG_MMU
 extern int is_vmalloc_or_module_addr(const void *x);
 #else
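The revert moves is_vmalloc_addr() back to this plain inline range check. A self-contained userspace sketch of the restored behaviour, with VMALLOC_START/VMALLOC_END stubbed to illustrative ARM-style values (in the kernel they come from arch headers):

    #include <assert.h>

    #define VMALLOC_START 0xF0000000UL /* illustrative stub */
    #define VMALLOC_END   0xFF000000UL /* illustrative stub */

    static int is_vmalloc_addr(const void *x)
    {
            unsigned long addr = (unsigned long)x;

            return addr >= VMALLOC_START && addr < VMALLOC_END;
    }

    int main(void)
    {
            assert(is_vmalloc_addr((void *)0xF0001000UL));  /* inside the window */
            assert(!is_vmalloc_addr((void *)0xC0000000UL)); /* lowmem, not vmalloc */
            return 0;
    }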
8 changes: 0 additions & 8 deletions include/linux/vmalloc.h
@@ -14,7 +14,6 @@ struct vm_area_struct;	/* vma defining user mapping in mm_types.h */
 #define VM_USERMAP	0x00000008	/* suitable for remap_vmalloc_range */
 #define VM_VPAGES	0x00000010	/* buffer for pages was vmalloc'ed */
 #define VM_UNLIST	0x00000020	/* vm_struct is not listed in vmlist */
-#define VM_LOWMEM	0x00000040	/* Tracking of direct mapped lowmem */
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
@@ -134,13 +133,6 @@ extern rwlock_t vmlist_lock;
 extern struct vm_struct *vmlist;
 extern __init void vm_area_add_early(struct vm_struct *vm);
 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
-extern __init int vm_area_check_early(struct vm_struct *vm);
-#ifdef CONFIG_ENABLE_VMALLOC_SAVING
-extern void mark_vmalloc_reserved_area(void *addr, unsigned long size);
-#else
-static inline void mark_vmalloc_reserved_area(void *addr, unsigned long size)
-{ };
-#endif
 
 #ifdef CONFIG_SMP
 # ifdef CONFIG_MMU
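The removed mark_vmalloc_reserved_area() declarations used a common kernel header pattern: a real prototype when the feature is configured in, and an empty static inline otherwise, so call sites need no #ifdef of their own. A generic sketch of the pattern (the names are illustrative, not from this tree):

    #ifdef CONFIG_SOME_FEATURE
    extern void feature_hook(void *addr, unsigned long size);
    #else
    static inline void feature_hook(void *addr, unsigned long size) { }
    #endif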
81 changes: 1 addition & 80 deletions mm/vmalloc.c
@@ -272,47 +272,6 @@ static unsigned long cached_align;
 
 static unsigned long vmap_area_pcpu_hole;
 
-#ifdef CONFIG_ENABLE_VMALLOC_SAVING
-#define POSSIBLE_VMALLOC_START	PAGE_OFFSET
-
-#define VMALLOC_BITMAP_SIZE	((VMALLOC_END - PAGE_OFFSET) >> \
-					PAGE_SHIFT)
-#define VMALLOC_TO_BIT(addr)	((addr - PAGE_OFFSET) >> PAGE_SHIFT)
-#define BIT_TO_VMALLOC(i)	(PAGE_OFFSET + i * PAGE_SIZE)
-
-DECLARE_BITMAP(possible_areas, VMALLOC_BITMAP_SIZE);
-
-void mark_vmalloc_reserved_area(void *x, unsigned long size)
-{
-	unsigned long addr = (unsigned long)x;
-
-	bitmap_set(possible_areas, VMALLOC_TO_BIT(addr), size >> PAGE_SHIFT);
-}
-
-int is_vmalloc_addr(const void *x)
-{
-	unsigned long addr = (unsigned long)x;
-
-	if (addr < POSSIBLE_VMALLOC_START || addr >= VMALLOC_END)
-		return 0;
-
-	if (test_bit(VMALLOC_TO_BIT(addr), possible_areas))
-		return 0;
-
-	return 1;
-}
-#else
-int is_vmalloc_addr(const void *x)
-{
-	unsigned long addr = (unsigned long)x;
-
-	return addr >= VMALLOC_START && addr < VMALLOC_END;
-}
-#endif
-EXPORT_SYMBOL(is_vmalloc_addr);
-
-
-
 static struct vmap_area *__find_vmap_area(unsigned long addr)
 {
 	struct rb_node *n = vmap_area_root.rb_node;
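The deleted block kept one bit per page of the PAGE_OFFSET..VMALLOC_END window and set the bits covering reserved (direct-mapped) regions, which is what let the reverted "lockless" is_vmalloc_addr() answer from a bitmap instead of walking a list under a lock. A worked userspace sketch of the bit/address mapping, with illustrative constants and parenthesised macro arguments added:

    #include <assert.h>

    #define PAGE_OFFSET 0xC0000000UL /* illustrative, not from this tree */
    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)

    #define VMALLOC_TO_BIT(addr) (((addr) - PAGE_OFFSET) >> PAGE_SHIFT)
    #define BIT_TO_VMALLOC(i)    (PAGE_OFFSET + (i) * PAGE_SIZE)

    int main(void)
    {
            unsigned long addr = PAGE_OFFSET + 0x42 * PAGE_SIZE;

            assert(VMALLOC_TO_BIT(addr) == 0x42); /* page index of the address */
            assert(BIT_TO_VMALLOC(0x42) == addr); /* and back again */
            return 0;
    }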
@@ -1157,31 +1116,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 	return mem;
 }
 EXPORT_SYMBOL(vm_map_ram);
-/**
- * vm_area_check_early - check if vmap area is already mapped
- * @vm: vm_struct to be checked
- *
- * This function is used to check if the vmap area has been
- * mapped already. @vm->addr, @vm->size and @vm->flags should
- * contain proper values.
- *
- */
-int __init vm_area_check_early(struct vm_struct *vm)
-{
-	struct vm_struct *tmp, **p;
-
-	BUG_ON(vmap_initialized);
-	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
-		if (tmp->addr >= vm->addr) {
-			if (tmp->addr < vm->addr + vm->size)
-				return 1;
-		} else {
-			if (tmp->addr + tmp->size > vm->addr)
-				return 1;
-		}
-	}
-	return 0;
-}
 /**
  * vm_area_add_early - add vmap area early during boot
  * @vm: vm_struct to add
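The deleted vm_area_check_early() is an interval-overlap scan over the early vmlist: it reports whether [vm->addr, vm->addr + vm->size) intersects any already-registered area. An equivalent standalone predicate, as a sketch:

    /* Two half-open ranges [a, a + alen) and [b, b + blen) overlap iff
     * each one starts before the other one ends. */
    static int ranges_overlap(unsigned long a, unsigned long alen,
                              unsigned long b, unsigned long blen)
    {
            return a < b + blen && b < a + alen;
    }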
@@ -1457,26 +1392,15 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
  */
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
-#ifdef CONFIG_ENABLE_VMALLOC_SAVING
-	return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END,
-				  -1, GFP_KERNEL, __builtin_return_address(0));
-#else
 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
 				  -1, GFP_KERNEL, __builtin_return_address(0));
-#endif
-
 }
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
 				void *caller)
 {
-#ifdef CONFIG_ENABLE_VMALLOC_SAVING
-	return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END,
-				  -1, GFP_KERNEL, caller);
-#else
 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
-				  -1, GFP_KERNEL, __builtin_return_address(0));
-#endif
+				  -1, GFP_KERNEL, caller);
 }
 
 static struct vm_struct *find_vm_area(const void *addr)
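With the CONFIG_ENABLE_VMALLOC_SAVING branches gone, both helpers allocate strictly inside [VMALLOC_START, VMALLOC_END) again, and get_vm_area_caller() once more passes its caller argument through instead of __builtin_return_address(0). Typical use of the restored API, sketched with made-up size and flags:

    /* Illustrative only: reserve a virtual range in vmalloc space and release
     * it; get_vm_area()/free_vm_area() are the real kernel entry points. */
    struct vm_struct *area = get_vm_area(2 * PAGE_SIZE, VM_IOREMAP);

    if (area) {
            pr_info("reserved %lu bytes at %p\n", area->size, area->addr);
            free_vm_area(area);
    }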
@@ -2676,9 +2600,6 @@ static int s_show(struct seq_file *m, void *p)
 	if (v->flags & VM_VPAGES)
 		seq_printf(m, " vpages");
 
-	if (v->flags & VM_LOWMEM)
-		seq_printf(m, " lowmem");
-
 	show_numa_info(m, v);
 	seq_putc(m, '\n');
 	return 0;
