mm: use helper functions for allocating and freeing vm_area structs
The vm_area_struct is one of the most fundamental memory management
objects, but the management of it is entirely open-coded everywhere,
ranging from allocation and freeing (using kmem_cache_[z]alloc and
kmem_cache_free) to initializing all the fields.

We want to unify this in order to eventually end up with unified
initialization of the vmas, and the first step toward that is to at
least have basic allocation functions.

Right now those functions are literally just wrappers around the
kmem_cache_*() calls.  This is a purely mechanical conversion:

    # new vma:
    kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL) -> vm_area_alloc()

    # copy old vma
    kmem_cache_alloc(vm_area_cachep, GFP_KERNEL) -> vm_area_dup(old)

    # free vma
    kmem_cache_free(vm_area_cachep, vma) -> vm_area_free(vma)

to the point where the old vma passed in to the vm_area_dup() function
isn't even used yet (because I've left all the old manual initialization
alone).

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
torvalds committed Jul 21, 2018
1 parent 191a3af commit 3928d4f
Showing 7 changed files with 44 additions and 27 deletions.
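
For quick reference, the three helpers added to kernel/fork.c are nothing more than the following thin wrappers (condensed from that file's hunk below; the comments are editorial):

    /* Allocate a zeroed vm_area_struct for a brand-new vma. */
    struct vm_area_struct *vm_area_alloc(void)
    {
            return kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
    }

    /*
     * Allocate a vma that will become a copy of 'orig'. The argument is
     * deliberately unused for now; callers still copy the fields by hand.
     */
    struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
    {
            return kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
    }

    /* Return a vma to the slab cache. */
    void vm_area_free(struct vm_area_struct *vma)
    {
            kmem_cache_free(vm_area_cachep, vma);
    }

Because vm_area_dup() does not yet look at its argument, call sites that previously used kmem_cache_alloc() keep their explicit structure copy (e.g. *tmp = *mpnt in dup_mmap()).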
4 changes: 2 additions & 2 deletions arch/ia64/kernel/perfmon.c
@@ -2278,7 +2278,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
DPRINT(("smpl_buf @%p\n", smpl_buf));

/* allocate vma */
-vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+vma = vm_area_alloc();
if (!vma) {
DPRINT(("Cannot allocate vma\n"));
goto error_kmem;
@@ -2346,7 +2346,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
return 0;

error:
-kmem_cache_free(vm_area_cachep, vma);
+vm_area_free(vma);
error_kmem:
pfm_rvfree(smpl_buf, size);

8 changes: 4 additions & 4 deletions arch/ia64/mm/init.c
@@ -114,7 +114,7 @@ ia64_init_addr_space (void)
* the problem. When the process attempts to write to the register backing store
* for the first time, it will get a SEGFAULT in this case.
*/
-vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+vma = vm_area_alloc();
if (vma) {
INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_mm = current->mm;
@@ -125,15 +125,15 @@ ia64_init_addr_space (void)
down_write(&current->mm->mmap_sem);
if (insert_vm_struct(current->mm, vma)) {
up_write(&current->mm->mmap_sem);
-kmem_cache_free(vm_area_cachep, vma);
+vm_area_free(vma);
return;
}
up_write(&current->mm->mmap_sem);
}

/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
if (!(current->personality & MMAP_PAGE_ZERO)) {
-vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+vma = vm_area_alloc();
if (vma) {
INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_mm = current->mm;
@@ -144,7 +144,7 @@ ia64_init_addr_space (void)
down_write(&current->mm->mmap_sem);
if (insert_vm_struct(current->mm, vma)) {
up_write(&current->mm->mmap_sem);
-kmem_cache_free(vm_area_cachep, vma);
+vm_area_free(vma);
return;
}
up_write(&current->mm->mmap_sem);
4 changes: 2 additions & 2 deletions fs/exec.c
@@ -290,7 +290,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
struct vm_area_struct *vma = NULL;
struct mm_struct *mm = bprm->mm;

-bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+bprm->vma = vma = vm_area_alloc();
if (!vma)
return -ENOMEM;

@@ -326,7 +326,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
up_write(&mm->mmap_sem);
err_free:
bprm->vma = NULL;
-kmem_cache_free(vm_area_cachep, vma);
+vm_area_free(vma);
return err;
}

4 changes: 3 additions & 1 deletion include/linux/mm.h
@@ -155,7 +155,9 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
* mmap() functions).
*/

-extern struct kmem_cache *vm_area_cachep;
+struct vm_area_struct *vm_area_alloc(void);
+struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
+void vm_area_free(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
21 changes: 18 additions & 3 deletions kernel/fork.c
@@ -303,11 +303,26 @@ struct kmem_cache *files_cachep;
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
-struct kmem_cache *vm_area_cachep;
+static struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

+struct vm_area_struct *vm_area_alloc(void)
+{
+return kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+}
+
+struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
+{
+return kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+}
+
+void vm_area_free(struct vm_area_struct *vma)
+{
+kmem_cache_free(vm_area_cachep, vma);
+}

static void account_kernel_stack(struct task_struct *tsk, int account)
{
void *stack = task_stack_page(tsk);
@@ -455,7 +470,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
goto fail_nomem;
charge = len;
}
-tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+tmp = vm_area_dup(mpnt);
if (!tmp)
goto fail_nomem;
*tmp = *mpnt;
@@ -539,7 +554,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
fail_nomem_anon_vma_fork:
mpol_put(vma_policy(tmp));
fail_nomem_policy:
-kmem_cache_free(vm_area_cachep, tmp);
+vm_area_free(tmp);
fail_nomem:
retval = -ENOMEM;
vm_unacct_memory(charge);
22 changes: 11 additions & 11 deletions mm/mmap.c
@@ -182,7 +182,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
if (vma->vm_file)
fput(vma->vm_file);
mpol_put(vma_policy(vma));
-kmem_cache_free(vm_area_cachep, vma);
+vm_area_free(vma);
return next;
}

@@ -911,7 +911,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
anon_vma_merge(vma, next);
mm->map_count--;
mpol_put(vma_policy(next));
-kmem_cache_free(vm_area_cachep, next);
+vm_area_free(next);
/*
* In mprotect's case 6 (see comments on vma_merge),
* we must remove another next too. It would clutter
@@ -1729,7 +1729,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
* specific mapper. the address has already been validated, but
* not unmapped, but the maps are removed from the list.
*/
-vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+vma = vm_area_alloc();
if (!vma) {
error = -ENOMEM;
goto unacct_error;
@@ -1832,7 +1832,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
if (vm_flags & VM_DENYWRITE)
allow_write_access(file);
free_vma:
-kmem_cache_free(vm_area_cachep, vma);
+vm_area_free(vma);
unacct_error:
if (charged)
vm_unacct_memory(charged);
@@ -2620,7 +2620,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
return err;
}

-new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+new = vm_area_dup(vma);
if (!new)
return -ENOMEM;

@@ -2669,7 +2669,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
out_free_mpol:
mpol_put(vma_policy(new));
out_free_vma:
-kmem_cache_free(vm_area_cachep, new);
+vm_area_free(new);
return err;
}

@@ -2984,7 +2984,7 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
/*
* create a vma struct for an anonymous mapping
*/
-vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+vma = vm_area_alloc();
if (!vma) {
vm_unacct_memory(len >> PAGE_SHIFT);
return -ENOMEM;
@@ -3202,7 +3202,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
}
*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
} else {
-new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+new_vma = vm_area_dup(vma);
if (!new_vma)
goto out;
*new_vma = *vma;
@@ -3226,7 +3226,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
out_free_mempol:
mpol_put(vma_policy(new_vma));
out_free_vma:
-kmem_cache_free(vm_area_cachep, new_vma);
+vm_area_free(new_vma);
out:
return NULL;
}
@@ -3350,7 +3350,7 @@ static struct vm_area_struct *__install_special_mapping(
int ret;
struct vm_area_struct *vma;

-vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+vma = vm_area_alloc();
if (unlikely(vma == NULL))
return ERR_PTR(-ENOMEM);

@@ -3376,7 +3376,7 @@ static struct vm_area_struct *__install_special_mapping(
return vma;

out:
-kmem_cache_free(vm_area_cachep, vma);
+vm_area_free(vma);
return ERR_PTR(ret);
}

8 changes: 4 additions & 4 deletions mm/nommu.c
@@ -769,7 +769,7 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
if (vma->vm_file)
fput(vma->vm_file);
put_nommu_region(vma->vm_region);
-kmem_cache_free(vm_area_cachep, vma);
+vm_area_free(vma);
}

/*
@@ -1204,7 +1204,7 @@ unsigned long do_mmap(struct file *file,
if (!region)
goto error_getting_region;

-vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+vma = vm_area_alloc();
if (!vma)
goto error_getting_vma;

@@ -1368,7 +1368,7 @@ unsigned long do_mmap(struct file *file,
kmem_cache_free(vm_region_jar, region);
if (vma->vm_file)
fput(vma->vm_file);
-kmem_cache_free(vm_area_cachep, vma);
+vm_area_free(vma);
return ret;

sharing_violation:
@@ -1469,7 +1469,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
if (!region)
return -ENOMEM;

-new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+new = vm_area_dup(vma);
if (!new) {
kmem_cache_free(vm_region_jar, region);
return -ENOMEM;
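
For completeness, here is a condensed, hypothetical sketch of the call-site pattern the hunks above converge on (the function name and surrounding error handling are illustrative, not taken from the commit):

    /* Illustrative only: allocate, hand-initialize, insert, and clean up a vma. */
    static int example_install_vma(struct mm_struct *mm)
    {
            struct vm_area_struct *vma;

            vma = vm_area_alloc();          /* was kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL) */
            if (!vma)
                    return -ENOMEM;

            /* Field initialization is still open-coded after this commit. */
            INIT_LIST_HEAD(&vma->anon_vma_chain);
            vma->vm_mm = mm;

            if (insert_vm_struct(mm, vma)) {
                    vm_area_free(vma);      /* was kmem_cache_free(vm_area_cachep, vma) */
                    return -ENOMEM;
            }
            return 0;
    }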
