backport SLUB from Linux 3.3 (thanks to faux123)
Conflicts:
	include/linux/page-flags.h

Change-Id: I185253455ab5cb45014be09ee0058d056ceb0d47
arter97 authored and didhiy committed Feb 11, 2014
1 parent 4b3492a commit 7d944b3
Showing 5 changed files with 1,024 additions and 436 deletions.
109 changes: 67 additions & 42 deletions include/linux/mm_types.h
@@ -32,63 +32,79 @@ struct address_space;
* who is mapping it.
*/
struct page {
/* First double word block */
unsigned long flags; /* Atomic flags, some possibly
* updated asynchronously */
atomic_t _count; /* Usage count, see below. */
struct address_space *mapping; /* If low bit clear, points to
* inode address_space, or NULL.
* If page mapped as anonymous
* memory, low bit is set, and
* it points to anon_vma object:
* see PAGE_MAPPING_ANON below.
*/
/* Second double word */
struct {
union {
pgoff_t index; /* Our offset within mapping. */
void *freelist; /* slub first free object */
};

union {
/* Used for cmpxchg_double in slub */
unsigned long counters;

struct {

union {
atomic_t _mapcount; /* Count of ptes mapped in mms,
* to show when page is mapped
* & limit reverse map searches.
*/

struct {
unsigned inuse:16;
unsigned objects:15;
unsigned frozen:1;
};
};
atomic_t _count; /* Usage count, see below. */
};
};
};

/* Third double word block */
union {
/*
* Count of ptes mapped in
* mms, to show when page is
* mapped & limit reverse map
* searches.
*
* Used also for tail pages
* refcounting instead of
* _count. Tail pages cannot
* be mapped and keeping the
* tail page _count zero at
* all times guarantees
* get_page_unless_zero() will
* never succeed on tail
* pages.
*/
atomic_t _mapcount;

struct { /* SLUB */
u16 inuse;
u16 objects;
struct list_head lru; /* Pageout list, eg. active_list
* protected by zone->lru_lock !
*/
struct { /* slub per cpu partial pages */
struct page *next; /* Next partial slab */
#ifdef CONFIG_64BIT
int pages; /* Nr of partial slabs left */
int pobjects; /* Approximate # of objects */
#else
short int pages;
short int pobjects;
#endif
};
};

/* Remainder is not double word aligned */
union {
struct {
unsigned long private; /* Mapping-private opaque data:
* usually used for buffer_heads
* if PagePrivate set; used for
* swp_entry_t if PageSwapCache;
* indicates order in the buddy
* system if PG_buddy is set.
*/
struct address_space *mapping; /* If low bit clear, points to
* inode address_space, or NULL.
* If page mapped as anonymous
* memory, low bit is set, and
* it points to anon_vma object:
* see PAGE_MAPPING_ANON below.
*/
};
#if USE_SPLIT_PTLOCKS
spinlock_t ptl;
#endif
struct kmem_cache *slab; /* SLUB: Pointer to slab */
struct page *first_page; /* Compound tail pages */
};
union {
pgoff_t index; /* Our offset within mapping. */
void *freelist; /* SLUB: freelist req. slab lock */
};
struct list_head lru; /* Pageout list, eg. active_list
* protected by zone->lru_lock !
*/

/*
* On machines where all RAM is mapped into kernel address space,
* we can simply calculate the virtual address. On machines with
@@ -114,7 +130,16 @@ struct page {
*/
void *shadow;
#endif
};
}
/*
* If another subsystem starts using the double word pairing for atomic
* operations on struct page then it must change the #if to ensure
* proper alignment of the page struct.
*/
#if defined(CONFIG_SLUB) && defined(CONFIG_CMPXCHG_LOCAL)
__attribute__((__aligned__(2*sizeof(unsigned long))))
#endif
;

typedef unsigned long __nocast vm_flags_t;
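
For context (not part of the diff): the point of pairing freelist with the counters word (which overlays _mapcount, _count and the inuse/objects/frozen bits), and of the __aligned__(2*sizeof(unsigned long)) attribute, is that SLUB can then replace the free list pointer and the slab state in one double-word compare-and-exchange. A minimal sketch of that pattern, assuming kernel headers and a cmpxchg_double() helper are available (the real code is __cmpxchg_double_slab() in mm/slub.c, which also carries lock-based fallbacks):

/* Sketch only -- not verbatim mm/slub.c; assumes CONFIG_CMPXCHG_LOCAL support. */
static inline bool slab_swap_freelist_and_counters(struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new)
{
	/*
	 * freelist and counters are adjacent machine words and struct page is
	 * aligned to 2*sizeof(unsigned long), so both can be replaced in one
	 * atomic double-word cmpxchg; a failed swap means another CPU updated
	 * the slab first and the caller must retry.
	 */
	return cmpxchg_double(&page->freelist, &page->counters,
			      freelist_old, counters_old,
			      freelist_new, counters_new);
}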

5 changes: 0 additions & 5 deletions include/linux/page-flags.h
@@ -124,9 +124,6 @@ enum pageflags {

/* SLOB */
PG_slob_free = PG_private,

/* SLUB */
PG_slub_frozen = PG_active,
};

#ifndef __GENERATING_BOUNDS_H
@@ -212,8 +209,6 @@ PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)

__PAGEFLAG(SlobFree, slob_free)

__PAGEFLAG(SlubFrozen, slub_frozen)

/*
* Private page markings that may be used by the filesystem that owns the page
* for its own purposes.
20 changes: 20 additions & 0 deletions include/linux/slub_def.h
@@ -24,21 +24,27 @@ enum stat_item {
ALLOC_FROM_PARTIAL, /* Cpu slab acquired from partial list */
ALLOC_SLAB, /* Cpu slab acquired from page allocator */
ALLOC_REFILL, /* Refill cpu slab from slab freelist */
ALLOC_NODE_MISMATCH, /* Switching cpu slab */
FREE_SLAB, /* Slab freed to the page allocator */
CPUSLAB_FLUSH, /* Abandoning of the cpu slab */
DEACTIVATE_FULL, /* Cpu slab was full when deactivated */
DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */
DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */
DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */
DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
DEACTIVATE_BYPASS, /* Implicit deactivation */
ORDER_FALLBACK, /* Number of times fallback was necessary */
CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
CMPXCHG_DOUBLE_FAIL, /* Number of times that cmpxchg double did not match */
CPU_PARTIAL_ALLOC, /* Used cpu partial on alloc */
CPU_PARTIAL_FREE, /* Used cpu partial on free */
NR_SLUB_STAT_ITEMS };

struct kmem_cache_cpu {
void **freelist; /* Pointer to next available object */
unsigned long tid; /* Globally unique transaction id */
struct page *page; /* The slab from which we are allocating */
struct page *partial; /* Partially allocated frozen slabs */
int node; /* The node of the page (or -1 for debug) */
#ifdef CONFIG_SLUB_STATS
unsigned stat[NR_SLUB_STAT_ITEMS];
@@ -76,6 +82,7 @@ struct kmem_cache {
int size; /* The size of an object including meta data */
int objsize; /* The size of an object without meta data */
int offset; /* Free pointer offset. */
int cpu_partial; /* Number of per cpu partial objects to keep around */
struct kmem_cache_order_objects oo;

/* Allocation and freeing of slabs */
@@ -228,6 +235,19 @@ kmalloc_order(size_t size, gfp_t flags, unsigned int order)
return ret;
}

/**
* Calling this on allocated memory will check that the memory
* is expected to be in use, and print warnings if not.
*/
#ifdef CONFIG_SLUB_DEBUG
extern bool verify_mem_not_deleted(const void *x);
#else
static inline bool verify_mem_not_deleted(const void *x)
{
return true;
}
#endif

#ifdef CONFIG_TRACING
extern void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
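
Usage note (not part of the diff): verify_mem_not_deleted() is meant for debugging paths that want to assert that an object they were handed is still allocated; with CONFIG_SLUB_DEBUG disabled it simply returns true. A hypothetical caller could look like this (my_driver_use and ctx are illustrative names, not from the commit):

/* Hypothetical example; only verify_mem_not_deleted() comes from this commit. */
static void my_driver_use(void *ctx)
{
	/*
	 * With CONFIG_SLUB_DEBUG enabled this checks that the slab object
	 * backing ctx is still marked in use and prints a warning if it has
	 * already been freed; otherwise it always returns true.
	 */
	if (!verify_mem_not_deleted(ctx))
		return;

	/* ... continue using ctx ... */
}
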
2 changes: 1 addition & 1 deletion mm/slab.c
@@ -4535,7 +4535,7 @@ static const struct file_operations proc_slabstats_operations = {

static int __init slab_proc_init(void)
{
proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
#ifdef CONFIG_DEBUG_SLAB_LEAK
proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
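
Aside on the mm/slab.c one-liner: it drops group/other read permission from /proc/slabinfo, so the entry goes from world-readable to readable only by its owner (root). For reference, using the standard <linux/stat.h> values (the macro names below are illustrative, not from the commit):

#include <linux/stat.h>

#define SLABINFO_MODE_OLD	(S_IWUSR | S_IRUGO)	/* 0200 | 0444 = 0644: anyone may read */
#define SLABINFO_MODE_NEW	(S_IWUSR | S_IRUSR)	/* 0200 | 0400 = 0600: only root may read */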