Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux

Pull slab update from Pekka Enberg:
 "Highlights:

  - Fix for boot-time problems on some architectures due to
    init_lock_keys() not respecting kmalloc_caches boundaries
    (Christoph Lameter)

  - CONFIG_SLUB_CPU_PARTIAL requested by RT folks (Joonsoo Kim)

  - Fix for excessive slab freelist draining (Wanpeng Li)

  - SLUB and SLOB cleanups and fixes (various people)"

I ended up editing the branch, and this avoids two commits at the end
that were immediately reverted, and I instead just applied the oneliner
fix in between myself.

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
  slub: Check for page NULL before doing the node_match check
  mm/slab: Give s_next and s_stop slab-specific names
  slob: Check for NULL pointer before calling ctor()
  slub: Make cpu partial slab support configurable
  slab: add kmalloc() to kernel API documentation
  slab: fix init_lock_keys
  slob: use DIV_ROUND_UP where possible
  slub: do not put a slab to cpu partial list when cpu_partial is 0
  mm/slub: Use node_nr_slabs and node_nr_objs in get_slabinfo
  mm/slub: Drop unnecessary nr_partials
  mm/slab: Fix /proc/slabinfo unwriteable for slab
  mm/slab: Sharing s_next and s_stop between slab and slub
  mm/slab: Fix drain freelist excessively
  slob: Rework #ifdeffery in slab.h
  mm, slab: moved kmem_cache_alloc_node comment to correct place
torvalds committed Jul 14, 2013
2 parents 41d9884 + c25f195 commit 54be820
Showing 8 changed files with 121 additions and 69 deletions.
57 changes: 42 additions & 15 deletions include/linux/slab.h
@@ -169,11 +169,7 @@ struct kmem_cache {
struct list_head list; /* List of all slab caches on the system */
};

#define KMALLOC_MAX_SIZE (1UL << 30)

#include <linux/slob_def.h>

#else /* CONFIG_SLOB */
#endif /* CONFIG_SLOB */

/*
* Kmalloc array related definitions
@@ -195,7 +191,9 @@ struct kmem_cache {
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 5
#endif
#else
#endif

#ifdef CONFIG_SLUB
/*
* SLUB allocates up to order 2 pages directly and otherwise
* passes the request to the page allocator.
@@ -207,6 +205,19 @@ struct kmem_cache {
#endif
#endif

#ifdef CONFIG_SLOB
/*
* SLOB passes all page size and larger requests to the page allocator.
* No kmalloc array is necessary since objects of different sizes can
* be allocated from the same page.
*/
#define KMALLOC_SHIFT_MAX 30
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
@@ -221,6 +232,7 @@ struct kmem_cache {
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
@@ -275,13 +287,18 @@ static __always_inline int kmalloc_index(size_t size)
/* Will never be reached. Needed because the compiler may complain */
return -1;
}
#endif /* !CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#elif defined(CONFIG_SLUB)
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#else
#error "Unknown slab allocator"
#endif

#ifdef CONFIG_SLOB
#include <linux/slob_def.h>
#endif

/*
@@ -291,6 +308,7 @@ static __always_inline int kmalloc_index(size_t size)
*/
static __always_inline int kmalloc_size(int n)
{
#ifndef CONFIG_SLOB
if (n > 2)
return 1 << n;

@@ -299,10 +317,9 @@ static __always_inline int kmalloc_size(int n)

if (n == 2 && KMALLOC_MIN_SIZE <= 64)
return 192;

#endif
return 0;
}
#endif /* !CONFIG_SLOB */

/*
* Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
@@ -356,9 +373,8 @@ int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);

/**
* kmalloc_array - allocate memory for an array.
* @n: number of elements.
* @size: element size.
* kmalloc - allocate memory
* @size: how many bytes of memory are required.
* @flags: the type of memory to allocate.
*
* The @flags argument may be one of:
@@ -405,6 +421,17 @@ void print_slabinfo_header(struct seq_file *m);
* There are other flags available as well, but these are not intended
* for general use, and so are not documented here. For a full list of
* potential flags, always refer to linux/gfp.h.
*
* kmalloc is the normal method of allocating memory
* in the kernel.
*/
static __always_inline void *kmalloc(size_t size, gfp_t flags);

/**
* kmalloc_array - allocate memory for an array.
* @n: number of elements.
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
@@ -428,7 +455,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
/**
* kmalloc_node - allocate memory from a specific node
* @size: how many bytes of memory are required.
* @flags: the type of memory to allocate (see kcalloc).
* @flags: the type of memory to allocate (see kmalloc).
* @node: node to allocate from.
*
* kmalloc() for non-local nodes, used to allocate from a specific node
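
For reference, a minimal usage sketch of the allocators whose kerneldoc is consolidated above; illustrative only — struct foo, example_allocations() and the field names are made up and not part of this patch:

    #include <linux/slab.h>

    struct foo {
    	int a;
    	int b;
    };

    static void example_allocations(size_t n, int node)
    {
    	struct foo *obj    = kmalloc(sizeof(*obj), GFP_KERNEL);
    	struct foo *array  = kmalloc_array(n, sizeof(*array), GFP_KERNEL);
    	struct foo *zeroed = kcalloc(n, sizeof(*zeroed), GFP_KERNEL);
    	struct foo *local  = kmalloc_node(sizeof(*local), GFP_KERNEL, node);

    	/* Every variant can return NULL and must be checked before use. */
    	if (obj)
    		obj->a = 1;

    	/* kfree(NULL) is a no-op, so unconditional frees are safe. */
    	kfree(local);
    	kfree(zeroed);
    	kfree(array);
    	kfree(obj);
    }

kmalloc_array() and kcalloc() guard the n * size multiplication against overflow, and kcalloc() additionally returns zeroed memory.
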
8 changes: 0 additions & 8 deletions include/linux/slob_def.h
@@ -18,14 +18,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
return __kmalloc_node(size, flags, node);
}

/**
* kmalloc - allocate memory
* @size: how many bytes of memory are required.
* @flags: the type of memory to allocate (see kcalloc).
*
* kmalloc is the normal method of allocating memory
* in the kernel.
*/
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
return __kmalloc_node(size, flags, NUMA_NO_NODE);
11 changes: 11 additions & 0 deletions init/Kconfig
@@ -1596,6 +1596,17 @@ config SLOB

endchoice

config SLUB_CPU_PARTIAL
default y
depends on SLUB
bool "SLUB per cpu partial cache"
help
Per cpu partial caches accellerate objects allocation and freeing
that is local to a processor at the price of more indeterminism
in the latency of the free. On overflow these caches will be cleared
which requires the taking of locks that may cause latency spikes.
Typically one would choose no for a realtime system.

config MMAP_ALLOW_UNINITIALIZED
bool "Allow mmapped anonymous memory to be uninitialized"
depends on EXPERT && !MMU
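
As an aside, a bool option like the SLUB_CPU_PARTIAL entry added above is usually consumed from C through IS_ENABLED(); a minimal sketch assuming only <linux/kconfig.h> (the helper name is made up, not part of this patch). A realtime-oriented build would simply answer "n" when configuring:

    #include <linux/kconfig.h>
    #include <linux/types.h>

    /* Hypothetical helper: true only when CONFIG_SLUB_CPU_PARTIAL=y. */
    static inline bool cpu_partial_enabled(void)
    {
    	return IS_ENABLED(CONFIG_SLUB_CPU_PARTIAL);
    }
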
51 changes: 23 additions & 28 deletions mm/slab.c
@@ -565,7 +565,7 @@ static void init_node_lock_keys(int q)
if (slab_state < UP)
return;

for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
struct kmem_cache_node *n;
struct kmem_cache *cache = kmalloc_caches[i];

@@ -1180,6 +1180,12 @@ static int init_cache_node_node(int node)
return 0;
}

static inline int slabs_tofree(struct kmem_cache *cachep,
struct kmem_cache_node *n)
{
return (n->free_objects + cachep->num - 1) / cachep->num;
}

static void __cpuinit cpuup_canceled(long cpu)
{
struct kmem_cache *cachep;
@@ -1241,7 +1247,7 @@ static void __cpuinit cpuup_canceled(long cpu)
n = cachep->node[node];
if (!n)
continue;
drain_freelist(cachep, n, n->free_objects);
drain_freelist(cachep, n, slabs_tofree(cachep, n));
}
}

@@ -1408,7 +1414,7 @@ static int __meminit drain_cache_node_node(int node)
if (!n)
continue;

drain_freelist(cachep, n, n->free_objects);
drain_freelist(cachep, n, slabs_tofree(cachep, n));

if (!list_empty(&n->slabs_full) ||
!list_empty(&n->slabs_partial)) {
@@ -2532,7 +2538,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
if (!n)
continue;

drain_freelist(cachep, n, n->free_objects);
drain_freelist(cachep, n, slabs_tofree(cachep, n));

ret += !list_empty(&n->slabs_full) ||
!list_empty(&n->slabs_partial);
@@ -3338,18 +3344,6 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
return obj;
}

/**
* kmem_cache_alloc_node - Allocate an object on the specified node
* @cachep: The cache to allocate from.
* @flags: See kmalloc().
* @nodeid: node number of the target node.
* @caller: return address of caller, used for debug information
*
* Identical to kmem_cache_alloc but it will allocate memory on the given
* node, which can improve the performance for cpu bound structures.
*
* Fallback to other node is possible if __GFP_THISNODE is not set.
*/
static __always_inline void *
slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
unsigned long caller)
@@ -3643,6 +3637,17 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
#endif

#ifdef CONFIG_NUMA
/**
* kmem_cache_alloc_node - Allocate an object on the specified node
* @cachep: The cache to allocate from.
* @flags: See kmalloc().
* @nodeid: node number of the target node.
*
* Identical to kmem_cache_alloc but it will allocate memory on the given
* node, which can improve the performance for cpu bound structures.
*
* Fallback to other node is possible if __GFP_THISNODE is not set.
*/
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
@@ -4431,20 +4436,10 @@ static int leaks_show(struct seq_file *m, void *p)
return 0;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
mutex_unlock(&slab_mutex);
}

static const struct seq_operations slabstats_op = {
.start = leaks_start,
.next = s_next,
.stop = s_stop,
.next = slab_next,
.stop = slab_stop,
.show = leaks_show,
};

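
The slabs_tofree() helper added above converts a node's free-object count into an upper bound on the number of slabs to drain, so drain_freelist() is no longer asked for n->free_objects slabs. A standalone worked sketch of the arithmetic (userspace C, made-up numbers):

    #include <stdio.h>

    /* Ceiling division: how many slabs are needed to hold free_objects
     * objects when each slab holds objs_per_slab of them. */
    static int slabs_tofree_demo(int free_objects, int objs_per_slab)
    {
    	return (free_objects + objs_per_slab - 1) / objs_per_slab;
    }

    int main(void)
    {
    	/* 75 free objects with 30 objects per slab is at most 3 slabs'
    	 * worth of free objects, so drain at most 3 slabs, not 75. */
    	printf("%d\n", slabs_tofree_demo(75, 30));
    	return 0;
    }
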
3 changes: 3 additions & 0 deletions mm/slab.h
@@ -271,3 +271,6 @@ struct kmem_cache_node {
#endif

};

void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
18 changes: 13 additions & 5 deletions mm/slab_common.c
@@ -497,6 +497,13 @@ void __init create_kmalloc_caches(unsigned long flags)


#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

void print_slabinfo_header(struct seq_file *m)
{
/*
@@ -531,12 +538,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
return seq_list_start(&slab_caches, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
void slab_stop(struct seq_file *m, void *p)
{
mutex_unlock(&slab_mutex);
}
@@ -613,8 +620,8 @@ static int s_show(struct seq_file *m, void *p)
*/
static const struct seq_operations slabinfo_op = {
.start = s_start,
.next = s_next,
.stop = s_stop,
.next = slab_next,
.stop = slab_stop,
.show = s_show,
};

@@ -633,7 +640,8 @@ static const struct file_operations proc_slabinfo_operations = {

static int __init slab_proc_init(void)
{
proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
&proc_slabinfo_operations);
return 0;
}
module_init(slab_proc_init);
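
For reference, the mode macros behind SLABINFO_RIGHTS above expand to the usual octal permissions: SLAB gets an owner-writable /proc/slabinfo (0600) so its cache tunables can still be written, while SLUB and SLOB keep it owner read-only (0400). A tiny userspace check using the standard <sys/stat.h> values:

    #include <assert.h>
    #include <sys/stat.h>

    int main(void)
    {
    	assert((S_IWUSR | S_IRUSR) == 0600);	/* SLAB: owner read/write */
    	assert(S_IRUSR == 0400);		/* SLUB, SLOB: owner read-only */
    	return 0;
    }
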
4 changes: 2 additions & 2 deletions mm/slob.c
@@ -122,7 +122,7 @@ static inline void clear_slob_page_free(struct page *sp)
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)

/*
* struct slob_rcu is inserted at the tail of allocated slob blocks, which
@@ -554,7 +554,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
flags, node);
}

if (c->ctor)
if (b && c->ctor)
c->ctor(b);

kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
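
Both mm/slob.c changes above are small: SLOB_UNITS now uses the kernel's DIV_ROUND_UP (ceiling division, defined in <linux/kernel.h>), and the ctor() call is skipped when the allocation failed. A standalone sketch of the rounding, using a made-up 4-byte unit size:

    #include <stdio.h>

    /* Same definition the kernel uses in <linux/kernel.h>. */
    #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

    int main(void)
    {
    	/* A 10-byte request with a hypothetical 4-byte allocation unit
    	 * needs ceil(10 / 4) == 3 units; the old expression
    	 * ((size) + SLOB_UNIT - 1) / SLOB_UNIT computed the same thing. */
    	printf("%d\n", DIV_ROUND_UP(10, 4));
    	return 0;
    }
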