mm/sl[aou]b: Move kmallocXXX functions to common code
The kmalloc* functions of all slab allocators are similar now, so let's move them into slab.h. This requires some function naming changes in slob.

As a result of this patch there is a common set of functions for all allocators. It also means that kmalloc_large() is now available in general to perform large order allocations that go directly via the page allocator. kmalloc_large() can be substituted if kmalloc() throws warnings because of too large allocations.

kmalloc_large() has exactly the same semantics as kmalloc() but can only be used for allocations > PAGE_SIZE.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 9de1bc8752
commit f1b6eb6e6b

7 changed files with 158 additions and 278 deletions
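As a quick illustration of the semantics described in the message above, here is a minimal caller sketch (the helper name alloc_scratch and its parameters are hypothetical, not part of this patch): kmalloc_large() takes the same arguments as kmalloc() but is only valid for allocations larger than PAGE_SIZE, which it routes directly to the page allocator.

/*
 * Hypothetical caller, not from this patch: pick between the regular
 * slab-backed path and the now-common kmalloc_large() based on size.
 */
#include <linux/slab.h>

static void *alloc_scratch(size_t len, gfp_t flags)
{
	if (len <= PAGE_SIZE)
		return kmalloc(len, flags);	/* normal slab path */

	/*
	 * Same semantics as kmalloc(), but only valid for > PAGE_SIZE;
	 * backed directly by the page allocator.
	 */
	return kmalloc_large(len, flags);
}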
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -4,6 +4,8 @@
  * (C) SGI 2006, Christoph Lameter
  *	Cleaned up and restructured to ease the addition of alternative
  *	implementations of SLAB allocators.
+ * (C) Linux Foundation 2008-2013
+ *	Unified interface for all slab allocators
  */
 
 #ifndef _LINUX_SLAB_H
@@ -94,6 +96,7 @@
 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
 				(unsigned long)ZERO_SIZE_PTR)
 
+#include <linux/kmemleak.h>
 
 struct mem_cgroup;
 /*
@@ -289,6 +292,57 @@ static __always_inline int kmalloc_index(size_t size)
 }
 #endif /* !CONFIG_SLOB */
 
+void *__kmalloc(size_t size, gfp_t flags);
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+#else
+static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	return __kmalloc(size, flags);
+}
+
+static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
+{
+	return kmem_cache_alloc(s, flags);
+}
+#endif
+
+#ifdef CONFIG_TRACING
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+					   gfp_t gfpflags,
+					   int node, size_t size);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+			      gfp_t gfpflags,
+			      int node, size_t size)
+{
+	return kmem_cache_alloc_trace(s, gfpflags, size);
+}
+#endif /* CONFIG_NUMA */
+
+#else /* CONFIG_TRACING */
+static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
+		gfp_t flags, size_t size)
+{
+	return kmem_cache_alloc(s, flags);
+}
+
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+			      gfp_t gfpflags,
+			      int node, size_t size)
+{
+	return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif /* CONFIG_TRACING */
+
 #ifdef CONFIG_SLAB
 #include <linux/slab_def.h>
 #endif
@@ -297,10 +351,61 @@ static __always_inline int kmalloc_index(size_t size)
 #include <linux/slub_def.h>
 #endif
 
-#ifdef CONFIG_SLOB
-#include <linux/slob_def.h>
-#endif
-
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret;
+
+	flags |= (__GFP_COMP | __GFP_KMEMCG);
+	ret = (void *) __get_free_pages(flags, order);
+	kmemleak_alloc(ret, size, 1, flags);
+	return ret;
+}
+
+#ifdef CONFIG_TRACING
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+#else
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	return kmalloc_order(size, flags, order);
+}
+#endif
+
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+	unsigned int order = get_order(size);
+	return kmalloc_order_trace(size, flags, order);
+}
+
+/**
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kcalloc).
+ *
+ * kmalloc is the normal method of allocating memory
+ * for objects smaller than page size in the kernel.
+ */
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+	if (__builtin_constant_p(size)) {
+		if (size > KMALLOC_MAX_CACHE_SIZE)
+			return kmalloc_large(size, flags);
+#ifndef CONFIG_SLOB
+		if (!(flags & GFP_DMA)) {
+			int index = kmalloc_index(size);
+
+			if (!index)
+				return ZERO_SIZE_PTR;
+
+			return kmem_cache_alloc_trace(kmalloc_caches[index],
+					flags, size);
+		}
+#endif
+	}
+	return __kmalloc(size, flags);
+}
+
 /*
  * Determine size used for the nth kmalloc cache.
  * return size or 0 if a kmalloc cache for that
@@ -321,6 +426,23 @@ static __always_inline int kmalloc_size(int n)
 	return 0;
 }
 
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+#ifndef CONFIG_SLOB
+	if (__builtin_constant_p(size) &&
+		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & SLAB_CACHE_DMA)) {
+		int i = kmalloc_index(size);
+
+		if (!i)
+			return ZERO_SIZE_PTR;
+
+		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
+						flags, node, size);
+	}
+#endif
+	return __kmalloc_node(size, flags, node);
+}
+
 /*
  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
  * Intended for arches that get misalignment faults even for 64 bit integer
@@ -451,36 +573,6 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 	return kmalloc_array(n, size, flags | __GFP_ZERO);
 }
 
-#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
-/**
- * kmalloc_node - allocate memory from a specific node
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
- * @node: node to allocate from.
- *
- * kmalloc() for non-local nodes, used to allocate from a specific node
- * if available. Equivalent to kmalloc() in the non-NUMA single-node
- * case.
- */
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return kmalloc(size, flags);
-}
-
-static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return __kmalloc(size, flags);
-}
-
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-
-static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
-					gfp_t flags, int node)
-{
-	return kmem_cache_alloc(cachep, flags);
-}
-#endif /* !CONFIG_NUMA && !CONFIG_SLOB */
-
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
  * calling function of the routine calling it for slab leak tracking instead