SLUB: update comments
Update comments throughout SLUB to reflect the new developments. Fix up
various awkward sentences.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 26a7bd0302
commit 672bba3a4b

1 file changed, 119 insertions(+), 123 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -66,11 +66,11 @@
  * SLUB assigns one slab for allocation to each processor.
  * Allocations only occur from these slabs called cpu slabs.
  *
- * Slabs with free elements are kept on a partial list.
- * There is no list for full slabs. If an object in a full slab is
+ * Slabs with free elements are kept on a partial list and during regular
+ * operations no list for full slabs is used. If an object in a full slab is
  * freed then the slab will show up again on the partial lists.
- * Otherwise there is no need to track full slabs unless we have to
- * track full slabs for debugging purposes.
+ * We track full slabs for debugging purposes though because otherwise we
+ * cannot scan all objects.
  *
  * Slabs are freed when they become empty. Teardown and setup is
  * minimal so we rely on the page allocators per cpu caches for
@@ -92,8 +92,8 @@
  *
  * - The per cpu array is updated for each new slab and and is a remote
  *   cacheline for most nodes. This could become a bouncing cacheline given
- *   enough frequent updates. There are 16 pointers in a cacheline.so at
- *   max 16 cpus could compete. Likely okay.
+ *   enough frequent updates. There are 16 pointers in a cacheline, so at
+ *   max 16 cpus could compete for the cacheline which may be okay.
  *
  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
  *
@@ -137,6 +137,7 @@
 
 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
                                 SLAB_POISON | SLAB_STORE_USER)
+
 /*
  * Set of flags that will prevent slab merging
  */
@@ -171,7 +172,7 @@ static struct notifier_block slab_notifier;
 static enum {
         DOWN,           /* No slab functionality available */
         PARTIAL,        /* kmem_cache_open() works but kmalloc does not */
-        UP,             /* Everything works */
+        UP,             /* Everything works but does not show up in sysfs */
         SYSFS           /* Sysfs up */
 } slab_state = DOWN;
 
@@ -245,9 +246,9 @@ static void print_section(char *text, u8 *addr, unsigned int length)
 /*
  * Slow version of get and set free pointer.
  *
- * This requires touching the cache lines of kmem_cache.
- * The offset can also be obtained from the page. In that
- * case it is in the cacheline that we already need to touch.
+ * This version requires touching the cache lines of kmem_cache which
+ * we avoid to do in the fast alloc free paths. There we obtain the offset
+ * from the page struct.
  */
 static void *get_freepointer(struct kmem_cache *s, void *object)
 {
@@ -429,26 +430,34 @@ static inline int check_valid_pointer(struct kmem_cache *s,
  *      Bytes of the object to be managed.
  *      If the freepointer may overlay the object then the free
  *      pointer is the first word of the object.
+ *
  *      Poisoning uses 0x6b (POISON_FREE) and the last byte is
  *      0xa5 (POISON_END)
  *
  * object + s->objsize
  *      Padding to reach word boundary. This is also used for Redzoning.
- *      Padding is extended to word size if Redzoning is enabled
- *      and objsize == inuse.
+ *      Padding is extended by another word if Redzoning is enabled and
+ *      objsize == inuse.
+ *
  *      We fill with 0xbb (RED_INACTIVE) for inactive objects and with
  *      0xcc (RED_ACTIVE) for objects in use.
  *
  * object + s->inuse
+ *      Meta data starts here.
+ *
  *      A. Free pointer (if we cannot overwrite object on free)
  *      B. Tracking data for SLAB_STORE_USER
- *      C. Padding to reach required alignment boundary
- *      Padding is done using 0x5a (POISON_INUSE)
+ *      C. Padding to reach required alignment boundary or at mininum
+ *              one word if debuggin is on to be able to detect writes
+ *              before the word boundary.
+ *
+ *      Padding is done using 0x5a (POISON_INUSE)
  *
  * object + s->size
+ *      Nothing is used beyond s->size.
  *
- * If slabcaches are merged then the objsize and inuse boundaries are to
- * be ignored. And therefore no slab options that rely on these boundaries
+ * If slabcaches are merged then the objsize and inuse boundaries are mostly
+ * ignored. And therefore no slab options that rely on these boundaries
  * may be used with merged slabcaches.
  */
 
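The layout that the rewritten comment describes can be made concrete with a small stand-alone sketch. Nothing below is kernel code: the object size, the flag handling and the size of the tracking area are simplified assumptions, chosen only to show how objsize, inuse and size relate to each other.

/* Illustration only, NOT from mm/slub.c: stacks up the regions described
 * in the comment above for one hypothetical debugged cache. */
#include <stdio.h>

#define WORD            sizeof(void *)
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long objsize = 52;     /* user visible object size (assumed) */
        int red_zone = 1;               /* pretend SLAB_RED_ZONE is set */
        int store_user = 1;             /* pretend SLAB_STORE_USER is set */
        int free_ptr_outside = 1;       /* free pointer may not overlay the object */

        /* Padding to the next word boundary; it doubles as the Redzone. */
        unsigned long size = ALIGN_UP(objsize, WORD);

        /* If Redzoning is on and there was no padding, add an extra word. */
        if (red_zone && size == objsize)
                size += WORD;

        unsigned long inuse = size;     /* metadata starts here */

        if (free_ptr_outside)
                size += WORD;           /* A. free pointer */
        if (store_user)
                size += 2 * WORD;       /* B. tracking data (size is a guess) */

        size = ALIGN_UP(size, WORD);    /* C. padding to the alignment boundary */

        printf("objsize=%lu inuse=%lu size=%lu\n", objsize, inuse, size);
        return 0;
}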
@@ -574,8 +583,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
                 /*
                  * No choice but to zap it and thus loose the remainder
                  * of the free objects in this slab. May cause
-                 * another error because the object count maybe
-                 * wrong now.
+                 * another error because the object count is now wrong.
                  */
                 set_freepointer(s, p, NULL);
                 return 0;
@@ -615,9 +623,8 @@ static int check_slab(struct kmem_cache *s, struct page *page)
 }
 
 /*
- * Determine if a certain object on a page is on the freelist and
- * therefore free. Must hold the slab lock for cpu slabs to
- * guarantee that the chains are consistent.
+ * Determine if a certain object on a page is on the freelist. Must hold the
+ * slab lock to guarantee that the chains are in a consistent state.
  */
 static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 {
@@ -663,7 +670,7 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 }
 
 /*
- * Tracking of fully allocated slabs for debugging
+ * Tracking of fully allocated slabs for debugging purposes.
  */
 static void add_full(struct kmem_cache_node *n, struct page *page)
 {
@@ -714,7 +721,7 @@ bad:
         /*
          * If this is a slab page then lets do the best we can
          * to avoid issues in the future. Marking all objects
-         * as used avoids touching the remainder.
+         * as used avoids touching the remaining objects.
          */
         printk(KERN_ERR "@@@ SLUB: %s slab 0x%p. Marking all objects used.\n",
                         s->name, page);
@@ -970,9 +977,9 @@ static void remove_partial(struct kmem_cache *s,
 }
 
 /*
- * Lock page and remove it from the partial list
+ * Lock slab and remove from the partial list.
  *
- * Must hold list_lock
+ * Must hold list_lock.
  */
 static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page)
 {
@@ -985,7 +992,7 @@ static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page)
 }
 
 /*
- * Try to get a partial slab from a specific node
+ * Try to allocate a partial slab from a specific node.
 */
 static struct page *get_partial_node(struct kmem_cache_node *n)
 {
@@ -994,7 +1001,8 @@ static struct page *get_partial_node(struct kmem_cache_node *n)
         /*
          * Racy check. If we mistakenly see no partial slabs then we
          * just allocate an empty slab. If we mistakenly try to get a
-         * partial slab then get_partials() will return NULL.
+         * partial slab and there is none available then get_partials()
+         * will return NULL.
          */
         if (!n || !n->nr_partial)
                 return NULL;
@@ -1010,8 +1018,7 @@ out:
 }
 
 /*
- * Get a page from somewhere. Search in increasing NUMA
- * distances.
+ * Get a page from somewhere. Search in increasing NUMA distances.
 */
 static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 {
@@ -1021,24 +1028,22 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
         struct page *page;
 
         /*
-         * The defrag ratio allows to configure the tradeoffs between
-         * inter node defragmentation and node local allocations.
-         * A lower defrag_ratio increases the tendency to do local
-         * allocations instead of scanning throught the partial
-         * lists on other nodes.
+         * The defrag ratio allows a configuration of the tradeoffs between
+         * inter node defragmentation and node local allocations. A lower
+         * defrag_ratio increases the tendency to do local allocations
+         * instead of attempting to obtain partial slabs from other nodes.
          *
-         * If defrag_ratio is set to 0 then kmalloc() always
-         * returns node local objects. If its higher then kmalloc()
-         * may return off node objects in order to avoid fragmentation.
-         *
-         * A higher ratio means slabs may be taken from other nodes
-         * thus reducing the number of partial slabs on those nodes.
+         * If the defrag_ratio is set to 0 then kmalloc() always
+         * returns node local objects. If the ratio is higher then kmalloc()
+         * may return off node objects because partial slabs are obtained
+         * from other nodes and filled up.
          *
          * If /sys/slab/xx/defrag_ratio is set to 100 (which makes
-         * defrag_ratio = 1000) then every (well almost) allocation
-         * will first attempt to defrag slab caches on other nodes. This
-         * means scanning over all nodes to look for partial slabs which
-         * may be a bit expensive to do on every slab allocation.
+         * defrag_ratio = 1000) then every (well almost) allocation will
+         * first attempt to defrag slab caches on other nodes. This means
+         * scanning over all nodes to look for partial slabs which may be
+         * expensive if we do it every time we are trying to find a slab
+         * with available objects.
          */
         if (!s->defrag_ratio || get_cycles() % 1024 > s->defrag_ratio)
                 return NULL;
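A rough user-space sketch of the gate in the last two context lines of this hunk. rand() stands in for get_cycles() (the kernel uses the cycle counter only because it is cheap, not for its randomness quality); the 0..1000 range of defrag_ratio comes from the comment above, everything else is assumed for illustration.

#include <stdio.h>
#include <stdlib.h>

/* Skip the remote-node search unless a cheap pseudo-random draw falls
 * at or below the configured ratio (10 * the sysfs percentage). */
static int should_search_other_nodes(int defrag_ratio)
{
        if (!defrag_ratio || rand() % 1024 > defrag_ratio)
                return 0;       /* fall back to a node local allocation */
        return 1;               /* go look at partial lists on other nodes */
}

int main(void)
{
        int hits = 0;

        for (int i = 0; i < 100000; i++)
                hits += should_search_other_nodes(500); /* 50% -> roughly half */

        printf("remote search attempted %d of 100000 times\n", hits);
        return 0;
}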
@@ -1098,11 +1103,12 @@ static void putback_slab(struct kmem_cache *s, struct page *page)
         } else {
                 if (n->nr_partial < MIN_PARTIAL) {
                         /*
-                         * Adding an empty page to the partial slabs in order
-                         * to avoid page allocator overhead. This page needs to
-                         * come after all the others that are not fully empty
-                         * in order to make sure that we do maximum
-                         * defragmentation.
+                         * Adding an empty slab to the partial slabs in order
+                         * to avoid page allocator overhead. This slab needs
+                         * to come after the other slabs with objects in
+                         * order to fill them up. That way the size of the
+                         * partial list stays small. kmem_cache_shrink can
+                         * reclaim empty slabs from the partial list.
                          */
                         add_partial_tail(n, page);
                         slab_unlock(page);
@@ -1170,7 +1176,7 @@ static void flush_all(struct kmem_cache *s)
  * 1. The page struct
  * 2. The first cacheline of the object to be allocated.
  *
- * The only cache lines that are read (apart from code) is the
+ * The only other cache lines that are read (apart from code) is the
  * per cpu array in the kmem_cache struct.
  *
  * Fastpath is not possible if we need to get a new slab or have
@@ -1224,9 +1230,11 @@ have_slab:
         cpu = smp_processor_id();
         if (s->cpu_slab[cpu]) {
                 /*
-                 * Someone else populated the cpu_slab while we enabled
-                 * interrupts, or we have got scheduled on another cpu.
-                 * The page may not be on the requested node.
+                 * Someone else populated the cpu_slab while we
+                 * enabled interrupts, or we have gotten scheduled
+                 * on another cpu. The page may not be on the
+                 * requested node even if __GFP_THISNODE was
+                 * specified. So we need to recheck.
                  */
                 if (node == -1 ||
                         page_to_nid(s->cpu_slab[cpu]) == node) {
@@ -1239,7 +1247,7 @@ have_slab:
                         slab_lock(page);
                         goto redo;
                 }
-                /* Dump the current slab */
+                /* New slab does not fit our expectations */
                 flush_slab(s, s->cpu_slab[cpu], cpu);
         }
         slab_lock(page);
@@ -1280,7 +1288,8 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * The fastpath only writes the cacheline of the page struct and the first
  * cacheline of the object.
  *
- * No special cachelines need to be read
+ * We read the cpu_slab cacheline to check if the slab is the per cpu
+ * slab for this processor.
  */
 static void slab_free(struct kmem_cache *s, struct page *page,
                         void *x, void *addr)
@@ -1325,7 +1334,7 @@ out_unlock:
 slab_empty:
         if (prior)
                 /*
-                 * Slab on the partial list.
+                 * Slab still on the partial list.
                  */
                 remove_partial(s, page);
 
@@ -1374,22 +1383,16 @@ static struct page *get_object_page(const void *x)
 }
 
 /*
- * kmem_cache_open produces objects aligned at "size" and the first object
- * is placed at offset 0 in the slab (We have no metainformation on the
- * slab, all slabs are in essence "off slab").
- *
- * In order to get the desired alignment one just needs to align the
- * size.
+ * Object placement in a slab is made very easy because we always start at
+ * offset 0. If we tune the size of the object to the alignment then we can
+ * get the required alignment by putting one properly sized object after
+ * another.
  *
  * Notice that the allocation order determines the sizes of the per cpu
  * caches. Each processor has always one slab available for allocations.
  * Increasing the allocation order reduces the number of times that slabs
- * must be moved on and off the partial lists and therefore may influence
+ * must be moved on and off the partial lists and is therefore a factor in
  * locking overhead.
- *
- * The offset is used to relocate the free list link in each object. It is
- * therefore possible to move the free list link behind the object. This
- * is necessary for RCU to work properly and also useful for debugging.
  */
 
 /*
@@ -1400,15 +1403,11 @@ static struct page *get_object_page(const void *x)
  */
 static int slub_min_order;
 static int slub_max_order = DEFAULT_MAX_ORDER;
-
-/*
- * Minimum number of objects per slab. This is necessary in order to
- * reduce locking overhead. Similar to the queue size in SLAB.
- */
 static int slub_min_objects = DEFAULT_MIN_OBJECTS;
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
+ * (Could be removed. This was introduced to pacify the merge skeptics.)
  */
 static int slub_nomerge;
 
@@ -1422,23 +1421,27 @@ static char *slub_debug_slabs;
 /*
  * Calculate the order of allocation given an slab object size.
  *
- * The order of allocation has significant impact on other elements
- * of the system. Generally order 0 allocations should be preferred
- * since they do not cause fragmentation in the page allocator. Larger
- * objects may have problems with order 0 because there may be too much
- * space left unused in a slab. We go to a higher order if more than 1/8th
- * of the slab would be wasted.
+ * The order of allocation has significant impact on performance and other
+ * system components. Generally order 0 allocations should be preferred since
+ * order 0 does not cause fragmentation in the page allocator. Larger objects
+ * be problematic to put into order 0 slabs because there may be too much
+ * unused space left. We go to a higher order if more than 1/8th of the slab
+ * would be wasted.
 *
- * In order to reach satisfactory performance we must ensure that
- * a minimum number of objects is in one slab. Otherwise we may
- * generate too much activity on the partial lists. This is less a
- * concern for large slabs though. slub_max_order specifies the order
- * where we begin to stop considering the number of objects in a slab.
+ * In order to reach satisfactory performance we must ensure that a minimum
+ * number of objects is in one slab. Otherwise we may generate too much
+ * activity on the partial lists which requires taking the list_lock. This is
+ * less a concern for large slabs though which are rarely used.
 *
- * Higher order allocations also allow the placement of more objects
- * in a slab and thereby reduce object handling overhead. If the user
- * has requested a higher mininum order then we start with that one
- * instead of zero.
+ * slub_max_order specifies the order where we begin to stop considering the
+ * number of objects in a slab as critical. If we reach slub_max_order then
+ * we try to keep the page order as low as possible. So we accept more waste
+ * of space in favor of a small page order.
+ *
+ * Higher order allocations also allow the placement of more objects in a
+ * slab and thereby reduce object handling overhead. If the user has
+ * requested a higher mininum order then we start with that one instead of
+ * the smallest order which will fit the object.
 */
 static int calculate_order(int size)
 {
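The policy this comment spells out (at least a minimum number of objects per slab, at most 1/8th of the slab wasted, capped by slub_max_order) can be sketched in stand-alone C. The constants below are placeholders rather than the kernel's defaults, and the real calculate_order() also derives its starting order from the object size; the rem <= slab_size / 8 test in the next hunk is the same 1/8th rule.

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define MIN_OBJECTS     8       /* stand-in for slub_min_objects */
#define MAX_TRY_ORDER   4       /* stand-in for slub_max_order */

/* Pick the smallest order that fits MIN_OBJECTS objects and wastes no
 * more than 1/8th of the slab; otherwise accept the waste at the cap. */
static int order_for_size(unsigned long size)
{
        for (int order = 0; order <= MAX_TRY_ORDER; order++) {
                unsigned long slab_size = PAGE_SIZE << order;
                unsigned long rem = slab_size % size;

                if (slab_size / size < MIN_OBJECTS)
                        continue;               /* too few objects per slab */
                if (rem <= slab_size / 8)
                        return order;           /* acceptable waste */
        }
        return MAX_TRY_ORDER;
}

int main(void)
{
        unsigned long sizes[] = { 32, 192, 700, 3000 };

        for (int i = 0; i < 4; i++)
                printf("size %4lu -> order %d\n", sizes[i],
                                order_for_size(sizes[i]));
        return 0;
}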
@@ -1458,18 +1461,18 @@ static int calculate_order(int size)
 
                 rem = slab_size % size;
 
-                if (rem <= (PAGE_SIZE << order) / 8)
+                if (rem <= slab_size / 8)
                         break;
 
         }
         if (order >= MAX_ORDER)
                 return -E2BIG;
 
         return order;
 }
 
 /*
- * Function to figure out which alignment to use from the
- * various ways of specifying it.
+ * Figure out what the alignment of the objects will be.
 */
 static unsigned long calculate_alignment(unsigned long flags,
                 unsigned long align, unsigned long size)
@@ -1624,18 +1627,16 @@ static int calculate_sizes(struct kmem_cache *s)
         size = ALIGN(size, sizeof(void *));
 
         /*
-         * If we are redzoning then check if there is some space between the
+         * If we are Redzoning then check if there is some space between the
          * end of the object and the free pointer. If not then add an
-         * additional word, so that we can establish a redzone between
-         * the object and the freepointer to be able to check for overwrites.
+         * additional word to have some bytes to store Redzone information.
          */
         if ((flags & SLAB_RED_ZONE) && size == s->objsize)
                 size += sizeof(void *);
 
         /*
-         * With that we have determined how much of the slab is in actual
-         * use by the object. This is the potential offset to the free
-         * pointer.
+         * With that we have determined the number of bytes in actual use
+         * by the object. This is the potential offset to the free pointer.
          */
         s->inuse = size;
 
@@ -1669,6 +1670,7 @@ static int calculate_sizes(struct kmem_cache *s)
          * of the object.
          */
         size += sizeof(void *);
+
         /*
          * Determine the alignment based on various parameters that the
          * user specified and the dynamic determination of cache line size
@@ -1770,7 +1772,6 @@ EXPORT_SYMBOL(kmem_cache_open);
 int kmem_ptr_validate(struct kmem_cache *s, const void *object)
 {
         struct page * page;
-        void *addr;
 
         page = get_object_page(object);
 
@@ -1807,7 +1808,8 @@ const char *kmem_cache_name(struct kmem_cache *s)
 EXPORT_SYMBOL(kmem_cache_name);
 
 /*
- * Attempt to free all slabs on a node
+ * Attempt to free all slabs on a node. Return the number of slabs we
+ * were unable to free.
 */
 static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
                         struct list_head *list)
@@ -1828,7 +1830,7 @@ static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
 }
 
 /*
- * Release all resources used by slab cache
+ * Release all resources used by a slab cache.
 */
 static int kmem_cache_close(struct kmem_cache *s)
 {
@@ -2089,13 +2091,14 @@ void kfree(const void *x)
 EXPORT_SYMBOL(kfree);
 
 /*
- * kmem_cache_shrink removes empty slabs from the partial lists
- * and then sorts the partially allocated slabs by the number
- * of items in use. The slabs with the most items in use
- * come first. New allocations will remove these from the
- * partial list because they are full. The slabs with the
- * least items are placed last. If it happens that the objects
- * are freed then the page can be returned to the page allocator.
+ * kmem_cache_shrink removes empty slabs from the partial lists and sorts
+ * the remaining slabs by the number of items in use. The slabs with the
+ * most items in use come first. New allocations will then fill those up
+ * and thus they can be removed from the partial lists.
+ *
+ * The slabs with the least items are placed last. This results in them
+ * being allocated from last increasing the chance that the last objects
+ * are freed in them.
 */
 int kmem_cache_shrink(struct kmem_cache *s)
 {
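A stand-alone sketch of the sort this comment describes: partial slabs are bucketed by their number of objects in use and the list is rebuilt fullest first, emptiest last. Plain arrays replace struct page and the kernel list primitives, and the counts are made up for the example.

#include <stdio.h>

#define OBJECTS_PER_SLAB        4       /* stand-in for s->objects */
#define NR_SLABS                6

int main(void)
{
        /* objects in use per partial slab; 0 means the slab could be freed */
        int inuse[NR_SLABS] = { 1, 3, 0, 2, 3, 1 };
        int order[NR_SLABS];
        int n = 0;

        /* Bucket pass: take the fullest slabs first, skip the empty ones. */
        for (int want = OBJECTS_PER_SLAB - 1; want >= 1; want--)
                for (int i = 0; i < NR_SLABS; i++)
                        if (inuse[i] == want)
                                order[n++] = i;

        printf("rebuilt partial list (fullest first):");
        for (int i = 0; i < n; i++)
                printf(" slab%d(inuse=%d)", order[i], inuse[order[i]]);
        printf("\n");
        return 0;
}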
|
@ -2124,12 +2127,10 @@ int kmem_cache_shrink(struct kmem_cache *s)
|
||||||
spin_lock_irqsave(&n->list_lock, flags);
|
spin_lock_irqsave(&n->list_lock, flags);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Build lists indexed by the items in use in
|
* Build lists indexed by the items in use in each slab.
|
||||||
* each slab or free slabs if empty.
|
|
||||||
*
|
*
|
||||||
* Note that concurrent frees may occur while
|
* Note that concurrent frees may occur while we hold the
|
||||||
* we hold the list_lock. page->inuse here is
|
* list_lock. page->inuse here is the upper limit.
|
||||||
* the upper limit.
|
|
||||||
*/
|
*/
|
||||||
list_for_each_entry_safe(page, t, &n->partial, lru) {
|
list_for_each_entry_safe(page, t, &n->partial, lru) {
|
||||||
if (!page->inuse && slab_trylock(page)) {
|
if (!page->inuse && slab_trylock(page)) {
|
||||||
|
@ -2153,8 +2154,8 @@ int kmem_cache_shrink(struct kmem_cache *s)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Rebuild the partial list with the slabs filled up
|
* Rebuild the partial list with the slabs filled up most
|
||||||
* most first and the least used slabs at the end.
|
* first and the least used slabs at the end.
|
||||||
*/
|
*/
|
||||||
for (i = s->objects - 1; i >= 0; i--)
|
for (i = s->objects - 1; i >= 0; i--)
|
||||||
list_splice(slabs_by_inuse + i, n->partial.prev);
|
list_splice(slabs_by_inuse + i, n->partial.prev);
|
||||||
|
@ -2217,7 +2218,7 @@ void __init kmem_cache_init(void)
|
||||||
#ifdef CONFIG_NUMA
|
#ifdef CONFIG_NUMA
|
||||||
/*
|
/*
|
||||||
* Must first have the slab cache available for the allocations of the
|
* Must first have the slab cache available for the allocations of the
|
||||||
* struct kmalloc_cache_node's. There is special bootstrap code in
|
* struct kmem_cache_node's. There is special bootstrap code in
|
||||||
* kmem_cache_open for slab_state == DOWN.
|
* kmem_cache_open for slab_state == DOWN.
|
||||||
*/
|
*/
|
||||||
create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
|
create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
|
||||||
|
@ -2389,8 +2390,8 @@ static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu)
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Use the cpu notifier to insure that the slab are flushed
|
* Use the cpu notifier to insure that the cpu slabs are flushed when
|
||||||
* when necessary.
|
* necessary.
|
||||||
*/
|
*/
|
||||||
static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
|
static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
|
||||||
unsigned long action, void *hcpu)
|
unsigned long action, void *hcpu)
|
||||||
|
@ -2555,11 +2556,6 @@ static void resiliency_test(void)
|
||||||
static void resiliency_test(void) {};
|
static void resiliency_test(void) {};
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/*
|
|
||||||
* These are not as efficient as kmalloc for the non debug case.
|
|
||||||
* We do not have the page struct available so we have to touch one
|
|
||||||
* cacheline in struct kmem_cache to check slab flags.
|
|
||||||
*/
|
|
||||||
void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
|
void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
|
||||||
{
|
{
|
||||||
struct kmem_cache *s = get_slab(size, gfpflags);
|
struct kmem_cache *s = get_slab(size, gfpflags);
|
||||||
|
@ -2677,7 +2673,7 @@ static unsigned long validate_slab_cache(struct kmem_cache *s)
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Generate lists of locations where slabcache objects are allocated
|
* Generate lists of code addresses where slabcache objects are allocated
|
||||||
* and freed.
|
* and freed.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
@ -2756,7 +2752,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Not found. Insert new tracking element
|
* Not found. Insert new tracking element.
|
||||||
*/
|
*/
|
||||||
if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max))
|
if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max))
|
||||||
return 0;
|
return 0;
|
||||||
|
|