[PATCH] Add some comments to slab.c
Also, check that we get a valid slabp_cache for off-slab slab descriptors. We should always get this. If we don't, we will have to disable off-slab descriptors for this cache and do the calculations again. This is a rare case, so add a BUG_ON, for now, just in case.

Signed-off-by: Alok N Kataria <alok.kataria@calsoftinc.com>
Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org>
Signed-off-by: Shai Fultheim <shai@scalex86.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent dfd54cbcc0
commit e5ac9c5aec
1 changed file with 27 additions and 2 deletions
 mm/slab.c | 29 +++++++++++++++++++++++++++--
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2206,8 +2206,17 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		cachep->gfpflags |= GFP_DMA;
 	cachep->buffer_size = size;
 
-	if (flags & CFLGS_OFF_SLAB)
+	if (flags & CFLGS_OFF_SLAB) {
 		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
+		/*
+		 * This is a possibility for one of the malloc_sizes caches.
+		 * But since we go off slab only for object size greater than
+		 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
+		 * this should not happen at all.
+		 * But leave a BUG_ON for some lucky dude.
+		 */
+		BUG_ON(!cachep->slabp_cache);
+	}
 	cachep->ctor = ctor;
 	cachep->dtor = dtor;
 	cachep->name = name;
@@ -2441,7 +2450,17 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
-/* Get the memory for a slab management obj. */
+/*
+ * Get the memory for a slab management obj.
+ * For a slab cache when the slab descriptor is off-slab, slab descriptors
+ * always come from malloc_sizes caches.  The slab descriptor cannot
+ * come from the same cache which is getting created because,
+ * when we are searching for an appropriate cache for these
+ * descriptors in kmem_cache_create, we search through the malloc_sizes array.
+ * If we are creating a malloc_sizes cache here it would not be visible to
+ * kmem_find_general_cachep till the initialization is complete.
+ * Hence we cannot have slabp_cache same as the original cache.
+ */
 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 				   int colour_off, gfp_t local_flags,
 				   int nodeid)
@@ -3125,6 +3144,12 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 		if (slabp->inuse == 0) {
 			if (l3->free_objects > l3->free_limit) {
 				l3->free_objects -= cachep->num;
+				/* No need to drop any previously held
+				 * lock here, even if we have a off-slab slab
+				 * descriptor it is guaranteed to come from
+				 * a different cache, refer to comments before
+				 * alloc_slabmgmt.
+				 */
 				slab_destroy(cachep, slabp);
 			} else {
 				list_add(&slabp->list, &l3->slabs_free);
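For readers unfamiliar with the size math behind the new comment, the small standalone C program below sketches the argument: a cache only goes off-slab when its objects are larger than PAGE_SIZE/8, while the slab descriptor itself stays small, and the general-purpose (malloc_sizes-like) caches are created in ascending size order, so a smaller, already-initialized cache should always be available to hold the descriptor. This is an illustration only, not kernel code; the PAGE_SIZE value, the size_classes list, and the descriptor_size() formula are simplified assumptions, and the "no host class" branch corresponds to the case the patch guards with BUG_ON.

/*
 * Standalone illustration only -- not kernel code. The size-class list,
 * PAGE_SIZE value, and descriptor-size formula are simplified assumptions.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096
#define OFF_SLAB_LIMIT	(PAGE_SIZE / 8)	/* objects above this go off-slab */

/* General-purpose size classes, created in ascending order. */
static const size_t size_classes[] = {
	32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096
};

/*
 * Rough stand-in for the slab descriptor size: a small header plus one
 * index per object on a one-page slab (the real kernel uses struct slab
 * and kmem_bufctl_t; the constants here are illustrative).
 */
static size_t descriptor_size(size_t obj_size)
{
	size_t header = 32;			/* pretend sizeof(struct slab) */
	size_t num = PAGE_SIZE / obj_size;	/* objects per one-page slab */
	return header + num * sizeof(unsigned int);
}

int main(void)
{
	size_t n = sizeof(size_classes) / sizeof(size_classes[0]);

	for (size_t i = 0; i < n; i++) {
		size_t obj = size_classes[i];

		if (obj <= OFF_SLAB_LIMIT)
			continue;		/* descriptor stays on-slab */

		size_t desc = descriptor_size(obj);
		size_t host = 0;

		/*
		 * Find the smallest earlier-created class that can hold the
		 * descriptor -- mirrors what kmem_find_general_cachep() is
		 * expected to return.
		 */
		for (size_t j = 0; j < i; j++) {
			if (size_classes[j] >= desc) {
				host = size_classes[j];
				break;
			}
		}

		if (host == 0)
			printf("class %4zu: no smaller class holds its %zu-byte descriptor (the BUG_ON case)\n",
			       obj, desc);
		else
			printf("class %4zu: %zu-byte off-slab descriptor fits in class %zu\n",
			       obj, desc, host);
	}
	return 0;
}

Running the sketch shows every off-slab class finding a much smaller host class for its descriptor, which is why the patch comment says the failure "should not happen at all" and only leaves a BUG_ON behind.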