slob: remove bigblock tracking
Remove the bigblock lists in favour of using compound pages and going
directly to the page allocator. Allocation size is stored in
page->private, which also makes ksize more accurate than it previously
was. Saves ~0.5K of code and 12-24 bytes of overhead per >= PAGE_SIZE
allocation.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 95b35127f1
commit d87a133fc2

1 changed file with 29 additions and 74 deletions
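To make the bookkeeping change concrete before the diff, here is a minimal caller-side sketch (my illustration, not code from this commit; ksize_demo is a hypothetical function and the byte figures assume PAGE_SIZE == 4096):

#include <linux/kernel.h>
#include <linux/slab.h>

/* Hypothetical demo, not part of the commit. */
static void ksize_demo(void)
{
        void *p = kmalloc(PAGE_SIZE + 100, GFP_KERNEL); /* >= PAGE_SIZE path */

        if (!p)
                return;
        /*
         * Before this commit, the large-object path recorded only the page
         * order, so ksize() reported PAGE_SIZE << get_order(size) == 8192.
         * After it, __kmalloc() stores the request in page->private and
         * ksize() returns the exact 4196.
         */
        printk(KERN_INFO "ksize = %zu\n", ksize(p));
        kfree(p);
}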
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -18,9 +18,11 @@
  * Above this is an implementation of kmalloc/kfree. Blocks returned
  * from kmalloc are 4-byte aligned and prepended with a 4-byte header.
  * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
- * __get_free_pages directly so that it can return page-aligned blocks
- * and keeps a linked list of such pages and their orders. These
- * objects are detected in kfree() by their page alignment.
+ * __get_free_pages directly, allocating compound pages so the page order
+ * does not have to be separately tracked, and also stores the exact
+ * allocation size in page->private so that it can be used to accurately
+ * provide ksize(). These objects are detected in kfree() because slob_page()
+ * is false for them.
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
  * destructors for every SLAB allocation. Objects are returned with the
@@ -29,7 +31,8 @@
  * alignment. Again, objects of page-size or greater are allocated by
  * calling __get_free_pages. As SLAB objects know their size, no separate
  * size bookkeeping is necessary and there is essentially no allocation
- * space overhead.
+ * space overhead, and compound pages aren't needed for multi-page
+ * allocations.
  */
 
 #include <linux/kernel.h>
@@ -381,48 +384,26 @@ out:
  * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
  */
 
-struct bigblock {
-        int order;
-        void *pages;
-        struct bigblock *next;
-};
-typedef struct bigblock bigblock_t;
-
-static bigblock_t *bigblocks;
-
-static DEFINE_SPINLOCK(block_lock);
-
-
 void *__kmalloc(size_t size, gfp_t gfp)
 {
-        slob_t *m;
-        bigblock_t *bb;
-        unsigned long flags;
-
         if (size < PAGE_SIZE - SLOB_UNIT) {
+                slob_t *m;
                 m = slob_alloc(size + SLOB_UNIT, gfp, 0);
                 if (m)
                         m->units = size;
                 return m+1;
+        } else {
+                void *ret;
+
+                ret = (void *) __get_free_pages(gfp | __GFP_COMP,
+                                                get_order(size));
+                if (ret) {
+                        struct page *page;
+                        page = virt_to_page(ret);
+                        page->private = size;
+                }
+                return ret;
         }
-
-        bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
-        if (!bb)
-                return 0;
-
-        bb->order = get_order(size);
-        bb->pages = (void *)__get_free_pages(gfp, bb->order);
-
-        if (bb->pages) {
-                spin_lock_irqsave(&block_lock, flags);
-                bb->next = bigblocks;
-                bigblocks = bb;
-                spin_unlock_irqrestore(&block_lock, flags);
-                return bb->pages;
-        }
-
-        slob_free(bb, sizeof(bigblock_t));
-        return 0;
 }
 EXPORT_SYMBOL(__kmalloc);
 
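A note on the allocation flag above (my annotation, not part of the commit): __GFP_COMP makes the pages a compound unit whose head page records the order, which is what lets kfree() in the next hunk release the whole allocation with a single put_page() instead of free_pages(addr, order). A sketch of the pairing, using the same calls as the diff (big_alloc and big_free are hypothetical names):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helpers mirroring the new large-object path. */
static void *big_alloc(size_t size, gfp_t gfp)
{
        /* Compound page: the head page records the order, so no side
         * list is needed to remember it. */
        void *p = (void *)__get_free_pages(gfp | __GFP_COMP, get_order(size));

        if (p)
                virt_to_page(p)->private = size; /* exact size, for ksize() */
        return p;
}

static void big_free(void *p)
{
        /* Dropping the last reference on the head page frees every page
         * of the compound allocation; no order argument required. */
        put_page(virt_to_page(p));
}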
@@ -462,59 +443,33 @@ EXPORT_SYMBOL(krealloc);
 void kfree(const void *block)
 {
         struct slob_page *sp;
-        slob_t *m;
-        bigblock_t *bb, **last = &bigblocks;
-        unsigned long flags;
 
         if (!block)
                 return;
 
         sp = (struct slob_page *)virt_to_page(block);
-        if (!slob_page(sp)) {
-                /* on the big block list */
-                spin_lock_irqsave(&block_lock, flags);
-                for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
-                        if (bb->pages == block) {
-                                *last = bb->next;
-                                spin_unlock_irqrestore(&block_lock, flags);
-                                free_pages((unsigned long)block, bb->order);
-                                slob_free(bb, sizeof(bigblock_t));
-                                return;
-                        }
-                }
-                spin_unlock_irqrestore(&block_lock, flags);
-                WARN_ON(1);
-                return;
-        }
-
-        m = (slob_t *)block - 1;
-        slob_free(m, m->units + SLOB_UNIT);
-        return;
+        if (slob_page(sp)) {
+                slob_t *m = (slob_t *)block - 1;
+                slob_free(m, m->units + SLOB_UNIT);
+        } else
+                put_page(&sp->page);
 }
 
 EXPORT_SYMBOL(kfree);
 
+/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
 size_t ksize(const void *block)
 {
         struct slob_page *sp;
-        bigblock_t *bb;
-        unsigned long flags;
 
         if (!block)
                 return 0;
 
         sp = (struct slob_page *)virt_to_page(block);
-        if (!slob_page(sp)) {
-                spin_lock_irqsave(&block_lock, flags);
-                for (bb = bigblocks; bb; bb = bb->next)
-                        if (bb->pages == block) {
-                                spin_unlock_irqrestore(&slob_lock, flags);
-                                return PAGE_SIZE << bb->order;
-                        }
-                spin_unlock_irqrestore(&block_lock, flags);
-        }
-
-        return ((slob_t *)block - 1)->units + SLOB_UNIT;
+        if (slob_page(sp))
+                return ((slob_t *)block - 1)->units + SLOB_UNIT;
+        else
+                return sp->page.private;
 }
 
 struct kmem_cache {
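For the record, the arithmetic behind the changelog's "12-24 bytes overhead" figure, worked from the structure deleted above (my annotation; sizes assume conventional 32-/64-bit alignment):

/* The removed per-allocation bookkeeping, copied from the old code: */
struct bigblock {
        int order;              /* 4 bytes */
        void *pages;            /* 4 bytes on 32-bit, 8 on 64-bit */
        struct bigblock *next;  /* 4 bytes on 32-bit, 8 on 64-bit */
};
/*
 * sizeof(struct bigblock) is 12 on 32-bit and 24 on 64-bit (4 bytes of
 * padding follow 'order' there), matching the 12-24 bytes saved per
 * >= PAGE_SIZE allocation.  Each bigblock was itself slob_alloc()ed and
 * chained on a global list under block_lock, so every large kfree() or
 * ksize() took the lock and walked the list; the new code replaces all
 * of that with a single page->private store.
 */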