mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-07-08 23:55:14 +00:00
mm: slub: add kernel address sanitizer support for slub allocator
With this patch, KASan will be able to catch bugs in memory allocated by the SLUB allocator. Initially, all objects in a newly allocated slab page are marked as redzone. Later, when allocation of a SLUB object happens, the number of bytes requested by the caller is marked as accessible, and the rest of the object (including SLUB's metadata) is marked as redzone (inaccessible). We also mark an object as accessible if ksize was called for this object. There are some places in the kernel where the ksize function is called to inquire the size of the really allocated area. Such callers could validly access the whole allocated memory, so it should be marked as accessible. Code in the slub.c and slab_common.c files could validly access the object's metadata, so instrumentation for these files is disabled. Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com> Signed-off-by: Dmitry Chernenkov <dmitryc@google.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Konstantin Serebryany <kcc@google.com> Signed-off-by: Andrey Konovalov <adech.fo@gmail.com> Cc: Yuri Gribov <tetra2005@gmail.com> Cc: Konstantin Khlebnikov <koct9i@gmail.com> Cc: Sasha Levin <sasha.levin@oracle.com> Cc: Christoph Lameter <cl@linux.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Andi Kleen <andi@firstfloor.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
a79316c617
commit
0316bec22e
9 changed files with 197 additions and 5 deletions
|
@ -104,6 +104,7 @@
|
|||
(unsigned long)ZERO_SIZE_PTR)
|
||||
|
||||
#include <linux/kmemleak.h>
|
||||
#include <linux/kasan.h>
|
||||
|
||||
struct mem_cgroup;
|
||||
/*
|
||||
|
@ -325,7 +326,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
|
|||
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
|
||||
gfp_t flags, size_t size)
|
||||
{
|
||||
return kmem_cache_alloc(s, flags);
|
||||
void *ret = kmem_cache_alloc(s, flags);
|
||||
|
||||
kasan_kmalloc(s, ret, size);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static __always_inline void *
|
||||
|
@ -333,7 +337,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
|
|||
gfp_t gfpflags,
|
||||
int node, size_t size)
|
||||
{
|
||||
return kmem_cache_alloc_node(s, gfpflags, node);
|
||||
void *ret = kmem_cache_alloc_node(s, gfpflags, node);
|
||||
|
||||
kasan_kmalloc(s, ret, size);
|
||||
return ret;
|
||||
}
|
||||
#endif /* CONFIG_TRACING */
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue