Mirror of https://github.com/Fishwaldo/linux-bl808.git
We now have memory organised in a way that allows implementing KASAN.

Unlike book3s/64, book3e always has translation active, so the only thing
needed to use KASAN is to set up an early zero shadow mapping just after
setting a stack pointer and before calling early_setup().

The memory layout is now as follows:

    +------------------------+  Kernel virtual map end (0xc000200000000000)
    |                        |
    |   16TB of KASAN map    |
    |                        |
    +------------------------+  Kernel KASAN shadow map start
    |                        |
    |   16TB of IO map       |
    |                        |
    +------------------------+  Kernel IO map start
    |                        |
    |   16TB of vmemmap      |
    |                        |
    +------------------------+  Kernel vmemmap start
    |                        |
    |   16TB of vmap         |
    |                        |
    +------------------------+  Kernel virt start (0xc000100000000000)
    |                        |
    |   64TB of linear mem   |
    |                        |
    +------------------------+  Kernel linear (0xc.....)

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/0bef8beda27baf71e3b9e8b13e620fba6e19499b.1656427701.git.christophe.leroy@csgroup.eu
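For reference, the shadow map in that layout uses the usual KASAN scheme: every
shadow byte tracks 2^KASAN_SHADOW_SCALE_SHIFT (eight) bytes of kernel memory, so
the shadow region is one eighth the size of the range it covers, and a kernel
address is turned into its shadow address with the scale and offset defined in
the header below. A minimal sketch of that translation, assuming only this
header's constants (the helper name is illustrative; the kernel's own helper is
kasan_mem_to_shadow() in include/linux/kasan.h):

    #include <asm/kasan.h>

    /* Illustrative only: map a kernel address to its KASAN shadow byte
     * using the scale shift and offset defined in asm/kasan.h.
     */
    static inline void *example_mem_to_shadow(const void *addr)
    {
        return (void *)(((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT) +
                        KASAN_SHADOW_OFFSET);
    }
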
84 lines
2.2 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H

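/*
 * When KASAN is enabled, the arch assembly string routines are built under
 * '__'-prefixed names so that the KASAN-instrumented wrappers can provide
 * the plain memcpy/memset/... entry points and call the prefixed versions.
 */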
#ifdef CONFIG_KASAN
#define _GLOBAL_KASAN(fn)	_GLOBAL(__##fn)
#define _GLOBAL_TOC_KASAN(fn)	_GLOBAL_TOC(__##fn)
#define EXPORT_SYMBOL_KASAN(fn)	EXPORT_SYMBOL(__##fn)
#else
#define _GLOBAL_KASAN(fn)	_GLOBAL(fn)
#define _GLOBAL_TOC_KASAN(fn)	_GLOBAL_TOC(fn)
#define EXPORT_SYMBOL_KASAN(fn)
#endif

#ifndef __ASSEMBLY__

#include <asm/page.h>
#include <linux/sizes.h>

#define KASAN_SHADOW_SCALE_SHIFT	3

#if defined(CONFIG_MODULES) && defined(CONFIG_PPC32)
#define KASAN_KERN_START	ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M)
#else
#define KASAN_KERN_START	PAGE_OFFSET
#endif

#define KASAN_SHADOW_START	(KASAN_SHADOW_OFFSET + \
				 (KASAN_KERN_START >> KASAN_SHADOW_SCALE_SHIFT))

#define KASAN_SHADOW_OFFSET	ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)

#ifdef CONFIG_PPC32
#define KASAN_SHADOW_END	(-(-KASAN_SHADOW_START >> KASAN_SHADOW_SCALE_SHIFT))
#elif defined(CONFIG_PPC_BOOK3S_64)
/*
 * The shadow ends before the highest accessible address
 * because we don't need a shadow for the shadow. Instead:
 * c00e000000000000 >> 3 + a80e000000000000 = c00fc00000000000
 */
#define KASAN_SHADOW_END	0xc00fc00000000000UL

#else

/*
 * The shadow ends before the highest accessible address
 * because we don't need a shadow for the shadow.
 * But it doesn't hurt to have a shadow for the shadow,
 * and keeping the shadow end aligned eases things.
 */
#define KASAN_SHADOW_END	0xc000200000000000UL

#endif

#ifdef CONFIG_KASAN
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_STATIC_KEY_FALSE(powerpc_kasan_enabled_key);

static __always_inline bool kasan_arch_is_ready(void)
{
	if (static_branch_likely(&powerpc_kasan_enabled_key))
		return true;
	return false;
}

#define kasan_arch_is_ready kasan_arch_is_ready
#endif

void kasan_early_init(void);
void kasan_mmu_init(void);
void kasan_init(void);
void kasan_late_init(void);
#else
static inline void kasan_init(void) { }
static inline void kasan_mmu_init(void) { }
static inline void kasan_late_init(void) { }
#endif

void kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte);
int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end);
int kasan_init_region(void *start, size_t size);

#endif /* __ASSEMBLY */
#endif
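
For context, kasan_arch_is_ready() above only starts returning true once the
arch boot code flips powerpc_kasan_enabled_key after the real shadow has been
mapped; until then the generic KASAN checks stay inert. A minimal sketch of
that hand-over, assuming only the declarations from this header (illustrative
only, not the actual code under arch/powerpc/mm/kasan/):

    /* Sketch: the arch kasan_init() maps and zeroes the shadow for all of
     * kernel memory, then enables the static key so kasan_arch_is_ready()
     * reports true and the instrumentation starts consulting the shadow.
     */
    void __init kasan_init(void)
    {
        /* ... allocate and map the real shadow region here ... */

        static_branch_enable(&powerpc_kasan_enabled_key);
    }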