Merge remote-tracking branch 'riscv/for-next' into starlight

Emil Renner Berthing 2021-06-07 00:55:22 +02:00
commit 91d02ad770
16 changed files with 246 additions and 98 deletions

View file

@@ -83,6 +83,8 @@ config RISCV
select HAVE_KPROBES
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
+select HAVE_MOVE_PMD
+select HAVE_MOVE_PUD
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -104,6 +106,7 @@ config RISCV
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select UACCESS_MEMCPY if !MMU
+select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT && MMU
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT

View file

@@ -29,18 +29,11 @@ struct prev_kprobe {
unsigned int status;
};
-/* Single step context for kprobe */
-struct kprobe_step_ctx {
-unsigned long ss_pending;
-unsigned long match_addr;
-};
/* per-cpu kprobe control block */
struct kprobe_ctlblk {
unsigned int kprobe_status;
unsigned long saved_status;
struct prev_kprobe prev_kprobe;
-struct kprobe_step_ctx ss_ctx;
};
void arch_remove_kprobe(struct kprobe *p);

View file

@@ -18,6 +18,8 @@
/* RISC-V shim does not initialize PCI bus */
#define pcibios_assign_all_busses() 1
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
extern int isa_dma_bridge_buggy;
#ifdef CONFIG_PCI

View file

@@ -46,8 +46,7 @@ static inline int pud_bad(pud_t pud)
#define pud_leaf pud_leaf
static inline int pud_leaf(pud_t pud)
{
-return pud_present(pud) &&
-(pud_val(pud) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
+return pud_present(pud) && (pud_val(pud) & _PAGE_LEAF);
}
static inline void set_pud(pud_t *pudp, pud_t pud)

View file

@@ -39,5 +39,10 @@
#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
_PAGE_WRITE | _PAGE_EXEC | \
_PAGE_USER | _PAGE_GLOBAL))
+/*
+* when all of R/W/X are zero, the PTE is a pointer to the next level
+* of the page table; otherwise, it is a leaf PTE.
+*/
+#define _PAGE_LEAF (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#endif /* _ASM_RISCV_PGTABLE_BITS_H */
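
Aside: a standalone C sketch of the _PAGE_LEAF test above, with the Sv39 permission-bit positions copied in so it compiles outside the kernel (illustration only, not part of the diff):

#include <stdio.h>

#define _PAGE_READ  (1UL << 1)
#define _PAGE_WRITE (1UL << 2)
#define _PAGE_EXEC  (1UL << 3)
#define _PAGE_LEAF  (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)

/* A valid entry with R/W/X all clear points to the next page-table
 * level; any permission bit set makes it a leaf mapping. */
static int is_leaf(unsigned long pteval)
{
        return (pteval & _PAGE_LEAF) != 0;
}

int main(void)
{
        printf("%d\n", is_leaf(0x1));                   /* V only: table pointer -> 0 */
        printf("%d\n", is_leaf(0x1 | _PAGE_READ));      /* readable leaf -> 1 */
        return 0;
}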

View file

@@ -134,7 +134,8 @@
| _PAGE_WRITE \
| _PAGE_PRESENT \
| _PAGE_ACCESSED \
-| _PAGE_DIRTY)
+| _PAGE_DIRTY \
+| _PAGE_GLOBAL)
#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
@@ -172,10 +173,23 @@ extern pgd_t swapper_pg_dir[];
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_present(pmd_t pmd)
+{
+/*
+* Checking for _PAGE_LEAF is needed too because:
+* When splitting a THP, split_huge_page() will temporarily clear
+* the present bit, in this situation, pmd_present() and
+* pmd_trans_huge() still needs to return true.
+*/
+return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
+}
+#else
static inline int pmd_present(pmd_t pmd)
{
return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
+#endif
static inline int pmd_none(pmd_t pmd)
{
@@ -184,14 +198,13 @@ static inline int pmd_none(pmd_t pmd)
static inline int pmd_bad(pmd_t pmd)
{
-return !pmd_present(pmd);
+return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}
#define pmd_leaf pmd_leaf
static inline int pmd_leaf(pmd_t pmd)
{
-return pmd_present(pmd) &&
-(pmd_val(pmd) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
+return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
@@ -229,6 +242,11 @@ static inline pte_t pmd_pte(pmd_t pmd)
return __pte(pmd_val(pmd));
}
+static inline pte_t pud_pte(pud_t pud)
+{
+return __pte(pud_val(pud));
+}
/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
@@ -267,8 +285,7 @@ static inline int pte_exec(pte_t pte)
static inline int pte_huge(pte_t pte)
{
-return pte_present(pte)
-&& (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
+return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}
static inline int pte_dirty(pte_t pte)
@@ -371,6 +388,14 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
local_flush_tlb_page(address);
}
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+unsigned long address, pmd_t *pmdp)
+{
+pte_t *ptep = (pte_t *)pmdp;
+update_mmu_cache(vma, address, ptep);
+}
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
@@ -464,6 +489,147 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
return ptep_test_and_clear_young(vma, address, ptep);
}
+/*
+* THP functions
+*/
+static inline pmd_t pte_pmd(pte_t pte)
+{
+return __pmd(pte_val(pte));
+}
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+return pmd;
+}
+static inline pmd_t pmd_mkinvalid(pmd_t pmd)
+{
+return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
+}
+#define __pmd_to_phys(pmd) (pmd_val(pmd) >> _PAGE_PFN_SHIFT << PAGE_SHIFT)
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
+}
+static inline pmd_t mk_pmd(struct page *page, pgprot_t prot)
+{
+return pfn_pmd(page_to_pfn(page), prot);
+}
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
+}
+#define pmd_write pmd_write
+static inline int pmd_write(pmd_t pmd)
+{
+return pte_write(pmd_pte(pmd));
+}
+static inline int pmd_dirty(pmd_t pmd)
+{
+return pte_dirty(pmd_pte(pmd));
+}
+static inline int pmd_young(pmd_t pmd)
+{
+return pte_young(pmd_pte(pmd));
+}
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+return pte_pmd(pte_mkold(pmd_pte(pmd)));
+}
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
+}
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+return pte_pmd(pte_mkwrite(pmd_pte(pmd)));
+}
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
+}
+static inline pmd_t pmd_mkclean(pmd_t pmd)
+{
+return pte_pmd(pte_mkclean(pmd_pte(pmd)));
+}
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
+}
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+pmd_t *pmdp, pmd_t pmd)
+{
+return set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
+}
+static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
+pud_t *pudp, pud_t pud)
+{
+return set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
+}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+return pmd_leaf(pmd);
+}
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+unsigned long address, pmd_t *pmdp,
+pmd_t entry, int dirty)
+{
+return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
+}
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+unsigned long address, pmd_t *pmdp)
+{
+return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
+}
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+unsigned long address, pmd_t *pmdp)
+{
+return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
+}
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+unsigned long address, pmd_t *pmdp)
+{
+ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
+}
+#define pmdp_establish pmdp_establish
+static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
+}
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+unsigned long end);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
* Encode and decode a swap entry
*
@@ -533,7 +699,6 @@ extern uintptr_t _dtb_early_pa;
#define dtb_early_pa _dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
-void setup_bootmem(void);
void paging_init(void);
void misc_mem_init(void);
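
Aside: the pmd_present() comment in the hunk above is subtle, so here is a standalone sketch of the split scenario it guards against. The _PAGE_PROT_NONE bit position below is a placeholder chosen for illustration, not the real pgtable-bits.h value:

#include <assert.h>

#define _PAGE_PRESENT   (1UL << 0)
#define _PAGE_READ      (1UL << 1)
#define _PAGE_WRITE     (1UL << 2)
#define _PAGE_EXEC      (1UL << 3)
#define _PAGE_PROT_NONE (1UL << 8)              /* placeholder bit */
#define _PAGE_LEAF      (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)

/* THP variant of pmd_present(): stays true while split_huge_page()
 * has temporarily cleared the present bit, because R/W/X still mark
 * the entry as a leaf. */
static int pmd_present_thp(unsigned long pmdval)
{
        return (pmdval & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF)) != 0;
}

static unsigned long pmd_mkinvalid(unsigned long pmdval)
{
        return pmdval & ~(_PAGE_PRESENT | _PAGE_PROT_NONE);
}

int main(void)
{
        unsigned long huge = _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE;

        assert(pmd_present_thp(huge));
        assert(pmd_present_thp(pmd_mkinvalid(huge)));   /* mid-split */
        return 0;
}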

View file

@@ -27,7 +27,7 @@ static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0;
#endif
#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void protect_kernel_linear_mapping_text_rodata(void);
+void __init protect_kernel_linear_mapping_text_rodata(void);
#else
static inline void protect_kernel_linear_mapping_text_rodata(void) {}
#endif

View file

@@ -6,6 +6,7 @@
#ifndef _ASM_RISCV_SWITCH_TO_H
#define _ASM_RISCV_SWITCH_TO_H
+#include <linux/jump_label.h>
#include <linux/sched/task_stack.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
@@ -55,9 +56,13 @@ static inline void __switch_to_aux(struct task_struct *prev,
fstate_restore(next, task_pt_regs(next));
}
-extern bool has_fpu;
+extern struct static_key_false cpu_hwcap_fpu;
+static __always_inline bool has_fpu(void)
+{
+return static_branch_likely(&cpu_hwcap_fpu);
+}
#else
-#define has_fpu false
+static __always_inline bool has_fpu(void) { return false; }
#define fstate_save(task, regs) do { } while (0)
#define fstate_restore(task, regs) do { } while (0)
#define __switch_to_aux(__prev, __next) do { } while (0)
@@ -70,7 +75,7 @@ extern struct task_struct *__switch_to(struct task_struct *,
do { \
struct task_struct *__prev = (prev); \
struct task_struct *__next = (next); \
-if (has_fpu) \
+if (has_fpu()) \
__switch_to_aux(__prev, __next); \
((last) = __switch_to(__prev, __next)); \
} while (0)
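
Aside: a hedged, kernel-style fragment of the static-key pattern this diff adopts for has_fpu(). The names and the probe helper are hypothetical and the fragment is not standalone; the point is that each static_branch_likely() site compiles to a patchable nop instead of a load of a global bool:

#include <linux/jump_label.h>
#include <linux/init.h>

DEFINE_STATIC_KEY_FALSE(demo_key);

/* Hot path: one patchable nop once the key is enabled. */
static __always_inline bool demo_enabled(void)
{
        return static_branch_likely(&demo_key);
}

/* Boot path: flip the key once, patching every call site. */
static int __init demo_init(void)
{
        if (hw_probe_says_yes())        /* hypothetical detection helper */
                static_branch_enable(&demo_key);
        return 0;
}
early_initcall(demo_init);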

View file

@@ -19,7 +19,7 @@ unsigned long elf_hwcap __read_mostly;
static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
#ifdef CONFIG_FPU
-bool has_fpu __read_mostly;
+__ro_after_init DEFINE_STATIC_KEY_FALSE(cpu_hwcap_fpu);
#endif
/**
@@ -59,7 +59,7 @@ bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit)
}
EXPORT_SYMBOL_GPL(__riscv_isa_extension_available);
-void riscv_fill_hwcap(void)
+void __init riscv_fill_hwcap(void)
{
struct device_node *node;
const char *isa;
@@ -146,6 +146,6 @@ void riscv_fill_hwcap(void)
#ifdef CONFIG_FPU
if (elf_hwcap & (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D))
-has_fpu = true;
+static_branch_enable(&cpu_hwcap_fpu);
#endif
}

View file

@@ -17,7 +17,7 @@ DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static void __kprobes
-post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
+post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
@@ -43,7 +43,7 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
p->ainsn.api.handler((u32)p->opcode,
(unsigned long)p->addr, regs);
-post_kprobe_handler(kcb, regs);
+post_kprobe_handler(p, kcb, regs);
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -151,21 +151,6 @@ static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
regs->status = kcb->saved_status;
}
-static void __kprobes
-set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p)
-{
-unsigned long offset = GET_INSN_LENGTH(p->opcode);
-kcb->ss_ctx.ss_pending = true;
-kcb->ss_ctx.match_addr = addr + offset;
-}
-static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
-{
-kcb->ss_ctx.ss_pending = false;
-kcb->ss_ctx.match_addr = 0;
-}
static void __kprobes setup_singlestep(struct kprobe *p,
struct pt_regs *regs,
struct kprobe_ctlblk *kcb, int reenter)
@@ -184,8 +169,6 @@ static void __kprobes setup_singlestep(struct kprobe *p,
/* prepare for single stepping */
slot = (unsigned long)p->ainsn.api.insn;
-set_ss_context(kcb, slot, p); /* mark pending ss */
/* IRQs and single stepping do not mix well. */
kprobes_save_local_irqflag(kcb, regs);
@@ -221,13 +204,8 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
}
static void __kprobes
-post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
+post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
-struct kprobe *cur = kprobe_running();
-if (!cur)
-return;
/* return addr restore if non-branching insn */
if (cur->ainsn.api.restore != 0)
regs->epc = cur->ainsn.api.restore;
@@ -359,16 +337,16 @@ bool __kprobes
kprobe_single_step_handler(struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+unsigned long addr = instruction_pointer(regs);
+struct kprobe *cur = kprobe_running();
-if ((kcb->ss_ctx.ss_pending)
-&& (kcb->ss_ctx.match_addr == instruction_pointer(regs))) {
-clear_ss_context(kcb); /* clear pending ss */
+if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
+((unsigned long)&cur->ainsn.api.insn[0] + GET_INSN_LENGTH(cur->opcode) == addr)) {
kprobes_restore_local_irqflag(kcb, regs);
-post_kprobe_handler(kcb, regs);
+post_kprobe_handler(cur, kcb, regs);
return true;
}
/* not ours, kprobes should ignore it */
return false;
}
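
Aside: with kprobe_step_ctx gone, the single-step handler recomputes its match address as slot + instruction length instead of reading stored state. A standalone sketch of that arithmetic, mirroring what GET_INSN_LENGTH does on RISC-V (opcodes with both low bits set are 32-bit; anything else is 16-bit compressed):

#include <stdint.h>
#include <assert.h>

static unsigned long insn_length(uint32_t opcode)
{
        return (opcode & 0x3) == 0x3 ? 4 : 2;
}

int main(void)
{
        unsigned long slot = 0x1000;    /* hypothetical single-step slot */

        /* 32-bit nop (addi x0,x0,0): the trap must land at slot + 4
         * for the handler to claim it */
        assert(slot + insn_length(0x00000013) == 0x1004);
        /* compressed c.nop (0x0001): trap lands at slot + 2 */
        assert(slot + insn_length(0x0001) == 0x1002);
        return 0;
}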

View file

@@ -87,7 +87,7 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
regs->status = SR_PIE;
-if (has_fpu) {
+if (has_fpu()) {
regs->status |= SR_FS_INITIAL;
/*
* Restore the initial value to the FP register

View file

@@ -276,7 +276,6 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
efi_init();
-setup_bootmem();
paging_init();
#if IS_ENABLED(CONFIG_BUILTIN_DTB)
unflatten_and_copy_device_tree();

View file

@@ -90,7 +90,7 @@ static long restore_sigcontext(struct pt_regs *regs,
/* sc_regs is structured the same as the start of pt_regs */
err = __copy_from_user(regs, &sc->sc_regs, sizeof(sc->sc_regs));
/* Restore the floating-point state. */
-if (has_fpu)
+if (has_fpu())
err |= restore_fp_state(regs, &sc->sc_fpregs);
return err;
}
@@ -143,7 +143,7 @@ static long setup_sigcontext(struct rt_sigframe __user *frame,
/* sc_regs is structured the same as the start of pt_regs */
err = __copy_to_user(&sc->sc_regs, regs, sizeof(sc->sc_regs));
/* Save the floating-point state. */
-if (has_fpu)
+if (has_fpu())
err |= save_fp_state(regs, &sc->sc_fpregs);
return err;
}

View file

@@ -213,7 +213,7 @@ static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
set_mm_noasid(mm);
}
-static int asids_init(void)
+static int __init asids_init(void)
{
unsigned long old;
@@ -280,11 +280,12 @@ static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
* cache flush to be performed before execution resumes on each hart. This
* actually performs that local instruction cache flush, which implicitly only
* refers to the current hart.
+*
+* The "cpu" argument must be the current local CPU number.
*/
-static inline void flush_icache_deferred(struct mm_struct *mm)
+static inline void flush_icache_deferred(struct mm_struct *mm, unsigned int cpu)
{
#ifdef CONFIG_SMP
-unsigned int cpu = smp_processor_id();
cpumask_t *mask = &mm->context.icache_stale_mask;
if (cpumask_test_cpu(cpu, mask)) {
@@ -320,5 +321,5 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
set_mm(next, cpu);
-flush_icache_deferred(next);
+flush_icache_deferred(next, cpu);
}
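
Aside: a standalone userspace analog of the icache_stale_mask protocol that flush_icache_deferred() implements, with an illustrative fixed CPU count. Writers of instruction memory mark every CPU stale; each CPU clears its own bit with one local flush the next time it switches into the mm:

#include <stdio.h>

static unsigned long icache_stale_mask;

static void remote_code_write(unsigned int nr_cpus)
{
        icache_stale_mask = (1UL << nr_cpus) - 1;
}

static void switch_in(unsigned int cpu)
{
        if (icache_stale_mask & (1UL << cpu)) {
                icache_stale_mask &= ~(1UL << cpu);
                /* the kernel does local_flush_icache_all() here */
                printf("cpu%u: fence.i\n", cpu);
        }
}

int main(void)
{
        remote_code_write(4);
        switch_in(2);   /* flushes once */
        switch_in(2);   /* already clean, no flush */
        return 0;
}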

View file

@@ -33,6 +33,7 @@ unsigned long kernel_virt_addr = KERNEL_LINK_ADDR;
EXPORT_SYMBOL(kernel_virt_addr);
#ifdef CONFIG_XIP_KERNEL
#define kernel_virt_addr (*((unsigned long *)XIP_FIXUP(&kernel_virt_addr)))
+extern char _xiprom[], _exiprom[];
#endif
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
@@ -53,7 +54,7 @@ struct pt_alloc_ops {
#endif
};
-static phys_addr_t dma32_phys_limit __ro_after_init;
+static phys_addr_t dma32_phys_limit __initdata;
static void __init zone_sizes_init(void)
{
@@ -67,11 +68,6 @@ static void __init zone_sizes_init(void)
free_area_init(max_zone_pfns);
}
-static void __init setup_zero_page(void)
-{
-memset((void *)empty_zero_page, 0, PAGE_SIZE);
-}
#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
@@ -119,7 +115,7 @@ void __init mem_init(void)
print_vm_layout();
}
-void __init setup_bootmem(void)
+static void __init setup_bootmem(void)
{
phys_addr_t vmlinux_end = __pa_symbol(&_end);
phys_addr_t vmlinux_start = __pa_symbol(&_start);
@@ -176,15 +172,8 @@ void __init setup_bootmem(void)
memblock_allow_resize();
}
-#ifdef CONFIG_XIP_KERNEL
-extern char _xiprom[], _exiprom[];
-extern char _sdata[], _edata[];
-#endif /* CONFIG_XIP_KERNEL */
#ifdef CONFIG_MMU
-static struct pt_alloc_ops _pt_ops __ro_after_init;
+static struct pt_alloc_ops _pt_ops __initdata;
#ifdef CONFIG_XIP_KERNEL
#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&_pt_ops))
@@ -200,13 +189,13 @@ EXPORT_SYMBOL(va_pa_offset);
#endif
/* Offset between kernel mapping virtual address and kernel load address */
#ifdef CONFIG_64BIT
-unsigned long va_kernel_pa_offset;
+unsigned long va_kernel_pa_offset __ro_after_init;
EXPORT_SYMBOL(va_kernel_pa_offset);
#endif
#ifdef CONFIG_XIP_KERNEL
#define va_kernel_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_pa_offset)))
#endif
-unsigned long va_kernel_xip_pa_offset;
+unsigned long va_kernel_xip_pa_offset __ro_after_init;
EXPORT_SYMBOL(va_kernel_xip_pa_offset);
#ifdef CONFIG_XIP_KERNEL
#define va_kernel_xip_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_xip_pa_offset)))
@@ -216,7 +205,7 @@ EXPORT_SYMBOL(pfn_base);
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
-pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
+static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
@@ -253,7 +242,7 @@ static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}
-static inline pte_t *get_pte_virt_late(phys_addr_t pa)
+static inline pte_t *__init get_pte_virt_late(phys_addr_t pa)
{
return (pte_t *) __va(pa);
}
@@ -272,7 +261,7 @@ static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}
-static phys_addr_t alloc_pte_late(uintptr_t va)
+static phys_addr_t __init alloc_pte_late(uintptr_t va)
{
unsigned long vaddr;
@@ -296,10 +285,10 @@ static void __init create_pte_mapping(pte_t *ptep,
#ifndef __PAGETABLE_PMD_FOLDED
-pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
-pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
-pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
-pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
+static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
+static pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
#ifdef CONFIG_XIP_KERNEL
#define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd))
@@ -319,7 +308,7 @@ static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}
-static pmd_t *get_pmd_virt_late(phys_addr_t pa)
+static pmd_t *__init get_pmd_virt_late(phys_addr_t pa)
{
return (pmd_t *) __va(pa);
}
@@ -336,7 +325,7 @@ static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}
-static phys_addr_t alloc_pmd_late(uintptr_t va)
+static phys_addr_t __init alloc_pmd_late(uintptr_t va)
{
unsigned long vaddr;
@@ -454,14 +443,16 @@ asmlinkage void __init __copy_data(void)
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif
-uintptr_t load_pa, load_sz;
+static uintptr_t load_pa __initdata;
+static uintptr_t load_sz __initdata;
#ifdef CONFIG_XIP_KERNEL
#define load_pa (*((uintptr_t *)XIP_FIXUP(&load_pa)))
#define load_sz (*((uintptr_t *)XIP_FIXUP(&load_sz)))
#endif
#ifdef CONFIG_XIP_KERNEL
-uintptr_t xiprom, xiprom_sz;
+static uintptr_t xiprom __initdata;
+static uintptr_t xiprom_sz __initdata;
#define xiprom_sz (*((uintptr_t *)XIP_FIXUP(&xiprom_sz)))
#define xiprom (*((uintptr_t *)XIP_FIXUP(&xiprom)))
@@ -646,7 +637,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
}
#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void protect_kernel_linear_mapping_text_rodata(void)
+void __init protect_kernel_linear_mapping_text_rodata(void)
{
unsigned long text_start = (unsigned long)lm_alias(_start);
unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
@@ -858,7 +849,7 @@ static void __init reserve_crashkernel(void)
* reserved once we call early_init_fdt_scan_reserved_mem()
* later on.
*/
-static int elfcore_hdr_setup(struct reserved_mem *rmem)
+static int __init elfcore_hdr_setup(struct reserved_mem *rmem)
{
elfcorehdr_addr = rmem->base;
elfcorehdr_size = rmem->size;
@@ -870,8 +861,8 @@ RESERVEDMEM_OF_DECLARE(elfcorehdr, "linux,elfcorehdr", elfcore_hdr_setup);
void __init paging_init(void)
{
+setup_bootmem();
setup_vm_final();
-setup_zero_page();
}
void __init misc_mem_init(void)

View file

@@ -15,7 +15,7 @@ void flush_tlb_all(void)
* Kernel may panic if cmask is NULL.
*/
static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
-unsigned long size)
+unsigned long size, unsigned long stride)
{
struct cpumask hmask;
unsigned int cpuid;
@@ -27,7 +27,7 @@ static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
/* local cpu is the only cpu present in cpumask */
-if (size <= PAGE_SIZE)
+if (size <= stride)
local_flush_tlb_page(start);
else
local_flush_tlb_all();
@@ -41,16 +41,23 @@ static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
void flush_tlb_mm(struct mm_struct *mm)
{
-__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
+__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1, PAGE_SIZE);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
-__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
+__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE, PAGE_SIZE);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
-__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
+__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start, PAGE_SIZE);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+unsigned long end)
+{
+__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start, PMD_SIZE);
+}
+#endif
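
Aside: the new stride argument keeps a single huge page from forcing a full TLB flush. A standalone sketch of the size-versus-stride decision on the local-CPU path, with the usual Sv39 sizes:

#include <stdio.h>

#define PAGE_SIZE (4UL << 10)   /* 4 KiB */
#define PMD_SIZE  (2UL << 20)   /* 2 MiB huge page */

/* Mirrors the size <= stride test in __sbi_tlb_flush_range(). */
static const char *local_flush_kind(unsigned long size, unsigned long stride)
{
        return size <= stride ? "single-address flush" : "full flush";
}

int main(void)
{
        /* one 2 MiB mapping flushed at 4 KiB granularity: full flush */
        printf("%s\n", local_flush_kind(PMD_SIZE, PAGE_SIZE));
        /* same mapping via flush_pmd_tlb_range(): targeted flush */
        printf("%s\n", local_flush_kind(PMD_SIZE, PMD_SIZE));
        return 0;
}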