Huge page backed vmalloc memory can benefit performance in many cases.
However, some users of vmalloc may not be ready to handle huge pages for
various reasons: hardware constraints, potential page splits, etc.
VM_NO_HUGE_VMAP was introduced to let vmalloc users opt out of huge
pages. However, it is not easy to track down all the users that require
the opt-out, as the allocations are passed down different call stacks
and may cause issues in different layers.

To address this, replace VM_NO_HUGE_VMAP with an opt-in flag,
VM_ALLOW_HUGE_VMAP, so that users that benefit from huge pages can
request them explicitly.

Also, remove vmalloc_no_huge() and add the opt-in helper vmalloc_huge().
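As an illustration (not part of this commit), a minimal sketch of the
opt-in from a caller's side, assuming the vmalloc_huge(size, gfp_mask)
helper described above; the caller and function names here are
hypothetical:

/*
 * Minimal sketch: a hypothetical caller that is known to tolerate huge
 * page backed mappings opts in explicitly via vmalloc_huge(). Callers
 * that stay on plain vmalloc() are never handed huge pages.
 */
#include <linux/vmalloc.h>

static void *alloc_large_table(unsigned long size)
{
	/* Opt in to huge pages; may still fall back to base pages. */
	return vmalloc_huge(size, GFP_KERNEL);
}

static void free_large_table(void *table)
{
	vfree(table);	/* freed like any other vmalloc allocation */
}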
Fixes: fac54e2bfb ("x86/Kconfig: Select HAVE_ARCH_HUGE_VMALLOC with HAVE_ARCH_HUGE_VMAP")
Link: https://lore.kernel.org/netdev/14444103-d51b-0fb3-ee63-c3f182f0b546@molgen.mpg.de/
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Song Liu <song@kernel.org>
Reviewed-by: Rik van Riel <riel@surriel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
// SPDX-License-Identifier: GPL-2.0-or-later
/*  Kernel module help for powerpc.
    Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.
    Copyright (C) 2008 Freescale Semiconductor, Inc.
*/
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/bug.h>
#include <asm/module.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/sort.h>
#include <asm/setup.h>
#include <asm/sections.h>

static LIST_HEAD(module_bug_list);

static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
				    const Elf_Shdr *sechdrs,
				    const char *name)
{
	char *secstrings;
	unsigned int i;

	secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	for (i = 1; i < hdr->e_shnum; i++)
		if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
			return &sechdrs[i];
	return NULL;
}

int module_finalize(const Elf_Ehdr *hdr,
		const Elf_Shdr *sechdrs, struct module *me)
{
	const Elf_Shdr *sect;
	int rc;

	rc = module_finalize_ftrace(me, sechdrs);
	if (rc)
		return rc;

	/* Apply feature fixups */
	sect = find_section(hdr, sechdrs, "__ftr_fixup");
	if (sect != NULL)
		do_feature_fixups(cur_cpu_spec->cpu_features,
				  (void *)sect->sh_addr,
				  (void *)sect->sh_addr + sect->sh_size);

	sect = find_section(hdr, sechdrs, "__mmu_ftr_fixup");
	if (sect != NULL)
		do_feature_fixups(cur_cpu_spec->mmu_features,
				  (void *)sect->sh_addr,
				  (void *)sect->sh_addr + sect->sh_size);

#ifdef CONFIG_PPC64
	sect = find_section(hdr, sechdrs, "__fw_ftr_fixup");
	if (sect != NULL)
		do_feature_fixups(powerpc_firmware_features,
				  (void *)sect->sh_addr,
				  (void *)sect->sh_addr + sect->sh_size);
#endif /* CONFIG_PPC64 */

#ifdef PPC64_ELF_ABI_v1
	sect = find_section(hdr, sechdrs, ".opd");
	if (sect != NULL) {
		me->arch.start_opd = sect->sh_addr;
		me->arch.end_opd = sect->sh_addr + sect->sh_size;
	}
#endif /* PPC64_ELF_ABI_v1 */

#ifdef CONFIG_PPC_BARRIER_NOSPEC
	sect = find_section(hdr, sechdrs, "__spec_barrier_fixup");
	if (sect != NULL)
		do_barrier_nospec_fixups_range(barrier_nospec_enabled,
				  (void *)sect->sh_addr,
				  (void *)sect->sh_addr + sect->sh_size);
#endif /* CONFIG_PPC_BARRIER_NOSPEC */

	sect = find_section(hdr, sechdrs, "__lwsync_fixup");
	if (sect != NULL)
		do_lwsync_fixups(cur_cpu_spec->cpu_features,
				 (void *)sect->sh_addr,
				 (void *)sect->sh_addr + sect->sh_size);

	return 0;
}

static __always_inline void *
__module_alloc(unsigned long size, unsigned long start, unsigned long end, bool nowarn)
{
	pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
	gfp_t gfp = GFP_KERNEL | (nowarn ? __GFP_NOWARN : 0);

	/*
	 * Don't do huge page allocations for modules yet until more testing
	 * is done. STRICT_MODULE_RWX may require extra work to support this
	 * too.
	 */
	return __vmalloc_node_range(size, 1, start, end, gfp, prot,
				    VM_FLUSH_RESET_PERMS,
				    NUMA_NO_NODE, __builtin_return_address(0));
}

void *module_alloc(unsigned long size)
{
#ifdef MODULES_VADDR
	unsigned long limit = (unsigned long)_etext - SZ_32M;
	void *ptr = NULL;

	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);

	/* First try within 32M limit from _etext to avoid branch trampolines */
	if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit)
		ptr = __module_alloc(size, limit, MODULES_END, true);

	if (!ptr)
		ptr = __module_alloc(size, MODULES_VADDR, MODULES_END, false);

	return ptr;
#else
	return __module_alloc(size, VMALLOC_START, VMALLOC_END, false);
#endif
}
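A note on the design choice in module_alloc() above: powerpc direct
branch instructions have a reach of roughly +/-32 MB, so the first
attempt allocates module text within 32 MB of _etext to let calls into
the kernel avoid branch trampolines. That attempt passes nowarn=true
(__GFP_NOWARN) so a failure falls through silently to the second
attempt over the full MODULES_VADDR..MODULES_END range.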