Mirror of https://github.com/Fishwaldo/linux-bl808.git, synced 2025-06-17 20:25:19 +00:00
Merge tag 'random-6.2-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random

Pull random number generator updates from Jason Donenfeld:

 - Replace prandom_u32_max() and various open-coded variants of it;
   there is now a new family of functions that uses fast rejection
   sampling to choose properly uniformly random numbers within an
   interval:

       get_random_u32_below(ceil) - [0, ceil)
       get_random_u32_above(floor) - (floor, U32_MAX]
       get_random_u32_inclusive(floor, ceil) - [floor, ceil]

   Coccinelle was used to convert all current users of
   prandom_u32_max(), as well as many open-coded patterns, resulting in
   improvements throughout the tree. (A brief usage sketch of these
   helpers follows the commit list below.)

   I'll have a "late" 6.1-rc1 pull for you that removes the now unused
   prandom_u32_max() function, just in case any other trees add a new
   use case of it that needs to be converted. According to linux-next,
   there may be two trivial cases of prandom_u32_max() reintroductions
   that are fixable with a 's/.../.../'. So I'll have for you a final
   conversion patch doing that alongside the removal patch during the
   second week.

   This is a treewide change that touches many files throughout.

 - More consistent use of get_random_canary().

 - Updates to comments, documentation, tests, headers, and
   simplification in configuration.

 - The arch_get_random*_early() abstraction was only used by arm64 and
   wasn't entirely useful, so this has been replaced by code that works
   in all relevant contexts.

 - The kernel will use and manage random seeds in non-volatile EFI
   variables, refreshing a variable with a fresh seed when the RNG is
   initialized. The RNG GUID namespace is then hidden from efivarfs to
   prevent accidental leakage.

   These changes are split into random.c infrastructure code used in
   the EFI subsystem, in this pull request, and related support inside
   of EFISTUB, in Ard's EFI tree. These are co-dependent for full
   functionality, but the order of merging doesn't matter.

 - Part of the infrastructure added for the EFI support is also used
   for an improvement to the way vsprintf initializes its siphash key,
   replacing a sleep loop wart.

 - The hardware RNG framework now always calls its correct random.c
   input function, add_hwgenerator_randomness(), rather than sometimes
   going through helpers better suited for other cases.

 - The add_latent_entropy() function has long been called from the fork
   handler, but is a no-op when the latent entropy gcc plugin isn't
   used, which is fine for the purposes of latent entropy. But it was
   missing out on the cycle counter that was also being mixed in beside
   the latent entropy variable. So now, if the latent entropy gcc
   plugin isn't enabled, add_latent_entropy() will expand to a call to
   add_device_randomness(NULL, 0), which adds a cycle counter, without
   the absent latent entropy variable.

 - The RNG is now reseeded from a delayed worker, rather than on demand
   when used. Always running from a worker allows it to make use of the
   CPU RNG on platforms like S390x, whose instructions are too slow to
   do so from interrupts. It also has the effect of adding in new
   inputs more frequently with more regularity, amounting to a long
   term transcript of random values. Plus, it helps a bit with the
   upcoming vDSO implementation (which isn't yet ready for 6.2).

 - The jitter entropy algorithm now tries to execute on many different
   CPUs, round-robining, in hopes of hitting even more memory latencies
   and other unpredictable effects. It also will mix in a cycle counter
   when the entropy timer fires, in addition to being mixed in from the
   main loop, to account more explicitly for fluctuations in that timer
   firing. And the state it touches is now kept within the same cache
   line, so that it's assured that the different execution contexts
   will cause latencies.

* tag 'random-6.2-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random: (23 commits)
  random: include <linux/once.h> in the right header
  random: align entropy_timer_state to cache line
  random: mix in cycle counter when jitter timer fires
  random: spread out jitter callback to different CPUs
  random: remove extraneous period and add a missing one in comments
  efi: random: refresh non-volatile random seed when RNG is initialized
  vsprintf: initialize siphash key using notifier
  random: add back async readiness notifier
  random: reseed in delayed work rather than on-demand
  random: always mix cycle counter in add_latent_entropy()
  hw_random: use add_hwgenerator_randomness() for early entropy
  random: modernize documentation comment on get_random_bytes()
  random: adjust comment to account for removed function
  random: remove early archrandom abstraction
  random: use random.trust_{bootloader,cpu} command line option only
  stackprotector: actually use get_random_canary()
  stackprotector: move get_random_canary() into stackprotector.h
  treewide: use get_random_u32_inclusive() when possible
  treewide: use get_random_u32_{above,below}() instead of manual loop
  treewide: use get_random_u32_below() instead of deprecated function
  ...
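To make the three interval conventions above concrete, here is a minimal kernel-style sketch. The helpers pick_index(), pick_retry_delay() and pick_above_floor() are hypothetical names used only for illustration; get_random_u32_below()/_above()/_inclusive() are the functions named in the pull message, with the bounds it describes.

#include <linux/random.h>

/* Uniformly pick an array index in [0, len), as with the old prandom_u32_max(len). */
static unsigned int pick_index(unsigned int len)
{
        return get_random_u32_below(len);
}

/* Pick a retry delay in [1, 16] jiffies; collapses the old "1 + prandom_u32_max(16)" pattern. */
static unsigned long pick_retry_delay(void)
{
        return get_random_u32_inclusive(1, 16);
}

/* Pick a value strictly above a floor, i.e. in (8, U32_MAX]. */
static u32 pick_above_floor(void)
{
        return get_random_u32_above(8);
}

The inclusive variant is what lets open-coded "1 + prandom_u32_max(n)" expressions collapse into a single call, which is the kind of simplification the Coccinelle conversion applied tree-wide.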
435 lines · 11 KiB · C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/time_namespace.h>

#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/tlb.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <clocksource/hyperv_timer.h>

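/*
 * Re-include asm/vvar.h with EMIT_VVAR() redefined below, so that each vvar
 * declared there expands to a "<name>_offset" constant giving its byte
 * offset within the vvar page.
 */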
#undef _ASM_X86_VVAR_H
#define EMIT_VVAR(name, offset) \
        const size_t name ## _offset = offset;
#include <asm/vvar.h>

struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
        return (struct vdso_data *)(vvar_page + _vdso_data_offset);
}
#undef EMIT_VVAR

unsigned int vclocks_used __read_mostly;

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
        BUG_ON(image->size % PAGE_SIZE != 0);

        apply_alternatives((struct alt_instr *)(image->data + image->alt),
                           (struct alt_instr *)(image->data + image->alt +
                                                image->alt_len));
}

static const struct vm_special_mapping vvar_mapping;
struct linux_binprm;

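/*
 * Fault handler for the "[vdso]" text mapping: return the page of the
 * kernel-side vDSO image that backs the faulting offset.
 */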
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
                             struct vm_area_struct *vma, struct vm_fault *vmf)
{
        const struct vdso_image *image = vma->vm_mm->context.vdso_image;

        if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
                return VM_FAULT_SIGBUS;

        vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
        get_page(vmf->page);
        return 0;
}

static void vdso_fix_landing(const struct vdso_image *image,
                             struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
        if (in_ia32_syscall() && image == &vdso_image_32) {
                struct pt_regs *regs = current_pt_regs();
                unsigned long vdso_land = image->sym_int80_landing_pad;
                unsigned long old_land_addr = vdso_land +
                        (unsigned long)current->mm->context.vdso;

                /* Fixing userspace landing - look at do_fast_syscall_32 */
                if (regs->ip == old_land_addr)
                        regs->ip = new_vma->vm_start + vdso_land;
        }
#endif
}

static int vdso_mremap(const struct vm_special_mapping *sm,
                       struct vm_area_struct *new_vma)
{
        const struct vdso_image *image = current->mm->context.vdso_image;

        vdso_fix_landing(image, new_vma);
        current->mm->context.vdso = (void __user *)new_vma->vm_start;

        return 0;
}

#ifdef CONFIG_TIME_NS
/*
 * The vvar page layout depends on whether a task belongs to the root or
 * non-root time namespace. Whenever a task changes its namespace, the VVAR
 * page tables are cleared and then they will be re-faulted with a
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
        struct mm_struct *mm = task->mm;
        struct vm_area_struct *vma;
        VMA_ITERATOR(vmi, mm, 0);

        mmap_read_lock(mm);
        for_each_vma(vmi, vma) {
                unsigned long size = vma->vm_end - vma->vm_start;

                if (vma_is_special_mapping(vma, &vvar_mapping))
                        zap_page_range(vma, vma->vm_start, size);
        }
        mmap_read_unlock(mm);

        return 0;
}
#endif

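/*
 * Fault handler for the "[vvar]" data mapping: resolve the faulting offset
 * against the image's vvar/pvclock/hvclock/timens page symbols and insert
 * the matching pfn.
 */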
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
                             struct vm_area_struct *vma, struct vm_fault *vmf)
{
        const struct vdso_image *image = vma->vm_mm->context.vdso_image;
        unsigned long pfn;
        long sym_offset;

        if (!image)
                return VM_FAULT_SIGBUS;

        sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
                image->sym_vvar_start;

        /*
         * Sanity check: a symbol offset of zero means that the page
         * does not exist for this vdso image, not that the page is at
         * offset zero relative to the text mapping. This should be
         * impossible here, because sym_offset should only be zero for
         * the page past the end of the vvar mapping.
         */
        if (sym_offset == 0)
                return VM_FAULT_SIGBUS;

        if (sym_offset == image->sym_vvar_page) {
                struct page *timens_page = find_timens_vvar_page(vma);

                pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;

                /*
                 * If a task belongs to a time namespace then a namespace
                 * specific VVAR is mapped with the sym_vvar_page offset and
                 * the real VVAR page is mapped with the sym_timens_page
                 * offset.
                 * See also the comment near timens_setup_vdso_data().
                 */
                if (timens_page) {
                        unsigned long addr;
                        vm_fault_t err;

                        /*
                         * Optimization: inside a time namespace, pre-fault
                         * the real VVAR page too. Since the timens page only
                         * contains offsets for clocks on VVAR, it'll be
                         * faulted shortly by VDSO code anyway.
                         */
                        addr = vmf->address + (image->sym_timens_page - sym_offset);
                        err = vmf_insert_pfn(vma, addr, pfn);
                        if (unlikely(err & VM_FAULT_ERROR))
                                return err;

                        pfn = page_to_pfn(timens_page);
                }

                return vmf_insert_pfn(vma, vmf->address, pfn);
        } else if (sym_offset == image->sym_pvclock_page) {
                struct pvclock_vsyscall_time_info *pvti =
                        pvclock_get_pvti_cpu0_va();
                if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)) {
                        return vmf_insert_pfn_prot(vma, vmf->address,
                                                   __pa(pvti) >> PAGE_SHIFT,
                                                   pgprot_decrypted(vma->vm_page_prot));
                }
        } else if (sym_offset == image->sym_hvclock_page) {
                pfn = hv_get_tsc_pfn();

                if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
                        return vmf_insert_pfn(vma, vmf->address, pfn);
        } else if (sym_offset == image->sym_timens_page) {
                struct page *timens_page = find_timens_vvar_page(vma);

                if (!timens_page)
                        return VM_FAULT_SIGBUS;

                pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
                return vmf_insert_pfn(vma, vmf->address, pfn);
        }

        return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping vdso_mapping = {
        .name = "[vdso]",
        .fault = vdso_fault,
        .mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
        .name = "[vvar]",
        .fault = vvar_fault,
};

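/*
 * Layout note for map_vdso() below: image->sym_vvar_start is a negative
 * offset, so the vvar area occupies [addr, addr - sym_vvar_start) and the
 * vDSO text is mapped directly above it, at text_start = addr - sym_vvar_start.
 */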
/*
 * Add vdso and vvar mappings to current process.
 * @image - blob to map
 * @addr - request a specific address (zero to map at free addr)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long text_start;
        int ret = 0;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        addr = get_unmapped_area(NULL, addr,
                                 image->size - image->sym_vvar_start, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

        text_start = addr - image->sym_vvar_start;

        /*
         * MAYWRITE to allow gdb to COW and set breakpoints
         */
        vma = _install_special_mapping(mm,
                                       text_start,
                                       image->size,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &vdso_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

        vma = _install_special_mapping(mm,
                                       addr,
                                       -image->sym_vvar_start,
                                       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
                                       VM_PFNMAP,
                                       &vvar_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                do_munmap(mm, text_start, image->size, NULL);
        } else {
                current->mm->context.vdso = (void __user *)text_start;
                current->mm->context.vdso_image = image;
        }

up_fail:
        mmap_write_unlock(mm);
        return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset. This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top. This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
        unsigned long addr, end;
        unsigned offset;

        /*
         * Round up the start address. It can start out unaligned as a result
         * of stack start randomization.
         */
        start = PAGE_ALIGN(start);

        /* Round the lowest possible end address up to a PMD boundary. */
        end = (start + len + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;

        if (end > start) {
                offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
                addr = start + (offset << PAGE_SHIFT);
        } else {
                addr = start;
        }

        /*
         * Forcibly align the final address in case we have a hardware
         * issue that requires alignment for performance reasons.
         */
        addr = align_vdso_addr(addr);

        return addr;
}

static int map_vdso_randomized(const struct vdso_image *image)
{
        unsigned long addr = vdso_addr(current->mm->start_stack, image->size - image->sym_vvar_start);

        return map_vdso(image, addr);
}
#endif

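/*
 * Map a vDSO image at @addr on behalf of the ARCH_MAP_VDSO_* prctl path,
 * but only if no vdso/vvar mapping exists in this mm yet.
 */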
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        VMA_ITERATOR(vmi, mm, 0);

        mmap_write_lock(mm);
        /*
         * Check if we have already mapped a vdso blob - fail to prevent
         * userspace from abusing install_special_mapping, which may not
         * do accounting and rlimit right.
         * We could search vma near context.vdso, but it's a slowpath,
         * so let's explicitly check all VMAs to be completely sure.
         */
        for_each_vma(vmi, vma) {
                if (vma_is_special_mapping(vma, &vdso_mapping) ||
                    vma_is_special_mapping(vma, &vvar_mapping)) {
                        mmap_write_unlock(mm);
                        return -EEXIST;
                }
        }
        mmap_write_unlock(mm);

        return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
        if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
                return 0;

        return map_vdso(&vdso_image_32, 0);
}
#endif

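/*
 * Called from the ELF loader at execve() time to set up the vDSO for the
 * new process image.
 */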
#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        if (!vdso64_enabled)
                return 0;

        return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
                                       int uses_interp, bool x32)
{
#ifdef CONFIG_X86_X32_ABI
        if (x32) {
                if (!vdso64_enabled)
                        return 0;
                return map_vdso_randomized(&vdso_image_x32);
        }
#endif
#ifdef CONFIG_IA32_EMULATION
        return load_vdso32();
#else
        return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        return load_vdso32();
}
#endif

bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
{
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
        const struct vdso_image *image = current->mm->context.vdso_image;
        unsigned long vdso = (unsigned long) current->mm->context.vdso;

        if (in_ia32_syscall() && image == &vdso_image_32) {
                if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad ||
                    regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad)
                        return true;
        }
#endif
        return false;
}

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
        vdso64_enabled = simple_strtoul(s, NULL, 0);
        return 1;
}
__setup("vdso=", vdso_setup);

static int __init init_vdso(void)
{
        BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32);

        init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
        init_vdso_image(&vdso_image_x32);
#endif

        return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */