From 0710442a88d1c646d37ac83c52de85f456e99171 Mon Sep 17 00:00:00 2001
From: Alex Elder
Date: Sun, 14 Mar 2021 20:26:50 -0500
Subject: [PATCH 1/9] arm64: csum: cast to the proper type

The last line of ip_fast_csum() calls csum_fold(), forcing the type
of the argument passed to be u32. But csum_fold() takes a __wsum
argument (which is __u32 __bitwise for arm64). As long as we're
forcing the cast, cast it to the right type.

Signed-off-by: Alex Elder
Acked-by: Robin Murphy
Link: https://lore.kernel.org/r/20210315012650.1221328-1-elder@linaro.org
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/checksum.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
index 93a161b3bf3f..dc52b733675d 100644
--- a/arch/arm64/include/asm/checksum.h
+++ b/arch/arm64/include/asm/checksum.h
@@ -37,7 +37,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 	} while (--n > 0);
 
 	sum += ((sum >> 32) | (sum << 32));
-	return csum_fold((__force u32)(sum >> 32));
+	return csum_fold((__force __wsum)(sum >> 32));
 }
 #define ip_fast_csum ip_fast_csum

From c607ab4f916d4d5259072eca34055d3f5a795c21 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Fri, 19 Mar 2021 18:41:06 +0000
Subject: [PATCH 2/9] arm64: stacktrace: don't trace arch_stack_walk()

We recently converted arm64 to use arch_stack_walk() in commit:

  5fc57df2f6fd ("arm64: stacktrace: Convert to ARCH_STACKWALK")

The core stacktrace code expects that (when tracing the current task)
arch_stack_walk() starts a trace at its caller, and does not include
itself in the trace. However, arm64's arch_stack_walk() includes
itself, and so traces include one more entry than callers expect. The
core stacktrace code which calls arch_stack_walk() tries to skip a
number of entries to prevent itself appearing in a trace, and the
additional entry prevents skipping one of the core stacktrace
functions, leaving this in the trace unexpectedly.

We can fix this by having arm64's arch_stack_walk() begin the trace
with its caller. The first value returned by the trace will be
__builtin_return_address(0), i.e. the caller of arch_stack_walk(). The
first frame record to be unwound will be __builtin_frame_address(1),
i.e. the caller's frame record.

To prevent surprises, arch_stack_walk() is also marked noinline. While
__builtin_frame_address(1) is not safe in portable code, local GCC
developers have confirmed that it is safe on arm64. To find the
caller's frame record, the builtin can safely dereference the current
function's frame record or (in theory) could stash the original FP
into another GPR at function entry time, neither of which are
problematic.

Prior to this patch, the tracing code would unexpectedly show up in
traces of the current task, e.g.
| # cat /proc/self/stack
| [<0>] stack_trace_save_tsk+0x98/0x100
| [<0>] proc_pid_stack+0xb4/0x130
| [<0>] proc_single_show+0x60/0x110
| [<0>] seq_read_iter+0x230/0x4d0
| [<0>] seq_read+0xdc/0x130
| [<0>] vfs_read+0xac/0x1e0
| [<0>] ksys_read+0x6c/0xfc
| [<0>] __arm64_sys_read+0x20/0x30
| [<0>] el0_svc_common.constprop.0+0x60/0x120
| [<0>] do_el0_svc+0x24/0x90
| [<0>] el0_svc+0x2c/0x54
| [<0>] el0_sync_handler+0x1a4/0x1b0
| [<0>] el0_sync+0x170/0x180

After this patch, the tracing code will not show up in such traces:

| # cat /proc/self/stack
| [<0>] proc_pid_stack+0xb4/0x130
| [<0>] proc_single_show+0x60/0x110
| [<0>] seq_read_iter+0x230/0x4d0
| [<0>] seq_read+0xdc/0x130
| [<0>] vfs_read+0xac/0x1e0
| [<0>] ksys_read+0x6c/0xfc
| [<0>] __arm64_sys_read+0x20/0x30
| [<0>] el0_svc_common.constprop.0+0x60/0x120
| [<0>] do_el0_svc+0x24/0x90
| [<0>] el0_svc+0x2c/0x54
| [<0>] el0_sync_handler+0x1a4/0x1b0
| [<0>] el0_sync+0x170/0x180

Erring on the side of caution, I've given this a spin with a bunch of
toolchains, verifying the output of /proc/self/stack and checking that
the assembly looked sound. For GCC (where we require version 5.1.0 or
later) I tested with the kernel.org crosstool binaries for versions
5.5.0, 6.4.0, 6.5.0, 7.3.0, 7.5.0, 8.1.0, 8.3.0, 8.4.0, 9.2.0, and
10.1.0. For clang (where we require version 10.0.1 or later) I tested
with the llvm.org binary releases of 11.0.0 and 11.0.1.

Fixes: 5fc57df2f6fd ("arm64: stacktrace: Convert to ARCH_STACKWALK")
Signed-off-by: Mark Rutland
Cc: Catalin Marinas
Cc: Chen Jun
Cc: Marco Elver
Cc: Mark Brown
Cc: Will Deacon
Cc: # 5.10.x
Reviewed-by: Catalin Marinas
Reviewed-by: Mark Brown
Link: https://lore.kernel.org/r/20210319184106.5688-1-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/stacktrace.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index ad20981dfda4..d55bdfb7789c 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -194,8 +194,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 
 #ifdef CONFIG_STACKTRACE
 
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
-		     struct task_struct *task, struct pt_regs *regs)
+noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
+			      void *cookie, struct task_struct *task,
+			      struct pt_regs *regs)
 {
 	struct stackframe frame;
 
@@ -203,8 +204,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
 		start_backtrace(&frame, regs->regs[29], regs->pc);
 	else if (task == current)
 		start_backtrace(&frame,
-				(unsigned long)__builtin_frame_address(0),
-				(unsigned long)arch_stack_walk);
+				(unsigned long)__builtin_frame_address(1),
+				(unsigned long)__builtin_return_address(0));
 	else
 		start_backtrace(&frame, thread_saved_fp(task),
 				thread_saved_pc(task));

From e14a371f732b969d4dc881bdd874c93f1b4fdd30 Mon Sep 17 00:00:00 2001
From: Tom Saeger
Date: Tue, 16 Mar 2021 12:50:41 -0600
Subject: [PATCH 3/9] Documentation: arm64/acpi: clarify arm64 support of IBFT

In commit 94bccc340710 ("iscsi_ibft: make ISCSI_IBFT dependson ACPI
instead of ISCSI_IBFT_FIND") Kconfig was disentangled to make
ISCSI_IBFT selection not depend on x86.

Update arm64 acpi documentation, changing IBFT support status from
"Not Supported" to "Optional". Opportunistically re-flow paragraph for
changed lines.
Link: https://lore.kernel.org/lkml/1563475054-10680-1-git-send-email-thomas.tai@oracle.com/
Signed-off-by: Tom Saeger
Reviewed-by: Konrad Rzeszutek Wilk
Acked-by: Ard Biesheuvel
Acked-by: Lorenzo Pieralisi
Link: https://lore.kernel.org/r/9efc652df2b8d6b53d9acb170eb7c9ca3938dfef.1615920441.git.tom.saeger@oracle.com
Signed-off-by: Will Deacon
---
 Documentation/arm64/acpi_object_usage.rst | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/Documentation/arm64/acpi_object_usage.rst b/Documentation/arm64/acpi_object_usage.rst
index 377e9d224db0..0609da73970b 100644
--- a/Documentation/arm64/acpi_object_usage.rst
+++ b/Documentation/arm64/acpi_object_usage.rst
@@ -17,12 +17,12 @@ For ACPI on arm64, tables also fall into the following categories:
 
        -  Recommended: BERT, EINJ, ERST, HEST, PCCT, SSDT
 
-       -  Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IORT,
-          MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT, STAO,
-          TCPA, TPM2, UEFI, XENV
+       -  Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IBFT,
+          IORT, MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT,
+          STAO, TCPA, TPM2, UEFI, XENV
 
-       -  Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IBFT, IVRS, LPIT,
-          MSDM, OEMx, PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
+       -  Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IVRS, LPIT, MSDM, OEMx,
+          PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
 
 ====== ========================================================================
 Table  Usage for ARMv8 Linux

From d1296f1265f7ebb66c2bfab387bc1a0f969a5968 Mon Sep 17 00:00:00 2001
From: Bhaskar Chowdhury
Date: Sat, 20 Mar 2021 03:58:48 +0530
Subject: [PATCH 4/9] arm64: cpuinfo: Fix a typo

s/acurate/accurate/

Signed-off-by: Bhaskar Chowdhury
Acked-by: Randy Dunlap
Link: https://lore.kernel.org/r/20210319222848.29928-1-unixbhaskar@gmail.com
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/cpuinfo.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 77605aec25fe..51fcf99d5351 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -353,7 +353,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	 * with the CLIDR_EL1 fields to avoid triggering false warnings
 	 * when there is a mismatch across the CPUs. Keep track of the
 	 * effective value of the CTR_EL0 in our internal records for
-	 * acurate sanity check and feature enablement.
+	 * accurate sanity check and feature enablement.
 	 */
 	info->reg_ctr = read_cpuid_effective_cachetype();
 	info->reg_dczid = read_cpuid(DCZID_EL0);

From 141f8202cfa4192c3af79b6cbd68e7760bb01b5a Mon Sep 17 00:00:00 2001
From: Pavel Tatashin
Date: Fri, 19 Mar 2021 16:50:54 -0400
Subject: [PATCH 5/9] arm64: kdump: update ppos when reading elfcorehdr

The ppos points to a position in the old kernel memory (and, in the
case of arm64, in the crash kernel, since the elfcorehdr is passed as
a segment). The function should advance ppos by the amount that was
read.

By accident, this bug is not currently exposed, but other platforms do
update this value properly. So, fix it in the arm64 version of
elfcorehdr_read() as well.
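To make the failure mode concrete, consider a caller that reads the
header in two chunks. This is an illustrative sketch only; the helper
below is hypothetical, not the actual fs/proc/vmcore.c logic:

  /*
   * Hypothetical caller, for illustration: without "*ppos += count"
   * inside elfcorehdr_read(), pos never advances, so the second call
   * re-reads the first chunk instead of the bytes that follow it.
   */
  static void read_elfcorehdr_in_chunks(char *dst, size_t total)
  {
  	u64 pos = elfcorehdr_addr;	/* physical start of the header */
  	size_t half = total / 2;

  	elfcorehdr_read(dst, half, &pos);		 /* bytes [0, half) */
  	elfcorehdr_read(dst + half, total - half, &pos); /* bytes [half, total) */
  }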
Signed-off-by: Pavel Tatashin
Fixes: e62aaeac426a ("arm64: kdump: provide /proc/vmcore file")
Reviewed-by: Tyler Hicks
Link: https://lore.kernel.org/r/20210319205054.743368-1-pasha.tatashin@soleen.com
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/crash_dump.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c
index e6e284265f19..58303a9ec32c 100644
--- a/arch/arm64/kernel/crash_dump.c
+++ b/arch/arm64/kernel/crash_dump.c
@@ -64,5 +64,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
 	memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
+	*ppos += count;
+
 	return count;
 }

From ee7febce051945be28ad86d16a15886f878204de Mon Sep 17 00:00:00 2001
From: Pavel Tatashin
Date: Tue, 16 Feb 2021 10:03:51 -0500
Subject: [PATCH 6/9] arm64: mm: correct the inside linear map range during hotplug check

Memory hotplug may fail on systems with CONFIG_RANDOMIZE_BASE because
the linear map range is not checked correctly.

Because of randomization, the start physical address that the linear
map covers can actually lie at the end of the range. Check for that,
and if so, reduce the start to 0.

This can be verified on QEMU by setting kaslr-seed to ~0ul:

memstart_offset_seed = 0xffff
START: __pa(_PAGE_OFFSET(vabits_actual)) = ffff9000c0000000
END:   __pa(PAGE_END - 1) =               1000bfffffff

Signed-off-by: Pavel Tatashin
Fixes: 58284a901b42 ("arm64/mm: Validate hotplug range before creating linear mapping")
Tested-by: Tyler Hicks
Reviewed-by: Anshuman Khandual
Link: https://lore.kernel.org/r/20210216150351.129018-2-pasha.tatashin@soleen.com
Signed-off-by: Will Deacon
---
 arch/arm64/mm/mmu.c | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 7484ea4f6ba0..5d9550fdb9cf 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1448,6 +1448,22 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
 struct range arch_get_mappable_range(void)
 {
 	struct range mhp_range;
+	u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
+	u64 end_linear_pa = __pa(PAGE_END - 1);
+
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+		/*
+		 * Check for a wrap: because of the randomized linear
+		 * mapping, the start physical address can actually be
+		 * bigger than the end physical address. If so, set the
+		 * start to zero, because the [0, end_linear_pa] range
+		 * must still cover all addressable physical addresses.
+		 */
+		if (start_linear_pa > end_linear_pa)
+			start_linear_pa = 0;
+	}
+
+	WARN_ON(start_linear_pa > end_linear_pa);
 
 	/*
 	 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
@@ -1455,8 +1471,9 @@ struct range arch_get_mappable_range(void)
 	 * range which can be mapped inside this linear mapping range, must
 	 * also be derived from its end points.
 	 */
-	mhp_range.start = __pa(_PAGE_OFFSET(vabits_actual));
-	mhp_range.end =  __pa(PAGE_END - 1);
+	mhp_range.start = start_linear_pa;
+	mhp_range.end =  end_linear_pa;
+
 	return mhp_range;
 }

From 7011d72588d16a9e5f5d85acbc8b10019809599c Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Fri, 19 Mar 2021 12:01:28 +0000
Subject: [PATCH 7/9] kselftest/arm64: sve: Do not use non-canonical FFR register value

The "First Fault Register" (FFR) is an SVE register that mimics a
predicate register, but clears bits when a load or store fails to
handle an element of a vector.
The intended usage scenario is to initialise this register (using
SETFFR), then *read* it later on to learn about elements that failed
to load or store. Explicit writes to this register using the WRFFR
instruction are only supposed to *restore* values previously read from
the register (for context switching only).

As the manual describes, this register holds only certain values; it:

"... contains a monotonic predicate value, in which starting from bit 0
there are zero or more 1 bits, followed only by 0 bits in any remaining
bit positions."

Any other value is UNPREDICTABLE and is not supposed to be "restored"
into the register.

The SVE test currently tries to write a signature pattern into the
register, which is *not* a canonical FFR value. Apparently the
existing setups treat UNPREDICTABLE as "read-as-written", but a new
implementation actually only stores canonical values. As a
consequence, the sve-test fails immediately when comparing the FFR
value:

-----------
 # ./sve-test
Vector length:	128 bits
PID:	207
Mismatch: PID=207, iteration=0, reg=48
	Expected [cf00]
	Got      [0f00]
Aborted
-----------

Fix this by only populating the FFR with proper canonical values.

Effectively the requirement described above limits us to 17 unique
values over 16 bits worth of FFR, so we condense our signature down to
4 bits (2 bits from the PID, 2 bits from the generation) and generate
the canonical pattern from it. Any bits describing elements above the
minimum 128 bits are set to 0.

This aligns the FFR usage with the architecture and fixes the test on
microarchitectures implementing FFR in a more restricted way.

Signed-off-by: Andre Przywara
Reviewed-by: Mark Brown
Link: https://lore.kernel.org/r/20210319120128.29452-1-andre.przywara@arm.com
Signed-off-by: Will Deacon
---
 tools/testing/selftests/arm64/fp/sve-test.S | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/tools/testing/selftests/arm64/fp/sve-test.S b/tools/testing/selftests/arm64/fp/sve-test.S
index 9210691aa998..e3e08d9c7020 100644
--- a/tools/testing/selftests/arm64/fp/sve-test.S
+++ b/tools/testing/selftests/arm64/fp/sve-test.S
@@ -284,16 +284,28 @@ endfunction
 // Set up test pattern in the FFR
 // x0: pid
 // x2: generation
+//
+// We need to generate a canonical FFR value, which consists of a number of
+// low "1" bits, followed by a number of zeros. This gives us 17 unique values
+// per 16 bits of FFR, so we create a 4 bit signature out of the PID and
+// generation, and use that as the initial number of ones in the pattern.
+// We fill the upper lanes of FFR with zeros.
 // Beware: corrupts P0.
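// Worked example (illustrative, not part of the upstream patch): a PID
// with low bits 0b10 and a generation with low bits 0b01 produce the
// 4-bit signature 0b0110 = 6, so the halfword stored in ffrref is
// (1 << 6) - 1 = 0x003f: six 1 bits starting at bit 0 and zeros above,
// which is a canonical FFR value.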
 function setup_ffr
 	mov	x4, x30
 
-	bl	pattern
+	and	w0, w0, #0x3
+	bfi	w0, w2, #2, #2
+	mov	w1, #1
+	lsl	w1, w1, w0
+	sub	w1, w1, #1
+
 	ldr	x0, =ffrref
-	ldr	x1, =scratch
-	rdvl	x2, #1
-	lsr	x2, x2, #3
-	bl	memcpy
+	strh	w1, [x0], 2
+	rdvl	x1, #1
+	lsr	x1, x1, #3
+	sub	x1, x1, #2
+	bl	memclr
 
 	mov	x0, #0
 	ldr	x1, =ffrref

From baa96377bc7b5aa7b8cf038db09cb99642321490 Mon Sep 17 00:00:00 2001
From: Maninder Singh
Date: Wed, 24 Mar 2021 12:24:58 +0530
Subject: [PATCH 8/9] arm64/process.c: fix Wmissing-prototypes build warnings

Fix GCC warnings reported when building with "-Wmissing-prototypes":

arch/arm64/kernel/process.c:261:6: warning: no previous prototype for '__show_regs' [-Wmissing-prototypes]
  261 | void __show_regs(struct pt_regs *regs)
      |      ^~~~~~~~~~~
arch/arm64/kernel/process.c:307:6: warning: no previous prototype for '__show_regs_alloc_free' [-Wmissing-prototypes]
  307 | void __show_regs_alloc_free(struct pt_regs *regs)
      |      ^~~~~~~~~~~~~~~~~~~~~~
arch/arm64/kernel/process.c:365:5: warning: no previous prototype for 'arch_dup_task_struct' [-Wmissing-prototypes]
  365 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
      |     ^~~~~~~~~~~~~~~~~~~~
arch/arm64/kernel/process.c:546:41: warning: no previous prototype for '__switch_to' [-Wmissing-prototypes]
  546 | __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
      |                                         ^~~~~~~~~~~
arch/arm64/kernel/process.c:710:25: warning: no previous prototype for 'arm64_preempt_schedule_irq' [-Wmissing-prototypes]
  710 | asmlinkage void __sched arm64_preempt_schedule_irq(void)
      |                         ^~~~~~~~~~~~~~~~~~~~~~~~~~

Link: https://lore.kernel.org/lkml/202103192250.AennsfXM-lkp@intel.com
Reported-by: kernel test robot
Signed-off-by: Maninder Singh
Link: https://lore.kernel.org/r/1616568899-986-1-git-send-email-maninder1.s@samsung.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/processor.h   | 2 ++
 arch/arm64/include/asm/thread_info.h | 2 ++
 arch/arm64/kernel/process.c          | 2 ++
 3 files changed, 6 insertions(+)

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index ca2cd75d3286..efc10e9041a0 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -251,6 +251,8 @@ unsigned long get_wchan(struct task_struct *p);
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
 					 struct task_struct *next);
 
+asmlinkage void arm64_preempt_schedule_irq(void);
+
 #define task_pt_regs(p) \
 	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
 
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 9f4e3b266f21..6623c99f0984 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -55,6 +55,8 @@ void arch_setup_new_exec(void);
 #define arch_setup_new_exec arch_setup_new_exec
 
 void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst,
+			 struct task_struct *src);
 
 #endif
 
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 325c83b1a24d..6e60aa3b5ea9 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -57,6 +57,8 @@
 #include
 #include
 #include
+#include
+#include
 
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 #include

From 20109a859a9b514eb10c22b8a14b5704ffe93897 Mon Sep 17 00:00:00 2001
From: Rich Wiley
Date: Tue, 23 Mar 2021 17:28:09 -0700
Subject: [PATCH 9/9] arm64: kernel: disable CNP on Carmel

On NVIDIA Carmel cores, CNP behaves differently than it does on standard
ARM cores. On Carmel, if two cores have CNP enabled and share an L2
TLB entry created by core0 for a specific ASID, core1 may still see
the shared entry after issuing a non-shareable TLBI. On standard ARM
cores, that TLBI would invalidate the shared entry as well.

This causes issues with patchsets that attempt to do local TLBIs based
on cpumasks instead of broadcast TLBIs.

Avoid these issues by disabling CNP support for NVIDIA Carmel cores.

Signed-off-by: Rich Wiley
Link: https://lore.kernel.org/r/20210324002809.30271-1-rwiley@nvidia.com
[will: Fix pre-existing whitespace issue]
Signed-off-by: Will Deacon
---
 Documentation/arm64/silicon-errata.rst |  3 +++
 arch/arm64/Kconfig                     | 10 ++++++++++
 arch/arm64/include/asm/cpucaps.h       |  3 ++-
 arch/arm64/kernel/cpu_errata.c         |  8 ++++++++
 arch/arm64/kernel/cpufeature.c         |  5 ++++-
 5 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
index 719510247292..d410a47ffa57 100644
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -130,6 +130,9 @@ stable kernels.
 | Marvell        | ARM-MMU-500     | #582743         | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+| NVIDIA         | Carmel Core     | N/A             | NVIDIA_CARMEL_CNP_ERRATUM   |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
 | Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5656e7aacd69..e4e1b6550115 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -810,6 +810,16 @@ config QCOM_FALKOR_ERRATUM_E1041
 
 	  If unsure, say Y.
 
+config NVIDIA_CARMEL_CNP_ERRATUM
+	bool "NVIDIA Carmel CNP: CNP on Carmel semantically different than ARM cores"
+	default y
+	help
+	  If CNP is enabled on Carmel cores, non-shareable TLBIs on a core will
+	  not invalidate shared TLB entries installed by a different core, as
+	  they would on standard ARM cores.
+
+	  If unsure, say Y.
+
 config SOCIONEXT_SYNQUACER_PREITS
 	bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
 	default y

diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index b77d997b173b..c40f2490cd7b 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -66,7 +66,8 @@
 #define ARM64_WORKAROUND_1508412		58
 #define ARM64_HAS_LDAPR				59
 #define ARM64_KVM_PROTECTED_MODE		60
+#define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP	61
 
-#define ARM64_NCAPS				61
+#define ARM64_NCAPS				62
 
 #endif /* __ASM_CPUCAPS_H */

diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 506a1cd37973..e2c20c036442 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -525,6 +525,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 				  0, 0,
 				  1, 0),
 	},
+#endif
+#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
+	{
+		/* NVIDIA Carmel */
+		.desc = "NVIDIA Carmel CNP erratum",
+		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
+		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+	},
 #endif
 	{
 	}

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 066030717a4c..2a5d9854d664 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1321,7 +1321,10 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
 	 * may share TLB entries with a CPU stuck in the crashed
 	 * kernel.
 	 */
-	 if (is_kdump_kernel())
+	if (is_kdump_kernel())
+		return false;
+
+	if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
 		return false;
 
 	return has_cpuid_feature(entry, scope);
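Taken together, the flow this last patch creates is: the errata table
entry matches the Carmel MIDR and sets a capability bit, and the CNP
probe then consults that bit. The following is a hedged sketch of that
flow in plain C, with simplified, hypothetical structures rather than
the kernel's actual cpufeature machinery:

  #include <stdbool.h>
  #include <stdint.h>

  /*
   * Simplified model, for illustration only. A real check would also
   * match the MIDR part number, not just the implementer field.
   */
  #define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP	61

  static uint64_t caps;	/* stand-in for the kernel's capability bitmap */

  static void detect_errata(uint32_t midr)
  {
  	/* MIDR_EL1 implementer field 0x4e is 'N', i.e. NVIDIA. */
  	if ((midr >> 24) == 0x4e)
  		caps |= 1ULL << ARM64_WORKAROUND_NVIDIA_CARMEL_CNP;
  }

  static bool has_useable_cnp(bool is_kdump, bool cpuid_reports_cnp)
  {
  	if (is_kdump)
  		return false;	/* may share TLB entries with crashed kernel */

  	if (caps & (1ULL << ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
  		return false;	/* Carmel: local TLBIs miss shared entries */

  	return cpuid_reports_cnp;
  }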